author		Dave Airlie <airlied@redhat.com>	2012-05-02 04:21:50 -0400
committer	Dave Airlie <airlied@redhat.com>	2012-05-02 04:22:29 -0400
commit		5bc69bf9aeb73547cad8e1ce683a103fe9728282
tree		d3ef275532fc4391cb645f8b4d45d39d7fbb73f4
parent		c6543a6e64ad8e456674a1c4a01dd024e38b665f
parent		a85d4bcb8a0cd5b3c754f98ff91ef2b9b3a73bc5
Merge tag 'drm-intel-next-2012-04-23' of git://people.freedesktop.org/~danvet/drm-intel into drm-core-next
Daniel Vetter writes:

A new drm-intel-next pull. Highlights:

- More gmbus patches from Daniel Kurtz, I think gmbus is now ready,
  all known issues fixed.
- Fencing cleanup and pipelined fencing removal from Chris.
- rc6 residency interface from Ben, useful for powertop.
- Cleanups and code reorg around the ringbuffer code (Ben&me).
- Use hw semaphores in the pageflip code from Ben.
- More vlv stuff from Jesse, unfortunately his vlv cpu is doa, so less
  merged than I've hoped for - we still have the unused function warning :(
- More hsw patches from Eugeni, again, not yet enabled fully.
- intel_pm.c refactoring from Eugeni.
- Ironlake sprite support from Chris.
- And various smaller improvements/fixes all over the place.

Note that this pull request also contains a backmerge of -rc3 to sort
out a few things in -next. I've also had to frob the shortlog a bit to
exclude anything that -rc3 brings in with this pull.

Regression wise we have a few strange bugs going on, but for all of
them closer inspection revealed that they've been pre-existing, just
now slightly more likely to be hit. And for most of them we have a
patch already. Otherwise QA has not reported any regressions, and I'm
also not aware of anything bad happening in 3.4.

* tag 'drm-intel-next-2012-04-23' of git://people.freedesktop.org/~danvet/drm-intel: (420 commits)
  drm/i915: rc6 residency (fix the fix)
  drm/i915/tv: fix open-coded ARRAY_SIZE.
  drm/i915: invalidate render cache on gen2
  drm/i915: Silence the change of LVDS sync polarity
  drm/i915: add generic power management initialization
  drm/i915: move clock gating functionality into intel_pm module
  drm/i915: move emon functionality into intel_pm module
  drm/i915: move drps, rps and rc6-related functions to intel_pm
  drm/i915: fix line breaks in intel_pm
  drm/i915: move watermarks settings into intel_pm module
  drm/i915: move fbc-related functionality into intel_pm module
  drm/i915: Refactor get_fence() to use the common fence writing routine
  drm/i915: Refactor fence clearing to use the common fence writing routine
  drm/i915: Refactor put_fence() to use the common fence writing routine
  drm/i915: Prepare to consolidate fence writing
  drm/i915: Remove the unsightly "optimisation" from flush_fence()
  drm/i915: Simplify fence finding
  drm/i915: Discard the unused obj->last_fenced_ring
  drm/i915: Remove unused ring->setup_seqno
  drm/i915: Remove fence pipelining
  ...
-rw-r--r--	Documentation/ABI/stable/sysfs-driver-usb-usbtmc	14
-rw-r--r--	Documentation/ABI/testing/sysfs-block-rssd	18
-rw-r--r--	Documentation/ABI/testing/sysfs-cfq-target-latency	8
-rw-r--r--	Documentation/cgroups/memory.txt	5
-rw-r--r--	Documentation/feature-removal-schedule.txt	8
-rw-r--r--	Documentation/filesystems/vfs.txt	2
-rw-r--r--	Documentation/sound/alsa/HD-Audio-Models.txt	4
-rw-r--r--	Documentation/usb/URB.txt	22
-rw-r--r--	Documentation/usb/usbmon.txt	6
-rw-r--r--	MAINTAINERS	21
-rw-r--r--	Makefile	2
-rw-r--r--	arch/alpha/include/asm/atomic.h	68
-rw-r--r--	arch/alpha/include/asm/cmpxchg.h	71
-rw-r--r--	arch/alpha/include/asm/xchg.h	4
-rw-r--r--	arch/arm/boot/compressed/atags_to_fdt.c	2
-rw-r--r--	arch/arm/boot/compressed/head.S	2
-rw-r--r--	arch/arm/boot/dts/at91sam9g20.dtsi	1
-rw-r--r--	arch/arm/boot/dts/at91sam9g45.dtsi	1
-rw-r--r--	arch/arm/boot/dts/at91sam9x5.dtsi	1
-rw-r--r--	arch/arm/boot/dts/db8500.dtsi	1
-rw-r--r--	arch/arm/boot/dts/highbank.dts	1
-rw-r--r--	arch/arm/common/vic.c	9
-rw-r--r--	arch/arm/include/asm/jump_label.h	2
-rw-r--r--	arch/arm/kernel/setup.c	16
-rw-r--r--	arch/arm/kernel/smp_twd.c	6
-rw-r--r--	arch/arm/mach-exynos/Kconfig	2
-rw-r--r--	arch/arm/mach-exynos/include/mach/irqs.h	2
-rw-r--r--	arch/arm/mach-exynos/include/mach/map.h	4
-rw-r--r--	arch/arm/mach-exynos/include/mach/regs-clock.h	6
-rw-r--r--	arch/arm/mach-exynos/mach-exynos5-dt.c	2
-rw-r--r--	arch/arm/mach-exynos/mach-nuri.c	46
-rw-r--r--	arch/arm/mach-exynos/mach-universal_c210.c	2
-rw-r--r--	arch/arm/mach-msm/board-halibut.c	3
-rw-r--r--	arch/arm/mach-msm/board-trout-panel.c	1
-rw-r--r--	arch/arm/mach-msm/board-trout.c	1
-rw-r--r--	arch/arm/mach-msm/proc_comm.c	2
-rw-r--r--	arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c	80
-rw-r--r--	arch/arm/mach-omap2/clock.c	5
-rw-r--r--	arch/arm/mach-omap2/clock.h	8
-rw-r--r--	arch/arm/mach-s5pv210/dma.c	2
-rw-r--r--	arch/arm/mach-s5pv210/mach-aquila.c	4
-rw-r--r--	arch/arm/mach-s5pv210/mach-goni.c	4
-rw-r--r--	arch/arm/mm/Kconfig	2
-rw-r--r--	arch/arm/mm/fault.c	2
-rw-r--r--	arch/arm/mm/nommu.c	2
-rw-r--r--	arch/arm/mm/proc-v7.S	12
-rw-r--r--	arch/arm/plat-omap/clock.c	26
-rw-r--r--	arch/arm/plat-omap/include/plat/clock.h	10
-rw-r--r--	arch/arm/plat-samsung/Kconfig	1
-rw-r--r--	arch/c6x/include/asm/irq.h	4
-rw-r--r--	arch/c6x/kernel/irq.c	13
-rw-r--r--	arch/ia64/include/asm/cmpxchg.h	148
-rw-r--r--	arch/ia64/include/asm/intrinsics.h	114
-rw-r--r--	arch/powerpc/include/asm/irq.h	2
-rw-r--r--	arch/powerpc/kernel/entry_32.S	39
-rw-r--r--	arch/powerpc/kernel/irq.c	6
-rw-r--r--	arch/powerpc/kernel/process.c	4
-rw-r--r--	arch/powerpc/platforms/cell/axon_msi.c	2
-rw-r--r--	arch/powerpc/platforms/cell/beat_interrupt.c	2
-rw-r--r--	arch/powerpc/platforms/powermac/smp.c	2
-rw-r--r--	arch/powerpc/platforms/ps3/interrupt.c	3
-rw-r--r--	arch/sparc/kernel/ds.c	2
-rw-r--r--	arch/sparc/kernel/leon_pci.c	13
-rw-r--r--	arch/sparc/kernel/rtrap_64.S	7
-rw-r--r--	arch/sparc/mm/fault_32.c	37
-rw-r--r--	arch/sparc/mm/fault_64.c	37
-rw-r--r--	arch/tile/kernel/proc.c	4
-rw-r--r--	arch/tile/kernel/smpboot.c	2
-rw-r--r--	arch/um/drivers/cow.h	35
-rw-r--r--	arch/um/drivers/cow_user.c	43
-rw-r--r--	arch/um/drivers/mconsole_kern.c	1
-rw-r--r--	arch/um/include/asm/Kbuild	3
-rw-r--r--	arch/um/kernel/Makefile	7
-rw-r--r--	arch/um/kernel/process.c	6
-rw-r--r--	arch/um/kernel/skas/mmu.c	1
-rw-r--r--	arch/x86/Makefile.um	3
-rw-r--r--	arch/x86/include/asm/cmpxchg.h	4
-rw-r--r--	arch/x86/include/asm/uaccess.h	2
-rw-r--r--	arch/x86/include/asm/uaccess_32.h	5
-rw-r--r--	arch/x86/include/asm/uaccess_64.h	4
-rw-r--r--	arch/x86/kernel/vsyscall_64.c	6
-rw-r--r--	arch/x86/lib/usercopy.c	103
-rw-r--r--	arch/x86/lib/usercopy_32.c	87
-rw-r--r--	arch/x86/lib/usercopy_64.c	49
-rw-r--r--	arch/x86/um/asm/barrier.h	75
-rw-r--r--	arch/x86/um/asm/system.h	135
-rw-r--r--	block/blk-core.c	5
-rw-r--r--	block/blk-throttle.c	2
-rw-r--r--	block/cfq-iosched.c	10
-rw-r--r--	crypto/Kconfig	6
-rw-r--r--	drivers/amba/bus.c	42
-rw-r--r--	drivers/base/soc.c	4
-rw-r--r--	drivers/bcma/Kconfig	2
-rw-r--r--	drivers/bcma/driver_pci_host.c	1
-rw-r--r--	drivers/block/cciss_scsi.c	3
-rw-r--r--	drivers/block/mtip32xx/Kconfig	2
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.c	860
-rw-r--r--	drivers/block/mtip32xx/mtip32xx.h	58
-rw-r--r--	drivers/block/virtio_blk.c	1
-rw-r--r--	drivers/block/xen-blkback/blkback.c	50
-rw-r--r--	drivers/block/xen-blkback/common.h	6
-rw-r--r--	drivers/block/xen-blkback/xenbus.c	89
-rw-r--r--	drivers/block/xen-blkfront.c	41
-rw-r--r--	drivers/bluetooth/ath3k.c	4
-rw-r--r--	drivers/bluetooth/btusb.c	5
-rw-r--r--	drivers/bluetooth/hci_ldisc.c	2
-rw-r--r--	drivers/char/hpet.c	4
-rw-r--r--	drivers/char/random.c	11
-rw-r--r--	drivers/clocksource/acpi_pm.c	24
-rw-r--r--	drivers/cpufreq/Kconfig.arm	1
-rw-r--r--	drivers/dma/dmaengine.c	14
-rw-r--r--	drivers/dma/ioat/dma.c	16
-rw-r--r--	drivers/dma/ioat/dma.h	6
-rw-r--r--	drivers/dma/ioat/dma_v2.c	12
-rw-r--r--	drivers/dma/ioat/dma_v2.h	4
-rw-r--r--	drivers/dma/ioat/dma_v3.c	49
-rw-r--r--	drivers/dma/iop-adma.c	4
-rw-r--r--	drivers/gpio/Kconfig	2
-rw-r--r--	drivers/gpio/gpio-adp5588.c	2
-rw-r--r--	drivers/gpio/gpio-samsung.c	16
-rw-r--r--	drivers/gpio/gpio-sodaville.c	23
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_buf.c	47
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_core.c	14
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.h	10
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fimd.c	20
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.c	45
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.h	2
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_hdmi.c	107
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_hdmi.h	23
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_plane.c	4
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_vidi.c	20
-rw-r--r--	drivers/gpu/drm/exynos/exynos_hdmi.c	42
-rw-r--r--	drivers/gpu/drm/exynos/exynos_mixer.c	40
-rw-r--r--	drivers/gpu/drm/i915/Makefile	2
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	13
-rw-r--r--	drivers/gpu/drm/i915/i915_dma.c	6
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	22
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	27
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	626
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_execbuffer.c	81
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_gtt.c	10
-rw-r--r--	drivers/gpu/drm/i915/i915_ioc32.c	5
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	136
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	48
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c	12
-rw-r--r--	drivers/gpu/drm/i915/i915_sysfs.c	111
-rw-r--r--	drivers/gpu/drm/i915/i915_trace_points.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_acpi.c	1
-rw-r--r--	drivers/gpu/drm/i915/intel_crt.c	54
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	3268
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	57
-rw-r--r--	drivers/gpu/drm/i915/intel_drv.h	31
-rw-r--r--	drivers/gpu/drm/i915/intel_fb.c	2
-rw-r--r--	drivers/gpu/drm/i915/intel_i2c.c	167
-rw-r--r--	drivers/gpu/drm/i915/intel_pm.c	3075
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	528
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.h	3
-rw-r--r--	drivers/gpu/drm/i915/intel_sdvo.c	26
-rw-r--r--	drivers/gpu/drm/i915/intel_sprite.c	78
-rw-r--r--	drivers/gpu/drm/i915/intel_tv.c	2
-rw-r--r--	drivers/gpu/drm/radeon/atombios_encoders.c	4
-rw-r--r--	drivers/gpu/drm/radeon/r100.c	2
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	2
-rw-r--r--	drivers/gpu/drm/radeon/r600_cp.c	6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_clocks.c	24
-rw-r--r--	drivers/gpu/drm/radeon/radeon_combios.c	8
-rw-r--r--	drivers/gpu/drm/radeon/radeon_i2c.c	4
-rw-r--r--	drivers/gpu/drm/radeon/radeon_legacy_encoders.c	12
-rw-r--r--	drivers/gpu/drm/savage/savage_state.c	6
-rw-r--r--	drivers/hwmon/acpi_power_meter.c	1
-rw-r--r--	drivers/hwmon/pmbus/pmbus_core.c	17
-rw-r--r--	drivers/hwmon/smsc47b397.c	14
-rw-r--r--	drivers/hwmon/smsc47m1.c	19
-rw-r--r--	drivers/i2c/busses/i2c-designware-pcidrv.c	1
-rw-r--r--	drivers/infiniband/core/sysfs.c	9
-rw-r--r--	drivers/infiniband/hw/mlx4/main.c	5
-rw-r--r--	drivers/infiniband/ulp/srpt/ib_srpt.c	1
-rw-r--r--	drivers/input/misc/da9052_onkey.c	3
-rw-r--r--	drivers/input/mouse/elantech.c	10
-rw-r--r--	drivers/input/mouse/gpio_mouse.c	2
-rw-r--r--	drivers/input/mouse/sentelic.c	8
-rw-r--r--	drivers/input/mouse/trackpoint.c	14
-rw-r--r--	drivers/input/touchscreen/tps6507x-ts.c	4
-rw-r--r--	drivers/isdn/gigaset/interface.c	2
-rw-r--r--	drivers/md/bitmap.c	5
-rw-r--r--	drivers/md/raid1.c	3
-rw-r--r--	drivers/md/raid10.c	4
-rw-r--r--	drivers/media/dvb/dvb-core/dvb_frontend.c	12
-rw-r--r--	drivers/media/dvb/dvb-usb/it913x.c	54
-rw-r--r--	drivers/media/video/ivtv/ivtv-ioctl.c	4
-rw-r--r--	drivers/media/video/uvc/uvc_video.c	50
-rw-r--r--	drivers/mfd/db8500-prcmu.c	1
-rw-r--r--	drivers/mtd/mtdchar.c	20
-rw-r--r--	drivers/net/wireless/ath/ath9k/main.c	8
-rw-r--r--	drivers/net/wireless/rt2x00/rt2x00dev.c	6
-rw-r--r--	drivers/net/wireless/rtlwifi/base.c	5
-rw-r--r--	drivers/net/wireless/rtlwifi/pci.c	7
-rw-r--r--	drivers/net/wireless/rtlwifi/rtl8192de/sw.c	6
-rw-r--r--	drivers/net/wireless/rtlwifi/usb.c	34
-rw-r--r--	drivers/net/wireless/rtlwifi/wifi.h	6
-rw-r--r--	drivers/of/gpio.c	2
-rw-r--r--	drivers/pci/pci.c	57
-rw-r--r--	drivers/regulator/anatop-regulator.c	6
-rw-r--r--	drivers/rtc/rtc-efi.c	1
-rw-r--r--	drivers/rtc/rtc-pl031.c	3
-rw-r--r--	drivers/rtc/rtc-r9701.c	22
-rw-r--r--	drivers/rtc/rtc-s3c.c	31
-rw-r--r--	drivers/rtc/rtc-twl.c	43
-rw-r--r--	drivers/scsi/scsi_error.c	2
-rw-r--r--	drivers/spi/spi-davinci.c	6
-rw-r--r--	drivers/spi/spi-fsl-spi.c	4
-rw-r--r--	drivers/spi/spi-imx.c	12
-rw-r--r--	drivers/spi/spi-pl022.c	2
-rw-r--r--	drivers/staging/android/Kconfig	3
-rw-r--r--	drivers/staging/android/lowmemorykiller.c	48
-rw-r--r--	drivers/staging/android/persistent_ram.c	11
-rw-r--r--	drivers/staging/android/timed_gpio.c	27
-rw-r--r--	drivers/staging/iio/inkern.c	1
-rw-r--r--	drivers/staging/iio/magnetometer/ak8975.c	8
-rw-r--r--	drivers/staging/iio/magnetometer/hmc5843.c	4
-rw-r--r--	drivers/staging/media/as102/as102_fw.c	2
-rw-r--r--	drivers/staging/omapdrm/omap_drv.c	7
-rw-r--r--	drivers/staging/ozwpan/TODO	4
-rw-r--r--	drivers/staging/ramster/Kconfig	6
-rw-r--r--	drivers/staging/rts_pstor/ms.c	3
-rw-r--r--	drivers/staging/rts_pstor/rtsx.c	5
-rw-r--r--	drivers/staging/rts_pstor/rtsx_transport.c	11
-rw-r--r--	drivers/staging/sep/sep_main.c	6
-rw-r--r--	drivers/staging/vme/devices/vme_pio2_core.c	4
-rw-r--r--	drivers/staging/vt6655/key.c	3
-rw-r--r--	drivers/staging/vt6656/ioctl.c	3
-rw-r--r--	drivers/staging/vt6656/key.c	3
-rw-r--r--	drivers/staging/xgifb/vb_init.c	2
-rw-r--r--	drivers/staging/xgifb/vb_setmode.c	7
-rw-r--r--	drivers/staging/xgifb/vb_table.h	11
-rw-r--r--	drivers/staging/zsmalloc/zsmalloc-main.c	30
-rw-r--r--	drivers/tty/serial/8250/8250.c	12
-rw-r--r--	drivers/tty/serial/8250/8250_pci.c	16
-rw-r--r--	drivers/tty/serial/Kconfig	2
-rw-r--r--	drivers/tty/serial/altera_uart.c	4
-rw-r--r--	drivers/tty/serial/amba-pl011.c	8
-rw-r--r--	drivers/tty/serial/atmel_serial.c	4
-rw-r--r--	drivers/tty/serial/omap-serial.c	43
-rw-r--r--	drivers/tty/serial/pch_uart.c	8
-rw-r--r--	drivers/tty/serial/samsung.c	1
-rw-r--r--	drivers/tty/vt/vt.c	3
-rw-r--r--	drivers/usb/Kconfig	16
-rw-r--r--	drivers/usb/core/driver.c	9
-rw-r--r--	drivers/usb/core/hcd.c	12
-rw-r--r--	drivers/usb/core/hub.c	16
-rw-r--r--	drivers/usb/core/message.c	11
-rw-r--r--	drivers/usb/core/urb.c	12
-rw-r--r--	drivers/usb/gadget/inode.c	1
-rw-r--r--	drivers/usb/host/ehci-hcd.c	3
-rw-r--r--	drivers/usb/host/ehci-hub.c	31
-rw-r--r--	drivers/usb/host/ehci-tegra.c	2
-rw-r--r--	drivers/usb/host/ehci.h	2
-rw-r--r--	drivers/usb/host/pci-quirks.c	10
-rw-r--r--	drivers/usb/host/uhci-hub.c	5
-rw-r--r--	drivers/usb/host/xhci-dbg.c	2
-rw-r--r--	drivers/usb/host/xhci-ext-caps.h	5
-rw-r--r--	drivers/usb/host/xhci-mem.c	9
-rw-r--r--	drivers/usb/host/xhci-pci.c	4
-rw-r--r--	drivers/usb/host/xhci-ring.c	6
-rw-r--r--	drivers/usb/host/xhci.c	12
-rw-r--r--	drivers/usb/host/xhci.h	4
-rw-r--r--	drivers/usb/serial/bus.c	5
-rw-r--r--	drivers/usb/serial/ftdi_sio.c	36
-rw-r--r--	drivers/usb/serial/metro-usb.c	6
-rw-r--r--	drivers/usb/serial/option.c	1
-rw-r--r--	drivers/usb/serial/pl2303.c	2
-rw-r--r--	drivers/usb/serial/sierra.c	1
-rw-r--r--	drivers/usb/serial/usb-serial.c	31
-rw-r--r--	drivers/usb/storage/usb.c	30
-rw-r--r--	drivers/video/au1100fb.c	5
-rw-r--r--	drivers/video/au1200fb.c	2
-rw-r--r--	drivers/video/kyro/STG4000Reg.h	376
-rw-r--r--	drivers/video/msm/mddi.c	8
-rw-r--r--	drivers/video/uvesafb.c	11
-rw-r--r--	fs/btrfs/compression.c	2
-rw-r--r--	fs/btrfs/extent-tree.c	11
-rw-r--r--	fs/btrfs/extent_io.c	6
-rw-r--r--	fs/btrfs/free-space-cache.c	9
-rw-r--r--	fs/btrfs/scrub.c	4
-rw-r--r--	fs/btrfs/transaction.c	9
-rw-r--r--	fs/btrfs/volumes.c	20
-rw-r--r--	fs/gfs2/Kconfig	7
-rw-r--r--	fs/gfs2/aops.c	4
-rw-r--r--	fs/gfs2/bmap.c	6
-rw-r--r--	fs/gfs2/dir.c	4
-rw-r--r--	fs/gfs2/inode.c	13
-rw-r--r--	fs/gfs2/rgrp.c	8
-rw-r--r--	fs/gfs2/xattr.c	12
-rw-r--r--	fs/libfs.c	1
-rw-r--r--	fs/proc/stat.c	34
-rw-r--r--	fs/sysfs/dir.c	5
-rw-r--r--	fs/sysfs/group.c	6
-rw-r--r--	include/drm/exynos_drm.h	5
-rw-r--r--	include/linux/amba/bus.h	7
-rw-r--r--	include/linux/amba/pl022.h	2
-rw-r--r--	include/linux/blkdev.h	18
-rw-r--r--	include/linux/dmaengine.h	1
-rw-r--r--	include/linux/irq.h	5
-rw-r--r--	include/linux/irqdomain.h	12
-rw-r--r--	include/linux/kconfig.h	22
-rw-r--r--	include/linux/netfilter_ipv6/ip6_tables.h	12
-rw-r--r--	include/linux/serial_core.h	2
-rw-r--r--	include/linux/skbuff.h	13
-rw-r--r--	include/linux/stddef.h	8
-rw-r--r--	include/linux/types.h	6
-rw-r--r--	include/linux/usb/serial.h	8
-rw-r--r--	include/linux/vgaarb.h	2
-rw-r--r--	include/net/bluetooth/hci.h	3
-rw-r--r--	include/net/bluetooth/hci_core.h	12
-rw-r--r--	include/net/bluetooth/mgmt.h	2
-rw-r--r--	include/net/mac80211.h	2
-rw-r--r--	include/scsi/scsi_cmnd.h	3
-rw-r--r--	include/sound/core.h	10
-rw-r--r--	kernel/cred.c	2
-rw-r--r--	kernel/irq/Kconfig	2
-rw-r--r--	kernel/irq/irqdomain.c	47
-rw-r--r--	kernel/irq_work.c	1
-rw-r--r--	kernel/itimer.c	8
-rw-r--r--	kernel/panic.c	2
-rw-r--r--	kernel/time/Kconfig	4
-rw-r--r--	kernel/time/tick-broadcast.c	4
-rw-r--r--	kernel/time/tick-sched.c	4
-rw-r--r--	lib/kobject.c	14
-rw-r--r--	mm/hugetlb.c	2
-rw-r--r--	mm/memcontrol.c	6
-rw-r--r--	mm/vmscan.c	7
-rw-r--r--	net/bluetooth/hci_core.c	7
-rw-r--r--	net/bluetooth/l2cap_core.c	3
-rw-r--r--	net/bluetooth/l2cap_sock.c	5
-rw-r--r--	net/bluetooth/mgmt.c	13
-rw-r--r--	net/bridge/br_multicast.c	81
-rw-r--r--	net/bridge/br_private.h	4
-rw-r--r--	net/core/skbuff.c	4
-rw-r--r--	net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c	12
-rw-r--r--	net/ipv4/tcp.c	15
-rw-r--r--	net/ipv4/tcp_input.c	9
-rw-r--r--	net/ipv4/tcp_ipv4.c	2
-rw-r--r--	net/ipv4/tcp_output.c	2
-rw-r--r--	net/ipv6/netfilter/ip6_tables.c	14
-rw-r--r--	net/ipv6/tcp_ipv6.c	2
-rw-r--r--	net/mac80211/mlme.c	3
-rw-r--r--	net/netfilter/nf_conntrack_core.c	2
-rw-r--r--	net/netfilter/nf_conntrack_proto_tcp.c	4
-rw-r--r--	net/nfc/llcp/commands.c	4
-rw-r--r--	net/wireless/nl80211.c	31
-rw-r--r--	net/wireless/wext-core.c	6
-rw-r--r--	scripts/kconfig/confdata.c	38
-rw-r--r--	scripts/mod/modpost.c	7
-rw-r--r--	scripts/mod/modpost.h	1
-rw-r--r--	security/smack/smack_lsm.c	19
-rw-r--r--	sound/isa/sscape.c	6
-rw-r--r--	sound/oss/msnd_pinnacle.c	8
-rw-r--r--	sound/pci/Kconfig	4
-rw-r--r--	sound/pci/asihpi/hpi_internal.h	4
-rw-r--r--	sound/pci/asihpi/hpios.c	10
-rw-r--r--	sound/pci/hda/hda_codec.h	3
-rw-r--r--	sound/pci/hda/hda_eld.c	6
-rw-r--r--	sound/pci/hda/hda_proc.c	13
-rw-r--r--	sound/pci/hda/patch_conexant.c	108
-rw-r--r--	sound/pci/hda/patch_hdmi.c	9
-rw-r--r--	sound/pci/hda/patch_realtek.c	36
-rw-r--r--	sound/soc/codecs/ak4642.c	2
-rw-r--r--	sound/soc/codecs/sgtl5000.c	25
-rw-r--r--	sound/soc/imx/imx-audmux.c	5
-rw-r--r--	sound/soc/pxa/pxa2xx-i2s.c	1
-rw-r--r--	sound/soc/soc-core.c	2
-rw-r--r--	sound/soc/tegra/tegra_i2s.c	6
-rw-r--r--	sound/soc/tegra/tegra_spdif.c	4
-rw-r--r--	tools/perf/builtin-sched.c	1
-rw-r--r--	tools/perf/builtin-top.c	36
-rw-r--r--	tools/perf/util/annotate.c	16
-rw-r--r--	tools/perf/util/hist.c	12
-rw-r--r--	tools/perf/util/map.c	1
-rw-r--r--	tools/perf/util/map.h	1
-rw-r--r--	tools/perf/util/session.c	12
-rw-r--r--	tools/perf/util/ui/browsers/hists.c	3
381 files changed, 7668 insertions, 6645 deletions
diff --git a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
index 2a7f9a00cb0a..e960cd027e1e 100644
--- a/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
+++ b/Documentation/ABI/stable/sysfs-driver-usb-usbtmc
@@ -1,5 +1,5 @@
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/interface_capabilities
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/device_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/device_capabilities
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -12,8 +12,8 @@ Description:
 	The files are read only.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_interface_capabilities
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_device_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/usb488_interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/*/usb488_device_capabilities
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -27,7 +27,7 @@ Description:
 	The files are read only.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermChar
+What:		/sys/bus/usb/drivers/usbtmc/*/TermChar
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -40,7 +40,7 @@ Description:
 	sent to the device or not.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermCharEnabled
+What:		/sys/bus/usb/drivers/usbtmc/*/TermCharEnabled
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
@@ -51,7 +51,7 @@ Description:
 	published by the USB-IF.
 
 
-What:		/sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
+What:		/sys/bus/usb/drivers/usbtmc/*/auto_abort
 Date:		August 2008
 Contact:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 Description:
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
new file mode 100644
index 000000000000..d535757799fe
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -0,0 +1,18 @@
+What:		/sys/block/rssd*/registers
+Date:		March 2012
+KernelVersion:	3.3
+Contact:	Asai Thambi S P <asamymuthupa@micron.com>
+Description:	This is a read-only file. Dumps below driver information and
+		hardware registers.
+			- S ACTive
+			- Command Issue
+			- Allocated
+			- Completed
+			- PORT IRQ STAT
+			- HOST IRQ STAT
+
+What:		/sys/block/rssd*/status
+Date:		April 2012
+KernelVersion:	3.4
+Contact:	Asai Thambi S P <asamymuthupa@micron.com>
+Description:	This is a read-only file. Indicates the status of the device.
diff --git a/Documentation/ABI/testing/sysfs-cfq-target-latency b/Documentation/ABI/testing/sysfs-cfq-target-latency
new file mode 100644
index 000000000000..df0f7828c5e3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-cfq-target-latency
@@ -0,0 +1,8 @@
+What:		/sys/block/<device>/iosched/target_latency
+Date:		March 2012
+contact:	Tao Ma <boyu.mt@taobao.com>
+Description:
+		The /sys/block/<device>/iosched/target_latency only exists
+		when the user sets cfq to /sys/block/<device>/scheduler.
+		It contains an estimated latency time for the cfq. cfq will
+		use it to calculate the time slice used for every task.
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 4c95c0034a4b..9b1067afb224 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -34,8 +34,7 @@ Current Status: linux-2.6.34-mmotm(development version of 2010/April)
 
 Features:
  - accounting anonymous pages, file caches, swap caches usage and limiting them.
- - private LRU and reclaim routine. (system's global LRU and private LRU
-   work independently from each other)
+ - pages are linked to per-memcg LRU exclusively, and there is no global LRU.
  - optionally, memory+swap usage can be accounted and limited.
  - hierarchical accounting
  - soft limit
@@ -154,7 +153,7 @@ updated. page_cgroup has its own LRU on cgroup.
 2.2.1 Accounting details
 
 All mapped anon pages (RSS) and cache pages (Page Cache) are accounted.
-Some pages which are never reclaimable and will not be on the global LRU
+Some pages which are never reclaimable and will not be on the LRU
 are not accounted. We just account pages under usual VM management.
 
 RSS pages are accounted at page_fault unless they've already been accounted
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 709e08e9a222..03ca210406ed 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -531,3 +531,11 @@ Why: There appear to be no production users of the get_robust_list syscall,
 	of ASLR. It was only ever intended for debugging, so it should be
 	removed.
 Who:	Kees Cook <keescook@chromium.org>
+
+----------------------------
+
+What:	setitimer accepts user NULL pointer (value)
+When:	3.6
+Why:	setitimer is not returning -EFAULT if user pointer is NULL. This
+	violates the spec.
+Who:	Sasikantha Babu <sasikanth.v19@gmail.com>
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index e916e3d36488..0d0492028082 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -114,7 +114,7 @@ members are defined:
 struct file_system_type {
 	const char *name;
 	int fs_flags;
-	struct dentry (*mount) (struct file_system_type *, int,
+	struct dentry *(*mount) (struct file_system_type *, int,
 		       const char *, void *);
 	void (*kill_sb) (struct super_block *);
 	struct module *owner;
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index d97d992ced14..03f7897c6414 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -43,7 +43,9 @@ ALC680
 
 ALC882/883/885/888/889
 ======================
-  N/A
+  acer-aspire-4930g	Acer Aspire 4930G/5930G/6530G/6930G/7730G
+  acer-aspire-8930g	Acer Aspire 8330G/6935G
+  acer-aspire		Acer Aspire others
 
 ALC861/660
 ==========
diff --git a/Documentation/usb/URB.txt b/Documentation/usb/URB.txt
index 8ffce746d496..00d2c644068e 100644
--- a/Documentation/usb/URB.txt
+++ b/Documentation/usb/URB.txt
@@ -168,6 +168,28 @@ that if the completion handler or anyone else tries to resubmit it
 they will get a -EPERM error. Thus you can be sure that when
 usb_kill_urb() returns, the URB is totally idle.
 
+There is a lifetime issue to consider. An URB may complete at any
+time, and the completion handler may free the URB. If this happens
+while usb_unlink_urb or usb_kill_urb is running, it will cause a
+memory-access violation. The driver is responsible for avoiding this,
+which often means some sort of lock will be needed to prevent the URB
+from being deallocated while it is still in use.
+
+On the other hand, since usb_unlink_urb may end up calling the
+completion handler, the handler must not take any lock that is held
+when usb_unlink_urb is invoked. The general solution to this problem
+is to increment the URB's reference count while holding the lock, then
+drop the lock and call usb_unlink_urb or usb_kill_urb, and then
+decrement the URB's reference count. You increment the reference
+count by calling
+
+	struct urb *usb_get_urb(struct urb *urb)
+
+(ignore the return value; it is the same as the argument) and
+decrement the reference count by calling usb_free_urb. Of course,
+none of this is necessary if there's no danger of the URB being freed
+by the completion handler.
+
 
 1.7. What about the completion handler?
 
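A minimal illustrative sketch of the locking pattern the new URB.txt text
describes (not part of the merged patch; the driver structure "my_dev" and
its "lock" and "urb" fields are hypothetical, while usb_get_urb(),
usb_kill_urb() and usb_free_urb() are the USB core calls named above):

	static void my_cancel_urb(struct my_dev *dev)
	{
		struct urb *urb;

		spin_lock_irq(&dev->lock);
		urb = dev->urb;
		if (urb)
			usb_get_urb(urb);	/* hold a reference while unlocked */
		spin_unlock_irq(&dev->lock);

		if (urb) {
			usb_kill_urb(urb);	/* may sleep; must not hold the lock */
			usb_free_urb(urb);	/* drop the extra reference */
		}
	}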
diff --git a/Documentation/usb/usbmon.txt b/Documentation/usb/usbmon.txt
index 5335fa8b06eb..c42bb9cd3b43 100644
--- a/Documentation/usb/usbmon.txt
+++ b/Documentation/usb/usbmon.txt
@@ -183,10 +183,10 @@ An input control transfer to get a port status.
 d5ea89a0 3575914555 S Ci:1:001:0 s a3 00 0000 0003 0004 4 <
 d5ea89a0 3575914560 C Ci:1:001:0 0 4 = 01050000
 
-An output bulk transfer to send a SCSI command 0x5E in a 31-byte Bulk wrapper
-to a storage device at address 5:
+An output bulk transfer to send a SCSI command 0x28 (READ_10) in a 31-byte
+Bulk wrapper to a storage device at address 5:
 
-dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 5e000000 00000000 00000600 00000000 00000000 00000000 000000
+dd65f0e8 4128379752 S Bo:1:005:2 -115 31 = 55534243 ad000000 00800000 80010a28 20000000 20000040 00000000 000000
 dd65f0e8 4128379808 C Bo:1:005:2 0 31 >
 
 * Raw binary format and API
diff --git a/MAINTAINERS b/MAINTAINERS
index 2dcfca850639..b0f1073c40b0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1521,8 +1521,8 @@ M: Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S:	Maintained
 F:	drivers/bluetooth/
 
@@ -1532,8 +1532,8 @@ M: Gustavo Padovan <gustavo@padovan.org>
 M:	Johan Hedberg <johan.hedberg@gmail.com>
 L:	linux-bluetooth@vger.kernel.org
 W:	http://www.bluez.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/padovan/bluetooth.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jh/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
 S:	Maintained
 F:	net/bluetooth/
 F:	include/net/bluetooth/
@@ -4533,8 +4533,7 @@ S: Supported
 F:	drivers/net/ethernet/myricom/myri10ge/
 
 NATSEMI ETHERNET DRIVER (DP8381x)
-M:	Tim Hockin <thockin@hockin.org>
-S:	Maintained
+S:	Orphan
 F:	drivers/net/ethernet/natsemi/natsemi.c
 
 NATIVE INSTRUMENTS USB SOUND INTERFACE DRIVER
@@ -4803,6 +4802,7 @@ F: arch/arm/mach-omap2/clockdomain2xxx_3xxx.c
 F:	arch/arm/mach-omap2/clockdomain44xx.c
 
 OMAP AUDIO SUPPORT
+M:	Peter Ujfalusi <peter.ujfalusi@ti.com>
 M:	Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:	alsa-devel@alsa-project.org (subscribers-only)
 L:	linux-omap@vger.kernel.org
@@ -5117,6 +5117,11 @@ F: drivers/i2c/busses/i2c-pca-*
 F:	include/linux/i2c-algo-pca.h
 F:	include/linux/i2c-pca-platform.h
 
+PCDP - PRIMARY CONSOLE AND DEBUG PORT
+M:	Khalid Aziz <khalid.aziz@hp.com>
+S:	Maintained
+F:	drivers/firmware/pcdp.*
+
 PCI ERROR RECOVERY
 M:	Linas Vepstas <linasvepstas@gmail.com>
 L:	linux-pci@vger.kernel.org
@@ -6466,6 +6471,7 @@ S: Odd Fixes
 F:	drivers/staging/olpc_dcon/
 
 STAGING - OZMO DEVICES USB OVER WIFI DRIVER
+M:	Rupesh Gujare <rgujare@ozmodevices.com>
 M:	Chris Kelly <ckelly@ozmodevices.com>
 S:	Maintained
 F:	drivers/staging/ozwpan/
@@ -7461,8 +7467,7 @@ F: include/linux/wm97xx.h
 
 WOLFSON MICROELECTRONICS DRIVERS
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
-M:	Ian Lartey <ian@opensource.wolfsonmicro.com>
-M:	Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
+L:	patches@opensource.wolfsonmicro.com
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-asoc
 T:	git git://opensource.wolfsonmicro.com/linux-2.6-audioplus
 W:	http://opensource.wolfsonmicro.com/content/linux-drivers-wolfson-devices
diff --git a/Makefile b/Makefile
index 0df3d003a079..f6578f47e21e 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index f62251e82ffa..3bb7ffeae3bc 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -3,6 +3,7 @@
 
 #include <linux/types.h>
 #include <asm/barrier.h>
+#include <asm/cmpxchg.h>
 
 /*
  * Atomic operations that C can't guarantee us. Useful for
@@ -168,73 +169,6 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 	return result;
 }
 
-/*
- * Atomic exchange routines.
- */
-
-#define __ASM__MB
-#define ____xchg(type, args...)	__xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
-
-#define xchg_local(ptr,x) \
-  ({ \
-	__typeof__(*(ptr)) _x_ = (x); \
-	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
-		sizeof(*(ptr))); \
-  })
-
-#define cmpxchg_local(ptr, o, n) \
-  ({ \
-	__typeof__(*(ptr)) _o_ = (o); \
-	__typeof__(*(ptr)) _n_ = (n); \
-	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
-		(unsigned long)_n_, \
-		sizeof(*(ptr))); \
-  })
-
-#define cmpxchg64_local(ptr, o, n) \
-  ({ \
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg_local((ptr), (o), (n)); \
-  })
-
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB	"\tmb\n"
-#endif
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...)	__xchg ##type(args)
-#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
-#include <asm/xchg.h>
-
-#define xchg(ptr,x) \
-  ({ \
-	__typeof__(*(ptr)) _x_ = (x); \
-	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
-		sizeof(*(ptr))); \
-  })
-
-#define cmpxchg(ptr, o, n) \
-  ({ \
-	__typeof__(*(ptr)) _o_ = (o); \
-	__typeof__(*(ptr)) _n_ = (n); \
-	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
-		(unsigned long)_n_, sizeof(*(ptr)));\
-  })
-
-#define cmpxchg64(ptr, o, n) \
-  ({ \
-	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg((ptr), (o), (n)); \
-  })
-
-#undef __ASM__MB
-#undef ____cmpxchg
-
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
new file mode 100644
index 000000000000..429e8cd0d78e
--- /dev/null
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -0,0 +1,71 @@
+#ifndef _ALPHA_CMPXCHG_H
+#define _ALPHA_CMPXCHG_H
+
+/*
+ * Atomic exchange routines.
+ */
+
+#define __ASM__MB
+#define ____xchg(type, args...)	__xchg ## type ## _local(args)
+#define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
+#include <asm/xchg.h>
+
+#define xchg_local(ptr, x) \
+({ \
+	__typeof__(*(ptr)) _x_ = (x); \
+	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
+		sizeof(*(ptr))); \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+	__typeof__(*(ptr)) _o_ = (o); \
+	__typeof__(*(ptr)) _n_ = (n); \
+	(__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+		(unsigned long)_n_, \
+		sizeof(*(ptr))); \
+})
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+	cmpxchg_local((ptr), (o), (n)); \
+})
+
+#ifdef CONFIG_SMP
+#undef __ASM__MB
+#define __ASM__MB	"\tmb\n"
+#endif
+#undef ____xchg
+#undef ____cmpxchg
+#define ____xchg(type, args...)	__xchg ##type(args)
+#define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
+#include <asm/xchg.h>
+
+#define xchg(ptr, x) \
+({ \
+	__typeof__(*(ptr)) _x_ = (x); \
+	(__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
+		sizeof(*(ptr))); \
+})
+
+#define cmpxchg(ptr, o, n) \
+({ \
+	__typeof__(*(ptr)) _o_ = (o); \
+	__typeof__(*(ptr)) _n_ = (n); \
+	(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+		(unsigned long)_n_, sizeof(*(ptr)));\
+})
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+	cmpxchg((ptr), (o), (n)); \
+})
+
+#undef __ASM__MB
+#undef ____cmpxchg
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#endif /* _ALPHA_CMPXCHG_H */
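For context, a minimal sketch of how the cmpxchg() macro defined above is
typically used (not part of the merged patch; counter_inc() is a
hypothetical helper). cmpxchg() returns the value that was in memory: if
it equals the expected old value the swap happened, otherwise another CPU
raced in and the loop retries with a fresh snapshot:

	static inline void counter_inc(int *counter)
	{
		int old;

		do {
			old = *counter;		/* snapshot current value */
		} while (cmpxchg(counter, old, old + 1) != old);
	}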
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 1d1b436fbff2..0ca9724597c1 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -1,10 +1,10 @@
-#ifndef _ALPHA_ATOMIC_H
+#ifndef _ALPHA_CMPXCHG_H
 #error Do not include xchg.h directly!
 #else
 /*
  * xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
  * except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/system.h.
+ * So this file is included twice from asm/cmpxchg.h.
  */
 
 /*
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index 6ce11c481178..797f04bedb47 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -77,6 +77,8 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 	} else if (atag->hdr.tag == ATAG_MEM) {
 		if (memcount >= sizeof(mem_reg_property)/4)
 			continue;
+		if (!atag->u.mem.size)
+			continue;
 		mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
 		mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
 	} else if (atag->hdr.tag == ATAG_INITRD2) {
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 5f6045f1766c..dc7e8ce8e6be 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -273,7 +273,7 @@ restart: adr r0, LC0
 		add	r0, r0, #0x100
 		mov	r1, r6
 		sub	r2, sp, r6
-		blne	atags_to_fdt
+		bleq	atags_to_fdt
 
 		ldmfd	sp!, {r0-r3, ip, lr}
 		sub	sp, sp, #0x10000
diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi
index 799ad1889b51..773ef484037a 100644
--- a/arch/arm/boot/dts/at91sam9g20.dtsi
+++ b/arch/arm/boot/dts/at91sam9g20.dtsi
@@ -55,7 +55,6 @@
 			#interrupt-cells = <2>;
 			compatible = "atmel,at91rm9200-aic";
 			interrupt-controller;
-			interrupt-parent;
 			reg = <0xfffff000 0x200>;
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9g45.dtsi b/arch/arm/boot/dts/at91sam9g45.dtsi
index 9e6eb6ecea0e..c8042147eaa2 100644
--- a/arch/arm/boot/dts/at91sam9g45.dtsi
+++ b/arch/arm/boot/dts/at91sam9g45.dtsi
@@ -56,7 +56,6 @@
 			#interrupt-cells = <2>;
 			compatible = "atmel,at91rm9200-aic";
 			interrupt-controller;
-			interrupt-parent;
 			reg = <0xfffff000 0x200>;
 		};
 
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 70ab3a4e026f..dd4ed748469a 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -54,7 +54,6 @@
 			#interrupt-cells = <2>;
 			compatible = "atmel,at91rm9200-aic";
 			interrupt-controller;
-			interrupt-parent;
 			reg = <0xfffff000 0x200>;
 		};
 
diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi
index d73dce645667..14bc30705099 100644
--- a/arch/arm/boot/dts/db8500.dtsi
+++ b/arch/arm/boot/dts/db8500.dtsi
@@ -24,7 +24,6 @@
 		#interrupt-cells = <3>;
 		#address-cells = <1>;
 		interrupt-controller;
-		interrupt-parent;
 		reg = <0xa0411000 0x1000>,
 		      <0xa0410100 0x100>;
 	};
diff --git a/arch/arm/boot/dts/highbank.dts b/arch/arm/boot/dts/highbank.dts
index 37c0ff9c8b90..83e72294aefb 100644
--- a/arch/arm/boot/dts/highbank.dts
+++ b/arch/arm/boot/dts/highbank.dts
@@ -89,7 +89,6 @@
 		#size-cells = <0>;
 		#address-cells = <1>;
 		interrupt-controller;
-		interrupt-parent;
 		reg = <0xfff11000 0x1000>,
 		      <0xfff10100 0x100>;
 	};
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7a66311f3066..7e288f96cedf 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -427,19 +427,18 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent)
427 427
428/* 428/*
429 * Handle each interrupt in a single VIC. Returns non-zero if we've 429 * Handle each interrupt in a single VIC. Returns non-zero if we've
430 * handled at least one interrupt. This does a single read of the 430 * handled at least one interrupt. This reads the status register
431 * status register and handles all interrupts in order from LSB first. 431 * before handling each interrupt, which is necessary given that
432 * handle_IRQ may briefly re-enable interrupts for soft IRQ handling.
432 */ 433 */
433static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) 434static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
434{ 435{
435 u32 stat, irq; 436 u32 stat, irq;
436 int handled = 0; 437 int handled = 0;
437 438
438 stat = readl_relaxed(vic->base + VIC_IRQ_STATUS); 439 while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) {
439 while (stat) {
440 irq = ffs(stat) - 1; 440 irq = ffs(stat) - 1;
441 handle_IRQ(irq_find_mapping(vic->domain, irq), regs); 441 handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
442 stat &= ~(1 << irq);
443 handled = 1; 442 handled = 1;
444 } 443 }
445 444
diff --git a/arch/arm/include/asm/jump_label.h b/arch/arm/include/asm/jump_label.h
index 5c5ca2ea62b0..bfc198c75913 100644
--- a/arch/arm/include/asm/jump_label.h
+++ b/arch/arm/include/asm/jump_label.h
@@ -14,7 +14,7 @@
 #define JUMP_LABEL_NOP	"nop"
 #endif
 
-static __always_inline bool arch_static_branch(struct jump_label_key *key)
+static __always_inline bool arch_static_branch(struct static_key *key)
 {
 	asm goto("1:\n\t"
 		 JUMP_LABEL_NOP "\n\t"
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b91411371ae1..ebfac782593f 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -523,7 +523,21 @@ int __init arm_add_memory(phys_addr_t start, unsigned long size)
 	 */
 	size -= start & ~PAGE_MASK;
 	bank->start = PAGE_ALIGN(start);
-	bank->size = size & PAGE_MASK;
+
+#ifndef CONFIG_LPAE
+	if (bank->start + size < bank->start) {
+		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
+			"32-bit physical address space\n", (long long)start);
+		/*
+		 * To ensure bank->start + bank->size is representable in
+		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
+		 * This means we lose a page after masking.
+		 */
+		size = ULONG_MAX - bank->start;
+	}
+#endif
+
+	bank->size = size & PAGE_MASK;
 
 	/*
 	 * Check whether this memory region has non-zero size or
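The truncation hunk above relies on unsigned wraparound: a + b overflows
exactly when the modular sum ends up below a. A self-contained sketch with
hypothetical values (not part of the merged patch; uint32_t is used so the
arithmetic wraps the same way on any host, where the kernel code uses the
32-bit unsigned long of 32-bit ARM):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t start = 0xF0000000u;	/* hypothetical bank start */
		uint32_t size  = 0x20000000u;	/* 512 MB: start + size wraps */

		if (start + size < start)		/* sum wrapped past 2^32 */
			size = UINT32_MAX - start;	/* clamp, as the patch does */

		printf("clamped size: %#x\n", size);
		return 0;
	}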
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index fef42b21cecb..5b150afb995b 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -118,10 +118,14 @@ static int twd_cpufreq_transition(struct notifier_block *nb,
 	 * The twd clock events must be reprogrammed to account for the new
 	 * frequency. The timer is local to a cpu, so cross-call to the
 	 * changing cpu.
+	 *
+	 * Only wait for it to finish, if the cpu is active to avoid
+	 * deadlock when cpu1 is spinning on while(!cpu_active(cpu1)) during
+	 * booting of that cpu.
 	 */
 	if (state == CPUFREQ_POSTCHANGE || state == CPUFREQ_RESUMECHANGE)
 		smp_call_function_single(freqs->cpu, twd_update_frequency,
-			NULL, 1);
+			NULL, cpu_active(freqs->cpu));
 
 	return NOTIFY_OK;
 }
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 0491ceef1cda..e81c35f936b5 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -368,6 +368,7 @@ comment "Flattened Device Tree based board for EXYNOS SoCs"
 
 config MACH_EXYNOS4_DT
 	bool "Samsung Exynos4 Machine using device tree"
+	depends on ARCH_EXYNOS4
 	select CPU_EXYNOS4210
 	select USE_OF
 	select ARM_AMBA
@@ -380,6 +381,7 @@ config MACH_EXYNOS4_DT
 
 config MACH_EXYNOS5_DT
 	bool "SAMSUNG EXYNOS5 Machine using device tree"
+	depends on ARCH_EXYNOS5
 	select SOC_EXYNOS5250
 	select USE_OF
 	select ARM_AMBA
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index 9bee8535d9e0..591e78521a9f 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -212,6 +212,8 @@
 #define IRQ_MFC			EXYNOS4_IRQ_MFC
 #define IRQ_SDO			EXYNOS4_IRQ_SDO
 
+#define IRQ_I2S0		EXYNOS4_IRQ_I2S0
+
 #define IRQ_ADC			EXYNOS4_IRQ_ADC0
 #define IRQ_TC			EXYNOS4_IRQ_PEN0
 
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index 024d38ff1718..6e6d11ff352a 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -89,6 +89,10 @@
 #define EXYNOS4_PA_MDMA1	0x12840000
 #define EXYNOS4_PA_PDMA0	0x12680000
 #define EXYNOS4_PA_PDMA1	0x12690000
+#define EXYNOS5_PA_MDMA0	0x10800000
+#define EXYNOS5_PA_MDMA1	0x11C10000
+#define EXYNOS5_PA_PDMA0	0x121A0000
+#define EXYNOS5_PA_PDMA1	0x121B0000
 
 #define EXYNOS4_PA_SYSMMU_MDMA	0x10A40000
 #define EXYNOS4_PA_SYSMMU_SSS	0x10A50000
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index e141c1fd68d8..d9578a58ae7f 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -255,9 +255,15 @@
 
 /* For EXYNOS5250 */
 
+#define EXYNOS5_APLL_LOCK		EXYNOS_CLKREG(0x00000)
 #define EXYNOS5_APLL_CON0		EXYNOS_CLKREG(0x00100)
 #define EXYNOS5_CLKSRC_CPU		EXYNOS_CLKREG(0x00200)
+#define EXYNOS5_CLKMUX_STATCPU		EXYNOS_CLKREG(0x00400)
 #define EXYNOS5_CLKDIV_CPU0		EXYNOS_CLKREG(0x00500)
+#define EXYNOS5_CLKDIV_CPU1		EXYNOS_CLKREG(0x00504)
+#define EXYNOS5_CLKDIV_STATCPU0		EXYNOS_CLKREG(0x00600)
+#define EXYNOS5_CLKDIV_STATCPU1		EXYNOS_CLKREG(0x00604)
+
 #define EXYNOS5_MPLL_CON0		EXYNOS_CLKREG(0x04100)
 #define EXYNOS5_CLKSRC_CORE1		EXYNOS_CLKREG(0x04204)
 
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 0d26f50081ad..4711c8920e37 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -45,7 +45,7 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
45 "exynos4210-uart.3", NULL), 45 "exynos4210-uart.3", NULL),
46 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL), 46 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
47 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL), 47 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
48 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.2", NULL), 48 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
49 {}, 49 {},
50}; 50};
51 51
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index b3982c867c9c..b4f1f902ce6d 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -307,49 +307,7 @@ static struct i2c_board_info i2c1_devs[] __initdata = {
 };
 
 /* TSP */
-static u8 mxt_init_vals[] = {
-	/* MXT_GEN_COMMAND(6) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_GEN_POWER(7) */
-	0x20, 0xff, 0x32,
-	/* MXT_GEN_ACQUIRE(8) */
-	0x0a, 0x00, 0x05, 0x00, 0x00, 0x00, 0x09, 0x23,
-	/* MXT_TOUCH_MULTI(9) */
-	0x00, 0x00, 0x00, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x02, 0x00,
-	0x00, 0x01, 0x01, 0x0e, 0x0a, 0x0a, 0x0a, 0x0a, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00,
-	/* MXT_TOUCH_KEYARRAY(15) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00,
-	0x00,
-	/* MXT_SPT_GPIOPWM(19) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_PROCI_GRIPFACE(20) */
-	0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50, 0x28, 0x04,
-	0x0f, 0x0a,
-	/* MXT_PROCG_NOISE(22) */
-	0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x23, 0x00,
-	0x00, 0x05, 0x0f, 0x19, 0x23, 0x2d, 0x03,
-	/* MXT_TOUCH_PROXIMITY(23) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_PROCI_ONETOUCH(24) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_SPT_SELFTEST(25) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	0x00, 0x00, 0x00, 0x00,
-	/* MXT_PROCI_TWOTOUCH(27) */
-	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
-	/* MXT_SPT_CTECONFIG(28) */
-	0x00, 0x00, 0x02, 0x08, 0x10, 0x00,
-};
-
 static struct mxt_platform_data mxt_platform_data = {
-	.config		= mxt_init_vals,
-	.config_length	= ARRAY_SIZE(mxt_init_vals),
-
 	.x_line		= 18,
 	.y_line		= 11,
 	.x_size		= 1024,
@@ -571,7 +529,7 @@ static struct regulator_init_data __initdata max8997_ldo7_data = {
 
 static struct regulator_init_data __initdata max8997_ldo8_data = {
 	.constraints	= {
-		.name		= "VUSB/VDAC_3.3V_C210",
+		.name		= "VUSB+VDAC_3.3V_C210",
 		.min_uV		= 3300000,
 		.max_uV		= 3300000,
 		.valid_ops_mask	= REGULATOR_CHANGE_STATUS,
@@ -1347,6 +1305,7 @@ static struct platform_device *nuri_devices[] __initdata = {
 
 static void __init nuri_map_io(void)
 {
+	clk_xusbxti.rate = 24000000;
 	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(nuri_uartcfgs, ARRAY_SIZE(nuri_uartcfgs));
@@ -1379,7 +1338,6 @@ static void __init nuri_machine_init(void)
 	nuri_camera_init();
 
 	nuri_ehci_init();
-	clk_xusbxti.rate = 24000000;
 
 	/* Last */
 	platform_add_devices(nuri_devices, ARRAY_SIZE(nuri_devices));
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 6bb9dbdd73fd..7ebf79c2ab34 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -29,6 +29,7 @@
 #include <asm/mach-types.h>
 
 #include <plat/regs-serial.h>
+#include <plat/clock.h>
 #include <plat/cpu.h>
 #include <plat/devs.h>
 #include <plat/iic.h>
@@ -1057,6 +1058,7 @@ static struct platform_device *universal_devices[] __initdata = {
 
 static void __init universal_map_io(void)
 {
+	clk_xusbxti.rate = 24000000;
 	exynos_init_io(NULL, 0);
 	s3c24xx_init_clocks(24000000);
 	s3c24xx_init_uarts(universal_uartcfgs, ARRAY_SIZE(universal_uartcfgs));
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 3698a370d636..26aac363a064 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -86,9 +86,6 @@ static void __init halibut_init(void)
 static void __init halibut_fixup(struct tag *tags, char **cmdline,
 				 struct meminfo *mi)
 {
-	mi->nr_banks=1;
-	mi->bank[0].start = PHYS_OFFSET;
-	mi->bank[0].size = (101*1024*1024);
 }
 
 static void __init halibut_map_io(void)
diff --git a/arch/arm/mach-msm/board-trout-panel.c b/arch/arm/mach-msm/board-trout-panel.c
index 25105c1027fe..89bf6b426699 100644
--- a/arch/arm/mach-msm/board-trout-panel.c
+++ b/arch/arm/mach-msm/board-trout-panel.c
@@ -12,6 +12,7 @@
 
 #include <asm/io.h>
 #include <asm/mach-types.h>
+#include <asm/system_info.h>
 
 #include <mach/msm_fb.h>
 #include <mach/vreg.h>
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index 5414f76ec0a9..d4060a37e23d 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/clkdev.h>
 
+#include <asm/system_info.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c
index 67e701c7f183..9980dc736e7b 100644
--- a/arch/arm/mach-msm/proc_comm.c
+++ b/arch/arm/mach-msm/proc_comm.c
@@ -121,7 +121,7 @@ int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
  * and unknown state. This function should be called early to
  * wait on the ARM9.
  */
-void __init proc_comm_boot_wait(void)
+void __devinit proc_comm_boot_wait(void)
 {
 	void __iomem *base = MSM_SHARED_RAM_BASE;
 
diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index 7072e0d651b1..3d9d746b221a 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -165,83 +165,3 @@ int omap2_select_table_rate(struct clk *clk, unsigned long rate)
165 165
166 return 0; 166 return 0;
167} 167}
168
169#ifdef CONFIG_CPU_FREQ
170/*
171 * Walk PRCM rate table and fillout cpufreq freq_table
172 * XXX This should be replaced by an OPP layer in the near future
173 */
174static struct cpufreq_frequency_table *freq_table;
175
176void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
177{
178 const struct prcm_config *prcm;
179 int i = 0;
180 int tbl_sz = 0;
181
182 if (!cpu_is_omap24xx())
183 return;
184
185 for (prcm = rate_table; prcm->mpu_speed; prcm++) {
186 if (!(prcm->flags & cpu_mask))
187 continue;
188 if (prcm->xtal_speed != sclk->rate)
189 continue;
190
191 /* don't put bypass rates in table */
192 if (prcm->dpll_speed == prcm->xtal_speed)
193 continue;
194
195 tbl_sz++;
196 }
197
198 /*
199 * XXX Ensure that we're doing what CPUFreq expects for this error
200 * case and the following one
201 */
202 if (tbl_sz == 0) {
203 pr_warning("%s: no matching entries in rate_table\n",
204 __func__);
205 return;
206 }
207
208 /* Include the CPUFREQ_TABLE_END terminator entry */
209 tbl_sz++;
210
211 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * tbl_sz,
212 GFP_ATOMIC);
213 if (!freq_table) {
214 pr_err("%s: could not kzalloc frequency table\n", __func__);
215 return;
216 }
217
218 for (prcm = rate_table; prcm->mpu_speed; prcm++) {
219 if (!(prcm->flags & cpu_mask))
220 continue;
221 if (prcm->xtal_speed != sclk->rate)
222 continue;
223
224 /* don't put bypass rates in table */
225 if (prcm->dpll_speed == prcm->xtal_speed)
226 continue;
227
228 freq_table[i].index = i;
229 freq_table[i].frequency = prcm->mpu_speed / 1000;
230 i++;
231 }
232
233 freq_table[i].index = i;
234 freq_table[i].frequency = CPUFREQ_TABLE_END;
235
236 *table = &freq_table[0];
237}
238
239void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
240{
241 if (!cpu_is_omap24xx())
242 return;
243
244 kfree(freq_table);
245}
246
247#endif
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
index f57ed5baeccf..d9f4931513f9 100644
--- a/arch/arm/mach-omap2/clock.c
+++ b/arch/arm/mach-omap2/clock.c
@@ -536,10 +536,5 @@ struct clk_functions omap2_clk_functions = {
 	.clk_set_rate = omap2_clk_set_rate,
 	.clk_set_parent = omap2_clk_set_parent,
 	.clk_disable_unused = omap2_clk_disable_unused,
-#ifdef CONFIG_CPU_FREQ
-	/* These will be removed when the OPP code is integrated */
-	.clk_init_cpufreq_table = omap2_clk_init_cpufreq_table,
-	.clk_exit_cpufreq_table = omap2_clk_exit_cpufreq_table,
-#endif
 };
 
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
index b8c2a686481c..a1bb23a23351 100644
--- a/arch/arm/mach-omap2/clock.h
+++ b/arch/arm/mach-omap2/clock.h
@@ -146,14 +146,6 @@ extern const struct clksel_rate gpt_sys_rates[];
 extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
 
-#if defined(CONFIG_ARCH_OMAP2) && defined(CONFIG_CPU_FREQ)
-extern void omap2_clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
-extern void omap2_clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
-#else
-#define omap2_clk_init_cpufreq_table	0
-#define omap2_clk_exit_cpufreq_table	0
-#endif
-
 extern const struct clkops clkops_omap2_iclk_dflt_wait;
 extern const struct clkops clkops_omap2_iclk_dflt;
 extern const struct clkops clkops_omap2_iclk_idle_only;
diff --git a/arch/arm/mach-s5pv210/dma.c b/arch/arm/mach-s5pv210/dma.c
index 86ce62f66190..b8337e248b09 100644
--- a/arch/arm/mach-s5pv210/dma.c
+++ b/arch/arm/mach-s5pv210/dma.c
@@ -33,8 +33,6 @@
 #include <mach/irqs.h>
 #include <mach/dma.h>
 
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
 static u8 pdma0_peri[] = {
 	DMACH_UART0_RX,
 	DMACH_UART0_TX,
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index a9ea64e0da0d..48d018f2332b 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -484,8 +484,8 @@ static struct wm8994_pdata wm8994_platform_data = {
 	.gpio_defaults[8] = 0x0100,
 	.gpio_defaults[9] = 0x0100,
 	.gpio_defaults[10] = 0x0100,
-	.ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data },	/* XM0FRNB_2 */
-	.ldo[1] = { 0, NULL, &wm8994_ldo2_data },
+	.ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data },	/* XM0FRNB_2 */
+	.ldo[1] = { 0, &wm8994_ldo2_data },
 };
 
 /* GPIO I2C PMIC */
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index 2cf5ed75f390..a8933de3d627 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -674,8 +674,8 @@ static struct wm8994_pdata wm8994_platform_data = {
 	.gpio_defaults[8] = 0x0100,
 	.gpio_defaults[9] = 0x0100,
 	.gpio_defaults[10] = 0x0100,
-	.ldo[0] = { S5PV210_MP03(6), NULL, &wm8994_ldo1_data },	/* XM0FRNB_2 */
-	.ldo[1] = { 0, NULL, &wm8994_ldo2_data },
+	.ldo[0] = { S5PV210_MP03(6), &wm8994_ldo1_data },	/* XM0FRNB_2 */
+	.ldo[1] = { 0, &wm8994_ldo2_data },
 };
 
 /* GPIO I2C PMIC */
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 7edef9121632..7c8a7d8467bf 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -723,7 +723,7 @@ config CPU_HIGH_VECTOR
 	bool "Select the High exception vector"
 	help
 	  Say Y here to select high exception vector(0xFFFF0000~).
-	  The exception vector can be vary depending on the platform
+	  The exception vector can vary depending on the platform
 	  design in nommu mode. If your platform needs to select
 	  high exception vector, say Y.
 	  Otherwise or if you are unsure, say N, and the low exception
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 9055b5a84ec5..f07467533365 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -320,7 +320,7 @@ retry:
 	 */
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
-	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
 		if (fault & VM_FAULT_MAJOR) {
 			tsk->maj_flt++;
 			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 6486d2f253cd..d51225f90ae2 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -13,6 +13,7 @@
 #include <asm/sections.h>
 #include <asm/page.h>
 #include <asm/setup.h>
+#include <asm/traps.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
@@ -39,6 +40,7 @@ void __init sanity_check_meminfo(void)
  */
 void __init paging_init(struct machine_desc *mdesc)
 {
+	early_trap_init((void *)CONFIG_VECTORS_BASE);
 	bootmem_init();
 }
 
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index f1c8486f7501..c2e2b66f72b5 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -255,6 +255,18 @@ __v7_setup:
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 #endif
+#ifndef CONFIG_ARM_THUMBEE
+	mrc	p15, 0, r0, c0, c1, 0		@ read ID_PFR0 for ThumbEE
+	and	r0, r0, #(0xf << 12)		@ ThumbEE enabled field
+	teq	r0, #(1 << 12)			@ check if ThumbEE is present
+	bne	1f
+	mov	r5, #0
+	mcr	p14, 6, r5, c1, c0, 0		@ Initialize TEEHBR to 0
+	mrc	p14, 6, r0, c0, c0, 0		@ load TEECR
+	orr	r0, r0, #1			@ set the 1st bit in order to
+	mcr	p14, 6, r0, c0, c0, 0		@ stop userspace TEEHBR access
+1:
+#endif
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
 #ifdef CONFIG_CPU_ENDIAN_BE8
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 8506cbb7fea4..62ec5c452792 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -398,32 +398,6 @@ struct clk dummy_ck = {
 	.ops = &clkops_null,
 };
 
-#ifdef CONFIG_CPU_FREQ
-void clk_init_cpufreq_table(struct cpufreq_frequency_table **table)
-{
-	unsigned long flags;
-
-	if (!arch_clock || !arch_clock->clk_init_cpufreq_table)
-		return;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	arch_clock->clk_init_cpufreq_table(table);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-
-void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table)
-{
-	unsigned long flags;
-
-	if (!arch_clock || !arch_clock->clk_exit_cpufreq_table)
-		return;
-
-	spin_lock_irqsave(&clockfw_lock, flags);
-	arch_clock->clk_exit_cpufreq_table(table);
-	spin_unlock_irqrestore(&clockfw_lock, flags);
-}
-#endif
-
 /*
  *
  */
diff --git a/arch/arm/plat-omap/include/plat/clock.h b/arch/arm/plat-omap/include/plat/clock.h
index 240a7b9fd946..d0ef57c1d71b 100644
--- a/arch/arm/plat-omap/include/plat/clock.h
+++ b/arch/arm/plat-omap/include/plat/clock.h
@@ -272,8 +272,6 @@ struct clk {
 #endif
 };
 
-struct cpufreq_frequency_table;
-
 struct clk_functions {
 	int (*clk_enable)(struct clk *clk);
 	void (*clk_disable)(struct clk *clk);
@@ -283,10 +281,6 @@ struct clk_functions {
 	void (*clk_allow_idle)(struct clk *clk);
 	void (*clk_deny_idle)(struct clk *clk);
 	void (*clk_disable_unused)(struct clk *clk);
-#ifdef CONFIG_CPU_FREQ
-	void (*clk_init_cpufreq_table)(struct cpufreq_frequency_table **);
-	void (*clk_exit_cpufreq_table)(struct cpufreq_frequency_table **);
-#endif
 };
 
 extern int mpurate;
@@ -301,10 +295,6 @@ extern void recalculate_root_clocks(void);
 extern unsigned long followparent_recalc(struct clk *clk);
 extern void clk_enable_init_clocks(void);
 unsigned long omap_fixed_divisor_recalc(struct clk *clk);
-#ifdef CONFIG_CPU_FREQ
-extern void clk_init_cpufreq_table(struct cpufreq_frequency_table **table);
-extern void clk_exit_cpufreq_table(struct cpufreq_frequency_table **table);
-#endif
 extern struct clk *omap_clk_get_by_name(const char *name);
 extern int omap_clk_enable_autoidle_all(void);
 extern int omap_clk_disable_autoidle_all(void);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index 71553f410016..a0ffc77da809 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -302,6 +302,7 @@ comment "Power management"
 config SAMSUNG_PM_DEBUG
 	bool "S3C2410 PM Suspend debug"
 	depends on PM
+	select DEBUG_LL
 	help
 	  Say Y here if you want verbose debugging from the PM Suspend and
 	  Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index f13b78d5e1ca..ab4577f93d96 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -42,10 +42,6 @@
 /* This number is used when no interrupt has been assigned */
 #define NO_IRQ	0
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
-extern irq_hw_number_t virq_to_hw(unsigned int virq);
-
 extern void __init init_pic_c64xplus(void);
 
 extern void init_IRQ(void);
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index 65b8ddf54b44..c90fb5e82ad7 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -130,16 +130,3 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
 	return 0;
 }
-
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
-irq_hw_number_t virq_to_hw(unsigned int virq)
-{
-	struct irq_data *irq_data = irq_get_irq_data(virq);
-	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
-}
-EXPORT_SYMBOL_GPL(virq_to_hw);
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
index 4c96187e2049..4f37dbbb8640 100644
--- a/arch/ia64/include/asm/cmpxchg.h
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -1 +1,147 @@
-#include <asm/intrinsics.h>
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+/*
+ * Compare/Exchange, forked from asm/intrinsics.h
+ * which was:
+ *
+ *	Copyright (C) 2002-2003 Hewlett-Packard Co
+ *	David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer(void);
+
+#define __xchg(x, ptr, size)					\
+({								\
+	unsigned long __xchg_result;				\
+								\
+	switch (size) {						\
+	case 1:							\
+		__xchg_result = ia64_xchg1((__u8 *)ptr, x);	\
+		break;						\
+								\
+	case 2:							\
+		__xchg_result = ia64_xchg2((__u16 *)ptr, x);	\
+		break;						\
+								\
+	case 4:							\
+		__xchg_result = ia64_xchg4((__u32 *)ptr, x);	\
+		break;						\
+								\
+	case 8:							\
+		__xchg_result = ia64_xchg8((__u64 *)ptr, x);	\
+		break;						\
+	default:						\
+		ia64_xchg_called_with_bad_pointer();		\
+	}							\
+	__xchg_result;						\
+})
+
+#define xchg(ptr, x)						\
+((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
+
+#define ia64_cmpxchg(sem, ptr, old, new, size)				\
+({									\
+	__u64 _o_, _r_;							\
+									\
+	switch (size) {							\
+	case 1:								\
+		_o_ = (__u8) (long) (old);				\
+		break;							\
+	case 2:								\
+		_o_ = (__u16) (long) (old);				\
+		break;							\
+	case 4:								\
+		_o_ = (__u32) (long) (old);				\
+		break;							\
+	case 8:								\
+		_o_ = (__u64) (long) (old);				\
+		break;							\
+	default:							\
+		break;							\
+	}								\
+	switch (size) {							\
+	case 1:								\
+		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 2:								\
+		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 4:								\
+		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);	\
+		break;							\
+									\
+	case 8:								\
+		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);	\
+		break;							\
+									\
+	default:							\
+		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
+		break;							\
+	}								\
+	(__typeof__(old)) _r_;						\
+})
+
+#define cmpxchg_acq(ptr, o, n)	\
+	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n)	\
+	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local		cmpxchg
+#define cmpxchg64_local		cmpxchg64
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v)						\
+do {									\
+	if (_cmpxchg_bugcheck_count-- <= 0) {				\
+		void *ip;						\
+		extern int printk(const char *fmt, ...);		\
+		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
+		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
+		break;							\
+	}								\
+} while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_CMPXCHG_H */
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index e4076b511829..d129e367e764 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -18,6 +18,7 @@
 #else
 # include <asm/gcc_intrin.h>
 #endif
+#include <asm/cmpxchg.h>
 
 #define ia64_native_get_psr_i()	(ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
 
@@ -81,119 +82,6 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 
 #define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, rel) + (i)) /* return new value */
 
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void ia64_xchg_called_with_bad_pointer (void);
-
-#define __xchg(x,ptr,size)					\
-({								\
-	unsigned long __xchg_result;				\
-								\
-	switch (size) {						\
-	case 1:							\
-		__xchg_result = ia64_xchg1((__u8 *)ptr, x);	\
-		break;						\
-								\
-	case 2:							\
-		__xchg_result = ia64_xchg2((__u16 *)ptr, x);	\
-		break;						\
-								\
-	case 4:							\
-		__xchg_result = ia64_xchg4((__u32 *)ptr, x);	\
-		break;						\
-								\
-	case 8:							\
-		__xchg_result = ia64_xchg8((__u64 *)ptr, x);	\
-		break;						\
-	default:						\
-		ia64_xchg_called_with_bad_pointer();		\
-	}							\
-	__xchg_result;						\
-})
-
-#define xchg(ptr,x)						\
-	((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange.  Compare OLD with MEM, if identical,
- * store NEW in MEM.  Return the initial value in MEM.  Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long ia64_cmpxchg_called_with_bad_pointer (void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size)				\
-({									\
-	__u64 _o_, _r_;							\
-									\
-	switch (size) {							\
-	case 1: _o_ = (__u8 ) (long) (old); break;			\
-	case 2: _o_ = (__u16) (long) (old); break;			\
-	case 4: _o_ = (__u32) (long) (old); break;			\
-	case 8: _o_ = (__u64) (long) (old); break;			\
-	default: break;							\
-	}								\
-	switch (size) {							\
-	case 1:								\
-		_r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_);	\
-		break;							\
-									\
-	case 2:								\
-		_r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_);	\
-		break;							\
-									\
-	case 4:								\
-		_r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_);	\
-		break;							\
-									\
-	case 8:								\
-		_r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_);	\
-		break;							\
-									\
-	default:							\
-		_r_ = ia64_cmpxchg_called_with_bad_pointer();		\
-		break;							\
-	}								\
-	(__typeof__(old)) _r_;						\
-})
-
-#define cmpxchg_acq(ptr, o, n)	\
-	ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr, o, n)	\
-	ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
-
-#define cmpxchg_local		cmpxchg
-#define cmpxchg64_local		cmpxchg64
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL	int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v)						\
-  do {									\
-	if (_cmpxchg_bugcheck_count-- <= 0) {				\
-		void *ip;						\
-		extern int printk(const char *fmt, ...);		\
-		ip = (void *) ia64_getreg(_IA64_REG_IP);		\
-		printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));	\
-		break;							\
-	}								\
-  } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
 #endif
 
 #ifdef __KERNEL__
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index cf417e510736..e648af92ced1 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -33,8 +33,6 @@ extern atomic_t ppc_n_lost_interrupts;
 /* Same thing, used by the generic IRQ code */
 #define NR_IRQS_LEGACY	NUM_ISA_INTERRUPTS
 
-struct irq_data;
-extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
 extern irq_hw_number_t virq_to_hw(unsigned int virq);
 
 /**
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 3e57a00b8cba..ba3aeb4bc06a 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -206,40 +206,43 @@ reenable_mmu: /* re-enable mmu so we can */
 	andi.	r10,r10,MSR_EE	/* Did EE change? */
 	beq	1f
 
-	/* Save handler and return address into the 2 unused words
-	 * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
-	 * else can be recovered from the pt_regs except r3 which for
-	 * normal interrupts has been set to pt_regs and for syscalls
-	 * is an argument, so we temporarily use ORIG_GPR3 to save it
-	 */
-	stw	r9,8(r1)
-	stw	r11,12(r1)
-	stw	r3,ORIG_GPR3(r1)
 	/*
 	 * The trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
 	 * If from user mode there is only one stack frame on the stack, and
 	 * accessing CALLER_ADDR1 will cause oops. So we need create a dummy
 	 * stack frame to make trace_hardirqs_off happy.
+	 *
+	 * This is handy because we also need to save a bunch of GPRs,
+	 * r3 can be different from GPR3(r1) at this point, r9 and r11
+	 * contains the old MSR and handler address respectively,
+	 * r4 & r5 can contain page fault arguments that need to be passed
+	 * along as well. r12, CCR, CTR, XER etc... are left clobbered as
+	 * they aren't useful past this point (aren't syscall arguments),
+	 * the rest is restored from the exception frame.
 	 */
+	stwu	r1,-32(r1)
+	stw	r9,8(r1)
+	stw	r11,12(r1)
+	stw	r3,16(r1)
+	stw	r4,20(r1)
+	stw	r5,24(r1)
 	andi.	r12,r12,MSR_PR
-	beq	11f
-	stwu	r1,-16(r1)
+	b	11f
 	bl	trace_hardirqs_off
-	addi	r1,r1,16
 	b	12f
-
 11:
 	bl	trace_hardirqs_off
 12:
+	lwz	r5,24(r1)
+	lwz	r4,20(r1)
+	lwz	r3,16(r1)
+	lwz	r11,12(r1)
+	lwz	r9,8(r1)
+	addi	r1,r1,32
 	lwz	r0,GPR0(r1)
-	lwz	r3,ORIG_GPR3(r1)
-	lwz	r4,GPR4(r1)
-	lwz	r5,GPR5(r1)
 	lwz	r6,GPR6(r1)
 	lwz	r7,GPR7(r1)
 	lwz	r8,GPR8(r1)
-	lwz	r9,8(r1)
-	lwz	r11,12(r1)
 1:	mtctr	r11
 	mtlr	r9
 	bctr			/* jump to handler */
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 243dbabfe74d..5ec1b2354ca6 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -560,12 +560,6 @@ void do_softirq(void)
 	local_irq_restore(flags);
 }
 
-irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
-{
-	return d->hwirq;
-}
-EXPORT_SYMBOL_GPL(irqd_to_hwirq);
-
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(virq);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index f88698c0f332..4937c9690090 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1235,7 +1235,7 @@ void __ppc64_runlatch_on(void)
 	ctrl |= CTRL_RUNLATCH;
 	mtspr(SPRN_CTRLT, ctrl);
 
-	ti->local_flags |= TLF_RUNLATCH;
+	ti->local_flags |= _TLF_RUNLATCH;
 }
 
 /* Called with hard IRQs off */
@@ -1244,7 +1244,7 @@ void __ppc64_runlatch_off(void)
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
 
-	ti->local_flags &= ~TLF_RUNLATCH;
+	ti->local_flags &= ~_TLF_RUNLATCH;
 
 	ctrl = mfspr(SPRN_CTRLF);
 	ctrl &= ~CTRL_RUNLATCH;
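
The runlatch fix above is a mask-versus-bit-number bug: in powerpc's thread_info.h the TLF_* constants are bit numbers and the underscore-prefixed _TLF_* constants are the corresponding masks, so OR-ing TLF_RUNLATCH into local_flags set the wrong bit. A minimal illustration of the bug class (the bit position used here is an assumption, not taken from this diff):

	#define TLF_RUNLATCH	4			/* bit number */
	#define _TLF_RUNLATCH	(1UL << TLF_RUNLATCH)	/* mask */

	ti->local_flags |= TLF_RUNLATCH;	/* bug: ORs in the value 4, i.e. bit 2 */
	ti->local_flags |= _TLF_RUNLATCH;	/* fix: sets bit 4 as intended */
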
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index db360fc4cf0e..d09f3e8e6867 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -392,7 +392,7 @@ static int axon_msi_probe(struct platform_device *device)
 	}
 	memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
 
-	msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
+	msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
 	if (!msic->irq_domain) {
 		printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
 		       dn->full_name);
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index e5c3a2c6090d..f9a48af335cb 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -239,7 +239,7 @@ void __init beatic_init_IRQ(void)
 	ppc_md.get_irq = beatic_get_irq;
 
 	/* Allocate an irq host */
-	beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
+	beatic_host = irq_domain_add_nomap(NULL, 0, &beatic_pic_host_ops, NULL);
 	BUG_ON(beatic_host == NULL);
 	irq_set_default_host(beatic_host);
 }
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index a81e5a88fbdf..b4ddaa3fbb29 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -192,7 +192,7 @@ static int psurge_secondary_ipi_init(void)
 {
 	int rc = -ENOMEM;
 
-	psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
+	psurge_host = irq_domain_add_nomap(NULL, 0, &psurge_host_ops, NULL);
 
 	if (psurge_host)
 		psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 2a4ff86cc21f..5f3b23220b8e 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -753,9 +753,8 @@ void __init ps3_init_IRQ(void)
 	unsigned cpu;
 	struct irq_domain *host;
 
-	host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
+	host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
 	irq_set_default_host(host);
-	irq_set_virq_count(PS3_PLUG_MAX + 1);
 
 	for_each_possible_cpu(cpu) {
 		struct ps3_private *pd = &per_cpu(ps3_private, cpu);
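
The four powerpc call sites above all track the same irq_domain API change: irq_domain_add_nomap() grew a max_irq argument, replacing the old global irq_set_virq_count() with a per-domain cap. Judging from these callers, 0 means "no explicit limit", while ps3 preserves its old cap by passing PS3_PLUG_MAX + 1. The shape in use here, as a sketch:

	struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
						unsigned int max_irq,
						const struct irq_domain_ops *ops,
						void *host_data);

	/* before: host = irq_domain_add_nomap(NULL, &ops, NULL);
	 *         irq_set_virq_count(PS3_PLUG_MAX + 1);            */
	host = irq_domain_add_nomap(NULL, PS3_PLUG_MAX + 1, &ps3_host_ops, NULL);
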
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index fea13c7b1aee..b93c2c9ccb1d 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1264,4 +1264,4 @@ static int __init ds_init(void)
 	return vio_register_driver(&ds_driver);
 }
 
-subsys_initcall(ds_init);
+fs_initcall(ds_init);
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
index aba6b958b2a5..19f56058742b 100644
--- a/arch/sparc/kernel/leon_pci.c
+++ b/arch/sparc/kernel/leon_pci.c
@@ -45,7 +45,6 @@ void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
 
 void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
 {
-	struct leon_pci_info *info = pbus->sysdata;
 	struct pci_dev *dev;
 	int i, has_io, has_mem;
 	u16 cmd;
@@ -111,18 +110,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	return pci_enable_resources(dev, mask);
 }
 
-struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
-{
-	/*
-	 * Currently the OpenBoot nodes are not connected with the PCI device,
-	 * this is because the LEON PROM does not create PCI nodes. Eventually
-	 * this will change and the same approach as pcic.c can be used to
-	 * match PROM nodes with pci devices.
-	 */
-	return NULL;
-}
-EXPORT_SYMBOL(pci_device_to_OF_node);
-
 void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
 {
 #ifdef CONFIG_PCI_DEBUG
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 77f1b95e0806..9171fc238def 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -20,11 +20,6 @@
 
 	.text
 	.align	32
-__handle_softirq:
-	call	do_softirq
-	 nop
-	ba,a,pt	%xcc, __handle_softirq_continue
-	 nop
 __handle_preemption:
 	call	schedule
 	 wrpr	%g0, RTRAP_PSTATE, %pstate
@@ -89,9 +84,7 @@ rtrap:
 	cmp	%l1, 0
 
 	/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
-	bne,pn	%icc, __handle_softirq
 	ldx	[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-__handle_softirq_continue:
 rtrap_xcall:
 	sethi	%hi(0xf << 20), %l4
 	and	%l1, %l4, %l4
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 7705c6731e28..df3155a17991 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -225,6 +225,8 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	unsigned long g2;
 	int from_user = !(regs->psr & PSR_PS);
 	int fault, code;
+	unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+			      (write ? FAULT_FLAG_WRITE : 0));
 
 	if(text_fault)
 		address = regs->pc;
@@ -251,6 +253,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+retry:
 	down_read(&mm->mmap_sem);
 
 	/*
@@ -289,7 +292,11 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -297,13 +304,29 @@ good_area:
 		goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
+
 	up_read(&mm->mmap_sem);
 	return;
 
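
This hunk (and the fault_64.c one below) adopts the retry protocol that other architectures already use: the first attempt passes FAULT_FLAG_ALLOW_RETRY, and when the mm core returns VM_FAULT_RETRY it has already dropped mmap_sem inside __lock_page_or_retry(), so the handler loops back, retakes the lock, and tries once more with the retry flag cleared. The control flow, reduced to a sketch (names as in the patch):

	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
	retry:
		down_read(&mm->mmap_sem);
		fault = handle_mm_fault(mm, vma, address, flags);
		if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
			return;				/* lock already released */
		if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
			flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry only once */
			goto retry;			/* lock was dropped for us */
		}
		up_read(&mm->mmap_sem);
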
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 504c0622f729..1fe0429b6314 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -279,6 +279,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	unsigned int insn = 0;
 	int si_code, fault_code, fault;
 	unsigned long address, mm_rss;
+	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 	fault_code = get_thread_fault_code();
 
@@ -333,6 +334,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 		insn = get_fault_insn(regs, insn);
 		goto handle_kernel_fault;
 	}
+
+retry:
 		down_read(&mm->mmap_sem);
 	}
 
@@ -423,7 +426,12 @@ good_area:
 		goto bad_area;
 	}
 
-	fault = handle_mm_fault(mm, vma, address, (fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
+	fault = handle_mm_fault(mm, vma, address, flags);
+
+	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+		return;
+
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -431,12 +439,27 @@ good_area:
 		goto do_sigbus;
 		BUG();
 	}
-	if (fault & VM_FAULT_MAJOR) {
-		current->maj_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-	} else {
-		current->min_flt++;
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
+
+	if (flags & FAULT_FLAG_ALLOW_RETRY) {
+		if (fault & VM_FAULT_MAJOR) {
+			current->maj_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
+				      1, regs, address);
+		} else {
+			current->min_flt++;
+			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
+				      1, regs, address);
+		}
+		if (fault & VM_FAULT_RETRY) {
+			flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+			/* No need to up_read(&mm->mmap_sem) as we would
+			 * have already released it in __lock_page_or_retry
+			 * in mm/filemap.c.
+			 */
+
+			goto retry;
+		}
 	}
 	up_read(&mm->mmap_sem);
 
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 7a9327046404..446a7f52cc11 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -146,7 +146,6 @@ static ctl_table unaligned_table[] = {
 	},
 	{}
 };
-#endif
 
 static struct ctl_path tile_path[] = {
 	{ .procname = "tile" },
@@ -155,10 +154,9 @@ static struct ctl_path tile_path[] = {
 
 static int __init proc_sys_tile_init(void)
 {
-#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
 	register_sysctl_paths(tile_path, unaligned_table);
-#endif
 	return 0;
 }
 
 arch_initcall(proc_sys_tile_init);
+#endif
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index b949edcec200..172aef7d3159 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -196,6 +196,8 @@ void __cpuinit online_secondary(void)
 	/* This must be done before setting cpu_online_mask */
 	wmb();
 
+	notify_cpu_starting(smp_processor_id());
+
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
diff --git a/arch/um/drivers/cow.h b/arch/um/drivers/cow.h
index dc36b222100b..6673508f3426 100644
--- a/arch/um/drivers/cow.h
+++ b/arch/um/drivers/cow.h
@@ -3,41 +3,6 @@
 
 #include <asm/types.h>
 
-#if defined(__KERNEL__)
-
-# include <asm/byteorder.h>
-
-# if defined(__BIG_ENDIAN)
-#	define ntohll(x) (x)
-#	define htonll(x) (x)
-# elif defined(__LITTLE_ENDIAN)
-#	define ntohll(x) be64_to_cpu(x)
-#	define htonll(x) cpu_to_be64(x)
-# else
-#	error "Could not determine byte order"
-# endif
-
-#else
-/* For the definition of ntohl, htonl and __BYTE_ORDER */
-#include <endian.h>
-#include <netinet/in.h>
-#if defined(__BYTE_ORDER)
-
-# if __BYTE_ORDER == __BIG_ENDIAN
-#	define ntohll(x) (x)
-#	define htonll(x) (x)
-# elif __BYTE_ORDER == __LITTLE_ENDIAN
-#	define ntohll(x) bswap_64(x)
-#	define htonll(x) bswap_64(x)
-# else
-#	error "Could not determine byte order: __BYTE_ORDER uncorrectly defined"
-# endif
-
-#else /* ! defined(__BYTE_ORDER) */
-#	error "Could not determine byte order: __BYTE_ORDER not defined"
-#endif
-#endif /* ! defined(__KERNEL__) */
-
 extern int init_cow_file(int fd, char *cow_file, char *backing_file,
 			 int sectorsize, int alignment, int *bitmap_offset_out,
 			 unsigned long *bitmap_len_out, int *data_offset_out);
diff --git a/arch/um/drivers/cow_user.c b/arch/um/drivers/cow_user.c
index 9cbb426c0b91..0ee9cc6cc4c7 100644
--- a/arch/um/drivers/cow_user.c
+++ b/arch/um/drivers/cow_user.c
@@ -8,11 +8,10 @@
  * that.
  */
 #include <unistd.h>
-#include <byteswap.h>
 #include <errno.h>
 #include <string.h>
 #include <arpa/inet.h>
-#include <asm/types.h>
+#include <endian.h>
 #include "cow.h"
 #include "cow_sys.h"
 
@@ -214,8 +213,8 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
214 "header\n"); 213 "header\n");
215 goto out; 214 goto out;
216 } 215 }
217 header->magic = htonl(COW_MAGIC); 216 header->magic = htobe32(COW_MAGIC);
218 header->version = htonl(COW_VERSION); 217 header->version = htobe32(COW_VERSION);
219 218
220 err = -EINVAL; 219 err = -EINVAL;
221 if (strlen(backing_file) > sizeof(header->backing_file) - 1) { 220 if (strlen(backing_file) > sizeof(header->backing_file) - 1) {
@@ -246,10 +245,10 @@ int write_cow_header(char *cow_file, int fd, char *backing_file,
 		goto out_free;
 	}
 
-	header->mtime = htonl(modtime);
-	header->size = htonll(*size);
-	header->sectorsize = htonl(sectorsize);
-	header->alignment = htonl(alignment);
+	header->mtime = htobe32(modtime);
+	header->size = htobe64(*size);
+	header->sectorsize = htobe32(sectorsize);
+	header->alignment = htobe32(alignment);
 	header->cow_format = COW_BITMAP;
 
 	err = cow_write_file(fd, header, sizeof(*header));
@@ -301,8 +300,8 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	magic = header->v1.magic;
 	if (magic == COW_MAGIC)
 		version = header->v1.version;
-	else if (magic == ntohl(COW_MAGIC))
-		version = ntohl(header->v1.version);
+	else if (magic == be32toh(COW_MAGIC))
+		version = be32toh(header->v1.version);
 	/* No error printed because the non-COW case comes through here */
 	else goto out;
 
@@ -327,9 +326,9 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
327 "header\n"); 326 "header\n");
328 goto out; 327 goto out;
329 } 328 }
330 *mtime_out = ntohl(header->v2.mtime); 329 *mtime_out = be32toh(header->v2.mtime);
331 *size_out = ntohll(header->v2.size); 330 *size_out = be64toh(header->v2.size);
332 *sectorsize_out = ntohl(header->v2.sectorsize); 331 *sectorsize_out = be32toh(header->v2.sectorsize);
333 *bitmap_offset_out = sizeof(header->v2); 332 *bitmap_offset_out = sizeof(header->v2);
334 *align_out = *sectorsize_out; 333 *align_out = *sectorsize_out;
335 file = header->v2.backing_file; 334 file = header->v2.backing_file;
@@ -341,10 +340,10 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
341 "header\n"); 340 "header\n");
342 goto out; 341 goto out;
343 } 342 }
344 *mtime_out = ntohl(header->v3.mtime); 343 *mtime_out = be32toh(header->v3.mtime);
345 *size_out = ntohll(header->v3.size); 344 *size_out = be64toh(header->v3.size);
346 *sectorsize_out = ntohl(header->v3.sectorsize); 345 *sectorsize_out = be32toh(header->v3.sectorsize);
347 *align_out = ntohl(header->v3.alignment); 346 *align_out = be32toh(header->v3.alignment);
348 if (*align_out == 0) { 347 if (*align_out == 0) {
349 cow_printf("read_cow_header - invalid COW header, " 348 cow_printf("read_cow_header - invalid COW header, "
350 "align == 0\n"); 349 "align == 0\n");
@@ -366,16 +365,16 @@ int read_cow_header(int (*reader)(__u64, char *, int, void *), void *arg,
 	 * this was used until Dec2005 - 64bits are needed to represent
 	 * 2038+. I.e. we can safely do this truncating cast.
 	 *
-	 * Additionally, we must use ntohl() instead of ntohll(), since
+	 * Additionally, we must use be32toh() instead of be64toh(), since
 	 * the program used to use the former (tested - I got mtime
 	 * mismatch "0 vs whatever").
 	 *
 	 * Ever heard about bug-to-bug-compatibility ? ;-) */
-	*mtime_out = (time32_t) ntohl(header->v3_b.mtime);
+	*mtime_out = (time32_t) be32toh(header->v3_b.mtime);
 
-	*size_out = ntohll(header->v3_b.size);
-	*sectorsize_out = ntohl(header->v3_b.sectorsize);
-	*align_out = ntohl(header->v3_b.alignment);
+	*size_out = be64toh(header->v3_b.size);
+	*sectorsize_out = be32toh(header->v3_b.sectorsize);
+	*align_out = be32toh(header->v3_b.alignment);
 	if (*align_out == 0) {
 		cow_printf("read_cow_header - invalid COW header, "
 			   "align == 0\n");
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index e672bd6d43e3..43b39d61b538 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -22,6 +22,7 @@
 #include <linux/workqueue.h>
 #include <linux/mutex.h>
 #include <asm/uaccess.h>
+#include <asm/switch_to.h>
 
 #include "init.h"
 #include "irq_kern.h"
diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild
index 8419f5cf2ac7..fff24352255d 100644
--- a/arch/um/include/asm/Kbuild
+++ b/arch/um/include/asm/Kbuild
@@ -1,3 +1,4 @@
 generic-y += bug.h cputime.h device.h emergency-restart.h futex.h hardirq.h
 generic-y += hw_irq.h irq_regs.h kdebug.h percpu.h sections.h topology.h xor.h
-generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h
+generic-y += ftrace.h pci.h io.h param.h delay.h mutex.h current.h exec.h
+generic-y += switch_to.h
diff --git a/arch/um/kernel/Makefile b/arch/um/kernel/Makefile
index 492bc4c1b62b..65a1c3d690ea 100644
--- a/arch/um/kernel/Makefile
+++ b/arch/um/kernel/Makefile
@@ -3,9 +3,10 @@
 # Licensed under the GPL
 #
 
 CPPFLAGS_vmlinux.lds := -DSTART=$(LDS_START) \
 	-DELF_ARCH=$(LDS_ELF_ARCH) \
-	-DELF_FORMAT=$(LDS_ELF_FORMAT)
+	-DELF_FORMAT=$(LDS_ELF_FORMAT) \
+	$(LDS_EXTRA)
 extra-y := vmlinux.lds
 clean-files :=
 
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index f386d04a84a5..2b73dedb44ca 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -88,11 +88,8 @@ static inline void set_current(struct task_struct *task)
 
 extern void arch_switch_to(struct task_struct *to);
 
-void *_switch_to(void *prev, void *next, void *last)
+void *__switch_to(struct task_struct *from, struct task_struct *to)
 {
-	struct task_struct *from = prev;
-	struct task_struct *to = next;
-
 	to->thread.prev_sched = from;
 	set_current(to);
 
@@ -111,7 +108,6 @@ void *_switch_to(void *prev, void *next, void *last)
 	} while (current->thread.saved_task);
 
 	return current->thread.prev_sched;
-
 }
 
 void interrupt_end(void)
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 4947b319f53a..0a49ef0c2bf4 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -103,7 +103,6 @@ int init_new_context(struct task_struct *task, struct mm_struct *mm)
 
 void uml_setup_stubs(struct mm_struct *mm)
 {
-	struct page **pages;
 	int err, ret;
 
 	if (!skas_needs_stub)
diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um
index 4be406abeefd..36b62bc52638 100644
--- a/arch/x86/Makefile.um
+++ b/arch/x86/Makefile.um
@@ -14,6 +14,9 @@ LINK-y += $(call cc-option,-m32)
 
 export LDFLAGS
 
+LDS_EXTRA := -Ui386
+export LDS_EXTRA
+
 # First of all, tune CFLAGS for the specific CPU. This actually sets cflags-y.
 include $(srctree)/arch/x86/Makefile_32.cpu
 
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index b3b733262909..99480e55973d 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -43,7 +43,7 @@ extern void __add_wrong_size(void)
 	switch (sizeof(*(ptr))) {					\
 	case __X86_CASE_B:						\
 		asm volatile (lock #op "b %b0, %1\n"			\
-			      : "+r" (__ret), "+m" (*(ptr))		\
+			      : "+q" (__ret), "+m" (*(ptr))		\
 			      : : "memory", "cc");			\
 		break;							\
 	case __X86_CASE_W:						\
@@ -173,7 +173,7 @@ extern void __add_wrong_size(void)
 	switch (sizeof(*(ptr))) {					\
 	case __X86_CASE_B:						\
 		asm volatile (lock "addb %b1, %0\n"			\
-			      : "+m" (*(ptr)) : "ri" (inc)		\
+			      : "+m" (*(ptr)) : "qi" (inc)		\
 			      : "memory", "cc");			\
 		break;							\
 	case __X86_CASE_W:						\
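
Both constraint changes above exist because the byte case expands with a byte-sized operand ("%b0"/"%b1"): on 32-bit x86 only %eax/%ebx/%ecx/%edx have 8-bit subregisters, so the "r"/"ri" constraints let GCC pick %esi or %edi and then ask for a %sil/%dil that does not exist on i386. "q"/"qi" confine the register allocator to the a/b/c/d registers (on x86-64 "q" is no extra restriction). A minimal reproduction of the pattern, assuming GCC-style inline asm (function name is illustrative, not from the patch):

	static inline unsigned char xchg_byte(unsigned char *p, unsigned char v)
	{
		/* "+q": the operand needs a byte subregister; a plain "+r"
		 * may pick %esi on i386, which has none, and break the build. */
		asm volatile("xchgb %b0, %1"
			     : "+q" (v), "+m" (*p)
			     : : "memory");
		return v;
	}
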
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 8be5f54d9360..e0544597cfe7 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -557,6 +557,8 @@ struct __large_struct { unsigned long buf[100]; };
 
 extern unsigned long
 copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
+extern __must_check long
+strncpy_from_user(char *dst, const char __user *src, long count);
 
 /*
  * movsl can be slow when source and dest are not both 8-byte aligned
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 566e803cc602..8084bc73b18c 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,11 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
 	return n;
 }
 
-long __must_check strncpy_from_user(char *dst, const char __user *src,
-				    long count);
-long __must_check __strncpy_from_user(char *dst,
-				      const char __user *src, long count);
-
 /**
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 1c66d30971ad..fcd4b6f3ef02 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,10 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
 	}
 }
 
-__must_check long
-strncpy_from_user(char *dst, const char __user *src, long count);
-__must_check long
-__strncpy_from_user(char *dst, const char __user *src, long count);
 __must_check long strnlen_user(const char __user *str, long n);
 __must_check long __strnlen_user(const char __user *str, long n);
 __must_check long strlen_user(const char __user *str);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index f386dc49f988..7515cf0e1805 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -216,9 +216,9 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 	current_thread_info()->sig_on_uaccess_error = 1;
 
 	/*
-	 * 0 is a valid user pointer (in the access_ok sense) on 32-bit and
+	 * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
 	 * 64-bit, so we don't need to special-case it here. For all the
-	 * vsyscalls, 0 means "don't write anything" not "write it at
+	 * vsyscalls, NULL means "don't write anything" not "write it at
 	 * address 0".
 	 */
 	ret = -EFAULT;
@@ -247,7 +247,7 @@ bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
 
 		ret = sys_getcpu((unsigned __user *)regs->di,
 				 (unsigned __user *)regs->si,
-				 0);
+				 NULL);
 		break;
 	}
 
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 97be9cb54483..d6ae30bbd7bb 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -7,6 +7,8 @@
 #include <linux/highmem.h>
 #include <linux/module.h>
 
+#include <asm/word-at-a-time.h>
+
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
 */
@@ -41,3 +43,104 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
41 return len; 43 return len;
42} 44}
43EXPORT_SYMBOL_GPL(copy_from_user_nmi); 45EXPORT_SYMBOL_GPL(copy_from_user_nmi);
46
47static inline unsigned long count_bytes(unsigned long mask)
48{
49 mask = (mask - 1) & ~mask;
50 mask >>= 7;
51 return count_masked_bytes(mask);
52}
53
54/*
55 * Do a strncpy, return length of string without final '\0'.
56 * 'count' is the user-supplied count (return 'count' if we
57 * hit it), 'max' is the address space maximum (and we return
58 * -EFAULT if we hit it).
59 */
60static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
61{
62 long res = 0;
63
64 /*
65 * Truncate 'max' to the user-specified limit, so that
66 * we only have one limit we need to check in the loop
67 */
68 if (max > count)
69 max = count;
70
71 while (max >= sizeof(unsigned long)) {
72 unsigned long c;
73
74 /* Fall back to byte-at-a-time if we get a page fault */
75 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
76 break;
77 /* This can write a few bytes past the NUL character, but that's ok */
78 *(unsigned long *)(dst+res) = c;
79 c = has_zero(c);
80 if (c)
81 return res + count_bytes(c);
82 res += sizeof(unsigned long);
83 max -= sizeof(unsigned long);
84 }
85
86 while (max) {
87 char c;
88
89 if (unlikely(__get_user(c,src+res)))
90 return -EFAULT;
91 dst[res] = c;
92 if (!c)
93 return res;
94 res++;
95 max--;
96 }
97
98 /*
99 * Uhhuh. We hit 'max'. But was that the user-specified maximum
100 * too? If so, that's ok - we got as much as the user asked for.
101 */
102 if (res >= count)
103 return res;
104
105 /*
106 * Nope: we hit the address space limit, and we still had more
107 * characters the caller would have wanted. That's an EFAULT.
108 */
109 return -EFAULT;
110}
111
112/**
113 * strncpy_from_user: - Copy a NUL terminated string from userspace.
114 * @dst: Destination address, in kernel space. This buffer must be at
115 * least @count bytes long.
116 * @src: Source address, in user space.
117 * @count: Maximum number of bytes to copy, including the trailing NUL.
118 *
119 * Copies a NUL-terminated string from userspace to kernel space.
120 *
121 * On success, returns the length of the string (not including the trailing
122 * NUL).
123 *
124 * If access to userspace fails, returns -EFAULT (some data may have been
125 * copied).
126 *
127 * If @count is smaller than the length of the string, copies @count bytes
128 * and returns @count.
129 */
130long
131strncpy_from_user(char *dst, const char __user *src, long count)
132{
133 unsigned long max_addr, src_addr;
134
135 if (unlikely(count <= 0))
136 return 0;
137
138 max_addr = current_thread_info()->addr_limit.seg;
139 src_addr = (unsigned long)src;
140 if (likely(src_addr < max_addr)) {
141 unsigned long max = max_addr - src_addr;
142 return do_strncpy_from_user(dst, src, count, max);
143 }
144 return -EFAULT;
145}
146EXPORT_SYMBOL(strncpy_from_user);
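
Aside: the new do_strncpy_from_user() above finds the terminating NUL a word at a time via the <asm/word-at-a-time.h> helpers. A self-contained sketch of the same bit trick, with local stand-ins for has_zero() and count_masked_bytes() (assumes a little-endian 64-bit word; the names mirror the kernel helpers but are reimplemented here):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define ONEBYTES  0x0101010101010101ULL
#define HIGHBITS  0x8080808080808080ULL

/* nonzero iff 'v' contains a zero byte: the borrow sets bit 7 of that byte */
static uint64_t has_zero(uint64_t v)
{
        return (v - ONEBYTES) & ~v & HIGHBITS;
}

/* index of the first zero byte, given the mask from has_zero() */
static unsigned long first_zero_byte(uint64_t mask)
{
        mask = (mask - 1) & ~mask;      /* keep only bits below the first hit */
        mask >>= 7;                     /* one count bit per byte */
        return (mask * 0x0001020304050608ULL) >> 56;
}

int main(void)
{
        uint64_t w;

        memcpy(&w, "abc\0defg", 8);     /* NUL at byte index 3 */
        printf("first zero byte at %lu\n", first_zero_byte(has_zero(w)));
        return 0;
}

The subtraction trick can set spurious mask bits, but only in bytes above the first real zero, which is why isolating the lowest hit, as count_bytes() does, is safe.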
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index d9b094ca7aaa..ef2a6a5d78e3 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -33,93 +33,6 @@ static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned lon
33 __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n)) 33 __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))
34 34
35/* 35/*
36 * Copy a null terminated string from userspace.
37 */
38
39#define __do_strncpy_from_user(dst, src, count, res) \
40do { \
41 int __d0, __d1, __d2; \
42 might_fault(); \
43 __asm__ __volatile__( \
44 " testl %1,%1\n" \
45 " jz 2f\n" \
46 "0: lodsb\n" \
47 " stosb\n" \
48 " testb %%al,%%al\n" \
49 " jz 1f\n" \
50 " decl %1\n" \
51 " jnz 0b\n" \
52 "1: subl %1,%0\n" \
53 "2:\n" \
54 ".section .fixup,\"ax\"\n" \
55 "3: movl %5,%0\n" \
56 " jmp 2b\n" \
57 ".previous\n" \
58 _ASM_EXTABLE(0b,3b) \
59 : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
60 "=&D" (__d2) \
61 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
62 : "memory"); \
63} while (0)
64
65/**
66 * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking.
67 * @dst: Destination address, in kernel space. This buffer must be at
68 * least @count bytes long.
69 * @src: Source address, in user space.
70 * @count: Maximum number of bytes to copy, including the trailing NUL.
71 *
72 * Copies a NUL-terminated string from userspace to kernel space.
73 * Caller must check the specified block with access_ok() before calling
74 * this function.
75 *
76 * On success, returns the length of the string (not including the trailing
77 * NUL).
78 *
79 * If access to userspace fails, returns -EFAULT (some data may have been
80 * copied).
81 *
82 * If @count is smaller than the length of the string, copies @count bytes
83 * and returns @count.
84 */
85long
86__strncpy_from_user(char *dst, const char __user *src, long count)
87{
88 long res;
89 __do_strncpy_from_user(dst, src, count, res);
90 return res;
91}
92EXPORT_SYMBOL(__strncpy_from_user);
93
94/**
95 * strncpy_from_user: - Copy a NUL terminated string from userspace.
96 * @dst: Destination address, in kernel space. This buffer must be at
97 * least @count bytes long.
98 * @src: Source address, in user space.
99 * @count: Maximum number of bytes to copy, including the trailing NUL.
100 *
101 * Copies a NUL-terminated string from userspace to kernel space.
102 *
103 * On success, returns the length of the string (not including the trailing
104 * NUL).
105 *
106 * If access to userspace fails, returns -EFAULT (some data may have been
107 * copied).
108 *
109 * If @count is smaller than the length of the string, copies @count bytes
110 * and returns @count.
111 */
112long
113strncpy_from_user(char *dst, const char __user *src, long count)
114{
115 long res = -EFAULT;
116 if (access_ok(VERIFY_READ, src, 1))
117 __do_strncpy_from_user(dst, src, count, res);
118 return res;
119}
120EXPORT_SYMBOL(strncpy_from_user);
121
122/*
123 * Zero Userspace 36 * Zero Userspace
124 */ 37 */
125 38
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index b7c2849ffb66..0d0326f388c0 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -9,55 +9,6 @@
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10 10
11/* 11/*
12 * Copy a null terminated string from userspace.
13 */
14
15#define __do_strncpy_from_user(dst,src,count,res) \
16do { \
17 long __d0, __d1, __d2; \
18 might_fault(); \
19 __asm__ __volatile__( \
20 " testq %1,%1\n" \
21 " jz 2f\n" \
22 "0: lodsb\n" \
23 " stosb\n" \
24 " testb %%al,%%al\n" \
25 " jz 1f\n" \
26 " decq %1\n" \
27 " jnz 0b\n" \
28 "1: subq %1,%0\n" \
29 "2:\n" \
30 ".section .fixup,\"ax\"\n" \
31 "3: movq %5,%0\n" \
32 " jmp 2b\n" \
33 ".previous\n" \
34 _ASM_EXTABLE(0b,3b) \
35 : "=&r"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \
36 "=&D" (__d2) \
37 : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \
38 : "memory"); \
39} while (0)
40
41long
42__strncpy_from_user(char *dst, const char __user *src, long count)
43{
44 long res;
45 __do_strncpy_from_user(dst, src, count, res);
46 return res;
47}
48EXPORT_SYMBOL(__strncpy_from_user);
49
50long
51strncpy_from_user(char *dst, const char __user *src, long count)
52{
53 long res = -EFAULT;
54 if (access_ok(VERIFY_READ, src, 1))
55 return __strncpy_from_user(dst, src, count);
56 return res;
57}
58EXPORT_SYMBOL(strncpy_from_user);
59
60/*
61 * Zero Userspace 12 * Zero Userspace
62 */ 13 */
63 14
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
new file mode 100644
index 000000000000..7d01b8c56c00
--- /dev/null
+++ b/arch/x86/um/asm/barrier.h
@@ -0,0 +1,75 @@
1#ifndef _ASM_UM_BARRIER_H_
2#define _ASM_UM_BARRIER_H_
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9
10#include <linux/kernel.h>
11#include <linux/irqflags.h>
12
13/*
14 * Force strict CPU ordering.
15 * And yes, this is required on UP too when we're talking
16 * to devices.
17 */
18#ifdef CONFIG_X86_32
19
20#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
21#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
22#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
23
24#else /* CONFIG_X86_32 */
25
26#define mb() asm volatile("mfence" : : : "memory")
27#define rmb() asm volatile("lfence" : : : "memory")
28#define wmb() asm volatile("sfence" : : : "memory")
29
30#endif /* CONFIG_X86_32 */
31
32#define read_barrier_depends() do { } while (0)
33
34#ifdef CONFIG_SMP
35
36#define smp_mb() mb()
37#ifdef CONFIG_X86_PPRO_FENCE
38#define smp_rmb() rmb()
39#else /* CONFIG_X86_PPRO_FENCE */
40#define smp_rmb() barrier()
41#endif /* CONFIG_X86_PPRO_FENCE */
42
43#ifdef CONFIG_X86_OOSTORE
44#define smp_wmb() wmb()
45#else /* CONFIG_X86_OOSTORE */
46#define smp_wmb() barrier()
47#endif /* CONFIG_X86_OOSTORE */
48
49#define smp_read_barrier_depends() read_barrier_depends()
50#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
51
52#else /* CONFIG_SMP */
53
54#define smp_mb() barrier()
55#define smp_rmb() barrier()
56#define smp_wmb() barrier()
57#define smp_read_barrier_depends() do { } while (0)
58#define set_mb(var, value) do { var = value; barrier(); } while (0)
59
60#endif /* CONFIG_SMP */
61
62/*
63 * Stop RDTSC speculation. This is needed when you need to use RDTSC
64 * (or get_cycles or vread that possibly accesses the TSC) in a defined
65 * code region.
66 *
67 * (Could use an alternative three way for this if there was one.)
68 */
69static inline void rdtsc_barrier(void)
70{
71 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
72 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
73}
74
75#endif
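
A quick usage sketch of the smp_*() pairings this header provides (illustrative; 'data' and 'ready' are hypothetical variables):

static int data;
static int ready;

void producer(void)
{
        data = 42;
        smp_wmb();      /* order the data store before the flag store */
        ready = 1;
}

int consumer(void)
{
        if (!ready)
                return -1;
        smp_rmb();      /* order the flag load before the data load */
        return data;
}

On !CONFIG_SMP builds both calls collapse to barrier(), exactly as the second half of the header spells out.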
diff --git a/arch/x86/um/asm/system.h b/arch/x86/um/asm/system.h
deleted file mode 100644
index a459fd9b7598..000000000000
--- a/arch/x86/um/asm/system.h
+++ /dev/null
@@ -1,135 +0,0 @@
1#ifndef _ASM_X86_SYSTEM_H_
2#define _ASM_X86_SYSTEM_H_
3
4#include <asm/asm.h>
5#include <asm/segment.h>
6#include <asm/cpufeature.h>
7#include <asm/cmpxchg.h>
8#include <asm/nops.h>
9
10#include <linux/kernel.h>
11#include <linux/irqflags.h>
12
13/* entries in ARCH_DLINFO: */
14#ifdef CONFIG_IA32_EMULATION
15# define AT_VECTOR_SIZE_ARCH 2
16#else
17# define AT_VECTOR_SIZE_ARCH 1
18#endif
19
20extern unsigned long arch_align_stack(unsigned long sp);
21
22void default_idle(void);
23
24/*
25 * Force strict CPU ordering.
26 * And yes, this is required on UP too when we're talking
27 * to devices.
28 */
29#ifdef CONFIG_X86_32
30/*
31 * Some non-Intel clones support out of order store. wmb() ceases to be a
32 * nop for these.
33 */
34#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
35#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
36#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
37#else
38#define mb() asm volatile("mfence":::"memory")
39#define rmb() asm volatile("lfence":::"memory")
40#define wmb() asm volatile("sfence" ::: "memory")
41#endif
42
43/**
44 * read_barrier_depends - Flush all pending reads that subsequent reads
45 * depend on.
46 *
47 * No data-dependent reads from memory-like regions are ever reordered
48 * over this barrier. All reads preceding this primitive are guaranteed
49 * to access memory (but not necessarily other CPUs' caches) before any
50 * reads following this primitive that depend on the data returned by
51 * any of the preceding reads. This primitive is much lighter weight than
52 * rmb() on most CPUs, and is never heavier weight than is
53 * rmb().
54 *
55 * These ordering constraints are respected by both the local CPU
56 * and the compiler.
57 *
58 * Ordering is not guaranteed by anything other than these primitives,
59 * not even by data dependencies. See the documentation for
60 * memory_barrier() for examples and URLs to more information.
61 *
62 * For example, the following code would force ordering (the initial
63 * value of "a" is zero, "b" is one, and "p" is "&a"):
64 *
65 * <programlisting>
66 * CPU 0 CPU 1
67 *
68 * b = 2;
69 * memory_barrier();
70 * p = &b; q = p;
71 * read_barrier_depends();
72 * d = *q;
73 * </programlisting>
74 *
75 * because the read of "*q" depends on the read of "p" and these
76 * two reads are separated by a read_barrier_depends(). However,
77 * the following code, with the same initial values for "a" and "b":
78 *
79 * <programlisting>
80 * CPU 0 CPU 1
81 *
82 * a = 2;
83 * memory_barrier();
84 * b = 3; y = b;
85 * read_barrier_depends();
86 * x = a;
87 * </programlisting>
88 *
89 * does not enforce ordering, since there is no data dependency between
90 * the read of "a" and the read of "b". Therefore, on some CPUs, such
91 * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
92 * in cases like this where there are no data dependencies.
93 **/
94
95#define read_barrier_depends() do { } while (0)
96
97#ifdef CONFIG_SMP
98#define smp_mb() mb()
99#ifdef CONFIG_X86_PPRO_FENCE
100# define smp_rmb() rmb()
101#else
102# define smp_rmb() barrier()
103#endif
104#ifdef CONFIG_X86_OOSTORE
105# define smp_wmb() wmb()
106#else
107# define smp_wmb() barrier()
108#endif
109#define smp_read_barrier_depends() read_barrier_depends()
110#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
111#else
112#define smp_mb() barrier()
113#define smp_rmb() barrier()
114#define smp_wmb() barrier()
115#define smp_read_barrier_depends() do { } while (0)
116#define set_mb(var, value) do { var = value; barrier(); } while (0)
117#endif
118
119/*
120 * Stop RDTSC speculation. This is needed when you need to use RDTSC
121 * (or get_cycles or vread that possibly accesses the TSC) in a defined
122 * code region.
123 *
124 * (Could use an alternative three way for this if there was one.)
125 */
126static inline void rdtsc_barrier(void)
127{
128 alternative(ASM_NOP3, "mfence", X86_FEATURE_MFENCE_RDTSC);
129 alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
130}
131
132extern void *_switch_to(void *prev, void *next, void *last);
133#define switch_to(prev, next, last) prev = _switch_to(prev, next, last)
134
135#endif
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00edd71..1f61b74867e4 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
483 if (!q) 483 if (!q)
484 return NULL; 484 return NULL;
485 485
486 q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL); 486 q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
487 if (q->id < 0) 487 if (q->id < 0)
488 goto fail_q; 488 goto fail_q;
489 489
@@ -1277,7 +1277,8 @@ static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
1277 list_for_each_entry_reverse(rq, &plug->list, queuelist) { 1277 list_for_each_entry_reverse(rq, &plug->list, queuelist) {
1278 int el_ret; 1278 int el_ret;
1279 1279
1280 (*request_count)++; 1280 if (rq->q == q)
1281 (*request_count)++;
1281 1282
1282 if (rq->q != q || !blk_rq_merge_ok(rq, bio)) 1283 if (rq->q != q || !blk_rq_merge_ok(rq, bio))
1283 continue; 1284 continue;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a76721d..f2ddb94626bd 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@ void blk_throtl_drain(struct request_queue *q)
1218 struct bio_list bl; 1218 struct bio_list bl;
1219 struct bio *bio; 1219 struct bio *bio;
1220 1220
1221 WARN_ON_ONCE(!queue_is_locked(q)); 1221 queue_lockdep_assert_held(q);
1222 1222
1223 bio_list_init(&bl); 1223 bio_list_init(&bl);
1224 1224
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 457295253566..3c38536bd52c 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@ struct cfq_data {
295 unsigned int cfq_slice_idle; 295 unsigned int cfq_slice_idle;
296 unsigned int cfq_group_idle; 296 unsigned int cfq_group_idle;
297 unsigned int cfq_latency; 297 unsigned int cfq_latency;
298 unsigned int cfq_target_latency;
298 299
299 /* 300 /*
300 * Fallback dummy cfqq for extreme OOM conditions 301 * Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@ cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
604{ 605{
605 struct cfq_rb_root *st = &cfqd->grp_service_tree; 606 struct cfq_rb_root *st = &cfqd->grp_service_tree;
606 607
607 return cfq_target_latency * cfqg->weight / st->total_weight; 608 return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
608} 609}
609 610
610static inline unsigned 611static inline unsigned
@@ -2271,7 +2272,8 @@ new_workload:
2271 * to have higher weight. A more accurate thing would be to 2272 * to have higher weight. A more accurate thing would be to
2272 * calculate system wide async/sync ratio. 2273 * calculate system wide async/sync ratio.
2273 */ 2274 */
2274 tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg); 2275 tmp = cfqd->cfq_target_latency *
2276 cfqg_busy_async_queues(cfqd, cfqg);
2275 tmp = tmp/cfqd->busy_queues; 2277 tmp = tmp/cfqd->busy_queues;
2276 slice = min_t(unsigned, slice, tmp); 2278 slice = min_t(unsigned, slice, tmp);
2277 2279
@@ -3737,6 +3739,7 @@ static void *cfq_init_queue(struct request_queue *q)
3737 cfqd->cfq_back_penalty = cfq_back_penalty; 3739 cfqd->cfq_back_penalty = cfq_back_penalty;
3738 cfqd->cfq_slice[0] = cfq_slice_async; 3740 cfqd->cfq_slice[0] = cfq_slice_async;
3739 cfqd->cfq_slice[1] = cfq_slice_sync; 3741 cfqd->cfq_slice[1] = cfq_slice_sync;
3742 cfqd->cfq_target_latency = cfq_target_latency;
3740 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 3743 cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
3741 cfqd->cfq_slice_idle = cfq_slice_idle; 3744 cfqd->cfq_slice_idle = cfq_slice_idle;
3742 cfqd->cfq_group_idle = cfq_group_idle; 3745 cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@ SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
3788SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 3791SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
3789SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 3792SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
3790SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); 3793SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
3794SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
3791#undef SHOW_FUNCTION 3795#undef SHOW_FUNCTION
3792 3796
3793#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ 3797#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -3821,6 +3825,7 @@ STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
3821STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 3825STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
3822 UINT_MAX, 0); 3826 UINT_MAX, 0);
3823STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); 3827STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
3828STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
3824#undef STORE_FUNCTION 3829#undef STORE_FUNCTION
3825 3830
3826#define CFQ_ATTR(name) \ 3831#define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@ static struct elv_fs_entry cfq_attrs[] = {
3838 CFQ_ATTR(slice_idle), 3843 CFQ_ATTR(slice_idle),
3839 CFQ_ATTR(group_idle), 3844 CFQ_ATTR(group_idle),
3840 CFQ_ATTR(low_latency), 3845 CFQ_ATTR(low_latency),
3846 CFQ_ATTR(target_latency),
3841 __ATTR_NULL 3847 __ATTR_NULL
3842}; 3848};
3843 3849
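
Aside: with target_latency now living in struct cfq_data and exported through the attr table, the group slice follows the runtime tunable instead of the old compile-time constant. A hedged sketch of the computation (names illustrative):

unsigned int group_slice(unsigned int target_latency, unsigned int weight,
                         unsigned int total_weight)
{
        /* each group gets a weight-proportional share of the target latency */
        return target_latency * weight / total_weight;
}

The knob should surface next to the other CFQ attributes, presumably as /sys/block/<dev>/queue/iosched/target_latency; the trailing 1 in the SHOW/STORE macros selects jiffies-to-milliseconds conversion.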
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 21ff9d015432..8e84225c096b 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -627,7 +627,7 @@ config CRYPTO_BLOWFISH_COMMON
627 627
628config CRYPTO_BLOWFISH_X86_64 628config CRYPTO_BLOWFISH_X86_64
629 tristate "Blowfish cipher algorithm (x86_64)" 629 tristate "Blowfish cipher algorithm (x86_64)"
630 depends on (X86 || UML_X86) && 64BIT 630 depends on X86 && 64BIT
631 select CRYPTO_ALGAPI 631 select CRYPTO_ALGAPI
632 select CRYPTO_BLOWFISH_COMMON 632 select CRYPTO_BLOWFISH_COMMON
633 help 633 help
@@ -657,7 +657,7 @@ config CRYPTO_CAMELLIA
657 657
658config CRYPTO_CAMELLIA_X86_64 658config CRYPTO_CAMELLIA_X86_64
659 tristate "Camellia cipher algorithm (x86_64)" 659 tristate "Camellia cipher algorithm (x86_64)"
660 depends on (X86 || UML_X86) && 64BIT 660 depends on X86 && 64BIT
661 depends on CRYPTO 661 depends on CRYPTO
662 select CRYPTO_ALGAPI 662 select CRYPTO_ALGAPI
663 select CRYPTO_LRW 663 select CRYPTO_LRW
@@ -893,7 +893,7 @@ config CRYPTO_TWOFISH_X86_64
893 893
894config CRYPTO_TWOFISH_X86_64_3WAY 894config CRYPTO_TWOFISH_X86_64_3WAY
895 tristate "Twofish cipher algorithm (x86_64, 3-way parallel)" 895 tristate "Twofish cipher algorithm (x86_64, 3-way parallel)"
896 depends on (X86 || UML_X86) && 64BIT 896 depends on X86 && 64BIT
897 select CRYPTO_ALGAPI 897 select CRYPTO_ALGAPI
898 select CRYPTO_TWOFISH_COMMON 898 select CRYPTO_TWOFISH_COMMON
899 select CRYPTO_TWOFISH_X86_64 899 select CRYPTO_TWOFISH_X86_64
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 01c2cf4efcdd..cc273226dbd0 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -247,8 +247,7 @@ static int amba_pm_restore(struct device *dev)
247/* 247/*
248 * Hooks to provide runtime PM of the pclk (bus clock). It is safe to 248 * Hooks to provide runtime PM of the pclk (bus clock). It is safe to
249 * enable/disable the bus clock at runtime PM suspend/resume as this 249 * enable/disable the bus clock at runtime PM suspend/resume as this
250 * does not result in loss of context. However, disabling vcore power 250 * does not result in loss of context.
251 * would do, so we leave that to the driver.
252 */ 251 */
253static int amba_pm_runtime_suspend(struct device *dev) 252static int amba_pm_runtime_suspend(struct device *dev)
254{ 253{
@@ -354,39 +353,6 @@ static void amba_put_disable_pclk(struct amba_device *pcdev)
354 clk_put(pclk); 353 clk_put(pclk);
355} 354}
356 355
357static int amba_get_enable_vcore(struct amba_device *pcdev)
358{
359 struct regulator *vcore = regulator_get(&pcdev->dev, "vcore");
360 int ret;
361
362 pcdev->vcore = vcore;
363
364 if (IS_ERR(vcore)) {
365 /* It is OK not to supply a vcore regulator */
366 if (PTR_ERR(vcore) == -ENODEV)
367 return 0;
368 return PTR_ERR(vcore);
369 }
370
371 ret = regulator_enable(vcore);
372 if (ret) {
373 regulator_put(vcore);
374 pcdev->vcore = ERR_PTR(-ENODEV);
375 }
376
377 return ret;
378}
379
380static void amba_put_disable_vcore(struct amba_device *pcdev)
381{
382 struct regulator *vcore = pcdev->vcore;
383
384 if (!IS_ERR(vcore)) {
385 regulator_disable(vcore);
386 regulator_put(vcore);
387 }
388}
389
390/* 356/*
391 * These are the device model conversion veneers; they convert the 357 * These are the device model conversion veneers; they convert the
392 * device model structures to our more specific structures. 358 * device model structures to our more specific structures.
@@ -399,10 +365,6 @@ static int amba_probe(struct device *dev)
399 int ret; 365 int ret;
400 366
401 do { 367 do {
402 ret = amba_get_enable_vcore(pcdev);
403 if (ret)
404 break;
405
406 ret = amba_get_enable_pclk(pcdev); 368 ret = amba_get_enable_pclk(pcdev);
407 if (ret) 369 if (ret)
408 break; 370 break;
@@ -420,7 +382,6 @@ static int amba_probe(struct device *dev)
420 pm_runtime_put_noidle(dev); 382 pm_runtime_put_noidle(dev);
421 383
422 amba_put_disable_pclk(pcdev); 384 amba_put_disable_pclk(pcdev);
423 amba_put_disable_vcore(pcdev);
424 } while (0); 385 } while (0);
425 386
426 return ret; 387 return ret;
@@ -442,7 +403,6 @@ static int amba_remove(struct device *dev)
442 pm_runtime_put_noidle(dev); 403 pm_runtime_put_noidle(dev);
443 404
444 amba_put_disable_pclk(pcdev); 405 amba_put_disable_pclk(pcdev);
445 amba_put_disable_vcore(pcdev);
446 406
447 return ret; 407 return ret;
448} 408}
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index 05f150382da8..ba29b2e73d48 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -15,7 +15,7 @@
15#include <linux/sys_soc.h> 15#include <linux/sys_soc.h>
16#include <linux/err.h> 16#include <linux/err.h>
17 17
18static DEFINE_IDR(soc_ida); 18static DEFINE_IDA(soc_ida);
19static DEFINE_SPINLOCK(soc_lock); 19static DEFINE_SPINLOCK(soc_lock);
20 20
21static ssize_t soc_info_get(struct device *dev, 21static ssize_t soc_info_get(struct device *dev,
@@ -168,8 +168,6 @@ void soc_device_unregister(struct soc_device *soc_dev)
168 168
169static int __init soc_bus_register(void) 169static int __init soc_bus_register(void)
170{ 170{
171 spin_lock_init(&soc_lock);
172
173 return bus_register(&soc_bus_type); 171 return bus_register(&soc_bus_type);
174} 172}
175core_initcall(soc_bus_register); 173core_initcall(soc_bus_register);
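
For reference, the DEFINE_IDA() fix matters because the driver hands out instance numbers through the ida_* API, which expects a struct ida rather than a struct idr; DEFINE_SPINLOCK() likewise initializes the lock statically, making the runtime spin_lock_init() redundant. A minimal sketch of the ida pattern (names illustrative):

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

int example_get_id(void)
{
        /* lowest free id >= 0, no upper bound */
        return ida_simple_get(&example_ida, 0, 0, GFP_KERNEL);
}

void example_put_id(int id)
{
        ida_simple_remove(&example_ida, id);
}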
diff --git a/drivers/bcma/Kconfig b/drivers/bcma/Kconfig
index c1172dafdffa..fb7c80fb721e 100644
--- a/drivers/bcma/Kconfig
+++ b/drivers/bcma/Kconfig
@@ -29,7 +29,7 @@ config BCMA_HOST_PCI
29 29
30config BCMA_DRIVER_PCI_HOSTMODE 30config BCMA_DRIVER_PCI_HOSTMODE
31 bool "Driver for PCI core working in hostmode" 31 bool "Driver for PCI core working in hostmode"
32 depends on BCMA && MIPS 32 depends on BCMA && MIPS && BCMA_HOST_PCI
33 help 33 help
34 PCI core hostmode operation (external PCI bus). 34 PCI core hostmode operation (external PCI bus).
35 35
diff --git a/drivers/bcma/driver_pci_host.c b/drivers/bcma/driver_pci_host.c
index 4e20bcfa7ec5..d2097a11c3c7 100644
--- a/drivers/bcma/driver_pci_host.c
+++ b/drivers/bcma/driver_pci_host.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include "bcma_private.h" 12#include "bcma_private.h"
13#include <linux/pci.h>
13#include <linux/export.h> 14#include <linux/export.h>
14#include <linux/bcma/bcma.h> 15#include <linux/bcma/bcma.h>
15#include <asm/paccess.h> 16#include <asm/paccess.h>
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e820b68d2f6c..acda773b3720 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -866,6 +866,7 @@ cciss_scsi_detect(ctlr_info_t *h)
866 sh->can_queue = cciss_tape_cmds; 866 sh->can_queue = cciss_tape_cmds;
867 sh->sg_tablesize = h->maxsgentries; 867 sh->sg_tablesize = h->maxsgentries;
868 sh->max_cmd_len = MAX_COMMAND_SIZE; 868 sh->max_cmd_len = MAX_COMMAND_SIZE;
869 sh->max_sectors = h->cciss_max_sectors;
869 870
870 ((struct cciss_scsi_adapter_data_t *) 871 ((struct cciss_scsi_adapter_data_t *)
871 h->scsi_ctlr)->scsi_host = sh; 872 h->scsi_ctlr)->scsi_host = sh;
@@ -1410,7 +1411,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
1410 /* track how many SG entries we are using */ 1411 /* track how many SG entries we are using */
1411 if (request_nsgs > h->maxSG) 1412 if (request_nsgs > h->maxSG)
1412 h->maxSG = request_nsgs; 1413 h->maxSG = request_nsgs;
1413 c->Header.SGTotal = (__u8) request_nsgs + chained; 1414 c->Header.SGTotal = (u16) request_nsgs + chained;
1414 if (request_nsgs > h->max_cmd_sgentries) 1415 if (request_nsgs > h->max_cmd_sgentries)
1415 c->Header.SGList = h->max_cmd_sgentries; 1416 c->Header.SGList = h->max_cmd_sgentries;
1416 else 1417 else
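
Aside: the SGTotal cast fix above bites once a request carries more than 255 scatter-gather entries; narrowing such a count to a u8 silently drops the high bits. A tiny userspace illustration (values hypothetical):

#include <stdio.h>

int main(void)
{
        unsigned int request_nsgs = 300;        /* a large-but-valid SG count */

        unsigned char  as_u8  = (unsigned char)request_nsgs;
        unsigned short as_u16 = (unsigned short)request_nsgs;

        /* u8: 44, u16: 300 */
        printf("u8: %u, u16: %u\n", (unsigned)as_u8, (unsigned)as_u16);
        return 0;
}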
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index b5dd14e072f2..0ba837fc62a8 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
4 4
5config BLK_DEV_PCIESSD_MTIP32XX 5config BLK_DEV_PCIESSD_MTIP32XX
6 tristate "Block Device Driver for Micron PCIe SSDs" 6 tristate "Block Device Driver for Micron PCIe SSDs"
7 depends on HOTPLUG_PCI_PCIE 7 depends on PCI
8 help 8 help
9 This enables the block driver for Micron PCIe SSDs. 9 This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 8eb81c96608f..00f9fc992090 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -36,6 +36,7 @@
36#include <linux/idr.h> 36#include <linux/idr.h>
37#include <linux/kthread.h> 37#include <linux/kthread.h>
38#include <../drivers/ata/ahci.h> 38#include <../drivers/ata/ahci.h>
39#include <linux/export.h>
39#include "mtip32xx.h" 40#include "mtip32xx.h"
40 41
41#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) 42#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -44,6 +45,7 @@
44#define HW_PORT_PRIV_DMA_SZ \ 45#define HW_PORT_PRIV_DMA_SZ \
45 (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ) 46 (HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
46 47
48#define HOST_CAP_NZDMA (1 << 19)
47#define HOST_HSORG 0xFC 49#define HOST_HSORG 0xFC
48#define HSORG_DISABLE_SLOTGRP_INTR (1<<24) 50#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
49#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16) 51#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
@@ -139,6 +141,12 @@ static void mtip_command_cleanup(struct driver_data *dd)
139 int group = 0, commandslot = 0, commandindex = 0; 141 int group = 0, commandslot = 0, commandindex = 0;
140 struct mtip_cmd *command; 142 struct mtip_cmd *command;
141 struct mtip_port *port = dd->port; 143 struct mtip_port *port = dd->port;
144 static int in_progress;
145
146 if (in_progress)
147 return;
148
149 in_progress = 1;
142 150
143 for (group = 0; group < 4; group++) { 151 for (group = 0; group < 4; group++) {
144 for (commandslot = 0; commandslot < 32; commandslot++) { 152 for (commandslot = 0; commandslot < 32; commandslot++) {
@@ -165,7 +173,8 @@ static void mtip_command_cleanup(struct driver_data *dd)
165 173
166 up(&port->cmd_slot); 174 up(&port->cmd_slot);
167 175
168 atomic_set(&dd->drv_cleanup_done, true); 176 set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
177 in_progress = 0;
169} 178}
170 179
171/* 180/*
@@ -262,6 +271,9 @@ static int hba_reset_nosleep(struct driver_data *dd)
262 && time_before(jiffies, timeout)) 271 && time_before(jiffies, timeout))
263 mdelay(1); 272 mdelay(1);
264 273
274 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
275 return -1;
276
265 if (readl(dd->mmio + HOST_CTL) & HOST_RESET) 277 if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
266 return -1; 278 return -1;
267 279
@@ -294,6 +306,10 @@ static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
294 port->cmd_issue[MTIP_TAG_INDEX(tag)]); 306 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
295 307
296 spin_unlock_irqrestore(&port->cmd_issue_lock, flags); 308 spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
309
310 /* Set the command's timeout value.*/
311 port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
312 MTIP_NCQ_COMMAND_TIMEOUT_MS);
297} 313}
298 314
299/* 315/*
@@ -420,7 +436,12 @@ static void mtip_init_port(struct mtip_port *port)
420 writel(0xFFFFFFFF, port->completed[i]); 436 writel(0xFFFFFFFF, port->completed[i]);
421 437
422 /* Clear any pending interrupts for this port */ 438 /* Clear any pending interrupts for this port */
423 writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT); 439 writel(readl(port->dd->mmio + PORT_IRQ_STAT),
440 port->dd->mmio + PORT_IRQ_STAT);
441
442 /* Clear any pending interrupts on the HBA. */
443 writel(readl(port->dd->mmio + HOST_IRQ_STAT),
444 port->dd->mmio + HOST_IRQ_STAT);
424 445
425 /* Enable port interrupts */ 446 /* Enable port interrupts */
426 writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK); 447 writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
@@ -447,6 +468,9 @@ static void mtip_restart_port(struct mtip_port *port)
447 && time_before(jiffies, timeout)) 468 && time_before(jiffies, timeout))
448 ; 469 ;
449 470
471 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
472 return;
473
450 /* 474 /*
451 * Chip quirk: escalate to hba reset if 475 * Chip quirk: escalate to hba reset if
452 * PxCMD.CR not clear after 500 ms 476 * PxCMD.CR not clear after 500 ms
@@ -475,6 +499,9 @@ static void mtip_restart_port(struct mtip_port *port)
475 while (time_before(jiffies, timeout)) 499 while (time_before(jiffies, timeout))
476 ; 500 ;
477 501
502 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
503 return;
504
478 /* Clear PxSCTL.DET */ 505 /* Clear PxSCTL.DET */
479 writel(readl(port->mmio + PORT_SCR_CTL) & ~1, 506 writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
480 port->mmio + PORT_SCR_CTL); 507 port->mmio + PORT_SCR_CTL);
@@ -486,15 +513,35 @@ static void mtip_restart_port(struct mtip_port *port)
486 && time_before(jiffies, timeout)) 513 && time_before(jiffies, timeout))
487 ; 514 ;
488 515
516 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
517 return;
518
489 if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0) 519 if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
490 dev_warn(&port->dd->pdev->dev, 520 dev_warn(&port->dd->pdev->dev,
491 "COM reset failed\n"); 521 "COM reset failed\n");
492 522
493 /* Clear SError, the PxSERR.DIAG.x should be set so clear it */ 523 mtip_init_port(port);
494 writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR); 524 mtip_start_port(port);
495 525
496 /* Enable the DMA engine */ 526}
497 mtip_enable_engine(port, 1); 527
528/*
529 * Helper function for tag logging
530 */
531static void print_tags(struct driver_data *dd,
532 char *msg,
533 unsigned long *tagbits,
534 int cnt)
535{
536 unsigned char tagmap[128];
537 int group, tagmap_len = 0;
538
539 memset(tagmap, 0, sizeof(tagmap));
540 for (group = SLOTBITS_IN_LONGS; group > 0; group--)
541 tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
542 tagbits[group-1]);
543 dev_warn(&dd->pdev->dev,
544 "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
498} 545}
499 546
500/* 547/*
@@ -514,15 +561,18 @@ static void mtip_timeout_function(unsigned long int data)
514 int tag, cmdto_cnt = 0; 561 int tag, cmdto_cnt = 0;
515 unsigned int bit, group; 562 unsigned int bit, group;
516 unsigned int num_command_slots = port->dd->slot_groups * 32; 563 unsigned int num_command_slots = port->dd->slot_groups * 32;
564 unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
517 565
518 if (unlikely(!port)) 566 if (unlikely(!port))
519 return; 567 return;
520 568
521 if (atomic_read(&port->dd->resumeflag) == true) { 569 if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
522 mod_timer(&port->cmd_timer, 570 mod_timer(&port->cmd_timer,
523 jiffies + msecs_to_jiffies(30000)); 571 jiffies + msecs_to_jiffies(30000));
524 return; 572 return;
525 } 573 }
574 /* clear the tag accumulator */
575 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
526 576
527 for (tag = 0; tag < num_command_slots; tag++) { 577 for (tag = 0; tag < num_command_slots; tag++) {
528 /* 578 /*
@@ -540,12 +590,10 @@ static void mtip_timeout_function(unsigned long int data)
540 command = &port->commands[tag]; 590 command = &port->commands[tag];
541 fis = (struct host_to_dev_fis *) command->command; 591 fis = (struct host_to_dev_fis *) command->command;
542 592
543 dev_warn(&port->dd->pdev->dev, 593 set_bit(tag, tagaccum);
544 "Timeout for command tag %d\n", tag);
545
546 cmdto_cnt++; 594 cmdto_cnt++;
547 if (cmdto_cnt == 1) 595 if (cmdto_cnt == 1)
548 set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags); 596 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
549 597
550 /* 598 /*
551 * Clear the completed bit. This should prevent 599 * Clear the completed bit. This should prevent
@@ -578,15 +626,29 @@ static void mtip_timeout_function(unsigned long int data)
578 } 626 }
579 } 627 }
580 628
581 if (cmdto_cnt) { 629 if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
582 dev_warn(&port->dd->pdev->dev, 630 print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
583 "%d commands timed out: restarting port", 631
584 cmdto_cnt);
585 mtip_restart_port(port); 632 mtip_restart_port(port);
586 clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags); 633 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
587 wake_up_interruptible(&port->svc_wait); 634 wake_up_interruptible(&port->svc_wait);
588 } 635 }
589 636
637 if (port->ic_pause_timer) {
638 to = port->ic_pause_timer + msecs_to_jiffies(1000);
639 if (time_after(jiffies, to)) {
640 if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
641 port->ic_pause_timer = 0;
642 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
643 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
644 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
645 wake_up_interruptible(&port->svc_wait);
646 }
647
648
649 }
650 }
651
590 /* Restart the timer */ 652 /* Restart the timer */
591 mod_timer(&port->cmd_timer, 653 mod_timer(&port->cmd_timer,
592 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); 654 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
@@ -681,23 +743,18 @@ static void mtip_completion(struct mtip_port *port,
681 complete(waiting); 743 complete(waiting);
682} 744}
683 745
684/* 746static void mtip_null_completion(struct mtip_port *port,
685 * Helper function for tag logging 747 int tag,
686 */ 748 void *data,
687static void print_tags(struct driver_data *dd, 749 int status)
688 char *msg,
689 unsigned long *tagbits)
690{ 750{
691 unsigned int tag, count = 0; 751 return;
692
693 for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
694 if (test_bit(tag, tagbits))
695 count++;
696 }
697 if (count)
698 dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
699} 752}
700 753
754static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
755 dma_addr_t buffer_dma, unsigned int sectors);
756static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
757 struct smart_attr *attrib);
701/* 758/*
702 * Handle an error. 759 * Handle an error.
703 * 760 *
@@ -708,12 +765,16 @@ static void print_tags(struct driver_data *dd,
708 */ 765 */
709static void mtip_handle_tfe(struct driver_data *dd) 766static void mtip_handle_tfe(struct driver_data *dd)
710{ 767{
711 int group, tag, bit, reissue; 768 int group, tag, bit, reissue, rv;
712 struct mtip_port *port; 769 struct mtip_port *port;
713 struct mtip_cmd *command; 770 struct mtip_cmd *cmd;
714 u32 completed; 771 u32 completed;
715 struct host_to_dev_fis *fis; 772 struct host_to_dev_fis *fis;
716 unsigned long tagaccum[SLOTBITS_IN_LONGS]; 773 unsigned long tagaccum[SLOTBITS_IN_LONGS];
774 unsigned int cmd_cnt = 0;
775 unsigned char *buf;
776 char *fail_reason = NULL;
777 int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
717 778
718 dev_warn(&dd->pdev->dev, "Taskfile error\n"); 779 dev_warn(&dd->pdev->dev, "Taskfile error\n");
719 780
@@ -722,8 +783,11 @@ static void mtip_handle_tfe(struct driver_data *dd)
722 /* Stop the timer to prevent command timeouts. */ 783 /* Stop the timer to prevent command timeouts. */
723 del_timer(&port->cmd_timer); 784 del_timer(&port->cmd_timer);
724 785
786 /* clear the tag accumulator */
787 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
788
725 /* Set eh_active */ 789 /* Set eh_active */
726 set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags); 790 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
727 791
728 /* Loop through all the groups */ 792 /* Loop through all the groups */
729 for (group = 0; group < dd->slot_groups; group++) { 793 for (group = 0; group < dd->slot_groups; group++) {
@@ -732,9 +796,6 @@ static void mtip_handle_tfe(struct driver_data *dd)
732 /* clear completed status register in the hardware.*/ 796 /* clear completed status register in the hardware.*/
733 writel(completed, port->completed[group]); 797 writel(completed, port->completed[group]);
734 798
735 /* clear the tag accumulator */
736 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
737
738 /* Process successfully completed commands */ 799 /* Process successfully completed commands */
739 for (bit = 0; bit < 32 && completed; bit++) { 800 for (bit = 0; bit < 32 && completed; bit++) {
740 if (!(completed & (1<<bit))) 801 if (!(completed & (1<<bit)))
@@ -745,13 +806,14 @@ static void mtip_handle_tfe(struct driver_data *dd)
745 if (tag == MTIP_TAG_INTERNAL) 806 if (tag == MTIP_TAG_INTERNAL)
746 continue; 807 continue;
747 808
748 command = &port->commands[tag]; 809 cmd = &port->commands[tag];
749 if (likely(command->comp_func)) { 810 if (likely(cmd->comp_func)) {
750 set_bit(tag, tagaccum); 811 set_bit(tag, tagaccum);
751 atomic_set(&port->commands[tag].active, 0); 812 cmd_cnt++;
752 command->comp_func(port, 813 atomic_set(&cmd->active, 0);
814 cmd->comp_func(port,
753 tag, 815 tag,
754 command->comp_data, 816 cmd->comp_data,
755 0); 817 0);
756 } else { 818 } else {
757 dev_err(&port->dd->pdev->dev, 819 dev_err(&port->dd->pdev->dev,
@@ -765,12 +827,45 @@ static void mtip_handle_tfe(struct driver_data *dd)
765 } 827 }
766 } 828 }
767 } 829 }
768 print_tags(dd, "TFE tags completed:", tagaccum); 830
831 print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
769 832
770 /* Restart the port */ 833 /* Restart the port */
771 mdelay(20); 834 mdelay(20);
772 mtip_restart_port(port); 835 mtip_restart_port(port);
773 836
837 /* Trying to determine the cause of the error */
838 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
839 dd->port->log_buf,
840 dd->port->log_buf_dma, 1);
841 if (rv) {
842 dev_warn(&dd->pdev->dev,
843 "Error in READ LOG EXT (10h) command\n");
844 /* non-critical error, don't fail the load */
845 } else {
846 buf = (unsigned char *)dd->port->log_buf;
847 if (buf[259] & 0x1) {
848 dev_info(&dd->pdev->dev,
849 "Write protect bit is set.\n");
850 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
851 fail_all_ncq_write = 1;
852 fail_reason = "write protect";
853 }
854 if (buf[288] == 0xF7) {
855 dev_info(&dd->pdev->dev,
856 "Exceeded Tmax, drive in thermal shutdown.\n");
857 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
858 fail_all_ncq_cmds = 1;
859 fail_reason = "thermal shutdown";
860 }
861 if (buf[288] == 0xBF) {
862 dev_info(&dd->pdev->dev,
863 "Drive indicates rebuild has failed.\n");
864 fail_all_ncq_cmds = 1;
865 fail_reason = "rebuild failed";
866 }
867 }
868
774 /* clear the tag accumulator */ 869 /* clear the tag accumulator */
775 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); 870 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
776 871
@@ -779,32 +874,47 @@ static void mtip_handle_tfe(struct driver_data *dd)
779 for (bit = 0; bit < 32; bit++) { 874 for (bit = 0; bit < 32; bit++) {
780 reissue = 1; 875 reissue = 1;
781 tag = (group << 5) + bit; 876 tag = (group << 5) + bit;
877 cmd = &port->commands[tag];
782 878
783 /* If the active bit is set re-issue the command */ 879 /* If the active bit is set re-issue the command */
784 if (atomic_read(&port->commands[tag].active) == 0) 880 if (atomic_read(&cmd->active) == 0)
785 continue; 881 continue;
786 882
787 fis = (struct host_to_dev_fis *) 883 fis = (struct host_to_dev_fis *)cmd->command;
788 port->commands[tag].command;
789 884
790 /* Should re-issue? */ 885 /* Should re-issue? */
791 if (tag == MTIP_TAG_INTERNAL || 886 if (tag == MTIP_TAG_INTERNAL ||
792 fis->command == ATA_CMD_SET_FEATURES) 887 fis->command == ATA_CMD_SET_FEATURES)
793 reissue = 0; 888 reissue = 0;
889 else {
890 if (fail_all_ncq_cmds ||
891 (fail_all_ncq_write &&
892 fis->command == ATA_CMD_FPDMA_WRITE)) {
893 dev_warn(&dd->pdev->dev,
894 " Fail: %s w/tag %d [%s].\n",
895 fis->command == ATA_CMD_FPDMA_WRITE ?
896 "write" : "read",
897 tag,
898 fail_reason != NULL ?
899 fail_reason : "unknown");
900 atomic_set(&cmd->active, 0);
901 if (cmd->comp_func) {
902 cmd->comp_func(port, tag,
903 cmd->comp_data,
904 -ENODATA);
905 }
906 continue;
907 }
908 }
794 909
795 /* 910 /*
796 * First check if this command has 911 * First check if this command has
797 * exceeded its retries. 912 * exceeded its retries.
798 */ 913 */
799 if (reissue && 914 if (reissue && (cmd->retries-- > 0)) {
800 (port->commands[tag].retries-- > 0)) {
801 915
802 set_bit(tag, tagaccum); 916 set_bit(tag, tagaccum);
803 917
804 /* Update the timeout value. */
805 port->commands[tag].comp_time =
806 jiffies + msecs_to_jiffies(
807 MTIP_NCQ_COMMAND_TIMEOUT_MS);
808 /* Re-issue the command. */ 918 /* Re-issue the command. */
809 mtip_issue_ncq_command(port, tag); 919 mtip_issue_ncq_command(port, tag);
810 920
@@ -814,13 +924,13 @@ static void mtip_handle_tfe(struct driver_data *dd)
814 /* Retire a command that will not be reissued */ 924 /* Retire a command that will not be reissued */
815 dev_warn(&port->dd->pdev->dev, 925 dev_warn(&port->dd->pdev->dev,
816 "retiring tag %d\n", tag); 926 "retiring tag %d\n", tag);
817 atomic_set(&port->commands[tag].active, 0); 927 atomic_set(&cmd->active, 0);
818 928
819 if (port->commands[tag].comp_func) 929 if (cmd->comp_func)
820 port->commands[tag].comp_func( 930 cmd->comp_func(
821 port, 931 port,
822 tag, 932 tag,
823 port->commands[tag].comp_data, 933 cmd->comp_data,
824 PORT_IRQ_TF_ERR); 934 PORT_IRQ_TF_ERR);
825 else 935 else
826 dev_warn(&port->dd->pdev->dev, 936 dev_warn(&port->dd->pdev->dev,
@@ -828,10 +938,10 @@ static void mtip_handle_tfe(struct driver_data *dd)
828 tag); 938 tag);
829 } 939 }
830 } 940 }
831 print_tags(dd, "TFE tags reissued:", tagaccum); 941 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
832 942
833 /* clear eh_active */ 943 /* clear eh_active */
834 clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags); 944 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
835 wake_up_interruptible(&port->svc_wait); 945 wake_up_interruptible(&port->svc_wait);
836 946
837 mod_timer(&port->cmd_timer, 947 mod_timer(&port->cmd_timer,
@@ -899,7 +1009,7 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
899 struct mtip_port *port = dd->port; 1009 struct mtip_port *port = dd->port;
900 struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL]; 1010 struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
901 1011
902 if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) && 1012 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
903 (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1013 (cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
904 & (1 << MTIP_TAG_INTERNAL))) { 1014 & (1 << MTIP_TAG_INTERNAL))) {
905 if (cmd->comp_func) { 1015 if (cmd->comp_func) {
@@ -911,8 +1021,6 @@ static inline void mtip_process_legacy(struct driver_data *dd, u32 port_stat)
911 } 1021 }
912 } 1022 }
913 1023
914 dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
915
916 return; 1024 return;
917} 1025}
918 1026
@@ -968,6 +1076,9 @@ static inline irqreturn_t mtip_handle_irq(struct driver_data *data)
968 /* don't proceed further */ 1076 /* don't proceed further */
969 return IRQ_HANDLED; 1077 return IRQ_HANDLED;
970 } 1078 }
1079 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1080 &dd->dd_flag))
1081 return rv;
971 1082
972 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR); 1083 mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
973 } 1084 }
@@ -1015,6 +1126,39 @@ static void mtip_issue_non_ncq_command(struct mtip_port *port, int tag)
1015 port->cmd_issue[MTIP_TAG_INDEX(tag)]); 1126 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
1016} 1127}
1017 1128
1129static bool mtip_pause_ncq(struct mtip_port *port,
1130 struct host_to_dev_fis *fis)
1131{
1132 struct host_to_dev_fis *reply;
1133 unsigned long task_file_data;
1134
1135 reply = port->rxfis + RX_FIS_D2H_REG;
1136 task_file_data = readl(port->mmio+PORT_TFDATA);
1137
1138 if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
1139 return false;
1140
1141 if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
1142 set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1143 port->ic_pause_timer = jiffies;
1144 return true;
1145 } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
1146 (fis->features == 0x03)) {
1147 set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1148 port->ic_pause_timer = jiffies;
1149 return true;
1150 } else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
1151 ((fis->command == 0xFC) &&
1152 (fis->features == 0x27 || fis->features == 0x72 ||
1153 fis->features == 0x62 || fis->features == 0x26))) {
1154 /* Com reset after secure erase or lowlevel format */
1155 mtip_restart_port(port);
1156 return false;
1157 }
1158
1159 return false;
1160}
1161
1018/* 1162/*
1019 * Wait for port to quiesce 1163 * Wait for port to quiesce
1020 * 1164 *
@@ -1033,11 +1177,13 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1033 1177
1034 to = jiffies + msecs_to_jiffies(timeout); 1178 to = jiffies + msecs_to_jiffies(timeout);
1035 do { 1179 do {
1036 if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) && 1180 if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
1037 test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) { 1181 test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
1038 msleep(20); 1182 msleep(20);
1039 continue; /* svc thd is actively issuing commands */ 1183 continue; /* svc thd is actively issuing commands */
1040 } 1184 }
1185 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1186 return -EFAULT;
1041 /* 1187 /*
1042 * Ignore s_active bit 0 of array element 0. 1188 * Ignore s_active bit 0 of array element 0.
1043 * This bit will always be set 1189 * This bit will always be set
@@ -1074,7 +1220,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout)
1074 * -EAGAIN Time out waiting for command to complete. 1220 * -EAGAIN Time out waiting for command to complete.
1075 */ 1221 */
1076static int mtip_exec_internal_command(struct mtip_port *port, 1222static int mtip_exec_internal_command(struct mtip_port *port,
1077 void *fis, 1223 struct host_to_dev_fis *fis,
1078 int fis_len, 1224 int fis_len,
1079 dma_addr_t buffer, 1225 dma_addr_t buffer,
1080 int buf_len, 1226 int buf_len,
@@ -1084,8 +1230,9 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1084{ 1230{
1085 struct mtip_cmd_sg *command_sg; 1231 struct mtip_cmd_sg *command_sg;
1086 DECLARE_COMPLETION_ONSTACK(wait); 1232 DECLARE_COMPLETION_ONSTACK(wait);
1087 int rv = 0; 1233 int rv = 0, ready2go = 1;
1088 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL]; 1234 struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
1235 unsigned long to;
1089 1236
1090 /* Make sure the buffer is 8 byte aligned. This is asic specific. */ 1237 /* Make sure the buffer is 8 byte aligned. This is asic specific. */
1091 if (buffer & 0x00000007) { 1238 if (buffer & 0x00000007) {
@@ -1094,23 +1241,38 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1094 return -EFAULT; 1241 return -EFAULT;
1095 } 1242 }
1096 1243
1097 /* Only one internal command should be running at a time */ 1244 to = jiffies + msecs_to_jiffies(timeout);
1098 if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) { 1245 do {
1246 ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
1247 port->allocated);
1248 if (ready2go)
1249 break;
1250 mdelay(100);
1251 } while (time_before(jiffies, to));
1252 if (!ready2go) {
1099 dev_warn(&port->dd->pdev->dev, 1253 dev_warn(&port->dd->pdev->dev,
1100 "Internal command already active\n"); 1254 "Internal cmd active. new cmd [%02X]\n", fis->command);
1101 return -EBUSY; 1255 return -EBUSY;
1102 } 1256 }
1103 set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags); 1257 set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1258 port->ic_pause_timer = 0;
1259
1260 if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
1261 clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
1262 else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
1263 clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
1104 1264
1105 if (atomic == GFP_KERNEL) { 1265 if (atomic == GFP_KERNEL) {
1106 /* wait for io to complete if non atomic */ 1266 if (fis->command != ATA_CMD_STANDBYNOW1) {
1107 if (mtip_quiesce_io(port, 5000) < 0) { 1267 /* wait for io to complete if non atomic */
1108 dev_warn(&port->dd->pdev->dev, 1268 if (mtip_quiesce_io(port, 5000) < 0) {
1109 "Failed to quiesce IO\n"); 1269 dev_warn(&port->dd->pdev->dev,
1110 release_slot(port, MTIP_TAG_INTERNAL); 1270 "Failed to quiesce IO\n");
1111 clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags); 1271 release_slot(port, MTIP_TAG_INTERNAL);
1112 wake_up_interruptible(&port->svc_wait); 1272 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1113 return -EBUSY; 1273 wake_up_interruptible(&port->svc_wait);
1274 return -EBUSY;
1275 }
1114 } 1276 }
1115 1277
1116 /* Set the completion function and data for the command. */ 1278 /* Set the completion function and data for the command. */
@@ -1120,7 +1282,7 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1120 } else { 1282 } else {
1121 /* Clear completion - we're going to poll */ 1283 /* Clear completion - we're going to poll */
1122 int_cmd->comp_data = NULL; 1284 int_cmd->comp_data = NULL;
1123 int_cmd->comp_func = NULL; 1285 int_cmd->comp_func = mtip_null_completion;
1124 } 1286 }
1125 1287
1126 /* Copy the command to the command table */ 1288 /* Copy the command to the command table */
@@ -1159,6 +1321,12 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1159 "Internal command did not complete [%d] " 1321 "Internal command did not complete [%d] "
1160 "within timeout of %lu ms\n", 1322 "within timeout of %lu ms\n",
1161 atomic, timeout); 1323 atomic, timeout);
1324 if (mtip_check_surprise_removal(port->dd->pdev) ||
1325 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1326 &port->dd->dd_flag)) {
1327 rv = -ENXIO;
1328 goto exec_ic_exit;
1329 }
1162 rv = -EAGAIN; 1330 rv = -EAGAIN;
1163 } 1331 }
1164 1332
@@ -1166,31 +1334,59 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1166 & (1 << MTIP_TAG_INTERNAL)) { 1334 & (1 << MTIP_TAG_INTERNAL)) {
1167 dev_warn(&port->dd->pdev->dev, 1335 dev_warn(&port->dd->pdev->dev,
1168 "Retiring internal command but CI is 1.\n"); 1336 "Retiring internal command but CI is 1.\n");
1337 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1338 &port->dd->dd_flag)) {
1339 hba_reset_nosleep(port->dd);
1340 rv = -ENXIO;
1341 } else {
1342 mtip_restart_port(port);
1343 rv = -EAGAIN;
1344 }
1345 goto exec_ic_exit;
1169 } 1346 }
1170 1347
1171 } else { 1348 } else {
1172 /* Spin for <timeout> checking if command still outstanding */ 1349 /* Spin for <timeout> checking if command still outstanding */
1173 timeout = jiffies + msecs_to_jiffies(timeout); 1350 timeout = jiffies + msecs_to_jiffies(timeout);
1174 1351 while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1175 while ((readl( 1352 & (1 << MTIP_TAG_INTERNAL))
1176 port->cmd_issue[MTIP_TAG_INTERNAL]) 1353 && time_before(jiffies, timeout)) {
1177 & (1 << MTIP_TAG_INTERNAL)) 1354 if (mtip_check_surprise_removal(port->dd->pdev)) {
1178 && time_before(jiffies, timeout)) 1355 rv = -ENXIO;
1179 ; 1356 goto exec_ic_exit;
1357 }
1358 if ((fis->command != ATA_CMD_STANDBYNOW1) &&
1359 test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1360 &port->dd->dd_flag)) {
1361 rv = -ENXIO;
1362 goto exec_ic_exit;
1363 }
1364 }
1180 1365
1181 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1366 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1182 & (1 << MTIP_TAG_INTERNAL)) { 1367 & (1 << MTIP_TAG_INTERNAL)) {
1183 dev_err(&port->dd->pdev->dev, 1368 dev_err(&port->dd->pdev->dev,
1184 "Internal command did not complete [%d]\n", 1369 "Internal command did not complete [atomic]\n");
1185 atomic);
1186 rv = -EAGAIN; 1370 rv = -EAGAIN;
1371 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1372 &port->dd->dd_flag)) {
1373 hba_reset_nosleep(port->dd);
1374 rv = -ENXIO;
1375 } else {
1376 mtip_restart_port(port);
1377 rv = -EAGAIN;
1378 }
1187 } 1379 }
1188 } 1380 }
1189 1381exec_ic_exit:
1190 /* Clear the allocated and active bits for the internal command. */ 1382 /* Clear the allocated and active bits for the internal command. */
1191 atomic_set(&int_cmd->active, 0); 1383 atomic_set(&int_cmd->active, 0);
1192 release_slot(port, MTIP_TAG_INTERNAL); 1384 release_slot(port, MTIP_TAG_INTERNAL);
1193 clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags); 1385 if (rv >= 0 && mtip_pause_ncq(port, fis)) {
1386 /* NCQ paused */
1387 return rv;
1388 }
1389 clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
1194 wake_up_interruptible(&port->svc_wait); 1390 wake_up_interruptible(&port->svc_wait);
1195 1391
1196 return rv; 1392 return rv;
@@ -1240,6 +1436,9 @@ static int mtip_get_identify(struct mtip_port *port, void __user *user_buffer)
1240 int rv = 0; 1436 int rv = 0;
1241 struct host_to_dev_fis fis; 1437 struct host_to_dev_fis fis;
1242 1438
1439 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
1440 return -EFAULT;
1441
1243 /* Build the FIS. */ 1442 /* Build the FIS. */
1244 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 1443 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1245 fis.type = 0x27; 1444 fis.type = 0x27;
@@ -1313,6 +1512,7 @@ static int mtip_standby_immediate(struct mtip_port *port)
1313{ 1512{
1314 int rv; 1513 int rv;
1315 struct host_to_dev_fis fis; 1514 struct host_to_dev_fis fis;
1515 unsigned long start;
1316 1516
1317 /* Build the FIS. */ 1517 /* Build the FIS. */
1318 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 1518 memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -1320,15 +1520,150 @@ static int mtip_standby_immediate(struct mtip_port *port)
1320 fis.opts = 1 << 7; 1520 fis.opts = 1 << 7;
1321 fis.command = ATA_CMD_STANDBYNOW1; 1521 fis.command = ATA_CMD_STANDBYNOW1;
1322 1522
1323 /* Execute the command. Use a 15-second timeout for large drives. */ 1523 start = jiffies;
1324 rv = mtip_exec_internal_command(port, 1524 rv = mtip_exec_internal_command(port,
1325 &fis, 1525 &fis,
1326 5, 1526 5,
1327 0, 1527 0,
1328 0, 1528 0,
1329 0, 1529 0,
1330 GFP_KERNEL, 1530 GFP_ATOMIC,
1331 15000); 1531 15000);
1532 dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
1533 jiffies_to_msecs(jiffies - start));
1534 if (rv)
1535 dev_warn(&port->dd->pdev->dev,
1536 "STANDBY IMMEDIATE command failed.\n");
1537
1538 return rv;
1539}
1540
1541/*
1542 * Issue a READ LOG EXT command to the device.
1543 *
1544 * @port pointer to the port structure.
1545 * @page page number to fetch
1546 * @buffer pointer to buffer
1547 * @buffer_dma dma address corresponding to @buffer
1548 * @sectors page length to fetch, in sectors
1549 *
1550 * return value
1551 * @rv return value from mtip_exec_internal_command()
1552 */
1553static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
1554 dma_addr_t buffer_dma, unsigned int sectors)
1555{
1556 struct host_to_dev_fis fis;
1557
1558 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1559 fis.type = 0x27;
1560 fis.opts = 1 << 7;
1561 fis.command = ATA_CMD_READ_LOG_EXT;
1562 fis.sect_count = sectors & 0xFF;
1563 fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
1564 fis.lba_low = page;
1565 fis.lba_mid = 0;
1566 fis.device = ATA_DEVICE_OBS;
1567
1568 memset(buffer, 0, sectors * ATA_SECT_SIZE);
1569
1570 return mtip_exec_internal_command(port,
1571 &fis,
1572 5,
1573 buffer_dma,
1574 sectors * ATA_SECT_SIZE,
1575 0,
1576 GFP_ATOMIC,
1577 MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
1578}
1579
1580/*
1581 * Issue a SMART READ DATA command to the device.
1582 *
1583 * @port pointer to the port structure.
1584 * @buffer pointer to buffer
1585 * @buffer_dma dma address corresponding to @buffer
1586 *
1587 * return value
1588 * @rv return value from mtip_exec_internal_command()
1589 */
1590static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
1591 dma_addr_t buffer_dma)
1592{
1593 struct host_to_dev_fis fis;
1594
1595 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1596 fis.type = 0x27;
1597 fis.opts = 1 << 7;
1598 fis.command = ATA_CMD_SMART;
1599 fis.features = 0xD0;
1600 fis.sect_count = 1;
1601 fis.lba_mid = 0x4F;
1602 fis.lba_hi = 0xC2;
1603 fis.device = ATA_DEVICE_OBS;
1604
1605 return mtip_exec_internal_command(port,
1606 &fis,
1607 5,
1608 buffer_dma,
1609 ATA_SECT_SIZE,
1610 0,
1611 GFP_ATOMIC,
1612 15000);
1613}
1614
1615/*
1616 * Get the value of a smart attribute
1617 *
1618 * @port pointer to the port structure
1619 * @id attribute number
1620 * @attrib pointer to return attrib information corresponding to @id
1621 *
1622 * return value
1623 * -EINVAL NULL buffer passed or unsupported attribute @id.
1624 * -EPERM Identify data not valid, SMART not supported or not enabled
1625 */
1626static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
1627 struct smart_attr *attrib)
1628{
1629 int rv, i;
1630 struct smart_attr *pattr;
1631
1632 if (!attrib)
1633 return -EINVAL;
1634
1635 if (!port->identify_valid) {
1636 dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
1637 return -EPERM;
1638 }
1639 if (!(port->identify[82] & 0x1)) {
1640 dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
1641 return -EPERM;
1642 }
1643 if (!(port->identify[85] & 0x1)) {
1644 dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
1645 return -EPERM;
1646 }
1647
1648 memset(port->smart_buf, 0, ATA_SECT_SIZE);
1649 rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
1650 if (rv) {
 1651 		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
1652 return rv;
1653 }
1654
1655 pattr = (struct smart_attr *)(port->smart_buf + 2);
1656 for (i = 0; i < 29; i++, pattr++)
1657 if (pattr->attr_id == id) {
1658 memcpy(attrib, pattr, sizeof(struct smart_attr));
1659 break;
1660 }
1661
1662 if (i == 29) {
1663 dev_warn(&port->dd->pdev->dev,
1664 "Query for invalid SMART attribute ID\n");
1665 rv = -EINVAL;
1666 }
1332 1667
1333 return rv; 1668 return rv;
1334} 1669}
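
mtip_get_smart_attr() scans the 512-byte SMART READ DATA sector: attribute records start at byte offset 2 and the loop walks 29 packed 12-byte entries looking for a matching ID. A self-contained sketch of that parse, using a packed struct with the same layout as the driver's smart_attr (the sample buffer contents are fabricated):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same layout as the driver's smart_attr: 12 bytes, packed. */
struct __attribute__((packed)) smart_attr {
	uint8_t  attr_id;
	uint16_t flags;
	uint8_t  cur;
	uint8_t  worst;
	uint32_t data;
	uint8_t  res[3];
};

/* Find attribute @id in a 512-byte SMART data sector; entries start at +2. */
static int find_attr(const uint8_t *page, uint8_t id, struct smart_attr *out)
{
	const struct smart_attr *p = (const struct smart_attr *)(page + 2);
	int i;

	for (i = 0; i < 29; i++, p++) {
		if (p->attr_id == id) {
			memcpy(out, p, sizeof(*out));
			return 0;
		}
	}
	return -1;	/* analogue of -EINVAL for an unknown attribute */
}

int main(void)
{
	uint8_t page[512] = { 0 };
	struct smart_attr a;

	page[2] = 242;		/* fabricated entry: attribute 242... */
	page[2 + 3] = 73;	/* ...with cur = 73 (cur sits at offset 3) */

	if (!find_attr(page, 242, &a))
		printf("attr %u cur %u\n", a.attr_id, a.cur);
	return 0;
}
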
@@ -1504,10 +1839,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
1504 fis.cyl_hi = command[5]; 1839 fis.cyl_hi = command[5];
1505 fis.device = command[6] & ~0x10; /* Clear the dev bit*/ 1840 fis.device = command[6] & ~0x10; /* Clear the dev bit*/
1506 1841
1507 1842 dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
1508 dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
1509 "nsect %x, sect %x, lcyl %x, "
1510 "hcyl %x, sel %x\n",
1511 __func__, 1843 __func__,
1512 command[0], 1844 command[0],
1513 command[1], 1845 command[1],
@@ -1534,8 +1866,7 @@ static int exec_drive_task(struct mtip_port *port, u8 *command)
1534 command[4] = reply->cyl_low; 1866 command[4] = reply->cyl_low;
1535 command[5] = reply->cyl_hi; 1867 command[5] = reply->cyl_hi;
1536 1868
1537 dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, " 1869 dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
1538 "err %x , cyl_lo %x cyl_hi %x\n",
1539 __func__, 1870 __func__,
1540 command[0], 1871 command[0],
1541 command[1], 1872 command[1],
@@ -1578,7 +1909,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1578 } 1909 }
1579 1910
1580 dbg_printk(MTIP_DRV_NAME 1911 dbg_printk(MTIP_DRV_NAME
1581 "%s: User Command: cmd %x, sect %x, " 1912 " %s: User Command: cmd %x, sect %x, "
1582 "feat %x, sectcnt %x\n", 1913 "feat %x, sectcnt %x\n",
1583 __func__, 1914 __func__,
1584 command[0], 1915 command[0],
@@ -1607,7 +1938,7 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1607 command[2] = command[3]; 1938 command[2] = command[3];
1608 1939
1609 dbg_printk(MTIP_DRV_NAME 1940 dbg_printk(MTIP_DRV_NAME
1610 "%s: Completion Status: stat %x, " 1941 " %s: Completion Status: stat %x, "
1611 "err %x, cmd %x\n", 1942 "err %x, cmd %x\n",
1612 __func__, 1943 __func__,
1613 command[0], 1944 command[0],
@@ -1810,9 +2141,10 @@ static int exec_drive_taskfile(struct driver_data *dd,
1810 } 2141 }
1811 2142
1812 dbg_printk(MTIP_DRV_NAME 2143 dbg_printk(MTIP_DRV_NAME
1813 "taskfile: cmd %x, feat %x, nsect %x," 2144 " %s: cmd %x, feat %x, nsect %x,"
1814 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x," 2145 " sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
1815 " head/dev %x\n", 2146 " head/dev %x\n",
2147 __func__,
1816 fis.command, 2148 fis.command,
1817 fis.features, 2149 fis.features,
1818 fis.sect_count, 2150 fis.sect_count,
@@ -1823,8 +2155,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
1823 2155
1824 switch (fis.command) { 2156 switch (fis.command) {
1825 case ATA_CMD_DOWNLOAD_MICRO: 2157 case ATA_CMD_DOWNLOAD_MICRO:
1826 /* Change timeout for Download Microcode to 60 seconds.*/ 2158 /* Change timeout for Download Microcode to 2 minutes */
1827 timeout = 60000; 2159 timeout = 120000;
1828 break; 2160 break;
1829 case ATA_CMD_SEC_ERASE_UNIT: 2161 case ATA_CMD_SEC_ERASE_UNIT:
1830 /* Change timeout for Security Erase Unit to 4 minutes.*/ 2162 /* Change timeout for Security Erase Unit to 4 minutes.*/
@@ -1840,8 +2172,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
1840 timeout = 10000; 2172 timeout = 10000;
1841 break; 2173 break;
1842 case ATA_CMD_SMART: 2174 case ATA_CMD_SMART:
1843 /* Change timeout for vendor unique command to 10 secs */ 2175 /* Change timeout for vendor unique command to 15 secs */
1844 timeout = 10000; 2176 timeout = 15000;
1845 break; 2177 break;
1846 default: 2178 default:
1847 timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; 2179 timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
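
The switch above gives each pass-through command class its own timeout: download microcode grows to 2 minutes and SMART to 15 seconds, with everything else falling back to the ioctl default. The same selection as a lookup helper; the opcode behind the unchanged 10-second case is not visible in this hunk, so ATA_CMD_STANDBYNOW1 and the 5000 ms default value are assumptions:

#include <stdio.h>

/* Real ATA opcodes (cf. <linux/ata.h>) for the special-cased commands. */
#define ATA_CMD_DOWNLOAD_MICRO	0x92
#define ATA_CMD_SEC_ERASE_UNIT	0xF4
#define ATA_CMD_STANDBYNOW1	0xE0	/* assumed to be the 10 s case */
#define ATA_CMD_SMART		0xB0
#define IOCTL_DEFAULT_TIMEOUT_MS 5000	/* assumed stand-in for
					   MTIP_IOCTL_COMMAND_TIMEOUT_MS */

static unsigned int taskfile_timeout_ms(unsigned char cmd)
{
	switch (cmd) {
	case ATA_CMD_DOWNLOAD_MICRO:	return 120000;	/* 2 minutes */
	case ATA_CMD_SEC_ERASE_UNIT:	return 240000;	/* 4 minutes */
	case ATA_CMD_STANDBYNOW1:	return 10000;
	case ATA_CMD_SMART:		return 15000;
	default:			return IOCTL_DEFAULT_TIMEOUT_MS;
	}
}

int main(void)
{
	printf("%u ms\n", taskfile_timeout_ms(ATA_CMD_SMART));
	return 0;
}
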
@@ -1903,18 +2235,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
1903 req_task->hob_ports[1] = reply->features_ex; 2235 req_task->hob_ports[1] = reply->features_ex;
1904 req_task->hob_ports[2] = reply->sect_cnt_ex; 2236 req_task->hob_ports[2] = reply->sect_cnt_ex;
1905 } 2237 }
1906
1907 /* Com rest after secure erase or lowlevel format */
1908 if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
1909 ((fis.command == 0xFC) &&
1910 (fis.features == 0x27 || fis.features == 0x72 ||
1911 fis.features == 0x62 || fis.features == 0x26))) &&
1912 !(reply->command & 1)) {
1913 mtip_restart_port(dd->port);
1914 }
1915
1916 dbg_printk(MTIP_DRV_NAME 2238 dbg_printk(MTIP_DRV_NAME
1917 "%s: Completion: stat %x," 2239 " %s: Completion: stat %x,"
1918 "err %x, sect_cnt %x, lbalo %x," 2240 "err %x, sect_cnt %x, lbalo %x,"
1919 "lbamid %x, lbahi %x, dev %x\n", 2241 "lbamid %x, lbahi %x, dev %x\n",
1920 __func__, 2242 __func__,
@@ -2080,14 +2402,10 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
2080 struct host_to_dev_fis *fis; 2402 struct host_to_dev_fis *fis;
2081 struct mtip_port *port = dd->port; 2403 struct mtip_port *port = dd->port;
2082 struct mtip_cmd *command = &port->commands[tag]; 2404 struct mtip_cmd *command = &port->commands[tag];
2405 int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
2083 2406
2084 /* Map the scatter list for DMA access */ 2407 /* Map the scatter list for DMA access */
2085 if (dir == READ) 2408 nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
2086 nents = dma_map_sg(&dd->pdev->dev, command->sg,
2087 nents, DMA_FROM_DEVICE);
2088 else
2089 nents = dma_map_sg(&dd->pdev->dev, command->sg,
2090 nents, DMA_TO_DEVICE);
2091 2409
2092 command->scatter_ents = nents; 2410 command->scatter_ents = nents;
2093 2411
@@ -2127,7 +2445,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
2127 */ 2445 */
2128 command->comp_data = dd; 2446 command->comp_data = dd;
2129 command->comp_func = mtip_async_complete; 2447 command->comp_func = mtip_async_complete;
2130 command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE); 2448 command->direction = dma_dir;
2131 2449
2132 /* 2450 /*
2133 * Set the completion function and data for the command passed 2451 * Set the completion function and data for the command passed
@@ -2140,19 +2458,16 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
2140 * To prevent this command from being issued 2458 * To prevent this command from being issued
2141 * if an internal command is in progress or error handling is active. 2459 * if an internal command is in progress or error handling is active.
2142 */ 2460 */
2143 if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) || 2461 if (port->flags & MTIP_PF_PAUSE_IO) {
2144 test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
2145 set_bit(tag, port->cmds_to_issue); 2462 set_bit(tag, port->cmds_to_issue);
2146 set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags); 2463 set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2147 return; 2464 return;
2148 } 2465 }
2149 2466
2150 /* Issue the command to the hardware */ 2467 /* Issue the command to the hardware */
2151 mtip_issue_ncq_command(port, tag); 2468 mtip_issue_ncq_command(port, tag);
2152 2469
2153 /* Set the command's timeout value.*/ 2470 return;
2154 port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
2155 MTIP_NCQ_COMMAND_TIMEOUT_MS);
2156} 2471}
2157 2472
2158/* 2473/*
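
The submit path now folds the old pair of test_bit() calls into one mask test: MTIP_PF_PAUSE_IO (defined in the mtip32xx.h hunk further down) ORs together the internal-command, error-handling, secure-erase and download-microcode bits, so a single read of port->flags decides whether to defer the I/O to the service thread. The pattern in miniature:

#include <stdio.h>

/* Bit numbers as in the mtip32xx.h hunk below. */
#define PF_IC_ACTIVE_BIT 0
#define PF_EH_ACTIVE_BIT 1
#define PF_SE_ACTIVE_BIT 2
#define PF_DM_ACTIVE_BIT 3
#define PF_PAUSE_IO ((1UL << PF_IC_ACTIVE_BIT) | \
		     (1UL << PF_EH_ACTIVE_BIT) | \
		     (1UL << PF_SE_ACTIVE_BIT) | \
		     (1UL << PF_DM_ACTIVE_BIT))

int main(void)
{
	unsigned long flags = 1UL << PF_SE_ACTIVE_BIT; /* secure erase running */

	if (flags & PF_PAUSE_IO)
		printf("defer: queue the command for the service thread\n");
	else
		printf("issue immediately\n");
	return 0;
}
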
@@ -2191,6 +2506,10 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
2191 down(&dd->port->cmd_slot); 2506 down(&dd->port->cmd_slot);
2192 *tag = get_slot(dd->port); 2507 *tag = get_slot(dd->port);
2193 2508
2509 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
2510 up(&dd->port->cmd_slot);
2511 return NULL;
2512 }
2194 if (unlikely(*tag < 0)) 2513 if (unlikely(*tag < 0))
2195 return NULL; 2514 return NULL;
2196 2515
@@ -2207,7 +2526,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd,
2207 * return value 2526 * return value
2208 * The size, in bytes, of the data copied into buf. 2527 * The size, in bytes, of the data copied into buf.
2209 */ 2528 */
2210static ssize_t hw_show_registers(struct device *dev, 2529static ssize_t mtip_hw_show_registers(struct device *dev,
2211 struct device_attribute *attr, 2530 struct device_attribute *attr,
2212 char *buf) 2531 char *buf)
2213{ 2532{
@@ -2216,7 +2535,7 @@ static ssize_t hw_show_registers(struct device *dev,
2216 int size = 0; 2535 int size = 0;
2217 int n; 2536 int n;
2218 2537
2219 size += sprintf(&buf[size], "%s:\ns_active:\n", __func__); 2538 size += sprintf(&buf[size], "S ACTive:\n");
2220 2539
2221 for (n = 0; n < dd->slot_groups; n++) 2540 for (n = 0; n < dd->slot_groups; n++)
2222 size += sprintf(&buf[size], "0x%08x\n", 2541 size += sprintf(&buf[size], "0x%08x\n",
@@ -2240,20 +2559,39 @@ static ssize_t hw_show_registers(struct device *dev,
2240 group_allocated); 2559 group_allocated);
2241 } 2560 }
2242 2561
2243 size += sprintf(&buf[size], "completed:\n"); 2562 size += sprintf(&buf[size], "Completed:\n");
2244 2563
2245 for (n = 0; n < dd->slot_groups; n++) 2564 for (n = 0; n < dd->slot_groups; n++)
2246 size += sprintf(&buf[size], "0x%08x\n", 2565 size += sprintf(&buf[size], "0x%08x\n",
2247 readl(dd->port->completed[n])); 2566 readl(dd->port->completed[n]));
2248 2567
2249 size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n", 2568 size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
2250 readl(dd->port->mmio + PORT_IRQ_STAT)); 2569 readl(dd->port->mmio + PORT_IRQ_STAT));
2251 size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n", 2570 size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
2252 readl(dd->mmio + HOST_IRQ_STAT)); 2571 readl(dd->mmio + HOST_IRQ_STAT));
2253 2572
2254 return size; 2573 return size;
2255} 2574}
2256static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL); 2575
2576static ssize_t mtip_hw_show_status(struct device *dev,
2577 struct device_attribute *attr,
2578 char *buf)
2579{
2580 struct driver_data *dd = dev_to_disk(dev)->private_data;
2581 int size = 0;
2582
2583 if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
2584 size += sprintf(buf, "%s", "thermal_shutdown\n");
2585 else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
2586 size += sprintf(buf, "%s", "write_protect\n");
2587 else
2588 size += sprintf(buf, "%s", "online\n");
2589
2590 return size;
2591}
2592
2593static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
2594static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2257 2595
2258/* 2596/*
2259 * Create the sysfs related attributes. 2597 * Create the sysfs related attributes.
@@ -2272,7 +2610,10 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2272 2610
2273 if (sysfs_create_file(kobj, &dev_attr_registers.attr)) 2611 if (sysfs_create_file(kobj, &dev_attr_registers.attr))
2274 dev_warn(&dd->pdev->dev, 2612 dev_warn(&dd->pdev->dev,
2275 "Error creating registers sysfs entry\n"); 2613 "Error creating 'registers' sysfs entry\n");
2614 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2615 dev_warn(&dd->pdev->dev,
2616 "Error creating 'status' sysfs entry\n");
2276 return 0; 2617 return 0;
2277} 2618}
2278 2619
@@ -2292,6 +2633,7 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2292 return -EINVAL; 2633 return -EINVAL;
2293 2634
2294 sysfs_remove_file(kobj, &dev_attr_registers.attr); 2635 sysfs_remove_file(kobj, &dev_attr_registers.attr);
2636 sysfs_remove_file(kobj, &dev_attr_status.attr);
2295 2637
2296 return 0; 2638 return 0;
2297} 2639}
@@ -2384,10 +2726,12 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2384 "FTL rebuild in progress. Polling for completion.\n"); 2726 "FTL rebuild in progress. Polling for completion.\n");
2385 2727
2386 start = jiffies; 2728 start = jiffies;
2387 dd->ftlrebuildflag = 1;
2388 timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS); 2729 timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
2389 2730
2390 do { 2731 do {
2732 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2733 &dd->dd_flag)))
2734 return -EFAULT;
2391 if (mtip_check_surprise_removal(dd->pdev)) 2735 if (mtip_check_surprise_removal(dd->pdev))
2392 return -EFAULT; 2736 return -EFAULT;
2393 2737
@@ -2408,22 +2752,17 @@ static int mtip_ftl_rebuild_poll(struct driver_data *dd)
2408 dev_warn(&dd->pdev->dev, 2752 dev_warn(&dd->pdev->dev,
2409 "FTL rebuild complete (%d secs).\n", 2753 "FTL rebuild complete (%d secs).\n",
2410 jiffies_to_msecs(jiffies - start) / 1000); 2754 jiffies_to_msecs(jiffies - start) / 1000);
2411 dd->ftlrebuildflag = 0;
2412 mtip_block_initialize(dd); 2755 mtip_block_initialize(dd);
2413 break; 2756 return 0;
2414 } 2757 }
2415 ssleep(10); 2758 ssleep(10);
2416 } while (time_before(jiffies, timeout)); 2759 } while (time_before(jiffies, timeout));
2417 2760
2418 /* Check for timeout */ 2761 /* Check for timeout */
2419 if (dd->ftlrebuildflag) { 2762 dev_err(&dd->pdev->dev,
2420 dev_err(&dd->pdev->dev,
2421 "Timed out waiting for FTL rebuild to complete (%d secs).\n", 2763 "Timed out waiting for FTL rebuild to complete (%d secs).\n",
2422 jiffies_to_msecs(jiffies - start) / 1000); 2764 jiffies_to_msecs(jiffies - start) / 1000);
2423 return -EFAULT; 2765 return -EFAULT;
2424 }
2425
2426 return 0;
2427} 2766}
2428 2767
2429/* 2768/*
@@ -2448,14 +2787,17 @@ static int mtip_service_thread(void *data)
2448 * is in progress nor error handling is active 2787 * is in progress nor error handling is active
2449 */ 2788 */
2450 wait_event_interruptible(port->svc_wait, (port->flags) && 2789 wait_event_interruptible(port->svc_wait, (port->flags) &&
2451 !test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) && 2790 !(port->flags & MTIP_PF_PAUSE_IO));
2452 !test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
2453 2791
2454 if (kthread_should_stop()) 2792 if (kthread_should_stop())
2455 break; 2793 break;
2456 2794
2457 set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags); 2795 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
2458 if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) { 2796 &dd->dd_flag)))
2797 break;
2798
2799 set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2800 if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
2459 slot = 1; 2801 slot = 1;
2460 /* used to restrict the loop to one iteration */ 2802 /* used to restrict the loop to one iteration */
2461 slot_start = num_cmd_slots; 2803 slot_start = num_cmd_slots;
@@ -2480,21 +2822,19 @@ static int mtip_service_thread(void *data)
2480 /* Issue the command to the hardware */ 2822 /* Issue the command to the hardware */
2481 mtip_issue_ncq_command(port, slot); 2823 mtip_issue_ncq_command(port, slot);
2482 2824
2483 /* Set the command's timeout value.*/
2484 port->commands[slot].comp_time = jiffies +
2485 msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
2486
2487 clear_bit(slot, port->cmds_to_issue); 2825 clear_bit(slot, port->cmds_to_issue);
2488 } 2826 }
2489 2827
2490 clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags); 2828 clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
2491 } else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) { 2829 } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
2492 mtip_ftl_rebuild_poll(dd); 2830 if (!mtip_ftl_rebuild_poll(dd))
2493 clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags); 2831 set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
2832 &dd->dd_flag);
2833 clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
2494 } 2834 }
2495 clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags); 2835 clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
2496 2836
2497 if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags)) 2837 if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
2498 break; 2838 break;
2499 } 2839 }
2500 return 0; 2840 return 0;
@@ -2513,6 +2853,9 @@ static int mtip_hw_init(struct driver_data *dd)
2513 int i; 2853 int i;
2514 int rv; 2854 int rv;
2515 unsigned int num_command_slots; 2855 unsigned int num_command_slots;
2856 unsigned long timeout, timetaken;
2857 unsigned char *buf;
2858 struct smart_attr attr242;
2516 2859
2517 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR]; 2860 dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
2518 2861
@@ -2547,7 +2890,7 @@ static int mtip_hw_init(struct driver_data *dd)
2547 /* Allocate memory for the command list. */ 2890 /* Allocate memory for the command list. */
2548 dd->port->command_list = 2891 dd->port->command_list =
2549 dmam_alloc_coherent(&dd->pdev->dev, 2892 dmam_alloc_coherent(&dd->pdev->dev,
2550 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2), 2893 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
2551 &dd->port->command_list_dma, 2894 &dd->port->command_list_dma,
2552 GFP_KERNEL); 2895 GFP_KERNEL);
2553 if (!dd->port->command_list) { 2896 if (!dd->port->command_list) {
@@ -2560,7 +2903,7 @@ static int mtip_hw_init(struct driver_data *dd)
2560 /* Clear the memory we have allocated. */ 2903 /* Clear the memory we have allocated. */
2561 memset(dd->port->command_list, 2904 memset(dd->port->command_list,
2562 0, 2905 0,
2563 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2)); 2906 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
2564 2907
 2565 	/* Setup the address of the RX FIS. */ 2908
2566 dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ; 2909 dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
@@ -2576,10 +2919,19 @@ static int mtip_hw_init(struct driver_data *dd)
2576 dd->port->identify_dma = dd->port->command_tbl_dma + 2919 dd->port->identify_dma = dd->port->command_tbl_dma +
2577 HW_CMD_TBL_AR_SZ; 2920 HW_CMD_TBL_AR_SZ;
2578 2921
2579 /* Setup the address of the sector buffer. */ 2922 /* Setup the address of the sector buffer - for some non-ncq cmds */
2580 dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE; 2923 dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
2581 dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE; 2924 dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
2582 2925
2926 /* Setup the address of the log buf - for read log command */
2927 dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
2928 dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
2929
2930 /* Setup the address of the smart buf - for smart read data command */
2931 dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
2932 dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
2933
2934
2583 /* Point the command headers at the command tables. */ 2935 /* Point the command headers at the command tables. */
2584 for (i = 0; i < num_command_slots; i++) { 2936 for (i = 0; i < num_command_slots; i++) {
2585 dd->port->commands[i].command_header = 2937 dd->port->commands[i].command_header =
@@ -2623,14 +2975,43 @@ static int mtip_hw_init(struct driver_data *dd)
2623 dd->port->mmio + i*0x80 + PORT_SDBV; 2975 dd->port->mmio + i*0x80 + PORT_SDBV;
2624 } 2976 }
2625 2977
2626 /* Reset the HBA. */ 2978 timetaken = jiffies;
2627 if (mtip_hba_reset(dd) < 0) { 2979 timeout = jiffies + msecs_to_jiffies(30000);
2628 dev_err(&dd->pdev->dev, 2980 while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
2629 "Card did not reset within timeout\n"); 2981 time_before(jiffies, timeout)) {
2630 rv = -EIO; 2982 mdelay(100);
2983 }
2984 if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
2985 timetaken = jiffies - timetaken;
2986 dev_warn(&dd->pdev->dev,
2987 "Surprise removal detected at %u ms\n",
2988 jiffies_to_msecs(timetaken));
2989 rv = -ENODEV;
 2990 		goto out2;
2991 }
2992 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
2993 timetaken = jiffies - timetaken;
2994 dev_warn(&dd->pdev->dev,
2995 "Removal detected at %u ms\n",
2996 jiffies_to_msecs(timetaken));
2997 rv = -EFAULT;
2631 goto out2; 2998 goto out2;
2632 } 2999 }
2633 3000
3001 /* Conditionally reset the HBA. */
3002 if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
3003 if (mtip_hba_reset(dd) < 0) {
3004 dev_err(&dd->pdev->dev,
3005 "Card did not reset within timeout\n");
3006 rv = -EIO;
3007 goto out2;
3008 }
3009 } else {
3010 /* Clear any pending interrupts on the HBA */
3011 writel(readl(dd->mmio + HOST_IRQ_STAT),
3012 dd->mmio + HOST_IRQ_STAT);
3013 }
3014
2634 mtip_init_port(dd->port); 3015 mtip_init_port(dd->port);
2635 mtip_start_port(dd->port); 3016 mtip_start_port(dd->port);
2636 3017
@@ -2660,6 +3041,12 @@ static int mtip_hw_init(struct driver_data *dd)
2660 mod_timer(&dd->port->cmd_timer, 3041 mod_timer(&dd->port->cmd_timer,
2661 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD)); 3042 jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
2662 3043
3044
3045 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
3046 rv = -EFAULT;
3047 goto out3;
3048 }
3049
2663 if (mtip_get_identify(dd->port, NULL) < 0) { 3050 if (mtip_get_identify(dd->port, NULL) < 0) {
2664 rv = -EFAULT; 3051 rv = -EFAULT;
2665 goto out3; 3052 goto out3;
@@ -2667,10 +3054,47 @@ static int mtip_hw_init(struct driver_data *dd)
2667 3054
2668 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) == 3055 if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
2669 MTIP_FTL_REBUILD_MAGIC) { 3056 MTIP_FTL_REBUILD_MAGIC) {
2670 set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags); 3057 set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
2671 return MTIP_FTL_REBUILD_MAGIC; 3058 return MTIP_FTL_REBUILD_MAGIC;
2672 } 3059 }
2673 mtip_dump_identify(dd->port); 3060 mtip_dump_identify(dd->port);
3061
3062 /* check write protect, over temp and rebuild statuses */
3063 rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
3064 dd->port->log_buf,
3065 dd->port->log_buf_dma, 1);
3066 if (rv) {
3067 dev_warn(&dd->pdev->dev,
3068 "Error in READ LOG EXT (10h) command\n");
3069 /* non-critical error, don't fail the load */
3070 } else {
3071 buf = (unsigned char *)dd->port->log_buf;
3072 if (buf[259] & 0x1) {
3073 dev_info(&dd->pdev->dev,
3074 "Write protect bit is set.\n");
3075 set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
3076 }
3077 if (buf[288] == 0xF7) {
3078 dev_info(&dd->pdev->dev,
3079 "Exceeded Tmax, drive in thermal shutdown.\n");
3080 set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
3081 }
3082 if (buf[288] == 0xBF) {
3083 dev_info(&dd->pdev->dev,
3084 "Drive indicates rebuild has failed.\n");
3085 /* TODO */
3086 }
3087 }
3088
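
Log page 10h doubles here as a health snapshot: bit 0 of byte 259 flags write protect, and byte 288 encodes thermal shutdown (0xF7) or a failed rebuild (0xBF). A small sketch of that decode over a raw page buffer, with fabricated sample values:

#include <stdint.h>
#include <stdio.h>

/* Decode the health bytes mtip_hw_init() checks in the 10h log page. */
static void decode_health(const uint8_t *buf)
{
	if (buf[259] & 0x1)
		printf("write protect bit is set\n");
	if (buf[288] == 0xF7)
		printf("exceeded Tmax: drive in thermal shutdown\n");
	else if (buf[288] == 0xBF)
		printf("drive reports a failed rebuild\n");
}

int main(void)
{
	uint8_t page[512] = { 0 };

	page[259] = 0x01;	/* fabricated: write-protected drive */
	decode_health(page);
	return 0;
}
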
 3089 	/* get write protect progress */
3090 memset(&attr242, 0, sizeof(struct smart_attr));
3091 if (mtip_get_smart_attr(dd->port, 242, &attr242))
3092 dev_warn(&dd->pdev->dev,
3093 "Unable to check write protect progress\n");
3094 else
3095 dev_info(&dd->pdev->dev,
3096 "Write protect progress: %d%% (%d blocks)\n",
3097 attr242.cur, attr242.data);
2674 return rv; 3098 return rv;
2675 3099
2676out3: 3100out3:
@@ -2688,7 +3112,7 @@ out2:
2688 3112
2689 /* Free the command/command header memory. */ 3113 /* Free the command/command header memory. */
2690 dmam_free_coherent(&dd->pdev->dev, 3114 dmam_free_coherent(&dd->pdev->dev,
2691 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2), 3115 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
2692 dd->port->command_list, 3116 dd->port->command_list,
2693 dd->port->command_list_dma); 3117 dd->port->command_list_dma);
2694out1: 3118out1:
@@ -2712,9 +3136,12 @@ static int mtip_hw_exit(struct driver_data *dd)
2712 * Send standby immediate (E0h) to the drive so that it 3136 * Send standby immediate (E0h) to the drive so that it
2713 * saves its state. 3137 * saves its state.
2714 */ 3138 */
2715 if (atomic_read(&dd->drv_cleanup_done) != true) { 3139 if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
2716 3140
2717 mtip_standby_immediate(dd->port); 3141 if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
3142 if (mtip_standby_immediate(dd->port))
3143 dev_warn(&dd->pdev->dev,
3144 "STANDBY IMMEDIATE failed\n");
2718 3145
2719 /* de-initialize the port. */ 3146 /* de-initialize the port. */
2720 mtip_deinit_port(dd->port); 3147 mtip_deinit_port(dd->port);
@@ -2734,7 +3161,7 @@ static int mtip_hw_exit(struct driver_data *dd)
2734 3161
2735 /* Free the command/command header memory. */ 3162 /* Free the command/command header memory. */
2736 dmam_free_coherent(&dd->pdev->dev, 3163 dmam_free_coherent(&dd->pdev->dev,
2737 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2), 3164 HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
2738 dd->port->command_list, 3165 dd->port->command_list,
2739 dd->port->command_list_dma); 3166 dd->port->command_list_dma);
 2740 	/* Free the memory allocated for the port structure. */ 3167
@@ -2892,6 +3319,9 @@ static int mtip_block_ioctl(struct block_device *dev,
2892 if (!dd) 3319 if (!dd)
2893 return -ENOTTY; 3320 return -ENOTTY;
2894 3321
3322 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3323 return -ENOTTY;
3324
2895 switch (cmd) { 3325 switch (cmd) {
2896 case BLKFLSBUF: 3326 case BLKFLSBUF:
2897 return -ENOTTY; 3327 return -ENOTTY;
@@ -2927,6 +3357,9 @@ static int mtip_block_compat_ioctl(struct block_device *dev,
2927 if (!dd) 3357 if (!dd)
2928 return -ENOTTY; 3358 return -ENOTTY;
2929 3359
3360 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
3361 return -ENOTTY;
3362
2930 switch (cmd) { 3363 switch (cmd) {
2931 case BLKFLSBUF: 3364 case BLKFLSBUF:
2932 return -ENOTTY; 3365 return -ENOTTY;
@@ -3049,6 +3482,24 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3049 int nents = 0; 3482 int nents = 0;
3050 int tag = 0; 3483 int tag = 0;
3051 3484
3485 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
3486 if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
3487 &dd->dd_flag))) {
3488 bio_endio(bio, -ENXIO);
3489 return;
3490 }
3491 if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
3492 bio_endio(bio, -ENODATA);
3493 return;
3494 }
3495 if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
3496 &dd->dd_flag) &&
3497 bio_data_dir(bio))) {
3498 bio_endio(bio, -ENODATA);
3499 return;
3500 }
3501 }
3502
3052 if (unlikely(!bio_has_data(bio))) { 3503 if (unlikely(!bio_has_data(bio))) {
3053 blk_queue_flush(queue, 0); 3504 blk_queue_flush(queue, 0);
3054 bio_endio(bio, 0); 3505 bio_endio(bio, 0);
@@ -3061,7 +3512,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3061 3512
3062 if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) { 3513 if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
3063 dev_warn(&dd->pdev->dev, 3514 dev_warn(&dd->pdev->dev,
3064 "Maximum number of SGL entries exceeded"); 3515 "Maximum number of SGL entries exceeded\n");
3065 bio_io_error(bio); 3516 bio_io_error(bio);
3066 mtip_hw_release_scatterlist(dd, tag); 3517 mtip_hw_release_scatterlist(dd, tag);
3067 return; 3518 return;
@@ -3210,8 +3661,10 @@ skip_create_disk:
3210 kobject_put(kobj); 3661 kobject_put(kobj);
3211 } 3662 }
3212 3663
3213 if (dd->mtip_svc_handler) 3664 if (dd->mtip_svc_handler) {
3665 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
3214 return rv; /* service thread created for handling rebuild */ 3666 return rv; /* service thread created for handling rebuild */
3667 }
3215 3668
3216start_service_thread: 3669start_service_thread:
3217 sprintf(thd_name, "mtip_svc_thd_%02d", index); 3670 sprintf(thd_name, "mtip_svc_thd_%02d", index);
@@ -3220,12 +3673,15 @@ start_service_thread:
3220 dd, thd_name); 3673 dd, thd_name);
3221 3674
3222 if (IS_ERR(dd->mtip_svc_handler)) { 3675 if (IS_ERR(dd->mtip_svc_handler)) {
3223 printk(KERN_ERR "mtip32xx: service thread failed to start\n"); 3676 dev_err(&dd->pdev->dev, "service thread failed to start\n");
3224 dd->mtip_svc_handler = NULL; 3677 dd->mtip_svc_handler = NULL;
3225 rv = -EFAULT; 3678 rv = -EFAULT;
3226 goto kthread_run_error; 3679 goto kthread_run_error;
3227 } 3680 }
3228 3681
3682 if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
3683 rv = wait_for_rebuild;
3684
3229 return rv; 3685 return rv;
3230 3686
3231kthread_run_error: 3687kthread_run_error:
@@ -3266,16 +3722,18 @@ static int mtip_block_remove(struct driver_data *dd)
3266 struct kobject *kobj; 3722 struct kobject *kobj;
3267 3723
3268 if (dd->mtip_svc_handler) { 3724 if (dd->mtip_svc_handler) {
3269 set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags); 3725 set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
3270 wake_up_interruptible(&dd->port->svc_wait); 3726 wake_up_interruptible(&dd->port->svc_wait);
3271 kthread_stop(dd->mtip_svc_handler); 3727 kthread_stop(dd->mtip_svc_handler);
3272 } 3728 }
3273 3729
3274 /* Clean up the sysfs attributes managed by the protocol layer. */ 3730 /* Clean up the sysfs attributes, if created */
3275 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj); 3731 if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
3276 if (kobj) { 3732 kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
3277 mtip_hw_sysfs_exit(dd, kobj); 3733 if (kobj) {
3278 kobject_put(kobj); 3734 mtip_hw_sysfs_exit(dd, kobj);
3735 kobject_put(kobj);
3736 }
3279 } 3737 }
3280 3738
3281 /* 3739 /*
@@ -3283,6 +3741,11 @@ static int mtip_block_remove(struct driver_data *dd)
3283 * from /dev 3741 * from /dev
3284 */ 3742 */
3285 del_gendisk(dd->disk); 3743 del_gendisk(dd->disk);
3744
3745 spin_lock(&rssd_index_lock);
3746 ida_remove(&rssd_index_ida, dd->index);
3747 spin_unlock(&rssd_index_lock);
3748
3286 blk_cleanup_queue(dd->queue); 3749 blk_cleanup_queue(dd->queue);
3287 dd->disk = NULL; 3750 dd->disk = NULL;
3288 dd->queue = NULL; 3751 dd->queue = NULL;
@@ -3312,6 +3775,11 @@ static int mtip_block_shutdown(struct driver_data *dd)
3312 3775
3313 /* Delete our gendisk structure, and cleanup the blk queue. */ 3776 /* Delete our gendisk structure, and cleanup the blk queue. */
3314 del_gendisk(dd->disk); 3777 del_gendisk(dd->disk);
3778
3779 spin_lock(&rssd_index_lock);
3780 ida_remove(&rssd_index_ida, dd->index);
3781 spin_unlock(&rssd_index_lock);
3782
3315 blk_cleanup_queue(dd->queue); 3783 blk_cleanup_queue(dd->queue);
3316 dd->disk = NULL; 3784 dd->disk = NULL;
3317 dd->queue = NULL; 3785 dd->queue = NULL;
@@ -3359,11 +3827,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
3359 return -ENOMEM; 3827 return -ENOMEM;
3360 } 3828 }
3361 3829
3362 /* Set the atomic variable as 1 in case of SRSI */
3363 atomic_set(&dd->drv_cleanup_done, true);
3364
3365 atomic_set(&dd->resumeflag, false);
3366
3367 /* Attach the private data to this PCI device. */ 3830 /* Attach the private data to this PCI device. */
3368 pci_set_drvdata(pdev, dd); 3831 pci_set_drvdata(pdev, dd);
3369 3832
@@ -3420,7 +3883,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
3420 * instance number. 3883 * instance number.
3421 */ 3884 */
3422 instance++; 3885 instance++;
3423 3886 if (rv != MTIP_FTL_REBUILD_MAGIC)
3887 set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
3424 goto done; 3888 goto done;
3425 3889
3426block_initialize_err: 3890block_initialize_err:
@@ -3434,9 +3898,6 @@ iomap_err:
3434 pci_set_drvdata(pdev, NULL); 3898 pci_set_drvdata(pdev, NULL);
3435 return rv; 3899 return rv;
3436done: 3900done:
3437 /* Set the atomic variable as 0 in case of SRSI */
3438 atomic_set(&dd->drv_cleanup_done, true);
3439
3440 return rv; 3901 return rv;
3441} 3902}
3442 3903
@@ -3452,8 +3913,10 @@ static void mtip_pci_remove(struct pci_dev *pdev)
3452 struct driver_data *dd = pci_get_drvdata(pdev); 3913 struct driver_data *dd = pci_get_drvdata(pdev);
3453 int counter = 0; 3914 int counter = 0;
3454 3915
3916 set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
3917
3455 if (mtip_check_surprise_removal(pdev)) { 3918 if (mtip_check_surprise_removal(pdev)) {
3456 while (atomic_read(&dd->drv_cleanup_done) == false) { 3919 while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
3457 counter++; 3920 counter++;
3458 msleep(20); 3921 msleep(20);
3459 if (counter == 10) { 3922 if (counter == 10) {
@@ -3463,8 +3926,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
3463 } 3926 }
3464 } 3927 }
3465 } 3928 }
3466 /* Set the atomic variable as 1 in case of SRSI */
3467 atomic_set(&dd->drv_cleanup_done, true);
3468 3929
3469 /* Clean up the block layer. */ 3930 /* Clean up the block layer. */
3470 mtip_block_remove(dd); 3931 mtip_block_remove(dd);
@@ -3493,7 +3954,7 @@ static int mtip_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
3493 return -EFAULT; 3954 return -EFAULT;
3494 } 3955 }
3495 3956
3496 atomic_set(&dd->resumeflag, true); 3957 set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
3497 3958
3498 /* Disable ports & interrupts then send standby immediate */ 3959 /* Disable ports & interrupts then send standby immediate */
3499 rv = mtip_block_suspend(dd); 3960 rv = mtip_block_suspend(dd);
@@ -3559,7 +4020,7 @@ static int mtip_pci_resume(struct pci_dev *pdev)
3559 dev_err(&pdev->dev, "Unable to resume\n"); 4020 dev_err(&pdev->dev, "Unable to resume\n");
3560 4021
3561err: 4022err:
3562 atomic_set(&dd->resumeflag, false); 4023 clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
3563 4024
3564 return rv; 4025 return rv;
3565} 4026}
@@ -3608,18 +4069,25 @@ MODULE_DEVICE_TABLE(pci, mtip_pci_tbl);
3608 */ 4069 */
3609static int __init mtip_init(void) 4070static int __init mtip_init(void)
3610{ 4071{
4072 int error;
4073
3611 printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n"); 4074 printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
3612 4075
3613 /* Allocate a major block device number to use with this driver. */ 4076 /* Allocate a major block device number to use with this driver. */
3614 mtip_major = register_blkdev(0, MTIP_DRV_NAME); 4077 error = register_blkdev(0, MTIP_DRV_NAME);
3615 if (mtip_major < 0) { 4078 if (error <= 0) {
3616 printk(KERN_ERR "Unable to register block device (%d)\n", 4079 printk(KERN_ERR "Unable to register block device (%d)\n",
3617 mtip_major); 4080 error);
3618 return -EBUSY; 4081 return -EBUSY;
3619 } 4082 }
4083 mtip_major = error;
3620 4084
3621 /* Register our PCI operations. */ 4085 /* Register our PCI operations. */
3622 return pci_register_driver(&mtip_pci_driver); 4086 error = pci_register_driver(&mtip_pci_driver);
4087 if (error)
4088 unregister_blkdev(mtip_major, MTIP_DRV_NAME);
4089
4090 return error;
3623} 4091}
3624 4092
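
The rewritten mtip_init() finally unwinds on partial failure: if pci_register_driver() fails, the block major obtained from register_blkdev() is released instead of leaking. The shape of that fix as a generic two-stage-init sketch; acquire_a()/acquire_b() are hypothetical stand-ins for the two registration calls:

#include <stdio.h>

/* Hypothetical stand-ins for register_blkdev()/pci_register_driver(). */
static int acquire_a(void)  { return 7; }   /* >0 handle on success */
static void release_a(int h) { printf("released %d\n", h); }
static int acquire_b(void)  { return -1; }  /* simulate failure */

static int init_both(void)
{
	int a, err;

	a = acquire_a();
	if (a <= 0)
		return -1;

	err = acquire_b();
	if (err) {
		release_a(a);	/* unwind the first stage on failure */
		return err;
	}
	return 0;
}

int main(void)
{
	printf("init: %d\n", init_both());
	return 0;
}
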
3625/* 4093/*
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index e0554a8f2233..4ef58336310a 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -34,8 +34,8 @@
 34/* offset of Device Control register in PCIe extended capabilities space */ 34/* offset of Device Control register in PCIe extended capabilities space */
35#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48 35#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
36 36
37/* # of times to retry timed out IOs */ 37/* # of times to retry timed out/failed IOs */
38#define MTIP_MAX_RETRIES 5 38#define MTIP_MAX_RETRIES 2
39 39
40/* Various timeout values in ms */ 40/* Various timeout values in ms */
41#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000 41#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
@@ -114,12 +114,41 @@
114#define __force_bit2int (unsigned int __force) 114#define __force_bit2int (unsigned int __force)
115 115
116/* below are bit numbers in 'flags' defined in mtip_port */ 116/* below are bit numbers in 'flags' defined in mtip_port */
117#define MTIP_FLAG_IC_ACTIVE_BIT 0 117#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
118#define MTIP_FLAG_EH_ACTIVE_BIT 1 118#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
119#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2 119#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
 120#define MTIP_FLAG_ISSUE_CMDS_BIT 4 120#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcode */
121#define MTIP_FLAG_REBUILD_BIT 5 121#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
122#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8 122 (1 << MTIP_PF_EH_ACTIVE_BIT) | \
123 (1 << MTIP_PF_SE_ACTIVE_BIT) | \
124 (1 << MTIP_PF_DM_ACTIVE_BIT))
125
126#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
127#define MTIP_PF_ISSUE_CMDS_BIT 5
128#define MTIP_PF_REBUILD_BIT 6
129#define MTIP_PF_SVC_THD_STOP_BIT 8
130
131/* below are bit numbers in 'dd_flag' defined in driver_data */
132#define MTIP_DDF_REMOVE_PENDING_BIT 1
133#define MTIP_DDF_OVER_TEMP_BIT 2
134#define MTIP_DDF_WRITE_PROTECT_BIT 3
135#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
136 (1 << MTIP_DDF_OVER_TEMP_BIT) | \
137 (1 << MTIP_DDF_WRITE_PROTECT_BIT))
138
139#define MTIP_DDF_CLEANUP_BIT 5
140#define MTIP_DDF_RESUME_BIT 6
141#define MTIP_DDF_INIT_DONE_BIT 7
142#define MTIP_DDF_REBUILD_FAILED_BIT 8
143
144__packed struct smart_attr{
145 u8 attr_id;
146 u16 flags;
147 u8 cur;
148 u8 worst;
149 u32 data;
150 u8 res[3];
151};
123 152
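
The new smart_attr struct mirrors a 12-byte on-media SMART attribute record (1 + 2 + 1 + 1 + 4 + 3 bytes), which is why it is declared __packed; without packing, the u16 and u32 members would attract padding and the 29-entry pointer walk in mtip_get_smart_attr() would drift off the record boundaries. A compile-time check of that layout assumption:

#include <stdint.h>

struct __attribute__((packed)) smart_attr {
	uint8_t  attr_id;
	uint16_t flags;
	uint8_t  cur;
	uint8_t  worst;
	uint32_t data;
	uint8_t  res[3];
};

/* 29 entries of 12 bytes starting at offset 2 fit in one 512-byte sector. */
_Static_assert(sizeof(struct smart_attr) == 12, "entry must be 12 bytes");
_Static_assert(2 + 29 * sizeof(struct smart_attr) <= 512, "fits one sector");

int main(void) { return 0; }
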
124/* Register Frame Information Structure (FIS), host to device. */ 153/* Register Frame Information Structure (FIS), host to device. */
125struct host_to_dev_fis { 154struct host_to_dev_fis {
@@ -345,6 +374,12 @@ struct mtip_port {
345 * when the command slot and all associated data structures 374 * when the command slot and all associated data structures
346 * are no longer needed. 375 * are no longer needed.
347 */ 376 */
377 u16 *log_buf;
378 dma_addr_t log_buf_dma;
379
380 u8 *smart_buf;
381 dma_addr_t smart_buf_dma;
382
348 unsigned long allocated[SLOTBITS_IN_LONGS]; 383 unsigned long allocated[SLOTBITS_IN_LONGS];
349 /* 384 /*
350 * used to queue commands when an internal command is in progress 385 * used to queue commands when an internal command is in progress
@@ -368,6 +403,7 @@ struct mtip_port {
368 * Timer used to complete commands that have been active for too long. 403 * Timer used to complete commands that have been active for too long.
369 */ 404 */
370 struct timer_list cmd_timer; 405 struct timer_list cmd_timer;
406 unsigned long ic_pause_timer;
371 /* 407 /*
372 * Semaphore used to block threads if there are no 408 * Semaphore used to block threads if there are no
373 * command slots available. 409 * command slots available.
@@ -404,13 +440,9 @@ struct driver_data {
404 440
405 unsigned slot_groups; /* number of slot groups the product supports */ 441 unsigned slot_groups; /* number of slot groups the product supports */
406 442
407 atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
408
409 unsigned long index; /* Index to determine the disk name */ 443 unsigned long index; /* Index to determine the disk name */
410 444
411 unsigned int ftlrebuildflag; /* FTL rebuild flag */ 445 unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
412
413 atomic_t resumeflag; /* Atomic variable to track suspend/resume */
414 446
415 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */ 447 struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
416}; 448};
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c4a60badf252..0e4ef3de9d5d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -351,6 +351,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
351 cap_str_10, cap_str_2); 351 cap_str_10, cap_str_2);
352 352
353 set_capacity(vblk->disk, capacity); 353 set_capacity(vblk->disk, capacity);
354 revalidate_disk(vblk->disk);
354done: 355done:
355 mutex_unlock(&vblk->config_lock); 356 mutex_unlock(&vblk->config_lock);
356} 357}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 0088bf60f368..73f196ca713f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -321,6 +321,7 @@ struct seg_buf {
321static void xen_blkbk_unmap(struct pending_req *req) 321static void xen_blkbk_unmap(struct pending_req *req)
322{ 322{
323 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 323 struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
324 struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
324 unsigned int i, invcount = 0; 325 unsigned int i, invcount = 0;
325 grant_handle_t handle; 326 grant_handle_t handle;
326 int ret; 327 int ret;
@@ -332,25 +333,12 @@ static void xen_blkbk_unmap(struct pending_req *req)
332 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i), 333 gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
333 GNTMAP_host_map, handle); 334 GNTMAP_host_map, handle);
334 pending_handle(req, i) = BLKBACK_INVALID_HANDLE; 335 pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
336 pages[invcount] = virt_to_page(vaddr(req, i));
335 invcount++; 337 invcount++;
336 } 338 }
337 339
338 ret = HYPERVISOR_grant_table_op( 340 ret = gnttab_unmap_refs(unmap, pages, invcount, false);
339 GNTTABOP_unmap_grant_ref, unmap, invcount);
340 BUG_ON(ret); 341 BUG_ON(ret);
341 /*
342 * Note, we use invcount, so nr->pages, so we can't index
343 * using vaddr(req, i).
344 */
345 for (i = 0; i < invcount; i++) {
346 ret = m2p_remove_override(
347 virt_to_page(unmap[i].host_addr), false);
348 if (ret) {
349 pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
350 (unsigned long)unmap[i].host_addr);
351 continue;
352 }
353 }
354} 342}
355 343
356static int xen_blkbk_map(struct blkif_request *req, 344static int xen_blkbk_map(struct blkif_request *req,
@@ -378,7 +366,7 @@ static int xen_blkbk_map(struct blkif_request *req,
378 pending_req->blkif->domid); 366 pending_req->blkif->domid);
379 } 367 }
380 368
381 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg); 369 ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
382 BUG_ON(ret); 370 BUG_ON(ret);
383 371
384 /* 372 /*
@@ -398,15 +386,6 @@ static int xen_blkbk_map(struct blkif_request *req,
398 if (ret) 386 if (ret)
399 continue; 387 continue;
400 388
401 ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
402 blkbk->pending_page(pending_req, i), NULL);
403 if (ret) {
404 pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
405 (unsigned long)map[i].dev_bus_addr, ret);
406 /* We could switch over to GNTTABOP_copy */
407 continue;
408 }
409
410 seg[i].buf = map[i].dev_bus_addr | 389 seg[i].buf = map[i].dev_bus_addr |
411 (req->u.rw.seg[i].first_sect << 9); 390 (req->u.rw.seg[i].first_sect << 9);
412 } 391 }
@@ -419,21 +398,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
419 int err = 0; 398 int err = 0;
420 int status = BLKIF_RSP_OKAY; 399 int status = BLKIF_RSP_OKAY;
421 struct block_device *bdev = blkif->vbd.bdev; 400 struct block_device *bdev = blkif->vbd.bdev;
401 unsigned long secure;
422 402
423 blkif->st_ds_req++; 403 blkif->st_ds_req++;
424 404
425 xen_blkif_get(blkif); 405 xen_blkif_get(blkif);
426 if (blkif->blk_backend_type == BLKIF_BACKEND_PHY || 406 secure = (blkif->vbd.discard_secure &&
427 blkif->blk_backend_type == BLKIF_BACKEND_FILE) { 407 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
428 unsigned long secure = (blkif->vbd.discard_secure && 408 BLKDEV_DISCARD_SECURE : 0;
429 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ? 409
430 BLKDEV_DISCARD_SECURE : 0; 410 err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
431 err = blkdev_issue_discard(bdev, 411 req->u.discard.nr_sectors,
432 req->u.discard.sector_number, 412 GFP_KERNEL, secure);
433 req->u.discard.nr_sectors,
434 GFP_KERNEL, secure);
435 } else
436 err = -EOPNOTSUPP;
437 413
438 if (err == -EOPNOTSUPP) { 414 if (err == -EOPNOTSUPP) {
439 pr_debug(DRV_PFX "discard op failed, not supported\n"); 415 pr_debug(DRV_PFX "discard op failed, not supported\n");
@@ -830,7 +806,7 @@ static int __init xen_blkif_init(void)
830 int i, mmap_pages; 806 int i, mmap_pages;
831 int rc = 0; 807 int rc = 0;
832 808
833 if (!xen_pv_domain()) 809 if (!xen_domain())
834 return -ENODEV; 810 return -ENODEV;
835 811
836 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL); 812 blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index d0ee7edc9be8..773cf27dc23f 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -146,11 +146,6 @@ enum blkif_protocol {
146 BLKIF_PROTOCOL_X86_64 = 3, 146 BLKIF_PROTOCOL_X86_64 = 3,
147}; 147};
148 148
149enum blkif_backend_type {
150 BLKIF_BACKEND_PHY = 1,
151 BLKIF_BACKEND_FILE = 2,
152};
153
154struct xen_vbd { 149struct xen_vbd {
155 /* What the domain refers to this vbd as. */ 150 /* What the domain refers to this vbd as. */
156 blkif_vdev_t handle; 151 blkif_vdev_t handle;
@@ -177,7 +172,6 @@ struct xen_blkif {
177 unsigned int irq; 172 unsigned int irq;
178 /* Comms information. */ 173 /* Comms information. */
179 enum blkif_protocol blk_protocol; 174 enum blkif_protocol blk_protocol;
180 enum blkif_backend_type blk_backend_type;
181 union blkif_back_rings blk_rings; 175 union blkif_back_rings blk_rings;
182 void *blk_ring; 176 void *blk_ring;
183 /* The VBD attached to this interface. */ 177 /* The VBD attached to this interface. */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 24a2fb57e5d0..89860f34a7ec 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -381,72 +381,49 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
381 err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache", 381 err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
382 "%d", state); 382 "%d", state);
383 if (err) 383 if (err)
384 xenbus_dev_fatal(dev, err, "writing feature-flush-cache"); 384 dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
385 385
386 return err; 386 return err;
387} 387}
388 388
389int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be) 389static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
390{ 390{
391 struct xenbus_device *dev = be->dev; 391 struct xenbus_device *dev = be->dev;
392 struct xen_blkif *blkif = be->blkif; 392 struct xen_blkif *blkif = be->blkif;
393 char *type;
394 int err; 393 int err;
395 int state = 0; 394 int state = 0;
395 struct block_device *bdev = be->blkif->vbd.bdev;
396 struct request_queue *q = bdev_get_queue(bdev);
396 397
397 type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL); 398 if (blk_queue_discard(q)) {
398 if (!IS_ERR(type)) { 399 err = xenbus_printf(xbt, dev->nodename,
399 if (strncmp(type, "file", 4) == 0) { 400 "discard-granularity", "%u",
400 state = 1; 401 q->limits.discard_granularity);
401 blkif->blk_backend_type = BLKIF_BACKEND_FILE; 402 if (err) {
403 dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
404 return;
402 } 405 }
403 if (strncmp(type, "phy", 3) == 0) { 406 err = xenbus_printf(xbt, dev->nodename,
404 struct block_device *bdev = be->blkif->vbd.bdev; 407 "discard-alignment", "%u",
405 struct request_queue *q = bdev_get_queue(bdev); 408 q->limits.discard_alignment);
406 if (blk_queue_discard(q)) { 409 if (err) {
407 err = xenbus_printf(xbt, dev->nodename, 410 dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
408 "discard-granularity", "%u", 411 return;
409 q->limits.discard_granularity); 412 }
410 if (err) { 413 state = 1;
411 xenbus_dev_fatal(dev, err, 414 /* Optional. */
412 "writing discard-granularity"); 415 err = xenbus_printf(xbt, dev->nodename,
413 goto kfree; 416 "discard-secure", "%d",
414 } 417 blkif->vbd.discard_secure);
415 err = xenbus_printf(xbt, dev->nodename, 418 if (err) {
416 "discard-alignment", "%u", 419 dev_warn(dev-dev, "writing discard-secure (%d)", err);
417 q->limits.discard_alignment); 420 return;
418 if (err) {
419 xenbus_dev_fatal(dev, err,
420 "writing discard-alignment");
421 goto kfree;
422 }
423 state = 1;
424 blkif->blk_backend_type = BLKIF_BACKEND_PHY;
425 }
426 /* Optional. */
427 err = xenbus_printf(xbt, dev->nodename,
428 "discard-secure", "%d",
429 blkif->vbd.discard_secure);
430 if (err) {
431 xenbus_dev_fatal(dev, err,
432 "writting discard-secure");
433 goto kfree;
434 }
435 } 421 }
436 } else {
437 err = PTR_ERR(type);
438 xenbus_dev_fatal(dev, err, "reading type");
439 goto out;
440 } 422 }
441
442 err = xenbus_printf(xbt, dev->nodename, "feature-discard", 423 err = xenbus_printf(xbt, dev->nodename, "feature-discard",
443 "%d", state); 424 "%d", state);
444 if (err) 425 if (err)
445 xenbus_dev_fatal(dev, err, "writing feature-discard"); 426 dev_warn(&dev->dev, "writing feature-discard (%d)", err);
446kfree:
447 kfree(type);
448out:
449 return err;
450} 427}
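
The rewrite keys discard advertisement off blk_queue_discard() instead of the xenstore "type" node, and downgrades xenbus write failures from xenbus_dev_fatal() to dev_warn(), so a missing optional key can no longer abort the handshake. The warn-and-return shape of the helper, with write_key() as a hypothetical stand-in for xenbus_printf():

#include <stdio.h>

/* Hypothetical stand-in for xenbus_printf(); 0 means the write succeeded. */
static int write_key(const char *key, int val)
{
	printf("write %s=%d\n", key, val);
	return 0;
}

/* Advertise discard: warn and return on failure, never hard-fail. */
static void advertise_discard(int queue_supports_discard, int secure)
{
	int state = 0;

	if (queue_supports_discard) {
		if (write_key("discard-granularity", 4096))
			return;		/* warn-and-return, not fatal */
		if (write_key("discard-alignment", 0))
			return;
		state = 1;
		write_key("discard-secure", secure);	/* optional key */
	}
	write_key("feature-discard", state);
}

int main(void)
{
	advertise_discard(1, 0);
	return 0;
}
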
451int xen_blkbk_barrier(struct xenbus_transaction xbt, 428int xen_blkbk_barrier(struct xenbus_transaction xbt,
452 struct backend_info *be, int state) 429 struct backend_info *be, int state)
@@ -457,7 +434,7 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
457 err = xenbus_printf(xbt, dev->nodename, "feature-barrier", 434 err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
458 "%d", state); 435 "%d", state);
459 if (err) 436 if (err)
460 xenbus_dev_fatal(dev, err, "writing feature-barrier"); 437 dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
461 438
462 return err; 439 return err;
463} 440}
@@ -689,14 +666,12 @@ again:
689 return; 666 return;
690 } 667 }
691 668
692 err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support); 669 /* If we can't advertise it is OK. */
693 if (err) 670 xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
694 goto abort;
695 671
696 err = xen_blkbk_discard(xbt, be); 672 xen_blkbk_discard(xbt, be);
697 673
698 /* If we can't advertise it is OK. */ 674 xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
699 err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
700 675
701 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu", 676 err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
702 (unsigned long long)vbd_sz(&be->blkif->vbd)); 677 (unsigned long long)vbd_sz(&be->blkif->vbd));
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98cbeba8cd53..4e86393a09cf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -43,6 +43,7 @@
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/scatterlist.h> 45#include <linux/scatterlist.h>
46#include <linux/bitmap.h>
46 47
47#include <xen/xen.h> 48#include <xen/xen.h>
48#include <xen/xenbus.h> 49#include <xen/xenbus.h>
@@ -81,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops;
81 */ 82 */
82struct blkfront_info 83struct blkfront_info
83{ 84{
85 spinlock_t io_lock;
84 struct mutex mutex; 86 struct mutex mutex;
85 struct xenbus_device *xbdev; 87 struct xenbus_device *xbdev;
86 struct gendisk *gd; 88 struct gendisk *gd;
@@ -105,8 +107,6 @@ struct blkfront_info
105 int is_ready; 107 int is_ready;
106}; 108};
107 109
108static DEFINE_SPINLOCK(blkif_io_lock);
109
110static unsigned int nr_minors; 110static unsigned int nr_minors;
111static unsigned long *minors; 111static unsigned long *minors;
112static DEFINE_SPINLOCK(minor_lock); 112static DEFINE_SPINLOCK(minor_lock);
@@ -177,8 +177,7 @@ static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
177 177
178 spin_lock(&minor_lock); 178 spin_lock(&minor_lock);
179 if (find_next_bit(minors, end, minor) >= end) { 179 if (find_next_bit(minors, end, minor) >= end) {
180 for (; minor < end; ++minor) 180 bitmap_set(minors, minor, nr);
181 __set_bit(minor, minors);
182 rc = 0; 181 rc = 0;
183 } else 182 } else
184 rc = -EBUSY; 183 rc = -EBUSY;
@@ -193,8 +192,7 @@ static void xlbd_release_minors(unsigned int minor, unsigned int nr)
193 192
194 BUG_ON(end > nr_minors); 193 BUG_ON(end > nr_minors);
195 spin_lock(&minor_lock); 194 spin_lock(&minor_lock);
196 for (; minor < end; ++minor) 195 bitmap_clear(minors, minor, nr);
197 __clear_bit(minor, minors);
198 spin_unlock(&minor_lock); 196 spin_unlock(&minor_lock);
199} 197}
200 198
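
xlbd_reserve_minors()/xlbd_release_minors() now use bitmap_set() and bitmap_clear() instead of hand-rolled per-bit loops. A user-space rendition of what those helpers do over an array of unsigned long words (the kernel versions fill whole words at a time; the naive loop below is just the semantic equivalent):

#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* Set @nr consecutive bits starting at @start, like the kernel's bitmap_set(). */
static void bitmap_set(unsigned long *map, unsigned start, unsigned nr)
{
	unsigned i;

	for (i = start; i < start + nr; i++)
		map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
}

static void bitmap_clear(unsigned long *map, unsigned start, unsigned nr)
{
	unsigned i;

	for (i = start; i < start + nr; i++)
		map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
}

int main(void)
{
	unsigned long minors[4] = { 0 };

	bitmap_set(minors, 16, 16);	/* reserve minors 16..31 */
	printf("word0 = %#lx\n", minors[0]);
	bitmap_clear(minors, 16, 16);
	printf("word0 = %#lx\n", minors[0]);
	return 0;
}
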
@@ -419,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
419 struct request_queue *rq; 417 struct request_queue *rq;
420 struct blkfront_info *info = gd->private_data; 418 struct blkfront_info *info = gd->private_data;
421 419
422 rq = blk_init_queue(do_blkif_request, &blkif_io_lock); 420 rq = blk_init_queue(do_blkif_request, &info->io_lock);
423 if (rq == NULL) 421 if (rq == NULL)
424 return -1; 422 return -1;
425 423
@@ -636,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
636 if (info->rq == NULL) 634 if (info->rq == NULL)
637 return; 635 return;
638 636
639 spin_lock_irqsave(&blkif_io_lock, flags); 637 spin_lock_irqsave(&info->io_lock, flags);
640 638
641 /* No more blkif_request(). */ 639 /* No more blkif_request(). */
642 blk_stop_queue(info->rq); 640 blk_stop_queue(info->rq);
643 641
644 /* No more gnttab callback work. */ 642 /* No more gnttab callback work. */
645 gnttab_cancel_free_callback(&info->callback); 643 gnttab_cancel_free_callback(&info->callback);
646 spin_unlock_irqrestore(&blkif_io_lock, flags); 644 spin_unlock_irqrestore(&info->io_lock, flags);
647 645
648 /* Flush gnttab callback work. Must be done with no locks held. */ 646 /* Flush gnttab callback work. Must be done with no locks held. */
649 flush_work_sync(&info->work); 647 flush_work_sync(&info->work);
@@ -675,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work)
675{ 673{
676 struct blkfront_info *info = container_of(work, struct blkfront_info, work); 674 struct blkfront_info *info = container_of(work, struct blkfront_info, work);
677 675
678 spin_lock_irq(&blkif_io_lock); 676 spin_lock_irq(&info->io_lock);
679 if (info->connected == BLKIF_STATE_CONNECTED) 677 if (info->connected == BLKIF_STATE_CONNECTED)
680 kick_pending_request_queues(info); 678 kick_pending_request_queues(info);
681 spin_unlock_irq(&blkif_io_lock); 679 spin_unlock_irq(&info->io_lock);
682} 680}
683 681
684static void blkif_free(struct blkfront_info *info, int suspend) 682static void blkif_free(struct blkfront_info *info, int suspend)
685{ 683{
686 /* Prevent new requests being issued until we fix things up. */ 684 /* Prevent new requests being issued until we fix things up. */
687 spin_lock_irq(&blkif_io_lock); 685 spin_lock_irq(&info->io_lock);
688 info->connected = suspend ? 686 info->connected = suspend ?
689 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED; 687 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
690 /* No more blkif_request(). */ 688 /* No more blkif_request(). */
@@ -692,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
692 blk_stop_queue(info->rq); 690 blk_stop_queue(info->rq);
693 /* No more gnttab callback work. */ 691 /* No more gnttab callback work. */
694 gnttab_cancel_free_callback(&info->callback); 692 gnttab_cancel_free_callback(&info->callback);
695 spin_unlock_irq(&blkif_io_lock); 693 spin_unlock_irq(&info->io_lock);
696 694
697 /* Flush gnttab callback work. Must be done with no locks held. */ 695 /* Flush gnttab callback work. Must be done with no locks held. */
698 flush_work_sync(&info->work); 696 flush_work_sync(&info->work);
@@ -728,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
728 struct blkfront_info *info = (struct blkfront_info *)dev_id; 726 struct blkfront_info *info = (struct blkfront_info *)dev_id;
729 int error; 727 int error;
730 728
731 spin_lock_irqsave(&blkif_io_lock, flags); 729 spin_lock_irqsave(&info->io_lock, flags);
732 730
733 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) { 731 if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
734 spin_unlock_irqrestore(&blkif_io_lock, flags); 732 spin_unlock_irqrestore(&info->io_lock, flags);
735 return IRQ_HANDLED; 733 return IRQ_HANDLED;
736 } 734 }
737 735
@@ -816,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
816 814
817 kick_pending_request_queues(info); 815 kick_pending_request_queues(info);
818 816
819 spin_unlock_irqrestore(&blkif_io_lock, flags); 817 spin_unlock_irqrestore(&info->io_lock, flags);
820 818
821 return IRQ_HANDLED; 819 return IRQ_HANDLED;
822} 820}
@@ -991,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev,
991 } 989 }
992 990
993 mutex_init(&info->mutex); 991 mutex_init(&info->mutex);
992 spin_lock_init(&info->io_lock);
994 info->xbdev = dev; 993 info->xbdev = dev;
995 info->vdevice = vdevice; 994 info->vdevice = vdevice;
996 info->connected = BLKIF_STATE_DISCONNECTED; 995 info->connected = BLKIF_STATE_DISCONNECTED;
@@ -1068,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info)
1068 1067
1069 xenbus_switch_state(info->xbdev, XenbusStateConnected); 1068 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1070 1069
1071 spin_lock_irq(&blkif_io_lock); 1070 spin_lock_irq(&info->io_lock);
1072 1071
1073 /* Now safe for us to use the shared ring */ 1072 /* Now safe for us to use the shared ring */
1074 info->connected = BLKIF_STATE_CONNECTED; 1073 info->connected = BLKIF_STATE_CONNECTED;
@@ -1079,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info)
1079 /* Kick any other new requests queued since we resumed */ 1078 /* Kick any other new requests queued since we resumed */
1080 kick_pending_request_queues(info); 1079 kick_pending_request_queues(info);
1081 1080
1082 spin_unlock_irq(&blkif_io_lock); 1081 spin_unlock_irq(&info->io_lock);
1083 1082
1084 return 0; 1083 return 0;
1085} 1084}
@@ -1277,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info)
1277 xenbus_switch_state(info->xbdev, XenbusStateConnected); 1276 xenbus_switch_state(info->xbdev, XenbusStateConnected);
1278 1277
1279 /* Kick pending requests. */ 1278 /* Kick pending requests. */
1280 spin_lock_irq(&blkif_io_lock); 1279 spin_lock_irq(&info->io_lock);
1281 info->connected = BLKIF_STATE_CONNECTED; 1280 info->connected = BLKIF_STATE_CONNECTED;
1282 kick_pending_request_queues(info); 1281 kick_pending_request_queues(info);
1283 spin_unlock_irq(&blkif_io_lock); 1282 spin_unlock_irq(&info->io_lock);
1284 1283
1285 add_disk(info->gd); 1284 add_disk(info->gd);
1286 1285
@@ -1410,7 +1409,6 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
1410 mutex_lock(&blkfront_mutex); 1409 mutex_lock(&blkfront_mutex);
1411 1410
1412 bdev = bdget_disk(disk, 0); 1411 bdev = bdget_disk(disk, 0);
1413 bdput(bdev);
1414 1412
1415 if (bdev->bd_openers) 1413 if (bdev->bd_openers)
1416 goto out; 1414 goto out;
@@ -1441,6 +1439,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
1441 } 1439 }
1442 1440
1443out: 1441out:
1442 bdput(bdev);
1444 mutex_unlock(&blkfront_mutex); 1443 mutex_unlock(&blkfront_mutex);
1445 return 0; 1444 return 0;
1446} 1445}
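
The xen-blkfront hunks above make three independent fixes: the driver-wide blkif_io_lock becomes a per-device info->io_lock (initialized in blkfront_probe()), so several virtual disks no longer contend on one global spinlock; the open-coded minor-number loops are replaced by the bitmap_set()/bitmap_clear() helpers from linux/bitmap.h; and blkif_release() now calls bdput(bdev) on the way out instead of immediately after bdget_disk(), since the old order dropped the reference before reading bdev->bd_openers. A minimal sketch of the per-device locking pattern, with mydev and its fields invented for illustration:

    #include <linux/spinlock.h>

    struct mydev {
        spinlock_t io_lock;   /* guards this device's ring only */
        int connected;
    };

    static void mydev_init(struct mydev *d)
    {
        /* replaces a single DEFINE_SPINLOCK() shared by every instance */
        spin_lock_init(&d->io_lock);
    }

    static void mydev_quiesce(struct mydev *d)
    {
        unsigned long flags;

        spin_lock_irqsave(&d->io_lock, flags);
        d->connected = 0;
        spin_unlock_irqrestore(&d->io_lock, flags);
    }

The lock also has to reach blk_init_queue(), which is why the queue setup above switches to &info->io_lock as well.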
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 48442476ec00..ae9edca7b56d 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -72,7 +72,9 @@ static struct usb_device_id ath3k_table[] = {
72 72
73 /* Atheros AR3012 with sflash firmware*/ 73 /* Atheros AR3012 with sflash firmware*/
74 { USB_DEVICE(0x0CF3, 0x3004) }, 74 { USB_DEVICE(0x0CF3, 0x3004) },
75 { USB_DEVICE(0x0CF3, 0x311D) },
75 { USB_DEVICE(0x13d3, 0x3375) }, 76 { USB_DEVICE(0x13d3, 0x3375) },
77 { USB_DEVICE(0x04CA, 0x3005) },
76 78
77 /* Atheros AR5BBU12 with sflash firmware */ 79 /* Atheros AR5BBU12 with sflash firmware */
78 { USB_DEVICE(0x0489, 0xE02C) }, 80 { USB_DEVICE(0x0489, 0xE02C) },
@@ -89,7 +91,9 @@ static struct usb_device_id ath3k_blist_tbl[] = {
89 91
90 /* Atheros AR3012 with sflash firmware*/ 92 /* Atheros AR3012 with sflash firmware*/
91 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 93 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
94 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
92 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 95 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
96 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
93 97
94 { } /* Terminating entry */ 98 { } /* Terminating entry */
95}; 99};
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 480cad920048..3311b812a0c6 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -61,7 +61,7 @@ static struct usb_device_id btusb_table[] = {
61 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, 61 { USB_DEVICE_INFO(0xe0, 0x01, 0x01) },
62 62
63 /* Broadcom SoftSailing reporting vendor specific */ 63 /* Broadcom SoftSailing reporting vendor specific */
64 { USB_DEVICE(0x05ac, 0x21e1) }, 64 { USB_DEVICE(0x0a5c, 0x21e1) },
65 65
66 /* Apple MacBookPro 7,1 */ 66 /* Apple MacBookPro 7,1 */
67 { USB_DEVICE(0x05ac, 0x8213) }, 67 { USB_DEVICE(0x05ac, 0x8213) },
@@ -103,6 +103,7 @@ static struct usb_device_id btusb_table[] = {
103 /* Broadcom BCM20702A0 */ 103 /* Broadcom BCM20702A0 */
104 { USB_DEVICE(0x0a5c, 0x21e3) }, 104 { USB_DEVICE(0x0a5c, 0x21e3) },
105 { USB_DEVICE(0x0a5c, 0x21e6) }, 105 { USB_DEVICE(0x0a5c, 0x21e6) },
106 { USB_DEVICE(0x0a5c, 0x21e8) },
106 { USB_DEVICE(0x0a5c, 0x21f3) }, 107 { USB_DEVICE(0x0a5c, 0x21f3) },
107 { USB_DEVICE(0x413c, 0x8197) }, 108 { USB_DEVICE(0x413c, 0x8197) },
108 109
@@ -129,7 +130,9 @@ static struct usb_device_id blacklist_table[] = {
129 130
130 /* Atheros 3012 with sflash firmware */ 131 /* Atheros 3012 with sflash firmware */
131 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 132 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
133 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
132 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, 134 { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
135 { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 },
133 136
134 /* Atheros AR5BBU12 with sflash firmware */ 137 /* Atheros AR5BBU12 with sflash firmware */
135 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, 138 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
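
Besides new AR3012 (0x0cf3:0x311d, 0x04ca:0x3005) and BCM20702A0 (0x0a5c:0x21e8) entries, the btusb hunk corrects the Broadcom SoftSailing ID from vendor 0x05ac, which is Apple's USB vendor ID, to 0x0a5c, Broadcom's, so the device actually matches. The entries follow the standard usb_device_id pattern; a sketch with placeholder IDs:

    #include <linux/module.h>
    #include <linux/usb.h>

    /* Illustrative only; 0x1234:0x5678 is a made-up vendor:product pair. */
    static struct usb_device_id example_table[] = {
        { USB_DEVICE(0x1234, 0x5678) },  /* exact vendor/product match */
        { }                              /* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, example_table);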
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index fd5adb408f44..98a8c05d4f23 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -299,11 +299,11 @@ static void hci_uart_tty_close(struct tty_struct *tty)
299 hci_uart_close(hdev); 299 hci_uart_close(hdev);
300 300
301 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) { 301 if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
302 hu->proto->close(hu);
303 if (hdev) { 302 if (hdev) {
304 hci_unregister_dev(hdev); 303 hci_unregister_dev(hdev);
305 hci_free_dev(hdev); 304 hci_free_dev(hdev);
306 } 305 }
306 hu->proto->close(hu);
307 } 307 }
308 308
309 kfree(hu); 309 kfree(hu);
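
The hci_uart_tty_close() change is purely an ordering fix: hu->proto->close(hu) now runs after hci_unregister_dev()/hci_free_dev() rather than before, presumably because hdev teardown can still reach into protocol-private state, so closing the protocol first risked a use-after-free. This is the usual rule of tearing down the consumer before its provider; sketch with invented names:

    /* Sketch: unregister the consumer before freeing what it depends on. */
    static void example_teardown(struct consumer *c, struct provider *p)
    {
        consumer_unregister(c);  /* may still call back into p */
        consumer_free(c);
        provider_close(p);       /* safe now: no callbacks remain */
    }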
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 3845ab44c330..dfd7876f127c 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -906,8 +906,8 @@ int hpet_alloc(struct hpet_data *hdp)
906 hpetp->hp_which, hdp->hd_phys_address, 906 hpetp->hp_which, hdp->hd_phys_address,
907 hpetp->hp_ntimer > 1 ? "s" : ""); 907 hpetp->hp_ntimer > 1 ? "s" : "");
908 for (i = 0; i < hpetp->hp_ntimer; i++) 908 for (i = 0; i < hpetp->hp_ntimer; i++)
909 printk("%s %d", i > 0 ? "," : "", hdp->hd_irq[i]); 909 printk(KERN_CONT "%s %d", i > 0 ? "," : "", hdp->hd_irq[i]);
910 printk("\n"); 910 printk(KERN_CONT "\n");
911 911
912 temp = hpetp->hp_tick_freq; 912 temp = hpetp->hp_tick_freq;
913 remainder = do_div(temp, 1000000); 913 remainder = do_div(temp, 1000000);
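
A printk() without KERN_CONT starts a new message (and gets a default log level), so the per-timer IRQ list in hpet_alloc() could be split across log lines instead of continuing the banner line. Marking the pieces KERN_CONT restores single-line output. The same pattern with pr_cont(), which expands to printk(KERN_CONT ...):

    #include <linux/kernel.h>

    static void print_irqs(const int *irq, int n)
    {
        int i;

        printk(KERN_INFO "timer irqs:");
        for (i = 0; i < n; i++)
            pr_cont("%s%d", i ? ", " : " ", irq[i]);
        pr_cont("\n");
    }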
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 54ca8b23cde3..4ec04a754733 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1260,10 +1260,15 @@ static int proc_do_uuid(ctl_table *table, int write,
1260 uuid = table->data; 1260 uuid = table->data;
1261 if (!uuid) { 1261 if (!uuid) {
1262 uuid = tmp_uuid; 1262 uuid = tmp_uuid;
1263 uuid[8] = 0;
1264 }
1265 if (uuid[8] == 0)
1266 generate_random_uuid(uuid); 1263 generate_random_uuid(uuid);
1264 } else {
1265 static DEFINE_SPINLOCK(bootid_spinlock);
1266
1267 spin_lock(&bootid_spinlock);
1268 if (!uuid[8])
1269 generate_random_uuid(uuid);
1270 spin_unlock(&bootid_spinlock);
1271 }
1267 1272
1268 sprintf(buf, "%pU", uuid); 1273 sprintf(buf, "%pU", uuid);
1269 1274
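
proc_do_uuid() backs both /proc/sys/kernel/random/uuid (table->data == NULL, fresh UUID per read) and boot_id (generated once, stable thereafter). The old code tested uuid[8] == 0 and then generated, so two first readers of boot_id could race and each rewrite it; the fix re-checks under a local spinlock, the standard repair for a check-then-act race. generate_random_uuid() sets the UUID variant bits, so byte 8 is never zero once initialized. The idiom in isolation:

    #include <linux/spinlock.h>
    #include <linux/string.h>

    void generate_random_uuid(unsigned char uuid_out[16]);  /* from random.c */

    static unsigned char boot_uuid[16];

    static void get_boot_uuid(unsigned char *out)
    {
        static DEFINE_SPINLOCK(lock);

        spin_lock(&lock);
        if (!boot_uuid[8])                    /* nonzero once generated */
            generate_random_uuid(boot_uuid);  /* runs at most once */
        spin_unlock(&lock);
        memcpy(out, boot_uuid, 16);
    }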
diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c
index 82e882028fcf..6b5cf02c35c8 100644
--- a/drivers/clocksource/acpi_pm.c
+++ b/drivers/clocksource/acpi_pm.c
@@ -23,7 +23,6 @@
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/async.h>
27#include <asm/io.h> 26#include <asm/io.h>
28 27
29/* 28/*
@@ -180,15 +179,17 @@ static int verify_pmtmr_rate(void)
180/* Number of reads we try to get two different values */ 179/* Number of reads we try to get two different values */
181#define ACPI_PM_READ_CHECKS 10000 180#define ACPI_PM_READ_CHECKS 10000
182 181
183static void __init acpi_pm_clocksource_async(void *unused, async_cookie_t cookie) 182static int __init init_acpi_pm_clocksource(void)
184{ 183{
185 cycle_t value1, value2; 184 cycle_t value1, value2;
186 unsigned int i, j = 0; 185 unsigned int i, j = 0;
187 186
187 if (!pmtmr_ioport)
188 return -ENODEV;
188 189
189 /* "verify" this timing source: */ 190 /* "verify" this timing source: */
190 for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) { 191 for (j = 0; j < ACPI_PM_MONOTONICITY_CHECKS; j++) {
191 usleep_range(100 * j, 100 * j + 100); 192 udelay(100 * j);
192 value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm); 193 value1 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
193 for (i = 0; i < ACPI_PM_READ_CHECKS; i++) { 194 for (i = 0; i < ACPI_PM_READ_CHECKS; i++) {
194 value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm); 195 value2 = clocksource_acpi_pm.read(&clocksource_acpi_pm);
@@ -202,34 +203,25 @@ static void __init acpi_pm_clocksource_async(void *unused, async_cookie_t cookie
202 " 0x%#llx, 0x%#llx - aborting.\n", 203 " 0x%#llx, 0x%#llx - aborting.\n",
203 value1, value2); 204 value1, value2);
204 pmtmr_ioport = 0; 205 pmtmr_ioport = 0;
205 return; 206 return -EINVAL;
206 } 207 }
207 if (i == ACPI_PM_READ_CHECKS) { 208 if (i == ACPI_PM_READ_CHECKS) {
208 printk(KERN_INFO "PM-Timer failed consistency check " 209 printk(KERN_INFO "PM-Timer failed consistency check "
209 " (0x%#llx) - aborting.\n", value1); 210 " (0x%#llx) - aborting.\n", value1);
210 pmtmr_ioport = 0; 211 pmtmr_ioport = 0;
211 return; 212 return -ENODEV;
212 } 213 }
213 } 214 }
214 215
215 if (verify_pmtmr_rate() != 0){ 216 if (verify_pmtmr_rate() != 0){
216 pmtmr_ioport = 0; 217 pmtmr_ioport = 0;
217 return; 218 return -ENODEV;
218 } 219 }
219 220
220 clocksource_register_hz(&clocksource_acpi_pm, 221 return clocksource_register_hz(&clocksource_acpi_pm,
221 PMTMR_TICKS_PER_SEC); 222 PMTMR_TICKS_PER_SEC);
222} 223}
223 224
224static int __init init_acpi_pm_clocksource(void)
225{
226 if (!pmtmr_ioport)
227 return -ENODEV;
228
229 async_schedule(acpi_pm_clocksource_async, NULL);
230 return 0;
231}
232
233/* We use fs_initcall because we want the PCI fixups to have run 225/* We use fs_initcall because we want the PCI fixups to have run
234 * but we still need to load before device_initcall 226 * but we still need to load before device_initcall
235 */ 227 */
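
This reverts the async_schedule()-based probe: the verification loop moves back into init_acpi_pm_clocksource() and runs synchronously from the initcall, so failures surface as -ENODEV/-EINVAL return values instead of a worker silently zeroing pmtmr_ioport, and clocksource_register_hz()'s result is propagated. With no sleeping worker, the monotonicity loop returns to udelay() busy-waits. The retained comment explains the fs_initcall placement: late enough for PCI fixups, early enough to beat device_initcall. Skeleton of such a synchronous init, with example names:

    /* Sketch; example_cs, example_hw_present and EXAMPLE_HZ are illustrative. */
    static int __init example_clocksource_init(void)
    {
        if (!example_hw_present())
            return -ENODEV;  /* visible to the initcall core, unlike the async path */

        return clocksource_register_hz(&example_cs, EXAMPLE_HZ);
    }
    fs_initcall(example_clocksource_init);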
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ffbb44685915..5961e6415f08 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -4,6 +4,7 @@
4 4
5config ARM_OMAP2PLUS_CPUFREQ 5config ARM_OMAP2PLUS_CPUFREQ
6 bool "TI OMAP2+" 6 bool "TI OMAP2+"
7 depends on ARCH_OMAP2PLUS
7 default ARCH_OMAP2PLUS 8 default ARCH_OMAP2PLUS
8 select CPU_FREQ_TABLE 9 select CPU_FREQ_TABLE
9 10
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 767bcc31b365..2397f6f451b1 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -332,6 +332,20 @@ struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
332} 332}
333EXPORT_SYMBOL(dma_find_channel); 333EXPORT_SYMBOL(dma_find_channel);
334 334
335/*
336 * net_dma_find_channel - find a channel for net_dma
337 * net_dma has alignment requirements
338 */
339struct dma_chan *net_dma_find_channel(void)
340{
341 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
342 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
343 return NULL;
344
345 return chan;
346}
347EXPORT_SYMBOL(net_dma_find_channel);
348
335/** 349/**
336 * dma_issue_pending_all - flush all pending operations across all channels 350 * dma_issue_pending_all - flush all pending operations across all channels
337 */ 351 */
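
net_dma_find_channel() is dma_find_channel(DMA_MEMCPY) plus an alignment screen: is_dma_copy_aligned(device, 1, 1, 1) asks whether a copy with a 1-byte source offset, destination offset, and length satisfies the device's copy_align constraint. Receive-offload copies land at arbitrary socket-buffer offsets, so net_dma must reject channels that only handle aligned transfers (see the ioat3 copy_align change further down). A hypothetical caller:

    #include <linux/dmaengine.h>
    #include <linux/string.h>

    /* Sketch: use DMA only when a byte-granular channel exists. */
    static void copy_bytes(void *dst, const void *src, size_t len)
    {
        struct dma_chan *chan = net_dma_find_channel();

        if (!chan) {
            memcpy(dst, src, len);  /* plain CPU copy fallback */
            return;
        }
        /* ... prepare and submit a DMA_MEMCPY descriptor on chan ... */
    }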
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 31493d80e0e9..73b2b65cb1de 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -546,9 +546,9 @@ void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
546 PCI_DMA_TODEVICE, flags, 0); 546 PCI_DMA_TODEVICE, flags, 0);
547} 547}
548 548
549unsigned long ioat_get_current_completion(struct ioat_chan_common *chan) 549dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan)
550{ 550{
551 unsigned long phys_complete; 551 dma_addr_t phys_complete;
552 u64 completion; 552 u64 completion;
553 553
554 completion = *chan->completion; 554 completion = *chan->completion;
@@ -569,7 +569,7 @@ unsigned long ioat_get_current_completion(struct ioat_chan_common *chan)
569} 569}
570 570
571bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 571bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
572 unsigned long *phys_complete) 572 dma_addr_t *phys_complete)
573{ 573{
574 *phys_complete = ioat_get_current_completion(chan); 574 *phys_complete = ioat_get_current_completion(chan);
575 if (*phys_complete == chan->last_completion) 575 if (*phys_complete == chan->last_completion)
@@ -580,14 +580,14 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
580 return true; 580 return true;
581} 581}
582 582
583static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete) 583static void __cleanup(struct ioat_dma_chan *ioat, dma_addr_t phys_complete)
584{ 584{
585 struct ioat_chan_common *chan = &ioat->base; 585 struct ioat_chan_common *chan = &ioat->base;
586 struct list_head *_desc, *n; 586 struct list_head *_desc, *n;
587 struct dma_async_tx_descriptor *tx; 587 struct dma_async_tx_descriptor *tx;
588 588
589 dev_dbg(to_dev(chan), "%s: phys_complete: %lx\n", 589 dev_dbg(to_dev(chan), "%s: phys_complete: %llx\n",
590 __func__, phys_complete); 590 __func__, (unsigned long long) phys_complete);
591 list_for_each_safe(_desc, n, &ioat->used_desc) { 591 list_for_each_safe(_desc, n, &ioat->used_desc) {
592 struct ioat_desc_sw *desc; 592 struct ioat_desc_sw *desc;
593 593
@@ -652,7 +652,7 @@ static void __cleanup(struct ioat_dma_chan *ioat, unsigned long phys_complete)
652static void ioat1_cleanup(struct ioat_dma_chan *ioat) 652static void ioat1_cleanup(struct ioat_dma_chan *ioat)
653{ 653{
654 struct ioat_chan_common *chan = &ioat->base; 654 struct ioat_chan_common *chan = &ioat->base;
655 unsigned long phys_complete; 655 dma_addr_t phys_complete;
656 656
657 prefetch(chan->completion); 657 prefetch(chan->completion);
658 658
@@ -698,7 +698,7 @@ static void ioat1_timer_event(unsigned long data)
698 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 698 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
699 spin_unlock_bh(&ioat->desc_lock); 699 spin_unlock_bh(&ioat->desc_lock);
700 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 700 } else if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
701 unsigned long phys_complete; 701 dma_addr_t phys_complete;
702 702
703 spin_lock_bh(&ioat->desc_lock); 703 spin_lock_bh(&ioat->desc_lock);
704 /* if we haven't made progress and we have already 704 /* if we haven't made progress and we have already
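
The ioat completion writeback holds a bus address, so storing it in unsigned long truncates it to 32 bits on 32-bit kernels configured with a 64-bit dma_addr_t; a truncated last_completion can then misidentify the hardware's progress during cleanup. Every holder is switched to dma_addr_t, and since printk has no dedicated dma_addr_t specifier here, the debug print widens explicitly:

    #include <linux/types.h>
    #include <linux/kernel.h>

    static void show_completion(dma_addr_t a)
    {
        /* dma_addr_t may be 32 or 64 bits; widen for %llx portably */
        pr_debug("phys_complete: %llx\n", (unsigned long long)a);
    }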
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index c7888bccd974..5e8fe01ba69d 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -88,7 +88,7 @@ struct ioatdma_device {
88struct ioat_chan_common { 88struct ioat_chan_common {
89 struct dma_chan common; 89 struct dma_chan common;
90 void __iomem *reg_base; 90 void __iomem *reg_base;
91 unsigned long last_completion; 91 dma_addr_t last_completion;
92 spinlock_t cleanup_lock; 92 spinlock_t cleanup_lock;
93 unsigned long state; 93 unsigned long state;
94 #define IOAT_COMPLETION_PENDING 0 94 #define IOAT_COMPLETION_PENDING 0
@@ -310,7 +310,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device);
310void __devexit ioat_dma_remove(struct ioatdma_device *device); 310void __devexit ioat_dma_remove(struct ioatdma_device *device);
311struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, 311struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
312 void __iomem *iobase); 312 void __iomem *iobase);
313unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); 313dma_addr_t ioat_get_current_completion(struct ioat_chan_common *chan);
314void ioat_init_channel(struct ioatdma_device *device, 314void ioat_init_channel(struct ioatdma_device *device,
315 struct ioat_chan_common *chan, int idx); 315 struct ioat_chan_common *chan, int idx);
316enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, 316enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
@@ -318,7 +318,7 @@ enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
318void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 318void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
319 size_t len, struct ioat_dma_descriptor *hw); 319 size_t len, struct ioat_dma_descriptor *hw);
320bool ioat_cleanup_preamble(struct ioat_chan_common *chan, 320bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
321 unsigned long *phys_complete); 321 dma_addr_t *phys_complete);
322void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type); 322void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
323void ioat_kobject_del(struct ioatdma_device *device); 323void ioat_kobject_del(struct ioatdma_device *device);
324extern const struct sysfs_ops ioat_sysfs_ops; 324extern const struct sysfs_ops ioat_sysfs_ops;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index e8e110ff3d96..86895760b598 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -128,7 +128,7 @@ static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
128 spin_unlock_bh(&ioat->prep_lock); 128 spin_unlock_bh(&ioat->prep_lock);
129} 129}
130 130
131static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) 131static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
132{ 132{
133 struct ioat_chan_common *chan = &ioat->base; 133 struct ioat_chan_common *chan = &ioat->base;
134 struct dma_async_tx_descriptor *tx; 134 struct dma_async_tx_descriptor *tx;
@@ -179,7 +179,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
179static void ioat2_cleanup(struct ioat2_dma_chan *ioat) 179static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
180{ 180{
181 struct ioat_chan_common *chan = &ioat->base; 181 struct ioat_chan_common *chan = &ioat->base;
182 unsigned long phys_complete; 182 dma_addr_t phys_complete;
183 183
184 spin_lock_bh(&chan->cleanup_lock); 184 spin_lock_bh(&chan->cleanup_lock);
185 if (ioat_cleanup_preamble(chan, &phys_complete)) 185 if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -260,7 +260,7 @@ int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
260static void ioat2_restart_channel(struct ioat2_dma_chan *ioat) 260static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
261{ 261{
262 struct ioat_chan_common *chan = &ioat->base; 262 struct ioat_chan_common *chan = &ioat->base;
263 unsigned long phys_complete; 263 dma_addr_t phys_complete;
264 264
265 ioat2_quiesce(chan, 0); 265 ioat2_quiesce(chan, 0);
266 if (ioat_cleanup_preamble(chan, &phys_complete)) 266 if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -275,7 +275,7 @@ void ioat2_timer_event(unsigned long data)
275 struct ioat_chan_common *chan = &ioat->base; 275 struct ioat_chan_common *chan = &ioat->base;
276 276
277 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 277 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
278 unsigned long phys_complete; 278 dma_addr_t phys_complete;
279 u64 status; 279 u64 status;
280 280
281 status = ioat_chansts(chan); 281 status = ioat_chansts(chan);
@@ -572,9 +572,9 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
572 */ 572 */
573 struct ioat_chan_common *chan = &ioat->base; 573 struct ioat_chan_common *chan = &ioat->base;
574 struct dma_chan *c = &chan->common; 574 struct dma_chan *c = &chan->common;
575 const u16 curr_size = ioat2_ring_size(ioat); 575 const u32 curr_size = ioat2_ring_size(ioat);
576 const u16 active = ioat2_ring_active(ioat); 576 const u16 active = ioat2_ring_active(ioat);
577 const u16 new_size = 1 << order; 577 const u32 new_size = 1 << order;
578 struct ioat_ring_ent **ring; 578 struct ioat_ring_ent **ring;
579 u16 i; 579 u16 i;
580 580
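
ioat2_ring_size() computes 1 << alloc_order; at order 16 that is 65536, which wraps a u16 to 0 and corrupts the CIRC_CNT()-based space/pending accounting, so the size-bearing values (curr_size and new_size here, and the ring_size/ring_space helpers in dma_v2.h below) widen to u32. The active count stays u16 because it is always strictly below the ring size. The wrap in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t s16 = (uint16_t)(1u << 16);  /* wraps to 0 */
        uint32_t s32 = 1u << 16;              /* 65536 as intended */

        printf("%u %u\n", (unsigned)s16, s32);  /* prints: 0 65536 */
        return 0;
    }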
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index a2c413b2b8d8..be2a55b95c23 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -74,7 +74,7 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c)
74 return container_of(chan, struct ioat2_dma_chan, base); 74 return container_of(chan, struct ioat2_dma_chan, base);
75} 75}
76 76
77static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat) 77static inline u32 ioat2_ring_size(struct ioat2_dma_chan *ioat)
78{ 78{
79 return 1 << ioat->alloc_order; 79 return 1 << ioat->alloc_order;
80} 80}
@@ -91,7 +91,7 @@ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat)
91 return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); 91 return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat));
92} 92}
93 93
94static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) 94static inline u32 ioat2_ring_space(struct ioat2_dma_chan *ioat)
95{ 95{
96 return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); 96 return ioat2_ring_size(ioat) - ioat2_ring_active(ioat);
97} 97}
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 2c4476c0e405..f7f1dc62c15c 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -257,7 +257,7 @@ static bool desc_has_ext(struct ioat_ring_ent *desc)
257 * The difference from the dma_v2.c __cleanup() is that this routine 257 * The difference from the dma_v2.c __cleanup() is that this routine
258 * handles extended descriptors and dma-unmapping raid operations. 258 * handles extended descriptors and dma-unmapping raid operations.
259 */ 259 */
260static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) 260static void __cleanup(struct ioat2_dma_chan *ioat, dma_addr_t phys_complete)
261{ 261{
262 struct ioat_chan_common *chan = &ioat->base; 262 struct ioat_chan_common *chan = &ioat->base;
263 struct ioat_ring_ent *desc; 263 struct ioat_ring_ent *desc;
@@ -314,7 +314,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
314static void ioat3_cleanup(struct ioat2_dma_chan *ioat) 314static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
315{ 315{
316 struct ioat_chan_common *chan = &ioat->base; 316 struct ioat_chan_common *chan = &ioat->base;
317 unsigned long phys_complete; 317 dma_addr_t phys_complete;
318 318
319 spin_lock_bh(&chan->cleanup_lock); 319 spin_lock_bh(&chan->cleanup_lock);
320 if (ioat_cleanup_preamble(chan, &phys_complete)) 320 if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -333,7 +333,7 @@ static void ioat3_cleanup_event(unsigned long data)
333static void ioat3_restart_channel(struct ioat2_dma_chan *ioat) 333static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
334{ 334{
335 struct ioat_chan_common *chan = &ioat->base; 335 struct ioat_chan_common *chan = &ioat->base;
336 unsigned long phys_complete; 336 dma_addr_t phys_complete;
337 337
338 ioat2_quiesce(chan, 0); 338 ioat2_quiesce(chan, 0);
339 if (ioat_cleanup_preamble(chan, &phys_complete)) 339 if (ioat_cleanup_preamble(chan, &phys_complete))
@@ -348,7 +348,7 @@ static void ioat3_timer_event(unsigned long data)
348 struct ioat_chan_common *chan = &ioat->base; 348 struct ioat_chan_common *chan = &ioat->base;
349 349
350 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 350 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
351 unsigned long phys_complete; 351 dma_addr_t phys_complete;
352 u64 status; 352 u64 status;
353 353
354 status = ioat_chansts(chan); 354 status = ioat_chansts(chan);
@@ -1149,6 +1149,44 @@ static int ioat3_reset_hw(struct ioat_chan_common *chan)
1149 return ioat2_reset_sync(chan, msecs_to_jiffies(200)); 1149 return ioat2_reset_sync(chan, msecs_to_jiffies(200));
1150} 1150}
1151 1151
1152static bool is_jf_ioat(struct pci_dev *pdev)
1153{
1154 switch (pdev->device) {
1155 case PCI_DEVICE_ID_INTEL_IOAT_JSF0:
1156 case PCI_DEVICE_ID_INTEL_IOAT_JSF1:
1157 case PCI_DEVICE_ID_INTEL_IOAT_JSF2:
1158 case PCI_DEVICE_ID_INTEL_IOAT_JSF3:
1159 case PCI_DEVICE_ID_INTEL_IOAT_JSF4:
1160 case PCI_DEVICE_ID_INTEL_IOAT_JSF5:
1161 case PCI_DEVICE_ID_INTEL_IOAT_JSF6:
1162 case PCI_DEVICE_ID_INTEL_IOAT_JSF7:
1163 case PCI_DEVICE_ID_INTEL_IOAT_JSF8:
1164 case PCI_DEVICE_ID_INTEL_IOAT_JSF9:
1165 return true;
1166 default:
1167 return false;
1168 }
1169}
1170
1171static bool is_snb_ioat(struct pci_dev *pdev)
1172{
1173 switch (pdev->device) {
1174 case PCI_DEVICE_ID_INTEL_IOAT_SNB0:
1175 case PCI_DEVICE_ID_INTEL_IOAT_SNB1:
1176 case PCI_DEVICE_ID_INTEL_IOAT_SNB2:
1177 case PCI_DEVICE_ID_INTEL_IOAT_SNB3:
1178 case PCI_DEVICE_ID_INTEL_IOAT_SNB4:
1179 case PCI_DEVICE_ID_INTEL_IOAT_SNB5:
1180 case PCI_DEVICE_ID_INTEL_IOAT_SNB6:
1181 case PCI_DEVICE_ID_INTEL_IOAT_SNB7:
1182 case PCI_DEVICE_ID_INTEL_IOAT_SNB8:
1183 case PCI_DEVICE_ID_INTEL_IOAT_SNB9:
1184 return true;
1185 default:
1186 return false;
1187 }
1188}
1189
1152int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) 1190int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1153{ 1191{
1154 struct pci_dev *pdev = device->pdev; 1192 struct pci_dev *pdev = device->pdev;
@@ -1169,6 +1207,9 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
1169 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 1207 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
1170 dma->device_free_chan_resources = ioat2_free_chan_resources; 1208 dma->device_free_chan_resources = ioat2_free_chan_resources;
1171 1209
1210 if (is_jf_ioat(pdev) || is_snb_ioat(pdev))
1211 dma->copy_align = 6;
1212
1172 dma_cap_set(DMA_INTERRUPT, dma->cap_mask); 1213 dma_cap_set(DMA_INTERRUPT, dma->cap_mask);
1173 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock; 1214 dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
1174 1215
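
is_jf_ioat()/is_snb_ioat() match the Jasper Forest and Sandy Bridge IOAT PCI IDs and set copy_align = 6 for them. copy_align is a power-of-two shift, so this declares that memcpy operations are only guaranteed for 64-byte (1 << 6) aligned buffers, which is precisely what the new net_dma_find_channel() screen rejects: such channels fail is_dma_copy_aligned(dma, 1, 1, 1) and net_dma falls back to CPU copies. In sketch form:

    #include <linux/dmaengine.h>

    /* Sketch: copy_align of 6 means 64-byte alignment is required. */
    static bool handles_byte_copies(struct dma_device *dma)
    {
        return is_dma_copy_aligned(dma, 1, 1, 1);  /* 1-byte offsets and length */
    }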
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index da6c4c2c066a..79e3eba29702 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1252,8 +1252,8 @@ iop_adma_pq_zero_sum_self_test(struct iop_adma_device *device)
1252 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2]; 1252 struct page **pq_hw = &pq[IOP_ADMA_NUM_SRC_TEST+2];
1253 /* address conversion buffers (dma_map / page_address) */ 1253 /* address conversion buffers (dma_map / page_address) */
1254 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2]; 1254 void *pq_sw[IOP_ADMA_NUM_SRC_TEST+2];
1255 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST]; 1255 dma_addr_t pq_src[IOP_ADMA_NUM_SRC_TEST+2];
1256 dma_addr_t pq_dest[2]; 1256 dma_addr_t *pq_dest = &pq_src[IOP_ADMA_NUM_SRC_TEST];
1257 1257
1258 int i; 1258 int i;
1259 struct dma_async_tx_descriptor *tx; 1259 struct dma_async_tx_descriptor *tx;
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index edadbdad31d0..e03653d69357 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -430,7 +430,7 @@ config GPIO_ML_IOH
430 430
431config GPIO_SODAVILLE 431config GPIO_SODAVILLE
432 bool "Intel Sodaville GPIO support" 432 bool "Intel Sodaville GPIO support"
433 depends on X86 && PCI && OF && BROKEN 433 depends on X86 && PCI && OF
434 select GPIO_GENERIC 434 select GPIO_GENERIC
435 select GENERIC_IRQ_CHIP 435 select GENERIC_IRQ_CHIP
436 help 436 help
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 9ad1703d1408..ae5d7f12ce66 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -252,7 +252,7 @@ static irqreturn_t adp5588_irq_handler(int irq, void *devid)
252 if (ret < 0) 252 if (ret < 0)
253 memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat)); 253 memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat));
254 254
255 for (bank = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO); 255 for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
256 bank++, bit = 0) { 256 bank++, bit = 0) {
257 pending = dev->irq_stat[bank] & dev->irq_mask[bank]; 257 pending = dev->irq_stat[bank] & dev->irq_mask[bank];
258 258
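
In the adp5588 handler, bit was reset only in the for-loop's increment clause (bank++, bit = 0), which executes after each pass but never before the first one, so the first bank was scanned starting from an uninitialized bit. Adding bit = 0 to the init clause closes that. Reduced to its essence (NBANKS and scan() are illustrative):

    /* BAD: bit is indeterminate on the first iteration. */
    for (bank = 0; bank < NBANKS; bank++, bit = 0)
        scan(bank, bit);

    /* GOOD: both induction variables start defined. */
    for (bank = 0, bit = 0; bank < NBANKS; bank++, bit = 0)
        scan(bank, bit);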
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 46277877b7ec..19d6fc0229c3 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2382,8 +2382,8 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
2382#endif 2382#endif
2383}; 2383};
2384 2384
2385static struct samsung_gpio_chip exynos5_gpios_1[] = {
2386#ifdef CONFIG_ARCH_EXYNOS5 2385#ifdef CONFIG_ARCH_EXYNOS5
2386static struct samsung_gpio_chip exynos5_gpios_1[] = {
2387 { 2387 {
2388 .chip = { 2388 .chip = {
2389 .base = EXYNOS5_GPA0(0), 2389 .base = EXYNOS5_GPA0(0),
@@ -2541,11 +2541,11 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2541 .to_irq = samsung_gpiolib_to_irq, 2541 .to_irq = samsung_gpiolib_to_irq,
2542 }, 2542 },
2543 }, 2543 },
2544#endif
2545}; 2544};
2545#endif
2546 2546
2547static struct samsung_gpio_chip exynos5_gpios_2[] = {
2548#ifdef CONFIG_ARCH_EXYNOS5 2547#ifdef CONFIG_ARCH_EXYNOS5
2548static struct samsung_gpio_chip exynos5_gpios_2[] = {
2549 { 2549 {
2550 .chip = { 2550 .chip = {
2551 .base = EXYNOS5_GPE0(0), 2551 .base = EXYNOS5_GPE0(0),
@@ -2602,11 +2602,11 @@ static struct samsung_gpio_chip exynos5_gpios_2[] = {
2602 2602
2603 }, 2603 },
2604 }, 2604 },
2605#endif
2606}; 2605};
2606#endif
2607 2607
2608static struct samsung_gpio_chip exynos5_gpios_3[] = {
2609#ifdef CONFIG_ARCH_EXYNOS5 2608#ifdef CONFIG_ARCH_EXYNOS5
2609static struct samsung_gpio_chip exynos5_gpios_3[] = {
2610 { 2610 {
2611 .chip = { 2611 .chip = {
2612 .base = EXYNOS5_GPV0(0), 2612 .base = EXYNOS5_GPV0(0),
@@ -2638,11 +2638,11 @@ static struct samsung_gpio_chip exynos5_gpios_3[] = {
2638 .label = "GPV4", 2638 .label = "GPV4",
2639 }, 2639 },
2640 }, 2640 },
2641#endif
2642}; 2641};
2642#endif
2643 2643
2644static struct samsung_gpio_chip exynos5_gpios_4[] = {
2645#ifdef CONFIG_ARCH_EXYNOS5 2644#ifdef CONFIG_ARCH_EXYNOS5
2645static struct samsung_gpio_chip exynos5_gpios_4[] = {
2646 { 2646 {
2647 .chip = { 2647 .chip = {
2648 .base = EXYNOS5_GPZ(0), 2648 .base = EXYNOS5_GPZ(0),
@@ -2650,8 +2650,8 @@ static struct samsung_gpio_chip exynos5_gpios_4[] = {
2650 .label = "GPZ", 2650 .label = "GPZ",
2651 }, 2651 },
2652 }, 2652 },
2653#endif
2654}; 2653};
2654#endif
2655 2655
2656 2656
2657#if defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF) 2657#if defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF)
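
The gpio-samsung hunks move the #ifdef CONFIG_ARCH_EXYNOS5 guards from inside each initializer to around the whole definition. Before, a non-Exynos5 build still compiled static struct samsung_gpio_chip exynos5_gpios_N[] = { }; — a zero-length array, which is a GCC extension and leaves useless objects in the image; now the arrays simply do not exist unless the option is set. The shape of the fix, with struct item and CONFIG_FOO standing in for the real names:

    /* Before: always defined, possibly with zero elements. */
    static struct item table[] = {
    #ifdef CONFIG_FOO
        { .id = 1 },
    #endif
    };

    /* After: defined only when the option is enabled. */
    #ifdef CONFIG_FOO
    static struct item table[] = {
        { .id = 1 },
    };
    #endif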
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 9ba15d31d242..031e5d24837d 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -41,7 +41,7 @@
41struct sdv_gpio_chip_data { 41struct sdv_gpio_chip_data {
42 int irq_base; 42 int irq_base;
43 void __iomem *gpio_pub_base; 43 void __iomem *gpio_pub_base;
44 struct irq_domain id; 44 struct irq_domain *id;
45 struct irq_chip_generic *gc; 45 struct irq_chip_generic *gc;
46 struct bgpio_chip bgpio; 46 struct bgpio_chip bgpio;
47}; 47};
@@ -51,10 +51,9 @@ static int sdv_gpio_pub_set_type(struct irq_data *d, unsigned int type)
51 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); 51 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
52 struct sdv_gpio_chip_data *sd = gc->private; 52 struct sdv_gpio_chip_data *sd = gc->private;
53 void __iomem *type_reg; 53 void __iomem *type_reg;
54 u32 irq_offs = d->irq - sd->irq_base;
55 u32 reg; 54 u32 reg;
56 55
57 if (irq_offs < 8) 56 if (d->hwirq < 8)
58 type_reg = sd->gpio_pub_base + GPIT1R0; 57 type_reg = sd->gpio_pub_base + GPIT1R0;
59 else 58 else
60 type_reg = sd->gpio_pub_base + GPIT1R1; 59 type_reg = sd->gpio_pub_base + GPIT1R1;
@@ -63,11 +62,11 @@ static int sdv_gpio_pub_set_type(struct irq_data *d, unsigned int type)
63 62
64 switch (type) { 63 switch (type) {
65 case IRQ_TYPE_LEVEL_HIGH: 64 case IRQ_TYPE_LEVEL_HIGH:
66 reg &= ~BIT(4 * (irq_offs % 8)); 65 reg &= ~BIT(4 * (d->hwirq % 8));
67 break; 66 break;
68 67
69 case IRQ_TYPE_LEVEL_LOW: 68 case IRQ_TYPE_LEVEL_LOW:
70 reg |= BIT(4 * (irq_offs % 8)); 69 reg |= BIT(4 * (d->hwirq % 8));
71 break; 70 break;
72 71
73 default: 72 default:
@@ -91,7 +90,7 @@ static irqreturn_t sdv_gpio_pub_irq_handler(int irq, void *data)
91 u32 irq_bit = __fls(irq_stat); 90 u32 irq_bit = __fls(irq_stat);
92 91
93 irq_stat &= ~BIT(irq_bit); 92 irq_stat &= ~BIT(irq_bit);
94 generic_handle_irq(sd->irq_base + irq_bit); 93 generic_handle_irq(irq_find_mapping(sd->id, irq_bit));
95 } 94 }
96 95
97 return IRQ_HANDLED; 96 return IRQ_HANDLED;
@@ -127,7 +126,7 @@ static int sdv_xlate(struct irq_domain *h, struct device_node *node,
127} 126}
128 127
129static struct irq_domain_ops irq_domain_sdv_ops = { 128static struct irq_domain_ops irq_domain_sdv_ops = {
130 .dt_translate = sdv_xlate, 129 .xlate = sdv_xlate,
131}; 130};
132 131
133static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd, 132static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
@@ -149,10 +148,6 @@ static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
149 if (ret) 148 if (ret)
150 goto out_free_desc; 149 goto out_free_desc;
151 150
152 sd->id.irq_base = sd->irq_base;
153 sd->id.of_node = of_node_get(pdev->dev.of_node);
154 sd->id.ops = &irq_domain_sdv_ops;
155
156 /* 151 /*
157 * This gpio irq controller latches level irqs. Testing shows that if 152 * This gpio irq controller latches level irqs. Testing shows that if
158 * we unmask & ACK the IRQ before the source of the interrupt is gone 153 * we unmask & ACK the IRQ before the source of the interrupt is gone
@@ -179,7 +174,10 @@ static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
179 IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST, 174 IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
180 IRQ_LEVEL | IRQ_NOPROBE); 175 IRQ_LEVEL | IRQ_NOPROBE);
181 176
182 irq_domain_add(&sd->id); 177 sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
178 sd->irq_base, 0, &irq_domain_sdv_ops, sd);
179 if (!sd->id)
180 goto out_free_irq;
183 return 0; 181 return 0;
184out_free_irq: 182out_free_irq:
185 free_irq(pdev->irq, sd); 183 free_irq(pdev->irq, sd);
@@ -260,7 +258,6 @@ static void sdv_gpio_remove(struct pci_dev *pdev)
260{ 258{
261 struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev); 259 struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev);
262 260
263 irq_domain_del(&sd->id);
264 free_irq(pdev->irq, sd); 261 free_irq(pdev->irq, sd);
265 irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS); 262 irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
266 263
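
gpio-sodaville is converted to the reworked irq_domain API: instead of embedding a struct irq_domain and calling irq_domain_add()/irq_domain_del(), it keeps a pointer returned by irq_domain_add_legacy(), which maps hwirqs 0..SDV_NUM_PUB_GPIOS-1 onto the pre-allocated Linux IRQ range starting at sd->irq_base. With a domain in place, irq_data already carries the hardware number, so d->hwirq replaces the d->irq - sd->irq_base arithmetic, the .dt_translate hook becomes .xlate, and the demux handler translates hwirq to virq via irq_find_mapping(). Handler shape, with the register read left hypothetical:

    /* Sketch of a domain-based demux loop; read_pending() is illustrative. */
    static irqreturn_t demux(int irq, void *data)
    {
        struct sdv_gpio_chip_data *sd = data;
        u32 stat = read_pending(sd);

        while (stat) {
            u32 bit = __fls(stat);

            stat &= ~BIT(bit);
            generic_handle_irq(irq_find_mapping(sd->id, bit));
        }
        return IRQ_HANDLED;
    }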
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 4a3a5f72ed4a..de8d2090bce3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -34,14 +34,14 @@
34static int lowlevel_buffer_allocate(struct drm_device *dev, 34static int lowlevel_buffer_allocate(struct drm_device *dev,
35 unsigned int flags, struct exynos_drm_gem_buf *buf) 35 unsigned int flags, struct exynos_drm_gem_buf *buf)
36{ 36{
37 dma_addr_t start_addr, end_addr; 37 dma_addr_t start_addr;
38 unsigned int npages, page_size, i = 0; 38 unsigned int npages, page_size, i = 0;
39 struct scatterlist *sgl; 39 struct scatterlist *sgl;
40 int ret = 0; 40 int ret = 0;
41 41
42 DRM_DEBUG_KMS("%s\n", __FILE__); 42 DRM_DEBUG_KMS("%s\n", __FILE__);
43 43
44 if (flags & EXYNOS_BO_NONCONTIG) { 44 if (IS_NONCONTIG_BUFFER(flags)) {
45 DRM_DEBUG_KMS("not support allocation type.\n"); 45 DRM_DEBUG_KMS("not support allocation type.\n");
46 return -EINVAL; 46 return -EINVAL;
47 } 47 }
@@ -52,13 +52,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
52 } 52 }
53 53
54 if (buf->size >= SZ_1M) { 54 if (buf->size >= SZ_1M) {
55 npages = (buf->size >> SECTION_SHIFT) + 1; 55 npages = buf->size >> SECTION_SHIFT;
56 page_size = SECTION_SIZE; 56 page_size = SECTION_SIZE;
57 } else if (buf->size >= SZ_64K) { 57 } else if (buf->size >= SZ_64K) {
58 npages = (buf->size >> 16) + 1; 58 npages = buf->size >> 16;
59 page_size = SZ_64K; 59 page_size = SZ_64K;
60 } else { 60 } else {
61 npages = (buf->size >> PAGE_SHIFT) + 1; 61 npages = buf->size >> PAGE_SHIFT;
62 page_size = PAGE_SIZE; 62 page_size = PAGE_SIZE;
63 } 63 }
64 64
@@ -76,26 +76,13 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
76 return -ENOMEM; 76 return -ENOMEM;
77 } 77 }
78 78
79 buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size, 79 buf->kvaddr = dma_alloc_writecombine(dev->dev, buf->size,
80 &buf->dma_addr, GFP_KERNEL); 80 &buf->dma_addr, GFP_KERNEL);
81 if (!buf->kvaddr) { 81 if (!buf->kvaddr) {
82 DRM_ERROR("failed to allocate buffer.\n"); 82 DRM_ERROR("failed to allocate buffer.\n");
83 ret = -ENOMEM; 83 ret = -ENOMEM;
84 goto err1; 84 goto err1;
85 } 85 }
86
87 start_addr = buf->dma_addr;
88 end_addr = buf->dma_addr + buf->size;
89
90 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
91 if (!buf->pages) {
92 DRM_ERROR("failed to allocate pages.\n");
93 ret = -ENOMEM;
94 goto err2;
95 }
96
97 start_addr = buf->dma_addr;
98 end_addr = buf->dma_addr + buf->size;
99 86
100 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL); 87 buf->pages = kzalloc(sizeof(struct page) * npages, GFP_KERNEL);
101 if (!buf->pages) { 88 if (!buf->pages) {
@@ -105,23 +92,17 @@ static int lowlevel_buffer_allocate(struct drm_device *dev,
105 } 92 }
106 93
107 sgl = buf->sgt->sgl; 94 sgl = buf->sgt->sgl;
95 start_addr = buf->dma_addr;
108 96
109 while (i < npages) { 97 while (i < npages) {
110 buf->pages[i] = phys_to_page(start_addr); 98 buf->pages[i] = phys_to_page(start_addr);
111 sg_set_page(sgl, buf->pages[i], page_size, 0); 99 sg_set_page(sgl, buf->pages[i], page_size, 0);
112 sg_dma_address(sgl) = start_addr; 100 sg_dma_address(sgl) = start_addr;
113 start_addr += page_size; 101 start_addr += page_size;
114 if (end_addr - start_addr < page_size)
115 break;
116 sgl = sg_next(sgl); 102 sgl = sg_next(sgl);
117 i++; 103 i++;
118 } 104 }
119 105
120 buf->pages[i] = phys_to_page(start_addr);
121
122 sgl = sg_next(sgl);
123 sg_set_page(sgl, buf->pages[i+1], end_addr - start_addr, 0);
124
125 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n", 106 DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
126 (unsigned long)buf->kvaddr, 107 (unsigned long)buf->kvaddr,
127 (unsigned long)buf->dma_addr, 108 (unsigned long)buf->dma_addr,
@@ -150,7 +131,7 @@ static void lowlevel_buffer_deallocate(struct drm_device *dev,
150 * non-continuous memory would be released by exynos 131 * non-continuous memory would be released by exynos
151 * gem framework. 132 * gem framework.
152 */ 133 */
153 if (flags & EXYNOS_BO_NONCONTIG) { 134 if (IS_NONCONTIG_BUFFER(flags)) {
154 DRM_DEBUG_KMS("not support allocation type.\n"); 135 DRM_DEBUG_KMS("not support allocation type.\n");
155 return; 136 return;
156 } 137 }
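
lowlevel_buffer_allocate() previously over-allocated with a +1 chunk count and patched the scatterlist tail by hand (the deleted end_addr bookkeeping, which the pre-image appears to have carried twice); now that exynos_drm_gem_create() rounds the requested size up to the chunk granularity first (see roundup_gem_size() below), buf->size divides evenly and npages = size >> shift is exact, so the fill loop is uniform. The arithmetic it relies on:

    #include <stddef.h>

    /* Sketch: exact chunk count, valid once size is pre-rounded. */
    static size_t nchunks(size_t size, unsigned int shift)
    {
        /* caller guarantees size is a multiple of 1UL << shift */
        return size >> shift;
    }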
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
index 411832e8e17a..eaf630dc5dba 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_core.c
@@ -54,16 +54,18 @@ static int exynos_drm_subdrv_probe(struct drm_device *dev,
54 * 54 *
55 * P.S. note that this driver is considered for modularization. 55 * P.S. note that this driver is considered for modularization.
56 */ 56 */
57 ret = subdrv->probe(dev, subdrv->manager.dev); 57 ret = subdrv->probe(dev, subdrv->dev);
58 if (ret) 58 if (ret)
59 return ret; 59 return ret;
60 } 60 }
61 61
62 if (subdrv->is_local) 62 if (!subdrv->manager)
63 return 0; 63 return 0;
64 64
65 subdrv->manager->dev = subdrv->dev;
66
65 /* create and initialize a encoder for this sub driver. */ 67 /* create and initialize a encoder for this sub driver. */
66 encoder = exynos_drm_encoder_create(dev, &subdrv->manager, 68 encoder = exynos_drm_encoder_create(dev, subdrv->manager,
67 (1 << MAX_CRTC) - 1); 69 (1 << MAX_CRTC) - 1);
68 if (!encoder) { 70 if (!encoder) {
69 DRM_ERROR("failed to create encoder\n"); 71 DRM_ERROR("failed to create encoder\n");
@@ -186,7 +188,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
186 188
187 list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { 189 list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
188 if (subdrv->open) { 190 if (subdrv->open) {
189 ret = subdrv->open(dev, subdrv->manager.dev, file); 191 ret = subdrv->open(dev, subdrv->dev, file);
190 if (ret) 192 if (ret)
191 goto err; 193 goto err;
192 } 194 }
@@ -197,7 +199,7 @@ int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
197err: 199err:
198 list_for_each_entry_reverse(subdrv, &subdrv->list, list) { 200 list_for_each_entry_reverse(subdrv, &subdrv->list, list) {
199 if (subdrv->close) 201 if (subdrv->close)
200 subdrv->close(dev, subdrv->manager.dev, file); 202 subdrv->close(dev, subdrv->dev, file);
201 } 203 }
202 return ret; 204 return ret;
203} 205}
@@ -209,7 +211,7 @@ void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
209 211
210 list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { 212 list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
211 if (subdrv->close) 213 if (subdrv->close)
212 subdrv->close(dev, subdrv->manager.dev, file); 214 subdrv->close(dev, subdrv->dev, file);
213 } 215 }
214} 216}
215EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close); 217EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index fbd0a232c93d..1d814175cd49 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -225,24 +225,25 @@ struct exynos_drm_private {
225 * Exynos drm sub driver structure. 225 * Exynos drm sub driver structure.
226 * 226 *
227 * @list: sub driver has its own list object to register to exynos drm driver. 227 * @list: sub driver has its own list object to register to exynos drm driver.
228 * @dev: pointer to device object for subdrv device driver.
228 * @drm_dev: pointer to drm_device and this pointer would be set 229 * @drm_dev: pointer to drm_device and this pointer would be set
229 * when sub driver calls exynos_drm_subdrv_register(). 230 * when sub driver calls exynos_drm_subdrv_register().
230 * @is_local: appear encoder and connector disrelated device. 231 * @manager: subdrv has its own manager to control a hardware appropriately
232 * and we can access a hardware drawing on this manager.
231 * @probe: this callback would be called by exynos drm driver after 233 * @probe: this callback would be called by exynos drm driver after
232 * subdrv is registered to it. 234 * subdrv is registered to it.
233 * @remove: this callback is used to release resources created 235 * @remove: this callback is used to release resources created
234 * by probe callback. 236 * by probe callback.
235 * @open: this would be called with drm device file open. 237 * @open: this would be called with drm device file open.
236 * @close: this would be called with drm device file close. 238 * @close: this would be called with drm device file close.
237 * @manager: subdrv has its own manager to control a hardware appropriately
238 * and we can access a hardware drawing on this manager.
239 * @encoder: encoder object owned by this sub driver. 239 * @encoder: encoder object owned by this sub driver.
240 * @connector: connector object owned by this sub driver. 240 * @connector: connector object owned by this sub driver.
241 */ 241 */
242struct exynos_drm_subdrv { 242struct exynos_drm_subdrv {
243 struct list_head list; 243 struct list_head list;
244 struct device *dev;
244 struct drm_device *drm_dev; 245 struct drm_device *drm_dev;
245 bool is_local; 246 struct exynos_drm_manager *manager;
246 247
247 int (*probe)(struct drm_device *drm_dev, struct device *dev); 248 int (*probe)(struct drm_device *drm_dev, struct device *dev);
248 void (*remove)(struct drm_device *dev); 249 void (*remove)(struct drm_device *dev);
@@ -251,7 +252,6 @@ struct exynos_drm_subdrv {
251 void (*close)(struct drm_device *drm_dev, struct device *dev, 252 void (*close)(struct drm_device *drm_dev, struct device *dev,
252 struct drm_file *file); 253 struct drm_file *file);
253 254
254 struct exynos_drm_manager manager;
255 struct drm_encoder *encoder; 255 struct drm_encoder *encoder;
256 struct drm_connector *connector; 256 struct drm_connector *connector;
257}; 257};
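
The subdrv structure now records its own device (dev) and points at a manager instead of embedding one. A NULL manager takes over the job of the removed is_local flag — it marks a subdrv with no CRTC hardware, for which no encoder/connector should be created — and drivers can share one static manager definition instead of populating the embedded copy field by field at probe time (see the fimd_manager hunk below). The registration path condenses to:

    /* Sketch of the new NULL-manager convention in subdrv probe. */
    if (!subdrv->manager)
        return 0;                        /* display-less subdrv: skip encoder setup */

    subdrv->manager->dev = subdrv->dev;  /* bind the device to the shared manager */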
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index ecb6db229700..29fdbfeb43cb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -172,7 +172,7 @@ static void fimd_dpms(struct device *subdrv_dev, int mode)
172static void fimd_apply(struct device *subdrv_dev) 172static void fimd_apply(struct device *subdrv_dev)
173{ 173{
174 struct fimd_context *ctx = get_fimd_context(subdrv_dev); 174 struct fimd_context *ctx = get_fimd_context(subdrv_dev);
175 struct exynos_drm_manager *mgr = &ctx->subdrv.manager; 175 struct exynos_drm_manager *mgr = ctx->subdrv.manager;
176 struct exynos_drm_manager_ops *mgr_ops = mgr->ops; 176 struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
177 struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops; 177 struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
178 struct fimd_win_data *win_data; 178 struct fimd_win_data *win_data;
@@ -577,6 +577,13 @@ static struct exynos_drm_overlay_ops fimd_overlay_ops = {
577 .disable = fimd_win_disable, 577 .disable = fimd_win_disable,
578}; 578};
579 579
580static struct exynos_drm_manager fimd_manager = {
581 .pipe = -1,
582 .ops = &fimd_manager_ops,
583 .overlay_ops = &fimd_overlay_ops,
584 .display_ops = &fimd_display_ops,
585};
586
580static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc) 587static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
581{ 588{
582 struct exynos_drm_private *dev_priv = drm_dev->dev_private; 589 struct exynos_drm_private *dev_priv = drm_dev->dev_private;
@@ -628,7 +635,7 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
628 struct fimd_context *ctx = (struct fimd_context *)dev_id; 635 struct fimd_context *ctx = (struct fimd_context *)dev_id;
629 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 636 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
630 struct drm_device *drm_dev = subdrv->drm_dev; 637 struct drm_device *drm_dev = subdrv->drm_dev;
631 struct exynos_drm_manager *manager = &subdrv->manager; 638 struct exynos_drm_manager *manager = subdrv->manager;
632 u32 val; 639 u32 val;
633 640
634 val = readl(ctx->regs + VIDINTCON1); 641 val = readl(ctx->regs + VIDINTCON1);
@@ -744,7 +751,7 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
744static int fimd_power_on(struct fimd_context *ctx, bool enable) 751static int fimd_power_on(struct fimd_context *ctx, bool enable)
745{ 752{
746 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 753 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
747 struct device *dev = subdrv->manager.dev; 754 struct device *dev = subdrv->dev;
748 755
749 DRM_DEBUG_KMS("%s\n", __FILE__); 756 DRM_DEBUG_KMS("%s\n", __FILE__);
750 757
@@ -867,13 +874,10 @@ static int __devinit fimd_probe(struct platform_device *pdev)
867 874
868 subdrv = &ctx->subdrv; 875 subdrv = &ctx->subdrv;
869 876
877 subdrv->dev = dev;
878 subdrv->manager = &fimd_manager;
870 subdrv->probe = fimd_subdrv_probe; 879 subdrv->probe = fimd_subdrv_probe;
871 subdrv->remove = fimd_subdrv_remove; 880 subdrv->remove = fimd_subdrv_remove;
872 subdrv->manager.pipe = -1;
873 subdrv->manager.ops = &fimd_manager_ops;
874 subdrv->manager.overlay_ops = &fimd_overlay_ops;
875 subdrv->manager.display_ops = &fimd_display_ops;
876 subdrv->manager.dev = dev;
877 881
878 mutex_init(&ctx->lock); 882 mutex_init(&ctx->lock);
879 883
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fa1aa94a3d8e..26d51979116b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -56,9 +56,28 @@ static unsigned int convert_to_vm_err_msg(int msg)
56 return out_msg; 56 return out_msg;
57} 57}
58 58
59static unsigned int mask_gem_flags(unsigned int flags) 59static int check_gem_flags(unsigned int flags)
60{ 60{
61 return flags &= EXYNOS_BO_NONCONTIG; 61 if (flags & ~(EXYNOS_BO_MASK)) {
62 DRM_ERROR("invalid flags.\n");
63 return -EINVAL;
64 }
65
66 return 0;
67}
68
69static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
70{
71 if (!IS_NONCONTIG_BUFFER(flags)) {
72 if (size >= SZ_1M)
73 return roundup(size, SECTION_SIZE);
74 else if (size >= SZ_64K)
75 return roundup(size, SZ_64K);
76 else
77 goto out;
78 }
79out:
80 return roundup(size, PAGE_SIZE);
62} 81}
63 82
64static struct page **exynos_gem_get_pages(struct drm_gem_object *obj, 83static struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
@@ -319,10 +338,17 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
319 struct exynos_drm_gem_buf *buf; 338 struct exynos_drm_gem_buf *buf;
320 int ret; 339 int ret;
321 340
322 size = roundup(size, PAGE_SIZE); 341 if (!size) {
323 DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size); 342 DRM_ERROR("invalid size.\n");
343 return ERR_PTR(-EINVAL);
344 }
324 345
325 flags = mask_gem_flags(flags); 346 size = roundup_gem_size(size, flags);
347 DRM_DEBUG_KMS("%s\n", __FILE__);
348
349 ret = check_gem_flags(flags);
350 if (ret)
351 return ERR_PTR(ret);
326 352
327 buf = exynos_drm_init_buf(dev, size); 353 buf = exynos_drm_init_buf(dev, size);
328 if (!buf) 354 if (!buf)
@@ -331,7 +357,7 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
331 exynos_gem_obj = exynos_drm_gem_init(dev, size); 357 exynos_gem_obj = exynos_drm_gem_init(dev, size);
332 if (!exynos_gem_obj) { 358 if (!exynos_gem_obj) {
333 ret = -ENOMEM; 359 ret = -ENOMEM;
334 goto err; 360 goto err_fini_buf;
335 } 361 }
336 362
337 exynos_gem_obj->buffer = buf; 363 exynos_gem_obj->buffer = buf;
@@ -347,18 +373,19 @@ struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
347 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base); 373 ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
348 if (ret < 0) { 374 if (ret < 0) {
349 drm_gem_object_release(&exynos_gem_obj->base); 375 drm_gem_object_release(&exynos_gem_obj->base);
350 goto err; 376 goto err_fini_buf;
351 } 377 }
352 } else { 378 } else {
353 ret = exynos_drm_alloc_buf(dev, buf, flags); 379 ret = exynos_drm_alloc_buf(dev, buf, flags);
354 if (ret < 0) { 380 if (ret < 0) {
355 drm_gem_object_release(&exynos_gem_obj->base); 381 drm_gem_object_release(&exynos_gem_obj->base);
356 goto err; 382 goto err_fini_buf;
357 } 383 }
358 } 384 }
359 385
360 return exynos_gem_obj; 386 return exynos_gem_obj;
361err: 387
388err_fini_buf:
362 exynos_drm_fini_buf(dev, buf); 389 exynos_drm_fini_buf(dev, buf);
363 return ERR_PTR(ret); 390 return ERR_PTR(ret);
364} 391}
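
exynos_drm_gem_create() now validates before allocating: a zero size is rejected outright, check_gem_flags() returns -EINVAL for any bit outside EXYNOS_BO_MASK where the old mask_gem_flags() silently discarded unknown bits, and roundup_gem_size() rounds contiguous buffers to the same SECTION_SIZE / SZ_64K / PAGE_SIZE tiers the low-level allocator assumes, which is what made the exact npages computation above safe. The error label is also renamed err_fini_buf to say what it unwinds. The reject-don't-mask choice in isolation:

    /* Sketch: refuse unknown flag bits instead of masking them away. */
    static int check_flags(unsigned int flags, unsigned int mask)
    {
        if (flags & ~mask)
            return -EINVAL;  /* caller asked for something we don't support */
        return 0;
    }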
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e40fbad8b705..4ed842039505 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -29,6 +29,8 @@
29#define to_exynos_gem_obj(x) container_of(x,\ 29#define to_exynos_gem_obj(x) container_of(x,\
30 struct exynos_drm_gem_obj, base) 30 struct exynos_drm_gem_obj, base)
31 31
32#define IS_NONCONTIG_BUFFER(f) (f & EXYNOS_BO_NONCONTIG)
33
32/* 34/*
33 * exynos drm gem buffer structure. 35 * exynos drm gem buffer structure.
34 * 36 *
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index 14eb26b0ba1c..3424463676e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -30,9 +30,8 @@
30 struct drm_hdmi_context, subdrv); 30 struct drm_hdmi_context, subdrv);
31 31
 32/* these callback points should be set by specific drivers. */ 32/* these callback points should be set by specific drivers. */
33static struct exynos_hdmi_display_ops *hdmi_display_ops; 33static struct exynos_hdmi_ops *hdmi_ops;
34static struct exynos_hdmi_manager_ops *hdmi_manager_ops; 34static struct exynos_mixer_ops *mixer_ops;
35static struct exynos_hdmi_overlay_ops *hdmi_overlay_ops;
36 35
37struct drm_hdmi_context { 36struct drm_hdmi_context {
38 struct exynos_drm_subdrv subdrv; 37 struct exynos_drm_subdrv subdrv;
@@ -40,31 +39,20 @@ struct drm_hdmi_context {
40 struct exynos_drm_hdmi_context *mixer_ctx; 39 struct exynos_drm_hdmi_context *mixer_ctx;
41}; 40};
42 41
43void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops 42void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops)
44 *display_ops)
45{ 43{
46 DRM_DEBUG_KMS("%s\n", __FILE__); 44 DRM_DEBUG_KMS("%s\n", __FILE__);
47 45
48 if (display_ops) 46 if (ops)
49 hdmi_display_ops = display_ops; 47 hdmi_ops = ops;
50} 48}
51 49
52void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops 50void exynos_mixer_ops_register(struct exynos_mixer_ops *ops)
53 *manager_ops)
54{ 51{
55 DRM_DEBUG_KMS("%s\n", __FILE__); 52 DRM_DEBUG_KMS("%s\n", __FILE__);
56 53
57 if (manager_ops) 54 if (ops)
58 hdmi_manager_ops = manager_ops; 55 mixer_ops = ops;
59}
60
61void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
62 *overlay_ops)
63{
64 DRM_DEBUG_KMS("%s\n", __FILE__);
65
66 if (overlay_ops)
67 hdmi_overlay_ops = overlay_ops;
68} 56}
69 57
70static bool drm_hdmi_is_connected(struct device *dev) 58static bool drm_hdmi_is_connected(struct device *dev)
@@ -73,8 +61,8 @@ static bool drm_hdmi_is_connected(struct device *dev)
73 61
74 DRM_DEBUG_KMS("%s\n", __FILE__); 62 DRM_DEBUG_KMS("%s\n", __FILE__);
75 63
76 if (hdmi_display_ops && hdmi_display_ops->is_connected) 64 if (hdmi_ops && hdmi_ops->is_connected)
77 return hdmi_display_ops->is_connected(ctx->hdmi_ctx->ctx); 65 return hdmi_ops->is_connected(ctx->hdmi_ctx->ctx);
78 66
79 return false; 67 return false;
80} 68}
@@ -86,9 +74,9 @@ static int drm_hdmi_get_edid(struct device *dev,
86 74
87 DRM_DEBUG_KMS("%s\n", __FILE__); 75 DRM_DEBUG_KMS("%s\n", __FILE__);
88 76
89 if (hdmi_display_ops && hdmi_display_ops->get_edid) 77 if (hdmi_ops && hdmi_ops->get_edid)
90 return hdmi_display_ops->get_edid(ctx->hdmi_ctx->ctx, 78 return hdmi_ops->get_edid(ctx->hdmi_ctx->ctx, connector, edid,
91 connector, edid, len); 79 len);
92 80
93 return 0; 81 return 0;
94} 82}
@@ -99,9 +87,8 @@ static int drm_hdmi_check_timing(struct device *dev, void *timing)
99 87
100 DRM_DEBUG_KMS("%s\n", __FILE__); 88 DRM_DEBUG_KMS("%s\n", __FILE__);
101 89
102 if (hdmi_display_ops && hdmi_display_ops->check_timing) 90 if (hdmi_ops && hdmi_ops->check_timing)
103 return hdmi_display_ops->check_timing(ctx->hdmi_ctx->ctx, 91 return hdmi_ops->check_timing(ctx->hdmi_ctx->ctx, timing);
104 timing);
105 92
106 return 0; 93 return 0;
107} 94}
@@ -112,8 +99,8 @@ static int drm_hdmi_power_on(struct device *dev, int mode)
112 99
113 DRM_DEBUG_KMS("%s\n", __FILE__); 100 DRM_DEBUG_KMS("%s\n", __FILE__);
114 101
115 if (hdmi_display_ops && hdmi_display_ops->power_on) 102 if (hdmi_ops && hdmi_ops->power_on)
116 return hdmi_display_ops->power_on(ctx->hdmi_ctx->ctx, mode); 103 return hdmi_ops->power_on(ctx->hdmi_ctx->ctx, mode);
117 104
118 return 0; 105 return 0;
119} 106}
@@ -130,13 +117,13 @@ static int drm_hdmi_enable_vblank(struct device *subdrv_dev)
130{ 117{
131 struct drm_hdmi_context *ctx = to_context(subdrv_dev); 118 struct drm_hdmi_context *ctx = to_context(subdrv_dev);
132 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 119 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
133 struct exynos_drm_manager *manager = &subdrv->manager; 120 struct exynos_drm_manager *manager = subdrv->manager;
134 121
135 DRM_DEBUG_KMS("%s\n", __FILE__); 122 DRM_DEBUG_KMS("%s\n", __FILE__);
136 123
137 if (hdmi_overlay_ops && hdmi_overlay_ops->enable_vblank) 124 if (mixer_ops && mixer_ops->enable_vblank)
138 return hdmi_overlay_ops->enable_vblank(ctx->mixer_ctx->ctx, 125 return mixer_ops->enable_vblank(ctx->mixer_ctx->ctx,
139 manager->pipe); 126 manager->pipe);
140 127
141 return 0; 128 return 0;
142} 129}
@@ -147,8 +134,8 @@ static void drm_hdmi_disable_vblank(struct device *subdrv_dev)
147 134
148 DRM_DEBUG_KMS("%s\n", __FILE__); 135 DRM_DEBUG_KMS("%s\n", __FILE__);
149 136
150 if (hdmi_overlay_ops && hdmi_overlay_ops->disable_vblank) 137 if (mixer_ops && mixer_ops->disable_vblank)
151 return hdmi_overlay_ops->disable_vblank(ctx->mixer_ctx->ctx); 138 return mixer_ops->disable_vblank(ctx->mixer_ctx->ctx);
152} 139}
153 140
154static void drm_hdmi_mode_fixup(struct device *subdrv_dev, 141static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
@@ -160,9 +147,9 @@ static void drm_hdmi_mode_fixup(struct device *subdrv_dev,
160 147
161 DRM_DEBUG_KMS("%s\n", __FILE__); 148 DRM_DEBUG_KMS("%s\n", __FILE__);
162 149
163 if (hdmi_manager_ops && hdmi_manager_ops->mode_fixup) 150 if (hdmi_ops && hdmi_ops->mode_fixup)
164 hdmi_manager_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, 151 hdmi_ops->mode_fixup(ctx->hdmi_ctx->ctx, connector, mode,
165 mode, adjusted_mode); 152 adjusted_mode);
166} 153}
167 154
168static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode) 155static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
@@ -171,8 +158,8 @@ static void drm_hdmi_mode_set(struct device *subdrv_dev, void *mode)
171 158
172 DRM_DEBUG_KMS("%s\n", __FILE__); 159 DRM_DEBUG_KMS("%s\n", __FILE__);
173 160
174 if (hdmi_manager_ops && hdmi_manager_ops->mode_set) 161 if (hdmi_ops && hdmi_ops->mode_set)
175 hdmi_manager_ops->mode_set(ctx->hdmi_ctx->ctx, mode); 162 hdmi_ops->mode_set(ctx->hdmi_ctx->ctx, mode);
176} 163}
177 164
178static void drm_hdmi_get_max_resol(struct device *subdrv_dev, 165static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
@@ -182,9 +169,8 @@ static void drm_hdmi_get_max_resol(struct device *subdrv_dev,
182 169
183 DRM_DEBUG_KMS("%s\n", __FILE__); 170 DRM_DEBUG_KMS("%s\n", __FILE__);
184 171
185 if (hdmi_manager_ops && hdmi_manager_ops->get_max_resol) 172 if (hdmi_ops && hdmi_ops->get_max_resol)
186 hdmi_manager_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, 173 hdmi_ops->get_max_resol(ctx->hdmi_ctx->ctx, width, height);
187 height);
188} 174}
189 175
190static void drm_hdmi_commit(struct device *subdrv_dev) 176static void drm_hdmi_commit(struct device *subdrv_dev)
@@ -193,8 +179,8 @@ static void drm_hdmi_commit(struct device *subdrv_dev)
193 179
194 DRM_DEBUG_KMS("%s\n", __FILE__); 180 DRM_DEBUG_KMS("%s\n", __FILE__);
195 181
196 if (hdmi_manager_ops && hdmi_manager_ops->commit) 182 if (hdmi_ops && hdmi_ops->commit)
197 hdmi_manager_ops->commit(ctx->hdmi_ctx->ctx); 183 hdmi_ops->commit(ctx->hdmi_ctx->ctx);
198} 184}
199 185
200static void drm_hdmi_dpms(struct device *subdrv_dev, int mode) 186static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
@@ -209,8 +195,8 @@ static void drm_hdmi_dpms(struct device *subdrv_dev, int mode)
209 case DRM_MODE_DPMS_STANDBY: 195 case DRM_MODE_DPMS_STANDBY:
210 case DRM_MODE_DPMS_SUSPEND: 196 case DRM_MODE_DPMS_SUSPEND:
211 case DRM_MODE_DPMS_OFF: 197 case DRM_MODE_DPMS_OFF:
212 if (hdmi_manager_ops && hdmi_manager_ops->disable) 198 if (hdmi_ops && hdmi_ops->disable)
213 hdmi_manager_ops->disable(ctx->hdmi_ctx->ctx); 199 hdmi_ops->disable(ctx->hdmi_ctx->ctx);
214 break; 200 break;
215 default: 201 default:
216 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode); 202 DRM_DEBUG_KMS("unknown dpms mode: %d\n", mode);
@@ -235,8 +221,8 @@ static void drm_mixer_mode_set(struct device *subdrv_dev,
235 221
236 DRM_DEBUG_KMS("%s\n", __FILE__); 222 DRM_DEBUG_KMS("%s\n", __FILE__);
237 223
238 if (hdmi_overlay_ops && hdmi_overlay_ops->win_mode_set) 224 if (mixer_ops && mixer_ops->win_mode_set)
239 hdmi_overlay_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay); 225 mixer_ops->win_mode_set(ctx->mixer_ctx->ctx, overlay);
240} 226}
241 227
242static void drm_mixer_commit(struct device *subdrv_dev, int zpos) 228static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
@@ -245,8 +231,8 @@ static void drm_mixer_commit(struct device *subdrv_dev, int zpos)
245 231
246 DRM_DEBUG_KMS("%s\n", __FILE__); 232 DRM_DEBUG_KMS("%s\n", __FILE__);
247 233
248 if (hdmi_overlay_ops && hdmi_overlay_ops->win_commit) 234 if (mixer_ops && mixer_ops->win_commit)
249 hdmi_overlay_ops->win_commit(ctx->mixer_ctx->ctx, zpos); 235 mixer_ops->win_commit(ctx->mixer_ctx->ctx, zpos);
250} 236}
251 237
252static void drm_mixer_disable(struct device *subdrv_dev, int zpos) 238static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
@@ -255,8 +241,8 @@ static void drm_mixer_disable(struct device *subdrv_dev, int zpos)
255 241
256 DRM_DEBUG_KMS("%s\n", __FILE__); 242 DRM_DEBUG_KMS("%s\n", __FILE__);
257 243
258 if (hdmi_overlay_ops && hdmi_overlay_ops->win_disable) 244 if (mixer_ops && mixer_ops->win_disable)
259 hdmi_overlay_ops->win_disable(ctx->mixer_ctx->ctx, zpos); 245 mixer_ops->win_disable(ctx->mixer_ctx->ctx, zpos);
260} 246}
261 247
262static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = { 248static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
@@ -265,6 +251,12 @@ static struct exynos_drm_overlay_ops drm_hdmi_overlay_ops = {
265 .disable = drm_mixer_disable, 251 .disable = drm_mixer_disable,
266}; 252};
267 253
254static struct exynos_drm_manager hdmi_manager = {
255 .pipe = -1,
256 .ops = &drm_hdmi_manager_ops,
257 .overlay_ops = &drm_hdmi_overlay_ops,
258 .display_ops = &drm_hdmi_display_ops,
259};
268 260
269static int hdmi_subdrv_probe(struct drm_device *drm_dev, 261static int hdmi_subdrv_probe(struct drm_device *drm_dev,
270 struct device *dev) 262 struct device *dev)
@@ -332,12 +324,9 @@ static int __devinit exynos_drm_hdmi_probe(struct platform_device *pdev)
332 324
333 subdrv = &ctx->subdrv; 325 subdrv = &ctx->subdrv;
334 326
327 subdrv->dev = dev;
328 subdrv->manager = &hdmi_manager;
335 subdrv->probe = hdmi_subdrv_probe; 329 subdrv->probe = hdmi_subdrv_probe;
336 subdrv->manager.pipe = -1;
337 subdrv->manager.ops = &drm_hdmi_manager_ops;
338 subdrv->manager.overlay_ops = &drm_hdmi_overlay_ops;
339 subdrv->manager.display_ops = &drm_hdmi_display_ops;
340 subdrv->manager.dev = dev;
341 330
342 platform_set_drvdata(pdev, subdrv); 331 platform_set_drvdata(pdev, subdrv);
343 332
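
The probe path above now points subdrv->manager at one file-scope exynos_drm_manager instead of filling an embedded copy field by field, and the owning device moves to subdrv->dev. The shape of that change, reduced to the fields the hunks show (names here are illustrative):

struct device;                           /* forward declaration for the sketch */

struct example_manager {
        int pipe;
        const void *ops;                 /* stands in for the real ops tables */
};

struct example_subdrv {
        struct device *dev;              /* device now lives on the subdrv */
        struct example_manager *manager; /* was an embedded struct copy */
};

static struct example_manager hdmi_manager_sketch = {
        .pipe = -1,                      /* no CRTC assigned yet */
};

static void probe_sketch(struct example_subdrv *subdrv, struct device *dev)
{
        subdrv->dev = dev;
        subdrv->manager = &hdmi_manager_sketch;
}
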
diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
index 44497cfb6c74..f3ae192c8dcf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.h
@@ -38,15 +38,15 @@ struct exynos_drm_hdmi_context {
38 void *ctx; 38 void *ctx;
39}; 39};
40 40
41struct exynos_hdmi_display_ops { 41struct exynos_hdmi_ops {
42 /* display */
42 bool (*is_connected)(void *ctx); 43 bool (*is_connected)(void *ctx);
43 int (*get_edid)(void *ctx, struct drm_connector *connector, 44 int (*get_edid)(void *ctx, struct drm_connector *connector,
44 u8 *edid, int len); 45 u8 *edid, int len);
45 int (*check_timing)(void *ctx, void *timing); 46 int (*check_timing)(void *ctx, void *timing);
46 int (*power_on)(void *ctx, int mode); 47 int (*power_on)(void *ctx, int mode);
47};
48 48
49struct exynos_hdmi_manager_ops { 49 /* manager */
50 void (*mode_fixup)(void *ctx, struct drm_connector *connector, 50 void (*mode_fixup)(void *ctx, struct drm_connector *connector,
51 struct drm_display_mode *mode, 51 struct drm_display_mode *mode,
52 struct drm_display_mode *adjusted_mode); 52 struct drm_display_mode *adjusted_mode);
@@ -57,22 +57,17 @@ struct exynos_hdmi_manager_ops {
57 void (*disable)(void *ctx); 57 void (*disable)(void *ctx);
58}; 58};
59 59
60struct exynos_hdmi_overlay_ops { 60struct exynos_mixer_ops {
61 /* manager */
61 int (*enable_vblank)(void *ctx, int pipe); 62 int (*enable_vblank)(void *ctx, int pipe);
62 void (*disable_vblank)(void *ctx); 63 void (*disable_vblank)(void *ctx);
64
65 /* overlay */
63 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay); 66 void (*win_mode_set)(void *ctx, struct exynos_drm_overlay *overlay);
64 void (*win_commit)(void *ctx, int zpos); 67 void (*win_commit)(void *ctx, int zpos);
65 void (*win_disable)(void *ctx, int zpos); 68 void (*win_disable)(void *ctx, int zpos);
66}; 69};
67 70
68extern struct platform_driver hdmi_driver; 71void exynos_hdmi_ops_register(struct exynos_hdmi_ops *ops);
69extern struct platform_driver mixer_driver; 72void exynos_mixer_ops_register(struct exynos_mixer_ops *ops);
70
71void exynos_drm_display_ops_register(struct exynos_hdmi_display_ops
72 *display_ops);
73void exynos_drm_manager_ops_register(struct exynos_hdmi_manager_ops
74 *manager_ops);
75void exynos_drm_overlay_ops_register(struct exynos_hdmi_overlay_ops
76 *overlay_ops);
77
78#endif 73#endif
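
With the display, manager, and overlay tables folded into exynos_hdmi_ops and exynos_mixer_ops, each backend hands the common layer exactly one table at probe time. A caller-side sketch against the two registration hooks declared above (the callback body is a placeholder):

#include "exynos_drm_hdmi.h"             /* for the ops types and registrars */

static bool sketch_is_connected(void *ctx)
{
        return false;                    /* a real backend reads HPD state */
}

static struct exynos_hdmi_ops sketch_hdmi_ops = {
        .is_connected = sketch_is_connected,
        /* .get_edid, .mode_set, ... filled in by a real backend */
};

static int sketch_probe(void)
{
        exynos_hdmi_ops_register(&sketch_hdmi_ops);
        return 0;
}
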
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index c277a3a445f5..f92fe4c6174a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -24,6 +24,10 @@ struct exynos_plane {
24 24
25static const uint32_t formats[] = { 25static const uint32_t formats[] = {
26 DRM_FORMAT_XRGB8888, 26 DRM_FORMAT_XRGB8888,
27 DRM_FORMAT_ARGB8888,
28 DRM_FORMAT_NV12,
29 DRM_FORMAT_NV12M,
30 DRM_FORMAT_NV12MT,
27}; 31};
28 32
29static int 33static int
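
The plane gains ARGB8888 and the NV12 family on top of XRGB8888. A format table like this is handed straight to drm_plane_init(); a sketch under the 3.4-era API, with the possible_crtcs mask and the funcs table left as assumptions:

#include "drmP.h"
#include "drm_crtc.h"
#include <drm/drm_fourcc.h>

static const uint32_t sketch_formats[] = {
        DRM_FORMAT_XRGB8888,
        DRM_FORMAT_ARGB8888,
        DRM_FORMAT_NV12,
};

static int sketch_plane_init(struct drm_device *dev, struct drm_plane *plane,
                             const struct drm_plane_funcs *funcs)
{
        /* last argument: not a primary plane (a "private" plane in 3.4 terms) */
        return drm_plane_init(dev, plane, 1 /* possible_crtcs mask */,
                              funcs, sketch_formats,
                              ARRAY_SIZE(sketch_formats), false);
}
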
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 8e1339f9fe1f..7b9c153dceb6 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -199,7 +199,7 @@ static void vidi_dpms(struct device *subdrv_dev, int mode)
199static void vidi_apply(struct device *subdrv_dev) 199static void vidi_apply(struct device *subdrv_dev)
200{ 200{
201 struct vidi_context *ctx = get_vidi_context(subdrv_dev); 201 struct vidi_context *ctx = get_vidi_context(subdrv_dev);
202 struct exynos_drm_manager *mgr = &ctx->subdrv.manager; 202 struct exynos_drm_manager *mgr = ctx->subdrv.manager;
203 struct exynos_drm_manager_ops *mgr_ops = mgr->ops; 203 struct exynos_drm_manager_ops *mgr_ops = mgr->ops;
204 struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops; 204 struct exynos_drm_overlay_ops *ovl_ops = mgr->overlay_ops;
205 struct vidi_win_data *win_data; 205 struct vidi_win_data *win_data;
@@ -374,6 +374,13 @@ static struct exynos_drm_overlay_ops vidi_overlay_ops = {
374 .disable = vidi_win_disable, 374 .disable = vidi_win_disable,
375}; 375};
376 376
377static struct exynos_drm_manager vidi_manager = {
378 .pipe = -1,
379 .ops = &vidi_manager_ops,
380 .overlay_ops = &vidi_overlay_ops,
381 .display_ops = &vidi_display_ops,
382};
383
377static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc) 384static void vidi_finish_pageflip(struct drm_device *drm_dev, int crtc)
378{ 385{
379 struct exynos_drm_private *dev_priv = drm_dev->dev_private; 386 struct exynos_drm_private *dev_priv = drm_dev->dev_private;
@@ -425,7 +432,7 @@ static void vidi_fake_vblank_handler(struct work_struct *work)
425 struct vidi_context *ctx = container_of(work, struct vidi_context, 432 struct vidi_context *ctx = container_of(work, struct vidi_context,
426 work); 433 work);
427 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 434 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
428 struct exynos_drm_manager *manager = &subdrv->manager; 435 struct exynos_drm_manager *manager = subdrv->manager;
429 436
430 if (manager->pipe < 0) 437 if (manager->pipe < 0)
431 return; 438 return;
@@ -471,7 +478,7 @@ static void vidi_subdrv_remove(struct drm_device *drm_dev)
471static int vidi_power_on(struct vidi_context *ctx, bool enable) 478static int vidi_power_on(struct vidi_context *ctx, bool enable)
472{ 479{
473 struct exynos_drm_subdrv *subdrv = &ctx->subdrv; 480 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
474 struct device *dev = subdrv->manager.dev; 481 struct device *dev = subdrv->dev;
475 482
476 DRM_DEBUG_KMS("%s\n", __FILE__); 483 DRM_DEBUG_KMS("%s\n", __FILE__);
477 484
@@ -611,13 +618,10 @@ static int __devinit vidi_probe(struct platform_device *pdev)
611 ctx->raw_edid = (struct edid *)fake_edid_info; 618 ctx->raw_edid = (struct edid *)fake_edid_info;
612 619
613 subdrv = &ctx->subdrv; 620 subdrv = &ctx->subdrv;
621 subdrv->dev = dev;
622 subdrv->manager = &vidi_manager;
614 subdrv->probe = vidi_subdrv_probe; 623 subdrv->probe = vidi_subdrv_probe;
615 subdrv->remove = vidi_subdrv_remove; 624 subdrv->remove = vidi_subdrv_remove;
616 subdrv->manager.pipe = -1;
617 subdrv->manager.ops = &vidi_manager_ops;
618 subdrv->manager.overlay_ops = &vidi_overlay_ops;
619 subdrv->manager.display_ops = &vidi_display_ops;
620 subdrv->manager.dev = dev;
621 625
622 mutex_init(&ctx->lock); 626 mutex_init(&ctx->lock);
623 627
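
vidi drives page flips from a workqueue rather than real hardware vblanks; the handler recovers its context with container_of() and bails while manager->pipe is still -1. That recover-and-guard pattern in isolation (struct and field names are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct fake_vblank_ctx {
        struct work_struct work;
        int pipe;                        /* -1 until bound to a CRTC */
};

static void fake_vblank_handler(struct work_struct *work)
{
        struct fake_vblank_ctx *ctx =
                container_of(work, struct fake_vblank_ctx, work);

        if (ctx->pipe < 0)               /* not wired up yet: nothing to signal */
                return;

        /* ... signal the software vblank for ctx->pipe here ... */
}
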
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 575a8cbd3533..b00353876458 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -40,7 +40,6 @@
40 40
41#include "exynos_hdmi.h" 41#include "exynos_hdmi.h"
42 42
43#define HDMI_OVERLAY_NUMBER 3
44#define MAX_WIDTH 1920 43#define MAX_WIDTH 1920
45#define MAX_HEIGHT 1080 44#define MAX_HEIGHT 1080
46#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev)) 45#define get_hdmi_context(dev) platform_get_drvdata(to_platform_device(dev))
@@ -1194,7 +1193,7 @@ static int hdmi_conf_index(struct hdmi_context *hdata,
1194 1193
1195static bool hdmi_is_connected(void *ctx) 1194static bool hdmi_is_connected(void *ctx)
1196{ 1195{
1197 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1196 struct hdmi_context *hdata = ctx;
1198 u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS); 1197 u32 val = hdmi_reg_read(hdata, HDMI_HPD_STATUS);
1199 1198
1200 if (val) 1199 if (val)
@@ -1207,7 +1206,7 @@ static int hdmi_get_edid(void *ctx, struct drm_connector *connector,
1207 u8 *edid, int len) 1206 u8 *edid, int len)
1208{ 1207{
1209 struct edid *raw_edid; 1208 struct edid *raw_edid;
1210 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1209 struct hdmi_context *hdata = ctx;
1211 1210
1212 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1211 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1213 1212
@@ -1275,7 +1274,7 @@ static int hdmi_v14_check_timing(struct fb_videomode *check_timing)
1275 1274
1276static int hdmi_check_timing(void *ctx, void *timing) 1275static int hdmi_check_timing(void *ctx, void *timing)
1277{ 1276{
1278 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1277 struct hdmi_context *hdata = ctx;
1279 struct fb_videomode *check_timing = timing; 1278 struct fb_videomode *check_timing = timing;
1280 1279
1281 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1280 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1312,13 +1311,6 @@ static int hdmi_display_power_on(void *ctx, int mode)
1312 return 0; 1311 return 0;
1313} 1312}
1314 1313
1315static struct exynos_hdmi_display_ops display_ops = {
1316 .is_connected = hdmi_is_connected,
1317 .get_edid = hdmi_get_edid,
1318 .check_timing = hdmi_check_timing,
1319 .power_on = hdmi_display_power_on,
1320};
1321
1322static void hdmi_set_acr(u32 freq, u8 *acr) 1314static void hdmi_set_acr(u32 freq, u8 *acr)
1323{ 1315{
1324 u32 n, cts; 1316 u32 n, cts;
@@ -1914,7 +1906,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1914 struct drm_display_mode *adjusted_mode) 1906 struct drm_display_mode *adjusted_mode)
1915{ 1907{
1916 struct drm_display_mode *m; 1908 struct drm_display_mode *m;
1917 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1909 struct hdmi_context *hdata = ctx;
1918 int index; 1910 int index;
1919 1911
1920 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1912 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1951,7 +1943,7 @@ static void hdmi_mode_fixup(void *ctx, struct drm_connector *connector,
1951 1943
1952static void hdmi_mode_set(void *ctx, void *mode) 1944static void hdmi_mode_set(void *ctx, void *mode)
1953{ 1945{
1954 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1946 struct hdmi_context *hdata = ctx;
1955 int conf_idx; 1947 int conf_idx;
1956 1948
1957 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1949 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
@@ -1974,7 +1966,7 @@ static void hdmi_get_max_resol(void *ctx, unsigned int *width,
1974 1966
1975static void hdmi_commit(void *ctx) 1967static void hdmi_commit(void *ctx)
1976{ 1968{
1977 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1969 struct hdmi_context *hdata = ctx;
1978 1970
1979 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1971 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1980 1972
@@ -1985,7 +1977,7 @@ static void hdmi_commit(void *ctx)
1985 1977
1986static void hdmi_disable(void *ctx) 1978static void hdmi_disable(void *ctx)
1987{ 1979{
1988 struct hdmi_context *hdata = (struct hdmi_context *)ctx; 1980 struct hdmi_context *hdata = ctx;
1989 1981
1990 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 1982 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
1991 1983
@@ -1996,7 +1988,14 @@ static void hdmi_disable(void *ctx)
1996 } 1988 }
1997} 1989}
1998 1990
1999static struct exynos_hdmi_manager_ops manager_ops = { 1991static struct exynos_hdmi_ops hdmi_ops = {
1992 /* display */
1993 .is_connected = hdmi_is_connected,
1994 .get_edid = hdmi_get_edid,
1995 .check_timing = hdmi_check_timing,
1996 .power_on = hdmi_display_power_on,
1997
1998 /* manager */
2000 .mode_fixup = hdmi_mode_fixup, 1999 .mode_fixup = hdmi_mode_fixup,
2001 .mode_set = hdmi_mode_set, 2000 .mode_set = hdmi_mode_set,
2002 .get_max_resol = hdmi_get_max_resol, 2001 .get_max_resol = hdmi_get_max_resol,
@@ -2020,7 +2019,7 @@ static void hdmi_hotplug_func(struct work_struct *work)
2020static irqreturn_t hdmi_irq_handler(int irq, void *arg) 2019static irqreturn_t hdmi_irq_handler(int irq, void *arg)
2021{ 2020{
2022 struct exynos_drm_hdmi_context *ctx = arg; 2021 struct exynos_drm_hdmi_context *ctx = arg;
2023 struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx; 2022 struct hdmi_context *hdata = ctx->ctx;
2024 u32 intc_flag; 2023 u32 intc_flag;
2025 2024
2026 intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG); 2025 intc_flag = hdmi_reg_read(hdata, HDMI_INTC_FLAG);
@@ -2173,7 +2172,7 @@ static int hdmi_runtime_suspend(struct device *dev)
2173 2172
2174 DRM_DEBUG_KMS("%s\n", __func__); 2173 DRM_DEBUG_KMS("%s\n", __func__);
2175 2174
2176 hdmi_resource_poweroff((struct hdmi_context *)ctx->ctx); 2175 hdmi_resource_poweroff(ctx->ctx);
2177 2176
2178 return 0; 2177 return 0;
2179} 2178}
@@ -2184,7 +2183,7 @@ static int hdmi_runtime_resume(struct device *dev)
2184 2183
2185 DRM_DEBUG_KMS("%s\n", __func__); 2184 DRM_DEBUG_KMS("%s\n", __func__);
2186 2185
2187 hdmi_resource_poweron((struct hdmi_context *)ctx->ctx); 2186 hdmi_resource_poweron(ctx->ctx);
2188 2187
2189 return 0; 2188 return 0;
2190} 2189}
@@ -2322,8 +2321,7 @@ static int __devinit hdmi_probe(struct platform_device *pdev)
2322 hdata->irq = res->start; 2321 hdata->irq = res->start;
2323 2322
2324 /* register specific callbacks to common hdmi. */ 2323 /* register specific callbacks to common hdmi. */
2325 exynos_drm_display_ops_register(&display_ops); 2324 exynos_hdmi_ops_register(&hdmi_ops);
2326 exynos_drm_manager_ops_register(&manager_ops);
2327 2325
2328 hdmi_resource_poweron(hdata); 2326 hdmi_resource_poweron(hdata);
2329 2327
@@ -2351,7 +2349,7 @@ err_data:
2351static int __devexit hdmi_remove(struct platform_device *pdev) 2349static int __devexit hdmi_remove(struct platform_device *pdev)
2352{ 2350{
2353 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev); 2351 struct exynos_drm_hdmi_context *ctx = platform_get_drvdata(pdev);
2354 struct hdmi_context *hdata = (struct hdmi_context *)ctx->ctx; 2352 struct hdmi_context *hdata = ctx->ctx;
2355 2353
2356 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__); 2354 DRM_DEBUG_KMS("[%d] %s\n", __LINE__, __func__);
2357 2355
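
A recurring cleanup in this file: casts such as (struct hdmi_context *)ctx->ctx disappear, since C converts void * to any object pointer type implicitly. The before/after in miniature (the struct is a stand-in):

struct hdmi_context_sketch { int id; };

static void takes_opaque(void *opaque)
{
        /* before: ... = (struct hdmi_context_sketch *)opaque; */
        struct hdmi_context_sketch *h = opaque; /* after: implicit conversion */

        (void)h;
}
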
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4d5f41e19527..e15438c01129 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -37,7 +37,8 @@
37#include "exynos_drm_drv.h" 37#include "exynos_drm_drv.h"
38#include "exynos_drm_hdmi.h" 38#include "exynos_drm_hdmi.h"
39 39
40#define HDMI_OVERLAY_NUMBER 3 40#define MIXER_WIN_NR 3
41#define MIXER_DEFAULT_WIN 0
41 42
42#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev)) 43#define get_mixer_context(dev) platform_get_drvdata(to_platform_device(dev))
43 44
@@ -75,16 +76,12 @@ struct mixer_resources {
75}; 76};
76 77
77struct mixer_context { 78struct mixer_context {
78 struct fb_videomode *default_timing;
79 unsigned int default_win;
80 unsigned int default_bpp;
81 unsigned int irq; 79 unsigned int irq;
82 int pipe; 80 int pipe;
83 bool interlace; 81 bool interlace;
84 bool vp_enabled;
85 82
86 struct mixer_resources mixer_res; 83 struct mixer_resources mixer_res;
87 struct hdmi_win_data win_data[HDMI_OVERLAY_NUMBER]; 84 struct hdmi_win_data win_data[MIXER_WIN_NR];
88}; 85};
89 86
90static const u8 filter_y_horiz_tap8[] = { 87static const u8 filter_y_horiz_tap8[] = {
@@ -643,9 +640,9 @@ static void mixer_win_mode_set(void *ctx,
643 640
644 win = overlay->zpos; 641 win = overlay->zpos;
645 if (win == DEFAULT_ZPOS) 642 if (win == DEFAULT_ZPOS)
646 win = mixer_ctx->default_win; 643 win = MIXER_DEFAULT_WIN;
647 644
648 if (win < 0 || win > HDMI_OVERLAY_NUMBER) { 645 if (win < 0 || win > MIXER_WIN_NR) {
649 DRM_ERROR("overlay plane[%d] is wrong\n", win); 646 DRM_ERROR("overlay plane[%d] is wrong\n", win);
650 return; 647 return;
651 } 648 }
@@ -683,9 +680,9 @@ static void mixer_win_commit(void *ctx, int zpos)
683 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 680 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
684 681
685 if (win == DEFAULT_ZPOS) 682 if (win == DEFAULT_ZPOS)
686 win = mixer_ctx->default_win; 683 win = MIXER_DEFAULT_WIN;
687 684
688 if (win < 0 || win > HDMI_OVERLAY_NUMBER) { 685 if (win < 0 || win > MIXER_WIN_NR) {
689 DRM_ERROR("overlay plane[%d] is wrong\n", win); 686 DRM_ERROR("overlay plane[%d] is wrong\n", win);
690 return; 687 return;
691 } 688 }
@@ -706,9 +703,9 @@ static void mixer_win_disable(void *ctx, int zpos)
706 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win); 703 DRM_DEBUG_KMS("[%d] %s, win: %d\n", __LINE__, __func__, win);
707 704
708 if (win == DEFAULT_ZPOS) 705 if (win == DEFAULT_ZPOS)
709 win = mixer_ctx->default_win; 706 win = MIXER_DEFAULT_WIN;
710 707
711 if (win < 0 || win > HDMI_OVERLAY_NUMBER) { 708 if (win < 0 || win > MIXER_WIN_NR) {
712 DRM_ERROR("overlay plane[%d] is wrong\n", win); 709 DRM_ERROR("overlay plane[%d] is wrong\n", win);
713 return; 710 return;
714 } 711 }
@@ -722,9 +719,12 @@ static void mixer_win_disable(void *ctx, int zpos)
722 spin_unlock_irqrestore(&res->reg_slock, flags); 719 spin_unlock_irqrestore(&res->reg_slock, flags);
723} 720}
724 721
725static struct exynos_hdmi_overlay_ops overlay_ops = { 722static struct exynos_mixer_ops mixer_ops = {
723 /* manager */
726 .enable_vblank = mixer_enable_vblank, 724 .enable_vblank = mixer_enable_vblank,
727 .disable_vblank = mixer_disable_vblank, 725 .disable_vblank = mixer_disable_vblank,
726
727 /* overlay */
728 .win_mode_set = mixer_win_mode_set, 728 .win_mode_set = mixer_win_mode_set,
729 .win_commit = mixer_win_commit, 729 .win_commit = mixer_win_commit,
730 .win_disable = mixer_win_disable, 730 .win_disable = mixer_win_disable,
@@ -771,8 +771,7 @@ static void mixer_finish_pageflip(struct drm_device *drm_dev, int crtc)
771static irqreturn_t mixer_irq_handler(int irq, void *arg) 771static irqreturn_t mixer_irq_handler(int irq, void *arg)
772{ 772{
773 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg; 773 struct exynos_drm_hdmi_context *drm_hdmi_ctx = arg;
774 struct mixer_context *ctx = 774 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
775 (struct mixer_context *)drm_hdmi_ctx->ctx;
776 struct mixer_resources *res = &ctx->mixer_res; 775 struct mixer_resources *res = &ctx->mixer_res;
777 u32 val, val_base; 776 u32 val, val_base;
778 777
@@ -902,7 +901,7 @@ static int mixer_runtime_resume(struct device *dev)
902 901
903 DRM_DEBUG_KMS("resume - start\n"); 902 DRM_DEBUG_KMS("resume - start\n");
904 903
905 mixer_resource_poweron((struct mixer_context *)ctx->ctx); 904 mixer_resource_poweron(ctx->ctx);
906 905
907 return 0; 906 return 0;
908} 907}
@@ -913,7 +912,7 @@ static int mixer_runtime_suspend(struct device *dev)
913 912
914 DRM_DEBUG_KMS("suspend - start\n"); 913 DRM_DEBUG_KMS("suspend - start\n");
915 914
916 mixer_resource_poweroff((struct mixer_context *)ctx->ctx); 915 mixer_resource_poweroff(ctx->ctx);
917 916
918 return 0; 917 return 0;
919} 918}
@@ -926,8 +925,7 @@ static const struct dev_pm_ops mixer_pm_ops = {
926static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx, 925static int __devinit mixer_resources_init(struct exynos_drm_hdmi_context *ctx,
927 struct platform_device *pdev) 926 struct platform_device *pdev)
928{ 927{
929 struct mixer_context *mixer_ctx = 928 struct mixer_context *mixer_ctx = ctx->ctx;
930 (struct mixer_context *)ctx->ctx;
931 struct device *dev = &pdev->dev; 929 struct device *dev = &pdev->dev;
932 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res; 930 struct mixer_resources *mixer_res = &mixer_ctx->mixer_res;
933 struct resource *res; 931 struct resource *res;
@@ -1076,7 +1074,7 @@ static int __devinit mixer_probe(struct platform_device *pdev)
1076 goto fail; 1074 goto fail;
1077 1075
1078 /* register specific callback point to common hdmi. */ 1076 /* register specific callback point to common hdmi. */
1079 exynos_drm_overlay_ops_register(&overlay_ops); 1077 exynos_mixer_ops_register(&mixer_ops);
1080 1078
1081 mixer_resource_poweron(ctx); 1079 mixer_resource_poweron(ctx);
1082 1080
@@ -1093,7 +1091,7 @@ static int mixer_remove(struct platform_device *pdev)
1093 struct device *dev = &pdev->dev; 1091 struct device *dev = &pdev->dev;
1094 struct exynos_drm_hdmi_context *drm_hdmi_ctx = 1092 struct exynos_drm_hdmi_context *drm_hdmi_ctx =
1095 platform_get_drvdata(pdev); 1093 platform_get_drvdata(pdev);
1096 struct mixer_context *ctx = (struct mixer_context *)drm_hdmi_ctx->ctx; 1094 struct mixer_context *ctx = drm_hdmi_ctx->ctx;
1097 1095
1098 dev_info(dev, "remove successful\n"); 1096 dev_info(dev, "remove successful\n");
1099 1097
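
The mixer's window array is now sized by MIXER_WIN_NR, with MIXER_DEFAULT_WIN resolving DEFAULT_ZPOS. One caution: the guard the hunks keep, win > MIXER_WIN_NR, still admits win == MIXER_WIN_NR, one past the end of win_data[MIXER_WIN_NR]. The conventional check is sketched below; this is a correction sketch under assumed constants, not the committed code:

#define MIXER_WIN_NR            3
#define MIXER_DEFAULT_WIN       0
#define DEFAULT_ZPOS            (-1)     /* assumed from the exynos headers */

struct win_data_sketch { int zpos; };

static struct win_data_sketch win_data[MIXER_WIN_NR];

static struct win_data_sketch *win_lookup(int zpos)
{
        int win = zpos;

        if (win == DEFAULT_ZPOS)
                win = MIXER_DEFAULT_WIN;

        if (win < 0 || win >= MIXER_WIN_NR) /* >= rejects the one-past-end index */
                return NULL;

        return &win_data[win];
}
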
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index ce7fc77678b4..b65c06f1a021 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -12,6 +12,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
12 i915_gem_execbuffer.o \ 12 i915_gem_execbuffer.o \
13 i915_gem_gtt.o \ 13 i915_gem_gtt.o \
14 i915_gem_tiling.o \ 14 i915_gem_tiling.o \
15 i915_sysfs.o \
15 i915_trace_points.o \ 16 i915_trace_points.o \
16 intel_display.o \ 17 intel_display.o \
17 intel_crt.o \ 18 intel_crt.o \
@@ -22,6 +23,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
22 intel_sdvo.o \ 23 intel_sdvo.o \
23 intel_modes.o \ 24 intel_modes.o \
24 intel_panel.o \ 25 intel_panel.o \
26 intel_pm.o \
25 intel_i2c.o \ 27 intel_i2c.o \
26 intel_fb.o \ 28 intel_fb.o \
27 intel_tv.o \ 29 intel_tv.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 967fb928c577..35462df7cefd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1171,6 +1171,17 @@ static int gen6_drpc_info(struct seq_file *m)
1171 1171
1172 seq_printf(m, "Core Power Down: %s\n", 1172 seq_printf(m, "Core Power Down: %s\n",
1173 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK)); 1173 yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
1174
1175 /* Not exactly sure what this is */
1176 seq_printf(m, "RC6 \"Locked to RPn\" residency since boot: %u\n",
1177 I915_READ(GEN6_GT_GFX_RC6_LOCKED));
1178 seq_printf(m, "RC6 residency since boot: %u\n",
1179 I915_READ(GEN6_GT_GFX_RC6));
1180 seq_printf(m, "RC6+ residency since boot: %u\n",
1181 I915_READ(GEN6_GT_GFX_RC6p));
1182 seq_printf(m, "RC6++ residency since boot: %u\n",
1183 I915_READ(GEN6_GT_GFX_RC6pp));
1184
1174 return 0; 1185 return 0;
1175} 1186}
1176 1187
@@ -1821,7 +1832,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
1821 return 0; 1832 return 0;
1822} 1833}
1823 1834
1824int i915_forcewake_release(struct inode *inode, struct file *file) 1835static int i915_forcewake_release(struct inode *inode, struct file *file)
1825{ 1836{
1826 struct drm_device *dev = inode->i_private; 1837 struct drm_device *dev = inode->i_private;
1827 struct drm_i915_private *dev_priv = dev->dev_private; 1838 struct drm_i915_private *dev_priv = dev->dev_private;
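
gen6_drpc_info() now prints the four RC6 residency counters. The read-and-report pattern is plain seq_file; a sketch with an mmio read standing in for I915_READ() (the offsets below are placeholders, only the counter names come from the hunk):

#include <linux/io.h>
#include <linux/seq_file.h>

/* Placeholder offsets; the real values live in i915_reg.h. */
#define SKETCH_GEN6_GT_GFX_RC6   0x0
#define SKETCH_GEN6_GT_GFX_RC6p  0x4

static int rc6_residency_show(struct seq_file *m, void __iomem *mmio)
{
        seq_printf(m, "RC6 residency since boot: %u\n",
                   readl(mmio + SKETCH_GEN6_GT_GFX_RC6));
        seq_printf(m, "RC6+ residency since boot: %u\n",
                   readl(mmio + SKETCH_GEN6_GT_GFX_RC6p));
        return 0;
}
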
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 652f43f00ef2..a813f652fa1f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -2115,7 +2115,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2115 spin_lock_init(&dev_priv->error_lock); 2115 spin_lock_init(&dev_priv->error_lock);
2116 spin_lock_init(&dev_priv->rps_lock); 2116 spin_lock_init(&dev_priv->rps_lock);
2117 2117
2118 if (IS_IVYBRIDGE(dev)) 2118 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
2119 dev_priv->num_pipe = 3; 2119 dev_priv->num_pipe = 3;
2120 else if (IS_MOBILE(dev) || !IS_GEN2(dev)) 2120 else if (IS_MOBILE(dev) || !IS_GEN2(dev))
2121 dev_priv->num_pipe = 2; 2121 dev_priv->num_pipe = 2;
@@ -2139,6 +2139,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2139 } 2139 }
2140 } 2140 }
2141 2141
2142 i915_setup_sysfs(dev);
2143
2142 /* Must be done after probing outputs */ 2144 /* Must be done after probing outputs */
2143 intel_opregion_init(dev); 2145 intel_opregion_init(dev);
2144 acpi_video_register(); 2146 acpi_video_register();
@@ -2190,6 +2192,8 @@ int i915_driver_unload(struct drm_device *dev)
2190 i915_mch_dev = NULL; 2192 i915_mch_dev = NULL;
2191 spin_unlock(&mchdev_lock); 2193 spin_unlock(&mchdev_lock);
2192 2194
2195 i915_teardown_sysfs(dev);
2196
2193 if (dev_priv->mm.inactive_shrinker.shrink) 2197 if (dev_priv->mm.inactive_shrinker.shrink)
2194 unregister_shrinker(&dev_priv->mm.inactive_shrinker); 2198 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2195 2199
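
i915_setup_sysfs() runs once outputs are probed, and i915_teardown_sysfs() runs symmetrically in unload before the shrinker is unregistered. A sketch of a matched sysfs attribute pair of the kind this series adds; the rc6_residency attribute name is an assumption about the sysfs patch, not shown in these hunks:

#include <linux/device.h>

static ssize_t rc6_residency_show(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u\n", 0u /* counter read goes here */);
}

static DEVICE_ATTR(rc6_residency, 0444, rc6_residency_show, NULL);

void sketch_setup_sysfs(struct device *kdev)
{
        if (device_create_file(kdev, &dev_attr_rc6_residency))
                dev_warn(kdev, "sysfs attribute registration failed\n");
}

void sketch_teardown_sysfs(struct device *kdev)
{
        device_remove_file(kdev, &dev_attr_rc6_residency);
}
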
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c33b0a41a73d..3effcf71e1b1 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -64,7 +64,7 @@ MODULE_PARM_DESC(semaphores,
64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); 64 "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
65 65
66int i915_enable_rc6 __read_mostly = -1; 66int i915_enable_rc6 __read_mostly = -1;
67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); 67module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400);
68MODULE_PARM_DESC(i915_enable_rc6, 68MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6. " 69 "Enable power-saving render C-state 6. "
70 "Different stages can be selected via bitmask values " 70 "Different stages can be selected via bitmask values "
@@ -394,6 +394,21 @@ void intel_detect_pch(struct drm_device *dev)
394 } 394 }
395} 395}
396 396
397bool i915_semaphore_is_enabled(struct drm_device *dev)
398{
399 if (INTEL_INFO(dev)->gen < 6)
400 return 0;
401
402 if (i915_semaphores >= 0)
403 return i915_semaphores;
404
405 /* Enable semaphores on SNB when IO remapping is off */
406 if (INTEL_INFO(dev)->gen == 6)
407 return !intel_iommu_enabled;
408
409 return 1;
410}
411
397void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) 412void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
398{ 413{
399 int count; 414 int count;
@@ -836,9 +851,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
836 i915_gem_init_ppgtt(dev); 851 i915_gem_init_ppgtt(dev);
837 852
838 mutex_unlock(&dev->struct_mutex); 853 mutex_unlock(&dev->struct_mutex);
854
855 if (drm_core_check_feature(dev, DRIVER_MODESET))
856 intel_modeset_init_hw(dev);
857
839 drm_irq_uninstall(dev); 858 drm_irq_uninstall(dev);
840 drm_mode_config_reset(dev); 859 drm_mode_config_reset(dev);
841 drm_irq_install(dev); 860 drm_irq_install(dev);
861
842 mutex_lock(&dev->struct_mutex); 862 mutex_lock(&dev->struct_mutex);
843 } 863 }
844 864
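
Two related changes above: the rc6 parameter becomes read-only after load (0600 to 0400), and i915_semaphore_is_enabled() spells out the driver's usual tristate policy: hard off below gen6, an explicit 0/1 wins, and the gen6 auto default keys off IOMMU state because semaphores plus VT-d were an unstable combination. The tristate pattern in isolation (names illustrative):

#include <linux/module.h>

static int sketch_feature = -1;          /* -1 = per-chip default */
module_param(sketch_feature, int, 0400); /* readable, not writable at runtime */
MODULE_PARM_DESC(sketch_feature, "-1 = auto, 0 = off, 1 = on");

static bool sketch_feature_enabled(int gen, bool iommu_active)
{
        if (gen < 6)
                return false;            /* hardware cannot do it */

        if (sketch_feature >= 0)
                return sketch_feature;   /* explicit user override */

        if (gen == 6)
                return !iommu_active;    /* auto: avoid the known-bad combo */

        return true;
}
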
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 92e496afc6f4..69e153956182 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -38,6 +38,7 @@
38#include <linux/i2c-algo-bit.h> 38#include <linux/i2c-algo-bit.h>
39#include <drm/intel-gtt.h> 39#include <drm/intel-gtt.h>
40#include <linux/backlight.h> 40#include <linux/backlight.h>
41#include <linux/intel-iommu.h>
41 42
42/* General customization: 43/* General customization:
43 */ 44 */
@@ -145,7 +146,6 @@ struct drm_i915_master_private {
145struct drm_i915_fence_reg { 146struct drm_i915_fence_reg {
146 struct list_head lru_list; 147 struct list_head lru_list;
147 struct drm_i915_gem_object *obj; 148 struct drm_i915_gem_object *obj;
148 uint32_t setup_seqno;
149 int pin_count; 149 int pin_count;
150}; 150};
151 151
@@ -930,13 +930,12 @@ struct drm_i915_gem_object {
930 */ 930 */
931 uint32_t gtt_offset; 931 uint32_t gtt_offset;
932 932
933 /** Breadcrumb of last rendering to the buffer. */
934 uint32_t last_rendering_seqno;
935 struct intel_ring_buffer *ring; 933 struct intel_ring_buffer *ring;
936 934
935 /** Breadcrumb of last rendering to the buffer. */
936 uint32_t last_rendering_seqno;
937 /** Breadcrumb of last fenced GPU access to the buffer. */ 937 /** Breadcrumb of last fenced GPU access to the buffer. */
938 uint32_t last_fenced_seqno; 938 uint32_t last_fenced_seqno;
939 struct intel_ring_buffer *last_fenced_ring;
940 939
941 /** Current tiling stride for the object, if it's tiled. */ 940 /** Current tiling stride for the object, if it's tiled. */
942 uint32_t stride; 941 uint32_t stride;
@@ -1127,8 +1126,10 @@ extern void i915_driver_preclose(struct drm_device *dev,
1127extern void i915_driver_postclose(struct drm_device *dev, 1126extern void i915_driver_postclose(struct drm_device *dev,
1128 struct drm_file *file_priv); 1127 struct drm_file *file_priv);
1129extern int i915_driver_device_is_agp(struct drm_device * dev); 1128extern int i915_driver_device_is_agp(struct drm_device * dev);
1129#ifdef CONFIG_COMPAT
1130extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 1130extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
1131 unsigned long arg); 1131 unsigned long arg);
1132#endif
1132extern int i915_emit_box(struct drm_device *dev, 1133extern int i915_emit_box(struct drm_device *dev,
1133 struct drm_clip_rect *box, 1134 struct drm_clip_rect *box,
1134 int DR1, int DR4); 1135 int DR1, int DR4);
@@ -1230,6 +1231,8 @@ void i915_gem_lastclose(struct drm_device *dev);
1230 1231
1231int __must_check i915_mutex_lock_interruptible(struct drm_device *dev); 1232int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1232int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj); 1233int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj);
1234int i915_gem_object_sync(struct drm_i915_gem_object *obj,
1235 struct intel_ring_buffer *to);
1233void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, 1236void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1234 struct intel_ring_buffer *ring, 1237 struct intel_ring_buffer *ring,
1235 u32 seqno); 1238 u32 seqno);
@@ -1252,17 +1255,18 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1252 1255
1253u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring); 1256u32 i915_gem_next_request_seqno(struct intel_ring_buffer *ring);
1254 1257
1255int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj, 1258int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
1256 struct intel_ring_buffer *pipelined);
1257int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj); 1259int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1258 1260
1259static inline void 1261static inline bool
1260i915_gem_object_pin_fence(struct drm_i915_gem_object *obj) 1262i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
1261{ 1263{
1262 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1264 if (obj->fence_reg != I915_FENCE_REG_NONE) {
1263 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; 1265 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
1264 dev_priv->fence_regs[obj->fence_reg].pin_count++; 1266 dev_priv->fence_regs[obj->fence_reg].pin_count++;
1265 } 1267 return true;
1268 } else
1269 return false;
1266} 1270}
1267 1271
1268static inline void 1272static inline void
@@ -1380,6 +1384,10 @@ extern int i915_restore_state(struct drm_device *dev);
1380extern int i915_save_state(struct drm_device *dev); 1384extern int i915_save_state(struct drm_device *dev);
1381extern int i915_restore_state(struct drm_device *dev); 1385extern int i915_restore_state(struct drm_device *dev);
1382 1386
1387/* i915_sysfs.c */
1388void i915_setup_sysfs(struct drm_device *dev_priv);
1389void i915_teardown_sysfs(struct drm_device *dev_priv);
1390
1383/* intel_i2c.c */ 1391/* intel_i2c.c */
1384extern int intel_setup_gmbus(struct drm_device *dev); 1392extern int intel_setup_gmbus(struct drm_device *dev);
1385extern void intel_teardown_gmbus(struct drm_device *dev); 1393extern void intel_teardown_gmbus(struct drm_device *dev);
@@ -1424,6 +1432,7 @@ static inline void intel_unregister_dsm_handler(void) { return; }
1424#endif /* CONFIG_ACPI */ 1432#endif /* CONFIG_ACPI */
1425 1433
1426/* modesetting */ 1434/* modesetting */
1435extern void intel_modeset_init_hw(struct drm_device *dev);
1427extern void intel_modeset_init(struct drm_device *dev); 1436extern void intel_modeset_init(struct drm_device *dev);
1428extern void intel_modeset_gem_init(struct drm_device *dev); 1437extern void intel_modeset_gem_init(struct drm_device *dev);
1429extern void intel_modeset_cleanup(struct drm_device *dev); 1438extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1436,7 +1445,9 @@ extern void ironlake_enable_rc6(struct drm_device *dev);
1436extern void gen6_set_rps(struct drm_device *dev, u8 val); 1445extern void gen6_set_rps(struct drm_device *dev, u8 val);
1437extern void intel_detect_pch(struct drm_device *dev); 1446extern void intel_detect_pch(struct drm_device *dev);
1438extern int intel_trans_dp_port_sel(struct drm_crtc *crtc); 1447extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
1448extern int intel_enable_rc6(const struct drm_device *dev);
1439 1449
1450extern bool i915_semaphore_is_enabled(struct drm_device *dev);
1440extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); 1451extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1441extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv); 1452extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
1442extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); 1453extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
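
i915_gem_object_pin_fence() now reports whether a fence register was actually pinned, so a caller can pair the unpin exactly. The intended call shape, assuming the unpin helper declared alongside it in this header (a sketch, not code from the series):

/* Sketch only; obj is an i915 GEM object with i915_drv.h in scope. */
static void use_fenced_access(struct drm_i915_gem_object *obj)
{
        bool pinned = i915_gem_object_pin_fence(obj);

        /* ... access the object through its fenced GTT mapping ... */

        if (pinned)
                i915_gem_object_unpin_fence(obj);
}
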
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b851bd34ca18..7bc4a40132ad 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -42,18 +42,34 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
42static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj, 42static __must_check int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
43 unsigned alignment, 43 unsigned alignment,
44 bool map_and_fenceable); 44 bool map_and_fenceable);
45static void i915_gem_clear_fence_reg(struct drm_device *dev,
46 struct drm_i915_fence_reg *reg);
47static int i915_gem_phys_pwrite(struct drm_device *dev, 45static int i915_gem_phys_pwrite(struct drm_device *dev,
48 struct drm_i915_gem_object *obj, 46 struct drm_i915_gem_object *obj,
49 struct drm_i915_gem_pwrite *args, 47 struct drm_i915_gem_pwrite *args,
50 struct drm_file *file); 48 struct drm_file *file);
51static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj); 49static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
52 50
51static void i915_gem_write_fence(struct drm_device *dev, int reg,
52 struct drm_i915_gem_object *obj);
53static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
54 struct drm_i915_fence_reg *fence,
55 bool enable);
56
53static int i915_gem_inactive_shrink(struct shrinker *shrinker, 57static int i915_gem_inactive_shrink(struct shrinker *shrinker,
54 struct shrink_control *sc); 58 struct shrink_control *sc);
55static void i915_gem_object_truncate(struct drm_i915_gem_object *obj); 59static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
56 60
61static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
62{
63 if (obj->tiling_mode)
64 i915_gem_release_mmap(obj);
65
66 /* As we do not have an associated fence register, we will force
67 * a tiling change if we ever need to acquire one.
68 */
69 obj->tiling_changed = false;
70 obj->fence_reg = I915_FENCE_REG_NONE;
71}
72
57/* some bookkeeping */ 73/* some bookkeeping */
58static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 74static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
59 size_t size) 75 size_t size)
@@ -876,6 +892,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
876 892
877 if (obj->gtt_space && 893 if (obj->gtt_space &&
878 obj->cache_level == I915_CACHE_NONE && 894 obj->cache_level == I915_CACHE_NONE &&
895 obj->tiling_mode == I915_TILING_NONE &&
879 obj->map_and_fenceable && 896 obj->map_and_fenceable &&
880 obj->base.write_domain != I915_GEM_DOMAIN_CPU) { 897 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
881 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file); 898 ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
@@ -1078,10 +1095,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1078 if (!obj->has_global_gtt_mapping) 1095 if (!obj->has_global_gtt_mapping)
1079 i915_gem_gtt_bind_object(obj, obj->cache_level); 1096 i915_gem_gtt_bind_object(obj, obj->cache_level);
1080 1097
1081 if (obj->tiling_mode == I915_TILING_NONE) 1098 ret = i915_gem_object_get_fence(obj);
1082 ret = i915_gem_object_put_fence(obj);
1083 else
1084 ret = i915_gem_object_get_fence(obj, NULL);
1085 if (ret) 1099 if (ret)
1086 goto unlock; 1100 goto unlock;
1087 1101
@@ -1400,7 +1414,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1400 1414
1401 if (obj->fenced_gpu_access) { 1415 if (obj->fenced_gpu_access) {
1402 obj->last_fenced_seqno = seqno; 1416 obj->last_fenced_seqno = seqno;
1403 obj->last_fenced_ring = ring;
1404 1417
1405 /* Bump MRU to take account of the delayed flush */ 1418 /* Bump MRU to take account of the delayed flush */
1406 if (obj->fence_reg != I915_FENCE_REG_NONE) { 1419 if (obj->fence_reg != I915_FENCE_REG_NONE) {
@@ -1418,6 +1431,7 @@ i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1418{ 1431{
1419 list_del_init(&obj->ring_list); 1432 list_del_init(&obj->ring_list);
1420 obj->last_rendering_seqno = 0; 1433 obj->last_rendering_seqno = 0;
1434 obj->last_fenced_seqno = 0;
1421} 1435}
1422 1436
1423static void 1437static void
@@ -1639,20 +1653,18 @@ static void i915_gem_reset_fences(struct drm_device *dev)
1639 1653
1640 for (i = 0; i < dev_priv->num_fence_regs; i++) { 1654 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1641 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1655 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1642 struct drm_i915_gem_object *obj = reg->obj;
1643 1656
1644 if (!obj) 1657 i915_gem_write_fence(dev, i, NULL);
1645 continue;
1646 1658
1647 if (obj->tiling_mode) 1659 if (reg->obj)
1648 i915_gem_release_mmap(obj); 1660 i915_gem_object_fence_lost(reg->obj);
1649 1661
1650 reg->obj->fence_reg = I915_FENCE_REG_NONE; 1662 reg->pin_count = 0;
1651 reg->obj->fenced_gpu_access = false; 1663 reg->obj = NULL;
1652 reg->obj->last_fenced_seqno = 0; 1664 INIT_LIST_HEAD(&reg->lru_list);
1653 reg->obj->last_fenced_ring = NULL;
1654 i915_gem_clear_fence_reg(dev, reg);
1655 } 1665 }
1666
1667 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
1656} 1668}
1657 1669
1658void i915_gem_reset(struct drm_device *dev) 1670void i915_gem_reset(struct drm_device *dev)
@@ -1956,6 +1968,62 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
1956 return 0; 1968 return 0;
1957} 1969}
1958 1970
1971/**
1972 * i915_gem_object_sync - sync an object to a ring.
1973 *
1974 * @obj: object which may be in use on another ring.
1975 * @to: ring we wish to use the object on. May be NULL.
1976 *
1977 * This code is meant to abstract object synchronization with the GPU.
1978 * Calling with NULL implies synchronizing the object with the CPU
1979 * rather than a particular GPU ring.
1980 *
1981 * Returns 0 if successful, else propagates up the lower layer error.
1982 */
1983int
1984i915_gem_object_sync(struct drm_i915_gem_object *obj,
1985 struct intel_ring_buffer *to)
1986{
1987 struct intel_ring_buffer *from = obj->ring;
1988 u32 seqno;
1989 int ret, idx;
1990
1991 if (from == NULL || to == from)
1992 return 0;
1993
1994 if (to == NULL || !i915_semaphore_is_enabled(obj->base.dev))
1995 return i915_gem_object_wait_rendering(obj);
1996
1997 idx = intel_ring_sync_index(from, to);
1998
1999 seqno = obj->last_rendering_seqno;
2000 if (seqno <= from->sync_seqno[idx])
2001 return 0;
2002
2003 if (seqno == from->outstanding_lazy_request) {
2004 struct drm_i915_gem_request *request;
2005
2006 request = kzalloc(sizeof(*request), GFP_KERNEL);
2007 if (request == NULL)
2008 return -ENOMEM;
2009
2010 ret = i915_add_request(from, NULL, request);
2011 if (ret) {
2012 kfree(request);
2013 return ret;
2014 }
2015
2016 seqno = request->seqno;
2017 }
2018
2019
2020 ret = to->sync_to(to, from, seqno);
2021 if (!ret)
2022 from->sync_seqno[idx] = seqno;
2023
2024 return ret;
2025}
2026
1959static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj) 2027static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
1960{ 2028{
1961 u32 old_write_domain, old_read_domains; 2029 u32 old_write_domain, old_read_domains;
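
i915_gem_object_sync() gives execbuffer and the flip path one serialization entry point: a NULL target ring (or semaphores disabled) degrades to a CPU-side wait, otherwise it emits a semaphore wait, first flushing an outstanding lazy request so there is a real seqno to wait on. A caller-side sketch:

/* Sketch: serialize obj against its last writer before using it on `to`. */
static int prepare_for_ring(struct drm_i915_gem_object *obj,
                            struct intel_ring_buffer *to)
{
        int ret;

        ret = i915_gem_object_sync(obj, to); /* to == NULL: wait for CPU use */
        if (ret)
                return ret;

        /* obj may now be used on `to` without racing its previous ring */
        return 0;
}
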
@@ -2110,189 +2178,178 @@ int i915_gpu_idle(struct drm_device *dev, bool do_retire)
2110 return 0; 2178 return 0;
2111} 2179}
2112 2180
2113static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj, 2181static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
2114 struct intel_ring_buffer *pipelined) 2182 struct drm_i915_gem_object *obj)
2115{ 2183{
2116 struct drm_device *dev = obj->base.dev;
2117 drm_i915_private_t *dev_priv = dev->dev_private; 2184 drm_i915_private_t *dev_priv = dev->dev_private;
2118 u32 size = obj->gtt_space->size;
2119 int regnum = obj->fence_reg;
2120 uint64_t val; 2185 uint64_t val;
2121 2186
2122 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2187 if (obj) {
2123 0xfffff000) << 32; 2188 u32 size = obj->gtt_space->size;
2124 val |= obj->gtt_offset & 0xfffff000;
2125 val |= (uint64_t)((obj->stride / 128) - 1) <<
2126 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2127
2128 if (obj->tiling_mode == I915_TILING_Y)
2129 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2130 val |= I965_FENCE_REG_VALID;
2131 2189
2132 if (pipelined) { 2190 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2133 int ret = intel_ring_begin(pipelined, 6); 2191 0xfffff000) << 32;
2134 if (ret) 2192 val |= obj->gtt_offset & 0xfffff000;
2135 return ret; 2193 val |= (uint64_t)((obj->stride / 128) - 1) <<
2194 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2136 2195
2137 intel_ring_emit(pipelined, MI_NOOP); 2196 if (obj->tiling_mode == I915_TILING_Y)
2138 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); 2197 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2139 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8); 2198 val |= I965_FENCE_REG_VALID;
2140 intel_ring_emit(pipelined, (u32)val);
2141 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2142 intel_ring_emit(pipelined, (u32)(val >> 32));
2143 intel_ring_advance(pipelined);
2144 } else 2199 } else
2145 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val); 2200 val = 0;
2146 2201
2147 return 0; 2202 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
2203 POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
2148} 2204}
2149 2205
2150static int i965_write_fence_reg(struct drm_i915_gem_object *obj, 2206static void i965_write_fence_reg(struct drm_device *dev, int reg,
2151 struct intel_ring_buffer *pipelined) 2207 struct drm_i915_gem_object *obj)
2152{ 2208{
2153 struct drm_device *dev = obj->base.dev;
2154 drm_i915_private_t *dev_priv = dev->dev_private; 2209 drm_i915_private_t *dev_priv = dev->dev_private;
2155 u32 size = obj->gtt_space->size;
2156 int regnum = obj->fence_reg;
2157 uint64_t val; 2210 uint64_t val;
2158 2211
2159 val = (uint64_t)((obj->gtt_offset + size - 4096) & 2212 if (obj) {
2160 0xfffff000) << 32; 2213 u32 size = obj->gtt_space->size;
2161 val |= obj->gtt_offset & 0xfffff000;
2162 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2163 if (obj->tiling_mode == I915_TILING_Y)
2164 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2165 val |= I965_FENCE_REG_VALID;
2166
2167 if (pipelined) {
2168 int ret = intel_ring_begin(pipelined, 6);
2169 if (ret)
2170 return ret;
2171 2214
2172 intel_ring_emit(pipelined, MI_NOOP); 2215 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2173 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2)); 2216 0xfffff000) << 32;
2174 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8); 2217 val |= obj->gtt_offset & 0xfffff000;
2175 intel_ring_emit(pipelined, (u32)val); 2218 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2176 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4); 2219 if (obj->tiling_mode == I915_TILING_Y)
2177 intel_ring_emit(pipelined, (u32)(val >> 32)); 2220 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2178 intel_ring_advance(pipelined); 2221 val |= I965_FENCE_REG_VALID;
2179 } else 2222 } else
2180 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val); 2223 val = 0;
2181 2224
2182 return 0; 2225 I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
2226 POSTING_READ(FENCE_REG_965_0 + reg * 8);
2183} 2227}
2184 2228
2185static int i915_write_fence_reg(struct drm_i915_gem_object *obj, 2229static void i915_write_fence_reg(struct drm_device *dev, int reg,
2186 struct intel_ring_buffer *pipelined) 2230 struct drm_i915_gem_object *obj)
2187{ 2231{
2188 struct drm_device *dev = obj->base.dev;
2189 drm_i915_private_t *dev_priv = dev->dev_private; 2232 drm_i915_private_t *dev_priv = dev->dev_private;
2190 u32 size = obj->gtt_space->size; 2233 u32 val;
2191 u32 fence_reg, val, pitch_val;
2192 int tile_width;
2193
2194 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
-		 (size & -size) != size ||
-		 (obj->gtt_offset & (size - 1)),
-		 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
-		 obj->gtt_offset, obj->map_and_fenceable, size))
-		return -EINVAL;
 
-	if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
-		tile_width = 128;
-	else
-		tile_width = 512;
-
-	/* Note: pitch better be a power of two tile widths */
-	pitch_val = obj->stride / tile_width;
-	pitch_val = ffs(pitch_val) - 1;
-
-	val = obj->gtt_offset;
-	if (obj->tiling_mode == I915_TILING_Y)
-		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I915_FENCE_SIZE_BITS(size);
-	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-	val |= I830_FENCE_REG_VALID;
-
-	fence_reg = obj->fence_reg;
-	if (fence_reg < 8)
-		fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-	else
-		fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		int pitch_val;
+		int tile_width;
 
-	if (pipelined) {
-		int ret = intel_ring_begin(pipelined, 4);
-		if (ret)
-			return ret;
+		WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (obj->gtt_offset & (size - 1)),
+		     "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+		     obj->gtt_offset, obj->map_and_fenceable, size);
 
-		intel_ring_emit(pipelined, MI_NOOP);
-		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(pipelined, fence_reg);
-		intel_ring_emit(pipelined, val);
-		intel_ring_advance(pipelined);
+		if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
+			tile_width = 128;
+		else
+			tile_width = 512;
+
+		/* Note: pitch better be a power of two tile widths */
+		pitch_val = obj->stride / tile_width;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = obj->gtt_offset;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I915_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
 	} else
-		I915_WRITE(fence_reg, val);
+		val = 0;
 
-	return 0;
+	if (reg < 8)
+		reg = FENCE_REG_830_0 + reg * 4;
+	else
+		reg = FENCE_REG_945_8 + (reg - 8) * 4;
+
+	I915_WRITE(reg, val);
+	POSTING_READ(reg);
 }
 
-static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
-				struct intel_ring_buffer *pipelined)
+static void i830_write_fence_reg(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 size = obj->gtt_space->size;
-	int regnum = obj->fence_reg;
 	uint32_t val;
-	uint32_t pitch_val;
 
-	if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
-		 (size & -size) != size ||
-		 (obj->gtt_offset & (size - 1)),
-		 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
-		 obj->gtt_offset, size))
-		return -EINVAL;
+	if (obj) {
+		u32 size = obj->gtt_space->size;
+		uint32_t pitch_val;
 
-	pitch_val = obj->stride / 128;
-	pitch_val = ffs(pitch_val) - 1;
+		WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+		     (size & -size) != size ||
+		     (obj->gtt_offset & (size - 1)),
+		     "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+		     obj->gtt_offset, size);
 
-	val = obj->gtt_offset;
-	if (obj->tiling_mode == I915_TILING_Y)
-		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I830_FENCE_SIZE_BITS(size);
-	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
-	val |= I830_FENCE_REG_VALID;
+		pitch_val = obj->stride / 128;
+		pitch_val = ffs(pitch_val) - 1;
+
+		val = obj->gtt_offset;
+		if (obj->tiling_mode == I915_TILING_Y)
+			val |= 1 << I830_FENCE_TILING_Y_SHIFT;
+		val |= I830_FENCE_SIZE_BITS(size);
+		val |= pitch_val << I830_FENCE_PITCH_SHIFT;
+		val |= I830_FENCE_REG_VALID;
+	} else
+		val = 0;
 
-	if (pipelined) {
-		int ret = intel_ring_begin(pipelined, 4);
-		if (ret)
-			return ret;
+	I915_WRITE(FENCE_REG_830_0 + reg * 4, val);
+	POSTING_READ(FENCE_REG_830_0 + reg * 4);
+}
 
-		intel_ring_emit(pipelined, MI_NOOP);
-		intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
-		intel_ring_emit(pipelined, val);
-		intel_ring_advance(pipelined);
-	} else
-		I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+static void i915_gem_write_fence(struct drm_device *dev, int reg,
+				 struct drm_i915_gem_object *obj)
+{
+	switch (INTEL_INFO(dev)->gen) {
+	case 7:
+	case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
+	case 5:
+	case 4: i965_write_fence_reg(dev, reg, obj); break;
+	case 3: i915_write_fence_reg(dev, reg, obj); break;
+	case 2: i830_write_fence_reg(dev, reg, obj); break;
+	default: break;
+	}
+}
 
-	return 0;
+static inline int fence_number(struct drm_i915_private *dev_priv,
+			       struct drm_i915_fence_reg *fence)
+{
+	return fence - dev_priv->fence_regs;
 }
 
-static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
+					 struct drm_i915_fence_reg *fence,
+					 bool enable)
 {
-	return i915_seqno_passed(ring->get_seqno(ring), seqno);
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	int reg = fence_number(dev_priv, fence);
+
+	i915_gem_write_fence(obj->base.dev, reg, enable ? obj : NULL);
+
+	if (enable) {
+		obj->fence_reg = reg;
+		fence->obj = obj;
+		list_move_tail(&fence->lru_list, &dev_priv->mm.fence_list);
+	} else {
+		obj->fence_reg = I915_FENCE_REG_NONE;
+		fence->obj = NULL;
+		list_del_init(&fence->lru_list);
+	}
 }
 
 static int
-i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
-			    struct intel_ring_buffer *pipelined)
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
 	if (obj->fenced_gpu_access) {
 		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
-			ret = i915_gem_flush_ring(obj->last_fenced_ring,
+			ret = i915_gem_flush_ring(obj->ring,
 						  0, obj->base.write_domain);
 			if (ret)
 				return ret;
@@ -2301,18 +2358,14 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 		obj->fenced_gpu_access = false;
 	}
 
-	if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
-		if (!ring_passed_seqno(obj->last_fenced_ring,
-				       obj->last_fenced_seqno)) {
-			ret = i915_wait_request(obj->last_fenced_ring,
-						obj->last_fenced_seqno,
-						true);
-			if (ret)
-				return ret;
-		}
+	if (obj->last_fenced_seqno) {
+		ret = i915_wait_request(obj->ring,
+					obj->last_fenced_seqno,
+					false);
+		if (ret)
+			return ret;
 
 		obj->last_fenced_seqno = 0;
-		obj->last_fenced_ring = NULL;
 	}
 
 	/* Ensure that all CPU reads are completed before installing a fence
@@ -2327,34 +2380,29 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 	int ret;
 
-	if (obj->tiling_mode)
-		i915_gem_release_mmap(obj);
-
-	ret = i915_gem_object_flush_fence(obj, NULL);
+	ret = i915_gem_object_flush_fence(obj);
 	if (ret)
 		return ret;
 
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count);
-		i915_gem_clear_fence_reg(obj->base.dev,
-					 &dev_priv->fence_regs[obj->fence_reg]);
+	if (obj->fence_reg == I915_FENCE_REG_NONE)
+		return 0;
 
-		obj->fence_reg = I915_FENCE_REG_NONE;
-	}
+	i915_gem_object_update_fence(obj,
+				     &dev_priv->fence_regs[obj->fence_reg],
+				     false);
+	i915_gem_object_fence_lost(obj);
 
 	return 0;
 }
 
 static struct drm_i915_fence_reg *
-i915_find_fence_reg(struct drm_device *dev,
-		    struct intel_ring_buffer *pipelined)
+i915_find_fence_reg(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_fence_reg *reg, *first, *avail;
+	struct drm_i915_fence_reg *reg, *avail;
 	int i;
 
 	/* First try to find a free reg */
@@ -2372,204 +2420,77 @@ i915_find_fence_reg(struct drm_device *dev,
 	return NULL;
 
 	/* None available, try to steal one or wait for a user to finish */
-	avail = first = NULL;
 	list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
 		if (reg->pin_count)
 			continue;
 
-		if (first == NULL)
-			first = reg;
-
-		if (!pipelined ||
-		    !reg->obj->last_fenced_ring ||
-		    reg->obj->last_fenced_ring == pipelined) {
-			avail = reg;
-			break;
-		}
+		return reg;
 	}
 
-	if (avail == NULL)
-		avail = first;
-
-	return avail;
+	return NULL;
 }
 
 /**
- * i915_gem_object_get_fence - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up fencing for an object
  * @obj: object to map through a fence reg
- * @pipelined: ring on which to queue the change, or NULL for CPU access
- * @interruptible: must we wait uninterruptibly for the register to retire?
  *
  * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
- *
 * This function walks the fence regs looking for a free one for @obj,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
+ *
+ * For an untiled surface, this removes any existing fence.
 */
 int
-i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
-			  struct intel_ring_buffer *pipelined)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool enable = obj->tiling_mode != I915_TILING_NONE;
 	struct drm_i915_fence_reg *reg;
 	int ret;
 
-	/* XXX disable pipelining. There are bugs. Shocking. */
-	pipelined = NULL;
+	/* Have we updated the tiling parameters upon the object and so
+	 * will need to serialise the write to the associated fence register?
+	 */
+	if (obj->tiling_changed) {
+		ret = i915_gem_object_flush_fence(obj);
+		if (ret)
+			return ret;
+	}
 
 	/* Just update our place in the LRU if our fence is getting reused. */
 	if (obj->fence_reg != I915_FENCE_REG_NONE) {
 		reg = &dev_priv->fence_regs[obj->fence_reg];
-		list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-
-		if (obj->tiling_changed) {
-			ret = i915_gem_object_flush_fence(obj, pipelined);
-			if (ret)
-				return ret;
-
-			if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
-				pipelined = NULL;
-
-			if (pipelined) {
-				reg->setup_seqno =
-					i915_gem_next_request_seqno(pipelined);
-				obj->last_fenced_seqno = reg->setup_seqno;
-				obj->last_fenced_ring = pipelined;
-			}
-
-			goto update;
+		if (!obj->tiling_changed) {
+			list_move_tail(&reg->lru_list,
+				       &dev_priv->mm.fence_list);
+			return 0;
 		}
+	} else if (enable) {
+		reg = i915_find_fence_reg(dev);
+		if (reg == NULL)
+			return -EDEADLK;
 
-		if (!pipelined) {
-			if (reg->setup_seqno) {
-				if (!ring_passed_seqno(obj->last_fenced_ring,
-						       reg->setup_seqno)) {
-					ret = i915_wait_request(obj->last_fenced_ring,
-								reg->setup_seqno,
-								true);
-					if (ret)
-						return ret;
-				}
+		if (reg->obj) {
+			struct drm_i915_gem_object *old = reg->obj;
 
-				reg->setup_seqno = 0;
-			}
-		} else if (obj->last_fenced_ring &&
-			   obj->last_fenced_ring != pipelined) {
-			ret = i915_gem_object_flush_fence(obj, pipelined);
+			ret = i915_gem_object_flush_fence(old);
 			if (ret)
 				return ret;
-		}
-
-		return 0;
-	}
-
-	reg = i915_find_fence_reg(dev, pipelined);
-	if (reg == NULL)
-		return -EDEADLK;
-
-	ret = i915_gem_object_flush_fence(obj, pipelined);
-	if (ret)
-		return ret;
-
-	if (reg->obj) {
-		struct drm_i915_gem_object *old = reg->obj;
-
-		drm_gem_object_reference(&old->base);
-
-		if (old->tiling_mode)
-			i915_gem_release_mmap(old);
 
-		ret = i915_gem_object_flush_fence(old, pipelined);
-		if (ret) {
-			drm_gem_object_unreference(&old->base);
-			return ret;
+			i915_gem_object_fence_lost(old);
 		}
-
-		if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
-			pipelined = NULL;
-
-		old->fence_reg = I915_FENCE_REG_NONE;
-		old->last_fenced_ring = pipelined;
-		old->last_fenced_seqno =
-			pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-
-		drm_gem_object_unreference(&old->base);
-	} else if (obj->last_fenced_seqno == 0)
-		pipelined = NULL;
-
-	reg->obj = obj;
-	list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
-	obj->fence_reg = reg - dev_priv->fence_regs;
-	obj->last_fenced_ring = pipelined;
+	} else
+		return 0;
 
-	reg->setup_seqno =
-		pipelined ? i915_gem_next_request_seqno(pipelined) : 0;
-	obj->last_fenced_seqno = reg->setup_seqno;
-
-update:
+	i915_gem_object_update_fence(obj, reg, enable);
 	obj->tiling_changed = false;
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		ret = sandybridge_write_fence_reg(obj, pipelined);
-		break;
-	case 5:
-	case 4:
-		ret = i965_write_fence_reg(obj, pipelined);
-		break;
-	case 3:
-		ret = i915_write_fence_reg(obj, pipelined);
-		break;
-	case 2:
-		ret = i830_write_fence_reg(obj, pipelined);
-		break;
-	}
-
-	return ret;
-}
-
-/**
- * i915_gem_clear_fence_reg - clear out fence register info
- * @obj: object to clear
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj.
- */
-static void
-i915_gem_clear_fence_reg(struct drm_device *dev,
-			 struct drm_i915_fence_reg *reg)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t fence_reg = reg - dev_priv->fence_regs;
 
-	switch (INTEL_INFO(dev)->gen) {
-	case 7:
-	case 6:
-		I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
-		break;
-	case 5:
-	case 4:
-		I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
-		break;
-	case 3:
-		if (fence_reg >= 8)
-			fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
-		else
-	case 2:
-			fence_reg = FENCE_REG_830_0 + fence_reg * 4;
-
-		I915_WRITE(fence_reg, 0);
-		break;
-	}
-
-	list_del_init(&reg->lru_list);
-	reg->obj = NULL;
-	reg->setup_seqno = 0;
-	reg->pin_count = 0;
+	return 0;
 }
 
 /**
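After this refactor every per-generation fence writer shares the (dev, reg, obj) signature, and passing obj == NULL programs the register to zero. A hedged sketch of a reset helper built purely on that contract (illustrative only; the series' actual i915_gem_reset_fences() may differ):

	/* Hypothetical sketch: clear every fence register via the new
	 * obj == NULL convention of i915_gem_write_fence(). Not the
	 * kernel's implementation, just an illustration of the contract. */
	static void sketch_reset_all_fences(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int i;

		for (i = 0; i < dev_priv->num_fence_regs; i++) {
			struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

			/* NULL object -> val computed as 0 -> register cleared */
			i915_gem_write_fence(dev, i, NULL);

			if (fence->obj)
				fence->obj->fence_reg = I915_FENCE_REG_NONE;
			fence->obj = NULL;
			list_del_init(&fence->lru_list);
		}
	}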
@@ -2926,11 +2847,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
  * Prepare buffer for display plane (scanout, cursors, etc).
  * Can be called from an uninterruptible phase (modesetting) and allows
  * any flushes to be pipelined (for pageflips).
- *
- * For the display plane, we want to be in the GTT but out of any write
- * domains. So in many ways this looks like set_to_gtt_domain() apart from the
- * ability to pipeline the waits, pinning and any additional subtleties
- * that may differentiate the display plane from ordinary buffers.
 */
 int
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
@@ -2945,8 +2861,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 		return ret;
 
 	if (pipelined != obj->ring) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret == -ERESTARTSYS)
+		ret = i915_gem_object_sync(obj, pipelined);
+		if (ret)
 			return ret;
 	}
 
@@ -3031,9 +2947,11 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
-		return ret;
+	if (write || obj->pending_gpu_write) {
+		ret = i915_gem_object_wait_rendering(obj);
+		if (ret)
+			return ret;
+	}
 
 	i915_gem_object_flush_gtt_write_domain(obj);
 
@@ -3613,7 +3531,15 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
 	pd_offset <<= 16;
 
 	if (INTEL_INFO(dev)->gen == 6) {
-		uint32_t ecochk = I915_READ(GAM_ECOCHK);
+		uint32_t ecochk, gab_ctl, ecobits;
+
+		ecobits = I915_READ(GAC_ECO_BITS);
+		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
+
+		gab_ctl = I915_READ(GAB_CTL);
+		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
+
+		ecochk = I915_READ(GAM_ECOCHK);
 		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
 			   ECOCHK_PPGTT_CACHE64B);
 		I915_WRITE(GFX_MODE, GFX_MODE_ENABLE(GFX_PPGTT_ENABLE));
@@ -3804,9 +3730,7 @@ i915_gem_load(struct drm_device *dev)
 	dev_priv->num_fence_regs = 8;
 
 	/* Initialize fence registers to zero */
-	for (i = 0; i < dev_priv->num_fence_regs; i++) {
-		i915_gem_clear_fence_reg(dev, &dev_priv->fence_regs[i]);
-	}
+	i915_gem_reset_fences(dev);
 
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
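A hedged illustration of the new fence life cycle as these hunks leave it: get_fence() sizes its behaviour to the object's tiling, while put_fence() waits for any fenced GPU access and then releases the register. Error handling is trimmed; the names are the ones introduced above.

	static int sketch_fence_lifecycle(struct drm_i915_gem_object *obj)
	{
		int ret;

		/* Tiled: install or refresh a fence (stealing via the LRU,
		 * -EDEADLK if every register is pinned). Untiled: drop any
		 * stale fence left over from a previous tiling mode. */
		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		/* ... GTT access that relies on the fence ... */

		/* Explicit release, e.g. before CPU relocation writes. */
		return i915_gem_object_put_fence(obj);
	}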
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 254e2f6ac4f0..68ec0130a626 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -381,7 +381,11 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	uint32_t __iomem *reloc_entry;
 	void __iomem *reloc_page;
 
-	ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+	ret = i915_gem_object_set_to_gtt_domain(obj, true);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_put_fence(obj);
 	if (ret)
 		return ret;
 
@@ -530,18 +534,13 @@ pin_and_fence_object(struct drm_i915_gem_object *obj,
 
 	if (has_fenced_gpu_access) {
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			if (obj->tiling_mode) {
-				ret = i915_gem_object_get_fence(obj, ring);
-				if (ret)
-					goto err_unpin;
+			ret = i915_gem_object_get_fence(obj);
+			if (ret)
+				goto err_unpin;
 
+			if (i915_gem_object_pin_fence(obj))
 				entry->flags |= __EXEC_OBJECT_HAS_FENCE;
-				i915_gem_object_pin_fence(obj);
-			} else {
-				ret = i915_gem_object_put_fence(obj);
-				if (ret)
-					goto err_unpin;
-			}
+
 			obj->pending_fenced_gpu_access = true;
 		}
 	}
@@ -840,64 +839,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 	return 0;
 }
 
-static bool
-intel_enable_semaphores(struct drm_device *dev)
-{
-	if (INTEL_INFO(dev)->gen < 6)
-		return 0;
-
-	if (i915_semaphores >= 0)
-		return i915_semaphores;
-
-	/* Disable semaphores on SNB */
-	if (INTEL_INFO(dev)->gen == 6)
-		return 0;
-
-	return 1;
-}
-
-static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *to)
-{
-	struct intel_ring_buffer *from = obj->ring;
-	u32 seqno;
-	int ret, idx;
-
-	if (from == NULL || to == from)
-		return 0;
-
-	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (!intel_enable_semaphores(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
-
-	idx = intel_ring_sync_index(from, to);
-
-	seqno = obj->last_rendering_seqno;
-	if (seqno <= from->sync_seqno[idx])
-		return 0;
-
-	if (seqno == from->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(from, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		seqno = request->seqno;
-	}
-
-	from->sync_seqno[idx] = seqno;
-
-	return to->sync_to(to, from, seqno - 1);
-}
-
 static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
@@ -959,7 +900,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
-		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		ret = i915_gem_object_sync(obj, ring);
 		if (ret)
 			return ret;
 	}
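The protocol the pin_and_fence hunk settles on: get_fence() is now called unconditionally (it also clears stale fences on untiled objects), and pin_fence() reports whether a fence was actually assigned and therefore needs unpinning later. A condensed restatement of the same flow, using only names from this diff:

	static int sketch_fence_for_execbuf(struct drm_i915_gem_object *obj,
					    struct drm_i915_gem_exec_object2 *entry)
	{
		int ret;

		ret = i915_gem_object_get_fence(obj);
		if (ret)
			return ret;

		/* Only mark the fence busy if one was really installed. */
		if (i915_gem_object_pin_fence(obj))
			entry->flags |= __EXEC_OBJECT_HAS_FENCE;

		obj->pending_fenced_gpu_access = true;
		return 0;
	}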
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 4fb875de32e6..25c8bf9d1d4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 					     GFP_KERNEL);
 	if (!ppgtt->pt_dma_addr)
 		goto err_pt_alloc;
-	}
 
 	for (i = 0; i < ppgtt->num_pd_entries; i++) {
 		dma_addr_t pt_addr;
-		if (dev_priv->mm.gtt->needs_dmar) {
-			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
-					       0, 4096,
-					       PCI_DMA_BIDIRECTIONAL);
+
+		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
+				       0, 4096,
+				       PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
 
 		}
-			ppgtt->pt_dma_addr[i] = pt_addr;
-		} else
-			pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+		ppgtt->pt_dma_addr[i] = pt_addr;
 	}
 
 	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
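With the needs_dmar branch gone, every page-table page is DMA-mapped unconditionally. A hedged sketch of one iteration with its natural companion check (illustrative; the driver's exact unwind path is not shown here):

	static int sketch_map_pt_page(struct drm_device *dev,
				      struct i915_hw_ppgtt *ppgtt, int i)
	{
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
				       0, 4096, PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, pt_addr))
			return -EIO;	/* caller unwinds, as err_pt_alloc does */

		ppgtt->pt_dma_addr[i] = pt_addr;
		return 0;
	}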
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index 13b028994b2b..0e72abb9f701 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -34,6 +34,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
+#include "i915_drv.h"
 
 typedef struct _drm_i915_batchbuffer32 {
 	int start;		/* agp offset */
@@ -181,7 +182,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
 			 (unsigned long)request);
 }
 
-drm_ioctl_compat_t *i915_compat_ioctls[] = {
+static drm_ioctl_compat_t *i915_compat_ioctls[] = {
 	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
 	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 	[DRM_I915_GETPARAM] = compat_i915_getparam,
@@ -189,6 +190,7 @@ drm_ioctl_compat_t *i915_compat_ioctls[] = {
 	[DRM_I915_ALLOC] = compat_i915_alloc
 };
 
+#ifdef CONFIG_COMPAT
 /**
  * Called whenever a 32-bit process running under a 64-bit kernel
  * performs an ioctl on /dev/dri/card<n>.
@@ -217,3 +219,4 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 
 	return ret;
 }
+#endif
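With the table made static and the entry point guarded by CONFIG_COMPAT, the dispatch itself keeps the usual drm shape. A simplified sketch (condensed from, not identical to, the file's i915_compat_ioctl()):

	#ifdef CONFIG_COMPAT
	long sketch_compat_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
	{
		unsigned int nr = DRM_IOCTL_NR(cmd);
		drm_ioctl_compat_t *fn = NULL;

		if (nr < DRM_COMMAND_BASE)
			return drm_compat_ioctl(filp, cmd, arg);

		if (nr < DRM_COMMAND_BASE + ARRAY_SIZE(i915_compat_ioctls))
			fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];

		/* translate if we have a handler, else fall back */
		return fn ? (*fn)(filp, cmd, arg) : drm_ioctl(filp, cmd, arg);
	}
	#endif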
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index febddc2952fb..ab023ca73b45 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -451,6 +451,31 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 	}
 }
 
+static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
+				u32 pm_iir)
+{
+	unsigned long flags;
+
+	/*
+	 * IIR bits should never already be set because IMR should
+	 * prevent an interrupt from being shown in IIR. The warning
+	 * displays a case where we've unsafely cleared
+	 * dev_priv->pm_iir. Although missing an interrupt of the same
+	 * type is not a problem, it displays a problem in the logic.
+	 *
+	 * The mask bit in IMR is cleared by rps_work.
+	 */
+
+	spin_lock_irqsave(&dev_priv->rps_lock, flags);
+	WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
+	dev_priv->pm_iir |= pm_iir;
+	I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+	POSTING_READ(GEN6_PMIMR);
+	spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
+
+	queue_work(dev_priv->wq, &dev_priv->rps_work);
+}
+
 static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -532,16 +557,8 @@ static irqreturn_t valleyview_irq_handler(DRM_IRQ_ARGS)
 			if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
 				blc_event = true;
 
-		if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-			unsigned long flags;
-			spin_lock_irqsave(&dev_priv->rps_lock, flags);
-			WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-			dev_priv->pm_iir |= pm_iir;
-			I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-			POSTING_READ(GEN6_PMIMR);
-			spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
-			queue_work(dev_priv->wq, &dev_priv->rps_work);
-		}
+		if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+			gen6_queue_rps_work(dev_priv, pm_iir);
 
 		I915_WRITE(GTIIR, gt_iir);
 		I915_WRITE(GEN6_PMIIR, pm_iir);
@@ -655,16 +672,8 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 		pch_irq_handler(dev);
 	}
 
-	if (pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-		unsigned long flags;
-		spin_lock_irqsave(&dev_priv->rps_lock, flags);
-		WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-		dev_priv->pm_iir |= pm_iir;
-		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-		POSTING_READ(GEN6_PMIMR);
-		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
-		queue_work(dev_priv->wq, &dev_priv->rps_work);
-	}
+	if (pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		gen6_queue_rps_work(dev_priv, pm_iir);
 
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
@@ -764,25 +773,8 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 		i915_handle_rps_change(dev);
 	}
 
-	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) {
-		/*
-		 * IIR bits should never already be set because IMR should
-		 * prevent an interrupt from being shown in IIR. The warning
-		 * displays a case where we've unsafely cleared
-		 * dev_priv->pm_iir. Although missing an interrupt of the same
-		 * type is not a problem, it displays a problem in the logic.
-		 *
-		 * The mask bit in IMR is cleared by rps_work.
-		 */
-		unsigned long flags;
-		spin_lock_irqsave(&dev_priv->rps_lock, flags);
-		WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-		dev_priv->pm_iir |= pm_iir;
-		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
-		POSTING_READ(GEN6_PMIMR);
-		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
-		queue_work(dev_priv->wq, &dev_priv->rps_work);
-	}
+	if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS)
+		gen6_queue_rps_work(dev_priv, pm_iir);
 
 	/* should clear PCH hotplug event before clear CPU irq */
 	I915_WRITE(SDEIIR, pch_iir);
@@ -1376,7 +1368,8 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 	obj = work->pending_flip_obj;
 	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = DSPSURF(intel_crtc->plane);
-		stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
+		stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
+					obj->gtt_offset;
 	} else {
 		int dspaddr = DSPADDR(intel_crtc->plane);
 		stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
@@ -1875,6 +1868,36 @@ static bool kick_ring(struct intel_ring_buffer *ring)
 	return false;
 }
 
+static bool i915_hangcheck_hung(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->hangcheck_count++ > 1) {
+		DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+		i915_handle_error(dev, true);
+
+		if (!IS_GEN2(dev)) {
+			/* Is the chip hanging on a WAIT_FOR_EVENT?
+			 * If so we can simply poke the RB_WAIT bit
+			 * and break the hang. This should work on
+			 * all but the second generation chipsets.
+			 */
+			if (kick_ring(&dev_priv->ring[RCS]))
+				return false;
+
+			if (HAS_BSD(dev) && kick_ring(&dev_priv->ring[VCS]))
+				return false;
+
+			if (HAS_BLT(dev) && kick_ring(&dev_priv->ring[BCS]))
+				return false;
+		}
+
+		return true;
+	}
+
+	return false;
+}
+
 /**
  * This is called when the chip hasn't reported back with completed
  * batchbuffers in a long time. The first time this is called we simply record
@@ -1895,9 +1918,14 @@ void i915_hangcheck_elapsed(unsigned long data)
 	if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
 	    i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
 	    i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
-		dev_priv->hangcheck_count = 0;
-		if (err)
+		if (err) {
+			if (i915_hangcheck_hung(dev))
+				return;
+
 			goto repeat;
+		}
+
+		dev_priv->hangcheck_count = 0;
 		return;
 	}
 
@@ -1919,30 +1947,8 @@ void i915_hangcheck_elapsed(unsigned long data)
 	    dev_priv->last_acthd_blt == acthd_blt &&
 	    dev_priv->last_instdone == instdone &&
 	    dev_priv->last_instdone1 == instdone1) {
-		if (dev_priv->hangcheck_count++ > 1) {
-			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
-			i915_handle_error(dev, true);
-
-			if (!IS_GEN2(dev)) {
-				/* Is the chip hanging on a WAIT_FOR_EVENT?
-				 * If so we can simply poke the RB_WAIT bit
-				 * and break the hang. This should work on
-				 * all but the second generation chipsets.
-				 */
-				if (kick_ring(&dev_priv->ring[RCS]))
-					goto repeat;
-
-				if (HAS_BSD(dev) &&
-				    kick_ring(&dev_priv->ring[VCS]))
-					goto repeat;
-
-				if (HAS_BLT(dev) &&
-				    kick_ring(&dev_priv->ring[BCS]))
-					goto repeat;
-			}
-
+		if (i915_hangcheck_hung(dev))
 			return;
-		}
 	} else {
 		dev_priv->hangcheck_count = 0;
 
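gen6_queue_rps_work() masks the event in GEN6_PMIMR and defers the real work to dev_priv->rps_work. A hedged sketch of the consumer half of that IMR/IIR handshake (an illustration of the pattern, not the driver's exact work function):

	static void sketch_rps_work(struct work_struct *work)
	{
		struct drm_i915_private *dev_priv =
			container_of(work, struct drm_i915_private, rps_work);
		unsigned long flags;
		u32 pm_iir;

		spin_lock_irqsave(&dev_priv->rps_lock, flags);
		pm_iir = dev_priv->pm_iir;
		dev_priv->pm_iir = 0;
		I915_WRITE(GEN6_PMIMR, 0);	/* unmask the bit set in the ISR */
		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);

		if (pm_iir == 0)
			return;

		/* ... adjust the GPU frequency based on pm_iir ... */
	}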
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6924f44a88df..5ac9837e49a5 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -127,6 +127,13 @@
 #define   ECOCHK_PPGTT_CACHE64B	(0x3<<3)
 #define   ECOCHK_PPGTT_CACHE4B	(0x0<<3)
 
+#define GAC_ECO_BITS			0x14090
+#define   ECOBITS_PPGTT_CACHE64B	(3<<8)
+#define   ECOBITS_PPGTT_CACHE4B		(0<<8)
+
+#define GAB_CTL				0x24000
+#define   GAB_CTL_CONT_AFTER_PAGEFAULT	(1<<8)
+
 /* VGA stuff */
 
 #define VGA_ST01_MDA 0x3ba
@@ -224,6 +231,7 @@
 #define   MI_BATCH_NON_SECURE		(1)
 #define   MI_BATCH_NON_SECURE_I965	(1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+#define   MI_BATCH_GTT		(2<<6) /* aliased with (1<<7) on gen4 */
 #define MI_SEMAPHORE_MBOX	MI_INSTR(0x16, 1) /* gen6+ */
 #define  MI_SEMAPHORE_GLOBAL_GTT    (1<<22)
 #define  MI_SEMAPHORE_UPDATE	    (1<<21)
@@ -490,6 +498,7 @@
 */
 # define _3D_CHICKEN2_WM_READ_PIPELINED			(1 << 14)
 #define _3D_CHICKEN3	0x02090
+#define  _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL		(1 << 5)
 
 #define MI_MODE		0x0209c
 # define VS_TIMER_DISPATCH				(1 << 6)
@@ -631,6 +640,7 @@
 #define   CM0_MASK_SHIFT          16
 #define   CM0_IZ_OPT_DISABLE      (1<<6)
 #define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define	  CM0_STC_EVICT_DISABLE_LRA_SNB	(1<<5)
 #define   CM0_DEPTH_EVICT_DISABLE (1<<4)
 #define   CM0_COLOR_EVICT_DISABLE (1<<3)
 #define   CM0_DEPTH_WRITE_DISABLE (1<<1)
@@ -682,6 +692,21 @@
 
 #define GEN6_BSD_RNCID			0x12198
 
+#define GEN7_FF_THREAD_MODE		0x20a0
+#define   GEN7_FF_SCHED_MASK		0x0077070
+#define   GEN7_FF_TS_SCHED_HS1		(0x5<<16)
+#define   GEN7_FF_TS_SCHED_HS0		(0x3<<16)
+#define   GEN7_FF_TS_SCHED_LOAD_BALANCE	(0x1<<16)
+#define   GEN7_FF_TS_SCHED_HW		(0x0<<16) /* Default */
+#define   GEN7_FF_VS_SCHED_HS1		(0x5<<12)
+#define   GEN7_FF_VS_SCHED_HS0		(0x3<<12)
+#define   GEN7_FF_VS_SCHED_LOAD_BALANCE	(0x1<<12) /* Default */
+#define   GEN7_FF_VS_SCHED_HW		(0x0<<12)
+#define   GEN7_FF_DS_SCHED_HS1		(0x5<<4)
+#define   GEN7_FF_DS_SCHED_HS0		(0x3<<4)
+#define   GEN7_FF_DS_SCHED_LOAD_BALANCE	(0x1<<4)  /* Default */
+#define   GEN7_FF_DS_SCHED_HW		(0x0<<4)
+
 /*
 * Framebuffer compression (915+ only)
 */
@@ -2860,6 +2885,13 @@
 #define DSPSURF(plane) _PIPE(plane, _DSPASURF, _DSPBSURF)
 #define DSPTILEOFF(plane) _PIPE(plane, _DSPATILEOFF, _DSPBTILEOFF)
 
+/* Display/Sprite base address macros */
+#define DISP_BASEADDR_MASK	(0xfffff000)
+#define I915_LO_DISPBASE(val)	(val & ~DISP_BASEADDR_MASK)
+#define I915_HI_DISPBASE(val)	(val & DISP_BASEADDR_MASK)
+#define I915_MODIFY_DISPBASE(reg, gfx_addr) \
+	(I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(I915_READ(reg))))
+
 /* VBIOS flags */
 #define SWF00			0x71410
 #define SWF01			0x71414
@@ -3648,6 +3680,9 @@
 #define  FDI_LINK_TRAIN_PATTERN_IDLE_CPT	(2<<8)
 #define  FDI_LINK_TRAIN_NORMAL_CPT		(3<<8)
 #define  FDI_LINK_TRAIN_PATTERN_MASK_CPT	(3<<8)
+/* LPT */
+#define  FDI_PORT_WIDTH_2X_LPT			(1<<19)
+#define  FDI_PORT_WIDTH_1X_LPT			(0<<19)
 
 #define _FDI_RXA_MISC			0xf0010
 #define _FDI_RXB_MISC			0xf1010
@@ -3891,6 +3926,10 @@
 #define GT_FIFO_FREE_ENTRIES			0x120008
 #define GT_FIFO_NUM_RESERVED_ENTRIES		20
 
+#define GEN6_UCGCTL1				0x9400
+# define GEN6_BLBUNIT_CLOCK_GATE_DISABLE		(1 << 5)
+# define GEN6_CSUNIT_CLOCK_GATE_DISABLE			(1 << 7)
+
 #define GEN6_UCGCTL2				0x9404
 # define GEN6_RCZUNIT_CLOCK_GATE_DISABLE		(1 << 13)
 # define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE		(1 << 12)
@@ -3970,6 +4009,11 @@
 	 GEN6_PM_RP_DOWN_THRESHOLD | \
 	 GEN6_PM_RP_DOWN_TIMEOUT)
 
+#define GEN6_GT_GFX_RC6_LOCKED			0x138104
+#define GEN6_GT_GFX_RC6				0x138108
+#define GEN6_GT_GFX_RC6p			0x13810C
+#define GEN6_GT_GFX_RC6pp			0x138110
+
 #define GEN6_PCODE_MAILBOX			0x138124
 #define   GEN6_PCODE_READY			(1<<31)
 #define   GEN6_READ_OC_PARAMS			0xc
@@ -4170,6 +4214,10 @@
 #define  WRPLL_PLL_SELECT_SSC			(0x01<<28)
 #define  WRPLL_PLL_SELECT_NON_SCC		(0x02<<28)
 #define  WRPLL_PLL_SELECT_LCPLL_2700		(0x03<<28)
+/* WRPLL divider programming */
+#define  WRPLL_DIVIDER_REFERENCE(x)		((x)<<0)
+#define  WRPLL_DIVIDER_POST(x)			((x)<<8)
+#define  WRPLL_DIVIDER_FEEDBACK(x)		((x)<<16)
 
 /* Port clock selection */
 #define PORT_CLK_SEL_A			0x46100
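The new I915_MODIFY_DISPBASE helper is a read-modify-write: it keeps whatever sits in the low 12 bits of the register and replaces only the page-aligned base (DISP_BASEADDR_MASK). Spelled out, assuming gfx_addr is page-aligned:

	static void sketch_modify_dispbase(drm_i915_private_t *dev_priv,
					   u32 reg, u32 gfx_addr)
	{
		u32 cur = I915_READ(reg);	/* preserve non-base low bits */

		I915_WRITE(reg, gfx_addr | I915_LO_DISPBASE(cur));
		/* same effect as: I915_MODIFY_DISPBASE(reg, gfx_addr); */
	}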
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 2b5eb229ff2c..0c3e3bf67c28 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -879,17 +879,7 @@ int i915_restore_state(struct drm_device *dev)
 	mutex_unlock(&dev->struct_mutex);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		intel_init_clock_gating(dev);
-
-	if (IS_IRONLAKE_M(dev)) {
-		ironlake_enable_drps(dev);
-		intel_init_emon(dev);
-	}
-
-	if (INTEL_INFO(dev)->gen >= 6) {
-		gen6_enable_rps(dev_priv);
-		gen6_update_ring_freq(dev_priv);
-	}
+		intel_modeset_init_hw(dev);
 
 	mutex_lock(&dev->struct_mutex);
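The restore path now makes one call instead of three subsystem-specific re-enables. A hedged sketch of what that consolidated hook plausibly covers, assembled from exactly the calls this hunk removes (the real intel_modeset_init_hw() lives in the display code and may differ):

	void sketch_modeset_init_hw(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;

		intel_init_clock_gating(dev);

		if (IS_IRONLAKE_M(dev)) {
			ironlake_enable_drps(dev);
			intel_init_emon(dev);
		}

		if (INTEL_INFO(dev)->gen >= 6) {
			gen6_enable_rps(dev_priv);
			gen6_update_ring_freq(dev_priv);
		}
	}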
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
new file mode 100644
index 000000000000..79f83445afa0
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -0,0 +1,111 @@
+/*
+ * Copyright © 2012 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Ben Widawsky <ben@bwidawsk.net>
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include "i915_drv.h"
+
+static u32 calc_residency(struct drm_device *dev, const u32 reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u64 raw_time; /* 32b value may overflow during fixed point math */
+
+	if (!intel_enable_rc6(dev))
+		return 0;
+
+	raw_time = I915_READ(reg) * 128ULL;
+	return DIV_ROUND_UP_ULL(raw_time, 100000);
+}
+
+static ssize_t
+show_rc6_mask(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	return snprintf(buf, PAGE_SIZE, "%x", intel_enable_rc6(dminor->dev));
+}
+
+static ssize_t
+show_rc6_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
+	return snprintf(buf, PAGE_SIZE, "%u", rc6_residency);
+}
+
+static ssize_t
+show_rc6p_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
+	return snprintf(buf, PAGE_SIZE, "%u", rc6p_residency);
+}
+
+static ssize_t
+show_rc6pp_ms(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct drm_minor *dminor = container_of(dev, struct drm_minor, kdev);
+	u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
+	return snprintf(buf, PAGE_SIZE, "%u", rc6pp_residency);
+}
+
+static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
+static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
+static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
+static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
+
+static struct attribute *rc6_attrs[] = {
+	&dev_attr_rc6_enable.attr,
+	&dev_attr_rc6_residency_ms.attr,
+	&dev_attr_rc6p_residency_ms.attr,
+	&dev_attr_rc6pp_residency_ms.attr,
+	NULL
+};
+
+static struct attribute_group rc6_attr_group = {
+	.name = power_group_name,
+	.attrs = rc6_attrs
+};
+
+void i915_setup_sysfs(struct drm_device *dev)
+{
+	int ret;
+
+	/* ILK doesn't have any residency information */
+	if (INTEL_INFO(dev)->gen < 6)
+		return;
+
+	ret = sysfs_merge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+	if (ret)
+		DRM_ERROR("sysfs setup failed\n");
+}
+
+void i915_teardown_sysfs(struct drm_device *dev)
+{
+	sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+}
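Because the group is merged under power_group_name, the attributes appear in the device's power/ directory. A minimal userspace reader (the card0 path and sysfs mount point are assumptions about the target system); the value is milliseconds derived from the 1.28 us hardware counter, as calc_residency() shows:

	#include <stdio.h>

	int main(void)
	{
		unsigned int ms;
		FILE *f = fopen("/sys/class/drm/card0/power/rc6_residency_ms", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%u", &ms) == 1)
			printf("RC6 residency: %u ms\n", ms);
		fclose(f);
		return 0;
	}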
diff --git a/drivers/gpu/drm/i915/i915_trace_points.c b/drivers/gpu/drm/i915/i915_trace_points.c
index ead876eb6ea0..f1df2bd4ecf4 100644
--- a/drivers/gpu/drm/i915/i915_trace_points.c
+++ b/drivers/gpu/drm/i915/i915_trace_points.c
@@ -7,5 +7,7 @@
 
 #include "i915_drv.h"
 
+#ifndef __CHECKER__
 #define CREATE_TRACE_POINTS
 #include "i915_trace.h"
+#endif
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index f152b2a7fc54..f413899475e9 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -9,6 +9,7 @@
 #include <acpi/acpi_drivers.h>
 
 #include "drmP.h"
+#include "i915_drv.h"
 
 #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
 
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 70b0f1abf149..0976137ab79a 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -55,18 +55,36 @@ static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
 			    struct intel_crt, base);
 }
 
-static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
+static void pch_crt_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 temp, reg;
+	u32 temp;
 
-	if (HAS_PCH_SPLIT(dev))
-		reg = PCH_ADPA;
-	else
-		reg = ADPA;
+	temp = I915_READ(PCH_ADPA);
+	temp &= ~ADPA_DAC_ENABLE;
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		temp |= ADPA_DAC_ENABLE;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		/* Just leave port enable cleared */
+		break;
+	}
+
+	I915_WRITE(PCH_ADPA, temp);
+}
 
-	temp = I915_READ(reg);
+static void gmch_crt_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 temp;
+
+	temp = I915_READ(ADPA);
 	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
 	temp &= ~ADPA_DAC_ENABLE;
 
@@ -85,7 +103,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 		break;
 	}
 
-	I915_WRITE(reg, temp);
+	I915_WRITE(ADPA, temp);
 }
 
 static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -516,12 +534,20 @@ static void intel_crt_reset(struct drm_connector *connector)
 * Routines for controlling stuff on the analog port
 */
 
-static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
-	.dpms = intel_crt_dpms,
+static const struct drm_encoder_helper_funcs pch_encoder_funcs = {
 	.mode_fixup = intel_crt_mode_fixup,
 	.prepare = intel_encoder_prepare,
 	.commit = intel_encoder_commit,
 	.mode_set = intel_crt_mode_set,
+	.dpms = pch_crt_dpms,
+};
+
+static const struct drm_encoder_helper_funcs gmch_encoder_funcs = {
+	.mode_fixup = intel_crt_mode_fixup,
+	.prepare = intel_encoder_prepare,
+	.commit = intel_encoder_commit,
+	.mode_set = intel_crt_mode_set,
+	.dpms = gmch_crt_dpms,
 };
 
 static const struct drm_connector_funcs intel_crt_connector_funcs = {
@@ -567,6 +593,7 @@ void intel_crt_init(struct drm_device *dev)
 	struct intel_crt *crt;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	const struct drm_encoder_helper_funcs *encoder_helper_funcs;
 
 	/* Skip machines without VGA that falsely report hotplug events */
 	if (dmi_check_system(intel_no_crt))
@@ -602,7 +629,12 @@ void intel_crt_init(struct drm_device *dev)
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 
-	drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
+	if (HAS_PCH_SPLIT(dev))
+		encoder_helper_funcs = &pch_encoder_funcs;
+	else
+		encoder_helper_funcs = &gmch_encoder_funcs;
+
+	drm_encoder_helper_add(&crt->base.base, encoder_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 	drm_sysfs_connector_add(connector);
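The single intel_crt_dpms() that re-tested HAS_PCH_SPLIT() on every call becomes two helpers selected once at init. The generic form of that once-at-init selection, reduced to its core (the struct and names here are illustrative, not drm API):

	struct crt_variant_ops {
		void (*dpms)(struct drm_encoder *encoder, int mode);
	};

	static const struct crt_variant_ops pch_variant  = { .dpms = pch_crt_dpms };
	static const struct crt_variant_ops gmch_variant = { .dpms = gmch_crt_dpms };

	static const struct crt_variant_ops *crt_pick_variant(struct drm_device *dev)
	{
		/* Decided once; callbacks never re-test the platform. */
		return HAS_PCH_SPLIT(dev) ? &pch_variant : &gmch_variant;
	}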
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 37514a52b05c..4c844c68ec80 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -25,7 +25,6 @@
25 */ 25 */
26 26
27#include <linux/dmi.h> 27#include <linux/dmi.h>
28#include <linux/cpufreq.h>
29#include <linux/module.h> 28#include <linux/module.h>
30#include <linux/input.h> 29#include <linux/input.h>
31#include <linux/i2c.h> 30#include <linux/i2c.h>
@@ -45,7 +44,6 @@
45#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) 44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
46 45
47bool intel_pipe_has_type(struct drm_crtc *crtc, int type); 46bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
48static void intel_update_watermarks(struct drm_device *dev);
49static void intel_increase_pllclock(struct drm_crtc *crtc); 47static void intel_increase_pllclock(struct drm_crtc *crtc);
50static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
51 49
@@ -1517,7 +1515,7 @@ static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1517 * Plane regs are double buffered, going from enabled->disabled needs a 1515 * Plane regs are double buffered, going from enabled->disabled needs a
1518 * trigger in order to latch. The display address reg provides this. 1516 * trigger in order to latch. The display address reg provides this.
1519 */ 1517 */
1520static void intel_flush_display_plane(struct drm_i915_private *dev_priv, 1518void intel_flush_display_plane(struct drm_i915_private *dev_priv,
1521 enum plane plane) 1519 enum plane plane)
1522{ 1520{
1523 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); 1521 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
@@ -1628,490 +1626,6 @@ static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1628 disable_pch_hdmi(dev_priv, pipe, HDMID); 1626 disable_pch_hdmi(dev_priv, pipe, HDMID);
1629} 1627}
1630 1628
1631static void i8xx_disable_fbc(struct drm_device *dev)
1632{
1633 struct drm_i915_private *dev_priv = dev->dev_private;
1634 u32 fbc_ctl;
1635
1636 /* Disable compression */
1637 fbc_ctl = I915_READ(FBC_CONTROL);
1638 if ((fbc_ctl & FBC_CTL_EN) == 0)
1639 return;
1640
1641 fbc_ctl &= ~FBC_CTL_EN;
1642 I915_WRITE(FBC_CONTROL, fbc_ctl);
1643
1644 /* Wait for compressing bit to clear */
1645 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
1646 DRM_DEBUG_KMS("FBC idle timed out\n");
1647 return;
1648 }
1649
1650 DRM_DEBUG_KMS("disabled FBC\n");
1651}
1652
1653static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1654{
1655 struct drm_device *dev = crtc->dev;
1656 struct drm_i915_private *dev_priv = dev->dev_private;
1657 struct drm_framebuffer *fb = crtc->fb;
1658 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1659 struct drm_i915_gem_object *obj = intel_fb->obj;
1660 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1661 int cfb_pitch;
1662 int plane, i;
1663 u32 fbc_ctl, fbc_ctl2;
1664
1665 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
1666 if (fb->pitches[0] < cfb_pitch)
1667 cfb_pitch = fb->pitches[0];
1668
1669 /* FBC_CTL wants 64B units */
1670 cfb_pitch = (cfb_pitch / 64) - 1;
1671 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1672
1673 /* Clear old tags */
1674 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
1675 I915_WRITE(FBC_TAG + (i * 4), 0);
1676
1677 /* Set it up... */
1678 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
1679 fbc_ctl2 |= plane;
1680 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1681 I915_WRITE(FBC_FENCE_OFF, crtc->y);
1682
1683 /* enable it... */
1684 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
1685 if (IS_I945GM(dev))
1686 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1687 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1688 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1689 fbc_ctl |= obj->fence_reg;
1690 I915_WRITE(FBC_CONTROL, fbc_ctl);
1691
1692 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
1693 cfb_pitch, crtc->y, intel_crtc->plane);
1694}
1695
1696static bool i8xx_fbc_enabled(struct drm_device *dev)
1697{
1698 struct drm_i915_private *dev_priv = dev->dev_private;
1699
1700 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
1701}
1702
1703static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1704{
1705 struct drm_device *dev = crtc->dev;
1706 struct drm_i915_private *dev_priv = dev->dev_private;
1707 struct drm_framebuffer *fb = crtc->fb;
1708 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1709 struct drm_i915_gem_object *obj = intel_fb->obj;
1710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1711 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1712 unsigned long stall_watermark = 200;
1713 u32 dpfc_ctl;
1714
1715 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1716 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
1717 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1718
1719 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1720 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1721 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1722 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
1723
1724 /* enable it... */
1725 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
1726
1727 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1728}
1729
1730static void g4x_disable_fbc(struct drm_device *dev)
1731{
1732 struct drm_i915_private *dev_priv = dev->dev_private;
1733 u32 dpfc_ctl;
1734
1735 /* Disable compression */
1736 dpfc_ctl = I915_READ(DPFC_CONTROL);
1737 if (dpfc_ctl & DPFC_CTL_EN) {
1738 dpfc_ctl &= ~DPFC_CTL_EN;
1739 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
1740
1741 DRM_DEBUG_KMS("disabled FBC\n");
1742 }
1743}
1744
1745static bool g4x_fbc_enabled(struct drm_device *dev)
1746{
1747 struct drm_i915_private *dev_priv = dev->dev_private;
1748
1749 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
1750}
1751
1752static void sandybridge_blit_fbc_update(struct drm_device *dev)
1753{
1754 struct drm_i915_private *dev_priv = dev->dev_private;
1755 u32 blt_ecoskpd;
1756
1757 /* Make sure blitter notifies FBC of writes */
1758 gen6_gt_force_wake_get(dev_priv);
1759 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1760 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1761 GEN6_BLITTER_LOCK_SHIFT;
1762 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1763 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
1764 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1765 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
1766 GEN6_BLITTER_LOCK_SHIFT);
1767 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1768 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1769 gen6_gt_force_wake_put(dev_priv);
1770}
1771
1772static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1773{
1774 struct drm_device *dev = crtc->dev;
1775 struct drm_i915_private *dev_priv = dev->dev_private;
1776 struct drm_framebuffer *fb = crtc->fb;
1777 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1778 struct drm_i915_gem_object *obj = intel_fb->obj;
1779 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1780 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1781 unsigned long stall_watermark = 200;
1782 u32 dpfc_ctl;
1783
1784 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1785 dpfc_ctl &= DPFC_RESERVED;
1786 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1787 /* Set persistent mode for front-buffer rendering, ala X. */
1788 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
1789 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
1790 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1791
1792 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
1793 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1794 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1795 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1796 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1797 /* enable it... */
1798 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1799
1800 if (IS_GEN6(dev)) {
1801 I915_WRITE(SNB_DPFC_CTL_SA,
1802 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
1803 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1804 sandybridge_blit_fbc_update(dev);
1805 }
1806
1807 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1808}
1809
1810static void ironlake_disable_fbc(struct drm_device *dev)
1811{
1812 struct drm_i915_private *dev_priv = dev->dev_private;
1813 u32 dpfc_ctl;
1814
1815 /* Disable compression */
1816 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1817 if (dpfc_ctl & DPFC_CTL_EN) {
1818 dpfc_ctl &= ~DPFC_CTL_EN;
1819 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
1820
1821 DRM_DEBUG_KMS("disabled FBC\n");
1822 }
1823}
1824
1825static bool ironlake_fbc_enabled(struct drm_device *dev)
1826{
1827 struct drm_i915_private *dev_priv = dev->dev_private;
1828
1829 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
1830}
1831
1832bool intel_fbc_enabled(struct drm_device *dev)
1833{
1834 struct drm_i915_private *dev_priv = dev->dev_private;
1835
1836 if (!dev_priv->display.fbc_enabled)
1837 return false;
1838
1839 return dev_priv->display.fbc_enabled(dev);
1840}
1841
1842static void intel_fbc_work_fn(struct work_struct *__work)
1843{
1844 struct intel_fbc_work *work =
1845 container_of(to_delayed_work(__work),
1846 struct intel_fbc_work, work);
1847 struct drm_device *dev = work->crtc->dev;
1848 struct drm_i915_private *dev_priv = dev->dev_private;
1849
1850 mutex_lock(&dev->struct_mutex);
1851 if (work == dev_priv->fbc_work) {
1852 /* Double check that we haven't switched fb without cancelling
1853 * the prior work.
1854 */
1855 if (work->crtc->fb == work->fb) {
1856 dev_priv->display.enable_fbc(work->crtc,
1857 work->interval);
1858
1859 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
1860 dev_priv->cfb_fb = work->crtc->fb->base.id;
1861 dev_priv->cfb_y = work->crtc->y;
1862 }
1863
1864 dev_priv->fbc_work = NULL;
1865 }
1866 mutex_unlock(&dev->struct_mutex);
1867
1868 kfree(work);
1869}
1870
1871static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
1872{
1873 if (dev_priv->fbc_work == NULL)
1874 return;
1875
1876 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
1877
1878 /* Synchronisation is provided by struct_mutex and checking of
1879 * dev_priv->fbc_work, so we can perform the cancellation
1880 * entirely asynchronously.
1881 */
1882 if (cancel_delayed_work(&dev_priv->fbc_work->work))
1883 		/* delayed work was cancelled before it ran; clean up */
1884 kfree(dev_priv->fbc_work);
1885
1886 	/* Mark the work as no longer wanted so that if it does
1887 	 * wake up (because the work was already running and waiting
1888 	 * for our mutex), it will discover that it is no longer
1889 	 * necessary to run.
1890 	 */
1891 dev_priv->fbc_work = NULL;
1892}
1893
1894static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1895{
1896 struct intel_fbc_work *work;
1897 struct drm_device *dev = crtc->dev;
1898 struct drm_i915_private *dev_priv = dev->dev_private;
1899
1900 if (!dev_priv->display.enable_fbc)
1901 return;
1902
1903 intel_cancel_fbc_work(dev_priv);
1904
1905 work = kzalloc(sizeof *work, GFP_KERNEL);
1906 if (work == NULL) {
1907 dev_priv->display.enable_fbc(crtc, interval);
1908 return;
1909 }
1910
1911 work->crtc = crtc;
1912 work->fb = crtc->fb;
1913 work->interval = interval;
1914 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
1915
1916 dev_priv->fbc_work = work;
1917
1918 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
1919
1920 /* Delay the actual enabling to let pageflipping cease and the
1921 * display to settle before starting the compression. Note that
1922 * this delay also serves a second purpose: it allows for a
1923 * vblank to pass after disabling the FBC before we attempt
1924 * to modify the control registers.
1925 *
1926 * A more complicated solution would involve tracking vblanks
1927 * following the termination of the page-flipping sequence
1928 * and indeed performing the enable as a co-routine and not
1929 * waiting synchronously upon the vblank.
1930 */
1931 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
1932}
1933
1934void intel_disable_fbc(struct drm_device *dev)
1935{
1936 struct drm_i915_private *dev_priv = dev->dev_private;
1937
1938 intel_cancel_fbc_work(dev_priv);
1939
1940 if (!dev_priv->display.disable_fbc)
1941 return;
1942
1943 dev_priv->display.disable_fbc(dev);
1944 dev_priv->cfb_plane = -1;
1945}
1946
1947/**
1948 * intel_update_fbc - enable/disable FBC as needed
1949 * @dev: the drm_device
1950 *
1951 * Set up the framebuffer compression hardware at mode set time. We
1952 * enable it if possible:
1953 * - plane A only (on pre-965)
1954  * - no pixel multiply/line duplication
1955 * - no alpha buffer discard
1956 * - no dual wide
1957 * - framebuffer <= 2048 in width, 1536 in height
1958 *
1959 * We can't assume that any compression will take place (worst case),
1960 * so the compressed buffer has to be the same size as the uncompressed
1961 * one. It also must reside (along with the line length buffer) in
1962 * stolen memory.
1963 *
1964 * We need to enable/disable FBC on a global basis.
1965 */
1966static void intel_update_fbc(struct drm_device *dev)
1967{
1968 struct drm_i915_private *dev_priv = dev->dev_private;
1969 struct drm_crtc *crtc = NULL, *tmp_crtc;
1970 struct intel_crtc *intel_crtc;
1971 struct drm_framebuffer *fb;
1972 struct intel_framebuffer *intel_fb;
1973 struct drm_i915_gem_object *obj;
1974 int enable_fbc;
1975
1976 DRM_DEBUG_KMS("\n");
1977
1978 if (!i915_powersave)
1979 return;
1980
1981 if (!I915_HAS_FBC(dev))
1982 return;
1983
1984 /*
1985 * If FBC is already on, we just have to verify that we can
1986 * keep it that way...
1987 * Need to disable if:
1988 * - more than one pipe is active
1989 * - changing FBC params (stride, fence, mode)
1990 * - new fb is too large to fit in compressed buffer
1991 * - going to an unsupported config (interlace, pixel multiply, etc.)
1992 */
1993 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
1994 if (tmp_crtc->enabled && tmp_crtc->fb) {
1995 if (crtc) {
1996 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
1997 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
1998 goto out_disable;
1999 }
2000 crtc = tmp_crtc;
2001 }
2002 }
2003
2004 if (!crtc || crtc->fb == NULL) {
2005 DRM_DEBUG_KMS("no output, disabling\n");
2006 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
2007 goto out_disable;
2008 }
2009
2010 intel_crtc = to_intel_crtc(crtc);
2011 fb = crtc->fb;
2012 intel_fb = to_intel_framebuffer(fb);
2013 obj = intel_fb->obj;
2014
2015 enable_fbc = i915_enable_fbc;
2016 if (enable_fbc < 0) {
2017 DRM_DEBUG_KMS("fbc set to per-chip default\n");
2018 enable_fbc = 1;
2019 if (INTEL_INFO(dev)->gen <= 6)
2020 enable_fbc = 0;
2021 }
2022 if (!enable_fbc) {
2023 DRM_DEBUG_KMS("fbc disabled per module param\n");
2024 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
2025 goto out_disable;
2026 }
2027 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
2028 DRM_DEBUG_KMS("framebuffer too large, disabling "
2029 "compression\n");
2030 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
2031 goto out_disable;
2032 }
2033 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
2034 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
2035 DRM_DEBUG_KMS("mode incompatible with compression, "
2036 "disabling\n");
2037 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
2038 goto out_disable;
2039 }
2040 if ((crtc->mode.hdisplay > 2048) ||
2041 (crtc->mode.vdisplay > 1536)) {
2042 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
2043 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
2044 goto out_disable;
2045 }
2046 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
2047 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
2048 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
2049 goto out_disable;
2050 }
2051
2052 /* The use of a CPU fence is mandatory in order to detect writes
2053 * by the CPU to the scanout and trigger updates to the FBC.
2054 */
2055 if (obj->tiling_mode != I915_TILING_X ||
2056 obj->fence_reg == I915_FENCE_REG_NONE) {
2057 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
2058 dev_priv->no_fbc_reason = FBC_NOT_TILED;
2059 goto out_disable;
2060 }
2061
2062 /* If the kernel debugger is active, always disable compression */
2063 if (in_dbg_master())
2064 goto out_disable;
2065
2066 /* If the scanout has not changed, don't modify the FBC settings.
2067 * Note that we make the fundamental assumption that the fb->obj
2068 * cannot be unpinned (and have its GTT offset and fence revoked)
2069 * without first being decoupled from the scanout and FBC disabled.
2070 */
2071 if (dev_priv->cfb_plane == intel_crtc->plane &&
2072 dev_priv->cfb_fb == fb->base.id &&
2073 dev_priv->cfb_y == crtc->y)
2074 return;
2075
2076 if (intel_fbc_enabled(dev)) {
2077 /* We update FBC along two paths, after changing fb/crtc
2078 * configuration (modeswitching) and after page-flipping
2079 * finishes. For the latter, we know that not only did
2080 * we disable the FBC at the start of the page-flip
2081 * sequence, but also more than one vblank has passed.
2082 *
2083 * For the former case of modeswitching, it is possible
2084 	 * to switch between two valid FBC configurations
2085 * instantaneously so we do need to disable the FBC
2086 * before we can modify its control registers. We also
2087 * have to wait for the next vblank for that to take
2088 * effect. However, since we delay enabling FBC we can
2089 * assume that a vblank has passed since disabling and
2090 * that we can safely alter the registers in the deferred
2091 * callback.
2092 *
2093 * In the scenario that we go from a valid to invalid
2094 * and then back to valid FBC configuration we have
2095 * no strict enforcement that a vblank occurred since
2096 * disabling the FBC. However, along all current pipe
2097 * disabling paths we do need to wait for a vblank at
2098 * some point. And we wait before enabling FBC anyway.
2099 */
2100 DRM_DEBUG_KMS("disabling active FBC for update\n");
2101 intel_disable_fbc(dev);
2102 }
2103
2104 intel_enable_fbc(crtc, 500);
2105 return;
2106
2107out_disable:
2108 /* Multiple disables should be harmless */
2109 if (intel_fbc_enabled(dev)) {
2110 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
2111 intel_disable_fbc(dev);
2112 }
2113}
2114
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
 			   struct drm_i915_gem_object *obj,
@@ -2152,13 +1666,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	 * framebuffer compression. For simplicity, we always install
 	 * a fence as the cost is not that onerous.
 	 */
-	if (obj->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence(obj, pipelined);
-		if (ret)
-			goto err_unpin;
+	ret = i915_gem_object_get_fence(obj);
+	if (ret)
+		goto err_unpin;
 
-		i915_gem_object_pin_fence(obj);
-	}
+	i915_gem_object_pin_fence(obj);
 
 	dev_priv->mm.interruptible = true;
 	return 0;
@@ -2239,7 +1751,7 @@ static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 		      Start, Offset, x, y, fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		I915_WRITE(DSPSURF(plane), Start);
+		I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
 		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 		I915_WRITE(DSPADDR(plane), Offset);
 	} else
@@ -2319,7 +1831,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc,
 	DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
 		      Start, Offset, x, y, fb->pitches[0]);
 	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
-	I915_WRITE(DSPSURF(plane), Start);
+	I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
 	I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
 	I915_WRITE(DSPADDR(plane), Offset);
 	POSTING_READ(reg);
@@ -2334,16 +1846,39 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->display.disable_fbc)
+		dev_priv->display.disable_fbc(dev);
+	intel_increase_pllclock(crtc);
+
+	return dev_priv->display.update_plane(crtc, fb, x, y);
+}
+
+static int
+intel_finish_fb(struct drm_framebuffer *old_fb)
+{
+	struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+	bool was_interruptible = dev_priv->mm.interruptible;
 	int ret;
 
-	ret = dev_priv->display.update_plane(crtc, fb, x, y);
-	if (ret)
-		return ret;
+	wait_event(dev_priv->pending_flip_queue,
+		   atomic_read(&dev_priv->mm.wedged) ||
+		   atomic_read(&obj->pending_flip) == 0);
 
-	intel_update_fbc(dev);
-	intel_increase_pllclock(crtc);
+	/* Big Hammer, we also need to ensure that any pending
+	 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+	 * current scanout is retired before unpinning the old
+	 * framebuffer.
+	 *
+	 * This should only fail upon a hung GPU, in which case we
+	 * can safely continue.
+	 */
+	dev_priv->mm.interruptible = false;
+	ret = i915_gem_object_finish_gpu(obj);
+	dev_priv->mm.interruptible = was_interruptible;
 
-	return 0;
+	return ret;
 }
 
 static int
@@ -2351,6 +1886,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		    struct drm_framebuffer *old_fb)
 {
 	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int ret;
@@ -2384,28 +1920,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		return ret;
 	}
 
-	if (old_fb) {
-		struct drm_i915_private *dev_priv = dev->dev_private;
-		struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-
-		wait_event(dev_priv->pending_flip_queue,
-			   atomic_read(&dev_priv->mm.wedged) ||
-			   atomic_read(&obj->pending_flip) == 0);
-
-		/* Big Hammer, we also need to ensure that any pending
-		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
-		 * current scanout is retired before unpinning the old
-		 * framebuffer.
-		 *
-		 * This should only fail upon a hung GPU, in which case we
-		 * can safely continue.
-		 */
-		ret = i915_gem_object_finish_gpu(obj);
-		(void) ret;
-	}
+	if (old_fb)
+		intel_finish_fb(old_fb);
 
-	ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
-					 LEAVE_ATOMIC_MODE_SET);
+	ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
 	if (ret) {
 		intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
 		mutex_unlock(&dev->struct_mutex);
@@ -2418,6 +1936,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 		intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
 	}
 
+	intel_update_fbc(dev);
 	mutex_unlock(&dev->struct_mutex);
 
 	if (!dev->primary->master)
@@ -3010,16 +2529,14 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
 
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
-	struct drm_i915_gem_object *obj;
-	struct drm_i915_private *dev_priv;
+	struct drm_device *dev = crtc->dev;
 
 	if (crtc->fb == NULL)
 		return;
 
-	obj = to_intel_framebuffer(crtc->fb)->obj;
-	dev_priv = crtc->dev->dev_private;
-	wait_event(dev_priv->pending_flip_queue,
-		   atomic_read(&obj->pending_flip) == 0);
+	mutex_lock(&dev->struct_mutex);
+	intel_finish_fb(crtc->fb);
+	mutex_unlock(&dev->struct_mutex);
 }
 
 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
@@ -3669,1482 +3186,6 @@ ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
 	fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
 }
 
3672
3673struct intel_watermark_params {
3674 unsigned long fifo_size;
3675 unsigned long max_wm;
3676 unsigned long default_wm;
3677 unsigned long guard_size;
3678 unsigned long cacheline_size;
3679};
3680
3681/* Pineview has different values for various configs */
3682static const struct intel_watermark_params pineview_display_wm = {
3683 PINEVIEW_DISPLAY_FIFO,
3684 PINEVIEW_MAX_WM,
3685 PINEVIEW_DFT_WM,
3686 PINEVIEW_GUARD_WM,
3687 PINEVIEW_FIFO_LINE_SIZE
3688};
3689static const struct intel_watermark_params pineview_display_hplloff_wm = {
3690 PINEVIEW_DISPLAY_FIFO,
3691 PINEVIEW_MAX_WM,
3692 PINEVIEW_DFT_HPLLOFF_WM,
3693 PINEVIEW_GUARD_WM,
3694 PINEVIEW_FIFO_LINE_SIZE
3695};
3696static const struct intel_watermark_params pineview_cursor_wm = {
3697 PINEVIEW_CURSOR_FIFO,
3698 PINEVIEW_CURSOR_MAX_WM,
3699 PINEVIEW_CURSOR_DFT_WM,
3700 PINEVIEW_CURSOR_GUARD_WM,
3701 PINEVIEW_FIFO_LINE_SIZE,
3702};
3703static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
3704 PINEVIEW_CURSOR_FIFO,
3705 PINEVIEW_CURSOR_MAX_WM,
3706 PINEVIEW_CURSOR_DFT_WM,
3707 PINEVIEW_CURSOR_GUARD_WM,
3708 PINEVIEW_FIFO_LINE_SIZE
3709};
3710static const struct intel_watermark_params g4x_wm_info = {
3711 G4X_FIFO_SIZE,
3712 G4X_MAX_WM,
3713 G4X_MAX_WM,
3714 2,
3715 G4X_FIFO_LINE_SIZE,
3716};
3717static const struct intel_watermark_params g4x_cursor_wm_info = {
3718 I965_CURSOR_FIFO,
3719 I965_CURSOR_MAX_WM,
3720 I965_CURSOR_DFT_WM,
3721 2,
3722 G4X_FIFO_LINE_SIZE,
3723};
3724static const struct intel_watermark_params valleyview_wm_info = {
3725 VALLEYVIEW_FIFO_SIZE,
3726 VALLEYVIEW_MAX_WM,
3727 VALLEYVIEW_MAX_WM,
3728 2,
3729 G4X_FIFO_LINE_SIZE,
3730};
3731static const struct intel_watermark_params valleyview_cursor_wm_info = {
3732 I965_CURSOR_FIFO,
3733 VALLEYVIEW_CURSOR_MAX_WM,
3734 I965_CURSOR_DFT_WM,
3735 2,
3736 G4X_FIFO_LINE_SIZE,
3737};
3738static const struct intel_watermark_params i965_cursor_wm_info = {
3739 I965_CURSOR_FIFO,
3740 I965_CURSOR_MAX_WM,
3741 I965_CURSOR_DFT_WM,
3742 2,
3743 I915_FIFO_LINE_SIZE,
3744};
3745static const struct intel_watermark_params i945_wm_info = {
3746 I945_FIFO_SIZE,
3747 I915_MAX_WM,
3748 1,
3749 2,
3750 I915_FIFO_LINE_SIZE
3751};
3752static const struct intel_watermark_params i915_wm_info = {
3753 I915_FIFO_SIZE,
3754 I915_MAX_WM,
3755 1,
3756 2,
3757 I915_FIFO_LINE_SIZE
3758};
3759static const struct intel_watermark_params i855_wm_info = {
3760 I855GM_FIFO_SIZE,
3761 I915_MAX_WM,
3762 1,
3763 2,
3764 I830_FIFO_LINE_SIZE
3765};
3766static const struct intel_watermark_params i830_wm_info = {
3767 I830_FIFO_SIZE,
3768 I915_MAX_WM,
3769 1,
3770 2,
3771 I830_FIFO_LINE_SIZE
3772};
3773
3774static const struct intel_watermark_params ironlake_display_wm_info = {
3775 ILK_DISPLAY_FIFO,
3776 ILK_DISPLAY_MAXWM,
3777 ILK_DISPLAY_DFTWM,
3778 2,
3779 ILK_FIFO_LINE_SIZE
3780};
3781static const struct intel_watermark_params ironlake_cursor_wm_info = {
3782 ILK_CURSOR_FIFO,
3783 ILK_CURSOR_MAXWM,
3784 ILK_CURSOR_DFTWM,
3785 2,
3786 ILK_FIFO_LINE_SIZE
3787};
3788static const struct intel_watermark_params ironlake_display_srwm_info = {
3789 ILK_DISPLAY_SR_FIFO,
3790 ILK_DISPLAY_MAX_SRWM,
3791 ILK_DISPLAY_DFT_SRWM,
3792 2,
3793 ILK_FIFO_LINE_SIZE
3794};
3795static const struct intel_watermark_params ironlake_cursor_srwm_info = {
3796 ILK_CURSOR_SR_FIFO,
3797 ILK_CURSOR_MAX_SRWM,
3798 ILK_CURSOR_DFT_SRWM,
3799 2,
3800 ILK_FIFO_LINE_SIZE
3801};
3802
3803static const struct intel_watermark_params sandybridge_display_wm_info = {
3804 SNB_DISPLAY_FIFO,
3805 SNB_DISPLAY_MAXWM,
3806 SNB_DISPLAY_DFTWM,
3807 2,
3808 SNB_FIFO_LINE_SIZE
3809};
3810static const struct intel_watermark_params sandybridge_cursor_wm_info = {
3811 SNB_CURSOR_FIFO,
3812 SNB_CURSOR_MAXWM,
3813 SNB_CURSOR_DFTWM,
3814 2,
3815 SNB_FIFO_LINE_SIZE
3816};
3817static const struct intel_watermark_params sandybridge_display_srwm_info = {
3818 SNB_DISPLAY_SR_FIFO,
3819 SNB_DISPLAY_MAX_SRWM,
3820 SNB_DISPLAY_DFT_SRWM,
3821 2,
3822 SNB_FIFO_LINE_SIZE
3823};
3824static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
3825 SNB_CURSOR_SR_FIFO,
3826 SNB_CURSOR_MAX_SRWM,
3827 SNB_CURSOR_DFT_SRWM,
3828 2,
3829 SNB_FIFO_LINE_SIZE
3830};
3831
3832
3833/**
3834 * intel_calculate_wm - calculate watermark level
3835 * @clock_in_khz: pixel clock
3836 * @wm: chip FIFO params
3837 * @pixel_size: display pixel size
3838 * @latency_ns: memory latency for the platform
3839 *
3840 * Calculate the watermark level (the level at which the display plane will
3841 * start fetching from memory again). Each chip has a different display
3842 * FIFO size and allocation, so the caller needs to figure that out and pass
3843 * in the correct intel_watermark_params structure.
3844 *
3845 * As the pixel clock runs, the FIFO will be drained at a rate that depends
3846 * on the pixel size. When it reaches the watermark level, it'll start
3847  * fetching FIFO-line-sized chunks from memory until the FIFO fills
3848 * past the watermark point. If the FIFO drains completely, a FIFO underrun
3849 * will occur, and a display engine hang could result.
3850 */
3851static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
3852 const struct intel_watermark_params *wm,
3853 int fifo_size,
3854 int pixel_size,
3855 unsigned long latency_ns)
3856{
3857 long entries_required, wm_size;
3858
3859 /*
3860 * Note: we need to make sure we don't overflow for various clock &
3861 * latency values.
3862 	 * Clocks range from a few thousand to several hundred thousand kHz;
3863 	 * latency is usually a few thousand ns.
3864 	 */
3865 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
3866 1000;
3867 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
3868
3869 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
3870
3871 wm_size = fifo_size - (entries_required + wm->guard_size);
3872
3873 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
3874
3875 /* Don't promote wm_size to unsigned... */
3876 if (wm_size > (long)wm->max_wm)
3877 wm_size = wm->max_wm;
3878 if (wm_size <= 0)
3879 wm_size = wm->default_wm;
3880 return wm_size;
3881}
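
A worked instance of the calculation above, with illustrative numbers that are not taken from any particular platform:

	/*
	 * clock_in_khz = 100000, pixel_size = 4, latency_ns = 5000,
	 * cacheline_size = 64, fifo_size = 96, guard_size = 2
	 *
	 * entries_required = ((100000 / 1000) * 4 * 5000) / 1000 = 2000 bytes
	 *                  = DIV_ROUND_UP(2000, 64)             = 32 cachelines
	 * wm_size          = 96 - (32 + 2)                       = 62
	 */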
3882
3883struct cxsr_latency {
3884 int is_desktop;
3885 int is_ddr3;
3886 unsigned long fsb_freq;
3887 unsigned long mem_freq;
3888 unsigned long display_sr;
3889 unsigned long display_hpll_disable;
3890 unsigned long cursor_sr;
3891 unsigned long cursor_hpll_disable;
3892};
3893
3894static const struct cxsr_latency cxsr_latency_table[] = {
3895 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
3896 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
3897 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
3898 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
3899 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
3900
3901 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
3902 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
3903 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
3904 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
3905 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
3906
3907 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
3908 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
3909 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
3910 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
3911 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
3912
3913 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
3914 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
3915 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
3916 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
3917 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
3918
3919 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
3920 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
3921 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
3922 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
3923 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
3924
3925 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
3926 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
3927 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
3928 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
3929 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
3930};
3931
3932static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
3933 int is_ddr3,
3934 int fsb,
3935 int mem)
3936{
3937 const struct cxsr_latency *latency;
3938 int i;
3939
3940 if (fsb == 0 || mem == 0)
3941 return NULL;
3942
3943 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
3944 latency = &cxsr_latency_table[i];
3945 if (is_desktop == latency->is_desktop &&
3946 is_ddr3 == latency->is_ddr3 &&
3947 fsb == latency->fsb_freq && mem == latency->mem_freq)
3948 return latency;
3949 }
3950
3951 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
3952
3953 return NULL;
3954}
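
As a usage sketch of the lookup above (the snippet is illustrative, not from this patch; the values come from the desktop DDR3-667 row with an 800 MHz FSB):

	const struct cxsr_latency *lat = intel_get_cxsr_latency(1, 1, 800, 667);

	if (lat)	/* matches {1, 1, 800, 667, 6420, ...} above */
		DRM_DEBUG_KMS("display SR latency: %lu ns\n", lat->display_sr);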
3955
3956static void pineview_disable_cxsr(struct drm_device *dev)
3957{
3958 struct drm_i915_private *dev_priv = dev->dev_private;
3959
3960 /* deactivate cxsr */
3961 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
3962}
3963
3964/*
3965 * Latency for FIFO fetches is dependent on several factors:
3966 * - memory configuration (speed, channels)
3967 * - chipset
3968 * - current MCH state
3969 * It can be fairly high in some situations, so here we assume a fairly
3970 * pessimal value. It's a tradeoff between extra memory fetches (if we
3971 * set this value too high, the FIFO will fetch frequently to stay full)
3972 * and power consumption (set it too low to save power and we might see
3973 * FIFO underruns and display "flicker").
3974 *
3975 * A value of 5us seems to be a good balance; safe for very low end
3976 * platforms but not overly aggressive on lower latency configs.
3977 */
3978static const int latency_ns = 5000;
3979
3980static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
3981{
3982 struct drm_i915_private *dev_priv = dev->dev_private;
3983 uint32_t dsparb = I915_READ(DSPARB);
3984 int size;
3985
3986 size = dsparb & 0x7f;
3987 if (plane)
3988 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
3989
3990 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
3991 plane ? "B" : "A", size);
3992
3993 return size;
3994}
3995
3996static int i85x_get_fifo_size(struct drm_device *dev, int plane)
3997{
3998 struct drm_i915_private *dev_priv = dev->dev_private;
3999 uint32_t dsparb = I915_READ(DSPARB);
4000 int size;
4001
4002 size = dsparb & 0x1ff;
4003 if (plane)
4004 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
4005 size >>= 1; /* Convert to cachelines */
4006
4007 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4008 plane ? "B" : "A", size);
4009
4010 return size;
4011}
4012
4013static int i845_get_fifo_size(struct drm_device *dev, int plane)
4014{
4015 struct drm_i915_private *dev_priv = dev->dev_private;
4016 uint32_t dsparb = I915_READ(DSPARB);
4017 int size;
4018
4019 size = dsparb & 0x7f;
4020 size >>= 2; /* Convert to cachelines */
4021
4022 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4023 plane ? "B" : "A",
4024 size);
4025
4026 return size;
4027}
4028
4029static int i830_get_fifo_size(struct drm_device *dev, int plane)
4030{
4031 struct drm_i915_private *dev_priv = dev->dev_private;
4032 uint32_t dsparb = I915_READ(DSPARB);
4033 int size;
4034
4035 size = dsparb & 0x7f;
4036 size >>= 1; /* Convert to cachelines */
4037
4038 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
4039 plane ? "B" : "A", size);
4040
4041 return size;
4042}
4043
4044static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
4045{
4046 struct drm_crtc *crtc, *enabled = NULL;
4047
4048 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
4049 if (crtc->enabled && crtc->fb) {
4050 if (enabled)
4051 return NULL;
4052 enabled = crtc;
4053 }
4054 }
4055
4056 return enabled;
4057}
4058
4059static void pineview_update_wm(struct drm_device *dev)
4060{
4061 struct drm_i915_private *dev_priv = dev->dev_private;
4062 struct drm_crtc *crtc;
4063 const struct cxsr_latency *latency;
4064 u32 reg;
4065 unsigned long wm;
4066
4067 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
4068 dev_priv->fsb_freq, dev_priv->mem_freq);
4069 if (!latency) {
4070 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
4071 pineview_disable_cxsr(dev);
4072 return;
4073 }
4074
4075 crtc = single_enabled_crtc(dev);
4076 if (crtc) {
4077 int clock = crtc->mode.clock;
4078 int pixel_size = crtc->fb->bits_per_pixel / 8;
4079
4080 /* Display SR */
4081 wm = intel_calculate_wm(clock, &pineview_display_wm,
4082 pineview_display_wm.fifo_size,
4083 pixel_size, latency->display_sr);
4084 reg = I915_READ(DSPFW1);
4085 reg &= ~DSPFW_SR_MASK;
4086 reg |= wm << DSPFW_SR_SHIFT;
4087 I915_WRITE(DSPFW1, reg);
4088 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
4089
4090 /* cursor SR */
4091 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
4092 pineview_display_wm.fifo_size,
4093 pixel_size, latency->cursor_sr);
4094 reg = I915_READ(DSPFW3);
4095 reg &= ~DSPFW_CURSOR_SR_MASK;
4096 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
4097 I915_WRITE(DSPFW3, reg);
4098
4099 /* Display HPLL off SR */
4100 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
4101 pineview_display_hplloff_wm.fifo_size,
4102 pixel_size, latency->display_hpll_disable);
4103 reg = I915_READ(DSPFW3);
4104 reg &= ~DSPFW_HPLL_SR_MASK;
4105 reg |= wm & DSPFW_HPLL_SR_MASK;
4106 I915_WRITE(DSPFW3, reg);
4107
4108 /* cursor HPLL off SR */
4109 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
4110 pineview_display_hplloff_wm.fifo_size,
4111 pixel_size, latency->cursor_hpll_disable);
4112 reg = I915_READ(DSPFW3);
4113 reg &= ~DSPFW_HPLL_CURSOR_MASK;
4114 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
4115 I915_WRITE(DSPFW3, reg);
4116 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
4117
4118 /* activate cxsr */
4119 I915_WRITE(DSPFW3,
4120 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
4121 DRM_DEBUG_KMS("Self-refresh is enabled\n");
4122 } else {
4123 pineview_disable_cxsr(dev);
4124 DRM_DEBUG_KMS("Self-refresh is disabled\n");
4125 }
4126}
4127
4128static bool g4x_compute_wm0(struct drm_device *dev,
4129 int plane,
4130 const struct intel_watermark_params *display,
4131 int display_latency_ns,
4132 const struct intel_watermark_params *cursor,
4133 int cursor_latency_ns,
4134 int *plane_wm,
4135 int *cursor_wm)
4136{
4137 struct drm_crtc *crtc;
4138 int htotal, hdisplay, clock, pixel_size;
4139 int line_time_us, line_count;
4140 int entries, tlb_miss;
4141
4142 crtc = intel_get_crtc_for_plane(dev, plane);
4143 if (crtc->fb == NULL || !crtc->enabled) {
4144 *cursor_wm = cursor->guard_size;
4145 *plane_wm = display->guard_size;
4146 return false;
4147 }
4148
4149 htotal = crtc->mode.htotal;
4150 hdisplay = crtc->mode.hdisplay;
4151 clock = crtc->mode.clock;
4152 pixel_size = crtc->fb->bits_per_pixel / 8;
4153
4154 /* Use the small buffer method to calculate plane watermark */
4155 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4156 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4157 if (tlb_miss > 0)
4158 entries += tlb_miss;
4159 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4160 *plane_wm = entries + display->guard_size;
4161 if (*plane_wm > (int)display->max_wm)
4162 *plane_wm = display->max_wm;
4163
4164 /* Use the large buffer method to calculate cursor watermark */
4165 line_time_us = ((htotal * 1000) / clock);
4166 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4167 entries = line_count * 64 * pixel_size;
4168 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4169 if (tlb_miss > 0)
4170 entries += tlb_miss;
4171 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4172 *cursor_wm = entries + cursor->guard_size;
4173 if (*cursor_wm > (int)cursor->max_wm)
4174 *cursor_wm = (int)cursor->max_wm;
4175
4176 return true;
4177}
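
To make the small buffer method above concrete, a worked plane watermark with illustrative numbers (assuming the tlb_miss term comes out negative and is therefore ignored):

	/*
	 * clock = 100000 kHz, pixel_size = 4, display_latency_ns = 700
	 *
	 * entries  = ((100000 * 4 / 1000) * 700) / 1000 = 280 bytes
	 * entries  = DIV_ROUND_UP(280, 64)              = 5 cachelines
	 * plane_wm = 5 + guard_size (2)                 = 7
	 */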
4178
4179/*
4180 * Check the wm result.
4181 *
4182  * If any calculated watermark value is larger than the maximum value that
4183 * can be programmed into the associated watermark register, that watermark
4184 * must be disabled.
4185 */
4186static bool g4x_check_srwm(struct drm_device *dev,
4187 int display_wm, int cursor_wm,
4188 const struct intel_watermark_params *display,
4189 const struct intel_watermark_params *cursor)
4190{
4191 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
4192 display_wm, cursor_wm);
4193
4194 if (display_wm > display->max_wm) {
4195 DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n",
4196 display_wm, display->max_wm);
4197 return false;
4198 }
4199
4200 if (cursor_wm > cursor->max_wm) {
4201 DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n",
4202 cursor_wm, cursor->max_wm);
4203 return false;
4204 }
4205
4206 if (!(display_wm || cursor_wm)) {
4207 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
4208 return false;
4209 }
4210
4211 return true;
4212}
4213
4214static bool g4x_compute_srwm(struct drm_device *dev,
4215 int plane,
4216 int latency_ns,
4217 const struct intel_watermark_params *display,
4218 const struct intel_watermark_params *cursor,
4219 int *display_wm, int *cursor_wm)
4220{
4221 struct drm_crtc *crtc;
4222 int hdisplay, htotal, pixel_size, clock;
4223 unsigned long line_time_us;
4224 int line_count, line_size;
4225 int small, large;
4226 int entries;
4227
4228 if (!latency_ns) {
4229 *display_wm = *cursor_wm = 0;
4230 return false;
4231 }
4232
4233 crtc = intel_get_crtc_for_plane(dev, plane);
4234 hdisplay = crtc->mode.hdisplay;
4235 htotal = crtc->mode.htotal;
4236 clock = crtc->mode.clock;
4237 pixel_size = crtc->fb->bits_per_pixel / 8;
4238
4239 line_time_us = (htotal * 1000) / clock;
4240 line_count = (latency_ns / line_time_us + 1000) / 1000;
4241 line_size = hdisplay * pixel_size;
4242
4243 /* Use the minimum of the small and large buffer method for primary */
4244 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4245 large = line_count * line_size;
4246
4247 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4248 *display_wm = entries + display->guard_size;
4249
4250 /* calculate the self-refresh watermark for display cursor */
4251 entries = line_count * pixel_size * 64;
4252 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4253 *cursor_wm = entries + cursor->guard_size;
4254
4255 return g4x_check_srwm(dev,
4256 *display_wm, *cursor_wm,
4257 display, cursor);
4258}
4259
4260static bool vlv_compute_drain_latency(struct drm_device *dev,
4261 int plane,
4262 int *plane_prec_mult,
4263 int *plane_dl,
4264 int *cursor_prec_mult,
4265 int *cursor_dl)
4266{
4267 struct drm_crtc *crtc;
4268 int clock, pixel_size;
4269 int entries;
4270
4271 crtc = intel_get_crtc_for_plane(dev, plane);
4272 if (crtc->fb == NULL || !crtc->enabled)
4273 return false;
4274
4275 clock = crtc->mode.clock; /* VESA DOT Clock */
4276 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
4277
4278 entries = (clock / 1000) * pixel_size;
4279 *plane_prec_mult = (entries > 256) ?
4280 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
4281 *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
4282 pixel_size);
4283
4284 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
4285 *cursor_prec_mult = (entries > 256) ?
4286 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
4287 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
4288
4289 return true;
4290}
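
A worked pass through the plane branch above, again with illustrative numbers and assuming DRAIN_LATENCY_PRECISION_32 evaluates to 32:

	/*
	 * clock = 100000 kHz, pixel_size = 4
	 *
	 * entries  = (100000 / 1000) * 4 = 400 > 256, so the 32x
	 *            precision multiplier is selected
	 * plane_dl = (64 * 32 * 4) / ((100000 / 1000) * 4)
	 *          = 8192 / 400 = 20
	 */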
4291
4292/*
4293 * Update drain latency registers of memory arbiter
4294 *
4295 * Valleyview SoC has a new memory arbiter and needs drain latency registers
4296 * to be programmed. Each plane has a drain latency multiplier and a drain
4297 * latency value.
4298 */
4299
4300static void vlv_update_drain_latency(struct drm_device *dev)
4301{
4302 struct drm_i915_private *dev_priv = dev->dev_private;
4303 int planea_prec, planea_dl, planeb_prec, planeb_dl;
4304 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
4305 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
4306 either 16 or 32 */
4307
4308 /* For plane A, Cursor A */
4309 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
4310 &cursor_prec_mult, &cursora_dl)) {
4311 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4312 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
4313 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4314 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
4315
4316 I915_WRITE(VLV_DDL1, cursora_prec |
4317 (cursora_dl << DDL_CURSORA_SHIFT) |
4318 planea_prec | planea_dl);
4319 }
4320
4321 /* For plane B, Cursor B */
4322 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
4323 &cursor_prec_mult, &cursorb_dl)) {
4324 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4325 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
4326 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
4327 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
4328
4329 I915_WRITE(VLV_DDL2, cursorb_prec |
4330 (cursorb_dl << DDL_CURSORB_SHIFT) |
4331 planeb_prec | planeb_dl);
4332 }
4333}
4334
4335#define single_plane_enabled(mask) is_power_of_2(mask)
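	/* e.g. masks 1 (pipe A only) and 2 (pipe B only) qualify as a single
	 * plane; 0 (no pipes) and 3 (both pipes) do not */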
4336
4337static void valleyview_update_wm(struct drm_device *dev)
4338{
4339 static const int sr_latency_ns = 12000;
4340 struct drm_i915_private *dev_priv = dev->dev_private;
4341 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4342 int plane_sr, cursor_sr;
4343 unsigned int enabled = 0;
4344
4345 vlv_update_drain_latency(dev);
4346
4347 if (g4x_compute_wm0(dev, 0,
4348 &valleyview_wm_info, latency_ns,
4349 &valleyview_cursor_wm_info, latency_ns,
4350 &planea_wm, &cursora_wm))
4351 enabled |= 1;
4352
4353 if (g4x_compute_wm0(dev, 1,
4354 &valleyview_wm_info, latency_ns,
4355 &valleyview_cursor_wm_info, latency_ns,
4356 &planeb_wm, &cursorb_wm))
4357 enabled |= 2;
4358
4359 plane_sr = cursor_sr = 0;
4360 if (single_plane_enabled(enabled) &&
4361 g4x_compute_srwm(dev, ffs(enabled) - 1,
4362 sr_latency_ns,
4363 &valleyview_wm_info,
4364 &valleyview_cursor_wm_info,
4365 &plane_sr, &cursor_sr))
4366 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
4367 else
4368 I915_WRITE(FW_BLC_SELF_VLV,
4369 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
4370
4371 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4372 planea_wm, cursora_wm,
4373 planeb_wm, cursorb_wm,
4374 plane_sr, cursor_sr);
4375
4376 I915_WRITE(DSPFW1,
4377 (plane_sr << DSPFW_SR_SHIFT) |
4378 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4379 (planeb_wm << DSPFW_PLANEB_SHIFT) |
4380 planea_wm);
4381 I915_WRITE(DSPFW2,
4382 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4383 (cursora_wm << DSPFW_CURSORA_SHIFT));
4384 I915_WRITE(DSPFW3,
4385 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
4386}
4387
4388static void g4x_update_wm(struct drm_device *dev)
4389{
4390 static const int sr_latency_ns = 12000;
4391 struct drm_i915_private *dev_priv = dev->dev_private;
4392 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
4393 int plane_sr, cursor_sr;
4394 unsigned int enabled = 0;
4395
4396 if (g4x_compute_wm0(dev, 0,
4397 &g4x_wm_info, latency_ns,
4398 &g4x_cursor_wm_info, latency_ns,
4399 &planea_wm, &cursora_wm))
4400 enabled |= 1;
4401
4402 if (g4x_compute_wm0(dev, 1,
4403 &g4x_wm_info, latency_ns,
4404 &g4x_cursor_wm_info, latency_ns,
4405 &planeb_wm, &cursorb_wm))
4406 enabled |= 2;
4407
4408 plane_sr = cursor_sr = 0;
4409 if (single_plane_enabled(enabled) &&
4410 g4x_compute_srwm(dev, ffs(enabled) - 1,
4411 sr_latency_ns,
4412 &g4x_wm_info,
4413 &g4x_cursor_wm_info,
4414 &plane_sr, &cursor_sr))
4415 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4416 else
4417 I915_WRITE(FW_BLC_SELF,
4418 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
4419
4420 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
4421 planea_wm, cursora_wm,
4422 planeb_wm, cursorb_wm,
4423 plane_sr, cursor_sr);
4424
4425 I915_WRITE(DSPFW1,
4426 (plane_sr << DSPFW_SR_SHIFT) |
4427 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
4428 (planeb_wm << DSPFW_PLANEB_SHIFT) |
4429 planea_wm);
4430 I915_WRITE(DSPFW2,
4431 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
4432 (cursora_wm << DSPFW_CURSORA_SHIFT));
4433 /* HPLL off in SR has some issues on G4x... disable it */
4434 I915_WRITE(DSPFW3,
4435 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
4436 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4437}
4438
4439static void i965_update_wm(struct drm_device *dev)
4440{
4441 struct drm_i915_private *dev_priv = dev->dev_private;
4442 struct drm_crtc *crtc;
4443 int srwm = 1;
4444 int cursor_sr = 16;
4445
4446 /* Calc sr entries for one plane configs */
4447 crtc = single_enabled_crtc(dev);
4448 if (crtc) {
4449 /* self-refresh has much higher latency */
4450 static const int sr_latency_ns = 12000;
4451 int clock = crtc->mode.clock;
4452 int htotal = crtc->mode.htotal;
4453 int hdisplay = crtc->mode.hdisplay;
4454 int pixel_size = crtc->fb->bits_per_pixel / 8;
4455 unsigned long line_time_us;
4456 int entries;
4457
4458 line_time_us = ((htotal * 1000) / clock);
4459
4460 /* Use ns/us then divide to preserve precision */
4461 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4462 pixel_size * hdisplay;
4463 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
4464 srwm = I965_FIFO_SIZE - entries;
4465 if (srwm < 0)
4466 srwm = 1;
4467 srwm &= 0x1ff;
4468 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
4469 entries, srwm);
4470
4471 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4472 pixel_size * 64;
4473 entries = DIV_ROUND_UP(entries,
4474 i965_cursor_wm_info.cacheline_size);
4475 cursor_sr = i965_cursor_wm_info.fifo_size -
4476 (entries + i965_cursor_wm_info.guard_size);
4477
4478 if (cursor_sr > i965_cursor_wm_info.max_wm)
4479 cursor_sr = i965_cursor_wm_info.max_wm;
4480
4481 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
4482 "cursor %d\n", srwm, cursor_sr);
4483
4484 if (IS_CRESTLINE(dev))
4485 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
4486 } else {
4487 /* Turn off self refresh if both pipes are enabled */
4488 if (IS_CRESTLINE(dev))
4489 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
4490 & ~FW_BLC_SELF_EN);
4491 }
4492
4493 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
4494 srwm);
4495
4496 /* 965 has limitations... */
4497 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
4498 (8 << 16) | (8 << 8) | (8 << 0));
4499 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
4500 /* update cursor SR watermark */
4501 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
4502}
4503
4504static void i9xx_update_wm(struct drm_device *dev)
4505{
4506 struct drm_i915_private *dev_priv = dev->dev_private;
4507 const struct intel_watermark_params *wm_info;
4508 uint32_t fwater_lo;
4509 uint32_t fwater_hi;
4510 int cwm, srwm = 1;
4511 int fifo_size;
4512 int planea_wm, planeb_wm;
4513 struct drm_crtc *crtc, *enabled = NULL;
4514
4515 if (IS_I945GM(dev))
4516 wm_info = &i945_wm_info;
4517 else if (!IS_GEN2(dev))
4518 wm_info = &i915_wm_info;
4519 else
4520 wm_info = &i855_wm_info;
4521
4522 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
4523 crtc = intel_get_crtc_for_plane(dev, 0);
4524 if (crtc->enabled && crtc->fb) {
4525 planea_wm = intel_calculate_wm(crtc->mode.clock,
4526 wm_info, fifo_size,
4527 crtc->fb->bits_per_pixel / 8,
4528 latency_ns);
4529 enabled = crtc;
4530 } else
4531 planea_wm = fifo_size - wm_info->guard_size;
4532
4533 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
4534 crtc = intel_get_crtc_for_plane(dev, 1);
4535 if (crtc->enabled && crtc->fb) {
4536 planeb_wm = intel_calculate_wm(crtc->mode.clock,
4537 wm_info, fifo_size,
4538 crtc->fb->bits_per_pixel / 8,
4539 latency_ns);
4540 if (enabled == NULL)
4541 enabled = crtc;
4542 else
4543 enabled = NULL;
4544 } else
4545 planeb_wm = fifo_size - wm_info->guard_size;
4546
4547 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
4548
4549 /*
4550 * Overlay gets an aggressive default since video jitter is bad.
4551 */
4552 cwm = 2;
4553
4554 /* Play safe and disable self-refresh before adjusting watermarks. */
4555 if (IS_I945G(dev) || IS_I945GM(dev))
4556 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
4557 else if (IS_I915GM(dev))
4558 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
4559
4560 /* Calc sr entries for one plane configs */
4561 if (HAS_FW_BLC(dev) && enabled) {
4562 /* self-refresh has much higher latency */
4563 static const int sr_latency_ns = 6000;
4564 int clock = enabled->mode.clock;
4565 int htotal = enabled->mode.htotal;
4566 int hdisplay = enabled->mode.hdisplay;
4567 int pixel_size = enabled->fb->bits_per_pixel / 8;
4568 unsigned long line_time_us;
4569 int entries;
4570
4571 line_time_us = (htotal * 1000) / clock;
4572
4573 /* Use ns/us then divide to preserve precision */
4574 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
4575 pixel_size * hdisplay;
4576 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
4577 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
4578 srwm = wm_info->fifo_size - entries;
4579 if (srwm < 0)
4580 srwm = 1;
4581
4582 if (IS_I945G(dev) || IS_I945GM(dev))
4583 I915_WRITE(FW_BLC_SELF,
4584 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
4585 else if (IS_I915GM(dev))
4586 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
4587 }
4588
4589 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
4590 planea_wm, planeb_wm, cwm, srwm);
4591
4592 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
4593 fwater_hi = (cwm & 0x1f);
4594
4595 /* Set request length to 8 cachelines per fetch */
4596 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
4597 fwater_hi = fwater_hi | (1 << 8);
4598
4599 I915_WRITE(FW_BLC, fwater_lo);
4600 I915_WRITE(FW_BLC2, fwater_hi);
4601
4602 if (HAS_FW_BLC(dev)) {
4603 if (enabled) {
4604 if (IS_I945G(dev) || IS_I945GM(dev))
4605 I915_WRITE(FW_BLC_SELF,
4606 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
4607 else if (IS_I915GM(dev))
4608 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
4609 DRM_DEBUG_KMS("memory self refresh enabled\n");
4610 } else
4611 DRM_DEBUG_KMS("memory self refresh disabled\n");
4612 }
4613}
4614
4615static void i830_update_wm(struct drm_device *dev)
4616{
4617 struct drm_i915_private *dev_priv = dev->dev_private;
4618 struct drm_crtc *crtc;
4619 uint32_t fwater_lo;
4620 int planea_wm;
4621
4622 crtc = single_enabled_crtc(dev);
4623 if (crtc == NULL)
4624 return;
4625
4626 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
4627 dev_priv->display.get_fifo_size(dev, 0),
4628 crtc->fb->bits_per_pixel / 8,
4629 latency_ns);
4630 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
4631 fwater_lo |= (3<<8) | planea_wm;
4632
4633 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
4634
4635 I915_WRITE(FW_BLC, fwater_lo);
4636}
4637
4638#define ILK_LP0_PLANE_LATENCY 700
4639#define ILK_LP0_CURSOR_LATENCY 1300
4640
4641/*
4642 * Check the wm result.
4643 *
4644  * If any calculated watermark value is larger than the maximum value that
4645 * can be programmed into the associated watermark register, that watermark
4646 * must be disabled.
4647 */
4648static bool ironlake_check_srwm(struct drm_device *dev, int level,
4649 int fbc_wm, int display_wm, int cursor_wm,
4650 const struct intel_watermark_params *display,
4651 const struct intel_watermark_params *cursor)
4652{
4653 struct drm_i915_private *dev_priv = dev->dev_private;
4654
4655 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
4656 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
4657
4658 if (fbc_wm > SNB_FBC_MAX_SRWM) {
4659 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
4660 fbc_wm, SNB_FBC_MAX_SRWM, level);
4661
4662 		/* FBC has its own way to disable the FBC WM */
4663 I915_WRITE(DISP_ARB_CTL,
4664 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
4665 return false;
4666 }
4667
4668 if (display_wm > display->max_wm) {
4669 		DRM_DEBUG_KMS("display watermark(%d) is too large(%ld), disabling wm%d+\n",
4670 			      display_wm, display->max_wm, level);
4671 return false;
4672 }
4673
4674 if (cursor_wm > cursor->max_wm) {
4675 		DRM_DEBUG_KMS("cursor watermark(%d) is too large(%ld), disabling wm%d+\n",
4676 			      cursor_wm, cursor->max_wm, level);
4677 return false;
4678 }
4679
4680 if (!(fbc_wm || display_wm || cursor_wm)) {
4681 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
4682 return false;
4683 }
4684
4685 return true;
4686}
4687
4688/*
4689  * Compute the watermark values for WM[1-3].
4690 */
4691static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
4692 int latency_ns,
4693 const struct intel_watermark_params *display,
4694 const struct intel_watermark_params *cursor,
4695 int *fbc_wm, int *display_wm, int *cursor_wm)
4696{
4697 struct drm_crtc *crtc;
4698 unsigned long line_time_us;
4699 int hdisplay, htotal, pixel_size, clock;
4700 int line_count, line_size;
4701 int small, large;
4702 int entries;
4703
4704 if (!latency_ns) {
4705 *fbc_wm = *display_wm = *cursor_wm = 0;
4706 return false;
4707 }
4708
4709 crtc = intel_get_crtc_for_plane(dev, plane);
4710 hdisplay = crtc->mode.hdisplay;
4711 htotal = crtc->mode.htotal;
4712 clock = crtc->mode.clock;
4713 pixel_size = crtc->fb->bits_per_pixel / 8;
4714
4715 line_time_us = (htotal * 1000) / clock;
4716 line_count = (latency_ns / line_time_us + 1000) / 1000;
4717 line_size = hdisplay * pixel_size;
4718
4719 /* Use the minimum of the small and large buffer method for primary */
4720 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
4721 large = line_count * line_size;
4722
4723 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
4724 *display_wm = entries + display->guard_size;
4725
4726 /*
4727 * Spec says:
4728 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
4729 */
4730 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
4731
4732 /* calculate the self-refresh watermark for display cursor */
4733 entries = line_count * pixel_size * 64;
4734 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4735 *cursor_wm = entries + cursor->guard_size;
4736
4737 return ironlake_check_srwm(dev, level,
4738 *fbc_wm, *display_wm, *cursor_wm,
4739 display, cursor);
4740}
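
A worked instance of the spec formula above (illustrative numbers):

	/*
	 * display_wm = 64, hdisplay = 1920, pixel_size = 4
	 * line_size  = 1920 * 4 = 7680 bytes
	 * fbc_wm     = DIV_ROUND_UP(64 * 64, 7680) + 2
	 *            = DIV_ROUND_UP(4096, 7680) + 2 = 1 + 2 = 3 lines
	 */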
4741
4742static void ironlake_update_wm(struct drm_device *dev)
4743{
4744 struct drm_i915_private *dev_priv = dev->dev_private;
4745 int fbc_wm, plane_wm, cursor_wm;
4746 unsigned int enabled;
4747
4748 enabled = 0;
4749 if (g4x_compute_wm0(dev, 0,
4750 &ironlake_display_wm_info,
4751 ILK_LP0_PLANE_LATENCY,
4752 &ironlake_cursor_wm_info,
4753 ILK_LP0_CURSOR_LATENCY,
4754 &plane_wm, &cursor_wm)) {
4755 I915_WRITE(WM0_PIPEA_ILK,
4756 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4757 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4758 " plane %d, " "cursor: %d\n",
4759 plane_wm, cursor_wm);
4760 enabled |= 1;
4761 }
4762
4763 if (g4x_compute_wm0(dev, 1,
4764 &ironlake_display_wm_info,
4765 ILK_LP0_PLANE_LATENCY,
4766 &ironlake_cursor_wm_info,
4767 ILK_LP0_CURSOR_LATENCY,
4768 &plane_wm, &cursor_wm)) {
4769 I915_WRITE(WM0_PIPEB_ILK,
4770 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4771 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4772 " plane %d, cursor: %d\n",
4773 plane_wm, cursor_wm);
4774 enabled |= 2;
4775 }
4776
4777 /*
4778 * Calculate and update the self-refresh watermark only when one
4779 * display plane is used.
4780 */
4781 I915_WRITE(WM3_LP_ILK, 0);
4782 I915_WRITE(WM2_LP_ILK, 0);
4783 I915_WRITE(WM1_LP_ILK, 0);
4784
4785 if (!single_plane_enabled(enabled))
4786 return;
4787 enabled = ffs(enabled) - 1;
4788
4789 /* WM1 */
4790 if (!ironlake_compute_srwm(dev, 1, enabled,
4791 ILK_READ_WM1_LATENCY() * 500,
4792 &ironlake_display_srwm_info,
4793 &ironlake_cursor_srwm_info,
4794 &fbc_wm, &plane_wm, &cursor_wm))
4795 return;
4796
4797 I915_WRITE(WM1_LP_ILK,
4798 WM1_LP_SR_EN |
4799 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4800 (fbc_wm << WM1_LP_FBC_SHIFT) |
4801 (plane_wm << WM1_LP_SR_SHIFT) |
4802 cursor_wm);
4803
4804 /* WM2 */
4805 if (!ironlake_compute_srwm(dev, 2, enabled,
4806 ILK_READ_WM2_LATENCY() * 500,
4807 &ironlake_display_srwm_info,
4808 &ironlake_cursor_srwm_info,
4809 &fbc_wm, &plane_wm, &cursor_wm))
4810 return;
4811
4812 I915_WRITE(WM2_LP_ILK,
4813 WM2_LP_EN |
4814 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4815 (fbc_wm << WM1_LP_FBC_SHIFT) |
4816 (plane_wm << WM1_LP_SR_SHIFT) |
4817 cursor_wm);
4818
4819 /*
4820 * WM3 is unsupported on ILK, probably because we don't have latency
4821 * data for that power state
4822 */
4823}
4824
4825void sandybridge_update_wm(struct drm_device *dev)
4826{
4827 struct drm_i915_private *dev_priv = dev->dev_private;
4828 int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */
4829 u32 val;
4830 int fbc_wm, plane_wm, cursor_wm;
4831 unsigned int enabled;
4832
4833 enabled = 0;
4834 if (g4x_compute_wm0(dev, 0,
4835 &sandybridge_display_wm_info, latency,
4836 &sandybridge_cursor_wm_info, latency,
4837 &plane_wm, &cursor_wm)) {
4838 val = I915_READ(WM0_PIPEA_ILK);
4839 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4840 I915_WRITE(WM0_PIPEA_ILK, val |
4841 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4842 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
4843 " plane %d, " "cursor: %d\n",
4844 plane_wm, cursor_wm);
4845 enabled |= 1;
4846 }
4847
4848 if (g4x_compute_wm0(dev, 1,
4849 &sandybridge_display_wm_info, latency,
4850 &sandybridge_cursor_wm_info, latency,
4851 &plane_wm, &cursor_wm)) {
4852 val = I915_READ(WM0_PIPEB_ILK);
4853 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4854 I915_WRITE(WM0_PIPEB_ILK, val |
4855 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4856 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
4857 " plane %d, cursor: %d\n",
4858 plane_wm, cursor_wm);
4859 enabled |= 2;
4860 }
4861
4862 /* IVB has 3 pipes */
4863 if (IS_IVYBRIDGE(dev) &&
4864 g4x_compute_wm0(dev, 2,
4865 &sandybridge_display_wm_info, latency,
4866 &sandybridge_cursor_wm_info, latency,
4867 &plane_wm, &cursor_wm)) {
4868 val = I915_READ(WM0_PIPEC_IVB);
4869 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
4870 I915_WRITE(WM0_PIPEC_IVB, val |
4871 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
4872 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
4873 " plane %d, cursor: %d\n",
4874 plane_wm, cursor_wm);
4875 		enabled |= 1 << 2;	/* pipe C */
4876 }
4877
4878 /*
4879 * Calculate and update the self-refresh watermark only when one
4880 * display plane is used.
4881 *
4882 	 * SNB supports 3 levels of watermark.
4883 	 *
4884 	 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
4885 	 * and disabled in descending order.
4886 *
4887 */
4888 I915_WRITE(WM3_LP_ILK, 0);
4889 I915_WRITE(WM2_LP_ILK, 0);
4890 I915_WRITE(WM1_LP_ILK, 0);
4891
4892 if (!single_plane_enabled(enabled) ||
4893 dev_priv->sprite_scaling_enabled)
4894 return;
4895 enabled = ffs(enabled) - 1;
4896
4897 /* WM1 */
4898 if (!ironlake_compute_srwm(dev, 1, enabled,
4899 SNB_READ_WM1_LATENCY() * 500,
4900 &sandybridge_display_srwm_info,
4901 &sandybridge_cursor_srwm_info,
4902 &fbc_wm, &plane_wm, &cursor_wm))
4903 return;
4904
4905 I915_WRITE(WM1_LP_ILK,
4906 WM1_LP_SR_EN |
4907 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4908 (fbc_wm << WM1_LP_FBC_SHIFT) |
4909 (plane_wm << WM1_LP_SR_SHIFT) |
4910 cursor_wm);
4911
4912 /* WM2 */
4913 if (!ironlake_compute_srwm(dev, 2, enabled,
4914 SNB_READ_WM2_LATENCY() * 500,
4915 &sandybridge_display_srwm_info,
4916 &sandybridge_cursor_srwm_info,
4917 &fbc_wm, &plane_wm, &cursor_wm))
4918 return;
4919
4920 I915_WRITE(WM2_LP_ILK,
4921 WM2_LP_EN |
4922 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4923 (fbc_wm << WM1_LP_FBC_SHIFT) |
4924 (plane_wm << WM1_LP_SR_SHIFT) |
4925 cursor_wm);
4926
4927 /* WM3 */
4928 if (!ironlake_compute_srwm(dev, 3, enabled,
4929 SNB_READ_WM3_LATENCY() * 500,
4930 &sandybridge_display_srwm_info,
4931 &sandybridge_cursor_srwm_info,
4932 &fbc_wm, &plane_wm, &cursor_wm))
4933 return;
4934
4935 I915_WRITE(WM3_LP_ILK,
4936 WM3_LP_EN |
4937 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
4938 (fbc_wm << WM1_LP_FBC_SHIFT) |
4939 (plane_wm << WM1_LP_SR_SHIFT) |
4940 cursor_wm);
4941}
4942
4943static bool
4944sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
4945 uint32_t sprite_width, int pixel_size,
4946 const struct intel_watermark_params *display,
4947 int display_latency_ns, int *sprite_wm)
4948{
4949 struct drm_crtc *crtc;
4950 int clock;
4951 int entries, tlb_miss;
4952
4953 crtc = intel_get_crtc_for_plane(dev, plane);
4954 if (crtc->fb == NULL || !crtc->enabled) {
4955 *sprite_wm = display->guard_size;
4956 return false;
4957 }
4958
4959 clock = crtc->mode.clock;
4960
4961 /* Use the small buffer method to calculate the sprite watermark */
4962 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4963 tlb_miss = display->fifo_size*display->cacheline_size -
4964 sprite_width * 8;
4965 if (tlb_miss > 0)
4966 entries += tlb_miss;
4967 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4968 *sprite_wm = entries + display->guard_size;
4969 if (*sprite_wm > (int)display->max_wm)
4970 *sprite_wm = display->max_wm;
4971
4972 return true;
4973}
4974
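Editor's note: the small-buffer method above is plain integer arithmetic, so it can be checked in isolation. Below is a minimal userspace sketch of the same computation; the clock, latency and FIFO numbers are made-up examples, not real hardware values.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int clock = 148500;      /* dotclock in kHz (example) */
	int pixel_size = 4;      /* bytes per pixel */
	int latency_ns = 700;    /* WM0 latency (example) */
	int sprite_width = 256;
	int fifo_size = 128, cacheline = 64, guard = 2, max_wm = 0x7f;

	/* bytes fetched during the latency window */
	int entries = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	/* pad for a TLB miss if the sprite doesn't fill the FIFO */
	int tlb_miss = fifo_size * cacheline - sprite_width * 8;
	if (tlb_miss > 0)
		entries += tlb_miss;
	entries = DIV_ROUND_UP(entries, cacheline);

	int wm = entries + guard;
	if (wm > max_wm)
		wm = max_wm;
	printf("sprite wm = %d\n", wm);
	return 0;
}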
4975static bool
4976sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
4977 uint32_t sprite_width, int pixel_size,
4978 const struct intel_watermark_params *display,
4979 int latency_ns, int *sprite_wm)
4980{
4981 struct drm_crtc *crtc;
4982 unsigned long line_time_us;
4983 int clock;
4984 int line_count, line_size;
4985 int small, large;
4986 int entries;
4987
4988 if (!latency_ns) {
4989 *sprite_wm = 0;
4990 return false;
4991 }
4992
4993 crtc = intel_get_crtc_for_plane(dev, plane);
4994 clock = crtc->mode.clock;
4995 if (!clock) {
4996 *sprite_wm = 0;
4997 return false;
4998 }
4999
5000 line_time_us = (sprite_width * 1000) / clock;
5001 if (!line_time_us) {
5002 *sprite_wm = 0;
5003 return false;
5004 }
5005
5006 line_count = (latency_ns / line_time_us + 1000) / 1000;
5007 line_size = sprite_width * pixel_size;
5008
5009	/* Use the minimum of the small and large buffer methods */
5010 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
5011 large = line_count * line_size;
5012
5013 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
5014 *sprite_wm = entries + display->guard_size;
5015
5016	return *sprite_wm <= 0x3ff;	/* result must fit the 10-bit WM field */
5017}
5018
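Editor's note: the self-refresh variant takes the minimum of the small-buffer estimate and a line-based (large-buffer) estimate, then rejects results that overflow the 10-bit watermark field. A runnable sketch with illustrative numbers, mirroring the code above (including its derivation of the line time from the sprite width):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long clock = 148500;     /* dotclock in kHz (example) */
	long pixel_size = 4, latency_ns = 3500;
	long sprite_width = 256, cacheline = 64, guard = 2;

	long line_time_us = (sprite_width * 1000) / clock;   /* ~1us here */
	long line_count = (latency_ns / line_time_us + 1000) / 1000;
	long line_size = sprite_width * pixel_size;

	long small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
	long large = line_count * line_size;

	long entries = DIV_ROUND_UP(small < large ? small : large, cacheline);
	long wm = entries + guard;

	printf("sr sprite wm = %ld (fits 10 bits: %s)\n", wm,
	       wm > 0x3ff ? "no" : "yes");
	return 0;
}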
5019static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
5020 uint32_t sprite_width, int pixel_size)
5021{
5022 struct drm_i915_private *dev_priv = dev->dev_private;
5023	int latency = SNB_READ_WM0_LATENCY() * 100;	/* latency in 0.1us units, converted to ns */
5024 u32 val;
5025 int sprite_wm, reg;
5026 int ret;
5027
5028 switch (pipe) {
5029 case 0:
5030 reg = WM0_PIPEA_ILK;
5031 break;
5032 case 1:
5033 reg = WM0_PIPEB_ILK;
5034 break;
5035 case 2:
5036 reg = WM0_PIPEC_IVB;
5037 break;
5038 default:
5039 return; /* bad pipe */
5040 }
5041
5042 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
5043 &sandybridge_display_wm_info,
5044 latency, &sprite_wm);
5045 if (!ret) {
5046 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
5047 pipe);
5048 return;
5049 }
5050
5051 val = I915_READ(reg);
5052 val &= ~WM0_PIPE_SPRITE_MASK;
5053 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
5054	DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
5055
5056
5057 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
5058 pixel_size,
5059 &sandybridge_display_srwm_info,
5060 SNB_READ_WM1_LATENCY() * 500,
5061 &sprite_wm);
5062 if (!ret) {
5063 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
5064 pipe);
5065 return;
5066 }
5067 I915_WRITE(WM1S_LP_ILK, sprite_wm);
5068
5069 /* Only IVB has two more LP watermarks for sprite */
5070 if (!IS_IVYBRIDGE(dev))
5071 return;
5072
5073 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
5074 pixel_size,
5075 &sandybridge_display_srwm_info,
5076 SNB_READ_WM2_LATENCY() * 500,
5077 &sprite_wm);
5078 if (!ret) {
5079 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
5080 pipe);
5081 return;
5082 }
5083 I915_WRITE(WM2S_LP_IVB, sprite_wm);
5084
5085 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
5086 pixel_size,
5087 &sandybridge_display_srwm_info,
5088 SNB_READ_WM3_LATENCY() * 500,
5089 &sprite_wm);
5090 if (!ret) {
5091 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
5092 pipe);
5093 return;
5094 }
5095 I915_WRITE(WM3S_LP_IVB, sprite_wm);
5096}
5097
5098/**
5099 * intel_update_watermarks - update FIFO watermark values based on current modes
5100 *
5101 * Calculate watermark values for the various WM regs based on current mode
5102 * and plane configuration.
5103 *
5104 * There are several cases to deal with here:
5105 * - normal (i.e. non-self-refresh)
5106 * - self-refresh (SR) mode
5107 * - lines are large relative to FIFO size (buffer can hold up to 2)
5108 * - lines are small relative to FIFO size (buffer can hold more than 2
5109 * lines), so need to account for TLB latency
5110 *
5111 * The normal calculation is:
5112 * watermark = dotclock * bytes per pixel * latency
5113 * where latency is platform & configuration dependent (we assume pessimal
5114 * values here).
5115 *
5116 * The SR calculation is:
5117 * watermark = (trunc(latency/line time)+1) * surface width *
5118 * bytes per pixel
5119 * where
5120 * line time = htotal / dotclock
5121 * surface width = hdisplay for normal plane and 64 for cursor
5122 * and latency is assumed to be high, as above.
5123 *
5124 * The final value programmed to the register should always be rounded up,
5125 * and include an extra 2 entries to account for clock crossings.
5126 *
5127 * We don't use the sprite, so we can ignore that. And on Crestline we have
5128 * to set the non-SR watermarks to 8.
5129 */
5130static void intel_update_watermarks(struct drm_device *dev)
5131{
5132 struct drm_i915_private *dev_priv = dev->dev_private;
5133
5134 if (dev_priv->display.update_wm)
5135 dev_priv->display.update_wm(dev);
5136}
5137
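Editor's note: the comment above gives the two formulas in prose; the sketch below evaluates both for one made-up mode so the units are easy to follow. All inputs are examples, not measured values.

#include <stdio.h>

int main(void)
{
	long dotclock = 148500;  /* kHz */
	int bpp = 4;             /* bytes per pixel */
	long latency_ns = 700;
	int htotal = 2200, hdisplay = 1920;

	/* normal: dotclock * bytes per pixel * latency */
	long normal = (dotclock * bpp / 1000) * latency_ns / 1000;

	/* SR: (trunc(latency / line time) + 1) * surface width * bpp,
	 * with line time = htotal / dotclock */
	long line_time_ns = htotal * 1000000L / dotclock;
	long sr = (latency_ns / line_time_ns + 1) * hdisplay * bpp;

	printf("normal = %ld bytes, sr = %ld bytes\n", normal, sr);
	return 0;
}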
5138void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
5139 uint32_t sprite_width, int pixel_size)
5140{
5141 struct drm_i915_private *dev_priv = dev->dev_private;
5142
5143 if (dev_priv->display.update_sprite_wm)
5144 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
5145 pixel_size);
5146}
5147
5148static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) 3189static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
5149{ 3190{
5150 if (i915_panel_use_ssc >= 0) 3191 if (i915_panel_use_ssc >= 0)
@@ -5375,7 +3416,7 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
5375 struct drm_i915_private *dev_priv = dev->dev_private; 3416 struct drm_i915_private *dev_priv = dev->dev_private;
5376 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3417 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5377 int pipe = intel_crtc->pipe; 3418 int pipe = intel_crtc->pipe;
5378 u32 temp, lvds_sync = 0; 3419 u32 temp;
5379 3420
5380 temp = I915_READ(LVDS); 3421 temp = I915_READ(LVDS);
5381 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; 3422 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
@@ -5405,22 +3446,11 @@ static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
5405 else 3446 else
5406 temp &= ~LVDS_ENABLE_DITHER; 3447 temp &= ~LVDS_ENABLE_DITHER;
5407 } 3448 }
3449 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5408 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 3450 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
5409 lvds_sync |= LVDS_HSYNC_POLARITY; 3451 temp |= LVDS_HSYNC_POLARITY;
5410 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 3452 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
5411 lvds_sync |= LVDS_VSYNC_POLARITY; 3453 temp |= LVDS_VSYNC_POLARITY;
5412 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
5413 != lvds_sync) {
5414 char flags[2] = "-+";
5415 DRM_INFO("Changing LVDS panel from "
5416 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
5417 flags[!(temp & LVDS_HSYNC_POLARITY)],
5418 flags[!(temp & LVDS_VSYNC_POLARITY)],
5419 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
5420 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
5421 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
5422 temp |= lvds_sync;
5423 }
5424 I915_WRITE(LVDS, temp); 3454 I915_WRITE(LVDS, temp);
5425} 3455}
5426 3456
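Editor's note: the replacement code in this hunk drops the lvds_sync accumulator (and the DRM_INFO chatter) in favour of clearing the polarity bits and re-setting them from the adjusted mode. The same clear-then-set idiom, reduced to a standalone sketch; the bit positions here are assumed, for illustration only.

#include <stdio.h>

#define LVDS_HSYNC_POLARITY (1u << 20)  /* assumed bit position */
#define LVDS_VSYNC_POLARITY (1u << 21)  /* assumed bit position */

static unsigned int set_sync_polarity(unsigned int temp, int nhsync, int nvsync)
{
	/* drop whatever polarity was there, then set what the mode asks for */
	temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
	if (nhsync)
		temp |= LVDS_HSYNC_POLARITY;
	if (nvsync)
		temp |= LVDS_VSYNC_POLARITY;
	return temp;
}

int main(void)
{
	printf("0x%08x\n", set_sync_polarity(0xffffffffu, 1, 0));
	return 0;
}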
@@ -5965,17 +3995,16 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5965 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; 3995 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
5966 bool ok, has_reduced_clock = false, is_sdvo = false; 3996 bool ok, has_reduced_clock = false, is_sdvo = false;
5967 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; 3997 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
5968 struct intel_encoder *has_edp_encoder = NULL;
5969 struct drm_mode_config *mode_config = &dev->mode_config; 3998 struct drm_mode_config *mode_config = &dev->mode_config;
5970 struct intel_encoder *encoder; 3999 struct intel_encoder *encoder, *edp_encoder = NULL;
5971 const intel_limit_t *limit; 4000 const intel_limit_t *limit;
5972 int ret; 4001 int ret;
5973 struct fdi_m_n m_n = {0}; 4002 struct fdi_m_n m_n = {0};
5974 u32 temp; 4003 u32 temp;
5975 u32 lvds_sync = 0;
5976 int target_clock, pixel_multiplier, lane, link_bw, factor; 4004 int target_clock, pixel_multiplier, lane, link_bw, factor;
5977 unsigned int pipe_bpp; 4005 unsigned int pipe_bpp;
5978 bool dither; 4006 bool dither;
4007 bool is_cpu_edp = false, is_pch_edp = false;
5979 4008
5980 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { 4009 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
5981 if (encoder->base.crtc != crtc) 4010 if (encoder->base.crtc != crtc)
@@ -6001,7 +4030,12 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6001 is_dp = true; 4030 is_dp = true;
6002 break; 4031 break;
6003 case INTEL_OUTPUT_EDP: 4032 case INTEL_OUTPUT_EDP:
6004 has_edp_encoder = encoder; 4033 is_dp = true;
4034 if (intel_encoder_is_pch_edp(&encoder->base))
4035 is_pch_edp = true;
4036 else
4037 is_cpu_edp = true;
4038 edp_encoder = encoder;
6005 break; 4039 break;
6006 } 4040 }
6007 4041
@@ -6064,15 +4098,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6064 lane = 0; 4098 lane = 0;
6065 /* CPU eDP doesn't require FDI link, so just set DP M/N 4099 /* CPU eDP doesn't require FDI link, so just set DP M/N
6066 according to current link config */ 4100 according to current link config */
6067 if (has_edp_encoder && 4101 if (is_cpu_edp) {
6068 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6069 target_clock = mode->clock; 4102 target_clock = mode->clock;
6070 intel_edp_link_config(has_edp_encoder, 4103 intel_edp_link_config(edp_encoder, &lane, &link_bw);
6071 &lane, &link_bw);
6072 } else { 4104 } else {
6073 /* [e]DP over FDI requires target mode clock 4105 /* [e]DP over FDI requires target mode clock
6074 instead of link clock */ 4106 instead of link clock */
6075 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4107 if (is_dp)
6076 target_clock = mode->clock; 4108 target_clock = mode->clock;
6077 else 4109 else
6078 target_clock = adjusted_mode->clock; 4110 target_clock = adjusted_mode->clock;
@@ -6163,7 +4195,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6163 } 4195 }
6164 dpll |= DPLL_DVO_HIGH_SPEED; 4196 dpll |= DPLL_DVO_HIGH_SPEED;
6165 } 4197 }
6166 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) 4198 if (is_dp && !is_cpu_edp)
6167 dpll |= DPLL_DVO_HIGH_SPEED; 4199 dpll |= DPLL_DVO_HIGH_SPEED;
6168 4200
6169 /* compute bitmask from p1 value */ 4201 /* compute bitmask from p1 value */
@@ -6208,8 +4240,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6208 4240
6209 /* PCH eDP needs FDI, but CPU eDP does not */ 4241 /* PCH eDP needs FDI, but CPU eDP does not */
6210 if (!intel_crtc->no_pll) { 4242 if (!intel_crtc->no_pll) {
6211 if (!has_edp_encoder || 4243 if (!is_cpu_edp) {
6212 intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6213 I915_WRITE(PCH_FP0(pipe), fp); 4244 I915_WRITE(PCH_FP0(pipe), fp);
6214 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); 4245 I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
6215 4246
@@ -6262,22 +4293,11 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6262 * appropriately here, but we need to look more thoroughly into how 4293 * appropriately here, but we need to look more thoroughly into how
6263 * panels behave in the two modes. 4294 * panels behave in the two modes.
6264 */ 4295 */
4296 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6265 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 4297 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
6266 lvds_sync |= LVDS_HSYNC_POLARITY; 4298 temp |= LVDS_HSYNC_POLARITY;
6267 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 4299 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
6268 lvds_sync |= LVDS_VSYNC_POLARITY; 4300 temp |= LVDS_VSYNC_POLARITY;
6269 if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY))
6270 != lvds_sync) {
6271 char flags[2] = "-+";
6272 DRM_INFO("Changing LVDS panel from "
6273 "(%chsync, %cvsync) to (%chsync, %cvsync)\n",
6274 flags[!(temp & LVDS_HSYNC_POLARITY)],
6275 flags[!(temp & LVDS_VSYNC_POLARITY)],
6276 flags[!(lvds_sync & LVDS_HSYNC_POLARITY)],
6277 flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]);
6278 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
6279 temp |= lvds_sync;
6280 }
6281 I915_WRITE(PCH_LVDS, temp); 4301 I915_WRITE(PCH_LVDS, temp);
6282 } 4302 }
6283 4303
@@ -6287,7 +4307,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6287 pipeconf |= PIPECONF_DITHER_EN; 4307 pipeconf |= PIPECONF_DITHER_EN;
6288 pipeconf |= PIPECONF_DITHER_TYPE_SP; 4308 pipeconf |= PIPECONF_DITHER_TYPE_SP;
6289 } 4309 }
6290 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4310 if (is_dp && !is_cpu_edp) {
6291 intel_dp_set_m_n(crtc, mode, adjusted_mode); 4311 intel_dp_set_m_n(crtc, mode, adjusted_mode);
6292 } else { 4312 } else {
6293 /* For non-DP output, clear any trans DP clock recovery setting.*/ 4313 /* For non-DP output, clear any trans DP clock recovery setting.*/
@@ -6297,9 +4317,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6297 I915_WRITE(TRANSDPLINK_N1(pipe), 0); 4317 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
6298 } 4318 }
6299 4319
6300 if (!intel_crtc->no_pll && 4320 if (!intel_crtc->no_pll && (!edp_encoder || is_pch_edp)) {
6301 (!has_edp_encoder ||
6302 intel_encoder_is_pch_edp(&has_edp_encoder->base))) {
6303 I915_WRITE(PCH_DPLL(pipe), dpll); 4321 I915_WRITE(PCH_DPLL(pipe), dpll);
6304 4322
6305 /* Wait for the clocks to stabilize. */ 4323 /* Wait for the clocks to stabilize. */
@@ -6377,10 +4395,8 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
6377 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); 4395 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
6378 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); 4396 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
6379 4397
6380 if (has_edp_encoder && 4398 if (is_cpu_edp)
6381 !intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
6382 ironlake_set_pll_edp(crtc, adjusted_mode->clock); 4399 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
6383 }
6384 4400
6385 I915_WRITE(PIPECONF(pipe), pipeconf); 4401 I915_WRITE(PIPECONF(pipe), pipeconf);
6386 POSTING_READ(PIPECONF(pipe)); 4402 POSTING_READ(PIPECONF(pipe));
@@ -6748,7 +4764,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
6748 if (!visible && !intel_crtc->cursor_visible) 4764 if (!visible && !intel_crtc->cursor_visible)
6749 return; 4765 return;
6750 4766
6751 if (IS_IVYBRIDGE(dev)) { 4767 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
6752 I915_WRITE(CURPOS_IVB(pipe), pos); 4768 I915_WRITE(CURPOS_IVB(pipe), pos);
6753 ivb_update_cursor(crtc, base); 4769 ivb_update_cursor(crtc, base);
6754 } else { 4770 } else {
@@ -7636,14 +5652,14 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7636 5652
7637 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5653 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7638 if (ret) 5654 if (ret)
7639 goto out; 5655 goto err;
7640 5656
7641 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5657 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7642 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5658 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7643 5659
7644 ret = BEGIN_LP_RING(6); 5660 ret = BEGIN_LP_RING(6);
7645 if (ret) 5661 if (ret)
7646 goto out; 5662 goto err_unpin;
7647 5663
7648 /* Can't queue multiple flips, so wait for the previous 5664 /* Can't queue multiple flips, so wait for the previous
7649 * one to finish before executing the next. 5665 * one to finish before executing the next.
@@ -7660,7 +5676,11 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
7660 OUT_RING(obj->gtt_offset + offset); 5676 OUT_RING(obj->gtt_offset + offset);
7661 OUT_RING(0); /* aux display base address, unused */ 5677 OUT_RING(0); /* aux display base address, unused */
7662 ADVANCE_LP_RING(); 5678 ADVANCE_LP_RING();
7663out: 5679 return 0;
5680
5681err_unpin:
5682 intel_unpin_fb_obj(obj);
5683err:
7664 return ret; 5684 return ret;
7665} 5685}
7666 5686
@@ -7677,14 +5697,14 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7677 5697
7678 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5698 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7679 if (ret) 5699 if (ret)
7680 goto out; 5700 goto err;
7681 5701
7682 /* Offset into the new buffer for cases of shared fbs between CRTCs */ 5702 /* Offset into the new buffer for cases of shared fbs between CRTCs */
7683 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8; 5703 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
7684 5704
7685 ret = BEGIN_LP_RING(6); 5705 ret = BEGIN_LP_RING(6);
7686 if (ret) 5706 if (ret)
7687 goto out; 5707 goto err_unpin;
7688 5708
7689 if (intel_crtc->plane) 5709 if (intel_crtc->plane)
7690 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; 5710 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
@@ -7699,7 +5719,11 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
7699 OUT_RING(MI_NOOP); 5719 OUT_RING(MI_NOOP);
7700 5720
7701 ADVANCE_LP_RING(); 5721 ADVANCE_LP_RING();
7702out: 5722 return 0;
5723
5724err_unpin:
5725 intel_unpin_fb_obj(obj);
5726err:
7703 return ret; 5727 return ret;
7704} 5728}
7705 5729
@@ -7715,11 +5739,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7715 5739
7716 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5740 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7717 if (ret) 5741 if (ret)
7718 goto out; 5742 goto err;
7719 5743
7720 ret = BEGIN_LP_RING(4); 5744 ret = BEGIN_LP_RING(4);
7721 if (ret) 5745 if (ret)
7722 goto out; 5746 goto err_unpin;
7723 5747
7724 /* i965+ uses the linear or tiled offsets from the 5748 /* i965+ uses the linear or tiled offsets from the
7725 * Display Registers (which do not change across a page-flip) 5749 * Display Registers (which do not change across a page-flip)
@@ -7738,7 +5762,11 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
7738 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5762 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7739 OUT_RING(pf | pipesrc); 5763 OUT_RING(pf | pipesrc);
7740 ADVANCE_LP_RING(); 5764 ADVANCE_LP_RING();
7741out: 5765 return 0;
5766
5767err_unpin:
5768 intel_unpin_fb_obj(obj);
5769err:
7742 return ret; 5770 return ret;
7743} 5771}
7744 5772
@@ -7754,11 +5782,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7754 5782
7755 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); 5783 ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
7756 if (ret) 5784 if (ret)
7757 goto out; 5785 goto err;
7758 5786
7759 ret = BEGIN_LP_RING(4); 5787 ret = BEGIN_LP_RING(4);
7760 if (ret) 5788 if (ret)
7761 goto out; 5789 goto err_unpin;
7762 5790
7763 OUT_RING(MI_DISPLAY_FLIP | 5791 OUT_RING(MI_DISPLAY_FLIP |
7764 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); 5792 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
@@ -7769,7 +5797,11 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
7769 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; 5797 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
7770 OUT_RING(pf | pipesrc); 5798 OUT_RING(pf | pipesrc);
7771 ADVANCE_LP_RING(); 5799 ADVANCE_LP_RING();
7772out: 5800 return 0;
5801
5802err_unpin:
5803 intel_unpin_fb_obj(obj);
5804err:
7773 return ret; 5805 return ret;
7774} 5806}
7775 5807
@@ -7791,18 +5823,22 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
7791 5823
7792 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 5824 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
7793 if (ret) 5825 if (ret)
7794 goto out; 5826 goto err;
7795 5827
7796 ret = intel_ring_begin(ring, 4); 5828 ret = intel_ring_begin(ring, 4);
7797 if (ret) 5829 if (ret)
7798 goto out; 5830 goto err_unpin;
7799 5831
7800 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 5832 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
7801 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 5833 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7802 intel_ring_emit(ring, (obj->gtt_offset)); 5834 intel_ring_emit(ring, (obj->gtt_offset));
7803 intel_ring_emit(ring, (MI_NOOP)); 5835 intel_ring_emit(ring, (MI_NOOP));
7804 intel_ring_advance(ring); 5836 intel_ring_advance(ring);
7805out: 5837 return 0;
5838
5839err_unpin:
5840 intel_unpin_fb_obj(obj);
5841err:
7806 return ret; 5842 return ret;
7807} 5843}
7808 5844
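Editor's note: all five queue_flip paths in this series move from a single out: label to a staged err_unpin/err unwind, so a failure during ring emission releases the framebuffer pin taken earlier. A generic sketch of the pattern follows; the acquire/emit/release helpers are placeholders standing in for the pin, BEGIN_LP_RING and unpin calls.

#include <stdio.h>

static int acquire_resource(void) { return 0; }   /* stands in for pin+fence */
static int emit_commands(void)    { return 0; }   /* stands in for ring emit */
static void release_resource(void) { }            /* stands in for unpin */

static int queue_flip_sketch(void)
{
	int ret;

	ret = acquire_resource();
	if (ret)
		goto err;              /* nothing to undo yet */

	ret = emit_commands();
	if (ret)
		goto err_unpin;        /* undo only what already succeeded */

	return 0;

err_unpin:
	release_resource();
err:
	return ret;
}

int main(void)
{
	printf("flip -> %d\n", queue_flip_sketch());
	return 0;
}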
@@ -8292,926 +6328,6 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
8292 .output_poll_changed = intel_fb_output_poll_changed, 6328 .output_poll_changed = intel_fb_output_poll_changed,
8293}; 6329};
8294 6330
8295static struct drm_i915_gem_object *
8296intel_alloc_context_page(struct drm_device *dev)
8297{
8298 struct drm_i915_gem_object *ctx;
8299 int ret;
8300
8301 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
8302
8303 ctx = i915_gem_alloc_object(dev, 4096);
8304 if (!ctx) {
8305 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
8306 return NULL;
8307 }
8308
8309 ret = i915_gem_object_pin(ctx, 4096, true);
8310 if (ret) {
8311 DRM_ERROR("failed to pin power context: %d\n", ret);
8312 goto err_unref;
8313 }
8314
8315 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
8316 if (ret) {
8317 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
8318 goto err_unpin;
8319 }
8320
8321 return ctx;
8322
8323err_unpin:
8324 i915_gem_object_unpin(ctx);
8325err_unref:
8326 drm_gem_object_unreference(&ctx->base);
8327 mutex_unlock(&dev->struct_mutex);
8328 return NULL;
8329}
8330
8331bool ironlake_set_drps(struct drm_device *dev, u8 val)
8332{
8333 struct drm_i915_private *dev_priv = dev->dev_private;
8334 u16 rgvswctl;
8335
8336 rgvswctl = I915_READ16(MEMSWCTL);
8337 if (rgvswctl & MEMCTL_CMD_STS) {
8338 DRM_DEBUG("gpu busy, RCS change rejected\n");
8339 return false; /* still busy with another command */
8340 }
8341
8342 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
8343 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
8344 I915_WRITE16(MEMSWCTL, rgvswctl);
8345 POSTING_READ16(MEMSWCTL);
8346
8347 rgvswctl |= MEMCTL_CMD_STS;
8348 I915_WRITE16(MEMSWCTL, rgvswctl);
8349
8350 return true;
8351}
8352
8353void ironlake_enable_drps(struct drm_device *dev)
8354{
8355 struct drm_i915_private *dev_priv = dev->dev_private;
8356 u32 rgvmodectl = I915_READ(MEMMODECTL);
8357 u8 fmax, fmin, fstart, vstart;
8358
8359 /* Enable temp reporting */
8360 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
8361 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
8362
8363 /* 100ms RC evaluation intervals */
8364 I915_WRITE(RCUPEI, 100000);
8365 I915_WRITE(RCDNEI, 100000);
8366
8367 /* Set max/min thresholds to 90ms and 80ms respectively */
8368 I915_WRITE(RCBMAXAVG, 90000);
8369 I915_WRITE(RCBMINAVG, 80000);
8370
8371 I915_WRITE(MEMIHYST, 1);
8372
8373 /* Set up min, max, and cur for interrupt handling */
8374 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
8375 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
8376 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
8377 MEMMODE_FSTART_SHIFT;
8378
8379 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
8380 PXVFREQ_PX_SHIFT;
8381
8382 dev_priv->fmax = fmax; /* IPS callback will increase this */
8383 dev_priv->fstart = fstart;
8384
8385 dev_priv->max_delay = fstart;
8386 dev_priv->min_delay = fmin;
8387 dev_priv->cur_delay = fstart;
8388
8389 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
8390 fmax, fmin, fstart);
8391
8392 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
8393
8394 /*
8395 * Interrupts will be enabled in ironlake_irq_postinstall
8396 */
8397
8398 I915_WRITE(VIDSTART, vstart);
8399 POSTING_READ(VIDSTART);
8400
8401 rgvmodectl |= MEMMODE_SWMODE_EN;
8402 I915_WRITE(MEMMODECTL, rgvmodectl);
8403
8404 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
8405 DRM_ERROR("stuck trying to change perf mode\n");
8406 msleep(1);
8407
8408 ironlake_set_drps(dev, fstart);
8409
8410 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
8411 I915_READ(0x112e0);
8412 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
8413 dev_priv->last_count2 = I915_READ(0x112f4);
8414 getrawmonotonic(&dev_priv->last_time2);
8415}
8416
8417void ironlake_disable_drps(struct drm_device *dev)
8418{
8419 struct drm_i915_private *dev_priv = dev->dev_private;
8420 u16 rgvswctl = I915_READ16(MEMSWCTL);
8421
8422 /* Ack interrupts, disable EFC interrupt */
8423 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
8424 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
8425 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
8426 I915_WRITE(DEIIR, DE_PCU_EVENT);
8427 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
8428
8429 /* Go back to the starting frequency */
8430 ironlake_set_drps(dev, dev_priv->fstart);
8431 msleep(1);
8432 rgvswctl |= MEMCTL_CMD_STS;
8433 I915_WRITE(MEMSWCTL, rgvswctl);
8434 msleep(1);
8435
8436}
8437
8438void gen6_set_rps(struct drm_device *dev, u8 val)
8439{
8440 struct drm_i915_private *dev_priv = dev->dev_private;
8441 u32 swreq;
8442
8443 swreq = (val & 0x3ff) << 25;
8444 I915_WRITE(GEN6_RPNSWREQ, swreq);
8445}
8446
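Editor's note: gen6_set_rps just encodes the requested ratio into the top bits of GEN6_RPNSWREQ. A tiny sketch of that encoding; the 50 MHz-per-unit scaling matches how the frequencies are printed elsewhere in this code, but treat it as an assumption here.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ratio = 10;                          /* 10 * 50 MHz = 500 MHz */
	uint32_t swreq = ((uint32_t)ratio & 0x3ff) << 25;  /* shift 25, per the code */
	printf("GEN6_RPNSWREQ <- 0x%08x (%u MHz)\n", swreq, ratio * 50);
	return 0;
}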
8447void gen6_disable_rps(struct drm_device *dev)
8448{
8449 struct drm_i915_private *dev_priv = dev->dev_private;
8450
8451 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
8452 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
8453 I915_WRITE(GEN6_PMIER, 0);
8454	/* Completely masking the PM interrupts here doesn't race with the rps
8455	 * work item unmasking them again, because that path uses a different
8456	 * register (PMIMR) for the masking. The only risk is leaving stale
8457	 * bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
8458
8459 spin_lock_irq(&dev_priv->rps_lock);
8460 dev_priv->pm_iir = 0;
8461 spin_unlock_irq(&dev_priv->rps_lock);
8462
8463 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
8464}
8465
8466static unsigned long intel_pxfreq(u32 vidfreq)
8467{
8468 unsigned long freq;
8469 int div = (vidfreq & 0x3f0000) >> 16;
8470 int post = (vidfreq & 0x3000) >> 12;
8471 int pre = (vidfreq & 0x7);
8472
8473 if (!pre)
8474 return 0;
8475
8476 freq = ((div * 133333) / ((1<<post) * pre));
8477
8478 return freq;
8479}
8480
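Editor's note: intel_pxfreq decodes a divider triple out of the PXVFREQ word: freq = div * 133333 / ((1 << post) * pre), in kHz. A runnable copy of the decode, fed one made-up register value:

#include <stdio.h>

static unsigned long intel_pxfreq(unsigned int vidfreq)
{
	int div = (vidfreq & 0x3f0000) >> 16;
	int post = (vidfreq & 0x3000) >> 12;
	int pre = vidfreq & 0x7;

	if (!pre)
		return 0;
	/* freq = div * 133333 kHz / ((1 << post) * pre) */
	return (div * 133333UL) / ((1 << post) * pre);
}

int main(void)
{
	unsigned int sample = (30 << 16) | (1 << 12) | 2; /* div=30, post=1, pre=2 */
	printf("%lu kHz\n", intel_pxfreq(sample));        /* 30*133333/4 ~ 1 GHz */
	return 0;
}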
8481void intel_init_emon(struct drm_device *dev)
8482{
8483 struct drm_i915_private *dev_priv = dev->dev_private;
8484 u32 lcfuse;
8485 u8 pxw[16];
8486 int i;
8487
8488 /* Disable to program */
8489 I915_WRITE(ECR, 0);
8490 POSTING_READ(ECR);
8491
8492 /* Program energy weights for various events */
8493 I915_WRITE(SDEW, 0x15040d00);
8494 I915_WRITE(CSIEW0, 0x007f0000);
8495 I915_WRITE(CSIEW1, 0x1e220004);
8496 I915_WRITE(CSIEW2, 0x04000004);
8497
8498 for (i = 0; i < 5; i++)
8499 I915_WRITE(PEW + (i * 4), 0);
8500 for (i = 0; i < 3; i++)
8501 I915_WRITE(DEW + (i * 4), 0);
8502
8503 /* Program P-state weights to account for frequency power adjustment */
8504 for (i = 0; i < 16; i++) {
8505 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
8506 unsigned long freq = intel_pxfreq(pxvidfreq);
8507 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
8508 PXVFREQ_PX_SHIFT;
8509 unsigned long val;
8510
8511 val = vid * vid;
8512 val *= (freq / 1000);
8513 val *= 255;
8514 val /= (127*127*900);
8515 if (val > 0xff)
8516			DRM_ERROR("bad pxval: %lu\n", val);
8517 pxw[i] = val;
8518 }
8519 /* Render standby states get 0 weight */
8520 pxw[14] = 0;
8521 pxw[15] = 0;
8522
8523 for (i = 0; i < 4; i++) {
8524 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
8525 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
8526 I915_WRITE(PXW + (i * 4), val);
8527 }
8528
8529 /* Adjust magic regs to magic values (more experimental results) */
8530 I915_WRITE(OGW0, 0);
8531 I915_WRITE(OGW1, 0);
8532 I915_WRITE(EG0, 0x00007f00);
8533 I915_WRITE(EG1, 0x0000000e);
8534 I915_WRITE(EG2, 0x000e0000);
8535 I915_WRITE(EG3, 0x68000300);
8536 I915_WRITE(EG4, 0x42000000);
8537 I915_WRITE(EG5, 0x00140031);
8538 I915_WRITE(EG6, 0);
8539 I915_WRITE(EG7, 0);
8540
8541 for (i = 0; i < 8; i++)
8542 I915_WRITE(PXWL + (i * 4), 0);
8543
8544 /* Enable PMON + select events */
8545 I915_WRITE(ECR, 0x80000019);
8546
8547 lcfuse = I915_READ(LCFUSE02);
8548
8549 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
8550}
8551
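Editor's note: the P-state weight loop above boils down to val = vid^2 * (freq / 1000) * 255 / (127 * 127 * 900), clamped to a byte. One worked example; the vid and freq inputs are illustrative, not read from hardware.

#include <stdio.h>

int main(void)
{
	unsigned long vid = 60, freq = 1000000; /* freq in kHz (example) */
	unsigned long val = vid * vid;

	val *= freq / 1000;          /* scale by the frequency in MHz */
	val *= 255;                  /* map onto a byte range */
	val /= 127UL * 127 * 900;    /* normalisation constant from the code */
	printf("pxw = %lu%s\n", val, val > 0xff ? " (bad pxval!)" : "");
	return 0;
}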
8552static int intel_enable_rc6(struct drm_device *dev)
8553{
8554 /*
8555 * Respect the kernel parameter if it is set
8556 */
8557 if (i915_enable_rc6 >= 0)
8558 return i915_enable_rc6;
8559
8560 /*
8561 * Disable RC6 on Ironlake
8562 */
8563 if (INTEL_INFO(dev)->gen == 5)
8564 return 0;
8565
8566 /*
8567	 * Disable deep RC6 on Sandybridge (plain RC6 stays enabled)
8568 */
8569 if (INTEL_INFO(dev)->gen == 6) {
8570 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
8571 return INTEL_RC6_ENABLE;
8572 }
8573 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
8574 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
8575}
8576
8577void gen6_enable_rps(struct drm_i915_private *dev_priv)
8578{
8579 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
8580 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
8581 u32 pcu_mbox, rc6_mask = 0;
8582 u32 gtfifodbg;
8583 int cur_freq, min_freq, max_freq;
8584 int rc6_mode;
8585 int i;
8586
8587 /* Here begins a magic sequence of register writes to enable
8588 * auto-downclocking.
8589 *
8590 * Perhaps there might be some value in exposing these to
8591 * userspace...
8592 */
8593 I915_WRITE(GEN6_RC_STATE, 0);
8594 mutex_lock(&dev_priv->dev->struct_mutex);
8595
8596 /* Clear the DBG now so we don't confuse earlier errors */
8597 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
8598 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
8599 I915_WRITE(GTFIFODBG, gtfifodbg);
8600 }
8601
8602 gen6_gt_force_wake_get(dev_priv);
8603
8604 /* disable the counters and set deterministic thresholds */
8605 I915_WRITE(GEN6_RC_CONTROL, 0);
8606
8607 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
8608 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
8609 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
8610 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
8611 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
8612
8613 for (i = 0; i < I915_NUM_RINGS; i++)
8614 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
8615
8616 I915_WRITE(GEN6_RC_SLEEP, 0);
8617 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
8618 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
8619 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
8620 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
8621
8622 rc6_mode = intel_enable_rc6(dev_priv->dev);
8623 if (rc6_mode & INTEL_RC6_ENABLE)
8624 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
8625
8626 if (rc6_mode & INTEL_RC6p_ENABLE)
8627 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
8628
8629 if (rc6_mode & INTEL_RC6pp_ENABLE)
8630 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
8631
8632 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
8633 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
8634 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
8635 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
8636
8637 I915_WRITE(GEN6_RC_CONTROL,
8638 rc6_mask |
8639 GEN6_RC_CTL_EI_MODE(1) |
8640 GEN6_RC_CTL_HW_ENABLE);
8641
8642 I915_WRITE(GEN6_RPNSWREQ,
8643 GEN6_FREQUENCY(10) |
8644 GEN6_OFFSET(0) |
8645 GEN6_AGGRESSIVE_TURBO);
8646 I915_WRITE(GEN6_RC_VIDEO_FREQ,
8647 GEN6_FREQUENCY(12));
8648
8649 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
8650 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
8651 18 << 24 |
8652 6 << 16);
8653 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
8654 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
8655 I915_WRITE(GEN6_RP_UP_EI, 100000);
8656 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
8657 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
8658 I915_WRITE(GEN6_RP_CONTROL,
8659 GEN6_RP_MEDIA_TURBO |
8660 GEN6_RP_MEDIA_HW_MODE |
8661 GEN6_RP_MEDIA_IS_GFX |
8662 GEN6_RP_ENABLE |
8663 GEN6_RP_UP_BUSY_AVG |
8664 GEN6_RP_DOWN_IDLE_CONT);
8665
8666 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8667 500))
8668 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8669
8670 I915_WRITE(GEN6_PCODE_DATA, 0);
8671 I915_WRITE(GEN6_PCODE_MAILBOX,
8672 GEN6_PCODE_READY |
8673 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8674 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8675 500))
8676 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8677
8678 min_freq = (rp_state_cap & 0xff0000) >> 16;
8679 max_freq = rp_state_cap & 0xff;
8680 cur_freq = (gt_perf_status & 0xff00) >> 8;
8681
8682 /* Check for overclock support */
8683 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8684 500))
8685 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
8686 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
8687 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
8688 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
8689 500))
8690 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
8691 if (pcu_mbox & (1<<31)) { /* OC supported */
8692 max_freq = pcu_mbox & 0xff;
8693		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
8694 }
8695
8696 /* In units of 100MHz */
8697 dev_priv->max_delay = max_freq;
8698 dev_priv->min_delay = min_freq;
8699 dev_priv->cur_delay = cur_freq;
8700
8701 /* requires MSI enabled */
8702 I915_WRITE(GEN6_PMIER,
8703 GEN6_PM_MBOX_EVENT |
8704 GEN6_PM_THERMAL_EVENT |
8705 GEN6_PM_RP_DOWN_TIMEOUT |
8706 GEN6_PM_RP_UP_THRESHOLD |
8707 GEN6_PM_RP_DOWN_THRESHOLD |
8708 GEN6_PM_RP_UP_EI_EXPIRED |
8709 GEN6_PM_RP_DOWN_EI_EXPIRED);
8710 spin_lock_irq(&dev_priv->rps_lock);
8711 WARN_ON(dev_priv->pm_iir != 0);
8712 I915_WRITE(GEN6_PMIMR, 0);
8713 spin_unlock_irq(&dev_priv->rps_lock);
8714 /* enable all PM interrupts */
8715 I915_WRITE(GEN6_PMINTRMSK, 0);
8716
8717 gen6_gt_force_wake_put(dev_priv);
8718 mutex_unlock(&dev_priv->dev->struct_mutex);
8719}
8720
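Editor's note: several steps in gen6_enable_rps follow the same pcode mailbox handshake: wait for the READY bit to clear, write the data register, write the mailbox with READY plus a command, then wait for READY to clear again. A toy model of that handshake; the register layout, the command value and the instantly-completing "firmware" are all stand-ins.

#include <stdint.h>
#include <stdio.h>

#define PCODE_READY (1u << 31)  /* assumed busy/handshake bit */

/* toy MMIO standing in for GEN6_PCODE_MAILBOX/DATA; the simulated
 * firmware consumes requests instantly, so reads always show idle */
static uint32_t mbox_data;
static uint32_t mbox_read(void) { return 0; }
static void mbox_write(uint32_t v) { mbox_data = v; }

static int pcode_write(uint32_t cmd, uint32_t value)
{
	int tries = 500;

	while ((mbox_read() & PCODE_READY) && --tries)
		;                              /* wait for mailbox idle */
	if (!tries)
		return -1;                     /* "timeout ... become idle" */

	mbox_write(value);                     /* GEN6_PCODE_DATA */
	mbox_write(PCODE_READY | cmd);         /* kick the request */

	tries = 500;
	while ((mbox_read() & PCODE_READY) && --tries)
		;                              /* wait for it to finish */
	return tries ? 0 : -1;                 /* "timeout ... to finish" */
}

int main(void)
{
	printf("pcode_write -> %d (last mbox word 0x%08x)\n",
	       pcode_write(0x8, 0), mbox_data);
	return 0;
}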
8721void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
8722{
8723 int min_freq = 15;
8724 int gpu_freq, ia_freq, max_ia_freq;
8725 int scaling_factor = 180;
8726
8727 max_ia_freq = cpufreq_quick_get_max(0);
8728 /*
8729	 * Default to the measured freq if none is found; the PCU will ensure
8730	 * we don't go over
8731 */
8732 if (!max_ia_freq)
8733 max_ia_freq = tsc_khz;
8734
8735 /* Convert from kHz to MHz */
8736 max_ia_freq /= 1000;
8737
8738 mutex_lock(&dev_priv->dev->struct_mutex);
8739
8740 /*
8741 * For each potential GPU frequency, load a ring frequency we'd like
8742 * to use for memory access. We do this by specifying the IA frequency
8743 * the PCU should use as a reference to determine the ring frequency.
8744 */
8745 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
8746 gpu_freq--) {
8747 int diff = dev_priv->max_delay - gpu_freq;
8748
8749 /*
8750 * For GPU frequencies less than 750MHz, just use the lowest
8751 * ring freq.
8752 */
8753 if (gpu_freq < min_freq)
8754 ia_freq = 800;
8755 else
8756 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
8757 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
8758
8759 I915_WRITE(GEN6_PCODE_DATA,
8760 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
8761 gpu_freq);
8762 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
8763 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
8764 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
8765 GEN6_PCODE_READY) == 0, 10)) {
8766 DRM_ERROR("pcode write of freq table timed out\n");
8767 continue;
8768 }
8769 }
8770
8771 mutex_unlock(&dev_priv->dev->struct_mutex);
8772}
8773
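Editor's note: the table loop maps each GPU ratio to an IA reference frequency: ia = max_ia - diff * 180 / 2, rounded to the nearest 100 MHz, with an 800 floor below 750 MHz GPU. A runnable rendition with example CPU and GPU limits:

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	int min_freq = 15, scaling_factor = 180;
	int max_ia_freq = 3400;             /* MHz, example CPU */
	int max_delay = 22, min_delay = 11; /* GPU ratios in 50 MHz units */

	for (int gpu_freq = max_delay; gpu_freq >= min_delay; gpu_freq--) {
		int diff = max_delay - gpu_freq;
		int ia_freq;

		if (gpu_freq < min_freq)    /* below 750 MHz: lowest ring freq */
			ia_freq = 800;
		else
			ia_freq = max_ia_freq - (diff * scaling_factor) / 2;
		ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);

		printf("gpu %4d MHz -> ia ratio %d\n", gpu_freq * 50, ia_freq);
	}
	return 0;
}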
8774static void ironlake_init_clock_gating(struct drm_device *dev)
8775{
8776 struct drm_i915_private *dev_priv = dev->dev_private;
8777 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8778
8779 /* Required for FBC */
8780 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
8781 DPFCRUNIT_CLOCK_GATE_DISABLE |
8782 DPFDUNIT_CLOCK_GATE_DISABLE;
8783 /* Required for CxSR */
8784 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
8785
8786 I915_WRITE(PCH_3DCGDIS0,
8787 MARIUNIT_CLOCK_GATE_DISABLE |
8788 SVSMUNIT_CLOCK_GATE_DISABLE);
8789 I915_WRITE(PCH_3DCGDIS1,
8790 VFMUNIT_CLOCK_GATE_DISABLE);
8791
8792 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8793
8794 /*
8795 * According to the spec the following bits should be set in
8796	 * order to enable memory self-refresh:
8797	 * bits 22 and 21 of 0x42004,
8798	 * bit 5 of 0x42020,
8799	 * bit 15 of 0x45000.
8800 */
8801 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8802 (I915_READ(ILK_DISPLAY_CHICKEN2) |
8803 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
8804 I915_WRITE(ILK_DSPCLK_GATE,
8805 (I915_READ(ILK_DSPCLK_GATE) |
8806 ILK_DPARB_CLK_GATE));
8807 I915_WRITE(DISP_ARB_CTL,
8808 (I915_READ(DISP_ARB_CTL) |
8809 DISP_FBC_WM_DIS));
8810 I915_WRITE(WM3_LP_ILK, 0);
8811 I915_WRITE(WM2_LP_ILK, 0);
8812 I915_WRITE(WM1_LP_ILK, 0);
8813
8814 /*
8815	 * Based on documentation from the hardware team, the following
8816	 * bits should be set unconditionally in order to enable FBC:
8817	 * bit 22 of 0x42000,
8818	 * bit 22 of 0x42004,
8819	 * bits 7, 8 and 9 of 0x42020.
8820 */
8821 if (IS_IRONLAKE_M(dev)) {
8822 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8823 I915_READ(ILK_DISPLAY_CHICKEN1) |
8824 ILK_FBCQ_DIS);
8825 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8826 I915_READ(ILK_DISPLAY_CHICKEN2) |
8827 ILK_DPARB_GATE);
8828 I915_WRITE(ILK_DSPCLK_GATE,
8829 I915_READ(ILK_DSPCLK_GATE) |
8830 ILK_DPFC_DIS1 |
8831 ILK_DPFC_DIS2 |
8832 ILK_CLK_FBC);
8833 }
8834
8835 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8836 I915_READ(ILK_DISPLAY_CHICKEN2) |
8837 ILK_ELPIN_409_SELECT);
8838 I915_WRITE(_3D_CHICKEN2,
8839 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
8840 _3D_CHICKEN2_WM_READ_PIPELINED);
8841}
8842
8843static void gen6_init_clock_gating(struct drm_device *dev)
8844{
8845 struct drm_i915_private *dev_priv = dev->dev_private;
8846 int pipe;
8847 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8848
8849 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8850
8851 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8852 I915_READ(ILK_DISPLAY_CHICKEN2) |
8853 ILK_ELPIN_409_SELECT);
8854
8855 I915_WRITE(WM3_LP_ILK, 0);
8856 I915_WRITE(WM2_LP_ILK, 0);
8857 I915_WRITE(WM1_LP_ILK, 0);
8858
8859 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8860 * gating disable must be set. Failure to set it results in
8861 * flickering pixels due to Z write ordering failures after
8862	 * some amount of runtime in the Mesa "fire" demo, in Unigine
8863	 * Sanctuary and Tropics, and apparently in anything else with
8864	 * alpha test or pixel discard.
8865 *
8866 * According to the spec, bit 11 (RCCUNIT) must also be set,
8867 * but we didn't debug actual testcases to find it out.
8868 */
8869 I915_WRITE(GEN6_UCGCTL2,
8870 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8871 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8872
8873 /*
8874 * According to the spec the following bits should be
8875 * set in order to enable memory self-refresh and fbc:
8876	 * bits 21 and 22 of 0x42000,
8877	 * bits 21 and 22 of 0x42004,
8878	 * bits 5 and 7 of 0x42020,
8879	 * bit 14 of 0x70180,
8880	 * bit 14 of 0x71180.
8881 */
8882 I915_WRITE(ILK_DISPLAY_CHICKEN1,
8883 I915_READ(ILK_DISPLAY_CHICKEN1) |
8884 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
8885 I915_WRITE(ILK_DISPLAY_CHICKEN2,
8886 I915_READ(ILK_DISPLAY_CHICKEN2) |
8887 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
8888 I915_WRITE(ILK_DSPCLK_GATE,
8889 I915_READ(ILK_DSPCLK_GATE) |
8890 ILK_DPARB_CLK_GATE |
8891 ILK_DPFD_CLK_GATE);
8892
8893 for_each_pipe(pipe) {
8894 I915_WRITE(DSPCNTR(pipe),
8895 I915_READ(DSPCNTR(pipe)) |
8896 DISPPLANE_TRICKLE_FEED_DISABLE);
8897 intel_flush_display_plane(dev_priv, pipe);
8898 }
8899}
8900
8901static void ivybridge_init_clock_gating(struct drm_device *dev)
8902{
8903 struct drm_i915_private *dev_priv = dev->dev_private;
8904 int pipe;
8905 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8906
8907 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8908
8909 I915_WRITE(WM3_LP_ILK, 0);
8910 I915_WRITE(WM2_LP_ILK, 0);
8911 I915_WRITE(WM1_LP_ILK, 0);
8912
8913 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8914 * This implements the WaDisableRCZUnitClockGating workaround.
8915 */
8916 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8917
8918 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8919
8920 I915_WRITE(IVB_CHICKEN3,
8921 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8922 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8923
8924 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8925 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8926 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8927
8928 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8929 I915_WRITE(GEN7_L3CNTLREG1,
8930 GEN7_WA_FOR_GEN7_L3_CONTROL);
8931 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
8932 GEN7_WA_L3_CHICKEN_MODE);
8933
8934 /* This is required by WaCatErrorRejectionIssue */
8935 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8936 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8937 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8938
8939 for_each_pipe(pipe) {
8940 I915_WRITE(DSPCNTR(pipe),
8941 I915_READ(DSPCNTR(pipe)) |
8942 DISPPLANE_TRICKLE_FEED_DISABLE);
8943 intel_flush_display_plane(dev_priv, pipe);
8944 }
8945}
8946
8947static void valleyview_init_clock_gating(struct drm_device *dev)
8948{
8949 struct drm_i915_private *dev_priv = dev->dev_private;
8950 int pipe;
8951 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
8952
8953 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
8954
8955 I915_WRITE(WM3_LP_ILK, 0);
8956 I915_WRITE(WM2_LP_ILK, 0);
8957 I915_WRITE(WM1_LP_ILK, 0);
8958
8959 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
8960 * This implements the WaDisableRCZUnitClockGating workaround.
8961 */
8962 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
8963
8964 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
8965
8966 I915_WRITE(IVB_CHICKEN3,
8967 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
8968 CHICKEN3_DGMG_DONE_FIX_DISABLE);
8969
8970 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
8971 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
8972 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
8973
8974 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
8975 I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
8976 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
8977
8978 /* This is required by WaCatErrorRejectionIssue */
8979 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
8980 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
8981 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
8982
8983 for_each_pipe(pipe) {
8984 I915_WRITE(DSPCNTR(pipe),
8985 I915_READ(DSPCNTR(pipe)) |
8986 DISPPLANE_TRICKLE_FEED_DISABLE);
8987 intel_flush_display_plane(dev_priv, pipe);
8988 }
8989
8990 I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
8991 (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
8992 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
8993}
8994
8995static void g4x_init_clock_gating(struct drm_device *dev)
8996{
8997 struct drm_i915_private *dev_priv = dev->dev_private;
8998 uint32_t dspclk_gate;
8999
9000 I915_WRITE(RENCLK_GATE_D1, 0);
9001 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
9002 GS_UNIT_CLOCK_GATE_DISABLE |
9003 CL_UNIT_CLOCK_GATE_DISABLE);
9004 I915_WRITE(RAMCLK_GATE_D, 0);
9005 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
9006 OVRUNIT_CLOCK_GATE_DISABLE |
9007 OVCUNIT_CLOCK_GATE_DISABLE;
9008 if (IS_GM45(dev))
9009 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
9010 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
9011}
9012
9013static void crestline_init_clock_gating(struct drm_device *dev)
9014{
9015 struct drm_i915_private *dev_priv = dev->dev_private;
9016
9017 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
9018 I915_WRITE(RENCLK_GATE_D2, 0);
9019 I915_WRITE(DSPCLK_GATE_D, 0);
9020 I915_WRITE(RAMCLK_GATE_D, 0);
9021 I915_WRITE16(DEUC, 0);
9022}
9023
9024static void broadwater_init_clock_gating(struct drm_device *dev)
9025{
9026 struct drm_i915_private *dev_priv = dev->dev_private;
9027
9028 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
9029 I965_RCC_CLOCK_GATE_DISABLE |
9030 I965_RCPB_CLOCK_GATE_DISABLE |
9031 I965_ISC_CLOCK_GATE_DISABLE |
9032 I965_FBC_CLOCK_GATE_DISABLE);
9033 I915_WRITE(RENCLK_GATE_D2, 0);
9034}
9035
9036static void gen3_init_clock_gating(struct drm_device *dev)
9037{
9038 struct drm_i915_private *dev_priv = dev->dev_private;
9039 u32 dstate = I915_READ(D_STATE);
9040
9041 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
9042 DSTATE_DOT_CLOCK_GATING;
9043 I915_WRITE(D_STATE, dstate);
9044}
9045
9046static void i85x_init_clock_gating(struct drm_device *dev)
9047{
9048 struct drm_i915_private *dev_priv = dev->dev_private;
9049
9050 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
9051}
9052
9053static void i830_init_clock_gating(struct drm_device *dev)
9054{
9055 struct drm_i915_private *dev_priv = dev->dev_private;
9056
9057 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
9058}
9059
9060static void ibx_init_clock_gating(struct drm_device *dev)
9061{
9062 struct drm_i915_private *dev_priv = dev->dev_private;
9063
9064 /*
9065 * On Ibex Peak and Cougar Point, we need to disable clock
9066 * gating for the panel power sequencer or it will fail to
9067 * start up when no ports are active.
9068 */
9069 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
9070}
9071
9072static void cpt_init_clock_gating(struct drm_device *dev)
9073{
9074 struct drm_i915_private *dev_priv = dev->dev_private;
9075 int pipe;
9076
9077 /*
9078 * On Ibex Peak and Cougar Point, we need to disable clock
9079 * gating for the panel power sequencer or it will fail to
9080 * start up when no ports are active.
9081 */
9082 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
9083 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
9084 DPLS_EDP_PPS_FIX_DIS);
9085 /* Without this, mode sets may fail silently on FDI */
9086 for_each_pipe(pipe)
9087 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
9088}
9089
9090static void ironlake_teardown_rc6(struct drm_device *dev)
9091{
9092 struct drm_i915_private *dev_priv = dev->dev_private;
9093
9094 if (dev_priv->renderctx) {
9095 i915_gem_object_unpin(dev_priv->renderctx);
9096 drm_gem_object_unreference(&dev_priv->renderctx->base);
9097 dev_priv->renderctx = NULL;
9098 }
9099
9100 if (dev_priv->pwrctx) {
9101 i915_gem_object_unpin(dev_priv->pwrctx);
9102 drm_gem_object_unreference(&dev_priv->pwrctx->base);
9103 dev_priv->pwrctx = NULL;
9104 }
9105}
9106
9107static void ironlake_disable_rc6(struct drm_device *dev)
9108{
9109 struct drm_i915_private *dev_priv = dev->dev_private;
9110
9111 if (I915_READ(PWRCTXA)) {
9112 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
9113 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
9114 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
9115 50);
9116
9117 I915_WRITE(PWRCTXA, 0);
9118 POSTING_READ(PWRCTXA);
9119
9120 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
9121 POSTING_READ(RSTDBYCTL);
9122 }
9123
9124 ironlake_teardown_rc6(dev);
9125}
9126
9127static int ironlake_setup_rc6(struct drm_device *dev)
9128{
9129 struct drm_i915_private *dev_priv = dev->dev_private;
9130
9131 if (dev_priv->renderctx == NULL)
9132 dev_priv->renderctx = intel_alloc_context_page(dev);
9133 if (!dev_priv->renderctx)
9134 return -ENOMEM;
9135
9136 if (dev_priv->pwrctx == NULL)
9137 dev_priv->pwrctx = intel_alloc_context_page(dev);
9138 if (!dev_priv->pwrctx) {
9139 ironlake_teardown_rc6(dev);
9140 return -ENOMEM;
9141 }
9142
9143 return 0;
9144}
9145
9146void ironlake_enable_rc6(struct drm_device *dev)
9147{
9148 struct drm_i915_private *dev_priv = dev->dev_private;
9149 int ret;
9150
9151 /* rc6 disabled by default due to repeated reports of hanging during
9152 * boot and resume.
9153 */
9154 if (!intel_enable_rc6(dev))
9155 return;
9156
9157 mutex_lock(&dev->struct_mutex);
9158 ret = ironlake_setup_rc6(dev);
9159 if (ret) {
9160 mutex_unlock(&dev->struct_mutex);
9161 return;
9162 }
9163
9164 /*
9165 * GPU can automatically power down the render unit if given a page
9166 * to save state.
9167 */
9168 ret = BEGIN_LP_RING(6);
9169 if (ret) {
9170 ironlake_teardown_rc6(dev);
9171 mutex_unlock(&dev->struct_mutex);
9172 return;
9173 }
9174
9175 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
9176 OUT_RING(MI_SET_CONTEXT);
9177 OUT_RING(dev_priv->renderctx->gtt_offset |
9178 MI_MM_SPACE_GTT |
9179 MI_SAVE_EXT_STATE_EN |
9180 MI_RESTORE_EXT_STATE_EN |
9181 MI_RESTORE_INHIBIT);
9182 OUT_RING(MI_SUSPEND_FLUSH);
9183 OUT_RING(MI_NOOP);
9184 OUT_RING(MI_FLUSH);
9185 ADVANCE_LP_RING();
9186
9187 /*
9188 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
9189	 * does an implicit flush; combined with the MI_FLUSH above, it should
9190	 * be safe to assume that renderctx is valid.
9191 */
9192 ret = intel_wait_ring_idle(LP_RING(dev_priv));
9193 if (ret) {
9194		DRM_ERROR("failed to enable ironlake power savings\n");
9195 ironlake_teardown_rc6(dev);
9196 mutex_unlock(&dev->struct_mutex);
9197 return;
9198 }
9199
9200 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
9201 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
9202 mutex_unlock(&dev->struct_mutex);
9203}
9204
9205void intel_init_clock_gating(struct drm_device *dev)
9206{
9207 struct drm_i915_private *dev_priv = dev->dev_private;
9208
9209 dev_priv->display.init_clock_gating(dev);
9210
9211 if (dev_priv->display.init_pch_clock_gating)
9212 dev_priv->display.init_pch_clock_gating(dev);
9213}
9214
9215/* Set up chip specific display functions */ 6331/* Set up chip specific display functions */
9216static void intel_init_display(struct drm_device *dev) 6332static void intel_init_display(struct drm_device *dev)
9217{ 6333{
@@ -9228,23 +6344,6 @@ static void intel_init_display(struct drm_device *dev)
9228 dev_priv->display.update_plane = i9xx_update_plane; 6344 dev_priv->display.update_plane = i9xx_update_plane;
9229 } 6345 }
9230 6346
9231 if (I915_HAS_FBC(dev)) {
9232 if (HAS_PCH_SPLIT(dev)) {
9233 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
9234 dev_priv->display.enable_fbc = ironlake_enable_fbc;
9235 dev_priv->display.disable_fbc = ironlake_disable_fbc;
9236 } else if (IS_GM45(dev)) {
9237 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
9238 dev_priv->display.enable_fbc = g4x_enable_fbc;
9239 dev_priv->display.disable_fbc = g4x_disable_fbc;
9240 } else if (IS_CRESTLINE(dev)) {
9241 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
9242 dev_priv->display.enable_fbc = i8xx_enable_fbc;
9243 dev_priv->display.disable_fbc = i8xx_disable_fbc;
9244 }
9245 /* 855GM needs testing */
9246 }
9247
9248 /* Returns the core display clock speed */ 6347 /* Returns the core display clock speed */
9249 if (IS_VALLEYVIEW(dev)) 6348 if (IS_VALLEYVIEW(dev))
9250 dev_priv->display.get_display_clock_speed = 6349 dev_priv->display.get_display_clock_speed =
@@ -9271,130 +6370,24 @@ static void intel_init_display(struct drm_device *dev)
9271 dev_priv->display.get_display_clock_speed = 6370 dev_priv->display.get_display_clock_speed =
9272 i830_get_display_clock_speed; 6371 i830_get_display_clock_speed;
9273 6372
9274 /* For FIFO watermark updates */
9275 if (HAS_PCH_SPLIT(dev)) { 6373 if (HAS_PCH_SPLIT(dev)) {
9276 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
9277 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
9278
9279 /* IVB configs may use multi-threaded forcewake */
9280 if (IS_IVYBRIDGE(dev)) {
9281 u32 ecobus;
9282
9283 /* A small trick here - if the bios hasn't configured MT forcewake,
9284 * and if the device is in RC6, then force_wake_mt_get will not wake
9285 * the device and the ECOBUS read will return zero. Which will be
9286	 * the device and the ECOBUS read will return zero, which will be
9287 * disabled.
9288 */
9289 mutex_lock(&dev->struct_mutex);
9290 __gen6_gt_force_wake_mt_get(dev_priv);
9291 ecobus = I915_READ_NOTRACE(ECOBUS);
-		__gen6_gt_force_wake_mt_put(dev_priv);
-		mutex_unlock(&dev->struct_mutex);
-
-		if (ecobus & FORCEWAKE_MT_ENABLE) {
-			DRM_DEBUG_KMS("Using MT version of forcewake\n");
-			dev_priv->display.force_wake_get =
-				__gen6_gt_force_wake_mt_get;
-			dev_priv->display.force_wake_put =
-				__gen6_gt_force_wake_mt_put;
-		}
-	}
-
-	if (HAS_PCH_IBX(dev))
-		dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
-	else if (HAS_PCH_CPT(dev))
-		dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
-
 		if (IS_GEN5(dev)) {
-			if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
-				dev_priv->display.update_wm = ironlake_update_wm;
-			else {
-				DRM_DEBUG_KMS("Failed to get proper latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
 			dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
-			dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else if (IS_GEN6(dev)) {
-			if (SNB_READ_WM0_LATENCY()) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
 			dev_priv->display.fdi_link_train = gen6_fdi_link_train;
-			dev_priv->display.init_clock_gating = gen6_init_clock_gating;
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else if (IS_IVYBRIDGE(dev)) {
 			/* FIXME: detect B0+ stepping and use auto training */
 			dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
-			if (SNB_READ_WM0_LATENCY()) {
-				dev_priv->display.update_wm = sandybridge_update_wm;
-				dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
-			} else {
-				DRM_DEBUG_KMS("Failed to read display plane latency. "
-					      "Disable CxSR\n");
-				dev_priv->display.update_wm = NULL;
-			}
-			dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
 			dev_priv->display.write_eld = ironlake_write_eld;
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_VALLEYVIEW(dev)) {
-		dev_priv->display.update_wm = valleyview_update_wm;
-		dev_priv->display.init_clock_gating =
-			valleyview_init_clock_gating;
 		dev_priv->display.force_wake_get = vlv_force_wake_get;
 		dev_priv->display.force_wake_put = vlv_force_wake_put;
-	} else if (IS_PINEVIEW(dev)) {
-		if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
-					    dev_priv->is_ddr3,
-					    dev_priv->fsb_freq,
-					    dev_priv->mem_freq)) {
-			DRM_INFO("failed to find known CxSR latency "
-				 "(found ddr%s fsb freq %d, mem freq %d), "
-				 "disabling CxSR\n",
-				 (dev_priv->is_ddr3 == 1) ? "3" : "2",
-				 dev_priv->fsb_freq, dev_priv->mem_freq);
-			/* Disable CxSR and never update its watermark again */
-			pineview_disable_cxsr(dev);
-			dev_priv->display.update_wm = NULL;
-		} else
-			dev_priv->display.update_wm = pineview_update_wm;
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
 	} else if (IS_G4X(dev)) {
 		dev_priv->display.write_eld = g4x_write_eld;
-		dev_priv->display.update_wm = g4x_update_wm;
-		dev_priv->display.init_clock_gating = g4x_init_clock_gating;
-	} else if (IS_GEN4(dev)) {
-		dev_priv->display.update_wm = i965_update_wm;
-		if (IS_CRESTLINE(dev))
-			dev_priv->display.init_clock_gating = crestline_init_clock_gating;
-		else if (IS_BROADWATER(dev))
-			dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
-	} else if (IS_GEN3(dev)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
-		dev_priv->display.init_clock_gating = gen3_init_clock_gating;
-	} else if (IS_I865G(dev)) {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-		dev_priv->display.get_fifo_size = i830_get_fifo_size;
-	} else if (IS_I85X(dev)) {
-		dev_priv->display.update_wm = i9xx_update_wm;
-		dev_priv->display.get_fifo_size = i85x_get_fifo_size;
-		dev_priv->display.init_clock_gating = i85x_init_clock_gating;
-	} else {
-		dev_priv->display.update_wm = i830_update_wm;
-		dev_priv->display.init_clock_gating = i830_init_clock_gating;
-		if (IS_845G(dev))
-			dev_priv->display.get_fifo_size = i845_get_fifo_size;
-		else
-			dev_priv->display.get_fifo_size = i830_get_fifo_size;
 	}
 
 	/* Default just returns -ENODEV to indicate unsupported */
@@ -9464,7 +6457,7 @@ struct intel_quirk {
 	void (*hook)(struct drm_device *dev);
 };
 
-struct intel_quirk intel_quirks[] = {
+static struct intel_quirk intel_quirks[] = {
 	/* HP Mini needs pipe A force quirk (LP: #322104) */
 	{ 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
 
@@ -9524,7 +6517,7 @@ static void i915_disable_vga(struct drm_device *dev)
 	vga_reg = VGACNTRL;
 
 	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
-	outb(1, VGA_SR_INDEX);
+	outb(SR01, VGA_SR_INDEX);
 	sr1 = inb(VGA_SR_DATA);
 	outb(sr1 | 1<<5, VGA_SR_DATA);
 	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
@@ -9534,6 +6527,39 @@ static void i915_disable_vga(struct drm_device *dev)
 	POSTING_READ(vga_reg);
 }
 
+static void ivb_pch_pwm_override(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/*
+	 * IVB has CPU eDP backlight regs too, set things up to let the
+	 * PCH regs control the backlight
+	 */
+	I915_WRITE(BLC_PWM_CPU_CTL2, PWM_ENABLE);
+	I915_WRITE(BLC_PWM_CPU_CTL, 0);
+	I915_WRITE(BLC_PWM_PCH_CTL1, PWM_ENABLE | (1<<30));
+}
+
+void intel_modeset_init_hw(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	intel_init_clock_gating(dev);
+
+	if (IS_IRONLAKE_M(dev)) {
+		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
+
+	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
+		gen6_enable_rps(dev_priv);
+		gen6_update_ring_freq(dev_priv);
+	}
+
+	if (IS_IVYBRIDGE(dev))
+		ivb_pch_pwm_override(dev);
+}
+
 void intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -9551,6 +6577,8 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_init_quirks(dev);
 
+	intel_init_pm(dev);
+
 	intel_init_display(dev);
 
 	if (IS_GEN2(dev)) {
@@ -9579,17 +6607,7 @@ void intel_modeset_init(struct drm_device *dev)
 	i915_disable_vga(dev);
 	intel_setup_outputs(dev);
 
-	intel_init_clock_gating(dev);
-
-	if (IS_IRONLAKE_M(dev)) {
-		ironlake_enable_drps(dev);
-		intel_init_emon(dev);
-	}
-
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
-		gen6_enable_rps(dev_priv);
-		gen6_update_ring_freq(dev_priv);
-	}
+	intel_modeset_init_hw(dev);
 
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
@@ -9629,7 +6647,7 @@ void intel_modeset_cleanup(struct drm_device *dev)
 
 	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
-	if (IS_GEN6(dev) || IS_GEN7(dev))
+	if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
 		gen6_disable_rps(dev);
 
 	if (IS_IRONLAKE_M(dev))
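The intel_modeset_init_hw() split above collects the modeset steps that only program hardware state (clock gating, Ironlake DRPS/emon, gen6+ RPS and ring frequency, the IVB backlight override) behind one entry point. A minimal sketch of the likely payoff, assuming a suspend/resume path wants to re-run exactly this subset; the example_thaw() caller is hypothetical and not part of this series:

#include "i915_drv.h"
#include "intel_drv.h"

/* Hypothetical resume hook: re-program hardware-only state without
 * repeating the one-time setup done by intel_modeset_init(). */
static int example_thaw(struct drm_device *dev)
{
	intel_modeset_init_hw(dev);
	return 0;
}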
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 110552ff302c..44cf32c8bcbf 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -219,14 +219,38 @@ intel_dp_max_data_rate(int max_link_clock, int max_lanes)
 	return (max_link_clock * max_lanes * 8) / 10;
 }
 
+static bool
+intel_dp_adjust_dithering(struct intel_dp *intel_dp,
+			  struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode)
+{
+	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
+	int max_lanes = intel_dp_max_lane_count(intel_dp);
+	int max_rate, mode_rate;
+
+	mode_rate = intel_dp_link_required(mode->clock, 24);
+	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+	if (mode_rate > max_rate) {
+		mode_rate = intel_dp_link_required(mode->clock, 18);
+		if (mode_rate > max_rate)
+			return false;
+
+		if (adjusted_mode)
+			adjusted_mode->private_flags
+				|= INTEL_MODE_DP_FORCE_6BPC;
+
+		return true;
+	}
+
+	return true;
+}
+
 static int
 intel_dp_mode_valid(struct drm_connector *connector,
 		    struct drm_display_mode *mode)
 {
 	struct intel_dp *intel_dp = intel_attached_dp(connector);
-	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
-	int max_lanes = intel_dp_max_lane_count(intel_dp);
-	int max_rate, mode_rate;
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
 		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@@ -236,16 +260,8 @@ intel_dp_mode_valid(struct drm_connector *connector,
 		return MODE_PANEL;
 	}
 
-	mode_rate = intel_dp_link_required(mode->clock, 24);
-	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
-
-	if (mode_rate > max_rate) {
-		mode_rate = intel_dp_link_required(mode->clock, 18);
-		if (mode_rate > max_rate)
-			return MODE_CLOCK_HIGH;
-		else
-			mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
-	}
+	if (!intel_dp_adjust_dithering(intel_dp, mode, NULL))
+		return MODE_CLOCK_HIGH;
 
 	if (mode->clock < 10000)
 		return MODE_CLOCK_LOW;
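intel_dp_adjust_dithering() boils down to one bandwidth comparison done twice: first at 24 bpp, then at 18 bpp with INTEL_MODE_DP_FORCE_6BPC set. A standalone sketch of that arithmetic (the 8/10 factor mirrors the 8b/10b coding overhead in intel_dp_max_data_rate() above; the link and mode figures are invented):

#include <stdbool.h>
#include <stdio.h>

/* Usable payload after 8b/10b channel coding, as in intel_dp_max_data_rate(). */
static int max_data_rate(int link_clock_khz, int lanes)
{
	return (link_clock_khz * lanes * 8) / 10;
}

/* Try 24 bpp first, then fall back to 18 bpp (6 bpc) dithering. */
static bool mode_fits(int pixel_clock_khz, int link_clock_khz, int lanes,
		      bool *force_6bpc)
{
	int max_rate = max_data_rate(link_clock_khz, lanes);

	*force_6bpc = false;
	if (pixel_clock_khz * 24 / 8 <= max_rate)
		return true;
	if (pixel_clock_khz * 18 / 8 <= max_rate) {
		*force_6bpc = true;
		return true;
	}
	return false;
}

int main(void)
{
	bool six;

	/* 1920x1080@60 (~148500 kHz) over two 2.7 GHz lanes only fits
	 * after dropping to 6 bpc: 445500 > 432000, but 334125 <= 432000. */
	printf("fits: %d, 6bpc: %d\n",
	       mode_fits(148500, 270000, 2, &six), six);
	return 0;
}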
@@ -672,7 +688,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	int lane_count, clock;
 	int max_lane_count = intel_dp_max_lane_count(intel_dp);
 	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
-	int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+	int bpp;
 	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
 
 	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@@ -686,6 +702,11 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		mode->clock = intel_dp->panel_fixed_mode->clock;
 	}
 
+	if (!intel_dp_adjust_dithering(intel_dp, mode, adjusted_mode))
+		return false;
+
+	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
+
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -1128,6 +1149,7 @@ static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 	DRM_DEBUG_KMS("Turn eDP power off\n");
 
 	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
+	ironlake_panel_vdd_off_sync(intel_dp); /* finish any pending work */
 
 	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
@@ -2462,6 +2484,13 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	pp_off = I915_READ(PCH_PP_OFF_DELAYS);
 	pp_div = I915_READ(PCH_PP_DIVISOR);
 
+	if (!pp_on || !pp_off || !pp_div) {
+		DRM_INFO("bad panel power sequencing delays, disabling panel\n");
+		intel_dp_encoder_destroy(&intel_dp->base.base);
+		intel_dp_destroy(&intel_connector->base);
+		return;
+	}
+
 	/* Pull timing values out of registers */
 	cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
 		PANEL_POWER_UP_DELAY_SHIFT;
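Once the registers pass the new all-zero sanity check, the delays are plain mask-and-shift fields. A standalone illustration of the decode (the mask, shift and 100 us field unit are assumptions modelled on the i915_reg.h definitions, and the register value is invented):

#include <stdint.h>
#include <stdio.h>

#define PANEL_POWER_UP_DELAY_MASK	0x1fff0000
#define PANEL_POWER_UP_DELAY_SHIFT	16

int main(void)
{
	uint32_t pp_on = 0x07d00000;	/* hypothetical PCH_PP_ON_DELAYS readout */
	uint32_t t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
			 PANEL_POWER_UP_DELAY_SHIFT;

	/* The field counts 100 us units, so 2000 -> 200 ms. */
	printf("T1+T3 = %u x 100us = %u ms\n", t1_t3, t1_t3 / 10);
	return 0;
}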
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 79cabf58d877..c5bf8bebf0b0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -204,6 +204,25 @@ struct intel_plane {
 			    struct drm_intel_sprite_colorkey *key);
 };
 
+struct intel_watermark_params {
+	unsigned long fifo_size;
+	unsigned long max_wm;
+	unsigned long default_wm;
+	unsigned long guard_size;
+	unsigned long cacheline_size;
+};
+
+struct cxsr_latency {
+	int is_desktop;
+	int is_ddr3;
+	unsigned long fsb_freq;
+	unsigned long mem_freq;
+	unsigned long display_sr;
+	unsigned long display_hpll_disable;
+	unsigned long cursor_sr;
+	unsigned long cursor_hpll_disable;
+};
+
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
@@ -320,6 +339,8 @@ extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
 extern int intel_plane_init(struct drm_device *dev, enum pipe pipe);
+extern void intel_flush_display_plane(struct drm_i915_private *dev_priv,
+				      enum plane plane);
 
 /* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
@@ -377,6 +398,7 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
 extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void ironlake_disable_rc6(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
@@ -422,7 +444,7 @@ extern void intel_write_eld(struct drm_encoder *encoder,
 extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
 
 /* For use by IVB LP watermark workaround in intel_sprite.c */
-extern void sandybridge_update_wm(struct drm_device *dev);
+extern void intel_update_watermarks(struct drm_device *dev);
 extern void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
 					   uint32_t sprite_width,
 					   int pixel_size);
@@ -434,4 +456,11 @@ extern int intel_sprite_get_colorkey(struct drm_device *dev, void *data,
 
 extern u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg);
 
+/* Power-related functions, located in intel_pm.c */
+extern void intel_init_pm(struct drm_device *dev);
+/* FBC */
+extern bool intel_fbc_enabled(struct drm_device *dev);
+extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
+extern void intel_update_fbc(struct drm_device *dev);
+
 #endif /* __INTEL_DRV_H__ */
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 19ecd78b8a2c..71ef2896be96 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -94,7 +94,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	mutex_lock(&dev->struct_mutex);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index c12db7265893..e04255edc801 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -205,27 +205,29 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
 
 static int
 gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-		bool last)
+		u32 gmbus1_index)
 {
 	int reg_offset = dev_priv->gpio_mmio_base;
 	u16 len = msg->len;
 	u8 *buf = msg->buf;
 
 	I915_WRITE(GMBUS1 + reg_offset,
+		   gmbus1_index |
 		   GMBUS_CYCLE_WAIT |
-		   (last ? GMBUS_CYCLE_STOP : 0) |
 		   (len << GMBUS_BYTE_COUNT_SHIFT) |
 		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
 		   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
-	POSTING_READ(GMBUS2 + reg_offset);
-	do {
+	while (len) {
+		int ret;
 		u32 val, loop = 0;
+		u32 gmbus2;
 
-		if (wait_for(I915_READ(GMBUS2 + reg_offset) &
-			     (GMBUS_SATOER | GMBUS_HW_RDY),
-			     50))
+		ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+			       (GMBUS_SATOER | GMBUS_HW_RDY),
+			       50);
+		if (ret)
 			return -ETIMEDOUT;
-		if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+		if (gmbus2 & GMBUS_SATOER)
 			return -ENXIO;
 
 		val = I915_READ(GMBUS3 + reg_offset);
@@ -233,14 +235,13 @@ gmbus_xfer_read(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 			*buf++ = val & 0xff;
 			val >>= 8;
 		} while (--len && ++loop < 4);
-	} while (len);
+	}
 
 	return 0;
 }
 
 static int
-gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
-		 bool last)
+gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg)
 {
 	int reg_offset = dev_priv->gpio_mmio_base;
 	u16 len = msg->len;
@@ -248,25 +249,20 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 	u32 val, loop;
 
 	val = loop = 0;
-	do {
-		val |= *buf++ << (8 * loop);
-	} while (--len && ++loop < 4);
+	while (len && loop < 4) {
+		val |= *buf++ << (8 * loop++);
+		len -= 1;
+	}
 
 	I915_WRITE(GMBUS3 + reg_offset, val);
 	I915_WRITE(GMBUS1 + reg_offset,
 		   GMBUS_CYCLE_WAIT |
-		   (last ? GMBUS_CYCLE_STOP : 0) |
 		   (msg->len << GMBUS_BYTE_COUNT_SHIFT) |
 		   (msg->addr << GMBUS_SLAVE_ADDR_SHIFT) |
 		   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
-	POSTING_READ(GMBUS2 + reg_offset);
 	while (len) {
-		if (wait_for(I915_READ(GMBUS2 + reg_offset) &
-			     (GMBUS_SATOER | GMBUS_HW_RDY),
-			     50))
-			return -ETIMEDOUT;
-		if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
-			return -ENXIO;
+		int ret;
+		u32 gmbus2;
 
 		val = loop = 0;
 		do {
@@ -274,11 +270,58 @@ gmbus_xfer_write(struct drm_i915_private *dev_priv, struct i2c_msg *msg,
 		} while (--len && ++loop < 4);
 
 		I915_WRITE(GMBUS3 + reg_offset, val);
-		POSTING_READ(GMBUS2 + reg_offset);
+
+		ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+			       (GMBUS_SATOER | GMBUS_HW_RDY),
+			       50);
+		if (ret)
+			return -ETIMEDOUT;
+		if (gmbus2 & GMBUS_SATOER)
+			return -ENXIO;
 	}
 	return 0;
 }
 
+/*
+ * The gmbus controller can combine a 1 or 2 byte write with a read that
+ * immediately follows it by using an "INDEX" cycle.
+ */
+static bool
+gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
+{
+	return (i + 1 < num &&
+		!(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+		(msgs[i + 1].flags & I2C_M_RD));
+}
+
+static int
+gmbus_xfer_index_read(struct drm_i915_private *dev_priv, struct i2c_msg *msgs)
+{
+	int reg_offset = dev_priv->gpio_mmio_base;
+	u32 gmbus1_index = 0;
+	u32 gmbus5 = 0;
+	int ret;
+
+	if (msgs[0].len == 2)
+		gmbus5 = GMBUS_2BYTE_INDEX_EN |
+			 msgs[0].buf[1] | (msgs[0].buf[0] << 8);
+	if (msgs[0].len == 1)
+		gmbus1_index = GMBUS_CYCLE_INDEX |
+			       (msgs[0].buf[0] << GMBUS_SLAVE_INDEX_SHIFT);
+
+	/* GMBUS5 holds 16-bit index */
+	if (gmbus5)
+		I915_WRITE(GMBUS5 + reg_offset, gmbus5);
+
+	ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus1_index);
+
+	/* Clear GMBUS5 after each index transfer */
+	if (gmbus5)
+		I915_WRITE(GMBUS5 + reg_offset, 0);
+
+	return ret;
+}
+
 static int
 gmbus_xfer(struct i2c_adapter *adapter,
 	   struct i2c_msg *msgs,
@@ -288,7 +331,8 @@ gmbus_xfer(struct i2c_adapter *adapter,
 					       struct intel_gmbus,
 					       adapter);
 	struct drm_i915_private *dev_priv = bus->dev_priv;
-	int i, reg_offset, ret;
+	int i, reg_offset;
+	int ret = 0;
 
 	mutex_lock(&dev_priv->gmbus_mutex);
 
@@ -302,47 +346,82 @@ gmbus_xfer(struct i2c_adapter *adapter,
 	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
 	for (i = 0; i < num; i++) {
-		bool last = i + 1 == num;
+		u32 gmbus2;
 
-		if (msgs[i].flags & I2C_M_RD)
-			ret = gmbus_xfer_read(dev_priv, &msgs[i], last);
-		else
-			ret = gmbus_xfer_write(dev_priv, &msgs[i], last);
+		if (gmbus_is_index_read(msgs, i, num)) {
+			ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
+			i += 1;  /* set i to the index of the read xfer */
+		} else if (msgs[i].flags & I2C_M_RD) {
+			ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
+		} else {
+			ret = gmbus_xfer_write(dev_priv, &msgs[i]);
+		}
 
 		if (ret == -ETIMEDOUT)
 			goto timeout;
 		if (ret == -ENXIO)
 			goto clear_err;
 
-		if (!last &&
-		    wait_for(I915_READ(GMBUS2 + reg_offset) &
-			     (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
-			     50))
+		ret = wait_for((gmbus2 = I915_READ(GMBUS2 + reg_offset)) &
+			       (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE),
+			       50);
+		if (ret)
 			goto timeout;
-		if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+		if (gmbus2 & GMBUS_SATOER)
 			goto clear_err;
 	}
 
-	goto done;
+	/* Generate a STOP condition on the bus. Note that gmbus can't generate
+	 * a STOP on the very first cycle. To simplify the code we
+	 * unconditionally generate the STOP condition with an additional gmbus
+	 * cycle. */
+	I915_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+
+	/* Mark the GMBUS interface as disabled after waiting for idle.
+	 * We will re-enable it at the start of the next xfer,
+	 * till then let it sleep.
+	 */
+	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+		     10)) {
+		DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
+			      adapter->name);
+		ret = -ETIMEDOUT;
+	}
+	I915_WRITE(GMBUS0 + reg_offset, 0);
+	ret = ret ?: i;
+	goto out;
 
 clear_err:
+	/*
+	 * Wait for bus to IDLE before clearing NAK.
+	 * If we clear the NAK while bus is still active, then it will stay
+	 * active and the next transaction may fail.
+	 */
+	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0,
+		     10))
+		DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
+			      adapter->name);
+
 	/* Toggle the Software Clear Interrupt bit. This has the effect
 	 * of resetting the GMBUS controller and so clearing the
 	 * BUS_ERROR raised by the slave's NAK.
 	 */
 	I915_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT);
 	I915_WRITE(GMBUS1 + reg_offset, 0);
+	I915_WRITE(GMBUS0 + reg_offset, 0);
 
-done:
-	/* Mark the GMBUS interface as disabled after waiting for idle.
-	 * We will re-enable it at the start of the next xfer,
-	 * till then let it sleep.
-	 */
-	if (wait_for((I915_READ(GMBUS2 + reg_offset) & GMBUS_ACTIVE) == 0, 10))
-		DRM_INFO("GMBUS [%s] timed out waiting for idle\n",
-			 bus->adapter.name);
-	I915_WRITE(GMBUS0 + reg_offset, 0);
-	ret = i;
+	DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+		      adapter->name, msgs[i].addr,
+		      (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+
+	/*
+	 * If no ACK is received during the address phase of a transaction,
+	 * the adapter must report -ENXIO.
+	 * It is not clear what to return if no ACK is received at other times.
+	 * So, we always return -ENXIO in all NAK cases, to ensure we send
+	 * it at least during the one case that is specified.
+	 */
+	ret = -ENXIO;
 	goto out;
 
 timeout:
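For context, the write-then-read shape that gmbus_is_index_read() matches is exactly what a DDC/EDID fetch looks like from the i2c core's point of view. A caller-side sketch (the slave address and lengths are the usual DDC values, but the helper itself is invented for illustration):

#include <linux/errno.h>
#include <linux/i2c.h>

/* A one- or two-byte register write followed immediately by a read is
 * eligible for a single GMBUS INDEX cycle instead of two transactions. */
static int example_read_edid_block(struct i2c_adapter *adapter, u8 *buf)
{
	u8 offset = 0;
	struct i2c_msg msgs[] = {
		{
			.addr = 0x50,		/* DDC EDID slave */
			.flags = 0,		/* write */
			.len = 1,
			.buf = &offset,
		},
		{
			.addr = 0x50,
			.flags = I2C_M_RD,
			.len = 128,		/* one EDID block */
			.buf = buf,
		},
	};

	/* i2c_transfer() returns the number of messages completed. */
	return i2c_transfer(adapter, msgs, 2) == 2 ? 0 : -EIO;
}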
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
new file mode 100644
index 000000000000..36940a390ef2
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -0,0 +1,3075 @@
1/*
2 * Copyright © 2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eugeni Dodonov <eugeni.dodonov@intel.com>
25 *
26 */
27
28#include <linux/cpufreq.h>
29#include "i915_drv.h"
30#include "intel_drv.h"
31
32/* FBC, or Frame Buffer Compression, is a technique employed to compress the
33 * framebuffer contents in-memory, aiming at reducing the required bandwidth
34 * during in-memory transfers and, therefore, the power consumption.
35 *
36 * The benefits of FBC are mostly visible with solid backgrounds and
37 * variation-less patterns.
38 *
39 * FBC-related functionality can be enabled by means of the
40 * i915.i915_enable_fbc parameter
41 */
42
43static void i8xx_disable_fbc(struct drm_device *dev)
44{
45 struct drm_i915_private *dev_priv = dev->dev_private;
46 u32 fbc_ctl;
47
48 /* Disable compression */
49 fbc_ctl = I915_READ(FBC_CONTROL);
50 if ((fbc_ctl & FBC_CTL_EN) == 0)
51 return;
52
53 fbc_ctl &= ~FBC_CTL_EN;
54 I915_WRITE(FBC_CONTROL, fbc_ctl);
55
56 /* Wait for compressing bit to clear */
57 if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) {
58 DRM_DEBUG_KMS("FBC idle timed out\n");
59 return;
60 }
61
62 DRM_DEBUG_KMS("disabled FBC\n");
63}
64
65static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
66{
67 struct drm_device *dev = crtc->dev;
68 struct drm_i915_private *dev_priv = dev->dev_private;
69 struct drm_framebuffer *fb = crtc->fb;
70 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
71 struct drm_i915_gem_object *obj = intel_fb->obj;
72 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
73 int cfb_pitch;
74 int plane, i;
75 u32 fbc_ctl, fbc_ctl2;
76
77 cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE;
78 if (fb->pitches[0] < cfb_pitch)
79 cfb_pitch = fb->pitches[0];
80
81 /* FBC_CTL wants 64B units */
82 cfb_pitch = (cfb_pitch / 64) - 1;
83 plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
84
85 /* Clear old tags */
86 for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
87 I915_WRITE(FBC_TAG + (i * 4), 0);
88
89 /* Set it up... */
90 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE;
91 fbc_ctl2 |= plane;
92 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
93 I915_WRITE(FBC_FENCE_OFF, crtc->y);
94
95 /* enable it... */
96 fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC;
97 if (IS_I945GM(dev))
98 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
99 fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
100 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
101 fbc_ctl |= obj->fence_reg;
102 I915_WRITE(FBC_CONTROL, fbc_ctl);
103
104 DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ",
105 cfb_pitch, crtc->y, intel_crtc->plane);
106}
107
108static bool i8xx_fbc_enabled(struct drm_device *dev)
109{
110 struct drm_i915_private *dev_priv = dev->dev_private;
111
112 return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
113}
114
115static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
116{
117 struct drm_device *dev = crtc->dev;
118 struct drm_i915_private *dev_priv = dev->dev_private;
119 struct drm_framebuffer *fb = crtc->fb;
120 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
121 struct drm_i915_gem_object *obj = intel_fb->obj;
122 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
123 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
124 unsigned long stall_watermark = 200;
125 u32 dpfc_ctl;
126
127 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
128 dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg;
129 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
130
131 I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
132 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
133 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
134 I915_WRITE(DPFC_FENCE_YOFF, crtc->y);
135
136 /* enable it... */
137 I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN);
138
139 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
140}
141
142static void g4x_disable_fbc(struct drm_device *dev)
143{
144 struct drm_i915_private *dev_priv = dev->dev_private;
145 u32 dpfc_ctl;
146
147 /* Disable compression */
148 dpfc_ctl = I915_READ(DPFC_CONTROL);
149 if (dpfc_ctl & DPFC_CTL_EN) {
150 dpfc_ctl &= ~DPFC_CTL_EN;
151 I915_WRITE(DPFC_CONTROL, dpfc_ctl);
152
153 DRM_DEBUG_KMS("disabled FBC\n");
154 }
155}
156
157static bool g4x_fbc_enabled(struct drm_device *dev)
158{
159 struct drm_i915_private *dev_priv = dev->dev_private;
160
161 return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
162}
163
164static void sandybridge_blit_fbc_update(struct drm_device *dev)
165{
166 struct drm_i915_private *dev_priv = dev->dev_private;
167 u32 blt_ecoskpd;
168
169 /* Make sure blitter notifies FBC of writes */
170 gen6_gt_force_wake_get(dev_priv);
171 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
172 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
173 GEN6_BLITTER_LOCK_SHIFT;
174 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
175 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY;
176 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
177 blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY <<
178 GEN6_BLITTER_LOCK_SHIFT);
179 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
180 POSTING_READ(GEN6_BLITTER_ECOSKPD);
181 gen6_gt_force_wake_put(dev_priv);
182}
183
184static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
185{
186 struct drm_device *dev = crtc->dev;
187 struct drm_i915_private *dev_priv = dev->dev_private;
188 struct drm_framebuffer *fb = crtc->fb;
189 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
190 struct drm_i915_gem_object *obj = intel_fb->obj;
191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
192 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
193 unsigned long stall_watermark = 200;
194 u32 dpfc_ctl;
195
196 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
197 dpfc_ctl &= DPFC_RESERVED;
198 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
199 /* Set persistent mode for front-buffer rendering, ala X. */
200 dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE;
201 dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg);
202 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
203
204 I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN |
205 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
206 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
207 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
208 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
209 /* enable it... */
210 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
211
212 if (IS_GEN6(dev)) {
213 I915_WRITE(SNB_DPFC_CTL_SA,
214 SNB_CPU_FENCE_ENABLE | obj->fence_reg);
215 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
216 sandybridge_blit_fbc_update(dev);
217 }
218
219 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
220}
221
222static void ironlake_disable_fbc(struct drm_device *dev)
223{
224 struct drm_i915_private *dev_priv = dev->dev_private;
225 u32 dpfc_ctl;
226
227 /* Disable compression */
228 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
229 if (dpfc_ctl & DPFC_CTL_EN) {
230 dpfc_ctl &= ~DPFC_CTL_EN;
231 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
232
233 DRM_DEBUG_KMS("disabled FBC\n");
234 }
235}
236
237static bool ironlake_fbc_enabled(struct drm_device *dev)
238{
239 struct drm_i915_private *dev_priv = dev->dev_private;
240
241 return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
242}
243
244bool intel_fbc_enabled(struct drm_device *dev)
245{
246 struct drm_i915_private *dev_priv = dev->dev_private;
247
248 if (!dev_priv->display.fbc_enabled)
249 return false;
250
251 return dev_priv->display.fbc_enabled(dev);
252}
253
254static void intel_fbc_work_fn(struct work_struct *__work)
255{
256 struct intel_fbc_work *work =
257 container_of(to_delayed_work(__work),
258 struct intel_fbc_work, work);
259 struct drm_device *dev = work->crtc->dev;
260 struct drm_i915_private *dev_priv = dev->dev_private;
261
262 mutex_lock(&dev->struct_mutex);
263 if (work == dev_priv->fbc_work) {
264 /* Double check that we haven't switched fb without cancelling
265 * the prior work.
266 */
267 if (work->crtc->fb == work->fb) {
268 dev_priv->display.enable_fbc(work->crtc,
269 work->interval);
270
271 dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane;
272 dev_priv->cfb_fb = work->crtc->fb->base.id;
273 dev_priv->cfb_y = work->crtc->y;
274 }
275
276 dev_priv->fbc_work = NULL;
277 }
278 mutex_unlock(&dev->struct_mutex);
279
280 kfree(work);
281}
282
283static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv)
284{
285 if (dev_priv->fbc_work == NULL)
286 return;
287
288 DRM_DEBUG_KMS("cancelling pending FBC enable\n");
289
290 /* Synchronisation is provided by struct_mutex and checking of
291 * dev_priv->fbc_work, so we can perform the cancellation
292 * entirely asynchronously.
293 */
294 if (cancel_delayed_work(&dev_priv->fbc_work->work))
295 /* tasklet was killed before being run, clean up */
296 kfree(dev_priv->fbc_work);
297
298 /* Mark the work as no longer wanted so that if it does
299 * wake-up (because the work was already running and waiting
300 * for our mutex), it will discover that it is no longer
301 * necessary to run.
302 */
303 dev_priv->fbc_work = NULL;
304}
305
306void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
307{
308 struct intel_fbc_work *work;
309 struct drm_device *dev = crtc->dev;
310 struct drm_i915_private *dev_priv = dev->dev_private;
311
312 if (!dev_priv->display.enable_fbc)
313 return;
314
315 intel_cancel_fbc_work(dev_priv);
316
317 work = kzalloc(sizeof *work, GFP_KERNEL);
318 if (work == NULL) {
319 dev_priv->display.enable_fbc(crtc, interval);
320 return;
321 }
322
323 work->crtc = crtc;
324 work->fb = crtc->fb;
325 work->interval = interval;
326 INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn);
327
328 dev_priv->fbc_work = work;
329
330 DRM_DEBUG_KMS("scheduling delayed FBC enable\n");
331
332 /* Delay the actual enabling to let pageflipping cease and the
333 * display to settle before starting the compression. Note that
334 * this delay also serves a second purpose: it allows for a
335 * vblank to pass after disabling the FBC before we attempt
336 * to modify the control registers.
337 *
338 * A more complicated solution would involve tracking vblanks
339 * following the termination of the page-flipping sequence
340 * and indeed performing the enable as a co-routine and not
341 * waiting synchronously upon the vblank.
342 */
343 schedule_delayed_work(&work->work, msecs_to_jiffies(50));
344}
345
346void intel_disable_fbc(struct drm_device *dev)
347{
348 struct drm_i915_private *dev_priv = dev->dev_private;
349
350 intel_cancel_fbc_work(dev_priv);
351
352 if (!dev_priv->display.disable_fbc)
353 return;
354
355 dev_priv->display.disable_fbc(dev);
356 dev_priv->cfb_plane = -1;
357}
358
359/**
360 * intel_update_fbc - enable/disable FBC as needed
361 * @dev: the drm_device
362 *
363 * Set up the framebuffer compression hardware at mode set time. We
364 * enable it if possible:
365 * - plane A only (on pre-965)
366 * - no pixel multiply/line duplication
367 * - no alpha buffer discard
368 * - no dual wide
369 * - framebuffer <= 2048 in width, 1536 in height
370 *
371 * We can't assume that any compression will take place (worst case),
372 * so the compressed buffer has to be the same size as the uncompressed
373 * one. It also must reside (along with the line length buffer) in
374 * stolen memory.
375 *
376 * We need to enable/disable FBC on a global basis.
377 */
378void intel_update_fbc(struct drm_device *dev)
379{
380 struct drm_i915_private *dev_priv = dev->dev_private;
381 struct drm_crtc *crtc = NULL, *tmp_crtc;
382 struct intel_crtc *intel_crtc;
383 struct drm_framebuffer *fb;
384 struct intel_framebuffer *intel_fb;
385 struct drm_i915_gem_object *obj;
386 int enable_fbc;
387
388 DRM_DEBUG_KMS("\n");
389
390 if (!i915_powersave)
391 return;
392
393 if (!I915_HAS_FBC(dev))
394 return;
395
396 /*
397 * If FBC is already on, we just have to verify that we can
398 * keep it that way...
399 * Need to disable if:
400 * - more than one pipe is active
401 * - changing FBC params (stride, fence, mode)
402 * - new fb is too large to fit in compressed buffer
403 * - going to an unsupported config (interlace, pixel multiply, etc.)
404 */
405 list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) {
406 if (tmp_crtc->enabled && tmp_crtc->fb) {
407 if (crtc) {
408 DRM_DEBUG_KMS("more than one pipe active, disabling compression\n");
409 dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES;
410 goto out_disable;
411 }
412 crtc = tmp_crtc;
413 }
414 }
415
416 if (!crtc || crtc->fb == NULL) {
417 DRM_DEBUG_KMS("no output, disabling\n");
418 dev_priv->no_fbc_reason = FBC_NO_OUTPUT;
419 goto out_disable;
420 }
421
422 intel_crtc = to_intel_crtc(crtc);
423 fb = crtc->fb;
424 intel_fb = to_intel_framebuffer(fb);
425 obj = intel_fb->obj;
426
427 enable_fbc = i915_enable_fbc;
428 if (enable_fbc < 0) {
429 DRM_DEBUG_KMS("fbc set to per-chip default\n");
430 enable_fbc = 1;
431 if (INTEL_INFO(dev)->gen <= 6)
432 enable_fbc = 0;
433 }
434 if (!enable_fbc) {
435 DRM_DEBUG_KMS("fbc disabled per module param\n");
436 dev_priv->no_fbc_reason = FBC_MODULE_PARAM;
437 goto out_disable;
438 }
439 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
440 DRM_DEBUG_KMS("framebuffer too large, disabling "
441 "compression\n");
442 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
443 goto out_disable;
444 }
445 if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) ||
446 (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) {
447 DRM_DEBUG_KMS("mode incompatible with compression, "
448 "disabling\n");
449 dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
450 goto out_disable;
451 }
452 if ((crtc->mode.hdisplay > 2048) ||
453 (crtc->mode.vdisplay > 1536)) {
454 DRM_DEBUG_KMS("mode too large for compression, disabling\n");
455 dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
456 goto out_disable;
457 }
458 if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) {
459 DRM_DEBUG_KMS("plane not 0, disabling compression\n");
460 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
461 goto out_disable;
462 }
463
464 /* The use of a CPU fence is mandatory in order to detect writes
465 * by the CPU to the scanout and trigger updates to the FBC.
466 */
467 if (obj->tiling_mode != I915_TILING_X ||
468 obj->fence_reg == I915_FENCE_REG_NONE) {
469 DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n");
470 dev_priv->no_fbc_reason = FBC_NOT_TILED;
471 goto out_disable;
472 }
473
474 /* If the kernel debugger is active, always disable compression */
475 if (in_dbg_master())
476 goto out_disable;
477
478 /* If the scanout has not changed, don't modify the FBC settings.
479 * Note that we make the fundamental assumption that the fb->obj
480 * cannot be unpinned (and have its GTT offset and fence revoked)
481 * without first being decoupled from the scanout and FBC disabled.
482 */
483 if (dev_priv->cfb_plane == intel_crtc->plane &&
484 dev_priv->cfb_fb == fb->base.id &&
485 dev_priv->cfb_y == crtc->y)
486 return;
487
488 if (intel_fbc_enabled(dev)) {
489 /* We update FBC along two paths, after changing fb/crtc
490 * configuration (modeswitching) and after page-flipping
491 * finishes. For the latter, we know that not only did
492 * we disable the FBC at the start of the page-flip
493 * sequence, but also more than one vblank has passed.
494 *
495 * For the former case of modeswitching, it is possible
496 * to switch between two FBC valid configurations
497 * instantaneously so we do need to disable the FBC
498 * before we can modify its control registers. We also
499 * have to wait for the next vblank for that to take
500 * effect. However, since we delay enabling FBC we can
501 * assume that a vblank has passed since disabling and
502 * that we can safely alter the registers in the deferred
503 * callback.
504 *
505 * In the scenario that we go from a valid to invalid
506 * and then back to valid FBC configuration we have
507 * no strict enforcement that a vblank occurred since
508 * disabling the FBC. However, along all current pipe
509 * disabling paths we do need to wait for a vblank at
510 * some point. And we wait before enabling FBC anyway.
511 */
512 DRM_DEBUG_KMS("disabling active FBC for update\n");
513 intel_disable_fbc(dev);
514 }
515
516 intel_enable_fbc(crtc, 500);
517 return;
518
519out_disable:
520 /* Multiple disables should be harmless */
521 if (intel_fbc_enabled(dev)) {
522 DRM_DEBUG_KMS("unsupported config, disabling FBC\n");
523 intel_disable_fbc(dev);
524 }
525}
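Stripped of the driver plumbing, the gating logic above is a conjunction of independent conditions. A standalone restatement (the struct and its field names are invented for the sketch; the real code additionally records a dev_priv->no_fbc_reason code for each failing check):

#include <stdbool.h>

struct fbc_candidate {
	int active_pipes;		/* FBC handles exactly one pipe */
	bool interlaced_or_dblscan;	/* unsupported scanout modes */
	bool fb_fits_stolen;		/* compressed buffer is worst-case sized */
	bool x_tiled_and_fenced;	/* CPU writes must be detectable */
	int hdisplay, vdisplay;
};

static bool fbc_possible(const struct fbc_candidate *c)
{
	return c->active_pipes == 1 &&
	       !c->interlaced_or_dblscan &&
	       c->fb_fits_stolen &&
	       c->hdisplay <= 2048 && c->vdisplay <= 1536 &&
	       c->x_tiled_and_fenced;
}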
526
527static const struct cxsr_latency cxsr_latency_table[] = {
528 {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */
529 {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */
530 {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */
531 {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */
532 {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */
533
534 {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */
535 {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */
536 {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */
537 {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */
538 {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */
539
540 {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */
541 {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */
542 {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */
543 {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */
544 {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */
545
546 {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */
547 {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */
548 {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */
549 {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */
550 {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */
551
552 {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */
553 {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */
554 {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */
555 {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */
556 {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */
557
558 {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */
559 {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */
560 {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */
561 {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */
562 {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */
563};
564
565const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop,
566 int is_ddr3,
567 int fsb,
568 int mem)
569{
570 const struct cxsr_latency *latency;
571 int i;
572
573 if (fsb == 0 || mem == 0)
574 return NULL;
575
576 for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) {
577 latency = &cxsr_latency_table[i];
578 if (is_desktop == latency->is_desktop &&
579 is_ddr3 == latency->is_ddr3 &&
580 fsb == latency->fsb_freq && mem == latency->mem_freq)
581 return latency;
582 }
583
584 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
585
586 return NULL;
587}
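Lookup is a straight linear probe over that table. For instance, a desktop part with DDR3 on an 800 MHz FSB and 667 MHz memory matches the {1, 1, 800, 667, 6420, ...} row above, giving a 6420 ns display self-refresh latency, while any unlisted combination returns NULL and the caller is expected to disable CxSR. A trivial usage sketch:

#include "i915_drv.h"
#include "intel_drv.h"

static bool example_cxsr_supported(void)
{
	/* Values read off cxsr_latency_table above. */
	return intel_get_cxsr_latency(1, 1, 800, 667) != NULL;
}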
588
589static void pineview_disable_cxsr(struct drm_device *dev)
590{
591 struct drm_i915_private *dev_priv = dev->dev_private;
592
593 /* deactivate cxsr */
594 I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN);
595}
596
597/*
598 * Latency for FIFO fetches is dependent on several factors:
599 * - memory configuration (speed, channels)
600 * - chipset
601 * - current MCH state
602 * It can be fairly high in some situations, so here we assume a fairly
603 * pessimal value. It's a tradeoff between extra memory fetches (if we
604 * set this value too high, the FIFO will fetch frequently to stay full)
605 * and power consumption (set it too low to save power and we might see
606 * FIFO underruns and display "flicker").
607 *
608 * A value of 5us seems to be a good balance; safe for very low end
609 * platforms but not overly aggressive on lower latency configs.
610 */
611static const int latency_ns = 5000;
612
613static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
614{
615 struct drm_i915_private *dev_priv = dev->dev_private;
616 uint32_t dsparb = I915_READ(DSPARB);
617 int size;
618
619 size = dsparb & 0x7f;
620 if (plane)
621 size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size;
622
623 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
624 plane ? "B" : "A", size);
625
626 return size;
627}
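DSPARB packs the split points of the shared display FIFO, so the per-plane sizes fall out of mask-and-shift arithmetic. A worked standalone example (the shift value mimics the DSPARB_CSTART_SHIFT definition in i915_reg.h and the register contents are invented):

#include <stdint.h>
#include <stdio.h>

#define DSPARB_CSTART_SHIFT	7	/* illustrative; see i915_reg.h */

int main(void)
{
	/* Hypothetical readout: plane A owns entries 0..63, plane B the
	 * range up to the plane C start point at entry 96. */
	uint32_t dsparb = (96u << DSPARB_CSTART_SHIFT) | 64u;
	int a = dsparb & 0x7f;
	int b = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - a;

	printf("plane A: %d entries, plane B: %d entries\n", a, b); /* 64, 32 */
	return 0;
}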
628
629static int i85x_get_fifo_size(struct drm_device *dev, int plane)
630{
631 struct drm_i915_private *dev_priv = dev->dev_private;
632 uint32_t dsparb = I915_READ(DSPARB);
633 int size;
634
635 size = dsparb & 0x1ff;
636 if (plane)
637 size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size;
638 size >>= 1; /* Convert to cachelines */
639
640 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
641 plane ? "B" : "A", size);
642
643 return size;
644}
645
646static int i845_get_fifo_size(struct drm_device *dev, int plane)
647{
648 struct drm_i915_private *dev_priv = dev->dev_private;
649 uint32_t dsparb = I915_READ(DSPARB);
650 int size;
651
652 size = dsparb & 0x7f;
653 size >>= 2; /* Convert to cachelines */
654
655 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
656 plane ? "B" : "A",
657 size);
658
659 return size;
660}
661
662static int i830_get_fifo_size(struct drm_device *dev, int plane)
663{
664 struct drm_i915_private *dev_priv = dev->dev_private;
665 uint32_t dsparb = I915_READ(DSPARB);
666 int size;
667
668 size = dsparb & 0x7f;
669 size >>= 1; /* Convert to cachelines */
670
671 DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb,
672 plane ? "B" : "A", size);
673
674 return size;
675}
676
677/* Pineview has different values for various configs */
678static const struct intel_watermark_params pineview_display_wm = {
679 PINEVIEW_DISPLAY_FIFO,
680 PINEVIEW_MAX_WM,
681 PINEVIEW_DFT_WM,
682 PINEVIEW_GUARD_WM,
683 PINEVIEW_FIFO_LINE_SIZE
684};
685static const struct intel_watermark_params pineview_display_hplloff_wm = {
686 PINEVIEW_DISPLAY_FIFO,
687 PINEVIEW_MAX_WM,
688 PINEVIEW_DFT_HPLLOFF_WM,
689 PINEVIEW_GUARD_WM,
690 PINEVIEW_FIFO_LINE_SIZE
691};
692static const struct intel_watermark_params pineview_cursor_wm = {
693 PINEVIEW_CURSOR_FIFO,
694 PINEVIEW_CURSOR_MAX_WM,
695 PINEVIEW_CURSOR_DFT_WM,
696 PINEVIEW_CURSOR_GUARD_WM,
697 PINEVIEW_FIFO_LINE_SIZE,
698};
699static const struct intel_watermark_params pineview_cursor_hplloff_wm = {
700 PINEVIEW_CURSOR_FIFO,
701 PINEVIEW_CURSOR_MAX_WM,
702 PINEVIEW_CURSOR_DFT_WM,
703 PINEVIEW_CURSOR_GUARD_WM,
704 PINEVIEW_FIFO_LINE_SIZE
705};
706static const struct intel_watermark_params g4x_wm_info = {
707 G4X_FIFO_SIZE,
708 G4X_MAX_WM,
709 G4X_MAX_WM,
710 2,
711 G4X_FIFO_LINE_SIZE,
712};
713static const struct intel_watermark_params g4x_cursor_wm_info = {
714 I965_CURSOR_FIFO,
715 I965_CURSOR_MAX_WM,
716 I965_CURSOR_DFT_WM,
717 2,
718 G4X_FIFO_LINE_SIZE,
719};
720static const struct intel_watermark_params valleyview_wm_info = {
721 VALLEYVIEW_FIFO_SIZE,
722 VALLEYVIEW_MAX_WM,
723 VALLEYVIEW_MAX_WM,
724 2,
725 G4X_FIFO_LINE_SIZE,
726};
727static const struct intel_watermark_params valleyview_cursor_wm_info = {
728 I965_CURSOR_FIFO,
729 VALLEYVIEW_CURSOR_MAX_WM,
730 I965_CURSOR_DFT_WM,
731 2,
732 G4X_FIFO_LINE_SIZE,
733};
734static const struct intel_watermark_params i965_cursor_wm_info = {
735 I965_CURSOR_FIFO,
736 I965_CURSOR_MAX_WM,
737 I965_CURSOR_DFT_WM,
738 2,
739 I915_FIFO_LINE_SIZE,
740};
741static const struct intel_watermark_params i945_wm_info = {
742 I945_FIFO_SIZE,
743 I915_MAX_WM,
744 1,
745 2,
746 I915_FIFO_LINE_SIZE
747};
748static const struct intel_watermark_params i915_wm_info = {
749 I915_FIFO_SIZE,
750 I915_MAX_WM,
751 1,
752 2,
753 I915_FIFO_LINE_SIZE
754};
755static const struct intel_watermark_params i855_wm_info = {
756 I855GM_FIFO_SIZE,
757 I915_MAX_WM,
758 1,
759 2,
760 I830_FIFO_LINE_SIZE
761};
762static const struct intel_watermark_params i830_wm_info = {
763 I830_FIFO_SIZE,
764 I915_MAX_WM,
765 1,
766 2,
767 I830_FIFO_LINE_SIZE
768};
769
770static const struct intel_watermark_params ironlake_display_wm_info = {
771 ILK_DISPLAY_FIFO,
772 ILK_DISPLAY_MAXWM,
773 ILK_DISPLAY_DFTWM,
774 2,
775 ILK_FIFO_LINE_SIZE
776};
777static const struct intel_watermark_params ironlake_cursor_wm_info = {
778 ILK_CURSOR_FIFO,
779 ILK_CURSOR_MAXWM,
780 ILK_CURSOR_DFTWM,
781 2,
782 ILK_FIFO_LINE_SIZE
783};
784static const struct intel_watermark_params ironlake_display_srwm_info = {
785 ILK_DISPLAY_SR_FIFO,
786 ILK_DISPLAY_MAX_SRWM,
787 ILK_DISPLAY_DFT_SRWM,
788 2,
789 ILK_FIFO_LINE_SIZE
790};
791static const struct intel_watermark_params ironlake_cursor_srwm_info = {
792 ILK_CURSOR_SR_FIFO,
793 ILK_CURSOR_MAX_SRWM,
794 ILK_CURSOR_DFT_SRWM,
795 2,
796 ILK_FIFO_LINE_SIZE
797};
798
799static const struct intel_watermark_params sandybridge_display_wm_info = {
800 SNB_DISPLAY_FIFO,
801 SNB_DISPLAY_MAXWM,
802 SNB_DISPLAY_DFTWM,
803 2,
804 SNB_FIFO_LINE_SIZE
805};
806static const struct intel_watermark_params sandybridge_cursor_wm_info = {
807 SNB_CURSOR_FIFO,
808 SNB_CURSOR_MAXWM,
809 SNB_CURSOR_DFTWM,
810 2,
811 SNB_FIFO_LINE_SIZE
812};
813static const struct intel_watermark_params sandybridge_display_srwm_info = {
814 SNB_DISPLAY_SR_FIFO,
815 SNB_DISPLAY_MAX_SRWM,
816 SNB_DISPLAY_DFT_SRWM,
817 2,
818 SNB_FIFO_LINE_SIZE
819};
820static const struct intel_watermark_params sandybridge_cursor_srwm_info = {
821 SNB_CURSOR_SR_FIFO,
822 SNB_CURSOR_MAX_SRWM,
823 SNB_CURSOR_DFT_SRWM,
824 2,
825 SNB_FIFO_LINE_SIZE
826};
827
828
829/**
830 * intel_calculate_wm - calculate watermark level
831 * @clock_in_khz: pixel clock
832 * @wm: chip FIFO params
833 * @pixel_size: display pixel size
834 * @latency_ns: memory latency for the platform
835 *
836 * Calculate the watermark level (the level at which the display plane will
837 * start fetching from memory again). Each chip has a different display
838 * FIFO size and allocation, so the caller needs to figure that out and pass
839 * in the correct intel_watermark_params structure.
840 *
841 * As the pixel clock runs, the FIFO will be drained at a rate that depends
842 * on the pixel size. When it reaches the watermark level, it'll start
843 * fetching FIFO line sized chunks from memory until the FIFO fills
844 * past the watermark point. If the FIFO drains completely, a FIFO underrun
845 * will occur, and a display engine hang could result.
846 */
847static unsigned long intel_calculate_wm(unsigned long clock_in_khz,
848 const struct intel_watermark_params *wm,
849 int fifo_size,
850 int pixel_size,
851 unsigned long latency_ns)
852{
853 long entries_required, wm_size;
854
855 /*
856 * Note: we need to make sure we don't overflow for various clock &
857 * latency values.
858 * clocks go from a few thousand to several hundred thousand.
859 * latency is usually a few thousand
860 */
861 entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) /
862 1000;
863 entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size);
864
865 DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required);
866
867 wm_size = fifo_size - (entries_required + wm->guard_size);
868
869 DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size);
870
871 /* Don't promote wm_size to unsigned... */
872 if (wm_size > (long)wm->max_wm)
873 wm_size = wm->max_wm;
874 if (wm_size <= 0)
875 wm_size = wm->default_wm;
876 return wm_size;
877}
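A worked instance of that formula, with invented but plausible inputs (a 1080p-class 148500 kHz pixel clock, 4 bytes per pixel, the 5000 ns latency_ns default above, and a hypothetical 512-entry FIFO with 64-byte cachelines and a guard of 2):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	long clock_khz = 148500, pixel_size = 4, latency_ns = 5000;
	long cacheline = 64, fifo_size = 512, guard = 2;

	/* Bytes drained from the FIFO while one memory fetch is in flight. */
	long entries = (clock_khz / 1000) * pixel_size * latency_ns / 1000;

	entries = DIV_ROUND_UP(entries, cacheline);		/* 47 cachelines */
	printf("wm = %ld\n", fifo_size - (entries + guard));	/* 463 */
	return 0;
}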
878
879static struct drm_crtc *single_enabled_crtc(struct drm_device *dev)
880{
881 struct drm_crtc *crtc, *enabled = NULL;
882
883 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
884 if (crtc->enabled && crtc->fb) {
885 if (enabled)
886 return NULL;
887 enabled = crtc;
888 }
889 }
890
891 return enabled;
892}
893
894static void pineview_update_wm(struct drm_device *dev)
895{
896 struct drm_i915_private *dev_priv = dev->dev_private;
897 struct drm_crtc *crtc;
898 const struct cxsr_latency *latency;
899 u32 reg;
900 unsigned long wm;
901
902 latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3,
903 dev_priv->fsb_freq, dev_priv->mem_freq);
904 if (!latency) {
905 DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n");
906 pineview_disable_cxsr(dev);
907 return;
908 }
909
910 crtc = single_enabled_crtc(dev);
911 if (crtc) {
912 int clock = crtc->mode.clock;
913 int pixel_size = crtc->fb->bits_per_pixel / 8;
914
915 /* Display SR */
916 wm = intel_calculate_wm(clock, &pineview_display_wm,
917 pineview_display_wm.fifo_size,
918 pixel_size, latency->display_sr);
919 reg = I915_READ(DSPFW1);
920 reg &= ~DSPFW_SR_MASK;
921 reg |= wm << DSPFW_SR_SHIFT;
922 I915_WRITE(DSPFW1, reg);
923 DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg);
924
925 /* cursor SR */
926 wm = intel_calculate_wm(clock, &pineview_cursor_wm,
927 pineview_display_wm.fifo_size,
928 pixel_size, latency->cursor_sr);
929 reg = I915_READ(DSPFW3);
930 reg &= ~DSPFW_CURSOR_SR_MASK;
931 reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT;
932 I915_WRITE(DSPFW3, reg);
933
934 /* Display HPLL off SR */
935 wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm,
936 pineview_display_hplloff_wm.fifo_size,
937 pixel_size, latency->display_hpll_disable);
938 reg = I915_READ(DSPFW3);
939 reg &= ~DSPFW_HPLL_SR_MASK;
940 reg |= wm & DSPFW_HPLL_SR_MASK;
941 I915_WRITE(DSPFW3, reg);
942
943 /* cursor HPLL off SR */
944 wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm,
945 pineview_display_hplloff_wm.fifo_size,
946 pixel_size, latency->cursor_hpll_disable);
947 reg = I915_READ(DSPFW3);
948 reg &= ~DSPFW_HPLL_CURSOR_MASK;
949 reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT;
950 I915_WRITE(DSPFW3, reg);
951 DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg);
952
953 /* activate cxsr */
954 I915_WRITE(DSPFW3,
955 I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN);
956 DRM_DEBUG_KMS("Self-refresh is enabled\n");
957 } else {
958 pineview_disable_cxsr(dev);
959 DRM_DEBUG_KMS("Self-refresh is disabled\n");
960 }
961}
962
963static bool g4x_compute_wm0(struct drm_device *dev,
964 int plane,
965 const struct intel_watermark_params *display,
966 int display_latency_ns,
967 const struct intel_watermark_params *cursor,
968 int cursor_latency_ns,
969 int *plane_wm,
970 int *cursor_wm)
971{
972 struct drm_crtc *crtc;
973 int htotal, hdisplay, clock, pixel_size;
974 int line_time_us, line_count;
975 int entries, tlb_miss;
976
977 crtc = intel_get_crtc_for_plane(dev, plane);
978 if (crtc->fb == NULL || !crtc->enabled) {
979 *cursor_wm = cursor->guard_size;
980 *plane_wm = display->guard_size;
981 return false;
982 }
983
984 htotal = crtc->mode.htotal;
985 hdisplay = crtc->mode.hdisplay;
986 clock = crtc->mode.clock;
987 pixel_size = crtc->fb->bits_per_pixel / 8;
988
989 /* Use the small buffer method to calculate plane watermark */
990 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
991 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
992 if (tlb_miss > 0)
993 entries += tlb_miss;
994 entries = DIV_ROUND_UP(entries, display->cacheline_size);
995 *plane_wm = entries + display->guard_size;
996 if (*plane_wm > (int)display->max_wm)
997 *plane_wm = display->max_wm;
998
999 /* Use the large buffer method to calculate cursor watermark */
1000 line_time_us = ((htotal * 1000) / clock);
1001 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
1002 entries = line_count * 64 * pixel_size;
1003 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
1004 if (tlb_miss > 0)
1005 entries += tlb_miss;
1006 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1007 *cursor_wm = entries + cursor->guard_size;
1008 if (*cursor_wm > (int)cursor->max_wm)
1009 *cursor_wm = (int)cursor->max_wm;
1010
1011 return true;
1012}
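/*
 * Small-buffer example for the plane watermark above (illustrative
 * values; latency_ns assumed to be 5000): at 100000 kHz and 4 bpp,
 * entries = ((100000 * 4 / 1000) * 5000) / 1000 = 2000 bytes.  A
 * 127-entry FIFO of 64-byte cachelines in front of a 1024-pixel-wide
 * plane gives tlb_miss = 127 * 64 - 1024 * 8 = -64, so no TLB padding
 * is added and *plane_wm = DIV_ROUND_UP(2000, 64) + guard = 32 + guard.
 */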
1013
1014/*
1015 * Check the wm result.
1016 *
1017 * If any calculated watermark value is larger than the maximum value that
1018 * can be programmed into the associated watermark register, that watermark
1019 * must be disabled.
1020 */
1021static bool g4x_check_srwm(struct drm_device *dev,
1022 int display_wm, int cursor_wm,
1023 const struct intel_watermark_params *display,
1024 const struct intel_watermark_params *cursor)
1025{
1026 DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n",
1027 display_wm, cursor_wm);
1028
1029 if (display_wm > display->max_wm) {
1030		DRM_DEBUG_KMS("display watermark is too large (%d/%ld), disabling\n",
1031 display_wm, display->max_wm);
1032 return false;
1033 }
1034
1035 if (cursor_wm > cursor->max_wm) {
1036		DRM_DEBUG_KMS("cursor watermark is too large (%d/%ld), disabling\n",
1037 cursor_wm, cursor->max_wm);
1038 return false;
1039 }
1040
1041 if (!(display_wm || cursor_wm)) {
1042 DRM_DEBUG_KMS("SR latency is 0, disabling\n");
1043 return false;
1044 }
1045
1046 return true;
1047}
1048
1049static bool g4x_compute_srwm(struct drm_device *dev,
1050 int plane,
1051 int latency_ns,
1052 const struct intel_watermark_params *display,
1053 const struct intel_watermark_params *cursor,
1054 int *display_wm, int *cursor_wm)
1055{
1056 struct drm_crtc *crtc;
1057 int hdisplay, htotal, pixel_size, clock;
1058 unsigned long line_time_us;
1059 int line_count, line_size;
1060 int small, large;
1061 int entries;
1062
1063 if (!latency_ns) {
1064 *display_wm = *cursor_wm = 0;
1065 return false;
1066 }
1067
1068 crtc = intel_get_crtc_for_plane(dev, plane);
1069 hdisplay = crtc->mode.hdisplay;
1070 htotal = crtc->mode.htotal;
1071 clock = crtc->mode.clock;
1072 pixel_size = crtc->fb->bits_per_pixel / 8;
1073
1074 line_time_us = (htotal * 1000) / clock;
1075 line_count = (latency_ns / line_time_us + 1000) / 1000;
1076 line_size = hdisplay * pixel_size;
1077
1078 /* Use the minimum of the small and large buffer method for primary */
1079 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1080 large = line_count * line_size;
1081
1082 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1083 *display_wm = entries + display->guard_size;
1084
1085 /* calculate the self-refresh watermark for display cursor */
1086 entries = line_count * pixel_size * 64;
1087 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1088 *cursor_wm = entries + cursor->guard_size;
1089
1090 return g4x_check_srwm(dev,
1091 *display_wm, *cursor_wm,
1092 display, cursor);
1093}
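/*
 * Example of the small vs. large buffer method (hypothetical mode):
 * for htotal = 2200, hdisplay = 1920, 100000 kHz, 4 bpp and 12000 ns of
 * SR latency, line_time_us = (2200 * 1000) / 100000 = 22, so line_count
 * = (12000 / 22 + 1000) / 1000 = 1.  small = ((100000 * 4 / 1000) *
 * 12000) / 1000 = 4800 bytes and large = 1 * 1920 * 4 = 7680 bytes;
 * min(small, large) = 4800 is converted to cachelines and padded with
 * the guard size, while the cursor uses 1 * 4 * 64 = 256 bytes.
 */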
1094
1095static bool vlv_compute_drain_latency(struct drm_device *dev,
1096 int plane,
1097 int *plane_prec_mult,
1098 int *plane_dl,
1099 int *cursor_prec_mult,
1100 int *cursor_dl)
1101{
1102 struct drm_crtc *crtc;
1103 int clock, pixel_size;
1104 int entries;
1105
1106 crtc = intel_get_crtc_for_plane(dev, plane);
1107 if (crtc->fb == NULL || !crtc->enabled)
1108 return false;
1109
1110 clock = crtc->mode.clock; /* VESA DOT Clock */
1111 pixel_size = crtc->fb->bits_per_pixel / 8; /* BPP */
1112
1113 entries = (clock / 1000) * pixel_size;
1114 *plane_prec_mult = (entries > 256) ?
1115 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1116 *plane_dl = (64 * (*plane_prec_mult) * 4) / ((clock / 1000) *
1117 pixel_size);
1118
1119 entries = (clock / 1000) * 4; /* BPP is always 4 for cursor */
1120 *cursor_prec_mult = (entries > 256) ?
1121 DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_16;
1122 *cursor_dl = (64 * (*cursor_prec_mult) * 4) / ((clock / 1000) * 4);
1123
1124 return true;
1125}
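/*
 * Drain latency example (hypothetical mode, taking
 * DRAIN_LATENCY_PRECISION_32 to be the value 32): at 100000 kHz and
 * 4 bpp, entries = (100000 / 1000) * 4 = 400 > 256, so the 32x
 * precision multiplier is selected and *plane_dl = (64 * 32 * 4) /
 * (100 * 4) = 20.
 */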
1126
1127/*
1128 * Update drain latency registers of memory arbiter
1129 *
1130 * Valleyview SoC has a new memory arbiter and needs drain latency registers
1131 * to be programmed. Each plane has a drain latency multiplier and a drain
1132 * latency value.
1133 */
1134
1135static void vlv_update_drain_latency(struct drm_device *dev)
1136{
1137 struct drm_i915_private *dev_priv = dev->dev_private;
1138 int planea_prec, planea_dl, planeb_prec, planeb_dl;
1139 int cursora_prec, cursora_dl, cursorb_prec, cursorb_dl;
1140 int plane_prec_mult, cursor_prec_mult; /* Precision multiplier is
1141 either 16 or 32 */
1142
1143 /* For plane A, Cursor A */
1144 if (vlv_compute_drain_latency(dev, 0, &plane_prec_mult, &planea_dl,
1145 &cursor_prec_mult, &cursora_dl)) {
1146 cursora_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1147 DDL_CURSORA_PRECISION_32 : DDL_CURSORA_PRECISION_16;
1148 planea_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1149 DDL_PLANEA_PRECISION_32 : DDL_PLANEA_PRECISION_16;
1150
1151 I915_WRITE(VLV_DDL1, cursora_prec |
1152 (cursora_dl << DDL_CURSORA_SHIFT) |
1153 planea_prec | planea_dl);
1154 }
1155
1156 /* For plane B, Cursor B */
1157 if (vlv_compute_drain_latency(dev, 1, &plane_prec_mult, &planeb_dl,
1158 &cursor_prec_mult, &cursorb_dl)) {
1159 cursorb_prec = (cursor_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1160 DDL_CURSORB_PRECISION_32 : DDL_CURSORB_PRECISION_16;
1161 planeb_prec = (plane_prec_mult == DRAIN_LATENCY_PRECISION_32) ?
1162 DDL_PLANEB_PRECISION_32 : DDL_PLANEB_PRECISION_16;
1163
1164 I915_WRITE(VLV_DDL2, cursorb_prec |
1165 (cursorb_dl << DDL_CURSORB_SHIFT) |
1166 planeb_prec | planeb_dl);
1167 }
1168}
1169
1170#define single_plane_enabled(mask) is_power_of_2(mask)
1171
1172static void valleyview_update_wm(struct drm_device *dev)
1173{
1174 static const int sr_latency_ns = 12000;
1175 struct drm_i915_private *dev_priv = dev->dev_private;
1176 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1177 int plane_sr, cursor_sr;
1178 unsigned int enabled = 0;
1179
1180 vlv_update_drain_latency(dev);
1181
1182 if (g4x_compute_wm0(dev, 0,
1183 &valleyview_wm_info, latency_ns,
1184 &valleyview_cursor_wm_info, latency_ns,
1185 &planea_wm, &cursora_wm))
1186 enabled |= 1;
1187
1188 if (g4x_compute_wm0(dev, 1,
1189 &valleyview_wm_info, latency_ns,
1190 &valleyview_cursor_wm_info, latency_ns,
1191 &planeb_wm, &cursorb_wm))
1192 enabled |= 2;
1193
1194 plane_sr = cursor_sr = 0;
1195 if (single_plane_enabled(enabled) &&
1196 g4x_compute_srwm(dev, ffs(enabled) - 1,
1197 sr_latency_ns,
1198 &valleyview_wm_info,
1199 &valleyview_cursor_wm_info,
1200 &plane_sr, &cursor_sr))
1201 I915_WRITE(FW_BLC_SELF_VLV, FW_CSPWRDWNEN);
1202 else
1203 I915_WRITE(FW_BLC_SELF_VLV,
1204 I915_READ(FW_BLC_SELF_VLV) & ~FW_CSPWRDWNEN);
1205
1206 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1207 planea_wm, cursora_wm,
1208 planeb_wm, cursorb_wm,
1209 plane_sr, cursor_sr);
1210
1211 I915_WRITE(DSPFW1,
1212 (plane_sr << DSPFW_SR_SHIFT) |
1213 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1214 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1215 planea_wm);
1216 I915_WRITE(DSPFW2,
1217 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1218 (cursora_wm << DSPFW_CURSORA_SHIFT));
1219 I915_WRITE(DSPFW3,
1220 (I915_READ(DSPFW3) | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)));
1221}
1222
1223static void g4x_update_wm(struct drm_device *dev)
1224{
1225 static const int sr_latency_ns = 12000;
1226 struct drm_i915_private *dev_priv = dev->dev_private;
1227 int planea_wm, planeb_wm, cursora_wm, cursorb_wm;
1228 int plane_sr, cursor_sr;
1229 unsigned int enabled = 0;
1230
1231 if (g4x_compute_wm0(dev, 0,
1232 &g4x_wm_info, latency_ns,
1233 &g4x_cursor_wm_info, latency_ns,
1234 &planea_wm, &cursora_wm))
1235 enabled |= 1;
1236
1237 if (g4x_compute_wm0(dev, 1,
1238 &g4x_wm_info, latency_ns,
1239 &g4x_cursor_wm_info, latency_ns,
1240 &planeb_wm, &cursorb_wm))
1241 enabled |= 2;
1242
1243 plane_sr = cursor_sr = 0;
1244 if (single_plane_enabled(enabled) &&
1245 g4x_compute_srwm(dev, ffs(enabled) - 1,
1246 sr_latency_ns,
1247 &g4x_wm_info,
1248 &g4x_cursor_wm_info,
1249 &plane_sr, &cursor_sr))
1250 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1251 else
1252 I915_WRITE(FW_BLC_SELF,
1253 I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN);
1254
1255 DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n",
1256 planea_wm, cursora_wm,
1257 planeb_wm, cursorb_wm,
1258 plane_sr, cursor_sr);
1259
1260 I915_WRITE(DSPFW1,
1261 (plane_sr << DSPFW_SR_SHIFT) |
1262 (cursorb_wm << DSPFW_CURSORB_SHIFT) |
1263 (planeb_wm << DSPFW_PLANEB_SHIFT) |
1264 planea_wm);
1265 I915_WRITE(DSPFW2,
1266 (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) |
1267 (cursora_wm << DSPFW_CURSORA_SHIFT));
1268 /* HPLL off in SR has some issues on G4x... disable it */
1269 I915_WRITE(DSPFW3,
1270 (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) |
1271 (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1272}
1273
1274static void i965_update_wm(struct drm_device *dev)
1275{
1276 struct drm_i915_private *dev_priv = dev->dev_private;
1277 struct drm_crtc *crtc;
1278 int srwm = 1;
1279 int cursor_sr = 16;
1280
1281 /* Calc sr entries for one plane configs */
1282 crtc = single_enabled_crtc(dev);
1283 if (crtc) {
1284 /* self-refresh has much higher latency */
1285 static const int sr_latency_ns = 12000;
1286 int clock = crtc->mode.clock;
1287 int htotal = crtc->mode.htotal;
1288 int hdisplay = crtc->mode.hdisplay;
1289 int pixel_size = crtc->fb->bits_per_pixel / 8;
1290 unsigned long line_time_us;
1291 int entries;
1292
1293 line_time_us = ((htotal * 1000) / clock);
1294
1295 /* Use ns/us then divide to preserve precision */
1296 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1297 pixel_size * hdisplay;
1298 entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE);
1299 srwm = I965_FIFO_SIZE - entries;
1300 if (srwm < 0)
1301 srwm = 1;
1302 srwm &= 0x1ff;
1303 DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n",
1304 entries, srwm);
1305
1306 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1307 pixel_size * 64;
1308 entries = DIV_ROUND_UP(entries,
1309 i965_cursor_wm_info.cacheline_size);
1310 cursor_sr = i965_cursor_wm_info.fifo_size -
1311 (entries + i965_cursor_wm_info.guard_size);
1312
1313 if (cursor_sr > i965_cursor_wm_info.max_wm)
1314 cursor_sr = i965_cursor_wm_info.max_wm;
1315
1316 DRM_DEBUG_KMS("self-refresh watermark: display plane %d "
1317 "cursor %d\n", srwm, cursor_sr);
1318
1319 if (IS_CRESTLINE(dev))
1320 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN);
1321 } else {
1322 /* Turn off self refresh if both pipes are enabled */
1323 if (IS_CRESTLINE(dev))
1324 I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
1325 & ~FW_BLC_SELF_EN);
1326 }
1327
1328 DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n",
1329 srwm);
1330
1331 /* 965 has limitations... */
1332 I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) |
1333 (8 << 16) | (8 << 8) | (8 << 0));
1334 I915_WRITE(DSPFW2, (8 << 8) | (8 << 0));
1335 /* update cursor SR watermark */
1336 I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT));
1337}
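/*
 * Example for the SR path above (assuming I965_FIFO_SIZE = 512 and
 * I915_FIFO_LINE_SIZE = 64): at 22 us per line and 12000 ns of latency,
 * ((12000 / 22) + 1000) / 1000 = 1 line, so entries = 1 * 4 * 1920 =
 * 7680 bytes = 120 FIFO lines and srwm = 512 - 120 = 392.
 */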
1338
1339static void i9xx_update_wm(struct drm_device *dev)
1340{
1341 struct drm_i915_private *dev_priv = dev->dev_private;
1342 const struct intel_watermark_params *wm_info;
1343 uint32_t fwater_lo;
1344 uint32_t fwater_hi;
1345 int cwm, srwm = 1;
1346 int fifo_size;
1347 int planea_wm, planeb_wm;
1348 struct drm_crtc *crtc, *enabled = NULL;
1349
1350 if (IS_I945GM(dev))
1351 wm_info = &i945_wm_info;
1352 else if (!IS_GEN2(dev))
1353 wm_info = &i915_wm_info;
1354 else
1355 wm_info = &i855_wm_info;
1356
1357 fifo_size = dev_priv->display.get_fifo_size(dev, 0);
1358 crtc = intel_get_crtc_for_plane(dev, 0);
1359 if (crtc->enabled && crtc->fb) {
1360 planea_wm = intel_calculate_wm(crtc->mode.clock,
1361 wm_info, fifo_size,
1362 crtc->fb->bits_per_pixel / 8,
1363 latency_ns);
1364 enabled = crtc;
1365 } else
1366 planea_wm = fifo_size - wm_info->guard_size;
1367
1368 fifo_size = dev_priv->display.get_fifo_size(dev, 1);
1369 crtc = intel_get_crtc_for_plane(dev, 1);
1370 if (crtc->enabled && crtc->fb) {
1371 planeb_wm = intel_calculate_wm(crtc->mode.clock,
1372 wm_info, fifo_size,
1373 crtc->fb->bits_per_pixel / 8,
1374 latency_ns);
1375 if (enabled == NULL)
1376 enabled = crtc;
1377 else
1378 enabled = NULL;
1379 } else
1380 planeb_wm = fifo_size - wm_info->guard_size;
1381
1382 DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm);
1383
1384 /*
1385 * Overlay gets an aggressive default since video jitter is bad.
1386 */
1387 cwm = 2;
1388
1389 /* Play safe and disable self-refresh before adjusting watermarks. */
1390 if (IS_I945G(dev) || IS_I945GM(dev))
1391 I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0);
1392 else if (IS_I915GM(dev))
1393 I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
1394
1395 /* Calc sr entries for one plane configs */
1396 if (HAS_FW_BLC(dev) && enabled) {
1397 /* self-refresh has much higher latency */
1398 static const int sr_latency_ns = 6000;
1399 int clock = enabled->mode.clock;
1400 int htotal = enabled->mode.htotal;
1401 int hdisplay = enabled->mode.hdisplay;
1402 int pixel_size = enabled->fb->bits_per_pixel / 8;
1403 unsigned long line_time_us;
1404 int entries;
1405
1406 line_time_us = (htotal * 1000) / clock;
1407
1408 /* Use ns/us then divide to preserve precision */
1409 entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) *
1410 pixel_size * hdisplay;
1411 entries = DIV_ROUND_UP(entries, wm_info->cacheline_size);
1412 DRM_DEBUG_KMS("self-refresh entries: %d\n", entries);
1413 srwm = wm_info->fifo_size - entries;
1414 if (srwm < 0)
1415 srwm = 1;
1416
1417 if (IS_I945G(dev) || IS_I945GM(dev))
1418 I915_WRITE(FW_BLC_SELF,
1419 FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
1420 else if (IS_I915GM(dev))
1421 I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
1422 }
1423
1424 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
1425 planea_wm, planeb_wm, cwm, srwm);
1426
1427 fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f);
1428 fwater_hi = (cwm & 0x1f);
1429
1430 /* Set request length to 8 cachelines per fetch */
1431 fwater_lo = fwater_lo | (1 << 24) | (1 << 8);
1432 fwater_hi = fwater_hi | (1 << 8);
1433
1434 I915_WRITE(FW_BLC, fwater_lo);
1435 I915_WRITE(FW_BLC2, fwater_hi);
1436
1437 if (HAS_FW_BLC(dev)) {
1438 if (enabled) {
1439 if (IS_I945G(dev) || IS_I945GM(dev))
1440 I915_WRITE(FW_BLC_SELF,
1441 FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
1442 else if (IS_I915GM(dev))
1443 I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
1444 DRM_DEBUG_KMS("memory self refresh enabled\n");
1445 } else
1446 DRM_DEBUG_KMS("memory self refresh disabled\n");
1447 }
1448}
1449
1450static void i830_update_wm(struct drm_device *dev)
1451{
1452 struct drm_i915_private *dev_priv = dev->dev_private;
1453 struct drm_crtc *crtc;
1454 uint32_t fwater_lo;
1455 int planea_wm;
1456
1457 crtc = single_enabled_crtc(dev);
1458 if (crtc == NULL)
1459 return;
1460
1461 planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info,
1462 dev_priv->display.get_fifo_size(dev, 0),
1463 crtc->fb->bits_per_pixel / 8,
1464 latency_ns);
1465 fwater_lo = I915_READ(FW_BLC) & ~0xfff;
1466 fwater_lo |= (3<<8) | planea_wm;
1467
1468 DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm);
1469
1470 I915_WRITE(FW_BLC, fwater_lo);
1471}
1472
1473#define ILK_LP0_PLANE_LATENCY 700
1474#define ILK_LP0_CURSOR_LATENCY 1300
1475
1476/*
1477 * Check the wm result.
1478 *
1479 * If any calculated watermark value is larger than the maximum value that
1480 * can be programmed into the associated watermark register, that watermark
1481 * must be disabled.
1482 */
1483static bool ironlake_check_srwm(struct drm_device *dev, int level,
1484 int fbc_wm, int display_wm, int cursor_wm,
1485 const struct intel_watermark_params *display,
1486 const struct intel_watermark_params *cursor)
1487{
1488 struct drm_i915_private *dev_priv = dev->dev_private;
1489
1490 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
1491 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
1492
1493 if (fbc_wm > SNB_FBC_MAX_SRWM) {
1494		DRM_DEBUG_KMS("fbc watermark (%d) is too large (%d), disabling wm%d+\n",
1495 fbc_wm, SNB_FBC_MAX_SRWM, level);
1496
1497		/* fbc has its own way to disable FBC WM */
1498 I915_WRITE(DISP_ARB_CTL,
1499 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
1500 return false;
1501 }
1502
1503 if (display_wm > display->max_wm) {
1504		DRM_DEBUG_KMS("display watermark (%d) is too large (%ld), disabling wm%d+\n",
1505			      display_wm, display->max_wm, level);
1506 return false;
1507 }
1508
1509 if (cursor_wm > cursor->max_wm) {
1510		DRM_DEBUG_KMS("cursor watermark (%d) is too large (%ld), disabling wm%d+\n",
1511			      cursor_wm, cursor->max_wm, level);
1512 return false;
1513 }
1514
1515 if (!(fbc_wm || display_wm || cursor_wm)) {
1516 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
1517 return false;
1518 }
1519
1520 return true;
1521}
1522
1523/*
1524 * Compute the watermark values for WM[1-3].
1525 */
1526static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane,
1527 int latency_ns,
1528 const struct intel_watermark_params *display,
1529 const struct intel_watermark_params *cursor,
1530 int *fbc_wm, int *display_wm, int *cursor_wm)
1531{
1532 struct drm_crtc *crtc;
1533 unsigned long line_time_us;
1534 int hdisplay, htotal, pixel_size, clock;
1535 int line_count, line_size;
1536 int small, large;
1537 int entries;
1538
1539 if (!latency_ns) {
1540 *fbc_wm = *display_wm = *cursor_wm = 0;
1541 return false;
1542 }
1543
1544 crtc = intel_get_crtc_for_plane(dev, plane);
1545 hdisplay = crtc->mode.hdisplay;
1546 htotal = crtc->mode.htotal;
1547 clock = crtc->mode.clock;
1548 pixel_size = crtc->fb->bits_per_pixel / 8;
1549
1550 line_time_us = (htotal * 1000) / clock;
1551 line_count = (latency_ns / line_time_us + 1000) / 1000;
1552 line_size = hdisplay * pixel_size;
1553
1554 /* Use the minimum of the small and large buffer method for primary */
1555 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1556 large = line_count * line_size;
1557
1558 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1559 *display_wm = entries + display->guard_size;
1560
1561 /*
1562 * Spec says:
1563 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
1564 */
1565 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
1566
1567 /* calculate the self-refresh watermark for display cursor */
1568 entries = line_count * pixel_size * 64;
1569 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
1570 *cursor_wm = entries + cursor->guard_size;
1571
1572 return ironlake_check_srwm(dev, level,
1573 *fbc_wm, *display_wm, *cursor_wm,
1574 display, cursor);
1575}
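/*
 * FBC WM example for the spec formula above (made-up values): with a
 * final primary watermark of 77 cachelines and a 1920 * 4 = 7680 byte
 * line, *fbc_wm = DIV_ROUND_UP(77 * 64, 7680) + 2 = 1 + 2 = 3 lines.
 */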
1576
1577static void ironlake_update_wm(struct drm_device *dev)
1578{
1579 struct drm_i915_private *dev_priv = dev->dev_private;
1580 int fbc_wm, plane_wm, cursor_wm;
1581 unsigned int enabled;
1582
1583 enabled = 0;
1584 if (g4x_compute_wm0(dev, 0,
1585 &ironlake_display_wm_info,
1586 ILK_LP0_PLANE_LATENCY,
1587 &ironlake_cursor_wm_info,
1588 ILK_LP0_CURSOR_LATENCY,
1589 &plane_wm, &cursor_wm)) {
1590 I915_WRITE(WM0_PIPEA_ILK,
1591 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1592		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1593			      " plane %d, cursor: %d\n",
1594 plane_wm, cursor_wm);
1595 enabled |= 1;
1596 }
1597
1598 if (g4x_compute_wm0(dev, 1,
1599 &ironlake_display_wm_info,
1600 ILK_LP0_PLANE_LATENCY,
1601 &ironlake_cursor_wm_info,
1602 ILK_LP0_CURSOR_LATENCY,
1603 &plane_wm, &cursor_wm)) {
1604 I915_WRITE(WM0_PIPEB_ILK,
1605 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
1606		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1607 " plane %d, cursor: %d\n",
1608 plane_wm, cursor_wm);
1609 enabled |= 2;
1610 }
1611
1612 /*
1613 * Calculate and update the self-refresh watermark only when one
1614 * display plane is used.
1615 */
1616 I915_WRITE(WM3_LP_ILK, 0);
1617 I915_WRITE(WM2_LP_ILK, 0);
1618 I915_WRITE(WM1_LP_ILK, 0);
1619
1620 if (!single_plane_enabled(enabled))
1621 return;
1622 enabled = ffs(enabled) - 1;
1623
1624 /* WM1 */
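	/*
	 * The WM1-3 latency values are specified in 0.5 us units, hence
	 * the multiplications by 500 here and below to convert to ns.
	 */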
1625 if (!ironlake_compute_srwm(dev, 1, enabled,
1626 ILK_READ_WM1_LATENCY() * 500,
1627 &ironlake_display_srwm_info,
1628 &ironlake_cursor_srwm_info,
1629 &fbc_wm, &plane_wm, &cursor_wm))
1630 return;
1631
1632 I915_WRITE(WM1_LP_ILK,
1633 WM1_LP_SR_EN |
1634 (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1635 (fbc_wm << WM1_LP_FBC_SHIFT) |
1636 (plane_wm << WM1_LP_SR_SHIFT) |
1637 cursor_wm);
1638
1639 /* WM2 */
1640 if (!ironlake_compute_srwm(dev, 2, enabled,
1641 ILK_READ_WM2_LATENCY() * 500,
1642 &ironlake_display_srwm_info,
1643 &ironlake_cursor_srwm_info,
1644 &fbc_wm, &plane_wm, &cursor_wm))
1645 return;
1646
1647 I915_WRITE(WM2_LP_ILK,
1648 WM2_LP_EN |
1649 (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1650 (fbc_wm << WM1_LP_FBC_SHIFT) |
1651 (plane_wm << WM1_LP_SR_SHIFT) |
1652 cursor_wm);
1653
1654 /*
1655 * WM3 is unsupported on ILK, probably because we don't have latency
1656 * data for that power state
1657 */
1658}
1659
1660static void sandybridge_update_wm(struct drm_device *dev)
1661{
1662 struct drm_i915_private *dev_priv = dev->dev_private;
1663	int latency = SNB_READ_WM0_LATENCY() * 100;	/* in 0.1 us units */
1664 u32 val;
1665 int fbc_wm, plane_wm, cursor_wm;
1666 unsigned int enabled;
1667
1668 enabled = 0;
1669 if (g4x_compute_wm0(dev, 0,
1670 &sandybridge_display_wm_info, latency,
1671 &sandybridge_cursor_wm_info, latency,
1672 &plane_wm, &cursor_wm)) {
1673 val = I915_READ(WM0_PIPEA_ILK);
1674 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1675 I915_WRITE(WM0_PIPEA_ILK, val |
1676 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1677		DRM_DEBUG_KMS("FIFO watermarks for pipe A -"
1678			      " plane %d, cursor: %d\n",
1679 plane_wm, cursor_wm);
1680 enabled |= 1;
1681 }
1682
1683 if (g4x_compute_wm0(dev, 1,
1684 &sandybridge_display_wm_info, latency,
1685 &sandybridge_cursor_wm_info, latency,
1686 &plane_wm, &cursor_wm)) {
1687 val = I915_READ(WM0_PIPEB_ILK);
1688 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1689 I915_WRITE(WM0_PIPEB_ILK, val |
1690 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1691		DRM_DEBUG_KMS("FIFO watermarks for pipe B -"
1692 " plane %d, cursor: %d\n",
1693 plane_wm, cursor_wm);
1694 enabled |= 2;
1695 }
1696
1697 /* IVB has 3 pipes */
1698 if (IS_IVYBRIDGE(dev) &&
1699 g4x_compute_wm0(dev, 2,
1700 &sandybridge_display_wm_info, latency,
1701 &sandybridge_cursor_wm_info, latency,
1702 &plane_wm, &cursor_wm)) {
1703 val = I915_READ(WM0_PIPEC_IVB);
1704 val &= ~(WM0_PIPE_PLANE_MASK | WM0_PIPE_CURSOR_MASK);
1705 I915_WRITE(WM0_PIPEC_IVB, val |
1706 ((plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm));
1707		DRM_DEBUG_KMS("FIFO watermarks for pipe C -"
1708 " plane %d, cursor: %d\n",
1709 plane_wm, cursor_wm);
1710		enabled |= 1 << 2;	/* pipe C gets its own bit */
1711 }
1712
1713 /*
1714 * Calculate and update the self-refresh watermark only when one
1715 * display plane is used.
1716 *
1717 * SNB supports 3 levels of watermarks.
1718 *
1719 * WM1/WM2/WM3 watermarks have to be enabled in ascending order
1720 * and disabled in descending order.
1722 */
1723 I915_WRITE(WM3_LP_ILK, 0);
1724 I915_WRITE(WM2_LP_ILK, 0);
1725 I915_WRITE(WM1_LP_ILK, 0);
1726
1727 if (!single_plane_enabled(enabled) ||
1728 dev_priv->sprite_scaling_enabled)
1729 return;
1730 enabled = ffs(enabled) - 1;
1731
1732 /* WM1 */
1733 if (!ironlake_compute_srwm(dev, 1, enabled,
1734 SNB_READ_WM1_LATENCY() * 500,
1735 &sandybridge_display_srwm_info,
1736 &sandybridge_cursor_srwm_info,
1737 &fbc_wm, &plane_wm, &cursor_wm))
1738 return;
1739
1740 I915_WRITE(WM1_LP_ILK,
1741 WM1_LP_SR_EN |
1742 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1743 (fbc_wm << WM1_LP_FBC_SHIFT) |
1744 (plane_wm << WM1_LP_SR_SHIFT) |
1745 cursor_wm);
1746
1747 /* WM2 */
1748 if (!ironlake_compute_srwm(dev, 2, enabled,
1749 SNB_READ_WM2_LATENCY() * 500,
1750 &sandybridge_display_srwm_info,
1751 &sandybridge_cursor_srwm_info,
1752 &fbc_wm, &plane_wm, &cursor_wm))
1753 return;
1754
1755 I915_WRITE(WM2_LP_ILK,
1756 WM2_LP_EN |
1757 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1758 (fbc_wm << WM1_LP_FBC_SHIFT) |
1759 (plane_wm << WM1_LP_SR_SHIFT) |
1760 cursor_wm);
1761
1762 /* WM3 */
1763 if (!ironlake_compute_srwm(dev, 3, enabled,
1764 SNB_READ_WM3_LATENCY() * 500,
1765 &sandybridge_display_srwm_info,
1766 &sandybridge_cursor_srwm_info,
1767 &fbc_wm, &plane_wm, &cursor_wm))
1768 return;
1769
1770 I915_WRITE(WM3_LP_ILK,
1771 WM3_LP_EN |
1772 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
1773 (fbc_wm << WM1_LP_FBC_SHIFT) |
1774 (plane_wm << WM1_LP_SR_SHIFT) |
1775 cursor_wm);
1776}
1777
1778static bool
1779sandybridge_compute_sprite_wm(struct drm_device *dev, int plane,
1780 uint32_t sprite_width, int pixel_size,
1781 const struct intel_watermark_params *display,
1782 int display_latency_ns, int *sprite_wm)
1783{
1784 struct drm_crtc *crtc;
1785 int clock;
1786 int entries, tlb_miss;
1787
1788 crtc = intel_get_crtc_for_plane(dev, plane);
1789 if (crtc->fb == NULL || !crtc->enabled) {
1790 *sprite_wm = display->guard_size;
1791 return false;
1792 }
1793
1794 clock = crtc->mode.clock;
1795
1796 /* Use the small buffer method to calculate the sprite watermark */
1797 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
1798 tlb_miss = display->fifo_size*display->cacheline_size -
1799 sprite_width * 8;
1800 if (tlb_miss > 0)
1801 entries += tlb_miss;
1802 entries = DIV_ROUND_UP(entries, display->cacheline_size);
1803 *sprite_wm = entries + display->guard_size;
1804 if (*sprite_wm > (int)display->max_wm)
1805 *sprite_wm = display->max_wm;
1806
1807 return true;
1808}
1809
1810static bool
1811sandybridge_compute_sprite_srwm(struct drm_device *dev, int plane,
1812 uint32_t sprite_width, int pixel_size,
1813 const struct intel_watermark_params *display,
1814 int latency_ns, int *sprite_wm)
1815{
1816 struct drm_crtc *crtc;
1817 unsigned long line_time_us;
1818 int clock;
1819 int line_count, line_size;
1820 int small, large;
1821 int entries;
1822
1823 if (!latency_ns) {
1824 *sprite_wm = 0;
1825 return false;
1826 }
1827
1828 crtc = intel_get_crtc_for_plane(dev, plane);
1829 clock = crtc->mode.clock;
1830 if (!clock) {
1831 *sprite_wm = 0;
1832 return false;
1833 }
1834
1835 line_time_us = (sprite_width * 1000) / clock;
1836 if (!line_time_us) {
1837 *sprite_wm = 0;
1838 return false;
1839 }
1840
1841 line_count = (latency_ns / line_time_us + 1000) / 1000;
1842 line_size = sprite_width * pixel_size;
1843
1844 /* Use the minimum of the small and large buffer method for primary */
1845 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
1846 large = line_count * line_size;
1847
1848 entries = DIV_ROUND_UP(min(small, large), display->cacheline_size);
1849 *sprite_wm = entries + display->guard_size;
1850
1851	return *sprite_wm <= 0x3ff;
1852}
1853
1854static void sandybridge_update_sprite_wm(struct drm_device *dev, int pipe,
1855 uint32_t sprite_width, int pixel_size)
1856{
1857 struct drm_i915_private *dev_priv = dev->dev_private;
1858	int latency = SNB_READ_WM0_LATENCY() * 100;	/* in 0.1 us units */
1859 u32 val;
1860 int sprite_wm, reg;
1861 int ret;
1862
1863 switch (pipe) {
1864 case 0:
1865 reg = WM0_PIPEA_ILK;
1866 break;
1867 case 1:
1868 reg = WM0_PIPEB_ILK;
1869 break;
1870 case 2:
1871 reg = WM0_PIPEC_IVB;
1872 break;
1873 default:
1874 return; /* bad pipe */
1875 }
1876
1877 ret = sandybridge_compute_sprite_wm(dev, pipe, sprite_width, pixel_size,
1878 &sandybridge_display_wm_info,
1879 latency, &sprite_wm);
1880 if (!ret) {
1881 DRM_DEBUG_KMS("failed to compute sprite wm for pipe %d\n",
1882 pipe);
1883 return;
1884 }
1885
1886 val = I915_READ(reg);
1887 val &= ~WM0_PIPE_SPRITE_MASK;
1888 I915_WRITE(reg, val | (sprite_wm << WM0_PIPE_SPRITE_SHIFT));
1889	DRM_DEBUG_KMS("sprite watermarks for pipe %d - %d\n", pipe, sprite_wm);
1890
1892 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1893 pixel_size,
1894 &sandybridge_display_srwm_info,
1895 SNB_READ_WM1_LATENCY() * 500,
1896 &sprite_wm);
1897 if (!ret) {
1898 DRM_DEBUG_KMS("failed to compute sprite lp1 wm on pipe %d\n",
1899 pipe);
1900 return;
1901 }
1902 I915_WRITE(WM1S_LP_ILK, sprite_wm);
1903
1904 /* Only IVB has two more LP watermarks for sprite */
1905 if (!IS_IVYBRIDGE(dev))
1906 return;
1907
1908 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1909 pixel_size,
1910 &sandybridge_display_srwm_info,
1911 SNB_READ_WM2_LATENCY() * 500,
1912 &sprite_wm);
1913 if (!ret) {
1914 DRM_DEBUG_KMS("failed to compute sprite lp2 wm on pipe %d\n",
1915 pipe);
1916 return;
1917 }
1918 I915_WRITE(WM2S_LP_IVB, sprite_wm);
1919
1920 ret = sandybridge_compute_sprite_srwm(dev, pipe, sprite_width,
1921 pixel_size,
1922 &sandybridge_display_srwm_info,
1923 SNB_READ_WM3_LATENCY() * 500,
1924 &sprite_wm);
1925 if (!ret) {
1926 DRM_DEBUG_KMS("failed to compute sprite lp3 wm on pipe %d\n",
1927 pipe);
1928 return;
1929 }
1930 I915_WRITE(WM3S_LP_IVB, sprite_wm);
1931}
1932
1933/**
1934 * intel_update_watermarks - update FIFO watermark values based on current modes
1935 *
1936 * Calculate watermark values for the various WM regs based on current mode
1937 * and plane configuration.
1938 *
1939 * There are several cases to deal with here:
1940 * - normal (i.e. non-self-refresh)
1941 * - self-refresh (SR) mode
1942 * - lines are large relative to FIFO size (buffer can hold up to 2)
1943 * - lines are small relative to FIFO size (buffer can hold more than 2
1944 * lines), so need to account for TLB latency
1945 *
1946 * The normal calculation is:
1947 * watermark = dotclock * bytes per pixel * latency
1948 * where latency is platform & configuration dependent (we assume pessimal
1949 * values here).
1950 *
1951 * The SR calculation is:
1952 * watermark = (trunc(latency/line time)+1) * surface width *
1953 * bytes per pixel
1954 * where
1955 * line time = htotal / dotclock
1956 * surface width = hdisplay for normal plane and 64 for cursor
1957 * and latency is assumed to be high, as above.
1958 *
1959 * The final value programmed to the register should always be rounded up,
1960 * and include an extra 2 entries to account for clock crossings.
1961 *
1962 * We don't use the sprite, so we can ignore that. And on Crestline we have
1963 * to set the non-SR watermarks to 8.
1964 */
1965void intel_update_watermarks(struct drm_device *dev)
1966{
1967 struct drm_i915_private *dev_priv = dev->dev_private;
1968
1969 if (dev_priv->display.update_wm)
1970 dev_priv->display.update_wm(dev);
1971}
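/*
 * Worked SR example for the formula above (hypothetical mode): with
 * htotal = 2200 and a 100 MHz dot clock, line time = 22 us; for a 12 us
 * latency, trunc(12 / 22) + 1 = 1 line, so a 1920-wide 4 bpp plane needs
 * 1 * 1920 * 4 = 7680 bytes before rounding up and adding the two
 * clock-crossing entries.
 */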
1972
1973void intel_update_sprite_watermarks(struct drm_device *dev, int pipe,
1974 uint32_t sprite_width, int pixel_size)
1975{
1976 struct drm_i915_private *dev_priv = dev->dev_private;
1977
1978 if (dev_priv->display.update_sprite_wm)
1979 dev_priv->display.update_sprite_wm(dev, pipe, sprite_width,
1980 pixel_size);
1981}
1982
1983static struct drm_i915_gem_object *
1984intel_alloc_context_page(struct drm_device *dev)
1985{
1986 struct drm_i915_gem_object *ctx;
1987 int ret;
1988
1989 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
1990
1991 ctx = i915_gem_alloc_object(dev, 4096);
1992 if (!ctx) {
1993 DRM_DEBUG("failed to alloc power context, RC6 disabled\n");
1994 return NULL;
1995 }
1996
1997 ret = i915_gem_object_pin(ctx, 4096, true);
1998 if (ret) {
1999 DRM_ERROR("failed to pin power context: %d\n", ret);
2000 goto err_unref;
2001 }
2002
2003 ret = i915_gem_object_set_to_gtt_domain(ctx, 1);
2004 if (ret) {
2005 DRM_ERROR("failed to set-domain on power context: %d\n", ret);
2006 goto err_unpin;
2007 }
2008
2009 return ctx;
2010
2011err_unpin:
2012 i915_gem_object_unpin(ctx);
2013err_unref:
2014	drm_gem_object_unreference(&ctx->base);	/* caller still holds struct_mutex */
2016 return NULL;
2017}
2018
2019bool ironlake_set_drps(struct drm_device *dev, u8 val)
2020{
2021 struct drm_i915_private *dev_priv = dev->dev_private;
2022 u16 rgvswctl;
2023
2024 rgvswctl = I915_READ16(MEMSWCTL);
2025 if (rgvswctl & MEMCTL_CMD_STS) {
2026 DRM_DEBUG("gpu busy, RCS change rejected\n");
2027 return false; /* still busy with another command */
2028 }
2029
2030 rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
2031 (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
2032 I915_WRITE16(MEMSWCTL, rgvswctl);
2033 POSTING_READ16(MEMSWCTL);
2034
2035 rgvswctl |= MEMCTL_CMD_STS;
2036 I915_WRITE16(MEMSWCTL, rgvswctl);
2037
2038 return true;
2039}
2040
2041void ironlake_enable_drps(struct drm_device *dev)
2042{
2043 struct drm_i915_private *dev_priv = dev->dev_private;
2044 u32 rgvmodectl = I915_READ(MEMMODECTL);
2045 u8 fmax, fmin, fstart, vstart;
2046
2047 /* Enable temp reporting */
2048 I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN);
2049 I915_WRITE16(TSC1, I915_READ(TSC1) | TSE);
2050
2051 /* 100ms RC evaluation intervals */
2052 I915_WRITE(RCUPEI, 100000);
2053 I915_WRITE(RCDNEI, 100000);
2054
2055 /* Set max/min thresholds to 90ms and 80ms respectively */
2056 I915_WRITE(RCBMAXAVG, 90000);
2057 I915_WRITE(RCBMINAVG, 80000);
2058
2059 I915_WRITE(MEMIHYST, 1);
2060
2061 /* Set up min, max, and cur for interrupt handling */
2062 fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
2063 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
2064 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
2065 MEMMODE_FSTART_SHIFT;
2066
2067 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
2068 PXVFREQ_PX_SHIFT;
2069
2070 dev_priv->fmax = fmax; /* IPS callback will increase this */
2071 dev_priv->fstart = fstart;
2072
2073 dev_priv->max_delay = fstart;
2074 dev_priv->min_delay = fmin;
2075 dev_priv->cur_delay = fstart;
2076
2077 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
2078 fmax, fmin, fstart);
2079
2080 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
2081
2082 /*
2083 * Interrupts will be enabled in ironlake_irq_postinstall
2084 */
2085
2086 I915_WRITE(VIDSTART, vstart);
2087 POSTING_READ(VIDSTART);
2088
2089 rgvmodectl |= MEMMODE_SWMODE_EN;
2090 I915_WRITE(MEMMODECTL, rgvmodectl);
2091
2092 if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10))
2093 DRM_ERROR("stuck trying to change perf mode\n");
2094 msleep(1);
2095
2096 ironlake_set_drps(dev, fstart);
2097
2098 dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) +
2099 I915_READ(0x112e0);
2100 dev_priv->last_time1 = jiffies_to_msecs(jiffies);
2101 dev_priv->last_count2 = I915_READ(0x112f4);
2102 getrawmonotonic(&dev_priv->last_time2);
2103}
2104
2105void ironlake_disable_drps(struct drm_device *dev)
2106{
2107 struct drm_i915_private *dev_priv = dev->dev_private;
2108 u16 rgvswctl = I915_READ16(MEMSWCTL);
2109
2110 /* Ack interrupts, disable EFC interrupt */
2111 I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
2112 I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
2113 I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
2114 I915_WRITE(DEIIR, DE_PCU_EVENT);
2115 I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
2116
2117 /* Go back to the starting frequency */
2118 ironlake_set_drps(dev, dev_priv->fstart);
2119 msleep(1);
2120 rgvswctl |= MEMCTL_CMD_STS;
2121 I915_WRITE(MEMSWCTL, rgvswctl);
2122 msleep(1);
2123
2124}
2125
2126void gen6_set_rps(struct drm_device *dev, u8 val)
2127{
2128 struct drm_i915_private *dev_priv = dev->dev_private;
2129 u32 swreq;
2130
2131 swreq = (val & 0x3ff) << 25;
2132 I915_WRITE(GEN6_RPNSWREQ, swreq);
2133}
2134
2135void gen6_disable_rps(struct drm_device *dev)
2136{
2137 struct drm_i915_private *dev_priv = dev->dev_private;
2138
2139 I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
2140 I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
2141 I915_WRITE(GEN6_PMIER, 0);
2142	/* Completely masking the PM interrupts here doesn't race with the rps
2143	 * work item re-unmasking them, because that path uses a different
2144	 * register (PMIMR) to mask PM interrupts. The only risk is in leaving
2145	 * stale bits in PMIIR and PMIMR, which gen6_enable_rps will clean up. */
2146
2147 spin_lock_irq(&dev_priv->rps_lock);
2148 dev_priv->pm_iir = 0;
2149 spin_unlock_irq(&dev_priv->rps_lock);
2150
2151 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2152}
2153
2154int intel_enable_rc6(const struct drm_device *dev)
2155{
2156 /*
2157 * Respect the kernel parameter if it is set
2158 */
2159 if (i915_enable_rc6 >= 0)
2160 return i915_enable_rc6;
2161
2162 /*
2163 * Disable RC6 on Ironlake
2164 */
2165 if (INTEL_INFO(dev)->gen == 5)
2166 return 0;
2167
2168 /* Sorry Haswell, no RC6 for you for now. */
2169 if (IS_HASWELL(dev))
2170 return 0;
2171
2172	/*
2173	 * Enable only plain RC6 on Sandybridge; deep RC6 stays disabled
2174	 */
2175 if (INTEL_INFO(dev)->gen == 6) {
2176 DRM_DEBUG_DRIVER("Sandybridge: deep RC6 disabled\n");
2177 return INTEL_RC6_ENABLE;
2178 }
2179 DRM_DEBUG_DRIVER("RC6 and deep RC6 enabled\n");
2180 return (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE);
2181}
2182
2183void gen6_enable_rps(struct drm_i915_private *dev_priv)
2184{
2185 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
2186 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
2187 u32 pcu_mbox, rc6_mask = 0;
2188 u32 gtfifodbg;
2189 int cur_freq, min_freq, max_freq;
2190 int rc6_mode;
2191 int i;
2192
2193 /* Here begins a magic sequence of register writes to enable
2194 * auto-downclocking.
2195 *
2196 * Perhaps there might be some value in exposing these to
2197 * userspace...
2198 */
2199 I915_WRITE(GEN6_RC_STATE, 0);
2200 mutex_lock(&dev_priv->dev->struct_mutex);
2201
2202	/* Clear the DBG now so we don't get confused by earlier errors */
2203 if ((gtfifodbg = I915_READ(GTFIFODBG))) {
2204 DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg);
2205 I915_WRITE(GTFIFODBG, gtfifodbg);
2206 }
2207
2208 gen6_gt_force_wake_get(dev_priv);
2209
2210 /* disable the counters and set deterministic thresholds */
2211 I915_WRITE(GEN6_RC_CONTROL, 0);
2212
2213 I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
2214 I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
2215 I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
2216 I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
2217 I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
2218
2219 for (i = 0; i < I915_NUM_RINGS; i++)
2220 I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
2221
2222 I915_WRITE(GEN6_RC_SLEEP, 0);
2223 I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
2224 I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
2225 I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
2226 I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
2227
2228 rc6_mode = intel_enable_rc6(dev_priv->dev);
2229 if (rc6_mode & INTEL_RC6_ENABLE)
2230 rc6_mask |= GEN6_RC_CTL_RC6_ENABLE;
2231
2232 if (rc6_mode & INTEL_RC6p_ENABLE)
2233 rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE;
2234
2235 if (rc6_mode & INTEL_RC6pp_ENABLE)
2236 rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE;
2237
2238 DRM_INFO("Enabling RC6 states: RC6 %s, RC6p %s, RC6pp %s\n",
2239 (rc6_mode & INTEL_RC6_ENABLE) ? "on" : "off",
2240 (rc6_mode & INTEL_RC6p_ENABLE) ? "on" : "off",
2241 (rc6_mode & INTEL_RC6pp_ENABLE) ? "on" : "off");
2242
2243 I915_WRITE(GEN6_RC_CONTROL,
2244 rc6_mask |
2245 GEN6_RC_CTL_EI_MODE(1) |
2246 GEN6_RC_CTL_HW_ENABLE);
2247
2248 I915_WRITE(GEN6_RPNSWREQ,
2249 GEN6_FREQUENCY(10) |
2250 GEN6_OFFSET(0) |
2251 GEN6_AGGRESSIVE_TURBO);
2252 I915_WRITE(GEN6_RC_VIDEO_FREQ,
2253 GEN6_FREQUENCY(12));
2254
2255 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
2256 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
2257 18 << 24 |
2258 6 << 16);
2259 I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000);
2260 I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000);
2261 I915_WRITE(GEN6_RP_UP_EI, 100000);
2262 I915_WRITE(GEN6_RP_DOWN_EI, 5000000);
2263 I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
2264 I915_WRITE(GEN6_RP_CONTROL,
2265 GEN6_RP_MEDIA_TURBO |
2266 GEN6_RP_MEDIA_HW_MODE |
2267 GEN6_RP_MEDIA_IS_GFX |
2268 GEN6_RP_ENABLE |
2269 GEN6_RP_UP_BUSY_AVG |
2270 GEN6_RP_DOWN_IDLE_CONT);
2271
2272 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2273 500))
2274 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2275
2276 I915_WRITE(GEN6_PCODE_DATA, 0);
2277 I915_WRITE(GEN6_PCODE_MAILBOX,
2278 GEN6_PCODE_READY |
2279 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2280 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2281 500))
2282 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2283
2284 min_freq = (rp_state_cap & 0xff0000) >> 16;
2285 max_freq = rp_state_cap & 0xff;
2286 cur_freq = (gt_perf_status & 0xff00) >> 8;
2287
2288 /* Check for overclock support */
2289 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2290 500))
2291 DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
2292 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS);
2293 pcu_mbox = I915_READ(GEN6_PCODE_DATA);
2294 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
2295 500))
2296 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
2297 if (pcu_mbox & (1<<31)) { /* OC supported */
2298 max_freq = pcu_mbox & 0xff;
2299		DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", max_freq * 50);
2300 }
2301
2302	/* In units of 50MHz */
2303 dev_priv->max_delay = max_freq;
2304 dev_priv->min_delay = min_freq;
2305 dev_priv->cur_delay = cur_freq;
2306
2307 /* requires MSI enabled */
2308 I915_WRITE(GEN6_PMIER,
2309 GEN6_PM_MBOX_EVENT |
2310 GEN6_PM_THERMAL_EVENT |
2311 GEN6_PM_RP_DOWN_TIMEOUT |
2312 GEN6_PM_RP_UP_THRESHOLD |
2313 GEN6_PM_RP_DOWN_THRESHOLD |
2314 GEN6_PM_RP_UP_EI_EXPIRED |
2315 GEN6_PM_RP_DOWN_EI_EXPIRED);
2316 spin_lock_irq(&dev_priv->rps_lock);
2317 WARN_ON(dev_priv->pm_iir != 0);
2318 I915_WRITE(GEN6_PMIMR, 0);
2319 spin_unlock_irq(&dev_priv->rps_lock);
2320 /* enable all PM interrupts */
2321 I915_WRITE(GEN6_PMINTRMSK, 0);
2322
2323 gen6_gt_force_wake_put(dev_priv);
2324 mutex_unlock(&dev_priv->dev->struct_mutex);
2325}
2326
2327void gen6_update_ring_freq(struct drm_i915_private *dev_priv)
2328{
2329 int min_freq = 15;
2330 int gpu_freq, ia_freq, max_ia_freq;
2331 int scaling_factor = 180;
2332
2333 max_ia_freq = cpufreq_quick_get_max(0);
2334 /*
2335 * Default to measured freq if none found, PCU will ensure we don't go
2336 * over
2337 */
2338 if (!max_ia_freq)
2339 max_ia_freq = tsc_khz;
2340
2341 /* Convert from kHz to MHz */
2342 max_ia_freq /= 1000;
2343
2344 mutex_lock(&dev_priv->dev->struct_mutex);
2345
2346 /*
2347 * For each potential GPU frequency, load a ring frequency we'd like
2348 * to use for memory access. We do this by specifying the IA frequency
2349 * the PCU should use as a reference to determine the ring frequency.
2350 */
2351 for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay;
2352 gpu_freq--) {
2353 int diff = dev_priv->max_delay - gpu_freq;
2354
2355 /*
2356 * For GPU frequencies less than 750MHz, just use the lowest
2357 * ring freq.
2358 */
2359 if (gpu_freq < min_freq)
2360 ia_freq = 800;
2361 else
2362 ia_freq = max_ia_freq - ((diff * scaling_factor) / 2);
2363 ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100);
2364
2365 I915_WRITE(GEN6_PCODE_DATA,
2366 (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) |
2367 gpu_freq);
2368 I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY |
2369 GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
2370 if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) &
2371 GEN6_PCODE_READY) == 0, 10)) {
2372 DRM_ERROR("pcode write of freq table timed out\n");
2373 continue;
2374 }
2375 }
2376
2377 mutex_unlock(&dev_priv->dev->struct_mutex);
2378}
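/*
 * Ring/IA mapping example (assumed numbers): with max_ia_freq = 3400
 * MHz, max_delay = 22 and gpu_freq = 16, diff = 6 and ia_freq = 3400 -
 * ((6 * 180) / 2) = 2860 MHz, which DIV_ROUND_CLOSEST(2860, 100)
 * reduces to a ratio of 29 for the pcode frequency table entry.
 */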
2379
2380static void ironlake_teardown_rc6(struct drm_device *dev)
2381{
2382 struct drm_i915_private *dev_priv = dev->dev_private;
2383
2384 if (dev_priv->renderctx) {
2385 i915_gem_object_unpin(dev_priv->renderctx);
2386 drm_gem_object_unreference(&dev_priv->renderctx->base);
2387 dev_priv->renderctx = NULL;
2388 }
2389
2390 if (dev_priv->pwrctx) {
2391 i915_gem_object_unpin(dev_priv->pwrctx);
2392 drm_gem_object_unreference(&dev_priv->pwrctx->base);
2393 dev_priv->pwrctx = NULL;
2394 }
2395}
2396
2397void ironlake_disable_rc6(struct drm_device *dev)
2398{
2399 struct drm_i915_private *dev_priv = dev->dev_private;
2400
2401 if (I915_READ(PWRCTXA)) {
2402 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
2403 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
2404 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
2405 50);
2406
2407 I915_WRITE(PWRCTXA, 0);
2408 POSTING_READ(PWRCTXA);
2409
2410 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2411 POSTING_READ(RSTDBYCTL);
2412 }
2413
2414 ironlake_teardown_rc6(dev);
2415}
2416
2417static int ironlake_setup_rc6(struct drm_device *dev)
2418{
2419 struct drm_i915_private *dev_priv = dev->dev_private;
2420
2421 if (dev_priv->renderctx == NULL)
2422 dev_priv->renderctx = intel_alloc_context_page(dev);
2423 if (!dev_priv->renderctx)
2424 return -ENOMEM;
2425
2426 if (dev_priv->pwrctx == NULL)
2427 dev_priv->pwrctx = intel_alloc_context_page(dev);
2428 if (!dev_priv->pwrctx) {
2429 ironlake_teardown_rc6(dev);
2430 return -ENOMEM;
2431 }
2432
2433 return 0;
2434}
2435
2436void ironlake_enable_rc6(struct drm_device *dev)
2437{
2438 struct drm_i915_private *dev_priv = dev->dev_private;
2439 int ret;
2440
2441 /* rc6 disabled by default due to repeated reports of hanging during
2442 * boot and resume.
2443 */
2444 if (!intel_enable_rc6(dev))
2445 return;
2446
2447 mutex_lock(&dev->struct_mutex);
2448 ret = ironlake_setup_rc6(dev);
2449 if (ret) {
2450 mutex_unlock(&dev->struct_mutex);
2451 return;
2452 }
2453
2454 /*
2455 * GPU can automatically power down the render unit if given a page
2456 * to save state.
2457 */
2458 ret = BEGIN_LP_RING(6);
2459 if (ret) {
2460 ironlake_teardown_rc6(dev);
2461 mutex_unlock(&dev->struct_mutex);
2462 return;
2463 }
2464
2465 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
2466 OUT_RING(MI_SET_CONTEXT);
2467 OUT_RING(dev_priv->renderctx->gtt_offset |
2468 MI_MM_SPACE_GTT |
2469 MI_SAVE_EXT_STATE_EN |
2470 MI_RESTORE_EXT_STATE_EN |
2471 MI_RESTORE_INHIBIT);
2472 OUT_RING(MI_SUSPEND_FLUSH);
2473 OUT_RING(MI_NOOP);
2474 OUT_RING(MI_FLUSH);
2475 ADVANCE_LP_RING();
2476
2477 /*
2478	 * Wait for the command parser to advance past MI_SET_CONTEXT. The HW
2479	 * does an implicit flush; combined with the MI_FLUSH above, it should
2480	 * be safe to assume that renderctx is valid afterwards
2481 */
2482 ret = intel_wait_ring_idle(LP_RING(dev_priv));
2483 if (ret) {
2484		DRM_ERROR("failed to enable ironlake power savings\n");
2485 ironlake_teardown_rc6(dev);
2486 mutex_unlock(&dev->struct_mutex);
2487 return;
2488 }
2489
2490 I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN);
2491 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
2492 mutex_unlock(&dev->struct_mutex);
2493}
2494
2495static unsigned long intel_pxfreq(u32 vidfreq)
2496{
2497 unsigned long freq;
2498 int div = (vidfreq & 0x3f0000) >> 16;
2499 int post = (vidfreq & 0x3000) >> 12;
2500 int pre = (vidfreq & 0x7);
2501
2502 if (!pre)
2503 return 0;
2504
2505 freq = ((div * 133333) / ((1<<post) * pre));
2506
2507 return freq;
2508}
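/*
 * Example decode (hypothetical vidfreq value): for div = 18, post = 1
 * and pre = 3, freq = (18 * 133333) / ((1 << 1) * 3) = 399999, i.e.
 * roughly 400 MHz derived from the 133.333 MHz reference clock.
 */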
2509
2510void intel_init_emon(struct drm_device *dev)
2511{
2512 struct drm_i915_private *dev_priv = dev->dev_private;
2513 u32 lcfuse;
2514 u8 pxw[16];
2515 int i;
2516
2517	/* Disable PMON while programming the event weights */
2518 I915_WRITE(ECR, 0);
2519 POSTING_READ(ECR);
2520
2521 /* Program energy weights for various events */
2522 I915_WRITE(SDEW, 0x15040d00);
2523 I915_WRITE(CSIEW0, 0x007f0000);
2524 I915_WRITE(CSIEW1, 0x1e220004);
2525 I915_WRITE(CSIEW2, 0x04000004);
2526
2527 for (i = 0; i < 5; i++)
2528 I915_WRITE(PEW + (i * 4), 0);
2529 for (i = 0; i < 3; i++)
2530 I915_WRITE(DEW + (i * 4), 0);
2531
2532 /* Program P-state weights to account for frequency power adjustment */
2533 for (i = 0; i < 16; i++) {
2534 u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4));
2535 unsigned long freq = intel_pxfreq(pxvidfreq);
2536 unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >>
2537 PXVFREQ_PX_SHIFT;
2538 unsigned long val;
2539
2540 val = vid * vid;
2541 val *= (freq / 1000);
2542 val *= 255;
2543 val /= (127*127*900);
2544 if (val > 0xff)
2545 DRM_ERROR("bad pxval: %ld\n", val);
2546 pxw[i] = val;
2547 }
2548 /* Render standby states get 0 weight */
2549 pxw[14] = 0;
2550 pxw[15] = 0;
2551
2552 for (i = 0; i < 4; i++) {
2553 u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) |
2554 (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]);
2555 I915_WRITE(PXW + (i * 4), val);
2556 }
2557
2558 /* Adjust magic regs to magic values (more experimental results) */
2559 I915_WRITE(OGW0, 0);
2560 I915_WRITE(OGW1, 0);
2561 I915_WRITE(EG0, 0x00007f00);
2562 I915_WRITE(EG1, 0x0000000e);
2563 I915_WRITE(EG2, 0x000e0000);
2564 I915_WRITE(EG3, 0x68000300);
2565 I915_WRITE(EG4, 0x42000000);
2566 I915_WRITE(EG5, 0x00140031);
2567 I915_WRITE(EG6, 0);
2568 I915_WRITE(EG7, 0);
2569
2570 for (i = 0; i < 8; i++)
2571 I915_WRITE(PXWL + (i * 4), 0);
2572
2573 /* Enable PMON + select events */
2574 I915_WRITE(ECR, 0x80000019);
2575
2576 lcfuse = I915_READ(LCFUSE02);
2577
2578 dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
2579}
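/*
 * P-state weight example (illustrative values): for vid = 127 and a
 * 400000 kHz px frequency, val = 127 * 127 * (400000 / 1000) * 255 /
 * (127 * 127 * 900) = (400 * 255) / 900 = 113, safely below 0xff.
 */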
2580
2581static void ironlake_init_clock_gating(struct drm_device *dev)
2582{
2583 struct drm_i915_private *dev_priv = dev->dev_private;
2584 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
2585
2586 /* Required for FBC */
2587 dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE |
2588 DPFCRUNIT_CLOCK_GATE_DISABLE |
2589 DPFDUNIT_CLOCK_GATE_DISABLE;
2590 /* Required for CxSR */
2591 dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE;
2592
2593 I915_WRITE(PCH_3DCGDIS0,
2594 MARIUNIT_CLOCK_GATE_DISABLE |
2595 SVSMUNIT_CLOCK_GATE_DISABLE);
2596 I915_WRITE(PCH_3DCGDIS1,
2597 VFMUNIT_CLOCK_GATE_DISABLE);
2598
2599 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
2600
2601 /*
2602 * According to the spec the following bits should be set in
2603	 * order to enable memory self-refresh:
2604	 * bit 22/21 of 0x42004
2605	 * bit 5 of 0x42020
2606	 * bit 15 of 0x45000
2607 */
2608 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2609 (I915_READ(ILK_DISPLAY_CHICKEN2) |
2610 ILK_DPARB_GATE | ILK_VSDPFD_FULL));
2611 I915_WRITE(ILK_DSPCLK_GATE,
2612 (I915_READ(ILK_DSPCLK_GATE) |
2613 ILK_DPARB_CLK_GATE));
2614 I915_WRITE(DISP_ARB_CTL,
2615 (I915_READ(DISP_ARB_CTL) |
2616 DISP_FBC_WM_DIS));
2617 I915_WRITE(WM3_LP_ILK, 0);
2618 I915_WRITE(WM2_LP_ILK, 0);
2619 I915_WRITE(WM1_LP_ILK, 0);
2620
2621 /*
2622	 * Based on documentation from the hardware team, the following bits
2623	 * should be set unconditionally in order to enable FBC:
2624	 * bit 22 of 0x42000
2625	 * bit 22 of 0x42004
2626	 * bits 7, 8 and 9 of 0x42020
2627 */
2628 if (IS_IRONLAKE_M(dev)) {
2629 I915_WRITE(ILK_DISPLAY_CHICKEN1,
2630 I915_READ(ILK_DISPLAY_CHICKEN1) |
2631 ILK_FBCQ_DIS);
2632 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2633 I915_READ(ILK_DISPLAY_CHICKEN2) |
2634 ILK_DPARB_GATE);
2635 I915_WRITE(ILK_DSPCLK_GATE,
2636 I915_READ(ILK_DSPCLK_GATE) |
2637 ILK_DPFC_DIS1 |
2638 ILK_DPFC_DIS2 |
2639 ILK_CLK_FBC);
2640 }
2641
2642 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2643 I915_READ(ILK_DISPLAY_CHICKEN2) |
2644 ILK_ELPIN_409_SELECT);
2645 I915_WRITE(_3D_CHICKEN2,
2646 _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
2647 _3D_CHICKEN2_WM_READ_PIPELINED);
2648}
2649
2650static void gen6_init_clock_gating(struct drm_device *dev)
2651{
2652 struct drm_i915_private *dev_priv = dev->dev_private;
2653 int pipe;
2654 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
2655
2656 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
2657
2658 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2659 I915_READ(ILK_DISPLAY_CHICKEN2) |
2660 ILK_ELPIN_409_SELECT);
2661
2662 I915_WRITE(WM3_LP_ILK, 0);
2663 I915_WRITE(WM2_LP_ILK, 0);
2664 I915_WRITE(WM1_LP_ILK, 0);
2665
2666 /* clear masked bit */
2667 I915_WRITE(CACHE_MODE_0,
2668 CM0_STC_EVICT_DISABLE_LRA_SNB << CM0_MASK_SHIFT);
2669
2670 I915_WRITE(GEN6_UCGCTL1,
2671 I915_READ(GEN6_UCGCTL1) |
2672 GEN6_BLBUNIT_CLOCK_GATE_DISABLE |
2673 GEN6_CSUNIT_CLOCK_GATE_DISABLE);
2674
2675 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
2676 * gating disable must be set. Failure to set it results in
2677 * flickering pixels due to Z write ordering failures after
2678 * some amount of runtime in the Mesa "fire" demo, and Unigine
2679 * Sanctuary and Tropics, and apparently anything else with
2680 * alpha test or pixel discard.
2681 *
2682 * According to the spec, bit 11 (RCCUNIT) must also be set,
2683 * but we didn't debug actual testcases to find it out.
2684 */
2685 I915_WRITE(GEN6_UCGCTL2,
2686 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
2687 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
2688
2689 /* Bspec says we need to always set all mask bits. */
2690 I915_WRITE(_3D_CHICKEN, (0xFFFF << 16) |
2691 _3D_CHICKEN_SF_DISABLE_FASTCLIP_CULL);
2692
2693 /*
2694 * According to the spec the following bits should be
2695 * set in order to enable memory self-refresh and fbc:
2696	 * bit 21 and bit 22 of 0x42000
2697	 * bit 21 and bit 22 of 0x42004
2698	 * bit 5 and bit 7 of 0x42020
2699	 * bit 14 of 0x70180
2700	 * bit 14 of 0x71180
2701 */
2702 I915_WRITE(ILK_DISPLAY_CHICKEN1,
2703 I915_READ(ILK_DISPLAY_CHICKEN1) |
2704 ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
2705 I915_WRITE(ILK_DISPLAY_CHICKEN2,
2706 I915_READ(ILK_DISPLAY_CHICKEN2) |
2707 ILK_DPARB_GATE | ILK_VSDPFD_FULL);
2708 I915_WRITE(ILK_DSPCLK_GATE,
2709 I915_READ(ILK_DSPCLK_GATE) |
2710 ILK_DPARB_CLK_GATE |
2711 ILK_DPFD_CLK_GATE);
2712
2713 for_each_pipe(pipe) {
2714 I915_WRITE(DSPCNTR(pipe),
2715 I915_READ(DSPCNTR(pipe)) |
2716 DISPPLANE_TRICKLE_FEED_DISABLE);
2717 intel_flush_display_plane(dev_priv, pipe);
2718 }
2719}
2720
2721static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
2722{
2723 uint32_t reg = I915_READ(GEN7_FF_THREAD_MODE);
2724
2725 reg &= ~GEN7_FF_SCHED_MASK;
2726 reg |= GEN7_FF_TS_SCHED_HW;
2727 reg |= GEN7_FF_VS_SCHED_HW;
2728 reg |= GEN7_FF_DS_SCHED_HW;
2729
2730 I915_WRITE(GEN7_FF_THREAD_MODE, reg);
2731}
2732
2733static void ivybridge_init_clock_gating(struct drm_device *dev)
2734{
2735 struct drm_i915_private *dev_priv = dev->dev_private;
2736 int pipe;
2737 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
2738
2739 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
2740
2741 I915_WRITE(WM3_LP_ILK, 0);
2742 I915_WRITE(WM2_LP_ILK, 0);
2743 I915_WRITE(WM1_LP_ILK, 0);
2744
2745 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
2746 * This implements the WaDisableRCZUnitClockGating workaround.
2747 */
2748 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
2749
2750 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
2751
2752 I915_WRITE(IVB_CHICKEN3,
2753 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
2754 CHICKEN3_DGMG_DONE_FIX_DISABLE);
2755
2756 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
2757 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
2758 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
2759
2760 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
2761 I915_WRITE(GEN7_L3CNTLREG1,
2762 GEN7_WA_FOR_GEN7_L3_CONTROL);
2763 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
2764 GEN7_WA_L3_CHICKEN_MODE);
2765
2766 /* This is required by WaCatErrorRejectionIssue */
2767 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
2768 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
2769 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
2770
2771 for_each_pipe(pipe) {
2772 I915_WRITE(DSPCNTR(pipe),
2773 I915_READ(DSPCNTR(pipe)) |
2774 DISPPLANE_TRICKLE_FEED_DISABLE);
2775 intel_flush_display_plane(dev_priv, pipe);
2776 }
2777
2778 gen7_setup_fixed_func_scheduler(dev_priv);
2779}
2780
2781static void valleyview_init_clock_gating(struct drm_device *dev)
2782{
2783 struct drm_i915_private *dev_priv = dev->dev_private;
2784 int pipe;
2785 uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE;
2786
2787 I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);
2788
2789 I915_WRITE(WM3_LP_ILK, 0);
2790 I915_WRITE(WM2_LP_ILK, 0);
2791 I915_WRITE(WM1_LP_ILK, 0);
2792
2793 /* According to the spec, bit 13 (RCZUNIT) must be set on IVB.
2794 * This implements the WaDisableRCZUnitClockGating workaround.
2795 */
2796 I915_WRITE(GEN6_UCGCTL2, GEN6_RCZUNIT_CLOCK_GATE_DISABLE);
2797
2798 I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE);
2799
2800 I915_WRITE(IVB_CHICKEN3,
2801 CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE |
2802 CHICKEN3_DGMG_DONE_FIX_DISABLE);
2803
2804 /* Apply the WaDisableRHWOOptimizationForRenderHang workaround. */
2805 I915_WRITE(GEN7_COMMON_SLICE_CHICKEN1,
2806 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
2807
2808 /* WaApplyL3ControlAndL3ChickenMode requires those two on Ivy Bridge */
2809 I915_WRITE(GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
2810 I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
2811
2812 /* This is required by WaCatErrorRejectionIssue */
2813 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
2814 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
2815 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB);
2816
2817 for_each_pipe(pipe) {
2818 I915_WRITE(DSPCNTR(pipe),
2819 I915_READ(DSPCNTR(pipe)) |
2820 DISPPLANE_TRICKLE_FEED_DISABLE);
2821 intel_flush_display_plane(dev_priv, pipe);
2822 }
2823
2824 I915_WRITE(CACHE_MODE_1, I915_READ(CACHE_MODE_1) |
2825 (PIXEL_SUBSPAN_COLLECT_OPT_DISABLE << 16) |
2826 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2827}
2828
2829static void g4x_init_clock_gating(struct drm_device *dev)
2830{
2831 struct drm_i915_private *dev_priv = dev->dev_private;
2832 uint32_t dspclk_gate;
2833
2834 I915_WRITE(RENCLK_GATE_D1, 0);
2835 I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE |
2836 GS_UNIT_CLOCK_GATE_DISABLE |
2837 CL_UNIT_CLOCK_GATE_DISABLE);
2838 I915_WRITE(RAMCLK_GATE_D, 0);
2839 dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE |
2840 OVRUNIT_CLOCK_GATE_DISABLE |
2841 OVCUNIT_CLOCK_GATE_DISABLE;
2842 if (IS_GM45(dev))
2843 dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE;
2844 I915_WRITE(DSPCLK_GATE_D, dspclk_gate);
2845}
2846
2847static void crestline_init_clock_gating(struct drm_device *dev)
2848{
2849 struct drm_i915_private *dev_priv = dev->dev_private;
2850
2851 I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE);
2852 I915_WRITE(RENCLK_GATE_D2, 0);
2853 I915_WRITE(DSPCLK_GATE_D, 0);
2854 I915_WRITE(RAMCLK_GATE_D, 0);
2855 I915_WRITE16(DEUC, 0);
2856}
2857
2858static void broadwater_init_clock_gating(struct drm_device *dev)
2859{
2860 struct drm_i915_private *dev_priv = dev->dev_private;
2861
2862 I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE |
2863 I965_RCC_CLOCK_GATE_DISABLE |
2864 I965_RCPB_CLOCK_GATE_DISABLE |
2865 I965_ISC_CLOCK_GATE_DISABLE |
2866 I965_FBC_CLOCK_GATE_DISABLE);
2867 I915_WRITE(RENCLK_GATE_D2, 0);
2868}
2869
2870static void gen3_init_clock_gating(struct drm_device *dev)
2871{
2872 struct drm_i915_private *dev_priv = dev->dev_private;
2873 u32 dstate = I915_READ(D_STATE);
2874
2875 dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING |
2876 DSTATE_DOT_CLOCK_GATING;
2877 I915_WRITE(D_STATE, dstate);
2878}
2879
2880static void i85x_init_clock_gating(struct drm_device *dev)
2881{
2882 struct drm_i915_private *dev_priv = dev->dev_private;
2883
2884 I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
2885}
2886
2887static void i830_init_clock_gating(struct drm_device *dev)
2888{
2889 struct drm_i915_private *dev_priv = dev->dev_private;
2890
2891 I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
2892}
2893
2894static void ibx_init_clock_gating(struct drm_device *dev)
2895{
2896 struct drm_i915_private *dev_priv = dev->dev_private;
2897
2898 /*
2899 * On Ibex Peak and Cougar Point, we need to disable clock
2900 * gating for the panel power sequencer or it will fail to
2901 * start up when no ports are active.
2902 */
2903 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
2904}
2905
2906static void cpt_init_clock_gating(struct drm_device *dev)
2907{
2908 struct drm_i915_private *dev_priv = dev->dev_private;
2909 int pipe;
2910
2911 /*
2912 * On Ibex Peak and Cougar Point, we need to disable clock
2913 * gating for the panel power sequencer or it will fail to
2914 * start up when no ports are active.
2915 */
2916 I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
2917 I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) |
2918 DPLS_EDP_PPS_FIX_DIS);
2919 /* Without this, mode sets may fail silently on FDI */
2920 for_each_pipe(pipe)
2921 I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS);
2922}
2923
2924void intel_init_clock_gating(struct drm_device *dev)
2925{
2926 struct drm_i915_private *dev_priv = dev->dev_private;
2927
2928 dev_priv->display.init_clock_gating(dev);
2929
2930 if (dev_priv->display.init_pch_clock_gating)
2931 dev_priv->display.init_pch_clock_gating(dev);
2932}
2933
2934/* Set up chip specific power management-related functions */
2935void intel_init_pm(struct drm_device *dev)
2936{
2937 struct drm_i915_private *dev_priv = dev->dev_private;
2938
2939 if (I915_HAS_FBC(dev)) {
2940 if (HAS_PCH_SPLIT(dev)) {
2941 dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
2942 dev_priv->display.enable_fbc = ironlake_enable_fbc;
2943 dev_priv->display.disable_fbc = ironlake_disable_fbc;
2944 } else if (IS_GM45(dev)) {
2945 dev_priv->display.fbc_enabled = g4x_fbc_enabled;
2946 dev_priv->display.enable_fbc = g4x_enable_fbc;
2947 dev_priv->display.disable_fbc = g4x_disable_fbc;
2948 } else if (IS_CRESTLINE(dev)) {
2949 dev_priv->display.fbc_enabled = i8xx_fbc_enabled;
2950 dev_priv->display.enable_fbc = i8xx_enable_fbc;
2951 dev_priv->display.disable_fbc = i8xx_disable_fbc;
2952 }
2953 /* 855GM needs testing */
2954 }
2955
2956 /* For FIFO watermark updates */
2957 if (HAS_PCH_SPLIT(dev)) {
2958 dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
2959 dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
2960
2961 /* IVB configs may use multi-threaded forcewake */
2962 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
2963 u32 ecobus;
2964
2965 /* A small trick here - if the BIOS hasn't configured MT forcewake,
2966 * and if the device is in RC6, then force_wake_mt_get will not wake
2967 * the device and the ECOBUS read will return zero. The test below
2968 * then (correctly) interprets that as MT forcewake being
2969 * disabled.
2970 */
2971 mutex_lock(&dev->struct_mutex);
2972 __gen6_gt_force_wake_mt_get(dev_priv);
2973 ecobus = I915_READ_NOTRACE(ECOBUS);
2974 __gen6_gt_force_wake_mt_put(dev_priv);
2975 mutex_unlock(&dev->struct_mutex);
2976
2977 if (ecobus & FORCEWAKE_MT_ENABLE) {
2978 DRM_DEBUG_KMS("Using MT version of forcewake\n");
2979 dev_priv->display.force_wake_get =
2980 __gen6_gt_force_wake_mt_get;
2981 dev_priv->display.force_wake_put =
2982 __gen6_gt_force_wake_mt_put;
2983 }
2984 }
2985
2986 if (HAS_PCH_IBX(dev))
2987 dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
2988 else if (HAS_PCH_CPT(dev))
2989 dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating;
2990
2991 if (IS_GEN5(dev)) {
2992 if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK)
2993 dev_priv->display.update_wm = ironlake_update_wm;
2994 else {
2995 DRM_DEBUG_KMS("Failed to get proper latency. "
2996 "Disable CxSR\n");
2997 dev_priv->display.update_wm = NULL;
2998 }
2999 dev_priv->display.init_clock_gating = ironlake_init_clock_gating;
3000 } else if (IS_GEN6(dev)) {
3001 if (SNB_READ_WM0_LATENCY()) {
3002 dev_priv->display.update_wm = sandybridge_update_wm;
3003 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3004 } else {
3005 DRM_DEBUG_KMS("Failed to read display plane latency. "
3006 "Disable CxSR\n");
3007 dev_priv->display.update_wm = NULL;
3008 }
3009 dev_priv->display.init_clock_gating = gen6_init_clock_gating;
3010 } else if (IS_IVYBRIDGE(dev)) {
3011 /* FIXME: detect B0+ stepping and use auto training */
3012 if (SNB_READ_WM0_LATENCY()) {
3013 dev_priv->display.update_wm = sandybridge_update_wm;
3014 dev_priv->display.update_sprite_wm = sandybridge_update_sprite_wm;
3015 } else {
3016 DRM_DEBUG_KMS("Failed to read display plane latency. "
3017 "Disable CxSR\n");
3018 dev_priv->display.update_wm = NULL;
3019 }
3020 dev_priv->display.init_clock_gating = ivybridge_init_clock_gating;
3021 } else
3022 dev_priv->display.update_wm = NULL;
3023 } else if (IS_VALLEYVIEW(dev)) {
3024 dev_priv->display.update_wm = valleyview_update_wm;
3025 dev_priv->display.init_clock_gating =
3026 valleyview_init_clock_gating;
3027 dev_priv->display.force_wake_get = vlv_force_wake_get;
3028 dev_priv->display.force_wake_put = vlv_force_wake_put;
3029 } else if (IS_PINEVIEW(dev)) {
3030 if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev),
3031 dev_priv->is_ddr3,
3032 dev_priv->fsb_freq,
3033 dev_priv->mem_freq)) {
3034 DRM_INFO("failed to find known CxSR latency "
3035 "(found ddr%s fsb freq %d, mem freq %d), "
3036 "disabling CxSR\n",
3037 (dev_priv->is_ddr3 == 1) ? "3" : "2",
3038 dev_priv->fsb_freq, dev_priv->mem_freq);
3039 /* Disable CxSR and never update its watermark again */
3040 pineview_disable_cxsr(dev);
3041 dev_priv->display.update_wm = NULL;
3042 } else
3043 dev_priv->display.update_wm = pineview_update_wm;
3044 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3045 } else if (IS_G4X(dev)) {
3046 dev_priv->display.update_wm = g4x_update_wm;
3047 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
3048 } else if (IS_GEN4(dev)) {
3049 dev_priv->display.update_wm = i965_update_wm;
3050 if (IS_CRESTLINE(dev))
3051 dev_priv->display.init_clock_gating = crestline_init_clock_gating;
3052 else if (IS_BROADWATER(dev))
3053 dev_priv->display.init_clock_gating = broadwater_init_clock_gating;
3054 } else if (IS_GEN3(dev)) {
3055 dev_priv->display.update_wm = i9xx_update_wm;
3056 dev_priv->display.get_fifo_size = i9xx_get_fifo_size;
3057 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
3058 } else if (IS_I865G(dev)) {
3059 dev_priv->display.update_wm = i830_update_wm;
3060 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3061 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3062 } else if (IS_I85X(dev)) {
3063 dev_priv->display.update_wm = i9xx_update_wm;
3064 dev_priv->display.get_fifo_size = i85x_get_fifo_size;
3065 dev_priv->display.init_clock_gating = i85x_init_clock_gating;
3066 } else {
3067 dev_priv->display.update_wm = i830_update_wm;
3068 dev_priv->display.init_clock_gating = i830_init_clock_gating;
3069 if (IS_845G(dev))
3070 dev_priv->display.get_fifo_size = i845_get_fifo_size;
3071 else
3072 dev_priv->display.get_fifo_size = i830_get_fifo_size;
3073 }
3074}
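
intel_init_pm() concentrates the per-platform checks into one function-pointer table filled at load time; callers then invoke the hooks, treating a NULL hook as "feature unavailable", and never repeat generation checks. A minimal standalone sketch of the pattern, using hypothetical names rather than the driver's real types:

#include <stdio.h>
#include <stddef.h>

struct pm_hooks {
	void (*update_wm)(void *dev);
	void (*init_clock_gating)(void *dev);
};

static void gen6_update_wm_stub(void *dev)     { puts("gen6 watermarks"); }
static void gen6_clock_gating_stub(void *dev)  { puts("gen6 clock gating"); }

static void example_init_pm(struct pm_hooks *h, int gen)
{
	if (gen >= 6) {
		h->update_wm = gen6_update_wm_stub;
		h->init_clock_gating = gen6_clock_gating_stub;
	} else {
		h->update_wm = NULL;	/* NULL means: skip watermark updates */
		h->init_clock_gating = NULL;
	}
}

int main(void)
{
	struct pm_hooks h;

	example_init_pm(&h, 6);
	if (h.update_wm)	/* callers guard optional hooks, as the driver does */
		h.update_wm(NULL);
	return 0;
}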
3075
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index dfdb613752c5..12d9bc789dfb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -53,9 +53,35 @@ static inline int ring_space(struct intel_ring_buffer *ring)
53} 53}
54 54
55static int 55static int
56render_ring_flush(struct intel_ring_buffer *ring, 56gen2_render_ring_flush(struct intel_ring_buffer *ring,
57 u32 invalidate_domains, 57 u32 invalidate_domains,
58 u32 flush_domains) 58 u32 flush_domains)
59{
60 u32 cmd;
61 int ret;
62
63 cmd = MI_FLUSH;
64 if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
65 cmd |= MI_NO_WRITE_FLUSH;
66
67 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
68 cmd |= MI_READ_FLUSH;
69
70 ret = intel_ring_begin(ring, 2);
71 if (ret)
72 return ret;
73
74 intel_ring_emit(ring, cmd);
75 intel_ring_emit(ring, MI_NOOP);
76 intel_ring_advance(ring);
77
78 return 0;
79}
80
81static int
82gen4_render_ring_flush(struct intel_ring_buffer *ring,
83 u32 invalidate_domains,
84 u32 flush_domains)
59{ 85{
60 struct drm_device *dev = ring->dev; 86 struct drm_device *dev = ring->dev;
61 u32 cmd; 87 u32 cmd;
@@ -90,17 +116,8 @@ render_ring_flush(struct intel_ring_buffer *ring,
90 */ 116 */
91 117
92 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 118 cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
93 if ((invalidate_domains|flush_domains) & 119 if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
94 I915_GEM_DOMAIN_RENDER)
95 cmd &= ~MI_NO_WRITE_FLUSH; 120 cmd &= ~MI_NO_WRITE_FLUSH;
96 if (INTEL_INFO(dev)->gen < 4) {
97 /*
98 * On the 965, the sampler cache always gets flushed
99 * and this bit is reserved.
100 */
101 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
102 cmd |= MI_READ_FLUSH;
103 }
104 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 121 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
105 cmd |= MI_EXE_FLUSH; 122 cmd |= MI_EXE_FLUSH;
106 123
@@ -472,21 +489,30 @@ gen6_add_request(struct intel_ring_buffer *ring,
472 * @seqno - seqno which the waiter will block on 489 * @seqno - seqno which the waiter will block on
473 */ 490 */
474static int 491static int
475intel_ring_sync(struct intel_ring_buffer *waiter, 492gen6_ring_sync(struct intel_ring_buffer *waiter,
476 struct intel_ring_buffer *signaller, 493 struct intel_ring_buffer *signaller,
477 int ring, 494 u32 seqno)
478 u32 seqno)
479{ 495{
480 int ret; 496 int ret;
481 u32 dw1 = MI_SEMAPHORE_MBOX | 497 u32 dw1 = MI_SEMAPHORE_MBOX |
482 MI_SEMAPHORE_COMPARE | 498 MI_SEMAPHORE_COMPARE |
483 MI_SEMAPHORE_REGISTER; 499 MI_SEMAPHORE_REGISTER;
484 500
501 /* Throughout all of the GEM code, seqno passed implies our current
502 * seqno is >= the last seqno executed. However for hardware the
503 * comparison is strictly greater than.
504 */
505 seqno -= 1;
506
507 WARN_ON(signaller->semaphore_register[waiter->id] ==
508 MI_SEMAPHORE_SYNC_INVALID);
509
485 ret = intel_ring_begin(waiter, 4); 510 ret = intel_ring_begin(waiter, 4);
486 if (ret) 511 if (ret)
487 return ret; 512 return ret;
488 513
489 intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]); 514 intel_ring_emit(waiter,
515 dw1 | signaller->semaphore_register[waiter->id]);
490 intel_ring_emit(waiter, seqno); 516 intel_ring_emit(waiter, seqno);
491 intel_ring_emit(waiter, 0); 517 intel_ring_emit(waiter, 0);
492 intel_ring_emit(waiter, MI_NOOP); 518 intel_ring_emit(waiter, MI_NOOP);
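
The seqno -= 1 above bridges two comparison conventions: GEM treats request N as complete once the ring's seqno reaches at least N, while the hardware semaphore releases the waiter only when the mailbox value is strictly greater than the operand. Waiting on N - 1 therefore releases exactly when N has executed. A standalone sketch of the arithmetic (illustrative values; 32-bit seqno wraparound is ignored here):

#include <assert.h>
#include <stdint.h>

/* Hardware semantics: release when mailbox > operand (strictly greater). */
static int hw_semaphore_released(uint32_t mailbox, uint32_t operand)
{
	return mailbox > operand;
}

int main(void)
{
	uint32_t target = 42;		/* request we want completed */
	uint32_t operand = target - 1;	/* what gen6_ring_sync emits */

	assert(!hw_semaphore_released(41, operand));	/* 41 < 42: keep waiting */
	assert(hw_semaphore_released(42, operand));	/* released exactly at 42 */
	return 0;
}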
@@ -495,47 +521,6 @@ intel_ring_sync(struct intel_ring_buffer *waiter,
495 return 0; 521 return 0;
496} 522}
497 523
498/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
499int
500render_ring_sync_to(struct intel_ring_buffer *waiter,
501 struct intel_ring_buffer *signaller,
502 u32 seqno)
503{
504 WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
505 return intel_ring_sync(waiter,
506 signaller,
507 RCS,
508 seqno);
509}
510
511/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
512int
513gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
514 struct intel_ring_buffer *signaller,
515 u32 seqno)
516{
517 WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
518 return intel_ring_sync(waiter,
519 signaller,
520 VCS,
521 seqno);
522}
523
524/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
525int
526gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
527 struct intel_ring_buffer *signaller,
528 u32 seqno)
529{
530 WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
531 return intel_ring_sync(waiter,
532 signaller,
533 BCS,
534 seqno);
535}
536
537
538
539#define PIPE_CONTROL_FLUSH(ring__, addr__) \ 524#define PIPE_CONTROL_FLUSH(ring__, addr__) \
540do { \ 525do { \
541 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ 526 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
@@ -597,27 +582,6 @@ pc_render_add_request(struct intel_ring_buffer *ring,
597 return 0; 582 return 0;
598} 583}
599 584
600static int
601render_ring_add_request(struct intel_ring_buffer *ring,
602 u32 *result)
603{
604 u32 seqno = i915_gem_next_request_seqno(ring);
605 int ret;
606
607 ret = intel_ring_begin(ring, 4);
608 if (ret)
609 return ret;
610
611 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
612 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
613 intel_ring_emit(ring, seqno);
614 intel_ring_emit(ring, MI_USER_INTERRUPT);
615 intel_ring_advance(ring);
616
617 *result = seqno;
618 return 0;
619}
620
621static u32 585static u32
622gen6_ring_get_seqno(struct intel_ring_buffer *ring) 586gen6_ring_get_seqno(struct intel_ring_buffer *ring)
623{ 587{
@@ -644,40 +608,43 @@ pc_render_get_seqno(struct intel_ring_buffer *ring)
644 return pc->cpu_page[0]; 608 return pc->cpu_page[0];
645} 609}
646 610
647static void 611static bool
648ironlake_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 612gen5_ring_get_irq(struct intel_ring_buffer *ring)
649{ 613{
650 dev_priv->gt_irq_mask &= ~mask; 614 struct drm_device *dev = ring->dev;
651 I915_WRITE(GTIMR, dev_priv->gt_irq_mask); 615 drm_i915_private_t *dev_priv = dev->dev_private;
652 POSTING_READ(GTIMR);
653}
654 616
655static void 617 if (!dev->irq_enabled)
656ironlake_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 618 return false;
657{
658 dev_priv->gt_irq_mask |= mask;
659 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
660 POSTING_READ(GTIMR);
661}
662 619
663static void 620 spin_lock(&ring->irq_lock);
664i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 621 if (ring->irq_refcount++ == 0) {
665{ 622 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
666 dev_priv->irq_mask &= ~mask; 623 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
667 I915_WRITE(IMR, dev_priv->irq_mask); 624 POSTING_READ(GTIMR);
668 POSTING_READ(IMR); 625 }
626 spin_unlock(&ring->irq_lock);
627
628 return true;
669} 629}
670 630
671static void 631static void
672i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 632gen5_ring_put_irq(struct intel_ring_buffer *ring)
673{ 633{
674 dev_priv->irq_mask |= mask; 634 struct drm_device *dev = ring->dev;
675 I915_WRITE(IMR, dev_priv->irq_mask); 635 drm_i915_private_t *dev_priv = dev->dev_private;
676 POSTING_READ(IMR); 636
637 spin_lock(&ring->irq_lock);
638 if (--ring->irq_refcount == 0) {
639 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
640 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
641 POSTING_READ(GTIMR);
642 }
643 spin_unlock(&ring->irq_lock);
677} 644}
678 645
679static bool 646static bool
680render_ring_get_irq(struct intel_ring_buffer *ring) 647i9xx_ring_get_irq(struct intel_ring_buffer *ring)
681{ 648{
682 struct drm_device *dev = ring->dev; 649 struct drm_device *dev = ring->dev;
683 drm_i915_private_t *dev_priv = dev->dev_private; 650 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -687,11 +654,9 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
687 654
688 spin_lock(&ring->irq_lock); 655 spin_lock(&ring->irq_lock);
689 if (ring->irq_refcount++ == 0) { 656 if (ring->irq_refcount++ == 0) {
690 if (INTEL_INFO(dev)->gen >= 5) 657 dev_priv->irq_mask &= ~ring->irq_enable_mask;
691 ironlake_enable_irq(dev_priv, 658 I915_WRITE(IMR, dev_priv->irq_mask);
692 GT_PIPE_NOTIFY | GT_USER_INTERRUPT); 659 POSTING_READ(IMR);
693 else
694 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
695 } 660 }
696 spin_unlock(&ring->irq_lock); 661 spin_unlock(&ring->irq_lock);
697 662
@@ -699,19 +664,16 @@ render_ring_get_irq(struct intel_ring_buffer *ring)
699} 664}
700 665
701static void 666static void
702render_ring_put_irq(struct intel_ring_buffer *ring) 667i9xx_ring_put_irq(struct intel_ring_buffer *ring)
703{ 668{
704 struct drm_device *dev = ring->dev; 669 struct drm_device *dev = ring->dev;
705 drm_i915_private_t *dev_priv = dev->dev_private; 670 drm_i915_private_t *dev_priv = dev->dev_private;
706 671
707 spin_lock(&ring->irq_lock); 672 spin_lock(&ring->irq_lock);
708 if (--ring->irq_refcount == 0) { 673 if (--ring->irq_refcount == 0) {
709 if (INTEL_INFO(dev)->gen >= 5) 674 dev_priv->irq_mask |= ring->irq_enable_mask;
710 ironlake_disable_irq(dev_priv, 675 I915_WRITE(IMR, dev_priv->irq_mask);
711 GT_USER_INTERRUPT | 676 POSTING_READ(IMR);
712 GT_PIPE_NOTIFY);
713 else
714 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
715 } 677 }
716 spin_unlock(&ring->irq_lock); 678 spin_unlock(&ring->irq_lock);
717} 679}
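
Both the new gen5 and i9xx get/put pairs share one shape: a per-ring refcount under ring->irq_lock, with the interrupt-mask register touched only on the 0 -> 1 and 1 -> 0 transitions so nested users stay cheap. A simplified standalone sketch of the pattern, with a pthread mutex standing in for the spinlock and stub functions for the MMIO writes (all names hypothetical):

#include <pthread.h>
#include <stdint.h>

struct ring_irq {
	pthread_mutex_t lock;
	unsigned int refcount;
	uint32_t enable_mask;
};

static void hw_unmask(uint32_t mask) { (void)mask; /* stand-in for GTIMR/IMR write */ }
static void hw_mask(uint32_t mask)   { (void)mask; /* stand-in for GTIMR/IMR write */ }

static void ring_irq_get(struct ring_irq *r)
{
	pthread_mutex_lock(&r->lock);
	if (r->refcount++ == 0)		/* first user actually enables */
		hw_unmask(r->enable_mask);
	pthread_mutex_unlock(&r->lock);
}

static void ring_irq_put(struct ring_irq *r)
{
	pthread_mutex_lock(&r->lock);
	if (--r->refcount == 0)		/* last user disables again */
		hw_mask(r->enable_mask);
	pthread_mutex_unlock(&r->lock);
}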
@@ -765,7 +727,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
765} 727}
766 728
767static int 729static int
768ring_add_request(struct intel_ring_buffer *ring, 730i9xx_add_request(struct intel_ring_buffer *ring,
769 u32 *result) 731 u32 *result)
770{ 732{
771 u32 seqno; 733 u32 seqno;
@@ -792,7 +754,6 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
792{ 754{
793 struct drm_device *dev = ring->dev; 755 struct drm_device *dev = ring->dev;
794 drm_i915_private_t *dev_priv = dev->dev_private; 756 drm_i915_private_t *dev_priv = dev->dev_private;
795 u32 mask = ring->irq_enable;
796 757
797 if (!dev->irq_enabled) 758 if (!dev->irq_enabled)
798 return false; 759 return false;
@@ -804,9 +765,10 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)
804 765
805 spin_lock(&ring->irq_lock); 766 spin_lock(&ring->irq_lock);
806 if (ring->irq_refcount++ == 0) { 767 if (ring->irq_refcount++ == 0) {
807 ring->irq_mask &= ~mask; 768 I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
808 I915_WRITE_IMR(ring, ring->irq_mask); 769 dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
809 ironlake_enable_irq(dev_priv, mask); 770 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
771 POSTING_READ(GTIMR);
810 } 772 }
811 spin_unlock(&ring->irq_lock); 773 spin_unlock(&ring->irq_lock);
812 774
@@ -818,105 +780,69 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring)
818{ 780{
819 struct drm_device *dev = ring->dev; 781 struct drm_device *dev = ring->dev;
820 drm_i915_private_t *dev_priv = dev->dev_private; 782 drm_i915_private_t *dev_priv = dev->dev_private;
821 u32 mask = ring->irq_enable;
822 783
823 spin_lock(&ring->irq_lock); 784 spin_lock(&ring->irq_lock);
824 if (--ring->irq_refcount == 0) { 785 if (--ring->irq_refcount == 0) {
825 ring->irq_mask |= mask; 786 I915_WRITE_IMR(ring, ~0);
826 I915_WRITE_IMR(ring, ring->irq_mask); 787 dev_priv->gt_irq_mask |= ring->irq_enable_mask;
827 ironlake_disable_irq(dev_priv, mask); 788 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
789 POSTING_READ(GTIMR);
828 } 790 }
829 spin_unlock(&ring->irq_lock); 791 spin_unlock(&ring->irq_lock);
830 792
831 gen6_gt_force_wake_put(dev_priv); 793 gen6_gt_force_wake_put(dev_priv);
832} 794}
833 795
834static bool 796static int
835bsd_ring_get_irq(struct intel_ring_buffer *ring) 797i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
836{ 798{
837 struct drm_device *dev = ring->dev; 799 int ret;
838 drm_i915_private_t *dev_priv = dev->dev_private;
839
840 if (!dev->irq_enabled)
841 return false;
842 800
843 spin_lock(&ring->irq_lock); 801 ret = intel_ring_begin(ring, 2);
844 if (ring->irq_refcount++ == 0) { 802 if (ret)
845 if (IS_G4X(dev)) 803 return ret;
846 i915_enable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
847 else
848 ironlake_enable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
849 }
850 spin_unlock(&ring->irq_lock);
851 804
852 return true; 805 intel_ring_emit(ring,
853} 806 MI_BATCH_BUFFER_START |
854static void 807 MI_BATCH_GTT |
855bsd_ring_put_irq(struct intel_ring_buffer *ring) 808 MI_BATCH_NON_SECURE_I965);
856{ 809 intel_ring_emit(ring, offset);
857 struct drm_device *dev = ring->dev; 810 intel_ring_advance(ring);
858 drm_i915_private_t *dev_priv = dev->dev_private;
859 811
860 spin_lock(&ring->irq_lock); 812 return 0;
861 if (--ring->irq_refcount == 0) {
862 if (IS_G4X(dev))
863 i915_disable_irq(dev_priv, I915_BSD_USER_INTERRUPT);
864 else
865 ironlake_disable_irq(dev_priv, GT_BSD_USER_INTERRUPT);
866 }
867 spin_unlock(&ring->irq_lock);
868} 813}
869 814
870static int 815static int
871ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length) 816i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
817 u32 offset, u32 len)
872{ 818{
873 int ret; 819 int ret;
874 820
875 ret = intel_ring_begin(ring, 2); 821 ret = intel_ring_begin(ring, 4);
876 if (ret) 822 if (ret)
877 return ret; 823 return ret;
878 824
879 intel_ring_emit(ring, 825 intel_ring_emit(ring, MI_BATCH_BUFFER);
880 MI_BATCH_BUFFER_START | (2 << 6) | 826 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
881 MI_BATCH_NON_SECURE_I965); 827 intel_ring_emit(ring, offset + len - 8);
882 intel_ring_emit(ring, offset); 828 intel_ring_emit(ring, 0);
883 intel_ring_advance(ring); 829 intel_ring_advance(ring);
884 830
885 return 0; 831 return 0;
886} 832}
887 833
888static int 834static int
889render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, 835i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
890 u32 offset, u32 len) 836 u32 offset, u32 len)
891{ 837{
892 struct drm_device *dev = ring->dev;
893 int ret; 838 int ret;
894 839
895 if (IS_I830(dev) || IS_845G(dev)) { 840 ret = intel_ring_begin(ring, 2);
896 ret = intel_ring_begin(ring, 4); 841 if (ret)
897 if (ret) 842 return ret;
898 return ret;
899
900 intel_ring_emit(ring, MI_BATCH_BUFFER);
901 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
902 intel_ring_emit(ring, offset + len - 8);
903 intel_ring_emit(ring, 0);
904 } else {
905 ret = intel_ring_begin(ring, 2);
906 if (ret)
907 return ret;
908 843
909 if (INTEL_INFO(dev)->gen >= 4) { 844 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
910 intel_ring_emit(ring, 845 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
911 MI_BATCH_BUFFER_START | (2 << 6) |
912 MI_BATCH_NON_SECURE_I965);
913 intel_ring_emit(ring, offset);
914 } else {
915 intel_ring_emit(ring,
916 MI_BATCH_BUFFER_START | (2 << 6));
917 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
918 }
919 }
920 intel_ring_advance(ring); 846 intel_ring_advance(ring);
921 847
922 return 0; 848 return 0;
@@ -983,8 +909,8 @@ err:
983 return ret; 909 return ret;
984} 910}
985 911
986int intel_init_ring_buffer(struct drm_device *dev, 912static int intel_init_ring_buffer(struct drm_device *dev,
987 struct intel_ring_buffer *ring) 913 struct intel_ring_buffer *ring)
988{ 914{
989 struct drm_i915_gem_object *obj; 915 struct drm_i915_gem_object *obj;
990 int ret; 916 int ret;
@@ -993,10 +919,10 @@ int intel_init_ring_buffer(struct drm_device *dev,
993 INIT_LIST_HEAD(&ring->active_list); 919 INIT_LIST_HEAD(&ring->active_list);
994 INIT_LIST_HEAD(&ring->request_list); 920 INIT_LIST_HEAD(&ring->request_list);
995 INIT_LIST_HEAD(&ring->gpu_write_list); 921 INIT_LIST_HEAD(&ring->gpu_write_list);
922 ring->size = 32 * PAGE_SIZE;
996 923
997 init_waitqueue_head(&ring->irq_queue); 924 init_waitqueue_head(&ring->irq_queue);
998 spin_lock_init(&ring->irq_lock); 925 spin_lock_init(&ring->irq_lock);
999 ring->irq_mask = ~0;
1000 926
1001 if (I915_NEED_GFX_HWS(dev)) { 927 if (I915_NEED_GFX_HWS(dev)) {
1002 ret = init_status_page(ring); 928 ret = init_status_page(ring);
@@ -1040,7 +966,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
1040 * of the buffer. 966 * of the buffer.
1041 */ 967 */
1042 ring->effective_size = ring->size; 968 ring->effective_size = ring->size;
1043 if (IS_I830(ring->dev)) 969 if (IS_I830(ring->dev) || IS_845G(ring->dev))
1044 ring->effective_size -= 128; 970 ring->effective_size -= 128;
1045 971
1046 return 0; 972 return 0;
@@ -1263,44 +1189,6 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
1263 ring->write_tail(ring, ring->tail); 1189 ring->write_tail(ring, ring->tail);
1264} 1190}
1265 1191
1266static const struct intel_ring_buffer render_ring = {
1267 .name = "render ring",
1268 .id = RCS,
1269 .mmio_base = RENDER_RING_BASE,
1270 .size = 32 * PAGE_SIZE,
1271 .init = init_render_ring,
1272 .write_tail = ring_write_tail,
1273 .flush = render_ring_flush,
1274 .add_request = render_ring_add_request,
1275 .get_seqno = ring_get_seqno,
1276 .irq_get = render_ring_get_irq,
1277 .irq_put = render_ring_put_irq,
1278 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
1279 .cleanup = render_ring_cleanup,
1280 .sync_to = render_ring_sync_to,
1281 .semaphore_register = {MI_SEMAPHORE_SYNC_INVALID,
1282 MI_SEMAPHORE_SYNC_RV,
1283 MI_SEMAPHORE_SYNC_RB},
1284 .signal_mbox = {GEN6_VRSYNC, GEN6_BRSYNC},
1285};
1286
1287/* ring buffer for bit-stream decoder */
1288
1289static const struct intel_ring_buffer bsd_ring = {
1290 .name = "bsd ring",
1291 .id = VCS,
1292 .mmio_base = BSD_RING_BASE,
1293 .size = 32 * PAGE_SIZE,
1294 .init = init_ring_common,
1295 .write_tail = ring_write_tail,
1296 .flush = bsd_ring_flush,
1297 .add_request = ring_add_request,
1298 .get_seqno = ring_get_seqno,
1299 .irq_get = bsd_ring_get_irq,
1300 .irq_put = bsd_ring_put_irq,
1301 .dispatch_execbuffer = ring_dispatch_execbuffer,
1302};
1303
1304 1192
1305static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, 1193static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1306 u32 value) 1194 u32 value)
@@ -1363,28 +1251,6 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
1363 return 0; 1251 return 0;
1364} 1252}
1365 1253
1366/* ring buffer for Video Codec for Gen6+ */
1367static const struct intel_ring_buffer gen6_bsd_ring = {
1368 .name = "gen6 bsd ring",
1369 .id = VCS,
1370 .mmio_base = GEN6_BSD_RING_BASE,
1371 .size = 32 * PAGE_SIZE,
1372 .init = init_ring_common,
1373 .write_tail = gen6_bsd_ring_write_tail,
1374 .flush = gen6_ring_flush,
1375 .add_request = gen6_add_request,
1376 .get_seqno = gen6_ring_get_seqno,
1377 .irq_enable = GEN6_BSD_USER_INTERRUPT,
1378 .irq_get = gen6_ring_get_irq,
1379 .irq_put = gen6_ring_put_irq,
1380 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1381 .sync_to = gen6_bsd_ring_sync_to,
1382 .semaphore_register = {MI_SEMAPHORE_SYNC_VR,
1383 MI_SEMAPHORE_SYNC_INVALID,
1384 MI_SEMAPHORE_SYNC_VB},
1385 .signal_mbox = {GEN6_RVSYNC, GEN6_BVSYNC},
1386};
1387
1388/* Blitter support (SandyBridge+) */ 1254/* Blitter support (SandyBridge+) */
1389 1255
1390static int blt_ring_flush(struct intel_ring_buffer *ring, 1256static int blt_ring_flush(struct intel_ring_buffer *ring,
@@ -1408,44 +1274,58 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
1408 return 0; 1274 return 0;
1409} 1275}
1410 1276
1411static const struct intel_ring_buffer gen6_blt_ring = {
1412 .name = "blt ring",
1413 .id = BCS,
1414 .mmio_base = BLT_RING_BASE,
1415 .size = 32 * PAGE_SIZE,
1416 .init = init_ring_common,
1417 .write_tail = ring_write_tail,
1418 .flush = blt_ring_flush,
1419 .add_request = gen6_add_request,
1420 .get_seqno = gen6_ring_get_seqno,
1421 .irq_get = gen6_ring_get_irq,
1422 .irq_put = gen6_ring_put_irq,
1423 .irq_enable = GEN6_BLITTER_USER_INTERRUPT,
1424 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
1425 .sync_to = gen6_blt_ring_sync_to,
1426 .semaphore_register = {MI_SEMAPHORE_SYNC_BR,
1427 MI_SEMAPHORE_SYNC_BV,
1428 MI_SEMAPHORE_SYNC_INVALID},
1429 .signal_mbox = {GEN6_RBSYNC, GEN6_VBSYNC},
1430};
1431
1432int intel_init_render_ring_buffer(struct drm_device *dev) 1277int intel_init_render_ring_buffer(struct drm_device *dev)
1433{ 1278{
1434 drm_i915_private_t *dev_priv = dev->dev_private; 1279 drm_i915_private_t *dev_priv = dev->dev_private;
1435 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1280 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1436 1281
1437 *ring = render_ring; 1282 ring->name = "render ring";
1283 ring->id = RCS;
1284 ring->mmio_base = RENDER_RING_BASE;
1285
1438 if (INTEL_INFO(dev)->gen >= 6) { 1286 if (INTEL_INFO(dev)->gen >= 6) {
1439 ring->add_request = gen6_add_request; 1287 ring->add_request = gen6_add_request;
1440 ring->flush = gen6_render_ring_flush; 1288 ring->flush = gen6_render_ring_flush;
1441 ring->irq_get = gen6_ring_get_irq; 1289 ring->irq_get = gen6_ring_get_irq;
1442 ring->irq_put = gen6_ring_put_irq; 1290 ring->irq_put = gen6_ring_put_irq;
1443 ring->irq_enable = GT_USER_INTERRUPT; 1291 ring->irq_enable_mask = GT_USER_INTERRUPT;
1444 ring->get_seqno = gen6_ring_get_seqno; 1292 ring->get_seqno = gen6_ring_get_seqno;
1293 ring->sync_to = gen6_ring_sync;
1294 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
1295 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
1296 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
1297 ring->signal_mbox[0] = GEN6_VRSYNC;
1298 ring->signal_mbox[1] = GEN6_BRSYNC;
1445 } else if (IS_GEN5(dev)) { 1299 } else if (IS_GEN5(dev)) {
1446 ring->add_request = pc_render_add_request; 1300 ring->add_request = pc_render_add_request;
1301 ring->flush = gen4_render_ring_flush;
1447 ring->get_seqno = pc_render_get_seqno; 1302 ring->get_seqno = pc_render_get_seqno;
1303 ring->irq_get = gen5_ring_get_irq;
1304 ring->irq_put = gen5_ring_put_irq;
1305 ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
1306 } else {
1307 ring->add_request = i9xx_add_request;
1308 if (INTEL_INFO(dev)->gen < 4)
1309 ring->flush = gen2_render_ring_flush;
1310 else
1311 ring->flush = gen4_render_ring_flush;
1312 ring->get_seqno = ring_get_seqno;
1313 ring->irq_get = i9xx_ring_get_irq;
1314 ring->irq_put = i9xx_ring_put_irq;
1315 ring->irq_enable_mask = I915_USER_INTERRUPT;
1448 } 1316 }
1317 ring->write_tail = ring_write_tail;
1318 if (INTEL_INFO(dev)->gen >= 6)
1319 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1320 else if (INTEL_INFO(dev)->gen >= 4)
1321 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1322 else if (IS_I830(dev) || IS_845G(dev))
1323 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1324 else
1325 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1326 ring->init = init_render_ring;
1327 ring->cleanup = render_ring_cleanup;
1328
1449 1329
1450 if (!I915_NEED_GFX_HWS(dev)) { 1330 if (!I915_NEED_GFX_HWS(dev)) {
1451 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1331 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
@@ -1460,17 +1340,37 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
1460 drm_i915_private_t *dev_priv = dev->dev_private; 1340 drm_i915_private_t *dev_priv = dev->dev_private;
1461 struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; 1341 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
1462 1342
1463 *ring = render_ring; 1343 ring->name = "render ring";
1344 ring->id = RCS;
1345 ring->mmio_base = RENDER_RING_BASE;
1346
1464 if (INTEL_INFO(dev)->gen >= 6) { 1347 if (INTEL_INFO(dev)->gen >= 6) {
1465 ring->add_request = gen6_add_request; 1348 /* non-kms not supported on gen6+ */
1466 ring->irq_get = gen6_ring_get_irq; 1349 return -ENODEV;
1467 ring->irq_put = gen6_ring_put_irq;
1468 ring->irq_enable = GT_USER_INTERRUPT;
1469 } else if (IS_GEN5(dev)) {
1470 ring->add_request = pc_render_add_request;
1471 ring->get_seqno = pc_render_get_seqno;
1472 } 1350 }
1473 1351
1352 /* Note: gem is not supported on gen5/ilk without kms (the corresponding
1353 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
1354 * the special gen5 functions. */
1355 ring->add_request = i9xx_add_request;
1356 if (INTEL_INFO(dev)->gen < 4)
1357 ring->flush = gen2_render_ring_flush;
1358 else
1359 ring->flush = gen4_render_ring_flush;
1360 ring->get_seqno = ring_get_seqno;
1361 ring->irq_get = i9xx_ring_get_irq;
1362 ring->irq_put = i9xx_ring_put_irq;
1363 ring->irq_enable_mask = I915_USER_INTERRUPT;
1364 ring->write_tail = ring_write_tail;
1365 if (INTEL_INFO(dev)->gen >= 4)
1366 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1367 else if (IS_I830(dev) || IS_845G(dev))
1368 ring->dispatch_execbuffer = i830_dispatch_execbuffer;
1369 else
1370 ring->dispatch_execbuffer = i915_dispatch_execbuffer;
1371 ring->init = init_render_ring;
1372 ring->cleanup = render_ring_cleanup;
1373
1474 if (!I915_NEED_GFX_HWS(dev)) 1374 if (!I915_NEED_GFX_HWS(dev))
1475 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr; 1375 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1476 1376
@@ -1506,10 +1406,46 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
1506 drm_i915_private_t *dev_priv = dev->dev_private; 1406 drm_i915_private_t *dev_priv = dev->dev_private;
1507 struct intel_ring_buffer *ring = &dev_priv->ring[VCS]; 1407 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1508 1408
1509 if (IS_GEN6(dev) || IS_GEN7(dev)) 1409 ring->name = "bsd ring";
1510 *ring = gen6_bsd_ring; 1410 ring->id = VCS;
1511 else 1411
1512 *ring = bsd_ring; 1412 ring->write_tail = ring_write_tail;
1413 if (IS_GEN6(dev) || IS_GEN7(dev)) {
1414 ring->mmio_base = GEN6_BSD_RING_BASE;
1415 /* gen6 bsd needs a special wa for tail updates */
1416 if (IS_GEN6(dev))
1417 ring->write_tail = gen6_bsd_ring_write_tail;
1418 ring->flush = gen6_ring_flush;
1419 ring->add_request = gen6_add_request;
1420 ring->get_seqno = gen6_ring_get_seqno;
1421 ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
1422 ring->irq_get = gen6_ring_get_irq;
1423 ring->irq_put = gen6_ring_put_irq;
1424 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1425 ring->sync_to = gen6_ring_sync;
1426 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
1427 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
1428 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
1429 ring->signal_mbox[0] = GEN6_RVSYNC;
1430 ring->signal_mbox[1] = GEN6_BVSYNC;
1431 } else {
1432 ring->mmio_base = BSD_RING_BASE;
1433 ring->flush = bsd_ring_flush;
1434 ring->add_request = i9xx_add_request;
1435 ring->get_seqno = ring_get_seqno;
1436 if (IS_GEN5(dev)) {
1437 ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
1438 ring->irq_get = gen5_ring_get_irq;
1439 ring->irq_put = gen5_ring_put_irq;
1440 } else {
1441 ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
1442 ring->irq_get = i9xx_ring_get_irq;
1443 ring->irq_put = i9xx_ring_put_irq;
1444 }
1445 ring->dispatch_execbuffer = i965_dispatch_execbuffer;
1446 }
1447 ring->init = init_ring_common;
1448
1513 1449
1514 return intel_init_ring_buffer(dev, ring); 1450 return intel_init_ring_buffer(dev, ring);
1515} 1451}
@@ -1519,7 +1455,25 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
1519 drm_i915_private_t *dev_priv = dev->dev_private; 1455 drm_i915_private_t *dev_priv = dev->dev_private;
1520 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 1456 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1521 1457
1522 *ring = gen6_blt_ring; 1458 ring->name = "blitter ring";
1459 ring->id = BCS;
1460
1461 ring->mmio_base = BLT_RING_BASE;
1462 ring->write_tail = ring_write_tail;
1463 ring->flush = blt_ring_flush;
1464 ring->add_request = gen6_add_request;
1465 ring->get_seqno = gen6_ring_get_seqno;
1466 ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
1467 ring->irq_get = gen6_ring_get_irq;
1468 ring->irq_put = gen6_ring_put_irq;
1469 ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
1470 ring->sync_to = gen6_ring_sync;
1471 ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
1472 ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
1473 ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
1474 ring->signal_mbox[0] = GEN6_RBSYNC;
1475 ring->signal_mbox[1] = GEN6_VBSYNC;
1476 ring->init = init_ring_common;
1523 1477
1524 return intel_init_ring_buffer(dev, ring); 1478 return intel_init_ring_buffer(dev, ring);
1525} 1479}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3488a5a127db..06a66adf69c2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -58,8 +58,7 @@ struct intel_ring_buffer {
58 58
59 spinlock_t irq_lock; 59 spinlock_t irq_lock;
60 u32 irq_refcount; 60 u32 irq_refcount;
61 u32 irq_mask; 61 u32 irq_enable_mask; /* bitmask to enable ring interrupt */
62 u32 irq_enable; /* IRQs enabled for this ring */
63 u32 irq_seqno; /* last seqno seen at irq time */ 62 u32 irq_seqno; /* last seqno seen at irq time */
64 u32 trace_irq_seqno; 63 u32 trace_irq_seqno;
65 u32 waiting_seqno; 64 u32 waiting_seqno;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6898145b44ce..c330efd59a0e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -443,9 +443,17 @@ static const char *cmd_status_names[] = {
443static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd, 443static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
444 const void *args, int args_len) 444 const void *args, int args_len)
445{ 445{
446 u8 buf[args_len*2 + 2], status; 446 u8 *buf, status;
447 struct i2c_msg msgs[args_len + 3]; 447 struct i2c_msg *msgs;
448 int i, ret; 448 int i, ret = true;
449
450 buf = kzalloc(args_len * 2 + 2, GFP_KERNEL);
451 if (!buf)
452 return false;
453
454 msgs = kcalloc(args_len + 3, sizeof(*msgs), GFP_KERNEL);
455 if (!msgs) {
456 kfree(buf); /* don't leak buf if msgs allocation fails */
457 return false;
458 }
449 457
450 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len); 458 intel_sdvo_debug_write(intel_sdvo, cmd, args, args_len);
451 459
@@ -479,15 +487,19 @@ static bool intel_sdvo_write_cmd(struct intel_sdvo *intel_sdvo, u8 cmd,
479 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3); 487 ret = i2c_transfer(intel_sdvo->i2c, msgs, i+3);
480 if (ret < 0) { 488 if (ret < 0) {
481 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret); 489 DRM_DEBUG_KMS("I2c transfer returned %d\n", ret);
482 return false; 490 ret = false;
491 goto out;
483 } 492 }
484 if (ret != i+3) { 493 if (ret != i+3) {
485 /* failure in I2C transfer */ 494 /* failure in I2C transfer */
486 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3); 495 DRM_DEBUG_KMS("I2c transfer returned %d/%d\n", ret, i+3);
487 return false; 496 ret = false;
488 } 497 }
489 498
490 return true; 499out:
500 kfree(msgs);
501 kfree(buf);
502 return ret;
491} 503}
492 504
493static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo, 505static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
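
This hunk replaces two variable-length arrays (sized by the caller-controlled args_len) with heap allocations, which in turn requires converting the early returns into a single cleanup path. A standalone sketch of the resulting shape (hypothetical function, simplified to one buffer):

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

static bool send_cmd(const void *args, size_t args_len)
{
	unsigned char *buf;
	bool ret = true;

	buf = calloc(args_len * 2 + 2, 1);	/* heap buffer instead of a VLA */
	if (!buf)
		return false;

	memcpy(buf, args, args_len);
	/* ... perform the transfer; on failure set ret = false ... */

	free(buf);		/* single exit point frees unconditionally */
	return ret;
}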
@@ -1258,7 +1270,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
1258 dev_priv->crt_ddc_pin)); 1270 dev_priv->crt_ddc_pin));
1259} 1271}
1260 1272
1261enum drm_connector_status 1273static enum drm_connector_status
1262intel_sdvo_tmds_sink_detect(struct drm_connector *connector) 1274intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
1263{ 1275{
1264 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); 1276 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index a464771a7240..fbf03b996587 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -95,7 +95,6 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
95 /* must disable */ 95 /* must disable */
96 sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 96 sprctl |= SPRITE_TRICKLE_FEED_DISABLE;
97 sprctl |= SPRITE_ENABLE; 97 sprctl |= SPRITE_ENABLE;
98 sprctl |= SPRITE_DEST_KEY;
99 98
100 /* Sizes are 0 based */ 99 /* Sizes are 0 based */
101 src_w--; 100 src_w--;
@@ -112,13 +111,13 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
112 */ 111 */
113 if (crtc_w != src_w || crtc_h != src_h) { 112 if (crtc_w != src_w || crtc_h != src_h) {
114 dev_priv->sprite_scaling_enabled = true; 113 dev_priv->sprite_scaling_enabled = true;
115 sandybridge_update_wm(dev); 114 intel_update_watermarks(dev);
116 intel_wait_for_vblank(dev, pipe); 115 intel_wait_for_vblank(dev, pipe);
117 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h; 116 sprscale = SPRITE_SCALE_ENABLE | (src_w << 16) | src_h;
118 } else { 117 } else {
119 dev_priv->sprite_scaling_enabled = false; 118 dev_priv->sprite_scaling_enabled = false;
120 /* potentially re-enable LP watermarks */ 119 /* potentially re-enable LP watermarks */
121 sandybridge_update_wm(dev); 120 intel_update_watermarks(dev);
122 } 121 }
123 122
124 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]); 123 I915_WRITE(SPRSTRIDE(pipe), fb->pitches[0]);
@@ -134,7 +133,7 @@ ivb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
134 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w); 133 I915_WRITE(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
135 I915_WRITE(SPRSCALE(pipe), sprscale); 134 I915_WRITE(SPRSCALE(pipe), sprscale);
136 I915_WRITE(SPRCTL(pipe), sprctl); 135 I915_WRITE(SPRCTL(pipe), sprctl);
137 I915_WRITE(SPRSURF(pipe), obj->gtt_offset); 136 I915_MODIFY_DISPBASE(SPRSURF(pipe), obj->gtt_offset);
138 POSTING_READ(SPRSURF(pipe)); 137 POSTING_READ(SPRSURF(pipe));
139} 138}
140 139
@@ -150,7 +149,7 @@ ivb_disable_plane(struct drm_plane *plane)
150 /* Can't leave the scaler enabled... */ 149 /* Can't leave the scaler enabled... */
151 I915_WRITE(SPRSCALE(pipe), 0); 150 I915_WRITE(SPRSCALE(pipe), 0);
152 /* Activate double buffered register update */ 151 /* Activate double buffered register update */
153 I915_WRITE(SPRSURF(pipe), 0); 152 I915_MODIFY_DISPBASE(SPRSURF(pipe), 0);
154 POSTING_READ(SPRSURF(pipe)); 153 POSTING_READ(SPRSURF(pipe));
155} 154}
156 155
@@ -209,7 +208,7 @@ ivb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
209} 208}
210 209
211static void 210static void
212snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb, 211ilk_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
213 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y, 212 struct drm_i915_gem_object *obj, int crtc_x, int crtc_y,
214 unsigned int crtc_w, unsigned int crtc_h, 213 unsigned int crtc_w, unsigned int crtc_h,
215 uint32_t x, uint32_t y, 214 uint32_t x, uint32_t y,
@@ -219,7 +218,7 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
219 struct drm_i915_private *dev_priv = dev->dev_private; 218 struct drm_i915_private *dev_priv = dev->dev_private;
220 struct intel_plane *intel_plane = to_intel_plane(plane); 219 struct intel_plane *intel_plane = to_intel_plane(plane);
221 int pipe = intel_plane->pipe, pixel_size; 220 int pipe = intel_plane->pipe, pixel_size;
222 u32 dvscntr, dvsscale = 0; 221 u32 dvscntr, dvsscale;
223 222
224 dvscntr = I915_READ(DVSCNTR(pipe)); 223 dvscntr = I915_READ(DVSCNTR(pipe));
225 224
@@ -263,8 +262,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
263 if (obj->tiling_mode != I915_TILING_NONE) 262 if (obj->tiling_mode != I915_TILING_NONE)
264 dvscntr |= DVS_TILED; 263 dvscntr |= DVS_TILED;
265 264
266 /* must disable */ 265 if (IS_GEN6(dev))
267 dvscntr |= DVS_TRICKLE_FEED_DISABLE; 266 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */
268 dvscntr |= DVS_ENABLE; 267 dvscntr |= DVS_ENABLE;
269 268
270 /* Sizes are 0 based */ 269 /* Sizes are 0 based */
@@ -275,7 +274,8 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
275 274
276 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size); 275 intel_update_sprite_watermarks(dev, pipe, crtc_w, pixel_size);
277 276
278 if (crtc_w != src_w || crtc_h != src_h) 277 dvsscale = 0;
278 if (IS_GEN5(dev) || crtc_w != src_w || crtc_h != src_h)
279 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h; 279 dvsscale = DVS_SCALE_ENABLE | (src_w << 16) | src_h;
280 280
281 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]); 281 I915_WRITE(DVSSTRIDE(pipe), fb->pitches[0]);
@@ -291,12 +291,12 @@ snb_update_plane(struct drm_plane *plane, struct drm_framebuffer *fb,
291 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w); 291 I915_WRITE(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
292 I915_WRITE(DVSSCALE(pipe), dvsscale); 292 I915_WRITE(DVSSCALE(pipe), dvsscale);
293 I915_WRITE(DVSCNTR(pipe), dvscntr); 293 I915_WRITE(DVSCNTR(pipe), dvscntr);
294 I915_WRITE(DVSSURF(pipe), obj->gtt_offset); 294 I915_MODIFY_DISPBASE(DVSSURF(pipe), obj->gtt_offset);
295 POSTING_READ(DVSSURF(pipe)); 295 POSTING_READ(DVSSURF(pipe));
296} 296}
297 297
298static void 298static void
299snb_disable_plane(struct drm_plane *plane) 299ilk_disable_plane(struct drm_plane *plane)
300{ 300{
301 struct drm_device *dev = plane->dev; 301 struct drm_device *dev = plane->dev;
302 struct drm_i915_private *dev_priv = dev->dev_private; 302 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -307,7 +307,7 @@ snb_disable_plane(struct drm_plane *plane)
307 /* Disable the scaler */ 307 /* Disable the scaler */
308 I915_WRITE(DVSSCALE(pipe), 0); 308 I915_WRITE(DVSSCALE(pipe), 0);
309 /* Flush double buffered register updates */ 309 /* Flush double buffered register updates */
310 I915_WRITE(DVSSURF(pipe), 0); 310 I915_MODIFY_DISPBASE(DVSSURF(pipe), 0);
311 POSTING_READ(DVSSURF(pipe)); 311 POSTING_READ(DVSSURF(pipe));
312} 312}
313 313
@@ -334,7 +334,7 @@ intel_disable_primary(struct drm_crtc *crtc)
334} 334}
335 335
336static int 336static int
337snb_update_colorkey(struct drm_plane *plane, 337ilk_update_colorkey(struct drm_plane *plane,
338 struct drm_intel_sprite_colorkey *key) 338 struct drm_intel_sprite_colorkey *key)
339{ 339{
340 struct drm_device *dev = plane->dev; 340 struct drm_device *dev = plane->dev;
@@ -363,7 +363,7 @@ snb_update_colorkey(struct drm_plane *plane,
363} 363}
364 364
365static void 365static void
366snb_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key) 366ilk_get_colorkey(struct drm_plane *plane, struct drm_intel_sprite_colorkey *key)
367{ 367{
368 struct drm_device *dev = plane->dev; 368 struct drm_device *dev = plane->dev;
369 struct drm_i915_private *dev_priv = dev->dev_private; 369 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -617,6 +617,14 @@ static const struct drm_plane_funcs intel_plane_funcs = {
617 .destroy = intel_destroy_plane, 617 .destroy = intel_destroy_plane,
618}; 618};
619 619
620static uint32_t ilk_plane_formats[] = {
621 DRM_FORMAT_XRGB8888,
622 DRM_FORMAT_YUYV,
623 DRM_FORMAT_YVYU,
624 DRM_FORMAT_UYVY,
625 DRM_FORMAT_VYUY,
626};
627
620static uint32_t snb_plane_formats[] = { 628static uint32_t snb_plane_formats[] = {
621 DRM_FORMAT_XBGR8888, 629 DRM_FORMAT_XBGR8888,
622 DRM_FORMAT_XRGB8888, 630 DRM_FORMAT_XRGB8888,
@@ -631,34 +639,56 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
631{ 639{
632 struct intel_plane *intel_plane; 640 struct intel_plane *intel_plane;
633 unsigned long possible_crtcs; 641 unsigned long possible_crtcs;
642 const uint32_t *plane_formats;
643 int num_plane_formats;
634 int ret; 644 int ret;
635 645
636 if (!(IS_GEN6(dev) || IS_GEN7(dev))) 646 if (INTEL_INFO(dev)->gen < 5)
637 return -ENODEV; 647 return -ENODEV;
638 648
639 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL); 649 intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
640 if (!intel_plane) 650 if (!intel_plane)
641 return -ENOMEM; 651 return -ENOMEM;
642 652
643 if (IS_GEN6(dev)) { 653 switch (INTEL_INFO(dev)->gen) {
654 case 5:
655 case 6:
644 intel_plane->max_downscale = 16; 656 intel_plane->max_downscale = 16;
645 intel_plane->update_plane = snb_update_plane; 657 intel_plane->update_plane = ilk_update_plane;
646 intel_plane->disable_plane = snb_disable_plane; 658 intel_plane->disable_plane = ilk_disable_plane;
647 intel_plane->update_colorkey = snb_update_colorkey; 659 intel_plane->update_colorkey = ilk_update_colorkey;
648 intel_plane->get_colorkey = snb_get_colorkey; 660 intel_plane->get_colorkey = ilk_get_colorkey;
649 } else if (IS_GEN7(dev)) { 661
662 if (IS_GEN6(dev)) {
663 plane_formats = snb_plane_formats;
664 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
665 } else {
666 plane_formats = ilk_plane_formats;
667 num_plane_formats = ARRAY_SIZE(ilk_plane_formats);
668 }
669 break;
670
671 case 7:
650 intel_plane->max_downscale = 2; 672 intel_plane->max_downscale = 2;
651 intel_plane->update_plane = ivb_update_plane; 673 intel_plane->update_plane = ivb_update_plane;
652 intel_plane->disable_plane = ivb_disable_plane; 674 intel_plane->disable_plane = ivb_disable_plane;
653 intel_plane->update_colorkey = ivb_update_colorkey; 675 intel_plane->update_colorkey = ivb_update_colorkey;
654 intel_plane->get_colorkey = ivb_get_colorkey; 676 intel_plane->get_colorkey = ivb_get_colorkey;
677
678 plane_formats = snb_plane_formats;
679 num_plane_formats = ARRAY_SIZE(snb_plane_formats);
680 break;
681
682 default:
683 return -ENODEV;
655 } 684 }
656 685
657 intel_plane->pipe = pipe; 686 intel_plane->pipe = pipe;
658 possible_crtcs = (1 << pipe); 687 possible_crtcs = (1 << pipe);
659 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs, 688 ret = drm_plane_init(dev, &intel_plane->base, possible_crtcs,
660 &intel_plane_funcs, snb_plane_formats, 689 &intel_plane_funcs,
661 ARRAY_SIZE(snb_plane_formats), false); 690 plane_formats, num_plane_formats,
691 false);
662 if (ret) 692 if (ret)
663 kfree(intel_plane); 693 kfree(intel_plane);
664 694
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index ca12c709f3eb..67f444d632fb 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -811,7 +811,7 @@ intel_tv_mode_lookup(const char *tv_format)
811{ 811{
812 int i; 812 int i;
813 813
814 for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) { 814 for (i = 0; i < ARRAY_SIZE(tv_modes); i++) {
815 const struct tv_mode *tv_mode = &tv_modes[i]; 815 const struct tv_mode *tv_mode = &tv_modes[i];
816 816
817 if (!strcmp(tv_format, tv_mode->name)) 817 if (!strcmp(tv_format, tv_mode->name))
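
The open-coded sizeof division is replaced by the kernel's ARRAY_SIZE macro, whose core is the same expression (the kernel's version additionally rejects non-array arguments at compile time):

/* Core of the macro; only valid on true arrays, not on pointers. */
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))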
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 06b209b2e229..b92a694caa0d 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -230,6 +230,10 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
230 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 230 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
231 return; 231 return;
232 232
233 /* some R4xx chips have the wrong frev */
234 if (rdev->family <= CHIP_RV410)
235 frev = 1;
236
233 switch (frev) { 237 switch (frev) {
234 case 1: 238 case 1:
235 switch (crev) { 239 switch (crev) {
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index e11df778e194..cb1141854282 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -2557,7 +2557,7 @@ static void r100_pll_errata_after_data(struct radeon_device *rdev)
2557 * or the chip could hang on a subsequent access 2557 * or the chip could hang on a subsequent access
2558 */ 2558 */
2559 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) { 2559 if (rdev->pll_errata & CHIP_ERRATA_PLL_DELAY) {
2560 udelay(5000); 2560 mdelay(5);
2561 } 2561 }
2562 2562
2563 /* This function is required to work around a hardware bug in some (all?) 2563 /* This function is required to work around a hardware bug in some (all?)
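
The recurring udelay(N000) -> mdelay(N) conversions in this series follow the kernel's delay guidance: udelay() is meant for microsecond-scale busy-waits, and very large arguments risk overflow on some architectures, so millisecond-scale waits are spelled mdelay() (itself still a busy-wait, unlike msleep()). A kernel-context sketch, illustrative only:

#include <linux/delay.h>

static void delay_examples(void)
{
	udelay(5);	/* microsecond-scale busy-wait: fine */
	mdelay(15);	/* was udelay(15000): same wall time, no overflow risk */
}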
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 8f84bd67ce7f..222245d0138a 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2839,7 +2839,7 @@ void r600_rlc_stop(struct radeon_device *rdev)
2839 /* r7xx asics need to soft reset RLC before halting */ 2839 /* r7xx asics need to soft reset RLC before halting */
2840 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC); 2840 WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
2841 RREG32(SRBM_SOFT_RESET); 2841 RREG32(SRBM_SOFT_RESET);
2842 udelay(15000); 2842 mdelay(15);
2843 WREG32(SRBM_SOFT_RESET, 0); 2843 WREG32(SRBM_SOFT_RESET, 0);
2844 RREG32(SRBM_SOFT_RESET); 2844 RREG32(SRBM_SOFT_RESET);
2845 } 2845 }
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 84c546250955..75ed17c96115 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -407,7 +407,7 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 	fw_data = (const __be32 *)dev_priv->me_fw->data;
@@ -500,7 +500,7 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 	fw_data = (const __be32 *)dev_priv->pfp_fw->data;
@@ -1797,7 +1797,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
 
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
 	RADEON_READ(R600_GRBM_SOFT_RESET);
-	DRM_UDELAY(15000);
+	mdelay(15);
 	RADEON_WRITE(R600_GRBM_SOFT_RESET, 0);
 
 
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 6ae0c75f016a..9c6b29a41927 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -633,7 +633,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 		tmp &= ~(R300_SCLK_FORCE_VAP);
 		tmp |= RADEON_SCLK_FORCE_CP;
 		WREG32_PLL(RADEON_SCLK_CNTL, tmp);
-		udelay(15000);
+		mdelay(15);
 
 		tmp = RREG32_PLL(R300_SCLK_CNTL2);
 		tmp &= ~(R300_SCLK_FORCE_TCL |
@@ -651,12 +651,12 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 			tmp |= (RADEON_ENGIN_DYNCLK_MODE |
 				(0x01 << RADEON_ACTIVE_HILO_LAT_SHIFT));
 			WREG32_PLL(RADEON_CLK_PWRMGT_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			tmp = RREG32_PLL(RADEON_CLK_PIN_CNTL);
 			tmp |= RADEON_SCLK_DYN_START_CNTL;
 			WREG32_PLL(RADEON_CLK_PIN_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			/* When DRI is enabled, setting DYN_STOP_LAT to zero can cause some R200
 			   to lockup randomly, leave them as set by BIOS.
@@ -696,7 +696,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 				tmp |= RADEON_SCLK_MORE_FORCEON;
 			}
 			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 		}
 
 		/* RV200::A11 A12, RV250::A11 A12 */
@@ -709,7 +709,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 				tmp |= RADEON_TCL_BYPASS_DISABLE;
 				WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
 			}
-			udelay(15000);
+			mdelay(15);
 
 			/*enable dynamic mode for display clocks (PIXCLK and PIX2CLK) */
 			tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
@@ -722,14 +722,14 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 				RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 
 			tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
 			tmp |= (RADEON_PIXCLK_ALWAYS_ONb |
 				RADEON_PIXCLK_DAC_ALWAYS_ONb);
 
 			WREG32_PLL(RADEON_VCLK_ECP_CNTL, tmp);
-			udelay(15000);
+			mdelay(15);
 		}
 	} else {
 		/* Turn everything OFF (ForceON to everything) */
@@ -861,7 +861,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 		}
 		WREG32_PLL(RADEON_SCLK_CNTL, tmp);
 
-		udelay(16000);
+		mdelay(16);
 
 		if ((rdev->family == CHIP_R300) ||
 		    (rdev->family == CHIP_R350)) {
@@ -870,7 +870,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 			       R300_SCLK_FORCE_GA |
 			       R300_SCLK_FORCE_CBA);
 			WREG32_PLL(R300_SCLK_CNTL2, tmp);
-			udelay(16000);
+			mdelay(16);
 		}
 
 		if (rdev->flags & RADEON_IS_IGP) {
@@ -878,7 +878,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 			tmp &= ~(RADEON_FORCEON_MCLKA |
 				 RADEON_FORCEON_YCLKA);
 			WREG32_PLL(RADEON_MCLK_CNTL, tmp);
-			udelay(16000);
+			mdelay(16);
 		}
 
 		if ((rdev->family == CHIP_RV200) ||
@@ -887,7 +887,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 			tmp = RREG32_PLL(RADEON_SCLK_MORE_CNTL);
 			tmp |= RADEON_SCLK_MORE_FORCEON;
 			WREG32_PLL(RADEON_SCLK_MORE_CNTL, tmp);
-			udelay(16000);
+			mdelay(16);
 		}
 
 		tmp = RREG32_PLL(RADEON_PIXCLKS_CNTL);
@@ -900,7 +900,7 @@ void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable)
 			 RADEON_PIXCLK_TMDS_ALWAYS_ONb);
 
 		WREG32_PLL(RADEON_PIXCLKS_CNTL, tmp);
-		udelay(16000);
+		mdelay(16);
 
 		tmp = RREG32_PLL(RADEON_VCLK_ECP_CNTL);
 		tmp &= ~(RADEON_PIXCLK_ALWAYS_ONb |
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 81fc100be7e1..2cad9fde92fc 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2845,7 +2845,7 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
 		case 4:
 			val = RBIOS16(index);
 			index += 2;
-			udelay(val * 1000);
+			mdelay(val);
 			break;
 		case 6:
 			slave_addr = id & 0xff;
@@ -3044,7 +3044,7 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
 					udelay(150);
 					break;
 				case 2:
-					udelay(1000);
+					mdelay(1);
 					break;
 				case 3:
 					while (tmp--) {
@@ -3075,13 +3075,13 @@ static void combios_parse_pll_table(struct drm_device *dev, uint16_t offset)
 					/*mclk_cntl |= 0x00001111;*//* ??? */
 					WREG32_PLL(RADEON_MCLK_CNTL,
 						   mclk_cntl);
-					udelay(10000);
+					mdelay(10);
 #endif
 					WREG32_PLL
 					    (RADEON_CLK_PWRMGT_CNTL,
 					     tmp &
 					     ~RADEON_CG_NO1_DEBUG_0);
-					udelay(10000);
+					mdelay(10);
 				}
 				break;
 			default:
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 85bcfc8923a7..3edec1c198e3 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -900,6 +900,10 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 	struct radeon_i2c_chan *i2c;
 	int ret;
 
+	/* don't add the mm_i2c bus unless hw_i2c is enabled */
+	if (rec->mm_i2c && (radeon_hw_i2c == 0))
+		return NULL;
+
 	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
 	if (i2c == NULL)
 		return NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 2f46e0c8df53..42db254f6bb0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -88,7 +88,7 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
 		lvds_pll_cntl |= RADEON_LVDS_PLL_EN;
 		WREG32(RADEON_LVDS_PLL_CNTL, lvds_pll_cntl);
-		udelay(1000);
+		mdelay(1);
 
 		lvds_pll_cntl = RREG32(RADEON_LVDS_PLL_CNTL);
 		lvds_pll_cntl &= ~RADEON_LVDS_PLL_RESET;
@@ -101,7 +101,7 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
 				   (backlight_level << RADEON_LVDS_BL_MOD_LEVEL_SHIFT));
 		if (is_mac)
 			lvds_gen_cntl |= RADEON_LVDS_BL_MOD_EN;
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
@@ -118,10 +118,10 @@ static void radeon_legacy_lvds_update(struct drm_encoder *encoder, int mode)
 			WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 			lvds_gen_cntl &= ~(RADEON_LVDS_ON | RADEON_LVDS_BLON | RADEON_LVDS_EN | RADEON_LVDS_DIGON);
 		}
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		WREG32(RADEON_LVDS_GEN_CNTL, lvds_gen_cntl);
 		WREG32_PLL(RADEON_PIXCLKS_CNTL, pixclks_cntl);
-		udelay(panel_pwr_delay * 1000);
+		mdelay(panel_pwr_delay);
 		break;
 	}
 
@@ -656,7 +656,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
 
 	WREG32(RADEON_DAC_MACRO_CNTL, tmp);
 
-	udelay(2000);
+	mdelay(2);
 
 	if (RREG32(RADEON_DAC_CNTL) & RADEON_DAC_CMP_OUTPUT)
 		found = connector_status_connected;
@@ -1499,7 +1499,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
 	tmp = dac_cntl2 | RADEON_DAC2_DAC2_CLK_SEL | RADEON_DAC2_CMP_EN;
 	WREG32(RADEON_DAC_CNTL2, tmp);
 
-	udelay(10000);
+	mdelay(10);
 
 	if (ASIC_IS_R300(rdev)) {
 		if (RREG32(RADEON_DAC_CNTL2) & RADEON_DAC2_CMP_OUT_B)
diff --git a/drivers/gpu/drm/savage/savage_state.c b/drivers/gpu/drm/savage/savage_state.c
index 031aaaf79ac2..b6d8608375cd 100644
--- a/drivers/gpu/drm/savage/savage_state.c
+++ b/drivers/gpu/drm/savage/savage_state.c
@@ -988,7 +988,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 	 * for locking on FreeBSD.
 	 */
 	if (cmdbuf->size) {
-		kcmd_addr = kmalloc(cmdbuf->size * 8, GFP_KERNEL);
+		kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
 		if (kcmd_addr == NULL)
 			return -ENOMEM;
 
@@ -1015,8 +1015,8 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 		cmdbuf->vb_addr = kvb_addr;
 	}
 	if (cmdbuf->nbox) {
-		kbox_addr = kmalloc(cmdbuf->nbox * sizeof(struct drm_clip_rect),
+		kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
 				    GFP_KERNEL);
 		if (kbox_addr == NULL) {
 			ret = -ENOMEM;
 			goto done;
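
The savage change is the standard kmalloc(n * size, ...) to kmalloc_array(n, size, ...) conversion: when the count comes from userspace, the multiplication can overflow and silently allocate a short buffer. A rough userspace analogue of the check kmalloc_array() performs (helper name hypothetical):

#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

/* Sketch of the overflow guard kmalloc_array() applies before
 * delegating to the underlying allocator. */
static void *checked_array_alloc(size_t n, size_t size)
{
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;	/* n * size would wrap around */
	return malloc(n * size);
}
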
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 145f13580ff0..9140236a0182 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -391,6 +391,7 @@ static ssize_t show_str(struct device *dev,
 		break;
 	default:
 		BUG();
+		val = "";
 	}
 
 	return sprintf(buf, "%s\n", val);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index be51037363c8..29b319db573e 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -710,13 +710,13 @@ static u16 pmbus_data2reg(struct pmbus_data *data,
  * If a negative value is stored in any of the referenced registers, this value
  * reflects an error code which will be returned.
  */
-static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+static int pmbus_get_boolean(struct pmbus_data *data, int index)
 {
 	u8 s1 = (index >> 24) & 0xff;
 	u8 s2 = (index >> 16) & 0xff;
 	u8 reg = (index >> 8) & 0xff;
 	u8 mask = index & 0xff;
-	int status;
+	int ret, status;
 	u8 regval;
 
 	status = data->status[reg];
@@ -725,7 +725,7 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
 
 	regval = status & mask;
 	if (!s1 && !s2)
-		*val = !!regval;
+		ret = !!regval;
 	else {
 		long v1, v2;
 		struct pmbus_sensor *sensor1, *sensor2;
@@ -739,9 +739,9 @@ static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
 
 		v1 = pmbus_reg2data(data, sensor1);
 		v2 = pmbus_reg2data(data, sensor2);
-		*val = !!(regval && v1 >= v2);
+		ret = !!(regval && v1 >= v2);
 	}
-	return 0;
+	return ret;
 }
 
 static ssize_t pmbus_show_boolean(struct device *dev,
@@ -750,11 +750,10 @@ static ssize_t pmbus_show_boolean(struct device *dev,
 	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
 	struct pmbus_data *data = pmbus_update_device(dev);
 	int val;
-	int err;
 
-	err = pmbus_get_boolean(data, attr->index, &val);
-	if (err)
-		return err;
+	val = pmbus_get_boolean(data, attr->index);
+	if (val < 0)
+		return val;
 	return snprintf(buf, PAGE_SIZE, "%d\n", val);
 }
 
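
The pmbus rework folds the *val out-parameter into the return value, the usual kernel idiom: a negative return is an -Exxx error code, a non-negative return carries the result. This only works because the boolean fits in the non-negative range of int. A schematic of the calling convention (names hypothetical):

/* The callee returns either a negative errno or the non-negative
 * value itself, so the caller needs no separate output pointer. */
static int read_boolean(int raw_status)
{
	if (raw_status < 0)
		return raw_status;	/* propagate the error code */
	return !!raw_status;		/* 0 or 1 on success */
}

/* Caller side:
 *	int val = read_boolean(status);
 *	if (val < 0)
 *		return val;
 *	... use val as 0/1 ...
 */
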
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index d3b778da3f86..c5f6be478bad 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -343,10 +343,11 @@ exit:
 	return err;
 }
 
-static int __init smsc47b397_find(unsigned short *addr)
+static int __init smsc47b397_find(void)
 {
 	u8 id, rev;
 	char *name;
+	unsigned short addr;
 
 	superio_enter();
 	id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
@@ -370,14 +371,14 @@ static int __init smsc47b397_find(unsigned short *addr)
 	rev = superio_inb(SUPERIO_REG_DEVREV);
 
 	superio_select(SUPERIO_REG_LD8);
-	*addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
+	addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
 		 | superio_inb(SUPERIO_REG_BASE_LSB);
 
 	pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
-		name, *addr, rev);
+		name, addr, rev);
 
 	superio_exit();
-	return 0;
+	return addr;
 }
 
 static int __init smsc47b397_init(void)
@@ -385,9 +386,10 @@ static int __init smsc47b397_init(void)
 	unsigned short address;
 	int ret;
 
-	ret = smsc47b397_find(&address);
-	if (ret)
+	ret = smsc47b397_find();
+	if (ret < 0)
 		return ret;
+	address = ret;
 
 	ret = platform_driver_register(&smsc47b397_driver);
 	if (ret)
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c590c1469793..b5aa38dd7ab9 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -491,10 +491,10 @@ static const struct attribute_group smsc47m1_group = {
 	.attrs = smsc47m1_attributes,
 };
 
-static int __init smsc47m1_find(unsigned short *addr,
-				struct smsc47m1_sio_data *sio_data)
+static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
 {
 	u8 val;
+	unsigned short addr;
 
 	superio_enter();
 	val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
@@ -546,9 +546,9 @@ static int __init smsc47m1_find(unsigned short *addr,
 	}
 
 	superio_select();
-	*addr = (superio_inb(SUPERIO_REG_BASE) << 8)
+	addr = (superio_inb(SUPERIO_REG_BASE) << 8)
 	      | superio_inb(SUPERIO_REG_BASE + 1);
-	if (*addr == 0) {
+	if (addr == 0) {
 		pr_info("Device address not set, will not use\n");
 		superio_exit();
 		return -ENODEV;
@@ -565,7 +565,7 @@ static int __init smsc47m1_find(unsigned short *addr,
 	}
 
 	superio_exit();
-	return 0;
+	return addr;
 }
 
 /* Restore device to its initial state */
@@ -938,13 +938,15 @@ static int __init sm_smsc47m1_init(void)
 	unsigned short address;
 	struct smsc47m1_sio_data sio_data;
 
-	if (smsc47m1_find(&address, &sio_data))
-		return -ENODEV;
+	err = smsc47m1_find(&sio_data);
+	if (err < 0)
+		return err;
+	address = err;
 
 	/* Sets global pdev as a side effect */
 	err = smsc47m1_device_add(address, &sio_data);
 	if (err)
-		goto exit;
+		return err;
 
 	err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
 	if (err)
@@ -955,7 +957,6 @@ static int __init sm_smsc47m1_init(void)
 exit_device:
 	platform_device_unregister(pdev);
 	smsc47m1_restore(&sio_data);
-exit:
 	return err;
 }
 
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index 37f42113af31..00e8f213f56e 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -182,7 +182,6 @@ static int i2c_dw_pci_resume(struct device *dev)
 	pci_restore_state(pdev);
 
 	i2c_dw_init(i2c);
-	i2c_dw_enable(i2c);
 	return 0;
 }
 
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 83b720ef6c34..246fdc151652 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -179,7 +179,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 {
 	struct ib_port_attr attr;
 	char *speed = "";
-	int rate = -1;		/* in deci-Gb/sec */
+	int rate;		/* in deci-Gb/sec */
 	ssize_t ret;
 
 	ret = ib_query_port(p->ibdev, p->port_num, &attr);
@@ -187,9 +187,6 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 		return ret;
 
 	switch (attr.active_speed) {
-	case IB_SPEED_SDR:
-		rate = 25;
-		break;
 	case IB_SPEED_DDR:
 		speed = " DDR";
 		rate = 50;
@@ -210,6 +207,10 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 		speed = " EDR";
 		rate = 250;
 		break;
+	case IB_SPEED_SDR:
+	default:		/* default to SDR for invalid rates */
+		rate = 25;
+		break;
 	}
 
 	rate *= ib_width_enum_to_int(attr.active_width);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 75d305629300..669673e81439 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -253,6 +253,11 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
 		if (out_mad->data[15] & 0x1)
 			props->active_speed = IB_SPEED_FDR10;
 	}
+
+	/* Avoid wrong speed value returned by FW if the IB link is down. */
+	if (props->state == IB_PORT_DOWN)
+		props->active_speed = IB_SPEED_SDR;
+
 out:
 	kfree(in_mad);
 	kfree(out_mad);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 69e2ad06e515..daf21b899999 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3232,6 +3232,7 @@ static void srpt_add_one(struct ib_device *device)
 	srq_attr.attr.max_wr = sdev->srq_size;
 	srq_attr.attr.max_sge = 1;
 	srq_attr.attr.srq_limit = 0;
+	srq_attr.srq_type = IB_SRQT_BASIC;
 
 	sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
 	if (IS_ERR(sdev->srq))
diff --git a/drivers/input/misc/da9052_onkey.c b/drivers/input/misc/da9052_onkey.c
index 34aebb8cd080..3c843cd725fa 100644
--- a/drivers/input/misc/da9052_onkey.c
+++ b/drivers/input/misc/da9052_onkey.c
@@ -95,7 +95,8 @@ static int __devinit da9052_onkey_probe(struct platform_device *pdev)
 	input_dev = input_allocate_device();
 	if (!onkey || !input_dev) {
 		dev_err(&pdev->dev, "Failed to allocate memory\n");
-		return -ENOMEM;
+		error = -ENOMEM;
+		goto err_free_mem;
 	}
 
 	onkey->input = input_dev;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index d2c0db159b18..479011004a11 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -486,7 +486,6 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
 	unsigned char *packet = psmouse->packet;
 
 	input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
-	input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
 	input_mt_report_pointer_emulation(dev, true);
 	input_sync(dev);
 }
@@ -967,6 +966,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
 	if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
 		return -1;
 
+	__set_bit(INPUT_PROP_POINTER, dev->propbit);
 	__set_bit(EV_KEY, dev->evbit);
 	__set_bit(EV_ABS, dev->evbit);
 	__clear_bit(EV_REL, dev->evbit);
@@ -1017,7 +1017,9 @@ static int elantech_set_input_params(struct psmouse *psmouse)
 		 */
 		psmouse_warn(psmouse, "couldn't query resolution data.\n");
 	}
-
+	/* v4 is clickpad, with only one button. */
+	__set_bit(INPUT_PROP_BUTTONPAD, dev->propbit);
+	__clear_bit(BTN_RIGHT, dev->keybit);
 	__set_bit(BTN_TOOL_QUADTAP, dev->keybit);
 	/* For X to recognize me as touchpad. */
 	input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
@@ -1245,6 +1247,8 @@ static void elantech_disconnect(struct psmouse *psmouse)
  */
 static int elantech_reconnect(struct psmouse *psmouse)
 {
+	psmouse_reset(psmouse);
+
 	if (elantech_detect(psmouse, 0))
 		return -1;
 
@@ -1324,6 +1328,8 @@ int elantech_init(struct psmouse *psmouse)
 	if (!etd)
 		return -ENOMEM;
 
+	psmouse_reset(psmouse);
+
 	etd->parity[0] = 1;
 	for (i = 1; i < 256; i++)
 		etd->parity[i] = etd->parity[i & (i - 1)] ^ 1;
diff --git a/drivers/input/mouse/gpio_mouse.c b/drivers/input/mouse/gpio_mouse.c
index a9ad8e1402be..39fe9b737cae 100644
--- a/drivers/input/mouse/gpio_mouse.c
+++ b/drivers/input/mouse/gpio_mouse.c
@@ -12,9 +12,9 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/input-polldev.h>
+#include <linux/gpio.h>
 #include <linux/gpio_mouse.h>
 
-#include <asm/gpio.h>
 
 /*
  * Timer function which is run every scan_ms ms when the device is opened.
diff --git a/drivers/input/mouse/sentelic.c b/drivers/input/mouse/sentelic.c
index a977bfaa6821..661a0ca3b3d6 100644
--- a/drivers/input/mouse/sentelic.c
+++ b/drivers/input/mouse/sentelic.c
@@ -741,6 +741,14 @@ static psmouse_ret_t fsp_process_byte(struct psmouse *psmouse)
 		}
 	} else {
 		/* SFAC packet */
+		if ((packet[0] & (FSP_PB0_LBTN|FSP_PB0_PHY_BTN)) ==
+			FSP_PB0_LBTN) {
+			/* On-pad click in SFAC mode should be handled
+			 * by userspace. On-pad clicks in MFMC mode
+			 * are real clickpad clicks, and not ignored.
+			 */
+			packet[0] &= ~FSP_PB0_LBTN;
+		}
 
 		/* no multi-finger information */
 		ad->last_mt_fgr = 0;
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 22b218018137..f3102494237d 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -304,7 +304,7 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 		return 0;
 
 	if (trackpoint_read(&psmouse->ps2dev, TP_EXT_BTN, &button_info)) {
-		printk(KERN_WARNING "trackpoint.c: failed to get extended button data\n");
+		psmouse_warn(psmouse, "failed to get extended button data\n");
 		button_info = 0;
 	}
 
@@ -326,16 +326,18 @@ int trackpoint_detect(struct psmouse *psmouse, bool set_properties)
 
 	error = sysfs_create_group(&ps2dev->serio->dev.kobj, &trackpoint_attr_group);
 	if (error) {
-		printk(KERN_ERR
-			"trackpoint.c: failed to create sysfs attributes, error: %d\n",
-			error);
+		psmouse_err(psmouse,
+			    "failed to create sysfs attributes, error: %d\n",
+			    error);
 		kfree(psmouse->private);
 		psmouse->private = NULL;
 		return -1;
 	}
 
-	printk(KERN_INFO "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
-		firmware_id, (button_info & 0xf0) >> 4, button_info & 0x0f);
+	psmouse_info(psmouse,
+		     "IBM TrackPoint firmware: 0x%02x, buttons: %d/%d\n",
+		     firmware_id,
+		     (button_info & 0xf0) >> 4, button_info & 0x0f);
 
 	return 0;
 }
diff --git a/drivers/input/touchscreen/tps6507x-ts.c b/drivers/input/touchscreen/tps6507x-ts.c
index 6c6f6d8ea9b4..f7eda3d00fad 100644
--- a/drivers/input/touchscreen/tps6507x-ts.c
+++ b/drivers/input/touchscreen/tps6507x-ts.c
@@ -1,6 +1,4 @@
 /*
- * drivers/input/touchscreen/tps6507x_ts.c
- *
  * Touchscreen driver for the tps6507x chip.
  *
  * Copyright (c) 2009 RidgeRun (todd.fischer@ridgerun.com)
@@ -376,4 +374,4 @@ module_platform_driver(tps6507x_ts_driver);
 MODULE_AUTHOR("Todd Fischer <todd.fischer@ridgerun.com>");
 MODULE_DESCRIPTION("TPS6507x - TouchScreen driver");
 MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:tps6507x-tsc");
+MODULE_ALIAS("platform:tps6507x-ts");
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index b3d6ac17272d..a6d9fd2858f7 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -176,7 +176,7 @@ static void if_close(struct tty_struct *tty, struct file *filp)
 	struct cardstate *cs = tty->driver_data;
 
 	if (!cs) { /* happens if we didn't find cs in open */
-		printk(KERN_DEBUG "%s: no cardstate\n", __func__);
+		gig_dbg(DEBUG_IF, "%s: no cardstate", __func__);
 		return;
 	}
 
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3d0dfa7a89a2..97e73e555d11 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -539,9 +539,6 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	bitmap->events_cleared = bitmap->mddev->events;
 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
 
-	bitmap->flags |= BITMAP_HOSTENDIAN;
-	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
-
 	kunmap_atomic(sb);
 
 	return 0;
@@ -1788,7 +1785,9 @@ int bitmap_load(struct mddev *mddev)
 		 * re-add of a missing device */
 		start = mddev->recovery_cp;
 
+	mutex_lock(&mddev->bitmap_info.mutex);
 	err = bitmap_init_from_disk(bitmap, start);
+	mutex_unlock(&mddev->bitmap_info.mutex);
 
 	if (err)
 		goto out;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d35e4c991e38..15dd59b84e94 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1712,6 +1712,7 @@ static int process_checks(struct r1bio *r1_bio)
 	struct r1conf *conf = mddev->private;
 	int primary;
 	int i;
+	int vcnt;
 
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -1721,9 +1722,9 @@ static int process_checks(struct r1bio *r1_bio)
 			break;
 		}
 	r1_bio->read_disk = primary;
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
-		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fff782189e48..c8dbb84d5357 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1788,6 +1788,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1788 struct r10conf *conf = mddev->private; 1788 struct r10conf *conf = mddev->private;
1789 int i, first; 1789 int i, first;
1790 struct bio *tbio, *fbio; 1790 struct bio *tbio, *fbio;
1791 int vcnt;
1791 1792
1792 atomic_set(&r10_bio->remaining, 1); 1793 atomic_set(&r10_bio->remaining, 1);
1793 1794
@@ -1802,10 +1803,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1802 first = i; 1803 first = i;
1803 fbio = r10_bio->devs[i].bio; 1804 fbio = r10_bio->devs[i].bio;
1804 1805
1806 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
1805 /* now find blocks with errors */ 1807 /* now find blocks with errors */
1806 for (i=0 ; i < conf->copies ; i++) { 1808 for (i=0 ; i < conf->copies ; i++) {
1807 int j, d; 1809 int j, d;
1808 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1809 1810
1810 tbio = r10_bio->devs[i].bio; 1811 tbio = r10_bio->devs[i].bio;
1811 1812
@@ -1871,7 +1872,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
1871 */ 1872 */
1872 for (i = 0; i < conf->copies; i++) { 1873 for (i = 0; i < conf->copies; i++) {
1873 int j, d; 1874 int j, d;
1874 int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
1875 1875
1876 tbio = r10_bio->devs[i].repl_bio; 1876 tbio = r10_bio->devs[i].repl_bio;
1877 if (!tbio || !tbio->bi_end_io) 1877 if (!tbio || !tbio->bi_end_io)
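
Both raid1 and raid10 replace a truncating shift with a round-up when computing the number of bio vectors: with 4 KiB pages, PAGE_SHIFT - 9 == 3, so a page holds 8 sectors, and a 10-sector bio needs 2 pages even though 10 >> 3 == 1. A standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* 4 KiB pages */
	unsigned int per_page = 1u << (page_shift - 9);	/* 8 sectors/page */
	unsigned int sectors = 10;

	unsigned int old_vcnt = sectors >> (page_shift - 9);
	unsigned int new_vcnt = (sectors + per_page - 1) >> (page_shift - 9);

	printf("old=%u (too small), fixed=%u\n", old_vcnt, new_vcnt);
	return 0;
}
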
diff --git a/drivers/media/dvb/dvb-core/dvb_frontend.c b/drivers/media/dvb/dvb-core/dvb_frontend.c
index 4555baa383b2..39696c6a4ed7 100644
--- a/drivers/media/dvb/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb/dvb-core/dvb_frontend.c
@@ -143,10 +143,12 @@ struct dvb_frontend_private {
 static void dvb_frontend_wakeup(struct dvb_frontend *fe);
 static int dtv_get_frontend(struct dvb_frontend *fe,
 			    struct dvb_frontend_parameters *p_out);
+static int dtv_property_legacy_params_sync(struct dvb_frontend *fe,
+					   struct dvb_frontend_parameters *p);
 
 static bool has_get_frontend(struct dvb_frontend *fe)
 {
-	return fe->ops.get_frontend;
+	return fe->ops.get_frontend != NULL;
 }
 
 /*
@@ -697,6 +699,7 @@ restart:
 			fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN;
 			fepriv->delay = HZ / 2;
 		}
+		dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
 		fe->ops.read_status(fe, &s);
 		if (s != fepriv->status) {
 			dvb_frontend_add_event(fe, s); /* update event list */
@@ -1833,6 +1836,13 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
 		return -EINVAL;
 
 	/*
+	 * Initialize output parameters to match the values given by
+	 * the user. FE_SET_FRONTEND triggers an initial frontend event
+	 * with status = 0, which copies output parameters to userspace.
+	 */
+	dtv_property_legacy_params_sync(fe, &fepriv->parameters_out);
+
+	/*
 	 * Be sure that the bandwidth will be filled for all
 	 * non-satellite systems, as tuners need to know what
 	 * low pass/Nyquist half filter should be applied, in
diff --git a/drivers/media/dvb/dvb-usb/it913x.c b/drivers/media/dvb/dvb-usb/it913x.c
index 3b7b102f20ae..482d249ca7f3 100644
--- a/drivers/media/dvb/dvb-usb/it913x.c
+++ b/drivers/media/dvb/dvb-usb/it913x.c
@@ -238,12 +238,27 @@ static int it913x_read_reg(struct usb_device *udev, u32 reg)
 
 static u32 it913x_query(struct usb_device *udev, u8 pro)
 {
-	int ret;
+	int ret, i;
 	u8 data[4];
-	ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
-		0x1222, 0, &data[0], 3);
+	u8 ver;
+
+	for (i = 0; i < 5; i++) {
+		ret = it913x_io(udev, READ_LONG, pro, CMD_DEMOD_READ,
+			0x1222, 0, &data[0], 3);
+		ver = data[0];
+		if (ver > 0 && ver < 3)
+			break;
+		msleep(100);
+	}
 
-	it913x_config.chip_ver = data[0];
+	if (ver < 1 || ver > 2) {
+		info("Failed to identify chip version applying 1");
+		it913x_config.chip_ver = 0x1;
+		it913x_config.chip_type = 0x9135;
+		return 0;
+	}
+
+	it913x_config.chip_ver = ver;
 	it913x_config.chip_type = (u16)(data[2] << 8) + data[1];
 
 	info("Chip Version=%02x Chip Type=%04x", it913x_config.chip_ver,
@@ -660,30 +675,41 @@ static int it913x_download_firmware(struct usb_device *udev,
 		if ((packet_size > min_pkt) || (i == fw->size)) {
 			fw_data = (u8 *)(fw->data + pos);
 			pos += packet_size;
-			if (packet_size > 0)
-				ret |= it913x_io(udev, WRITE_DATA,
+			if (packet_size > 0) {
+				ret = it913x_io(udev, WRITE_DATA,
 					DEV_0, CMD_SCATTER_WRITE, 0,
 					0, fw_data, packet_size);
+				if (ret < 0)
+					break;
+			}
 			udelay(1000);
 		}
 		}
 		i++;
 	}
 
-	ret |= it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
-
-	msleep(100);
-
 	if (ret < 0)
-		info("FRM Firmware Download Failed (%04x)" , ret);
+		info("FRM Firmware Download Failed (%d)" , ret);
 	else
 		info("FRM Firmware Download Completed - Resetting Device");
 
-	ret |= it913x_return_status(udev);
+	msleep(30);
+
+	ret = it913x_io(udev, WRITE_CMD, DEV_0, CMD_BOOT, 0, 0, NULL, 0);
+	if (ret < 0)
+		info("FRM Device not responding to reboot");
+
+	ret = it913x_return_status(udev);
+	if (ret == 0) {
+		info("FRM Failed to reboot device");
+		return -ENODEV;
+	}
 
 	msleep(30);
 
-	ret |= it913x_wr_reg(udev, DEV_0, I2C_CLK, I2C_CLK_400);
+	ret = it913x_wr_reg(udev, DEV_0, I2C_CLK, I2C_CLK_400);
+
+	msleep(30);
 
 	/* Tuner function */
 	if (it913x_config.dual_mode)
@@ -901,5 +927,5 @@ module_usb_driver(it913x_driver);
 
 MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
 MODULE_DESCRIPTION("it913x USB 2 Driver");
-MODULE_VERSION("1.27");
+MODULE_VERSION("1.28");
 MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 5452beef8e11..989e556913ed 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1763,13 +1763,13 @@ static int ivtv_decoder_ioctls(struct file *filp, unsigned int cmd, void *arg)
 		IVTV_DEBUG_IOCTL("AUDIO_CHANNEL_SELECT\n");
 		if (iarg > AUDIO_STEREO_SWAPPED)
 			return -EINVAL;
-		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg);
+		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_playback, iarg + 1);
 
 	case AUDIO_BILINGUAL_CHANNEL_SELECT:
 		IVTV_DEBUG_IOCTL("AUDIO_BILINGUAL_CHANNEL_SELECT\n");
 		if (iarg > AUDIO_STEREO_SWAPPED)
 			return -EINVAL;
-		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg);
+		return v4l2_ctrl_s_ctrl(itv->ctrl_audio_multilingual_playback, iarg + 1);
 
 	default:
 		return -EINVAL;
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c
index 4a44f9a1bae0..b76b0ac0958f 100644
--- a/drivers/media/video/uvc/uvc_video.c
+++ b/drivers/media/video/uvc/uvc_video.c
@@ -468,22 +468,30 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
 	spin_unlock_irqrestore(&stream->clock.lock, flags);
 }
 
-static int uvc_video_clock_init(struct uvc_streaming *stream)
+static void uvc_video_clock_reset(struct uvc_streaming *stream)
 {
 	struct uvc_clock *clock = &stream->clock;
 
-	spin_lock_init(&clock->lock);
 	clock->head = 0;
 	clock->count = 0;
-	clock->size = 32;
 	clock->last_sof = -1;
 	clock->sof_offset = -1;
+}
+
+static int uvc_video_clock_init(struct uvc_streaming *stream)
+{
+	struct uvc_clock *clock = &stream->clock;
+
+	spin_lock_init(&clock->lock);
+	clock->size = 32;
 
 	clock->samples = kmalloc(clock->size * sizeof(*clock->samples),
 				 GFP_KERNEL);
 	if (clock->samples == NULL)
 		return -ENOMEM;
 
+	uvc_video_clock_reset(stream);
+
 	return 0;
 }
@@ -1424,8 +1432,6 @@ static void uvc_uninit_video(struct uvc_streaming *stream, int free_buffers)
 
 	if (free_buffers)
 		uvc_free_urb_buffers(stream);
-
-	uvc_video_clock_cleanup(stream);
 }
 
 /*
@@ -1555,10 +1561,6 @@ static int uvc_init_video(struct uvc_streaming *stream, gfp_t gfp_flags)
 
 	uvc_video_stats_start(stream);
 
-	ret = uvc_video_clock_init(stream);
-	if (ret < 0)
-		return ret;
-
 	if (intf->num_altsetting > 1) {
 		struct usb_host_endpoint *best_ep = NULL;
 		unsigned int best_psize = 3 * 1024;
@@ -1683,6 +1685,8 @@ int uvc_video_resume(struct uvc_streaming *stream, int reset)
 
 	stream->frozen = 0;
 
+	uvc_video_clock_reset(stream);
+
 	ret = uvc_commit_video(stream, &stream->ctrl);
 	if (ret < 0) {
 		uvc_queue_enable(&stream->queue, 0);
@@ -1819,25 +1823,35 @@ int uvc_video_enable(struct uvc_streaming *stream, int enable)
 		uvc_uninit_video(stream, 1);
 		usb_set_interface(stream->dev->udev, stream->intfnum, 0);
 		uvc_queue_enable(&stream->queue, 0);
+		uvc_video_clock_cleanup(stream);
 		return 0;
 	}
 
-	ret = uvc_queue_enable(&stream->queue, 1);
+	ret = uvc_video_clock_init(stream);
 	if (ret < 0)
 		return ret;
 
+	ret = uvc_queue_enable(&stream->queue, 1);
+	if (ret < 0)
+		goto error_queue;
+
 	/* Commit the streaming parameters. */
 	ret = uvc_commit_video(stream, &stream->ctrl);
-	if (ret < 0) {
-		uvc_queue_enable(&stream->queue, 0);
-		return ret;
-	}
+	if (ret < 0)
+		goto error_commit;
 
 	ret = uvc_init_video(stream, GFP_KERNEL);
-	if (ret < 0) {
-		usb_set_interface(stream->dev->udev, stream->intfnum, 0);
-		uvc_queue_enable(&stream->queue, 0);
-	}
+	if (ret < 0)
+		goto error_video;
+
+	return 0;
+
+error_video:
+	usb_set_interface(stream->dev->udev, stream->intfnum, 0);
error_commit:
+	uvc_queue_enable(&stream->queue, 0);
+error_queue:
+	uvc_video_clock_cleanup(stream);
 
 	return ret;
 }
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index ebc1e8658226..5be32489714f 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2788,6 +2788,7 @@ static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
 	.constraints = {
 		.name = "db8500-vape",
 		.valid_ops_mask = REGULATOR_CHANGE_STATUS,
+		.always_on = true,
 	},
 	.consumer_supplies = db8500_vape_consumers,
 	.num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
index 94eb05b1afdf..58fc65f5c817 100644
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -106,16 +106,14 @@ static int mtdchar_open(struct inode *inode, struct file *file)
 	}
 
 	if (mtd->type == MTD_ABSENT) {
-		put_mtd_device(mtd);
 		ret = -ENODEV;
-		goto out;
+		goto out1;
 	}
 
 	mtd_ino = iget_locked(mnt->mnt_sb, devnum);
 	if (!mtd_ino) {
-		put_mtd_device(mtd);
 		ret = -ENOMEM;
-		goto out;
+		goto out1;
 	}
 	if (mtd_ino->i_state & I_NEW) {
 		mtd_ino->i_private = mtd;
@@ -127,23 +125,25 @@ static int mtdchar_open(struct inode *inode, struct file *file)
 
 	/* You can't open it RW if it's not a writeable device */
 	if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
-		iput(mtd_ino);
-		put_mtd_device(mtd);
 		ret = -EACCES;
-		goto out;
+		goto out2;
 	}
 
 	mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
 	if (!mfi) {
-		iput(mtd_ino);
-		put_mtd_device(mtd);
 		ret = -ENOMEM;
-		goto out;
+		goto out2;
 	}
 	mfi->ino = mtd_ino;
 	mfi->mtd = mtd;
 	file->private_data = mfi;
+	mutex_unlock(&mtd_mutex);
+	return 0;
 
+out2:
+	iput(mtd_ino);
+out1:
+	put_mtd_device(mtd);
 out:
 	mutex_unlock(&mtd_mutex);
 	simple_release_fs(&mnt, &count);
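
The mtdchar_open() rewrite replaces duplicated cleanup before every early return with the kernel's standard unwind ladder: acquire resources in order, and on failure jump to a label that releases exactly what was acquired so far. A schematic with placeholder resource names:

/* Placeholder acquire/release pair; the shape is what matters. */
static int acquire_a(void);
static int acquire_b(void);
static void release_a(void);

static int open_example(void)
{
	int ret;

	ret = acquire_a();
	if (ret)
		goto out;	/* nothing to undo yet */
	ret = acquire_b();
	if (ret)
		goto out_a;	/* undo only step A */
	return 0;		/* success: keep everything */

out_a:
	release_a();
out:
	return ret;
}
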
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 215eb2536b1e..2504ab005589 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -118,15 +118,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
 	if (--sc->ps_usecount != 0)
 		goto unlock;
 
-	if (sc->ps_flags & PS_WAIT_FOR_TX_ACK)
-		goto unlock;
-
-	if (sc->ps_idle)
+	if (sc->ps_idle && (sc->ps_flags & PS_WAIT_FOR_TX_ACK))
 		mode = ATH9K_PM_FULL_SLEEP;
 	else if (sc->ps_enabled &&
 		 !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
 				   PS_WAIT_FOR_CAB |
-				   PS_WAIT_FOR_PSPOLL_DATA)))
+				   PS_WAIT_FOR_PSPOLL_DATA |
+				   PS_WAIT_FOR_TX_ACK)))
 		mode = ATH9K_PM_NETWORK_SLEEP;
 	else
 		goto unlock;
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index fc9901e027c1..90cc5e772650 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1062,11 +1062,6 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
 
 	set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
 
-	/*
-	 * Register the extra components.
-	 */
-	rt2x00rfkill_register(rt2x00dev);
-
 	return 0;
 }
 
@@ -1210,6 +1205,7 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
 	rt2x00link_register(rt2x00dev);
 	rt2x00leds_register(rt2x00dev);
 	rt2x00debug_register(rt2x00dev);
+	rt2x00rfkill_register(rt2x00dev);
 
 	return 0;
 
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 510023554e5f..e54488db0e10 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -838,7 +838,10 @@ void rtl_get_tcb_desc(struct ieee80211_hw *hw,
 	__le16 fc = hdr->frame_control;
 
 	txrate = ieee80211_get_tx_rate(hw, info);
-	tcb_desc->hw_rate = txrate->hw_value;
+	if (txrate)
+		tcb_desc->hw_rate = txrate->hw_value;
+	else
+		tcb_desc->hw_rate = 0;
 
 	if (ieee80211_is_data(fc)) {
 		/*
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 07dd38efe62a..288b035a3579 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -912,8 +912,13 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
 	memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
 	ring = &rtlpci->tx_ring[BEACON_QUEUE];
 	pskb = __skb_dequeue(&ring->queue);
-	if (pskb)
+	if (pskb) {
+		struct rtl_tx_desc *entry = &ring->desc[ring->idx];
+		pci_unmap_single(rtlpci->pdev, rtlpriv->cfg->ops->get_desc(
+				 (u8 *) entry, true, HW_DESC_TXBUFF_ADDR),
+				 pskb->len, PCI_DMA_TODEVICE);
 		kfree_skb(pskb);
+	}
 
 	/*NB: the beacon data buffer must be 32-bit aligned. */
 	pskb = ieee80211_beacon_get(hw, mac->vif);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index 4898c502974d..480862c07f92 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -91,7 +91,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
 	u8 tid;
 	struct rtl_priv *rtlpriv = rtl_priv(hw);
 	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
-	static int header_print;
 
 	rtlpriv->dm.dm_initialgain_enable = true;
 	rtlpriv->dm.dm_flag = 0;
@@ -171,10 +170,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
 	for (tid = 0; tid < 8; tid++)
 		skb_queue_head_init(&rtlpriv->mac80211.skb_waitq[tid]);
 
-	/* Only load firmware for first MAC */
-	if (header_print)
-		return 0;
-
 	/* for firmware buf */
 	rtlpriv->rtlhal.pfirmware = vzalloc(0x8000);
 	if (!rtlpriv->rtlhal.pfirmware) {
@@ -186,7 +181,6 @@ static int rtl92d_init_sw_vars(struct ieee80211_hw *hw)
 	rtlpriv->max_fw_size = 0x8000;
 	pr_info("Driver for Realtek RTL8192DE WLAN interface\n");
 	pr_info("Loading firmware file %s\n", rtlpriv->cfg->fw_name);
-	header_print++;
 
 	/* request fw */
 	err = request_firmware_nowait(THIS_MODULE, 1, rtlpriv->cfg->fw_name,
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 2e1e352864bb..d04dbda13f5a 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -124,46 +124,38 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
 	return status;
 }
 
-static u32 _usb_read_sync(struct usb_device *udev, u32 addr, u16 len)
+static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
 {
+	struct device *dev = rtlpriv->io.dev;
+	struct usb_device *udev = to_usb_device(dev);
 	u8 request;
 	u16 wvalue;
 	u16 index;
-	u32 *data;
-	u32 ret;
+	__le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
 
-	data = kmalloc(sizeof(u32), GFP_KERNEL);
-	if (!data)
-		return -ENOMEM;
 	request = REALTEK_USB_VENQT_CMD_REQ;
 	index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
 
 	wvalue = (u16)addr;
 	_usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
-	ret = le32_to_cpu(*data);
-	kfree(data);
-	return ret;
+	if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+		rtlpriv->usb_data_index = 0;
+	return le32_to_cpu(*data);
 }
 
 static u8 _usb_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-	struct device *dev = rtlpriv->io.dev;
-
-	return (u8)_usb_read_sync(to_usb_device(dev), addr, 1);
+	return (u8)_usb_read_sync(rtlpriv, addr, 1);
 }
 
 static u16 _usb_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-	struct device *dev = rtlpriv->io.dev;
-
-	return (u16)_usb_read_sync(to_usb_device(dev), addr, 2);
+	return (u16)_usb_read_sync(rtlpriv, addr, 2);
 }
 
 static u32 _usb_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
 {
-	struct device *dev = rtlpriv->io.dev;
-
-	return _usb_read_sync(to_usb_device(dev), addr, 4);
+	return _usb_read_sync(rtlpriv, addr, 4);
 }
 
 static void _usb_write_async(struct usb_device *udev, u32 addr, u32 val,
@@ -955,6 +947,11 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
 		return -ENOMEM;
 	}
 	rtlpriv = hw->priv;
+	rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
+				    GFP_KERNEL);
+	if (!rtlpriv->usb_data)
+		return -ENOMEM;
+	rtlpriv->usb_data_index = 0;
 	init_completion(&rtlpriv->firmware_loading_complete);
 	SET_IEEE80211_DEV(hw, &intf->dev);
 	udev = interface_to_usbdev(intf);
@@ -1025,6 +1022,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
 	/* rtl_deinit_rfkill(hw); */
 	rtl_usb_deinit(hw);
 	rtl_deinit_core(hw);
+	kfree(rtlpriv->usb_data);
 	rtlpriv->cfg->ops->deinit_sw_leds(hw);
 	rtlpriv->cfg->ops->deinit_sw_vars(hw);
 	_rtl_usb_io_handler_release(hw);
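The usb.c hunks above replace a kmalloc()/kfree() pair on every synchronous register read with a pool of RTL_USB_MAX_RX_COUNT buffers allocated once at probe and freed at disconnect; each read takes the next slot and wraps the index. A minimal user-space sketch of that round-robin slot pattern follows; struct slot_ring and the function names are invented for the illustration, not taken from the driver.

#include <stdint.h>
#include <stdlib.h>

#define RING_SLOTS 100			/* counterpart of RTL_USB_MAX_RX_COUNT */

struct slot_ring {
	uint32_t *slots;		/* allocated once at start-up */
	int index;
};

static int slot_ring_init(struct slot_ring *r)
{
	r->slots = calloc(RING_SLOTS, sizeof(*r->slots));
	if (!r->slots)
		return -1;
	r->index = 0;
	return 0;
}

static uint32_t *slot_ring_next(struct slot_ring *r)
{
	uint32_t *slot = &r->slots[r->index];

	/* advance and wrap instead of allocating per call */
	if (++r->index >= RING_SLOTS)
		r->index = 0;
	return slot;
}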
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
index b591614c3b9b..28ebc69218a3 100644
--- a/drivers/net/wireless/rtlwifi/wifi.h
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -67,7 +67,7 @@
 #define QOS_QUEUE_NUM				4
 #define RTL_MAC80211_NUM_QUEUE			5
 #define REALTEK_USB_VENQT_MAX_BUF_SIZE		254
-
+#define RTL_USB_MAX_RX_COUNT			100
 #define QBSS_LOAD_SIZE				5
 #define MAX_WMMELE_LENGTH			64
 
@@ -1629,6 +1629,10 @@ struct rtl_priv {
 		interface or hardware */
 	unsigned long status;
 
+	/* data buffer pointer for USB reads */
+	__le32 *usb_data;
+	int usb_data_index;
+
 	/*This must be the last item so
 	that it points to the data allocated
 	beyond this structure like:
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index bba81216b4db..bf984b6dc477 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -140,7 +140,7 @@ int of_gpio_simple_xlate(struct gpio_chip *gc,
 	if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
 		return -EINVAL;
 
-	if (gpiospec->args[0] > gc->ngpio)
+	if (gpiospec->args[0] >= gc->ngpio)
 		return -EINVAL;
 
 	if (flags)
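The one-character gpio.c fix is a classic zero-based off-by-one: a chip with ngpio lines has valid offsets 0 through ngpio - 1, so offset == ngpio must be rejected as well. In general terms (hypothetical names):

	/* ngpio == 32 means valid offsets are 0..31; 32 itself is out of range */
	if (offset >= ngpio)
		return -EINVAL;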
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 815674415267..d20f1334792b 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -967,16 +967,47 @@ pci_save_state(struct pci_dev *dev)
 	return 0;
 }
 
+static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
+				     u32 saved_val, int retry)
+{
+	u32 val;
+
+	pci_read_config_dword(pdev, offset, &val);
+	if (val == saved_val)
+		return;
+
+	for (;;) {
+		dev_dbg(&pdev->dev, "restoring config space at offset "
+			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
+		pci_write_config_dword(pdev, offset, saved_val);
+		if (retry-- <= 0)
+			return;
+
+		pci_read_config_dword(pdev, offset, &val);
+		if (val == saved_val)
+			return;
+
+		mdelay(1);
+	}
+}
+
+static void pci_restore_config_space(struct pci_dev *pdev, int start, int end,
+				     int retry)
+{
+	int index;
+
+	for (index = end; index >= start; index--)
+		pci_restore_config_dword(pdev, 4 * index,
+					 pdev->saved_config_space[index],
+					 retry);
+}
+
 /**
  * pci_restore_state - Restore the saved state of a PCI device
  * @dev: - PCI device that we're dealing with
  */
 void pci_restore_state(struct pci_dev *dev)
 {
-	int i;
-	u32 val;
-	int tries;
-
 	if (!dev->state_saved)
 		return;
 
@@ -984,24 +1015,14 @@ void pci_restore_state(struct pci_dev *dev)
 	pci_restore_pcie_state(dev);
 	pci_restore_ats_state(dev);
 
+	pci_restore_config_space(dev, 10, 15, 0);
 	/*
 	 * The Base Address register should be programmed before the command
 	 * register(s)
 	 */
-	for (i = 15; i >= 0; i--) {
-		pci_read_config_dword(dev, i * 4, &val);
-		tries = 10;
-		while (tries && val != dev->saved_config_space[i]) {
-			dev_dbg(&dev->dev, "restoring config "
-				"space at offset %#x (was %#x, writing %#x)\n",
-				i, val, (int)dev->saved_config_space[i]);
-			pci_write_config_dword(dev, i * 4,
-				dev->saved_config_space[i]);
-			pci_read_config_dword(dev, i * 4, &val);
-			mdelay(10);
-			tries--;
-		}
-	}
+	pci_restore_config_space(dev, 4, 9, 10);
+	pci_restore_config_space(dev, 0, 3, 0);
+
 	pci_restore_pcix_state(dev);
 	pci_restore_msi_state(dev);
 	pci_restore_iov_state(dev);
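The pci.c refactor splits the old open-coded loop into two helpers: one restores a single dword and re-reads it to confirm the device accepted the write, retrying with a 1 ms delay up to a caller-supplied budget; the other walks a dword range from the highest offset down, so the BARs (dwords 4-9) are restored with retries before the command register. A self-contained sketch of the verify-and-retry idiom, with read_reg()/write_reg()/delay_ms() as stand-ins for the real config-space accessors:

#include <stdint.h>

static uint32_t regs[16];		/* stand-in for a device's config space */

static uint32_t read_reg(int offset)		{ return regs[offset / 4]; }
static void write_reg(int offset, uint32_t v)	{ regs[offset / 4] = v; }
static void delay_ms(int ms)			{ (void)ms; /* platform sleep */ }

static void restore_verified(int offset, uint32_t saved, int retry)
{
	uint32_t val = read_reg(offset);

	if (val == saved)
		return;			/* already matches, skip the write */

	for (;;) {
		write_reg(offset, saved);
		if (retry-- <= 0)
			return;		/* retry budget exhausted */
		val = read_reg(offset);
		if (val == saved)
			return;		/* write took effect */
		delay_ms(1);		/* give the device time to settle */
	}
}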
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 53969af17558..81fd606e47bc 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -214,7 +214,7 @@ static struct of_device_id __devinitdata of_anatop_regulator_match_tbl[] = {
 	{ /* end */ }
 };
 
-static struct platform_driver anatop_regulator = {
+static struct platform_driver anatop_regulator_driver = {
 	.driver = {
 		.name = "anatop_regulator",
 		.owner = THIS_MODULE,
@@ -226,13 +226,13 @@ static struct platform_driver anatop_regulator = {
 
 static int __init anatop_regulator_init(void)
 {
-	return platform_driver_register(&anatop_regulator);
+	return platform_driver_register(&anatop_regulator_driver);
 }
 postcore_initcall(anatop_regulator_init);
 
 static void __exit anatop_regulator_exit(void)
 {
-	platform_driver_unregister(&anatop_regulator);
+	platform_driver_unregister(&anatop_regulator_driver);
 }
 module_exit(anatop_regulator_exit);
 
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index 550292304b0f..c9f890b088da 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -213,7 +213,6 @@ static struct platform_driver efi_rtc_driver = {
 		.name = "rtc-efi",
 		.owner = THIS_MODULE,
 	},
-	.probe = efi_rtc_probe,
 	.remove = __exit_p(efi_rtc_remove),
 };
 
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 692de7360e94..684ef4bbfce4 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -339,8 +339,7 @@ static int pl031_probe(struct amba_device *adev, const struct amba_id *id)
 	dev_dbg(&adev->dev, "revision = 0x%01x\n", ldata->hw_revision);
 
 	/* Enable the clockwatch on ST Variants */
-	if ((ldata->hw_designer == AMBA_VENDOR_ST) &&
-	    (ldata->hw_revision > 1))
+	if (ldata->hw_designer == AMBA_VENDOR_ST)
 		writel(readl(ldata->base + RTC_CR) | RTC_CR_CWEN,
 		       ldata->base + RTC_CR);
 
diff --git a/drivers/rtc/rtc-r9701.c b/drivers/rtc/rtc-r9701.c
index 7f8e6c247935..33b6ba0afa0d 100644
--- a/drivers/rtc/rtc-r9701.c
+++ b/drivers/rtc/rtc-r9701.c
@@ -122,6 +122,7 @@ static const struct rtc_class_ops r9701_rtc_ops = {
 static int __devinit r9701_probe(struct spi_device *spi)
 {
 	struct rtc_device *rtc;
+	struct rtc_time dt;
 	unsigned char tmp;
 	int res;
 
@@ -132,6 +133,27 @@ static int __devinit r9701_probe(struct spi_device *spi)
 		return -ENODEV;
 	}
 
+	/*
+	 * The device seems to be present. Now check if the registers
+	 * contain invalid values. If so, try to write a default date:
+	 * 2000/1/1 00:00:00
+	 */
+	r9701_get_datetime(&spi->dev, &dt);
+	if (rtc_valid_tm(&dt)) {
+		dev_info(&spi->dev, "trying to repair invalid date/time\n");
+		dt.tm_sec = 0;
+		dt.tm_min = 0;
+		dt.tm_hour = 0;
+		dt.tm_mday = 1;
+		dt.tm_mon = 0;
+		dt.tm_year = 100;
+
+		if (r9701_set_datetime(&spi->dev, &dt)) {
+			dev_err(&spi->dev, "cannot repair RTC register\n");
+			return -ENODEV;
+		}
+	}
+
 	rtc = rtc_device_register("r9701",
 				&spi->dev, &r9701_rtc_ops, THIS_MODULE);
 	if (IS_ERR(rtc))
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 9ccea134a996..3f3a29752369 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -40,6 +40,10 @@ enum s3c_cpu_type {
 	TYPE_S3C64XX,
 };
 
+struct s3c_rtc_drv_data {
+	int cpu_type;
+};
+
 /* I have yet to find an S3C implementation with more than one
  * of these rtc blocks in */
 
@@ -446,10 +450,12 @@ static const struct of_device_id s3c_rtc_dt_match[];
 static inline int s3c_rtc_get_driver_data(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
+	struct s3c_rtc_drv_data *data;
 	if (pdev->dev.of_node) {
 		const struct of_device_id *match;
 		match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node);
-		return match->data;
+		data = (struct s3c_rtc_drv_data *) match->data;
+		return data->cpu_type;
 	}
 #endif
 	return platform_get_device_id(pdev)->driver_data;
@@ -664,20 +670,27 @@ static int s3c_rtc_resume(struct platform_device *pdev)
 #define s3c_rtc_resume  NULL
 #endif
 
+static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
+	[TYPE_S3C2410] = { TYPE_S3C2410 },
+	[TYPE_S3C2416] = { TYPE_S3C2416 },
+	[TYPE_S3C2443] = { TYPE_S3C2443 },
+	[TYPE_S3C64XX] = { TYPE_S3C64XX },
+};
+
 #ifdef CONFIG_OF
 static const struct of_device_id s3c_rtc_dt_match[] = {
 	{
-		.compatible = "samsung,s3c2410-rtc"
-		.data = TYPE_S3C2410,
+		.compatible = "samsung,s3c2410-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2410],
 	}, {
-		.compatible = "samsung,s3c2416-rtc"
-		.data = TYPE_S3C2416,
+		.compatible = "samsung,s3c2416-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2416],
 	}, {
-		.compatible = "samsung,s3c2443-rtc"
-		.data = TYPE_S3C2443,
+		.compatible = "samsung,s3c2443-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C2443],
 	}, {
-		.compatible = "samsung,s3c6410-rtc"
-		.data = TYPE_S3C64XX,
+		.compatible = "samsung,s3c6410-rtc",
+		.data = &s3c_rtc_drv_data_array[TYPE_S3C64XX],
 	},
 	{},
 };
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index 4c2c6df2a9ef..258abeabf624 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -112,6 +112,7 @@ static const u8 twl6030_rtc_reg_map[] = {
 #define BIT_RTC_CTRL_REG_TEST_MODE_M             0x10
 #define BIT_RTC_CTRL_REG_SET_32_COUNTER_M        0x20
 #define BIT_RTC_CTRL_REG_GET_TIME_M              0x40
+#define BIT_RTC_CTRL_REG_RTC_V_OPT               0x80
 
 /* RTC_STATUS_REG bitfields */
 #define BIT_RTC_STATUS_REG_RUN_M                 0x02
@@ -235,25 +236,57 @@ static int twl_rtc_read_time(struct device *dev, struct rtc_time *tm)
 	unsigned char rtc_data[ALL_TIME_REGS + 1];
 	int ret;
 	u8 save_control;
+	u8 rtc_control;
 
 	ret = twl_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
-	if (ret < 0)
+	if (ret < 0) {
+		dev_err(dev, "%s: reading CTRL_REG, error %d\n", __func__, ret);
 		return ret;
+	}
+	/* for twl6030/32 make sure BIT_RTC_CTRL_REG_GET_TIME_M is clear */
+	if (twl_class_is_6030()) {
+		if (save_control & BIT_RTC_CTRL_REG_GET_TIME_M) {
+			save_control &= ~BIT_RTC_CTRL_REG_GET_TIME_M;
+			ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+			if (ret < 0) {
+				dev_err(dev, "%s clr GET_TIME, error %d\n",
+					__func__, ret);
+				return ret;
+			}
+		}
+	}
 
-	save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;
+	/* Copy RTC counting registers to static registers or latches */
+	rtc_control = save_control | BIT_RTC_CTRL_REG_GET_TIME_M;
 
-	ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
-	if (ret < 0)
+	/* for twl6030/32 enable read access to static shadowed registers */
+	if (twl_class_is_6030())
+		rtc_control |= BIT_RTC_CTRL_REG_RTC_V_OPT;
+
+	ret = twl_rtc_write_u8(rtc_control, REG_RTC_CTRL_REG);
+	if (ret < 0) {
+		dev_err(dev, "%s: writing CTRL_REG, error %d\n", __func__, ret);
 		return ret;
+	}
 
 	ret = twl_i2c_read(TWL_MODULE_RTC, rtc_data,
 			(rtc_reg_map[REG_SECONDS_REG]), ALL_TIME_REGS);
 
 	if (ret < 0) {
-		dev_err(dev, "rtc_read_time error %d\n", ret);
+		dev_err(dev, "%s: reading data, error %d\n", __func__, ret);
 		return ret;
 	}
 
+	/* for twl6030 restore original state of rtc control register */
+	if (twl_class_is_6030()) {
+		ret = twl_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
+		if (ret < 0) {
+			dev_err(dev, "%s: restore CTRL_REG, error %d\n",
+				__func__, ret);
+			return ret;
+		}
+	}
+
 	tm->tm_sec = bcd2bin(rtc_data[0]);
 	tm->tm_min = bcd2bin(rtc_data[1]);
 	tm->tm_hour = bcd2bin(rtc_data[2]);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 2cfcbffa41fd..386f0c53bea7 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -835,7 +835,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
 
 	scsi_eh_restore_cmnd(scmd, &ses);
 
-	if (sdrv->eh_action)
+	if (sdrv && sdrv->eh_action)
 		rtn = sdrv->eh_action(scmd, cmnd, cmnd_size, rtn);
 
 	return rtn;
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 31bfba805cf4..9b2901feaf78 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -653,7 +653,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 			dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
 								rx_buf_count);
 			if (t->tx_buf)
-				dma_unmap_single(NULL, t->tx_dma, t->len,
+				dma_unmap_single(&spi->dev, t->tx_dma, t->len,
 								DMA_TO_DEVICE);
 			return -ENOMEM;
 		}
@@ -692,10 +692,10 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
 	if (spicfg->io_type == SPI_IO_TYPE_DMA) {
 
 		if (t->tx_buf)
-			dma_unmap_single(NULL, t->tx_dma, t->len,
+			dma_unmap_single(&spi->dev, t->tx_dma, t->len,
 								DMA_TO_DEVICE);
 
-		dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
+		dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
 							DMA_FROM_DEVICE);
 
 		clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 24cacff57786..5f748c0d96bd 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -139,10 +139,12 @@ static void fsl_spi_change_mode(struct spi_device *spi)
 static void fsl_spi_chipselect(struct spi_device *spi, int value)
 {
 	struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
-	struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
+	struct fsl_spi_platform_data *pdata;
 	bool pol = spi->mode & SPI_CS_HIGH;
 	struct spi_mpc8xxx_cs *cs = spi->controller_state;
 
+	pdata = spi->dev.parent->parent->platform_data;
+
 	if (value == BITBANG_CS_INACTIVE) {
 		if (pdata->cs_control)
 			pdata->cs_control(spi, !pol);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 31054e3de4c1..570f22053be8 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -83,7 +83,7 @@ struct spi_imx_data {
 	struct spi_bitbang bitbang;
 
 	struct completion xfer_done;
-	void *base;
+	void __iomem *base;
 	int irq;
 	struct clk *clk;
 	unsigned long spi_clk;
@@ -766,8 +766,12 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
 	}
 
 	ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
-	if (ret < 0)
-		num_cs = mxc_platform_info->num_chipselect;
+	if (ret < 0) {
+		if (mxc_platform_info)
+			num_cs = mxc_platform_info->num_chipselect;
+		else
+			return ret;
+	}
 
 	master = spi_alloc_master(&pdev->dev,
 			sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
@@ -784,7 +788,7 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
 
 	for (i = 0; i < master->num_chipselect; i++) {
 		int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
-		if (cs_gpio < 0)
+		if (cs_gpio < 0 && mxc_platform_info)
 			cs_gpio = mxc_platform_info->chipselect[i];
 
 		spi_imx->chipselect[i] = cs_gpio;
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 96f0da66b185..09c925aaf320 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -2195,7 +2195,6 @@ static int pl022_runtime_suspend(struct device *dev)
 	struct pl022 *pl022 = dev_get_drvdata(dev);
 
 	clk_disable(pl022->clk);
-	amba_vcore_disable(pl022->adev);
 
 	return 0;
 }
@@ -2204,7 +2203,6 @@ static int pl022_runtime_resume(struct device *dev)
 {
 	struct pl022 *pl022 = dev_get_drvdata(dev);
 
-	amba_vcore_enable(pl022->adev);
 	clk_enable(pl022->clk);
 
 	return 0;
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index 08a3b1133d29..eb1dee26bda3 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -27,13 +27,14 @@ config ANDROID_LOGGER
 
 config ANDROID_PERSISTENT_RAM
 	bool
+	depends on HAVE_MEMBLOCK
 	select REED_SOLOMON
 	select REED_SOLOMON_ENC8
 	select REED_SOLOMON_DEC8
 
 config ANDROID_RAM_CONSOLE
 	bool "Android RAM buffer console"
-	depends on !S390 && !UML
+	depends on !S390 && !UML && HAVE_MEMBLOCK
 	select ANDROID_PERSISTENT_RAM
 	default n
 
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 052b43e4e505..b91e4bc332a7 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -55,7 +55,6 @@ static int lowmem_minfree[6] = {
 };
 static int lowmem_minfree_size = 4;
 
-static struct task_struct *lowmem_deathpending;
 static unsigned long lowmem_deathpending_timeout;
 
 #define lowmem_print(level, x...)			\
@@ -64,24 +63,6 @@ static unsigned long lowmem_deathpending_timeout;
 			printk(x);			\
 	} while (0)
 
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data);
-
-static struct notifier_block task_nb = {
-	.notifier_call	= task_notify_func,
-};
-
-static int
-task_notify_func(struct notifier_block *self, unsigned long val, void *data)
-{
-	struct task_struct *task = data;
-
-	if (task == lowmem_deathpending)
-		lowmem_deathpending = NULL;
-
-	return NOTIFY_OK;
-}
-
 static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 {
 	struct task_struct *tsk;
@@ -97,19 +78,6 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 	int other_file = global_page_state(NR_FILE_PAGES) -
 						global_page_state(NR_SHMEM);
 
-	/*
-	 * If we already have a death outstanding, then
-	 * bail out right away; indicating to vmscan
-	 * that we have nothing further to offer on
-	 * this pass.
-	 *
-	 * Note: Currently you need CONFIG_PROFILING
-	 * for this to work correctly.
-	 */
-	if (lowmem_deathpending &&
-	    time_before_eq(jiffies, lowmem_deathpending_timeout))
-		return 0;
-
 	if (lowmem_adj_size < array_size)
 		array_size = lowmem_adj_size;
 	if (lowmem_minfree_size < array_size)
@@ -148,6 +116,12 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 		if (!p)
 			continue;
 
+		if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+		    time_before_eq(jiffies, lowmem_deathpending_timeout)) {
+			task_unlock(p);
+			rcu_read_unlock();
+			return 0;
+		}
 		oom_score_adj = p->signal->oom_score_adj;
 		if (oom_score_adj < min_score_adj) {
 			task_unlock(p);
@@ -174,15 +148,9 @@ static int lowmem_shrink(struct shrinker *s, struct shrink_control *sc)
 		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
 			     selected->pid, selected->comm,
 			     selected_oom_score_adj, selected_tasksize);
-		/*
-		 * If CONFIG_PROFILING is off, then we don't want to stall
-		 * the killer by setting lowmem_deathpending.
-		 */
-#ifdef CONFIG_PROFILING
-		lowmem_deathpending = selected;
 		lowmem_deathpending_timeout = jiffies + HZ;
-#endif
 		send_sig(SIGKILL, selected, 0);
+		set_tsk_thread_flag(selected, TIF_MEMDIE);
 		rem -= selected_tasksize;
 	}
 	lowmem_print(4, "lowmem_shrink %lu, %x, return %d\n",
@@ -198,7 +166,6 @@ static struct shrinker lowmem_shrinker = {
 
 static int __init lowmem_init(void)
 {
-	task_handoff_register(&task_nb);
 	register_shrinker(&lowmem_shrinker);
 	return 0;
 }
@@ -206,7 +173,6 @@ static int __init lowmem_init(void)
 static void __exit lowmem_exit(void)
 {
 	unregister_shrinker(&lowmem_shrinker);
-	task_handoff_unregister(&task_nb);
 }
 
 module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
diff --git a/drivers/staging/android/persistent_ram.c b/drivers/staging/android/persistent_ram.c
index e08f2574e30a..8d8c1e33e0ff 100644
--- a/drivers/staging/android/persistent_ram.c
+++ b/drivers/staging/android/persistent_ram.c
@@ -399,12 +399,12 @@ static __init
 struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
 {
 	struct persistent_ram_zone *prz;
-	int ret;
+	int ret = -ENOMEM;
 
 	prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
 	if (!prz) {
 		pr_err("persistent_ram: failed to allocate persistent ram zone\n");
-		return ERR_PTR(-ENOMEM);
+		goto err;
 	}
 
 	INIT_LIST_HEAD(&prz->node);
@@ -412,13 +412,13 @@ struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
 	ret = persistent_ram_buffer_init(dev_name(dev), prz);
 	if (ret) {
 		pr_err("persistent_ram: failed to initialize buffer\n");
-		return ERR_PTR(ret);
+		goto err;
 	}
 
 	prz->ecc = ecc;
 	ret = persistent_ram_init_ecc(prz, prz->buffer_size);
 	if (ret)
-		return ERR_PTR(ret);
+		goto err;
 
 	if (prz->buffer->sig == PERSISTENT_RAM_SIG) {
 		if (buffer_size(prz) > prz->buffer_size ||
@@ -442,6 +442,9 @@ struct persistent_ram_zone *__persistent_ram_init(struct device *dev, bool ecc)
 	atomic_set(&prz->buffer->size, 0);
 
 	return prz;
+err:
+	kfree(prz);
+	return ERR_PTR(ret);
 }
 
 struct persistent_ram_zone * __init
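The persistent_ram.c change is the standard single-exit cleanup conversion: three early returns that leaked the half-built prz become one err label that frees it. The allocation-failure path can share that label because kfree(NULL) is a no-op. The same shape in plain C (free(NULL) is likewise a no-op):

#include <stdlib.h>

struct ctx {
	char *buf;
};

static struct ctx *ctx_create(size_t len)
{
	struct ctx *c = malloc(sizeof(*c));

	if (!c)
		goto err;		/* free(NULL) below is harmless */
	c->buf = malloc(len);
	if (!c->buf)
		goto err;
	return c;

err:
	free(c);			/* safe even when c is NULL */
	return NULL;
}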
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
index bc723eff11af..45c522cbe784 100644
--- a/drivers/staging/android/timed_gpio.c
+++ b/drivers/staging/android/timed_gpio.c
@@ -85,7 +85,7 @@ static int timed_gpio_probe(struct platform_device *pdev)
 	struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
 	struct timed_gpio *cur_gpio;
 	struct timed_gpio_data *gpio_data, *gpio_dat;
-	int i, j, ret = 0;
+	int i, ret;
 
 	if (!pdata)
 		return -EBUSY;
@@ -108,18 +108,12 @@ static int timed_gpio_probe(struct platform_device *pdev)
 		gpio_dat->dev.get_time = gpio_get_time;
 		gpio_dat->dev.enable = gpio_enable;
 		ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
-		if (ret >= 0) {
-			ret = timed_output_dev_register(&gpio_dat->dev);
-			if (ret < 0)
-				gpio_free(cur_gpio->gpio);
-		}
+		if (ret < 0)
+			goto err_out;
+		ret = timed_output_dev_register(&gpio_dat->dev);
 		if (ret < 0) {
-			for (j = 0; j < i; j++) {
-				timed_output_dev_unregister(&gpio_data[i].dev);
-				gpio_free(gpio_data[i].gpio);
-			}
-			kfree(gpio_data);
-			return ret;
+			gpio_free(cur_gpio->gpio);
+			goto err_out;
 		}
 
 		gpio_dat->gpio = cur_gpio->gpio;
@@ -131,6 +125,15 @@ static int timed_gpio_probe(struct platform_device *pdev)
 	platform_set_drvdata(pdev, gpio_data);
 
 	return 0;
+
+err_out:
+	while (--i >= 0) {
+		timed_output_dev_unregister(&gpio_data[i].dev);
+		gpio_free(gpio_data[i].gpio);
+	}
+	kfree(gpio_data);
+
+	return ret;
 }
 
 static int timed_gpio_remove(struct platform_device *pdev)
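Besides deduplicating the cleanup, the timed_gpio.c rewrite fixes a real bug in the old unwind: the inner loop declared j but kept indexing gpio_data[i], so it tore down the same (failed) entry i times. The replacement is the usual partial-initialization unwind, counting back over only the entries that fully succeeded; sketched with hypothetical setup()/teardown() helpers:

static int setup(int i)		{ (void)i; return 0; }	/* placeholder */
static void teardown(int i)	{ (void)i; }		/* placeholder */

static int init_all(int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = setup(i);	/* a failing setup(i) cleans up item i itself */
		if (ret < 0)
			goto err_out;
	}
	return 0;

err_out:
	while (--i >= 0)	/* undo items 0 .. i-1, newest first */
		teardown(i);
	return ret;
}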
diff --git a/drivers/staging/iio/inkern.c b/drivers/staging/iio/inkern.c
index de2c8ea64965..ef07a02bf542 100644
--- a/drivers/staging/iio/inkern.c
+++ b/drivers/staging/iio/inkern.c
@@ -82,6 +82,7 @@ int iio_map_array_unregister(struct iio_dev *indio_dev,
 			ret = -ENODEV;
 			goto error_ret;
 		}
+		i++;
 	}
 error_ret:
 	mutex_unlock(&iio_map_list_lock);
diff --git a/drivers/staging/iio/magnetometer/ak8975.c b/drivers/staging/iio/magnetometer/ak8975.c
index d5ddac3d8831..ebc2d0840caf 100644
--- a/drivers/staging/iio/magnetometer/ak8975.c
+++ b/drivers/staging/iio/magnetometer/ak8975.c
@@ -108,7 +108,8 @@ static const int ak8975_index_to_reg[] = {
 static int ak8975_write_data(struct i2c_client *client,
 			     u8 reg, u8 val, u8 mask, u8 shift)
 {
-	struct ak8975_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ak8975_data *data = iio_priv(indio_dev);
 	u8 regval;
 	int ret;
 
@@ -159,7 +160,8 @@ static int ak8975_read_data(struct i2c_client *client,
  */
 static int ak8975_setup(struct i2c_client *client)
 {
-	struct ak8975_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct ak8975_data *data = iio_priv(indio_dev);
 	u8 device_id;
 	int ret;
 
@@ -509,6 +511,7 @@ static int ak8975_probe(struct i2c_client *client,
 		goto exit_gpio;
 	}
 	data = iio_priv(indio_dev);
+	i2c_set_clientdata(client, indio_dev);
 	/* Perform some basic start-of-day setup of the device. */
 	err = ak8975_setup(client);
 	if (err < 0) {
@@ -516,7 +519,6 @@ static int ak8975_probe(struct i2c_client *client,
 		goto exit_free_iio;
 	}
 
-	i2c_set_clientdata(client, indio_dev);
 	data->client = client;
 	mutex_init(&data->lock);
 	data->eoc_irq = client->irq;
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 91dd3da70cb4..e00b416c4d33 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -521,7 +521,9 @@ static int hmc5843_detect(struct i2c_client *client,
 /* Called when we have found a new HMC5843. */
 static void hmc5843_init_client(struct i2c_client *client)
 {
-	struct hmc5843_data *data = i2c_get_clientdata(client);
+	struct iio_dev *indio_dev = i2c_get_clientdata(client);
+	struct hmc5843_data *data = iio_priv(indio_dev);
+
 	hmc5843_set_meas_conf(client, data->meas_conf);
 	hmc5843_set_rate(client, data->rate);
 	hmc5843_configure(client, data->operating_mode);
diff --git a/drivers/staging/media/as102/as102_fw.c b/drivers/staging/media/as102/as102_fw.c
index 43ebc43e6b9a..1075fb1df0d9 100644
--- a/drivers/staging/media/as102/as102_fw.c
+++ b/drivers/staging/media/as102/as102_fw.c
@@ -165,7 +165,7 @@ error:
 int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap)
 {
 	int errno = -EFAULT;
-	const struct firmware *firmware;
+	const struct firmware *firmware = NULL;
 	unsigned char *cmd_buf = NULL;
 	char *fw1, *fw2;
 	struct usb_device *dev = bus_adap->usb_dev;
diff --git a/drivers/staging/omapdrm/omap_drv.c b/drivers/staging/omapdrm/omap_drv.c
index 3df5b4c58ecd..620b8d54223d 100644
--- a/drivers/staging/omapdrm/omap_drv.c
+++ b/drivers/staging/omapdrm/omap_drv.c
@@ -803,9 +803,6 @@ static void pdev_shutdown(struct platform_device *device)
 static int pdev_probe(struct platform_device *device)
 {
 	DBG("%s", device->name);
-	if (platform_driver_register(&omap_dmm_driver))
-		dev_err(&device->dev, "DMM registration failed\n");
-
 	return drm_platform_init(&omap_drm_driver, device);
 }
 
@@ -833,6 +830,10 @@ struct platform_driver pdev = {
 static int __init omap_drm_init(void)
 {
 	DBG("init");
+	if (platform_driver_register(&omap_dmm_driver)) {
+		/* we can continue on without DMM.. so not fatal */
+		dev_err(NULL, "DMM registration failed\n");
+	}
 	return platform_driver_register(&pdev);
 }
 
diff --git a/drivers/staging/ozwpan/TODO b/drivers/staging/ozwpan/TODO
index f7a9c122f596..c2d30a7112f3 100644
--- a/drivers/staging/ozwpan/TODO
+++ b/drivers/staging/ozwpan/TODO
@@ -8,5 +8,7 @@ TODO:
 - code review by USB developer community.
 - testing with as many devices as possible.
 
-Please send any patches for this driver to Chris Kelly <ckelly@ozmodevices.com>
+Please send any patches for this driver to
+Rupesh Gujare <rgujare@ozmodevices.com>
+Chris Kelly <ckelly@ozmodevices.com>
 and Greg Kroah-Hartman <gregkh@linuxfoundation.org>.
diff --git a/drivers/staging/ramster/Kconfig b/drivers/staging/ramster/Kconfig
index 8b57b87edda4..4af1f8d4b953 100644
--- a/drivers/staging/ramster/Kconfig
+++ b/drivers/staging/ramster/Kconfig
@@ -1,10 +1,6 @@
1# Dependency on CONFIG_BROKEN is because there is a commit dependency
2# on a cleancache naming change to be submitted by Konrad Wilk
3# a39c00ded70339603ffe1b0ffdf3ade85bcf009a "Merge branch 'stable/cleancache.v13'
4# into linux-next. Once this commit is present, BROKEN can be removed
5config RAMSTER 1config RAMSTER
6 bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem" 2 bool "Cross-machine RAM capacity sharing, aka peer-to-peer tmem"
7 depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM && BROKEN 3 depends on (CLEANCACHE || FRONTSWAP) && CONFIGFS_FS=y && !ZCACHE && !XVMALLOC && !HIGHMEM
8 select LZO_COMPRESS 4 select LZO_COMPRESS
9 select LZO_DECOMPRESS 5 select LZO_DECOMPRESS
10 default n 6 default n
diff --git a/drivers/staging/rts_pstor/ms.c b/drivers/staging/rts_pstor/ms.c
index 66341dff8c99..f9a4498984cc 100644
--- a/drivers/staging/rts_pstor/ms.c
+++ b/drivers/staging/rts_pstor/ms.c
@@ -3498,7 +3498,8 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32
 
 			log_blk++;
 
-			for (seg_no = 0; seg_no < sizeof(ms_start_idx)/2; seg_no++) {
+			for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1;
+					seg_no++) {
 				if (log_blk < ms_start_idx[seg_no+1])
 					break;
 			}
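The ms.c fix replaces sizeof(ms_start_idx)/2, which hard-codes the assumption that each element is two bytes, with ARRAY_SIZE(ms_start_idx) - 1, the element count minus one (minus one because the loop body peeks at ms_start_idx[seg_no+1]). The kernel macro is, in essence:

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

so the bound stays correct even if the array's element type changes later.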
diff --git a/drivers/staging/rts_pstor/rtsx.c b/drivers/staging/rts_pstor/rtsx.c
index a7feb3e328a0..1dccd933a7e4 100644
--- a/drivers/staging/rts_pstor/rtsx.c
+++ b/drivers/staging/rts_pstor/rtsx.c
@@ -1000,6 +1000,11 @@ static int __devinit rtsx_probe(struct pci_dev *pci,
 
 	rtsx_init_chip(dev->chip);
 
+	/* set the supported max_lun and max_id for the scsi host
+	 * NOTE: the minimal value of max_id is 1 */
+	host->max_id = 1;
+	host->max_lun = dev->chip->max_lun;
+
 	/* Start up our control thread */
 	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
 	if (IS_ERR(th)) {
diff --git a/drivers/staging/rts_pstor/rtsx_transport.c b/drivers/staging/rts_pstor/rtsx_transport.c
index 4e3d2c106af0..9b2e5c99870f 100644
--- a/drivers/staging/rts_pstor/rtsx_transport.c
+++ b/drivers/staging/rts_pstor/rtsx_transport.c
@@ -335,6 +335,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
 	int sg_cnt, i, resid;
 	int err = 0;
 	long timeleft;
+	struct scatterlist *sg_ptr;
 	u32 val = TRIG_DMA;
 
 	if ((sg == NULL) || (num_sg <= 0) || !offset || !index)
@@ -371,7 +372,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
 	sg_cnt = dma_map_sg(&(rtsx->pci->dev), sg, num_sg, dma_dir);
 
 	resid = size;
-
+	sg_ptr = sg;
 	chip->sgi = 0;
 	/* Usually the next entry will be @sg@ + 1, but if this sg element
 	 * is part of a chained scatterlist, it could jump to the start of
@@ -379,14 +380,14 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
 	 * the proper sg
 	 */
 	for (i = 0; i < *index; i++)
-		sg = sg_next(sg);
+		sg_ptr = sg_next(sg_ptr);
 	for (i = *index; i < sg_cnt; i++) {
 		dma_addr_t addr;
 		unsigned int len;
 		u8 option;
 
-		addr = sg_dma_address(sg);
-		len = sg_dma_len(sg);
+		addr = sg_dma_address(sg_ptr);
+		len = sg_dma_len(sg_ptr);
 
 		RTSX_DEBUGP("DMA addr: 0x%x, Len: 0x%x\n",
 			    (unsigned int)addr, len);
@@ -415,7 +416,7 @@ static int rtsx_transfer_sglist_adma_partial(struct rtsx_chip *chip, u8 card,
 		if (!resid)
 			break;
 
-		sg = sg_next(sg);
+		sg_ptr = sg_next(sg_ptr);
 	}
 
 	RTSX_DEBUGP("SG table count = %d\n", chip->sgi);
diff --git a/drivers/staging/sep/sep_main.c b/drivers/staging/sep/sep_main.c
index ad54c2e5c932..f1701bc6e312 100644
--- a/drivers/staging/sep/sep_main.c
+++ b/drivers/staging/sep/sep_main.c
@@ -3114,7 +3114,7 @@ static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			current->pid);
 		if (1 == test_bit(SEP_LEGACY_SENDMSG_DONE_OFFSET,
 				  &call_status->status)) {
-			dev_warn(&sep->pdev->dev,
+			dev_dbg(&sep->pdev->dev,
 				"[PID%d] dcb prep needed before send msg\n",
 				current->pid);
 			error = -EPROTO;
@@ -3122,9 +3122,9 @@ static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		}
 
 		if (!arg) {
-			dev_warn(&sep->pdev->dev,
+			dev_dbg(&sep->pdev->dev,
 				"[PID%d] dcb null arg\n", current->pid);
-			error = EINVAL;
+			error = -EINVAL;
 			goto end_function;
 		}
 
diff --git a/drivers/staging/vme/devices/vme_pio2_core.c b/drivers/staging/vme/devices/vme_pio2_core.c
index 9fedc442a779..573c80003f0c 100644
--- a/drivers/staging/vme/devices/vme_pio2_core.c
+++ b/drivers/staging/vme/devices/vme_pio2_core.c
@@ -35,10 +35,10 @@ static int vector[PIO2_CARDS_MAX];
 static int vector_num;
 static int level[PIO2_CARDS_MAX];
 static int level_num;
-static const char *variant[PIO2_CARDS_MAX];
+static char *variant[PIO2_CARDS_MAX];
 static int variant_num;
 
-static int loopback;
+static bool loopback;
 
 static int pio2_match(struct vme_dev *);
 static int __devinit pio2_probe(struct vme_dev *);
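The vme_pio2_core.c type changes line the variables up with how they are exported: recent kernels insist that a variable handed to module_param(..., bool, ...) really be a bool, and module_param_array(..., charp, ...) expects plain char * elements, so these changes presumably silence the resulting warnings. The usual pairing looks like the following sketch (the 0444 permission value is illustrative, not taken from the driver):

static bool loopback;
module_param(loopback, bool, 0444);
MODULE_PARM_DESC(loopback, "Enable loopback mode");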
diff --git a/drivers/staging/vt6655/key.c b/drivers/staging/vt6655/key.c
index 0ff8d7bbf2a7..774b0d4a7e06 100644
--- a/drivers/staging/vt6655/key.c
+++ b/drivers/staging/vt6655/key.c
@@ -655,6 +655,9 @@ bool KeybSetDefaultKey (
         return (false);
     }
 
+    if (uKeyLength > MAX_KEY_LEN)
+        return false;
+
     pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = true;
     for(ii=0;ii<ETH_ALEN;ii++)
         pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
diff --git a/drivers/staging/vt6656/ioctl.c b/drivers/staging/vt6656/ioctl.c
index 1463d76895f0..d59456c29df1 100644
--- a/drivers/staging/vt6656/ioctl.c
+++ b/drivers/staging/vt6656/ioctl.c
@@ -565,7 +565,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
 			result = -ENOMEM;
 			break;
 		}
-		pNodeList = (PSNodeList)kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
+		pNodeList = kmalloc(sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)), (int)GFP_ATOMIC);
 		if (pNodeList == NULL) {
 			result = -ENOMEM;
 			break;
@@ -601,6 +601,7 @@ int private_ioctl(PSDevice pDevice, struct ifreq *rq)
 			}
 		}
 		if (copy_to_user(pReq->data, pNodeList, sizeof(SNodeList) + (sNodeList.uItem * sizeof(SNodeItem)))) {
+			kfree(pNodeList);
 			result = -EFAULT;
 			break;
 		}
diff --git a/drivers/staging/vt6656/key.c b/drivers/staging/vt6656/key.c
index 27bb523c8a97..ee62a06a75f4 100644
--- a/drivers/staging/vt6656/key.c
+++ b/drivers/staging/vt6656/key.c
@@ -684,6 +684,9 @@ BOOL KeybSetDefaultKey(
         return (FALSE);
     }
 
+    if (uKeyLength > MAX_KEY_LEN)
+        return false;
+
     pTable->KeyTable[MAX_KEY_TABLE-1].bInUse = TRUE;
     for (ii = 0; ii < ETH_ALEN; ii++)
         pTable->KeyTable[MAX_KEY_TABLE-1].abyBSSID[ii] = 0xFF;
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 94d5c35e22fb..3650bbff7686 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -61,7 +61,7 @@ XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension,
 	}
 	temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
 	/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
-	if ((temp & 0x88) == 0x80)
+	if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
 		data = 0; /* DDR */
 	else
 		data = 1; /* DDRII */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index 2919924213c4..60d4adf99923 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -152,6 +152,7 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
 	pVBInfo->pXGINew_CR97 = &XG20_CR97;
 
 	if (ChipType == XG27) {
+		unsigned char temp;
 		pVBInfo->MCLKData
 			= (struct SiS_MCLKData *) XGI27New_MCLKData;
 		pVBInfo->CR40 = XGI27_cr41;
@@ -162,7 +163,13 @@ void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
 		pVBInfo->pCRDE = XG27_CRDE;
 		pVBInfo->pSR40 = &XG27_SR40;
 		pVBInfo->pSR41 = &XG27_SR41;
+		pVBInfo->SR15 = XG27_SR13;
 
+		/*Z11m DDR*/
+		temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B);
+		/* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */
+		if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08))
+			pVBInfo->pXGINew_CR97 = &Z11m_CR97;
 	}
 
 	if (ChipType >= XG20) {
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index dddf261ed53d..e8d6f674b274 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -33,6 +33,13 @@ static struct XGI_ECLKDataStruct XGI340_ECLKData[] = {
 	{0x5c, 0x23, 0x01, 166}
 };
 
+static unsigned char XG27_SR13[4][8] = {
+	{0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
+	{0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
+	{0x32, 0x32, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR18 */
+	{0x03, 0x03, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00}  /* SR1B */
+};
+
 static unsigned char XGI340_SR13[4][8] = {
 	{0x35, 0x45, 0xb1, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR13 */
 	{0x41, 0x51, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00}, /* SR14 */
@@ -71,7 +78,7 @@ static unsigned char XGI27_cr41[24][8] = {
 	{0x20, 0x40, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 0 CR41 */
 	{0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 1 CR8A */
 	{0xC4, 0x40, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 2 CR8B */
-	{0xB5, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
+	{0xB3, 0x13, 0xa4, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 3 CR40[7],
							     CR99[2:0],
							     CR45[3:0]*/
	{0xf0, 0xf5, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00}, /* 4 CR59 */
@@ -2803,6 +2810,8 @@ static unsigned char XG27_CRDE[2];
 static unsigned char XG27_SR40 = 0x04 ;
 static unsigned char XG27_SR41 = 0x00 ;
 
+static unsigned char Z11m_CR97 = 0x80 ;
+
 static struct XGI330_VCLKDataStruct XGI_VCLKData[] = {
 	/* SR2B,SR2C,SR2D */
 	{0x1B, 0xE1, 25}, /* 00 (25.175MHz) */
diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 09caa4f2687e..917461c66014 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -267,33 +267,39 @@ static unsigned long obj_idx_to_offset(struct page *page,
 	return off + obj_idx * class_size;
 }
 
+static void reset_page(struct page *page)
+{
+	clear_bit(PG_private, &page->flags);
+	clear_bit(PG_private_2, &page->flags);
+	set_page_private(page, 0);
+	page->mapping = NULL;
+	page->freelist = NULL;
+	reset_page_mapcount(page);
+}
+
 static void free_zspage(struct page *first_page)
 {
-	struct page *nextp, *tmp;
+	struct page *nextp, *tmp, *head_extra;
 
 	BUG_ON(!is_first_page(first_page));
 	BUG_ON(first_page->inuse);
 
-	nextp = (struct page *)page_private(first_page);
+	head_extra = (struct page *)page_private(first_page);
 
-	clear_bit(PG_private, &first_page->flags);
-	clear_bit(PG_private_2, &first_page->flags);
-	set_page_private(first_page, 0);
-	first_page->mapping = NULL;
-	first_page->freelist = NULL;
-	reset_page_mapcount(first_page);
+	reset_page(first_page);
 	__free_page(first_page);
 
 	/* zspage with only 1 system page */
-	if (!nextp)
+	if (!head_extra)
 		return;
 
-	list_for_each_entry_safe(nextp, tmp, &nextp->lru, lru) {
+	list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
 		list_del(&nextp->lru);
-		clear_bit(PG_private_2, &nextp->flags);
-		nextp->index = 0;
+		reset_page(nextp);
 		__free_page(nextp);
 	}
+	reset_page(head_extra);
+	__free_page(head_extra);
 }
 
 /* Initialize a newly allocated zspage */
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 5b149b466ec8..5c27f7e6c9f1 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -1572,13 +1572,11 @@ static irqreturn_t serial8250_interrupt(int irq, void *dev_id)
 	do {
 		struct uart_8250_port *up;
 		struct uart_port *port;
-		bool skip;
 
 		up = list_entry(l, struct uart_8250_port, list);
 		port = &up->port;
-		skip = pass_counter && up->port.flags & UPF_IIR_ONCE;
 
-		if (!skip && port->handle_irq(port)) {
+		if (port->handle_irq(port)) {
 			handled = 1;
 			end = NULL;
 		} else if (end == NULL)
@@ -2037,10 +2035,12 @@ static int serial8250_startup(struct uart_port *port)
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	/*
-	 * If the interrupt is not reasserted, setup a timer to
-	 * kick the UART on a regular basis.
+	 * If the interrupt is not reasserted, or we otherwise
+	 * don't trust the iir, setup a timer to kick the UART
+	 * on a regular basis.
 	 */
-	if (!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) {
+	if ((!(iir1 & UART_IIR_NO_INT) && (iir & UART_IIR_NO_INT)) ||
+	    up->port.flags & UPF_BUG_THRE) {
 		up->bugs |= UART_BUG_THRE;
 		pr_debug("ttyS%d - using backup timer\n",
 			 serial_index(port));
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index da2b0b0a183f..858dca865d6a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1096,7 +1096,7 @@ static int kt_serial_setup(struct serial_private *priv,
 			   const struct pciserial_board *board,
 			   struct uart_port *port, int idx)
 {
-	port->flags |= UPF_IIR_ONCE;
+	port->flags |= UPF_BUG_THRE;
 	return skip_tx_en_setup(priv, board, port, idx);
 }
 
@@ -1118,18 +1118,6 @@ pci_xr17c154_setup(struct serial_private *priv,
 	return pci_default_setup(priv, board, port, idx);
 }
 
-static int try_enable_msi(struct pci_dev *dev)
-{
-	/* use msi if available, but fallback to legacy otherwise */
-	pci_enable_msi(dev);
-	return 0;
-}
-
-static void disable_msi(struct pci_dev *dev)
-{
-	pci_disable_msi(dev);
-}
-
 #define PCI_VENDOR_ID_SBSMODULARIO	0x124B
 #define PCI_SUBVENDOR_ID_SBSMODULARIO	0x124B
 #define PCI_DEVICE_ID_OCTPRO		0x0001
@@ -1249,9 +1237,7 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
 		.device		= PCI_DEVICE_ID_INTEL_PATSBURG_KT,
 		.subvendor	= PCI_ANY_ID,
 		.subdevice	= PCI_ANY_ID,
-		.init		= try_enable_msi,
 		.setup		= kt_serial_setup,
-		.exit		= disable_msi,
 	},
 	/*
 	 * ITE
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 665beb68f670..070b442c1f81 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1041,7 +1041,7 @@ config SERIAL_OMAP
 
 config SERIAL_OMAP_CONSOLE
 	bool "Console on OMAP serial port"
-	depends on SERIAL_OMAP
+	depends on SERIAL_OMAP=y
 	select SERIAL_CORE_CONSOLE
 	help
 	  Select this option if you would like to use omap serial port as
diff --git a/drivers/tty/serial/altera_uart.c b/drivers/tty/serial/altera_uart.c
index e7903751e058..1f0330915d5a 100644
--- a/drivers/tty/serial/altera_uart.c
+++ b/drivers/tty/serial/altera_uart.c
@@ -556,7 +556,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
 	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (res_mem)
 		port->mapbase = res_mem->start;
-	else if (platp->mapbase)
+	else if (platp)
 		port->mapbase = platp->mapbase;
 	else
 		return -EINVAL;
@@ -564,7 +564,7 @@ static int __devinit altera_uart_probe(struct platform_device *pdev)
 	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (res_irq)
 		port->irq = res_irq->start;
-	else if (platp->irq)
+	else if (platp)
 		port->irq = platp->irq;
 
 	/* Check platform data first so we can override device node data */
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 0c65c9e66986..3d569cd68f58 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -1946,10 +1946,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 		goto unmap;
 	}
 
-	/* Ensure interrupts from this UART are masked and cleared */
-	writew(0, uap->port.membase + UART011_IMSC);
-	writew(0xffff, uap->port.membase + UART011_ICR);
-
 	uap->vendor = vendor;
 	uap->lcrh_rx = vendor->lcrh_rx;
 	uap->lcrh_tx = vendor->lcrh_tx;
@@ -1967,6 +1963,10 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
 	uap->port.line = i;
 	pl011_dma_probe(uap);
 
+	/* Ensure interrupts from this UART are masked and cleared */
+	writew(0, uap->port.membase + UART011_IMSC);
+	writew(0xffff, uap->port.membase + UART011_ICR);
+
 	snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
 
 	amba_ports[i] = uap;
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index f9a6be7a9bed..3d7e1ee2fa57 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -389,6 +389,8 @@ static void atmel_start_rx(struct uart_port *port)
 {
 	UART_PUT_CR(port, ATMEL_US_RSTSTA);  /* reset status and receiver */
 
+	UART_PUT_CR(port, ATMEL_US_RXEN);
+
 	if (atmel_use_dma_rx(port)) {
 		/* enable PDC controller */
 		UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT |
@@ -404,6 +406,8 @@ static void atmel_start_rx(struct uart_port *port)
  */
 static void atmel_stop_rx(struct uart_port *port)
 {
+	UART_PUT_CR(port, ATMEL_US_RXDIS);
+
 	if (atmel_use_dma_rx(port)) {
 		/* disable PDC receive */
 		UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS);
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 0121486ac4fa..d00b38eb268e 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1381,29 +1381,24 @@ static int serial_omap_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 
-	if (!request_mem_region(mem->start, resource_size(mem),
+	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
 				pdev->dev.driver->name)) {
 		dev_err(&pdev->dev, "memory region already claimed\n");
 		return -EBUSY;
 	}
 
 	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
-	if (!dma_rx) {
-		ret = -EINVAL;
-		goto err;
-	}
+	if (!dma_rx)
+		return -ENXIO;
 
 	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
-	if (!dma_tx) {
-		ret = -EINVAL;
-		goto err;
-	}
+	if (!dma_tx)
+		return -ENXIO;
+
+	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
+	if (!up)
+		return -ENOMEM;
 
-	up = kzalloc(sizeof(*up), GFP_KERNEL);
-	if (up == NULL) {
-		ret = -ENOMEM;
-		goto do_release_region;
-	}
 	up->pdev = pdev;
 	up->port.dev = &pdev->dev;
 	up->port.type = PORT_OMAP;
@@ -1423,16 +1418,17 @@ static int serial_omap_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
 			up->port.line);
 		ret = -ENODEV;
-		goto err;
+		goto err_port_line;
 	}
 
 	sprintf(up->name, "OMAP UART%d", up->port.line);
 	up->port.mapbase = mem->start;
-	up->port.membase = ioremap(mem->start, resource_size(mem));
+	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
+					resource_size(mem));
 	if (!up->port.membase) {
 		dev_err(&pdev->dev, "can't ioremap UART\n");
 		ret = -ENOMEM;
-		goto err;
+		goto err_ioremap;
 	}
 
 	up->port.flags = omap_up_info->flags;
@@ -1478,16 +1474,19 @@ static int serial_omap_probe(struct platform_device *pdev)
 
 	ret = uart_add_one_port(&serial_omap_reg, &up->port);
 	if (ret != 0)
-		goto do_release_region;
+		goto err_add_port;
 
 	pm_runtime_put(&pdev->dev);
 	platform_set_drvdata(pdev, up);
 	return 0;
-err:
+
+err_add_port:
+	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+err_ioremap:
+err_port_line:
 	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
 		pdev->id, __func__, ret);
-do_release_region:
-	release_mem_region(mem->start, resource_size(mem));
 	return ret;
 }
 
@@ -1499,8 +1498,6 @@ static int serial_omap_remove(struct platform_device *dev)
 		pm_runtime_disable(&up->pdev->dev);
 		uart_remove_one_port(&serial_omap_reg, &up->port);
 		pm_qos_remove_request(&up->pm_qos_request);
-
-		kfree(up);
 	}
 
 	platform_set_drvdata(dev, NULL);
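
The omap-serial conversion above swaps manual request/ioremap/kzalloc plus goto-unwinding for device-managed (devm_*) resources, which the driver core releases automatically on probe failure or removal. A minimal sketch of the resulting probe shape; example_probe() and struct example_priv are illustrative names, not part of the driver:

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	struct example_priv *priv;	/* hypothetical driver state */

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	if (!devm_request_mem_region(&pdev->dev, mem->start,
				     resource_size(mem), pdev->name))
		return -EBUSY;

	base = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
	if (!base)
		return -ENOMEM;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;	/* region and mapping are released for us */

	return 0;
}
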
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index 08b9962b8fda..bbbec4a74cfb 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -210,6 +210,7 @@ enum {
 #define CMITC_UARTCLK   192000000 /* 192.0000 MHz */
 #define FRI2_64_UARTCLK  64000000 /*  64.0000 MHz */
 #define FRI2_48_UARTCLK  48000000 /*  48.0000 MHz */
+#define NTC1_UARTCLK     64000000 /*  64.0000 MHz */
 
 struct pch_uart_buffer {
 	unsigned char *buf;
@@ -384,6 +385,12 @@ static int pch_uart_get_uartclk(void)
 	if (cmp && strstr(cmp, "Fish River Island II"))
 		return FRI2_48_UARTCLK;
 
+	/* Kontron COMe-mTT10 (nanoETXexpress-TT) */
+	cmp = dmi_get_system_info(DMI_BOARD_NAME);
+	if (cmp && (strstr(cmp, "COMe-mTT") ||
+		    strstr(cmp, "nanoETXexpress-TT")))
+		return NTC1_UARTCLK;
+
 	return DEFAULT_UARTCLK;
 }
 
@@ -1651,6 +1658,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
 	}
 
 	pci_enable_msi(pdev);
+	pci_set_master(pdev);
 
 	iobase = pci_resource_start(pdev, 0);
 	mapbase = pci_resource_start(pdev, 1);
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index de249d265bec..d8b0aee35632 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -982,6 +982,7 @@ static void s3c24xx_serial_resetport(struct uart_port *port,
 
 	ucon &= ucon_mask;
 	wr_regl(port, S3C2410_UCON,  ucon | cfg->ucon);
+	wr_regl(port, S3C2410_ULCON, cfg->ulcon);
 
 	/* reset both fifos */
 	wr_regl(port, S3C2410_UFCON, cfg->ufcon | S3C2410_UFCON_RESETBOTH);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 3bdd4b19dd06..2156188db4a6 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -2932,11 +2932,10 @@ static int __init con_init(void)
 	gotoxy(vc, vc->vc_x, vc->vc_y);
 	csi_J(vc, 0);
 	update_screen(vc);
-	pr_info("Console: %s %s %dx%d",
+	pr_info("Console: %s %s %dx%d\n",
 		vc->vc_can_do_color ? "colour" : "mono",
 		display_desc, vc->vc_cols, vc->vc_rows);
 	printable = 1;
-	printk("\n");
 
 	console_unlock();
 
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index cbd8f5f80596..76316a33061b 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -2,14 +2,6 @@
2# USB device configuration 2# USB device configuration
3# 3#
4 4
5menuconfig USB_SUPPORT
6 bool "USB support"
7 depends on HAS_IOMEM
8 default y
9 ---help---
10 This option adds core support for Universal Serial Bus (USB).
11 You will also need drivers from the following menu to make use of it.
12
13# many non-PCI SOC chips embed OHCI 5# many non-PCI SOC chips embed OHCI
14config USB_ARCH_HAS_OHCI 6config USB_ARCH_HAS_OHCI
15 boolean 7 boolean
@@ -63,6 +55,14 @@ config USB_ARCH_HAS_XHCI
63 boolean 55 boolean
64 default PCI 56 default PCI
65 57
58menuconfig USB_SUPPORT
59 bool "USB support"
60 depends on HAS_IOMEM
61 default y
62 ---help---
63 This option adds core support for Universal Serial Bus (USB).
64 You will also need drivers from the following menu to make use of it.
65
66if USB_SUPPORT 66if USB_SUPPORT
67 67
68config USB_COMMON 68config USB_COMMON
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f8e2d6d52e5c..9a56635dc19c 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -1189,8 +1189,13 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
 	if (status == 0) {
 		status = usb_suspend_device(udev, msg);
 
-		/* Again, ignore errors during system sleep transitions */
-		if (!PMSG_IS_AUTO(msg))
+		/*
+		 * Ignore errors from non-root-hub devices during
+		 * system sleep transitions.  For the most part,
+		 * these devices should go to low power anyway when
+		 * the entire bus is suspended.
+		 */
+		if (udev->parent && !PMSG_IS_AUTO(msg))
 			status = 0;
 	}
 
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 9d7fc9a39933..140d3e11f212 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1978,6 +1978,18 @@ int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg)
 	if (status == 0) {
 		usb_set_device_state(rhdev, USB_STATE_SUSPENDED);
 		hcd->state = HC_STATE_SUSPENDED;
+
+		/* Did we race with a root-hub wakeup event? */
+		if (rhdev->do_remote_wakeup) {
+			char buffer[6];
+
+			status = hcd->driver->hub_status_data(hcd, buffer);
+			if (status != 0) {
+				dev_dbg(&rhdev->dev, "suspend raced with wakeup event\n");
+				hcd_bus_resume(rhdev, PMSG_AUTO_RESUME);
+				status = -EBUSY;
+			}
+		}
 	} else {
 		spin_lock_irq(&hcd_root_hub_lock);
 		if (!HCD_DEAD(hcd)) {
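
The new block in hcd_bus_suspend() is a commit-then-recheck pattern: suspend first, then look for a wakeup event that raced in, and roll back if one did. Condensed, with do_suspend(), wakeup_pending() and do_resume() as hypothetical stand-ins for the HCD callbacks:

status = do_suspend();
if (status == 0 && remote_wakeup_enabled) {
	if (wakeup_pending()) {		/* the race window closed behind us */
		do_resume();
		status = -EBUSY;	/* let the caller retry later */
	}
}
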
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 28664eb7f555..a2aa9d652c67 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3163,6 +3163,22 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
 	if (retval)
 		goto fail;
 
+	/*
+	 * Some superspeed devices have finished the link training process
+	 * and attached to a superspeed hub port, but the device descriptor
+	 * got from those devices show they aren't superspeed devices. Warm
+	 * reset the port attached by the devices can fix them.
+	 */
+	if ((udev->speed == USB_SPEED_SUPER) &&
+			(le16_to_cpu(udev->descriptor.bcdUSB) < 0x0300)) {
+		dev_err(&udev->dev, "got a wrong device descriptor, "
+				"warm reset device\n");
+		hub_port_reset(hub, port1, udev,
+				HUB_BH_RESET_TIME, true);
+		retval = -EINVAL;
+		goto fail;
+	}
+
 	if (udev->descriptor.bMaxPacketSize0 == 0xff ||
 	    udev->speed == USB_SPEED_SUPER)
 		i = 512;
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b3bdfede45e6..aed3e07942d4 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -308,7 +308,8 @@ static void sg_complete(struct urb *urb)
 			retval = usb_unlink_urb(io->urbs [i]);
 			if (retval != -EINPROGRESS &&
 			    retval != -ENODEV &&
-			    retval != -EBUSY)
+			    retval != -EBUSY &&
+			    retval != -EIDRM)
 				dev_err(&io->dev->dev,
 					"%s, unlink --> %d\n",
 					__func__, retval);
@@ -317,7 +318,6 @@ static void sg_complete(struct urb *urb)
 		}
 		spin_lock(&io->lock);
 	}
-	urb->dev = NULL;
 
 	/* on the last completion, signal usb_sg_wait() */
 	io->bytes += urb->actual_length;
@@ -524,7 +524,6 @@ void usb_sg_wait(struct usb_sg_request *io)
 		case -ENXIO:	/* hc didn't queue this one */
 		case -EAGAIN:
 		case -ENOMEM:
-			io->urbs[i]->dev = NULL;
 			retval = 0;
 			yield();
 			break;
@@ -542,7 +541,6 @@ void usb_sg_wait(struct usb_sg_request *io)
 
 		/* fail any uncompleted urbs */
 		default:
-			io->urbs[i]->dev = NULL;
 			io->urbs[i]->status = retval;
 			dev_dbg(&io->dev->dev, "%s, submit --> %d\n",
 				__func__, retval);
@@ -593,7 +591,10 @@ void usb_sg_cancel(struct usb_sg_request *io)
 		if (!io->urbs [i]->dev)
 			continue;
 		retval = usb_unlink_urb(io->urbs [i]);
-		if (retval != -EINPROGRESS && retval != -EBUSY)
+		if (retval != -EINPROGRESS
+		    && retval != -ENODEV
+		    && retval != -EBUSY
+		    && retval != -EIDRM)
 			dev_warn(&io->dev->dev, "%s, unlink --> %d\n",
 				 __func__, retval);
 	}
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 7239a73c1b8c..cd9b3a2cd8a7 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -539,6 +539,10 @@ EXPORT_SYMBOL_GPL(usb_submit_urb);
 * never submitted, or it was unlinked before, or the hardware is already
 * finished with it), even if the completion handler has not yet run.
 *
+ * The URB must not be deallocated while this routine is running.  In
+ * particular, when a driver calls this routine, it must insure that the
+ * completion handler cannot deallocate the URB.
+ *
 * Unlinking and Endpoint Queues:
 *
 * [The behaviors and guarantees described below do not apply to virtual
@@ -603,6 +607,10 @@ EXPORT_SYMBOL_GPL(usb_unlink_urb);
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
+ * The URB must not be deallocated while this routine is running.  In
+ * particular, when a driver calls this routine, it must insure that the
+ * completion handler cannot deallocate the URB.
+ *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
@@ -640,6 +648,10 @@ EXPORT_SYMBOL_GPL(usb_kill_urb);
 * with error -EPERM.  Thus even if the URB's completion handler always
 * tries to resubmit, it will not succeed and the URB will become idle.
 *
+ * The URB must not be deallocated while this routine is running.  In
+ * particular, when a driver calls this routine, it must insure that the
+ * completion handler cannot deallocate the URB.
+ *
 * This routine may not be used in an interrupt context (such as a bottom
 * half or a completion handler), or when holding a spinlock, or in other
 * situations where the caller can't schedule().
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index 8793f32bab11..e58b16442971 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1574,7 +1574,6 @@ static void destroy_ep_files (struct dev_data *dev)
 	DBG (dev, "%s %d\n", __func__, dev->state);
 
 	/* dev->state must prevent interference */
-restart:
 	spin_lock_irq (&dev->lock);
 	while (!list_empty(&dev->epfiles)) {
 		struct ep_data	*ep;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 057cdda7a489..806cc95317aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -347,6 +347,8 @@ static int ehci_reset (struct ehci_hcd *ehci)
 	if (ehci->debug)
 		dbgp_external_startup();
 
+	ehci->port_c_suspend = ehci->suspended_ports =
+			ehci->resuming_ports = 0;
 	return retval;
 }
 
@@ -939,6 +941,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
 			 * like usb_port_resume() does.
 			 */
 			ehci->reset_done[i] = jiffies + msecs_to_jiffies(25);
+			set_bit(i, &ehci->resuming_ports);
 			ehci_dbg (ehci, "port %d remote wakeup\n", i + 1);
 			mod_timer(&hcd->rh_timer, ehci->reset_done[i]);
 		}
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 256fbd42e48c..38fe07623152 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -223,15 +223,10 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
 	 * remote wakeup, we must fail the suspend.
 	 */
 	if (hcd->self.root_hub->do_remote_wakeup) {
-		port = HCS_N_PORTS(ehci->hcs_params);
-		while (port--) {
-			if (ehci->reset_done[port] != 0) {
-				spin_unlock_irq(&ehci->lock);
-				ehci_dbg(ehci, "suspend failed because "
-						"port %d is resuming\n",
-						port + 1);
-				return -EBUSY;
-			}
+		if (ehci->resuming_ports) {
+			spin_unlock_irq(&ehci->lock);
+			ehci_dbg(ehci, "suspend failed because a port is resuming\n");
+			return -EBUSY;
 		}
 	}
 
@@ -554,16 +549,12 @@ static int
 ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
 {
 	struct ehci_hcd	*ehci = hcd_to_ehci (hcd);
-	u32		temp, status = 0;
+	u32		temp, status;
 	u32		mask;
 	int		ports, i, retval = 1;
 	unsigned long	flags;
 	u32		ppcd = 0;
 
-	/* if !USB_SUSPEND, root hub timers won't get shut down ... */
-	if (ehci->rh_state != EHCI_RH_RUNNING)
-		return 0;
-
 	/* init status to no-changes */
 	buf [0] = 0;
 	ports = HCS_N_PORTS (ehci->hcs_params);
@@ -572,6 +563,11 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)
 		retval++;
 	}
 
+	/* Inform the core about resumes-in-progress by returning
+	 * a non-zero value even if there are no status changes.
+	 */
+	status = ehci->resuming_ports;
+
 	/* Some boards (mostly VIA?) report bogus overcurrent indications,
 	 * causing massive log spam unless we completely ignore them.  It
 	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
@@ -846,6 +842,7 @@ static int ehci_hub_control (
 			ehci_writel(ehci,
 				temp & ~(PORT_RWC_BITS | PORT_RESUME),
 				status_reg);
+			clear_bit(wIndex, &ehci->resuming_ports);
 			retval = handshake(ehci, status_reg,
 					PORT_RESUME, 0, 2000 /* 2msec */);
 			if (retval != 0) {
@@ -864,6 +861,7 @@ static int ehci_hub_control (
 				ehci->reset_done[wIndex])) {
 			status |= USB_PORT_STAT_C_RESET << 16;
 			ehci->reset_done [wIndex] = 0;
+			clear_bit(wIndex, &ehci->resuming_ports);
 
 			/* force reset to complete */
 			ehci_writel(ehci, temp & ~(PORT_RWC_BITS | PORT_RESET),
@@ -884,8 +882,10 @@ static int ehci_hub_control (
 					ehci_readl(ehci, status_reg));
 		}
 
-		if (!(temp & (PORT_RESUME|PORT_RESET)))
+		if (!(temp & (PORT_RESUME|PORT_RESET))) {
 			ehci->reset_done[wIndex] = 0;
+			clear_bit(wIndex, &ehci->resuming_ports);
+		}
 
 		/* transfer dedicated ports to the companion hc */
 		if ((temp & PORT_CONNECT) &&
@@ -920,6 +920,7 @@ static int ehci_hub_control (
 			status |= USB_PORT_STAT_SUSPEND;
 		} else if (test_bit(wIndex, &ehci->suspended_ports)) {
 			clear_bit(wIndex, &ehci->suspended_ports);
+			clear_bit(wIndex, &ehci->resuming_ports);
 			ehci->reset_done[wIndex] = 0;
 			if (temp & PORT_PE)
 				set_bit(wIndex, &ehci->port_c_suspend);
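
resuming_ports packs per-port resume state into a single unsigned long, so "is any port resuming?" is one load and per-port updates are atomic bitops that need no extra lock against concurrent hub_control() and bus_suspend() callers. In sketch form:

set_bit(port, &ehci->resuming_ports);	/* resume signalling started */
clear_bit(port, &ehci->resuming_ports);	/* resume finished or aborted */

if (ehci->resuming_ports)		/* any port still resuming? */
	return -EBUSY;
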
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 3de48a2d7955..73544bd440bd 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -224,6 +224,7 @@ static int tegra_ehci_hub_control(
 		temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
 		/* start resume signalling */
 		ehci_writel(ehci, temp | PORT_RESUME, status_reg);
+		set_bit(wIndex-1, &ehci->resuming_ports);
 
 		spin_unlock_irqrestore(&ehci->lock, flags);
 		msleep(20);
@@ -236,6 +237,7 @@ static int tegra_ehci_hub_control(
 			pr_err("%s: timeout waiting for SUSPEND\n", __func__);
 
 		ehci->reset_done[wIndex-1] = 0;
+		clear_bit(wIndex-1, &ehci->resuming_ports);
 
 		tegra->port_resuming = 1;
 		goto done;
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 8f9acbc96fde..2694ed6558d2 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -117,6 +117,8 @@ struct ehci_hcd {			/* one per controller */
 			the change-suspend feature turned on */
 	unsigned long		suspended_ports;	/* which ports are
 			suspended */
+	unsigned long		resuming_ports;		/* which ports have
+			started to resume */
 
 	/* per-HC memory pools (could be per-bus, but ...) */
 	struct dma_pool		*qh_pool;	/* qh per active urb */
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 11de5f1be981..32dada8c8b4f 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -825,9 +825,13 @@ static void __devinit quirk_usb_handoff_xhci(struct pci_dev *pdev)
 		}
 	}
 
-	/* Disable any BIOS SMIs */
-	writel(XHCI_LEGACY_DISABLE_SMI,
-			base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+	val = readl(base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
+	/* Mask off (turn off) any enabled SMIs */
+	val &= XHCI_LEGACY_DISABLE_SMI;
+	/* Mask all SMI events bits, RW1C */
+	val |= XHCI_LEGACY_SMI_EVENTS;
+	/* Disable any BIOS SMIs and clear all SMI events*/
+	writel(val, base + ext_cap_offset + XHCI_LEGACY_CONTROL_OFFSET);
 
 	if (usb_is_intel_switchable_xhci(pdev))
 		usb_enable_xhci_ports(pdev);
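
Bits 29:31 of the legacy control register are write-1-to-clear event flags, so a blind write of the disable mask could leave events latched and clobber bits that must be preserved. A standalone sketch of the read-modify-write the quirk now performs; the mask names mirror the xHCI defines in the next hunk and the values follow it:

#include <stdint.h>

#define PRESERVE_MASK	((0x7u << 1) | (0xffu << 5) | (0x7u << 17))	/* RW bits */
#define RW1C_EVENTS	(0x7u << 29)					/* write 1 to clear */

/* Keep only the bits that must survive, then set the RW1C event bits
 * so a single write both disables SMIs and acknowledges pending events.
 */
static uint32_t legacy_smi_write_value(uint32_t current_val)
{
	return (current_val & PRESERVE_MASK) | RW1C_EVENTS;
}
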
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index 045cde4cbc3d..768d54295a20 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -196,11 +196,12 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
 	status = get_hub_status_data(uhci, buf);
 
 	switch (uhci->rh_state) {
-	case UHCI_RH_SUSPENDING:
 	case UHCI_RH_SUSPENDED:
 		/* if port change, ask to be resumed */
-		if (status || uhci->resuming_ports)
+		if (status || uhci->resuming_ports) {
+			status = 1;
 			usb_hcd_resume_root_hub(hcd);
+		}
 		break;
 
 	case UHCI_RH_AUTO_STOPPED:
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index e9b0f043455d..4b436f5a4171 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -119,7 +119,7 @@ static void xhci_print_command_reg(struct xhci_hcd *xhci)
 	xhci_dbg(xhci, "  Event Interrupts %s\n",
 			(temp & CMD_EIE) ? "enabled " : "disabled");
 	xhci_dbg(xhci, "  Host System Error Interrupts %s\n",
-			(temp & CMD_EIE) ? "enabled " : "disabled");
+			(temp & CMD_HSEIE) ? "enabled " : "disabled");
 	xhci_dbg(xhci, "  HC has %sfinished light reset\n",
 			(temp & CMD_LRESET) ? "not " : "");
 }
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index c7f33123d4c0..377f4242dabb 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -62,8 +62,9 @@
62/* USB Legacy Support Control and Status Register - section 7.1.2 */ 62/* USB Legacy Support Control and Status Register - section 7.1.2 */
63/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */ 63/* Add this offset, plus the value of xECP in HCCPARAMS to the base address */
64#define XHCI_LEGACY_CONTROL_OFFSET (0x04) 64#define XHCI_LEGACY_CONTROL_OFFSET (0x04)
65/* bits 1:2, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */ 65/* bits 1:3, 5:12, and 17:19 need to be preserved; bits 21:28 should be zero */
66#define XHCI_LEGACY_DISABLE_SMI ((0x3 << 1) + (0xff << 5) + (0x7 << 17)) 66#define XHCI_LEGACY_DISABLE_SMI ((0x7 << 1) + (0xff << 5) + (0x7 << 17))
67#define XHCI_LEGACY_SMI_EVENTS (0x7 << 29)
67 68
68/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */ 69/* USB 2.0 xHCI 0.96 L1C capability - section 7.2.2.1.3.2 */
69#define XHCI_L1C (1 << 16) 70#define XHCI_L1C (1 << 16)
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index cae4c6f2845a..68eaa908ac8e 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1796,11 +1796,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	int i;
 
 	/* Free the Event Ring Segment Table and the actual Event Ring */
-	if (xhci->ir_set) {
-		xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-		xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
-		xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
-	}
 	size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
 	if (xhci->erst.entries)
 		dma_free_coherent(&pdev->dev, size,
@@ -1812,7 +1807,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->event_ring = NULL;
 	xhci_dbg(xhci, "Freed event ring\n");
 
-	xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring);
 	if (xhci->cmd_ring)
 		xhci_ring_free(xhci, xhci->cmd_ring);
 	xhci->cmd_ring = NULL;
@@ -1841,7 +1835,6 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->medium_streams_pool = NULL;
 	xhci_dbg(xhci, "Freed medium stream array pool\n");
 
-	xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr);
 	if (xhci->dcbaa)
 		dma_free_coherent(&pdev->dev, sizeof(*xhci->dcbaa),
 				xhci->dcbaa, xhci->dcbaa->dma);
@@ -2459,6 +2452,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 fail:
 	xhci_warn(xhci, "Couldn't initialize memory\n");
+	xhci_halt(xhci);
+	xhci_reset(xhci);
 	xhci_mem_cleanup(xhci);
 	return -ENOMEM;
 }
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index ef98b38626fb..7a856a767e77 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -95,6 +95,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
 		xhci->quirks |= XHCI_RESET_ON_RESUME;
 		xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
 	}
+	if (pdev->vendor == PCI_VENDOR_ID_VIA)
+		xhci->quirks |= XHCI_RESET_ON_RESUME;
 }
 
 /* called during probe() after chip reset completes */
@@ -326,7 +328,7 @@ int __init xhci_register_pci(void)
 	return pci_register_driver(&xhci_pci_driver);
 }
 
-void __exit xhci_unregister_pci(void)
+void xhci_unregister_pci(void)
 {
 	pci_unregister_driver(&xhci_pci_driver);
 }
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 6bd9d53062eb..3d9422f16a20 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2417,7 +2417,7 @@ hw_died:
 		u32 irq_pending;
 		/* Acknowledge the PCI interrupt */
 		irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-		irq_pending |= 0x3;
+		irq_pending |= IMAN_IP;
 		xhci_writel(xhci, irq_pending, &xhci->ir_set->irq_pending);
 	}
 
@@ -2734,7 +2734,7 @@ int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
 			urb->dev->speed == USB_SPEED_FULL)
 			urb->interval /= 8;
 	}
-	return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+	return xhci_queue_bulk_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
 
@@ -3514,7 +3514,7 @@ int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
 	}
 	ep_ring->num_trbs_free_temp = ep_ring->num_trbs_free;
 
-	return xhci_queue_isoc_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
+	return xhci_queue_isoc_tx(xhci, mem_flags, urb, slot_id, ep_index);
 }
 
 /**** Command Ring Operations ****/
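
The two GFP fixes above thread the caller's mem_flags down instead of hardcoding GFP_ATOMIC, so only true atomic-context submitters pay the cost of atomic allocation. The call sites below are hypothetical, shown only to illustrate how context picks the flags:

/* IRQ or spinlock context: must not sleep for memory. */
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
/* Process context: blocking allocation is fine and more reliable. */
ret = xhci_queue_intr_tx(xhci, GFP_KERNEL, urb, slot_id, ep_index);
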
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index e1963d4a430f..36641a7f2371 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -106,6 +106,9 @@ int xhci_halt(struct xhci_hcd *xhci)
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
 	if (!ret)
 		xhci->xhc_state |= XHCI_STATE_HALTED;
+	else
+		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
+				XHCI_MAX_HALT_USEC);
 	return ret;
 }
 
@@ -664,11 +667,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
 	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
 	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
 	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
-	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
-	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
 	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
 	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
 	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
+	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
 }
 
 static void xhci_restore_registers(struct xhci_hcd *xhci)
@@ -677,10 +680,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
 	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
 	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
 	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
-	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
-	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
 	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
 	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+	xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
+	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
 }
 
 static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 91074fdab3eb..3d69c4b2b542 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -205,6 +205,10 @@ struct xhci_op_regs {
 #define CMD_PM_INDEX	(1 << 11)
 /* bits 12:31 are reserved (and should be preserved on writes). */
 
+/* IMAN - Interrupt Management Register */
+#define IMAN_IP		(1 << 1)
+#define IMAN_IE		(1 << 0)
+
 /* USBSTS - USB status - status bitmasks */
 /* HC not running - set to 1 when run/stop bit is cleared. */
 #define STS_HALT	XHCI_STS_HALT
diff --git a/drivers/usb/serial/bus.c b/drivers/usb/serial/bus.c
index 7f547dc3a590..ed8adb052ca7 100644
--- a/drivers/usb/serial/bus.c
+++ b/drivers/usb/serial/bus.c
@@ -60,8 +60,6 @@ static int usb_serial_device_probe(struct device *dev)
 		retval = -ENODEV;
 		goto exit;
 	}
-	if (port->dev_state != PORT_REGISTERING)
-		goto exit;
 
 	driver = port->serial->type;
 	if (driver->port_probe) {
@@ -98,9 +96,6 @@ static int usb_serial_device_remove(struct device *dev)
 	if (!port)
 		return -ENODEV;
 
-	if (port->dev_state != PORT_UNREGISTERING)
-		return retval;
-
 	device_remove_file(&port->dev, &dev_attr_port_number);
 
 	driver = port->serial->type;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ff8605b4b4be..02e7f2d32d52 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -75,7 +75,8 @@ struct ftdi_private {
 	unsigned long last_dtr_rts;	/* saved modem control outputs */
 	struct async_icount	icount;
 	wait_queue_head_t delta_msr_wait; /* Used for TIOCMIWAIT */
-	char prev_status, diff_status;        /* Used for TIOCMIWAIT */
+	char prev_status;        /* Used for TIOCMIWAIT */
+	bool dev_gone;        /* Used to abort TIOCMIWAIT */
 	char transmit_empty;	/* If transmitter is empty or not */
 	struct usb_serial_port *port;
 	__u16 interface;	/* FT2232C, FT2232H or FT4232H port interface
@@ -1681,6 +1682,7 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
 	init_waitqueue_head(&priv->delta_msr_wait);
 
 	priv->flags = ASYNC_LOW_LATENCY;
+	priv->dev_gone = false;
 
 	if (quirk && quirk->port_probe)
 		quirk->port_probe(priv);
@@ -1839,6 +1841,9 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
 
 	dbg("%s", __func__);
 
+	priv->dev_gone = true;
+	wake_up_interruptible_all(&priv->delta_msr_wait);
+
 	remove_sysfs_attrs(port);
 
 	kref_put(&priv->kref, ftdi_sio_priv_release);
@@ -1982,17 +1987,19 @@ static int ftdi_process_packet(struct tty_struct *tty,
 	   N.B. packet may be processed more than once, but differences
 	   are only processed once.  */
 	status = packet[0] & FTDI_STATUS_B0_MASK;
-	if (status & FTDI_RS0_CTS)
-		priv->icount.cts++;
-	if (status & FTDI_RS0_DSR)
-		priv->icount.dsr++;
-	if (status & FTDI_RS0_RI)
-		priv->icount.rng++;
-	if (status & FTDI_RS0_RLSD)
-		priv->icount.dcd++;
 	if (status != priv->prev_status) {
-		priv->diff_status |= status ^ priv->prev_status;
-		wake_up_interruptible(&priv->delta_msr_wait);
+		char diff_status = status ^ priv->prev_status;
+
+		if (diff_status & FTDI_RS0_CTS)
+			priv->icount.cts++;
+		if (diff_status & FTDI_RS0_DSR)
+			priv->icount.dsr++;
+		if (diff_status & FTDI_RS0_RI)
+			priv->icount.rng++;
+		if (diff_status & FTDI_RS0_RLSD)
+			priv->icount.dcd++;
+
+		wake_up_interruptible_all(&priv->delta_msr_wait);
 		priv->prev_status = status;
 	}
 
@@ -2395,15 +2402,12 @@ static int ftdi_ioctl(struct tty_struct *tty,
 	 */
 	case TIOCMIWAIT:
 		cprev = priv->icount;
-		while (1) {
+		while (!priv->dev_gone) {
 			interruptible_sleep_on(&priv->delta_msr_wait);
 			/* see if a signal did it */
 			if (signal_pending(current))
 				return -ERESTARTSYS;
 			cnow = priv->icount;
-			if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr &&
-			    cnow.dcd == cprev.dcd && cnow.cts == cprev.cts)
-				return -EIO; /* no change => error */
 			if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
 			    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
 			    ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
@@ -2412,7 +2416,7 @@ static int ftdi_ioctl(struct tty_struct *tty,
 			}
 			cprev = cnow;
 		}
-		/* not reached */
+		return -EIO;
 		break;
 	case TIOCSERGETLSR:
 		return get_lsr_info(port, (struct serial_struct __user *)arg);
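
The dev_gone flag gives TIOCMIWAIT a third exit path (device removal) besides a signal and an actual status change. The same wait could be written with wait_event_interruptible(), which re-checks its condition under the waitqueue lock and so avoids the classic sleep_on() lost-wakeup race; counts_changed() here is a hypothetical helper comparing cnow against cprev for the bits in arg:

/* Sketch only: not the driver's actual implementation. */
ret = wait_event_interruptible(priv->delta_msr_wait,
			       priv->dev_gone ||
			       counts_changed(&priv->icount, &cprev, arg));
if (ret)
	return -ERESTARTSYS;	/* interrupted by a signal */
if (priv->dev_gone)
	return -EIO;		/* device was unplugged */
return 0;			/* a watched count changed */
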
diff --git a/drivers/usb/serial/metro-usb.c b/drivers/usb/serial/metro-usb.c
index 6e1622f2a297..08d16e8c002d 100644
--- a/drivers/usb/serial/metro-usb.c
+++ b/drivers/usb/serial/metro-usb.c
@@ -27,8 +27,8 @@
27 27
28/* Product information. */ 28/* Product information. */
29#define FOCUS_VENDOR_ID 0x0C2E 29#define FOCUS_VENDOR_ID 0x0C2E
30#define FOCUS_PRODUCT_ID 0x0720 30#define FOCUS_PRODUCT_ID_BI 0x0720
31#define FOCUS_PRODUCT_ID_UNI 0x0710 31#define FOCUS_PRODUCT_ID_UNI 0x0700
32 32
33#define METROUSB_SET_REQUEST_TYPE 0x40 33#define METROUSB_SET_REQUEST_TYPE 0x40
34#define METROUSB_SET_MODEM_CTRL_REQUEST 10 34#define METROUSB_SET_MODEM_CTRL_REQUEST 10
@@ -47,7 +47,7 @@ struct metrousb_private {
47 47
48/* Device table list. */ 48/* Device table list. */
49static struct usb_device_id id_table[] = { 49static struct usb_device_id id_table[] = {
50 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID) }, 50 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_BI) },
51 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) }, 51 { USB_DEVICE(FOCUS_VENDOR_ID, FOCUS_PRODUCT_ID_UNI) },
52 { }, /* Terminating entry. */ 52 { }, /* Terminating entry. */
53}; 53};
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 836cfa9a515f..f4465ccddc35 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -708,6 +708,7 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_FULLSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_HIGHSPEED) },
+	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED3) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED4) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_HSPA_HIGHSPEED5) },
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index ff4a174fa5de..a1a9062954c4 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -420,7 +420,7 @@ static void pl2303_set_termios(struct tty_struct *tty,
 	control = priv->line_control;
 	if ((cflag & CBAUD) == B0)
 		priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
-	else
+	else if ((old_termios->c_cflag & CBAUD) == B0)
 		priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
 	if (control != priv->line_control) {
 		control = priv->line_control;
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index f14465a83dd1..fdd5aa2c8d82 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1199, 0x6856) },	/* Sierra Wireless AirCard 881 U */
 	{ USB_DEVICE(0x1199, 0x6859) },	/* Sierra Wireless AirCard 885 E */
 	{ USB_DEVICE(0x1199, 0x685A) },	/* Sierra Wireless AirCard 885 E */
+	{ USB_DEVICE(0x1199, 0x68A2) },	/* Sierra Wireless MC7710 */
 	/* Sierra Wireless C885 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)},
 	/* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 69230f01056a..97355a15bbea 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -1059,6 +1059,12 @@ int usb_serial_probe(struct usb_interface *interface,
 		serial->attached = 1;
 	}
 
+	/* Avoid race with tty_open and serial_install by setting the
+	 * disconnected flag and not clearing it until all ports have been
+	 * registered.
+	 */
+	serial->disconnected = 1;
+
 	if (get_free_serial(serial, num_ports, &minor) == NULL) {
 		dev_err(&interface->dev, "No more free serial devices\n");
 		goto probe_error;
@@ -1070,19 +1076,16 @@ int usb_serial_probe(struct usb_interface *interface,
 		port = serial->port[i];
 		dev_set_name(&port->dev, "ttyUSB%d", port->number);
 		dbg ("%s - registering %s", __func__, dev_name(&port->dev));
-		port->dev_state = PORT_REGISTERING;
 		device_enable_async_suspend(&port->dev);
 
 		retval = device_add(&port->dev);
-		if (retval) {
+		if (retval)
 			dev_err(&port->dev, "Error registering port device, "
 				"continuing\n");
-			port->dev_state = PORT_UNREGISTERED;
-		} else {
-			port->dev_state = PORT_REGISTERED;
-		}
 	}
 
+	serial->disconnected = 0;
+
 	usb_serial_console_init(debug, minor);
 
 exit:
@@ -1124,22 +1127,8 @@ void usb_serial_disconnect(struct usb_interface *interface)
 		}
 		kill_traffic(port);
 		cancel_work_sync(&port->work);
-		if (port->dev_state == PORT_REGISTERED) {
-
-			/* Make sure the port is bound so that the
-			 * driver's port_remove method is called.
-			 */
-			if (!port->dev.driver) {
-				int rc;
-
-				port->dev.driver =
-						&serial->type->driver;
-				rc = device_bind_driver(&port->dev);
-			}
-			port->dev_state = PORT_UNREGISTERING;
+		if (device_is_registered(&port->dev))
 			device_del(&port->dev);
-			port->dev_state = PORT_UNREGISTERED;
-		}
 	}
 	}
 	serial->type->disconnect(serial);
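
The disconnected flag set during registration only works if the open path honours it. A sketch of that check, with serial_open_check() as an illustrative name; struct usb_serial's disc_mutex guards the flag:

static int serial_open_check(struct usb_serial *serial)
{
	int retval = 0;

	mutex_lock(&serial->disc_mutex);
	if (serial->disconnected)
		retval = -ENODEV;	/* ports not fully registered yet */
	mutex_unlock(&serial->disc_mutex);
	return retval;
}
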
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index c18538e4a6db..2653e73db623 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -132,6 +132,35 @@ static struct us_unusual_dev for_dynamic_ids =
 #undef COMPLIANT_DEV
 #undef USUAL_DEV
 
+#ifdef CONFIG_LOCKDEP
+
+static struct lock_class_key us_interface_key[USB_MAXINTERFACES];
+
+static void us_set_lock_class(struct mutex *mutex,
+		struct usb_interface *intf)
+{
+	struct usb_device *udev = interface_to_usbdev(intf);
+	struct usb_host_config *config = udev->actconfig;
+	int i;
+
+	for (i = 0; i < config->desc.bNumInterfaces; i++) {
+		if (config->interface[i] == intf)
+			break;
+	}
+
+	BUG_ON(i == config->desc.bNumInterfaces);
+
+	lockdep_set_class(mutex, &us_interface_key[i]);
+}
+
+#else
+
+static void us_set_lock_class(struct mutex *mutex,
+		struct usb_interface *intf)
+{
+}
+
+#endif
 
 #ifdef CONFIG_PM	/* Minimal support for suspend and resume */
 
@@ -895,6 +924,7 @@ int usb_stor_probe1(struct us_data **pus,
 	*pus = us = host_to_us(host);
 	memset(us, 0, sizeof(struct us_data));
 	mutex_init(&(us->dev_mutex));
+	us_set_lock_class(&us->dev_mutex, intf);
 	init_completion(&us->cmnd_ready);
 	init_completion(&(us->notify));
 	init_waitqueue_head(&us->delay_wait);
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index befcbd8ef019..ffbce4525468 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -499,7 +499,8 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
 	au1100fb_fix.mmio_start = regs_res->start;
 	au1100fb_fix.mmio_len = resource_size(regs_res);
 
-	if (!devm_request_mem_region(au1100fb_fix.mmio_start,
+	if (!devm_request_mem_region(&dev->dev,
+				     au1100fb_fix.mmio_start,
 				     au1100fb_fix.mmio_len,
 				     DRIVER_NAME)) {
 		print_err("fail to lock memory region at 0x%08lx",
@@ -516,7 +517,7 @@ static int __devinit au1100fb_drv_probe(struct platform_device *dev)
 	fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
 			(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
 
-	fbdev->fb_mem = dmam_alloc_coherent(&dev->dev, &dev->dev,
+	fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
 					    PAGE_ALIGN(fbdev->fb_len),
 					    &fbdev->fb_phys, GFP_KERNEL);
 	if (!fbdev->fb_mem) {
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 3e9a773db09f..7ca79f02056e 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1724,7 +1724,7 @@ static int __devinit au1200fb_drv_probe(struct platform_device *dev)
1724 /* Allocate the framebuffer to the maximum screen size */ 1724 /* Allocate the framebuffer to the maximum screen size */
1725 fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8; 1725 fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
1726 1726
1727 fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev, &dev->dev, 1727 fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
1728 PAGE_ALIGN(fbdev->fb_len), 1728 PAGE_ALIGN(fbdev->fb_len),
1729 &fbdev->fb_phys, GFP_KERNEL); 1729 &fbdev->fb_phys, GFP_KERNEL);
1730 if (!fbdev->fb_mem) { 1730 if (!fbdev->fb_mem) {
diff --git a/drivers/video/kyro/STG4000Reg.h b/drivers/video/kyro/STG4000Reg.h
index 5d6269882589..50f4670e9252 100644
--- a/drivers/video/kyro/STG4000Reg.h
+++ b/drivers/video/kyro/STG4000Reg.h
@@ -73,210 +73,210 @@ typedef enum _OVRL_PIX_FORMAT {
73/* Register Table */ 73/* Register Table */
74typedef struct { 74typedef struct {
75 /* 0h */ 75 /* 0h */
76 volatile unsigned long Thread0Enable; /* 0x0000 */ 76 volatile u32 Thread0Enable; /* 0x0000 */
77 volatile unsigned long Thread1Enable; /* 0x0004 */ 77 volatile u32 Thread1Enable; /* 0x0004 */
78 volatile unsigned long Thread0Recover; /* 0x0008 */ 78 volatile u32 Thread0Recover; /* 0x0008 */
79 volatile unsigned long Thread1Recover; /* 0x000C */ 79 volatile u32 Thread1Recover; /* 0x000C */
80 volatile unsigned long Thread0Step; /* 0x0010 */ 80 volatile u32 Thread0Step; /* 0x0010 */
81 volatile unsigned long Thread1Step; /* 0x0014 */ 81 volatile u32 Thread1Step; /* 0x0014 */
82 volatile unsigned long VideoInStatus; /* 0x0018 */ 82 volatile u32 VideoInStatus; /* 0x0018 */
83 volatile unsigned long Core2InSignStart; /* 0x001C */ 83 volatile u32 Core2InSignStart; /* 0x001C */
84 volatile unsigned long Core1ResetVector; /* 0x0020 */ 84 volatile u32 Core1ResetVector; /* 0x0020 */
85 volatile unsigned long Core1ROMOffset; /* 0x0024 */ 85 volatile u32 Core1ROMOffset; /* 0x0024 */
86 volatile unsigned long Core1ArbiterPriority; /* 0x0028 */ 86 volatile u32 Core1ArbiterPriority; /* 0x0028 */
87 volatile unsigned long VideoInControl; /* 0x002C */ 87 volatile u32 VideoInControl; /* 0x002C */
88 volatile unsigned long VideoInReg0CtrlA; /* 0x0030 */ 88 volatile u32 VideoInReg0CtrlA; /* 0x0030 */
89 volatile unsigned long VideoInReg0CtrlB; /* 0x0034 */ 89 volatile u32 VideoInReg0CtrlB; /* 0x0034 */
90 volatile unsigned long VideoInReg1CtrlA; /* 0x0038 */ 90 volatile u32 VideoInReg1CtrlA; /* 0x0038 */
91 volatile unsigned long VideoInReg1CtrlB; /* 0x003C */ 91 volatile u32 VideoInReg1CtrlB; /* 0x003C */
92 volatile unsigned long Thread0Kicker; /* 0x0040 */ 92 volatile u32 Thread0Kicker; /* 0x0040 */
93 volatile unsigned long Core2InputSign; /* 0x0044 */ 93 volatile u32 Core2InputSign; /* 0x0044 */
94 volatile unsigned long Thread0ProgCtr; /* 0x0048 */ 94 volatile u32 Thread0ProgCtr; /* 0x0048 */
95 volatile unsigned long Thread1ProgCtr; /* 0x004C */ 95 volatile u32 Thread1ProgCtr; /* 0x004C */
96 volatile unsigned long Thread1Kicker; /* 0x0050 */ 96 volatile u32 Thread1Kicker; /* 0x0050 */
97 volatile unsigned long GPRegister1; /* 0x0054 */ 97 volatile u32 GPRegister1; /* 0x0054 */
98 volatile unsigned long GPRegister2; /* 0x0058 */ 98 volatile u32 GPRegister2; /* 0x0058 */
99 volatile unsigned long GPRegister3; /* 0x005C */ 99 volatile u32 GPRegister3; /* 0x005C */
100 volatile unsigned long GPRegister4; /* 0x0060 */ 100 volatile u32 GPRegister4; /* 0x0060 */
101 volatile unsigned long SerialIntA; /* 0x0064 */ 101 volatile u32 SerialIntA; /* 0x0064 */
102 102
103 volatile unsigned long Fill0[6]; /* GAP 0x0068 - 0x007C */ 103 volatile u32 Fill0[6]; /* GAP 0x0068 - 0x007C */
104 104
105 volatile unsigned long SoftwareReset; /* 0x0080 */ 105 volatile u32 SoftwareReset; /* 0x0080 */
106 volatile unsigned long SerialIntB; /* 0x0084 */ 106 volatile u32 SerialIntB; /* 0x0084 */
107 107
108 volatile unsigned long Fill1[37]; /* GAP 0x0088 - 0x011C */ 108 volatile u32 Fill1[37]; /* GAP 0x0088 - 0x011C */
109 109
110 volatile unsigned long ROMELQV; /* 0x011C */ 110 volatile u32 ROMELQV; /* 0x011C */
111 volatile unsigned long WLWH; /* 0x0120 */ 111 volatile u32 WLWH; /* 0x0120 */
112 volatile unsigned long ROMELWL; /* 0x0124 */ 112 volatile u32 ROMELWL; /* 0x0124 */
113 113
114 volatile unsigned long dwFill_1; /* GAP 0x0128 */ 114 volatile u32 dwFill_1; /* GAP 0x0128 */
115 115
116 volatile unsigned long IntStatus; /* 0x012C */ 116 volatile u32 IntStatus; /* 0x012C */
117 volatile unsigned long IntMask; /* 0x0130 */ 117 volatile u32 IntMask; /* 0x0130 */
118 volatile unsigned long IntClear; /* 0x0134 */ 118 volatile u32 IntClear; /* 0x0134 */
119 119
120 volatile unsigned long Fill2[6]; /* GAP 0x0138 - 0x014C */ 120 volatile u32 Fill2[6]; /* GAP 0x0138 - 0x014C */
121 121
122 volatile unsigned long ROMGPIOA; /* 0x0150 */ 122 volatile u32 ROMGPIOA; /* 0x0150 */
123 volatile unsigned long ROMGPIOB; /* 0x0154 */ 123 volatile u32 ROMGPIOB; /* 0x0154 */
124 volatile unsigned long ROMGPIOC; /* 0x0158 */ 124 volatile u32 ROMGPIOC; /* 0x0158 */
125 volatile unsigned long ROMGPIOD; /* 0x015C */ 125 volatile u32 ROMGPIOD; /* 0x015C */
126 126
127 volatile unsigned long Fill3[2]; /* GAP 0x0160 - 0x0168 */ 127 volatile u32 Fill3[2]; /* GAP 0x0160 - 0x0168 */
128 128
129 volatile unsigned long AGPIntID; /* 0x0168 */ 129 volatile u32 AGPIntID; /* 0x0168 */
130 volatile unsigned long AGPIntClassCode; /* 0x016C */ 130 volatile u32 AGPIntClassCode; /* 0x016C */
131 volatile unsigned long AGPIntBIST; /* 0x0170 */ 131 volatile u32 AGPIntBIST; /* 0x0170 */
132 volatile unsigned long AGPIntSSID; /* 0x0174 */ 132 volatile u32 AGPIntSSID; /* 0x0174 */
133 volatile unsigned long AGPIntPMCSR; /* 0x0178 */ 133 volatile u32 AGPIntPMCSR; /* 0x0178 */
134 volatile unsigned long VGAFrameBufBase; /* 0x017C */ 134 volatile u32 VGAFrameBufBase; /* 0x017C */
135 volatile unsigned long VGANotify; /* 0x0180 */ 135 volatile u32 VGANotify; /* 0x0180 */
136 volatile unsigned long DACPLLMode; /* 0x0184 */ 136 volatile u32 DACPLLMode; /* 0x0184 */
137 volatile unsigned long Core1VideoClockDiv; /* 0x0188 */ 137 volatile u32 Core1VideoClockDiv; /* 0x0188 */
138 volatile unsigned long AGPIntStat; /* 0x018C */ 138 volatile u32 AGPIntStat; /* 0x018C */
139 139
140 /* 140 /*
141 volatile unsigned long Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400 141 volatile u32 Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
142 volatile unsigned long Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table 142 volatile u32 Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
143 volatile unsigned long Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604 143 volatile u32 Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
144 volatile unsigned long Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680 144 volatile u32 Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
145 volatile unsigned long Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC 145 volatile u32 Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
146 */ 146 */
147 volatile unsigned long Fill4[412]; /* 0x0190 - 0x07FC */ 147 volatile u32 Fill4[412]; /* 0x0190 - 0x07FC */
148 148
149 volatile unsigned long TACtrlStreamBase; /* 0x0800 */ 149 volatile u32 TACtrlStreamBase; /* 0x0800 */
150 volatile unsigned long TAObjDataBase; /* 0x0804 */ 150 volatile u32 TAObjDataBase; /* 0x0804 */
151 volatile unsigned long TAPtrDataBase; /* 0x0808 */ 151 volatile u32 TAPtrDataBase; /* 0x0808 */
152 volatile unsigned long TARegionDataBase; /* 0x080C */ 152 volatile u32 TARegionDataBase; /* 0x080C */
153 volatile unsigned long TATailPtrBase; /* 0x0810 */ 153 volatile u32 TATailPtrBase; /* 0x0810 */
154 volatile unsigned long TAPtrRegionSize; /* 0x0814 */ 154 volatile u32 TAPtrRegionSize; /* 0x0814 */
155 volatile unsigned long TAConfiguration; /* 0x0818 */ 155 volatile u32 TAConfiguration; /* 0x0818 */
156 volatile unsigned long TAObjDataStartAddr; /* 0x081C */ 156 volatile u32 TAObjDataStartAddr; /* 0x081C */
157 volatile unsigned long TAObjDataEndAddr; /* 0x0820 */ 157 volatile u32 TAObjDataEndAddr; /* 0x0820 */
158 volatile unsigned long TAXScreenClip; /* 0x0824 */ 158 volatile u32 TAXScreenClip; /* 0x0824 */
159 volatile unsigned long TAYScreenClip; /* 0x0828 */ 159 volatile u32 TAYScreenClip; /* 0x0828 */
160 volatile unsigned long TARHWClamp; /* 0x082C */ 160 volatile u32 TARHWClamp; /* 0x082C */
161 volatile unsigned long TARHWCompare; /* 0x0830 */ 161 volatile u32 TARHWCompare; /* 0x0830 */
162 volatile unsigned long TAStart; /* 0x0834 */ 162 volatile u32 TAStart; /* 0x0834 */
163 volatile unsigned long TAObjReStart; /* 0x0838 */ 163 volatile u32 TAObjReStart; /* 0x0838 */
164 volatile unsigned long TAPtrReStart; /* 0x083C */ 164 volatile u32 TAPtrReStart; /* 0x083C */
165 volatile unsigned long TAStatus1; /* 0x0840 */ 165 volatile u32 TAStatus1; /* 0x0840 */
166 volatile unsigned long TAStatus2; /* 0x0844 */ 166 volatile u32 TAStatus2; /* 0x0844 */
167 volatile unsigned long TAIntStatus; /* 0x0848 */ 167 volatile u32 TAIntStatus; /* 0x0848 */
168 volatile unsigned long TAIntMask; /* 0x084C */ 168 volatile u32 TAIntMask; /* 0x084C */
169 169
170 volatile unsigned long Fill5[235]; /* GAP 0x0850 - 0x0BF8 */ 170 volatile u32 Fill5[235]; /* GAP 0x0850 - 0x0BF8 */
171 171
172 volatile unsigned long TextureAddrThresh; /* 0x0BFC */ 172 volatile u32 TextureAddrThresh; /* 0x0BFC */
173 volatile unsigned long Core1Translation; /* 0x0C00 */ 173 volatile u32 Core1Translation; /* 0x0C00 */
174 volatile unsigned long TextureAddrReMap; /* 0x0C04 */ 174 volatile u32 TextureAddrReMap; /* 0x0C04 */
175 volatile unsigned long RenderOutAGPRemap; /* 0x0C08 */ 175 volatile u32 RenderOutAGPRemap; /* 0x0C08 */
176 volatile unsigned long _3DRegionReadTrans; /* 0x0C0C */ 176 volatile u32 _3DRegionReadTrans; /* 0x0C0C */
177 volatile unsigned long _3DPtrReadTrans; /* 0x0C10 */ 177 volatile u32 _3DPtrReadTrans; /* 0x0C10 */
178 volatile unsigned long _3DParamReadTrans; /* 0x0C14 */ 178 volatile u32 _3DParamReadTrans; /* 0x0C14 */
179 volatile unsigned long _3DRegionReadThresh; /* 0x0C18 */ 179 volatile u32 _3DRegionReadThresh; /* 0x0C18 */
180 volatile unsigned long _3DPtrReadThresh; /* 0x0C1C */ 180 volatile u32 _3DPtrReadThresh; /* 0x0C1C */
181 volatile unsigned long _3DParamReadThresh; /* 0x0C20 */ 181 volatile u32 _3DParamReadThresh; /* 0x0C20 */
182 volatile unsigned long _3DRegionReadAGPRemap; /* 0x0C24 */ 182 volatile u32 _3DRegionReadAGPRemap; /* 0x0C24 */
183 volatile unsigned long _3DPtrReadAGPRemap; /* 0x0C28 */ 183 volatile u32 _3DPtrReadAGPRemap; /* 0x0C28 */
184 volatile unsigned long _3DParamReadAGPRemap; /* 0x0C2C */ 184 volatile u32 _3DParamReadAGPRemap; /* 0x0C2C */
185 volatile unsigned long ZBufferAGPRemap; /* 0x0C30 */ 185 volatile u32 ZBufferAGPRemap; /* 0x0C30 */
186 volatile unsigned long TAIndexAGPRemap; /* 0x0C34 */ 186 volatile u32 TAIndexAGPRemap; /* 0x0C34 */
187 volatile unsigned long TAVertexAGPRemap; /* 0x0C38 */ 187 volatile u32 TAVertexAGPRemap; /* 0x0C38 */
188 volatile unsigned long TAUVAddrTrans; /* 0x0C3C */ 188 volatile u32 TAUVAddrTrans; /* 0x0C3C */
189 volatile unsigned long TATailPtrCacheTrans; /* 0x0C40 */ 189 volatile u32 TATailPtrCacheTrans; /* 0x0C40 */
190 volatile unsigned long TAParamWriteTrans; /* 0x0C44 */ 190 volatile u32 TAParamWriteTrans; /* 0x0C44 */
191 volatile unsigned long TAPtrWriteTrans; /* 0x0C48 */ 191 volatile u32 TAPtrWriteTrans; /* 0x0C48 */
192 volatile unsigned long TAParamWriteThresh; /* 0x0C4C */ 192 volatile u32 TAParamWriteThresh; /* 0x0C4C */
193 volatile unsigned long TAPtrWriteThresh; /* 0x0C50 */ 193 volatile u32 TAPtrWriteThresh; /* 0x0C50 */
194 volatile unsigned long TATailPtrCacheAGPRe; /* 0x0C54 */ 194 volatile u32 TATailPtrCacheAGPRe; /* 0x0C54 */
195 volatile unsigned long TAParamWriteAGPRe; /* 0x0C58 */ 195 volatile u32 TAParamWriteAGPRe; /* 0x0C58 */
196 volatile unsigned long TAPtrWriteAGPRe; /* 0x0C5C */ 196 volatile u32 TAPtrWriteAGPRe; /* 0x0C5C */
197 volatile unsigned long SDRAMArbiterConf; /* 0x0C60 */ 197 volatile u32 SDRAMArbiterConf; /* 0x0C60 */
198 volatile unsigned long SDRAMConf0; /* 0x0C64 */ 198 volatile u32 SDRAMConf0; /* 0x0C64 */
199 volatile unsigned long SDRAMConf1; /* 0x0C68 */ 199 volatile u32 SDRAMConf1; /* 0x0C68 */
200 volatile unsigned long SDRAMConf2; /* 0x0C6C */ 200 volatile u32 SDRAMConf2; /* 0x0C6C */
201 volatile unsigned long SDRAMRefresh; /* 0x0C70 */ 201 volatile u32 SDRAMRefresh; /* 0x0C70 */
202 volatile unsigned long SDRAMPowerStat; /* 0x0C74 */ 202 volatile u32 SDRAMPowerStat; /* 0x0C74 */
203 203
204 volatile unsigned long Fill6[2]; /* GAP 0x0C78 - 0x0C7C */ 204 volatile u32 Fill6[2]; /* GAP 0x0C78 - 0x0C7C */
205 205
206 volatile unsigned long RAMBistData; /* 0x0C80 */ 206 volatile u32 RAMBistData; /* 0x0C80 */
207 volatile unsigned long RAMBistCtrl; /* 0x0C84 */ 207 volatile u32 RAMBistCtrl; /* 0x0C84 */
208 volatile unsigned long FIFOBistKey; /* 0x0C88 */ 208 volatile u32 FIFOBistKey; /* 0x0C88 */
209 volatile unsigned long RAMBistResult; /* 0x0C8C */ 209 volatile u32 RAMBistResult; /* 0x0C8C */
210 volatile unsigned long FIFOBistResult; /* 0x0C90 */ 210 volatile u32 FIFOBistResult; /* 0x0C90 */
211 211
212 /* 212 /*
213 volatile unsigned long Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC 213 volatile u32 Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
214 volatile unsigned long Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters 214 volatile u32 Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
215 */ 215 */
216 216
217 volatile unsigned long Fill7[16]; /* 0x0c94 - 0x0cd0 */ 217 volatile u32 Fill7[16]; /* 0x0c94 - 0x0cd0 */
218 218
219 volatile unsigned long SDRAMAddrSign; /* 0x0CD4 */ 219 volatile u32 SDRAMAddrSign; /* 0x0CD4 */
220 volatile unsigned long SDRAMDataSign; /* 0x0CD8 */ 220 volatile u32 SDRAMDataSign; /* 0x0CD8 */
221 volatile unsigned long SDRAMSignConf; /* 0x0CDC */ 221 volatile u32 SDRAMSignConf; /* 0x0CDC */
222 222
223 /* DWFILL; //GAP 0x0CE0 */ 223 /* DWFILL; //GAP 0x0CE0 */
224 volatile unsigned long dwFill_2; 224 volatile u32 dwFill_2;
225 225
226 volatile unsigned long ISPSignature; /* 0x0CE4 */ 226 volatile u32 ISPSignature; /* 0x0CE4 */
227 227
228 volatile unsigned long Fill8[454]; /*GAP 0x0CE8 - 0x13FC */ 228 volatile u32 Fill8[454]; /*GAP 0x0CE8 - 0x13FC */
229 229
230 volatile unsigned long DACPrimAddress; /* 0x1400 */ 230 volatile u32 DACPrimAddress; /* 0x1400 */
231 volatile unsigned long DACPrimSize; /* 0x1404 */ 231 volatile u32 DACPrimSize; /* 0x1404 */
232 volatile unsigned long DACCursorAddr; /* 0x1408 */ 232 volatile u32 DACCursorAddr; /* 0x1408 */
233 volatile unsigned long DACCursorCtrl; /* 0x140C */ 233 volatile u32 DACCursorCtrl; /* 0x140C */
234 volatile unsigned long DACOverlayAddr; /* 0x1410 */ 234 volatile u32 DACOverlayAddr; /* 0x1410 */
235 volatile unsigned long DACOverlayUAddr; /* 0x1414 */ 235 volatile u32 DACOverlayUAddr; /* 0x1414 */
236 volatile unsigned long DACOverlayVAddr; /* 0x1418 */ 236 volatile u32 DACOverlayVAddr; /* 0x1418 */
237 volatile unsigned long DACOverlaySize; /* 0x141C */ 237 volatile u32 DACOverlaySize; /* 0x141C */
238 volatile unsigned long DACOverlayVtDec; /* 0x1420 */ 238 volatile u32 DACOverlayVtDec; /* 0x1420 */
239 239
240 volatile unsigned long Fill9[9]; /* GAP 0x1424 - 0x1444 */ 240 volatile u32 Fill9[9]; /* GAP 0x1424 - 0x1444 */
241 241
242 volatile unsigned long DACVerticalScal; /* 0x1448 */ 242 volatile u32 DACVerticalScal; /* 0x1448 */
243 volatile unsigned long DACPixelFormat; /* 0x144C */ 243 volatile u32 DACPixelFormat; /* 0x144C */
244 volatile unsigned long DACHorizontalScal; /* 0x1450 */ 244 volatile u32 DACHorizontalScal; /* 0x1450 */
245 volatile unsigned long DACVidWinStart; /* 0x1454 */ 245 volatile u32 DACVidWinStart; /* 0x1454 */
246 volatile unsigned long DACVidWinEnd; /* 0x1458 */ 246 volatile u32 DACVidWinEnd; /* 0x1458 */
247 volatile unsigned long DACBlendCtrl; /* 0x145C */ 247 volatile u32 DACBlendCtrl; /* 0x145C */
248 volatile unsigned long DACHorTim1; /* 0x1460 */ 248 volatile u32 DACHorTim1; /* 0x1460 */
249 volatile unsigned long DACHorTim2; /* 0x1464 */ 249 volatile u32 DACHorTim2; /* 0x1464 */
250 volatile unsigned long DACHorTim3; /* 0x1468 */ 250 volatile u32 DACHorTim3; /* 0x1468 */
251 volatile unsigned long DACVerTim1; /* 0x146C */ 251 volatile u32 DACVerTim1; /* 0x146C */
252 volatile unsigned long DACVerTim2; /* 0x1470 */ 252 volatile u32 DACVerTim2; /* 0x1470 */
253 volatile unsigned long DACVerTim3; /* 0x1474 */ 253 volatile u32 DACVerTim3; /* 0x1474 */
254 volatile unsigned long DACBorderColor; /* 0x1478 */ 254 volatile u32 DACBorderColor; /* 0x1478 */
255 volatile unsigned long DACSyncCtrl; /* 0x147C */ 255 volatile u32 DACSyncCtrl; /* 0x147C */
256 volatile unsigned long DACStreamCtrl; /* 0x1480 */ 256 volatile u32 DACStreamCtrl; /* 0x1480 */
257 volatile unsigned long DACLUTAddress; /* 0x1484 */ 257 volatile u32 DACLUTAddress; /* 0x1484 */
258 volatile unsigned long DACLUTData; /* 0x1488 */ 258 volatile u32 DACLUTData; /* 0x1488 */
259 volatile unsigned long DACBurstCtrl; /* 0x148C */ 259 volatile u32 DACBurstCtrl; /* 0x148C */
260 volatile unsigned long DACCrcTrigger; /* 0x1490 */ 260 volatile u32 DACCrcTrigger; /* 0x1490 */
261 volatile unsigned long DACCrcDone; /* 0x1494 */ 261 volatile u32 DACCrcDone; /* 0x1494 */
262 volatile unsigned long DACCrcResult1; /* 0x1498 */ 262 volatile u32 DACCrcResult1; /* 0x1498 */
263 volatile unsigned long DACCrcResult2; /* 0x149C */ 263 volatile u32 DACCrcResult2; /* 0x149C */
264 volatile unsigned long DACLinecount; /* 0x14A0 */ 264 volatile u32 DACLinecount; /* 0x14A0 */
265 265
266 volatile unsigned long Fill10[151]; /*GAP 0x14A4 - 0x16FC */ 266 volatile u32 Fill10[151]; /*GAP 0x14A4 - 0x16FC */
267 267
268 volatile unsigned long DigVidPortCtrl; /* 0x1700 */ 268 volatile u32 DigVidPortCtrl; /* 0x1700 */
269 volatile unsigned long DigVidPortStat; /* 0x1704 */ 269 volatile u32 DigVidPortStat; /* 0x1704 */
270 270
271 /* 271 /*
272 volatile unsigned long Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC 272 volatile u32 Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
273 volatile unsigned long Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT 273 volatile u32 Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
274 */ 274 */
275 275
276 volatile unsigned long Fill11[1598]; 276 volatile u32 Fill11[1598];
277 277
278 /* DWFILL; //GAP 0x3000 ALUT 256MB offset */ 278 /* DWFILL; //GAP 0x3000 ALUT 256MB offset */
279 volatile unsigned long Fill_3; 279 volatile u32 Fill_3;
280 280
281} STG4000REG; 281} STG4000REG;
282 282
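The register map above is a memory-mapped I/O overlay, so every field must stay exactly 32 bits wide: with an 8-byte unsigned long (LP64 kernels) each offset would double and every address comment in the header would be wrong. A minimal userspace sketch, with uint32_t standing in for the kernel's u32 and a hypothetical struct name mirroring the first three STG4000 registers, that pins the expected offsets:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct stg4000_head {
	volatile uint32_t Thread0Enable;	/* 0x0000 */
	volatile uint32_t Thread1Enable;	/* 0x0004 */
	volatile uint32_t Thread0Recover;	/* 0x0008 */
};

/* With 8-byte unsigned long these offsets would be 0x8 and 0x10. */
_Static_assert(offsetof(struct stg4000_head, Thread1Enable) == 0x4, "offset");
_Static_assert(offsetof(struct stg4000_head, Thread0Recover) == 0x8, "offset");

int main(void)
{
	printf("unsigned long: %zu bytes, uint32_t: %zu bytes\n",
	       sizeof(unsigned long), sizeof(uint32_t));
	return 0;
}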
diff --git a/drivers/video/msm/mddi.c b/drivers/video/msm/mddi.c
index 4527cbf0a4ec..b061d709bc44 100644
--- a/drivers/video/msm/mddi.c
+++ b/drivers/video/msm/mddi.c
@@ -420,7 +420,7 @@ static void mddi_resume(struct msm_mddi_client_data *cdata)
420 mddi_set_auto_hibernate(&mddi->client_data, 1); 420 mddi_set_auto_hibernate(&mddi->client_data, 1);
421} 421}
422 422
423static int __init mddi_get_client_caps(struct mddi_info *mddi) 423static int __devinit mddi_get_client_caps(struct mddi_info *mddi)
424{ 424{
425 int i, j; 425 int i, j;
426 426
@@ -622,9 +622,9 @@ uint32_t mddi_remote_read(struct msm_mddi_client_data *cdata, uint32_t reg)
622 622
623static struct mddi_info mddi_info[2]; 623static struct mddi_info mddi_info[2];
624 624
625static int __init mddi_clk_setup(struct platform_device *pdev, 625static int __devinit mddi_clk_setup(struct platform_device *pdev,
626 struct mddi_info *mddi, 626 struct mddi_info *mddi,
627 unsigned long clk_rate) 627 unsigned long clk_rate)
628{ 628{
629 int ret; 629 int ret;
630 630
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 260cca7ddb41..26e83d7fdd6f 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -815,8 +815,15 @@ static int __devinit uvesafb_vbe_init(struct fb_info *info)
815 par->pmi_setpal = pmi_setpal; 815 par->pmi_setpal = pmi_setpal;
816 par->ypan = ypan; 816 par->ypan = ypan;
817 817
818 if (par->pmi_setpal || par->ypan) 818 if (par->pmi_setpal || par->ypan) {
819 uvesafb_vbe_getpmi(task, par); 819 if (__supported_pte_mask & _PAGE_NX) {
820 par->pmi_setpal = par->ypan = 0;
 821 printk(KERN_WARNING "uvesafb: NX protection is active. "
 822 "We had better not use the PMI.\n");
823 } else {
824 uvesafb_vbe_getpmi(task, par);
825 }
826 }
820#else 827#else
821 /* The protected mode interface is not available on non-x86. */ 828 /* The protected mode interface is not available on non-x86. */
822 par->pmi_setpal = par->ypan = 0; 829 par->pmi_setpal = par->ypan = 0;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d286b40a5671..86eff48dab78 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -405,6 +405,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
405 bio_put(bio); 405 bio_put(bio);
406 406
407 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS); 407 bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
408 BUG_ON(!bio);
408 bio->bi_private = cb; 409 bio->bi_private = cb;
409 bio->bi_end_io = end_compressed_bio_write; 410 bio->bi_end_io = end_compressed_bio_write;
410 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 411 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -687,6 +688,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
687 688
688 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, 689 comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
689 GFP_NOFS); 690 GFP_NOFS);
691 BUG_ON(!comp_bio);
690 comp_bio->bi_private = cb; 692 comp_bio->bi_private = cb;
691 comp_bio->bi_end_io = end_compressed_bio_read; 693 comp_bio->bi_end_io = end_compressed_bio_read;
692 694
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a84420491c11..2b35f8d14bb9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -529,9 +529,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
529 * allocate blocks for the tree root we can't do the fast caching since 529 * allocate blocks for the tree root we can't do the fast caching since
530 * we likely hold important locks. 530 * we likely hold important locks.
531 */ 531 */
532 if (trans && (!trans->transaction->in_commit) && 532 if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
533 (root && root != root->fs_info->tree_root) &&
534 btrfs_test_opt(root, SPACE_CACHE)) {
535 ret = load_free_space_cache(fs_info, cache); 533 ret = load_free_space_cache(fs_info, cache);
536 534
537 spin_lock(&cache->lock); 535 spin_lock(&cache->lock);
@@ -3152,15 +3150,14 @@ static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3152/* 3150/*
3153 * returns target flags in extended format or 0 if restripe for this 3151 * returns target flags in extended format or 0 if restripe for this
3154 * chunk_type is not in progress 3152 * chunk_type is not in progress
3153 *
3154 * should be called with either volume_mutex or balance_lock held
3155 */ 3155 */
3156static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags) 3156static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3157{ 3157{
3158 struct btrfs_balance_control *bctl = fs_info->balance_ctl; 3158 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3159 u64 target = 0; 3159 u64 target = 0;
3160 3160
3161 BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
3162 !spin_is_locked(&fs_info->balance_lock));
3163
3164 if (!bctl) 3161 if (!bctl)
3165 return 0; 3162 return 0;
3166 3163
@@ -4205,7 +4202,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
4205 num_bytes += div64_u64(data_used + meta_used, 50); 4202 num_bytes += div64_u64(data_used + meta_used, 50);
4206 4203
4207 if (num_bytes * 3 > meta_used) 4204 if (num_bytes * 3 > meta_used)
4208 num_bytes = div64_u64(meta_used, 3) * 2; 4205 num_bytes = div64_u64(meta_used, 3);
4209 4206
4210 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10); 4207 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
4211} 4208}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8d904dd7ea9f..cd4b5e400221 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1937,7 +1937,7 @@ int repair_eb_io_failure(struct btrfs_root *root, struct extent_buffer *eb,
1937 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree; 1937 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
1938 u64 start = eb->start; 1938 u64 start = eb->start;
1939 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len); 1939 unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
1940 int ret; 1940 int ret = 0;
1941 1941
1942 for (i = 0; i < num_pages; i++) { 1942 for (i = 0; i < num_pages; i++) {
1943 struct page *p = extent_buffer_page(eb, i); 1943 struct page *p = extent_buffer_page(eb, i);
@@ -2180,6 +2180,10 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2180 } 2180 }
2181 2181
2182 bio = bio_alloc(GFP_NOFS, 1); 2182 bio = bio_alloc(GFP_NOFS, 1);
2183 if (!bio) {
2184 free_io_failure(inode, failrec, 0);
2185 return -EIO;
2186 }
2183 bio->bi_private = state; 2187 bio->bi_private = state;
2184 bio->bi_end_io = failed_bio->bi_end_io; 2188 bio->bi_end_io = failed_bio->bi_end_io;
2185 bio->bi_sector = failrec->logical >> 9; 2189 bio->bi_sector = failrec->logical >> 9;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index e88330d3df52..202008ec367d 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -748,13 +748,6 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
748 u64 used = btrfs_block_group_used(&block_group->item); 748 u64 used = btrfs_block_group_used(&block_group->item);
749 749
750 /* 750 /*
751 * If we're unmounting then just return, since this does a search on the
752 * normal root and not the commit root and we could deadlock.
753 */
754 if (btrfs_fs_closing(fs_info))
755 return 0;
756
757 /*
758 * If this block group has been marked to be cleared for one reason or 751 * If this block group has been marked to be cleared for one reason or
759 * another then we can't trust the on disk cache, so just return. 752 * another then we can't trust the on disk cache, so just return.
760 */ 753 */
@@ -768,6 +761,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
768 path = btrfs_alloc_path(); 761 path = btrfs_alloc_path();
769 if (!path) 762 if (!path)
770 return 0; 763 return 0;
764 path->search_commit_root = 1;
765 path->skip_locking = 1;
771 766
772 inode = lookup_free_space_inode(root, block_group, path); 767 inode = lookup_free_space_inode(root, block_group, path);
773 if (IS_ERR(inode)) { 768 if (IS_ERR(inode)) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 90acc82046c3..bc015f77f3ea 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1044,6 +1044,8 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1044 1044
1045 BUG_ON(!page->page); 1045 BUG_ON(!page->page);
1046 bio = bio_alloc(GFP_NOFS, 1); 1046 bio = bio_alloc(GFP_NOFS, 1);
1047 if (!bio)
1048 return -EIO;
1047 bio->bi_bdev = page->bdev; 1049 bio->bi_bdev = page->bdev;
1048 bio->bi_sector = page->physical >> 9; 1050 bio->bi_sector = page->physical >> 9;
1049 bio->bi_end_io = scrub_complete_bio_end_io; 1051 bio->bi_end_io = scrub_complete_bio_end_io;
@@ -1171,6 +1173,8 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1171 DECLARE_COMPLETION_ONSTACK(complete); 1173 DECLARE_COMPLETION_ONSTACK(complete);
1172 1174
1173 bio = bio_alloc(GFP_NOFS, 1); 1175 bio = bio_alloc(GFP_NOFS, 1);
1176 if (!bio)
1177 return -EIO;
1174 bio->bi_bdev = page_bad->bdev; 1178 bio->bi_bdev = page_bad->bdev;
1175 bio->bi_sector = page_bad->physical >> 9; 1179 bio->bi_sector = page_bad->physical >> 9;
1176 bio->bi_end_io = scrub_complete_bio_end_io; 1180 bio->bi_end_io = scrub_complete_bio_end_io;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8da29e8e4de1..11b77a59db62 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -480,6 +480,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
480 struct btrfs_transaction *cur_trans = trans->transaction; 480 struct btrfs_transaction *cur_trans = trans->transaction;
481 struct btrfs_fs_info *info = root->fs_info; 481 struct btrfs_fs_info *info = root->fs_info;
482 int count = 0; 482 int count = 0;
483 int err = 0;
483 484
484 if (--trans->use_count) { 485 if (--trans->use_count) {
485 trans->block_rsv = trans->orig_rsv; 486 trans->block_rsv = trans->orig_rsv;
@@ -532,18 +533,18 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
532 533
533 if (current->journal_info == trans) 534 if (current->journal_info == trans)
534 current->journal_info = NULL; 535 current->journal_info = NULL;
535 memset(trans, 0, sizeof(*trans));
536 kmem_cache_free(btrfs_trans_handle_cachep, trans);
537 536
538 if (throttle) 537 if (throttle)
539 btrfs_run_delayed_iputs(root); 538 btrfs_run_delayed_iputs(root);
540 539
541 if (trans->aborted || 540 if (trans->aborted ||
542 root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 541 root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
543 return -EIO; 542 err = -EIO;
544 } 543 }
545 544
546 return 0; 545 memset(trans, 0, sizeof(*trans));
546 kmem_cache_free(btrfs_trans_handle_cachep, trans);
547 return err;
547} 548}
548 549
549int btrfs_end_transaction(struct btrfs_trans_handle *trans, 550int btrfs_end_transaction(struct btrfs_trans_handle *trans,
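The reordering in this hunk matters: the old __btrfs_end_transaction() returned trans to btrfs_trans_handle_cachep and then dereferenced trans->aborted, a use-after-free. The new code latches the error first and frees last. A minimal userspace sketch of the same ordering, with hypothetical names and malloc/free standing in for the kmem_cache calls:

#include <stdlib.h>

struct handle { int aborted; };

static int end_handle(struct handle *h)
{
	int err = 0;

	/* Read everything needed from *h before releasing it... */
	if (h->aborted)
		err = -5;	/* -EIO */

	/* ...and only then free. The buggy version freed first and
	 * read h->aborted from freed memory afterwards. */
	free(h);
	return err;
}

int main(void)
{
	struct handle *h = calloc(1, sizeof(*h));
	return h ? end_handle(h) : 1;
}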
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a872b48be0ae..759d02486d7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3833,6 +3833,7 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3833 int sub_stripes = 0; 3833 int sub_stripes = 0;
3834 u64 stripes_per_dev = 0; 3834 u64 stripes_per_dev = 0;
3835 u32 remaining_stripes = 0; 3835 u32 remaining_stripes = 0;
3836 u32 last_stripe = 0;
3836 3837
3837 if (map->type & 3838 if (map->type &
3838 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) { 3839 (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
@@ -3846,6 +3847,8 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3846 stripe_nr_orig, 3847 stripe_nr_orig,
3847 factor, 3848 factor,
3848 &remaining_stripes); 3849 &remaining_stripes);
3850 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
3851 last_stripe *= sub_stripes;
3849 } 3852 }
3850 3853
3851 for (i = 0; i < num_stripes; i++) { 3854 for (i = 0; i < num_stripes; i++) {
@@ -3858,16 +3861,29 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
3858 BTRFS_BLOCK_GROUP_RAID10)) { 3861 BTRFS_BLOCK_GROUP_RAID10)) {
3859 bbio->stripes[i].length = stripes_per_dev * 3862 bbio->stripes[i].length = stripes_per_dev *
3860 map->stripe_len; 3863 map->stripe_len;
3864
3861 if (i / sub_stripes < remaining_stripes) 3865 if (i / sub_stripes < remaining_stripes)
3862 bbio->stripes[i].length += 3866 bbio->stripes[i].length +=
3863 map->stripe_len; 3867 map->stripe_len;
3868
3869 /*
3870 * Special for the first stripe and
3871 * the last stripe:
3872 *
3873 * |-------|...|-------|
3874 * |----------|
3875 * off end_off
3876 */
3864 if (i < sub_stripes) 3877 if (i < sub_stripes)
3865 bbio->stripes[i].length -= 3878 bbio->stripes[i].length -=
3866 stripe_offset; 3879 stripe_offset;
3867 if ((i / sub_stripes + 1) % 3880
3868 sub_stripes == remaining_stripes) 3881 if (stripe_index >= last_stripe &&
3882 stripe_index <= (last_stripe +
3883 sub_stripes - 1))
3869 bbio->stripes[i].length -= 3884 bbio->stripes[i].length -=
3870 stripe_end_offset; 3885 stripe_end_offset;
3886
3871 if (i == sub_stripes - 1) 3887 if (i == sub_stripes - 1)
3872 stripe_offset = 0; 3888 stripe_offset = 0;
3873 } else 3889 } else
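The new last_stripe bookkeeping identifies which stripes hold the tail of the mapped range: the remainder of (stripe_nr_end - 1) by factor, scaled by sub_stripes, gives the first stripe index of the final row, and only indices in [last_stripe, last_stripe + sub_stripes - 1] shed stripe_end_offset. A userspace sketch of that arithmetic, with invented values and plain 64-bit % emulating div_u64_rem (whose remainder here is a u32):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t stripe_nr_end = 10;	/* hypothetical end stripe number */
	uint32_t factor = 4;		/* stripe rows per device group */
	uint32_t sub_stripes = 2;	/* RAID10 mirrors per stripe */
	uint32_t last_stripe;

	last_stripe = (uint32_t)((stripe_nr_end - 1) % factor);
	last_stripe *= sub_stripes;

	/* num_stripes is taken as factor * sub_stripes purely for
	 * illustration; only the trimmed indices matter. */
	for (uint32_t stripe_index = 0;
	     stripe_index < factor * sub_stripes; stripe_index++) {
		int trim = stripe_index >= last_stripe &&
			   stripe_index <= last_stripe + sub_stripes - 1;
		printf("stripe_index=%u trims end offset: %d\n",
		       (unsigned)stripe_index, trim);
	}
	return 0;
}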
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index c465ae066c62..eb08c9e43c2a 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,10 +1,6 @@
1config GFS2_FS 1config GFS2_FS
2 tristate "GFS2 file system support" 2 tristate "GFS2 file system support"
3 depends on (64BIT || LBDAF) 3 depends on (64BIT || LBDAF)
4 select DLM if GFS2_FS_LOCKING_DLM
5 select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
6 select SYSFS if GFS2_FS_LOCKING_DLM
7 select IP_SCTP if DLM_SCTP
8 select FS_POSIX_ACL 4 select FS_POSIX_ACL
9 select CRC32 5 select CRC32
10 select QUOTACTL 6 select QUOTACTL
@@ -29,7 +25,8 @@ config GFS2_FS
29 25
30config GFS2_FS_LOCKING_DLM 26config GFS2_FS_LOCKING_DLM
31 bool "GFS2 DLM locking" 27 bool "GFS2 DLM locking"
32 depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && HOTPLUG 28 depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
29 HOTPLUG && DLM && CONFIGFS_FS && SYSFS
33 help 30 help
34 Multiple node locking module for GFS2 31 Multiple node locking module for GFS2
35 32
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 38b7a74a0f91..9b2ff0e851b1 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -807,7 +807,7 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
807 807
808 if (inode == sdp->sd_rindex) { 808 if (inode == sdp->sd_rindex) {
809 adjust_fs_space(inode); 809 adjust_fs_space(inode);
810 ip->i_gh.gh_flags |= GL_NOCACHE; 810 sdp->sd_rindex_uptodate = 0;
811 } 811 }
812 812
813 brelse(dibh); 813 brelse(dibh);
@@ -873,7 +873,7 @@ static int gfs2_write_end(struct file *file, struct address_space *mapping,
873 873
874 if (inode == sdp->sd_rindex) { 874 if (inode == sdp->sd_rindex) {
875 adjust_fs_space(inode); 875 adjust_fs_space(inode);
876 ip->i_gh.gh_flags |= GL_NOCACHE; 876 sdp->sd_rindex_uptodate = 0;
877 } 877 }
878 878
879 brelse(dibh); 879 brelse(dibh);
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 197c5c47e577..03c04febe26f 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -724,7 +724,11 @@ static int do_strip(struct gfs2_inode *ip, struct buffer_head *dibh,
724 int metadata; 724 int metadata;
725 unsigned int revokes = 0; 725 unsigned int revokes = 0;
726 int x; 726 int x;
727 int error = 0; 727 int error;
728
729 error = gfs2_rindex_update(sdp);
730 if (error)
731 return error;
728 732
729 if (!*top) 733 if (!*top)
730 sm->sm_first = 0; 734 sm->sm_first = 0;
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index c35573abd371..a836056343f0 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -1844,6 +1844,10 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
1844 unsigned int x, size = len * sizeof(u64); 1844 unsigned int x, size = len * sizeof(u64);
1845 int error; 1845 int error;
1846 1846
1847 error = gfs2_rindex_update(sdp);
1848 if (error)
1849 return error;
1850
1847 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); 1851 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1848 1852
1849 ht = kzalloc(size, GFP_NOFS); 1853 ht = kzalloc(size, GFP_NOFS);
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index c98a60ee6dfd..a9ba2444e077 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -1031,7 +1031,13 @@ static int gfs2_unlink(struct inode *dir, struct dentry *dentry)
1031 struct buffer_head *bh; 1031 struct buffer_head *bh;
1032 struct gfs2_holder ghs[3]; 1032 struct gfs2_holder ghs[3];
1033 struct gfs2_rgrpd *rgd; 1033 struct gfs2_rgrpd *rgd;
1034 int error = -EROFS; 1034 int error;
1035
1036 error = gfs2_rindex_update(sdp);
1037 if (error)
1038 return error;
1039
1040 error = -EROFS;
1035 1041
1036 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs); 1042 gfs2_holder_init(dip->i_gl, LM_ST_EXCLUSIVE, 0, ghs);
1037 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1); 1043 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, ghs + 1);
@@ -1224,6 +1230,10 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
1224 return 0; 1230 return 0;
1225 } 1231 }
1226 1232
1233 error = gfs2_rindex_update(sdp);
1234 if (error)
1235 return error;
1236
1227 if (odip != ndip) { 1237 if (odip != ndip) {
1228 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE, 1238 error = gfs2_glock_nq_init(sdp->sd_rename_gl, LM_ST_EXCLUSIVE,
1229 0, &r_gh); 1239 0, &r_gh);
@@ -1345,7 +1355,6 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
1345 error = alloc_required; 1355 error = alloc_required;
1346 if (error < 0) 1356 if (error < 0)
1347 goto out_gunlock; 1357 goto out_gunlock;
1348 error = 0;
1349 1358
1350 if (alloc_required) { 1359 if (alloc_required) {
1351 struct gfs2_qadata *qa = gfs2_qadata_get(ndip); 1360 struct gfs2_qadata *qa = gfs2_qadata_get(ndip);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 19bde40b4864..3df65c9ab73b 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -332,9 +332,6 @@ struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
332 struct rb_node *n, *next; 332 struct rb_node *n, *next;
333 struct gfs2_rgrpd *cur; 333 struct gfs2_rgrpd *cur;
334 334
335 if (gfs2_rindex_update(sdp))
336 return NULL;
337
338 spin_lock(&sdp->sd_rindex_spin); 335 spin_lock(&sdp->sd_rindex_spin);
339 n = sdp->sd_rindex_tree.rb_node; 336 n = sdp->sd_rindex_tree.rb_node;
340 while (n) { 337 while (n) {
@@ -640,6 +637,7 @@ static int read_rindex_entry(struct gfs2_inode *ip,
640 return 0; 637 return 0;
641 638
642 error = 0; /* someone else read in the rgrp; free it and ignore it */ 639 error = 0; /* someone else read in the rgrp; free it and ignore it */
640 gfs2_glock_put(rgd->rd_gl);
643 641
644fail: 642fail:
645 kfree(rgd->rd_bits); 643 kfree(rgd->rd_bits);
@@ -927,6 +925,10 @@ int gfs2_fitrim(struct file *filp, void __user *argp)
927 } else if (copy_from_user(&r, argp, sizeof(r))) 925 } else if (copy_from_user(&r, argp, sizeof(r)))
928 return -EFAULT; 926 return -EFAULT;
929 927
928 ret = gfs2_rindex_update(sdp);
929 if (ret)
930 return ret;
931
930 rgd = gfs2_blk2rgrpd(sdp, r.start, 0); 932 rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
931 rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0); 933 rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
932 934
diff --git a/fs/gfs2/xattr.c b/fs/gfs2/xattr.c
index 2e5ba425cae7..927f4df874ae 100644
--- a/fs/gfs2/xattr.c
+++ b/fs/gfs2/xattr.c
@@ -238,6 +238,10 @@ static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
238 unsigned int x; 238 unsigned int x;
239 int error; 239 int error;
240 240
241 error = gfs2_rindex_update(sdp);
242 if (error)
243 return error;
244
241 if (GFS2_EA_IS_STUFFED(ea)) 245 if (GFS2_EA_IS_STUFFED(ea))
242 return 0; 246 return 0;
243 247
@@ -1330,6 +1334,10 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
1330 unsigned int x; 1334 unsigned int x;
1331 int error; 1335 int error;
1332 1336
1337 error = gfs2_rindex_update(sdp);
1338 if (error)
1339 return error;
1340
1333 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); 1341 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
1334 1342
1335 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh); 1343 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
@@ -1439,6 +1447,10 @@ static int ea_dealloc_block(struct gfs2_inode *ip)
1439 struct gfs2_holder gh; 1447 struct gfs2_holder gh;
1440 int error; 1448 int error;
1441 1449
1450 error = gfs2_rindex_update(sdp);
1451 if (error)
1452 return error;
1453
1442 rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1); 1454 rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
1443 if (!rgd) { 1455 if (!rgd) {
1444 gfs2_consist_inode(ip); 1456 gfs2_consist_inode(ip);
diff --git a/fs/libfs.c b/fs/libfs.c
index 358094f0433d..18d08f5db53a 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -529,6 +529,7 @@ int simple_fill_super(struct super_block *s, unsigned long magic,
529 return 0; 529 return 0;
530out: 530out:
531 d_genocide(root); 531 d_genocide(root);
532 shrink_dcache_parent(root);
532 dput(root); 533 dput(root);
533 return -ENOMEM; 534 return -ENOMEM;
534} 535}

diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 6a0c62d6e442..64c3b3172367 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -18,19 +18,39 @@
18#ifndef arch_irq_stat 18#ifndef arch_irq_stat
19#define arch_irq_stat() 0 19#define arch_irq_stat() 0
20#endif 20#endif
21#ifndef arch_idle_time 21
22#define arch_idle_time(cpu) 0 22#ifdef arch_idle_time
23#endif 23
24static cputime64_t get_idle_time(int cpu)
25{
26 cputime64_t idle;
27
28 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
29 if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
30 idle += arch_idle_time(cpu);
31 return idle;
32}
33
34static cputime64_t get_iowait_time(int cpu)
35{
36 cputime64_t iowait;
37
38 iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
39 if (cpu_online(cpu) && nr_iowait_cpu(cpu))
40 iowait += arch_idle_time(cpu);
41 return iowait;
42}
43
44#else
24 45
25static u64 get_idle_time(int cpu) 46static u64 get_idle_time(int cpu)
26{ 47{
27 u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); 48 u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
28 49
29 if (idle_time == -1ULL) { 50 if (idle_time == -1ULL)
30 /* !NO_HZ so we can rely on cpustat.idle */ 51 /* !NO_HZ so we can rely on cpustat.idle */
31 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; 52 idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
32 idle += arch_idle_time(cpu); 53 else
33 } else
34 idle = usecs_to_cputime64(idle_time); 54 idle = usecs_to_cputime64(idle_time);
35 55
36 return idle; 56 return idle;
@@ -49,6 +69,8 @@ static u64 get_iowait_time(int cpu)
49 return iowait; 69 return iowait;
50} 70}
51 71
72#endif
73
52static int show_stat(struct seq_file *p, void *v) 74static int show_stat(struct seq_file *p, void *v)
53{ 75{
54 int i, j; 76 int i, j;
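Both variants of get_idle_time()/get_iowait_time() feed the same idle and iowait columns of /proc/stat, so the arch_idle_time split is invisible to readers of the file. A minimal Linux-only reader of the aggregate cpu line, kept deliberately short; the fields are cumulative ticks in USER_HZ:

#include <stdio.h>

int main(void)
{
	unsigned long long user, nice, sys, idle, iowait;
	FILE *f = fopen("/proc/stat", "r");

	if (!f) {
		perror("/proc/stat");
		return 1;
	}
	/* First line: "cpu  user nice system idle iowait irq softirq ..." */
	if (fscanf(f, "cpu %llu %llu %llu %llu %llu",
		   &user, &nice, &sys, &idle, &iowait) != 5) {
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("idle=%llu iowait=%llu (USER_HZ ticks)\n", idle, iowait);
	return 0;
}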
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index 2a7a3f5d1ca6..35a36d39fa2c 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -729,6 +729,9 @@ int sysfs_create_dir(struct kobject * kobj)
729 else 729 else
730 parent_sd = &sysfs_root; 730 parent_sd = &sysfs_root;
731 731
732 if (!parent_sd)
733 return -ENOENT;
734
732 if (sysfs_ns_type(parent_sd)) 735 if (sysfs_ns_type(parent_sd))
733 ns = kobj->ktype->namespace(kobj); 736 ns = kobj->ktype->namespace(kobj);
734 type = sysfs_read_ns_type(kobj); 737 type = sysfs_read_ns_type(kobj);
@@ -878,7 +881,6 @@ int sysfs_rename(struct sysfs_dirent *sd,
878 881
879 dup_name = sd->s_name; 882 dup_name = sd->s_name;
880 sd->s_name = new_name; 883 sd->s_name = new_name;
881 sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
882 } 884 }
883 885
884 /* Move to the appropriate place in the appropriate directories rbtree. */ 886 /* Move to the appropriate place in the appropriate directories rbtree. */
@@ -886,6 +888,7 @@ int sysfs_rename(struct sysfs_dirent *sd,
886 sysfs_get(new_parent_sd); 888 sysfs_get(new_parent_sd);
887 sysfs_put(sd->s_parent); 889 sysfs_put(sd->s_parent);
888 sd->s_ns = new_ns; 890 sd->s_ns = new_ns;
891 sd->s_hash = sysfs_name_hash(sd->s_ns, sd->s_name);
889 sd->s_parent = new_parent_sd; 892 sd->s_parent = new_parent_sd;
890 sysfs_link_sibling(sd); 893 sysfs_link_sibling(sd);
891 894
diff --git a/fs/sysfs/group.c b/fs/sysfs/group.c
index dd1701caecc9..2df555c66d57 100644
--- a/fs/sysfs/group.c
+++ b/fs/sysfs/group.c
@@ -67,7 +67,11 @@ static int internal_create_group(struct kobject *kobj, int update,
67 /* Updates may happen before the object has been instantiated */ 67 /* Updates may happen before the object has been instantiated */
68 if (unlikely(update && !kobj->sd)) 68 if (unlikely(update && !kobj->sd))
69 return -EINVAL; 69 return -EINVAL;
70 70 if (!grp->attrs) {
71 WARN(1, "sysfs: attrs not set by subsystem for group: %s/%s\n",
 72 kobj->name, grp->name ? grp->name : "");
73 return -EINVAL;
74 }
71 if (grp->name) { 75 if (grp->name) {
72 error = sysfs_create_subdir(kobj, grp->name, &sd); 76 error = sysfs_create_subdir(kobj, grp->name, &sd);
73 if (error) 77 if (error)
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index 3963116083ae..e478de4e5d56 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -85,7 +85,7 @@ struct drm_exynos_gem_mmap {
85struct drm_exynos_vidi_connection { 85struct drm_exynos_vidi_connection {
86 unsigned int connection; 86 unsigned int connection;
87 unsigned int extensions; 87 unsigned int extensions;
88 uint64_t *edid; 88 uint64_t edid;
89}; 89};
90 90
91struct drm_exynos_plane_set_zpos { 91struct drm_exynos_plane_set_zpos {
@@ -96,7 +96,8 @@ struct drm_exynos_plane_set_zpos {
96/* memory type definitions. */ 96/* memory type definitions. */
97enum e_drm_exynos_gem_mem_type { 97enum e_drm_exynos_gem_mem_type {
98 /* Physically Non-Continuous memory. */ 98 /* Physically Non-Continuous memory. */
99 EXYNOS_BO_NONCONTIG = 1 << 0 99 EXYNOS_BO_NONCONTIG = 1 << 0,
100 EXYNOS_BO_MASK = EXYNOS_BO_NONCONTIG
100}; 101};
101 102
102#define DRM_EXYNOS_GEM_CREATE 0x00 103#define DRM_EXYNOS_GEM_CREATE 0x00
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 7847e197730a..8d54f79457ba 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -30,7 +30,6 @@ struct amba_device {
30 struct device dev; 30 struct device dev;
31 struct resource res; 31 struct resource res;
32 struct clk *pclk; 32 struct clk *pclk;
33 struct regulator *vcore;
34 u64 dma_mask; 33 u64 dma_mask;
35 unsigned int periphid; 34 unsigned int periphid;
36 unsigned int irq[AMBA_NR_IRQS]; 35 unsigned int irq[AMBA_NR_IRQS];
@@ -75,12 +74,6 @@ void amba_release_regions(struct amba_device *);
75#define amba_pclk_disable(d) \ 74#define amba_pclk_disable(d) \
76 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 75 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
77 76
78#define amba_vcore_enable(d) \
79 (IS_ERR((d)->vcore) ? 0 : regulator_enable((d)->vcore))
80
81#define amba_vcore_disable(d) \
82 do { if (!IS_ERR((d)->vcore)) regulator_disable((d)->vcore); } while (0)
83
84/* Some drivers don't use the struct amba_device */ 77/* Some drivers don't use the struct amba_device */
85#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) 78#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
86#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) 79#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index b8c51124ed19..76dd1b199a1b 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -25,6 +25,8 @@
25#ifndef _SSP_PL022_H 25#ifndef _SSP_PL022_H
26#define _SSP_PL022_H 26#define _SSP_PL022_H
27 27
28#include <linux/types.h>
29
28/** 30/**
29 * whether SSP is in loopback mode or not 31 * whether SSP is in loopback mode or not
30 */ 32 */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf339bb56..2aa24664a5b5 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -426,14 +426,10 @@ struct request_queue {
426 (1 << QUEUE_FLAG_SAME_COMP) | \ 426 (1 << QUEUE_FLAG_SAME_COMP) | \
427 (1 << QUEUE_FLAG_ADD_RANDOM)) 427 (1 << QUEUE_FLAG_ADD_RANDOM))
428 428
429static inline int queue_is_locked(struct request_queue *q) 429static inline void queue_lockdep_assert_held(struct request_queue *q)
430{ 430{
431#ifdef CONFIG_SMP 431 if (q->queue_lock)
432 spinlock_t *lock = q->queue_lock; 432 lockdep_assert_held(q->queue_lock);
433 return lock && spin_is_locked(lock);
434#else
435 return 1;
436#endif
437} 433}
438 434
439static inline void queue_flag_set_unlocked(unsigned int flag, 435static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@ static inline void queue_flag_set_unlocked(unsigned int flag,
445static inline int queue_flag_test_and_clear(unsigned int flag, 441static inline int queue_flag_test_and_clear(unsigned int flag,
446 struct request_queue *q) 442 struct request_queue *q)
447{ 443{
448 WARN_ON_ONCE(!queue_is_locked(q)); 444 queue_lockdep_assert_held(q);
449 445
450 if (test_bit(flag, &q->queue_flags)) { 446 if (test_bit(flag, &q->queue_flags)) {
451 __clear_bit(flag, &q->queue_flags); 447 __clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@ static inline int queue_flag_test_and_clear(unsigned int flag,
458static inline int queue_flag_test_and_set(unsigned int flag, 454static inline int queue_flag_test_and_set(unsigned int flag,
459 struct request_queue *q) 455 struct request_queue *q)
460{ 456{
461 WARN_ON_ONCE(!queue_is_locked(q)); 457 queue_lockdep_assert_held(q);
462 458
463 if (!test_bit(flag, &q->queue_flags)) { 459 if (!test_bit(flag, &q->queue_flags)) {
464 __set_bit(flag, &q->queue_flags); 460 __set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@ static inline int queue_flag_test_and_set(unsigned int flag,
470 466
471static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 467static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
472{ 468{
473 WARN_ON_ONCE(!queue_is_locked(q)); 469 queue_lockdep_assert_held(q);
474 __set_bit(flag, &q->queue_flags); 470 __set_bit(flag, &q->queue_flags);
475} 471}
476 472
@@ -487,7 +483,7 @@ static inline int queue_in_flight(struct request_queue *q)
487 483
488static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 484static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
489{ 485{
490 WARN_ON_ONCE(!queue_is_locked(q)); 486 queue_lockdep_assert_held(q);
491 __clear_bit(flag, &q->queue_flags); 487 __clear_bit(flag, &q->queue_flags);
492} 488}
493 489
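The replaced queue_is_locked() could only report that somebody held queue_lock, and collapsed to "always locked" on !SMP builds, while lockdep_assert_held() checks that the current context owns it. A rough userspace analogue of that distinction, assuming pthreads in place of spinlocks and hypothetical helper names:

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t qowner;
static int qheld;

static void q_lock(void)
{
	pthread_mutex_lock(&qlock);
	qowner = pthread_self();
	qheld = 1;
}

static void q_unlock(void)
{
	qheld = 0;
	pthread_mutex_unlock(&qlock);
}

/* Rough equivalent of lockdep_assert_held(): the caller, not just
 * anyone, must own the lock. */
static void q_assert_held(void)
{
	assert(qheld && pthread_equal(qowner, pthread_self()));
}

int main(void)
{
	q_lock();
	q_assert_held();	/* fine: we are the owner */
	q_unlock();
	puts("ok");
	return 0;
}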
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 676f967390ae..f9a2e5e67a54 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -974,6 +974,7 @@ int dma_async_device_register(struct dma_device *device);
974void dma_async_device_unregister(struct dma_device *device); 974void dma_async_device_unregister(struct dma_device *device);
975void dma_run_dependencies(struct dma_async_tx_descriptor *tx); 975void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
976struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type); 976struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type);
977struct dma_chan *net_dma_find_channel(void);
977#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y) 978#define dma_request_channel(mask, x, y) __dma_request_channel(&(mask), x, y)
978 979
979/* --- Helper iov-locking functions --- */ 980/* --- Helper iov-locking functions --- */
diff --git a/include/linux/irq.h b/include/linux/irq.h
index bff29c58da23..7810406f3d80 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -263,6 +263,11 @@ static inline void irqd_clr_chained_irq_inprogress(struct irq_data *d)
263 d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS; 263 d->state_use_accessors &= ~IRQD_IRQ_INPROGRESS;
264} 264}
265 265
266static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
267{
268 return d->hwirq;
269}
270
266/** 271/**
267 * struct irq_chip - hardware interrupt chip descriptor 272 * struct irq_chip - hardware interrupt chip descriptor
268 * 273 *
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index ead4a4215797..c65740d76e66 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -42,12 +42,6 @@ struct of_device_id;
42/* Number of irqs reserved for a legacy isa controller */ 42/* Number of irqs reserved for a legacy isa controller */
43#define NUM_ISA_INTERRUPTS 16 43#define NUM_ISA_INTERRUPTS 16
44 44
45/* This type is the placeholder for a hardware interrupt number. It has to
46 * be big enough to enclose whatever representation is used by a given
47 * platform.
48 */
49typedef unsigned long irq_hw_number_t;
50
51/** 45/**
52 * struct irq_domain_ops - Methods for irq_domain objects 46 * struct irq_domain_ops - Methods for irq_domain objects
53 * @match: Match an interrupt controller device node to a host, returns 47 * @match: Match an interrupt controller device node to a host, returns
@@ -104,6 +98,9 @@ struct irq_domain {
104 unsigned int size; 98 unsigned int size;
105 unsigned int *revmap; 99 unsigned int *revmap;
106 } linear; 100 } linear;
101 struct {
102 unsigned int max_irq;
103 } nomap;
107 struct radix_tree_root tree; 104 struct radix_tree_root tree;
108 } revmap_data; 105 } revmap_data;
109 const struct irq_domain_ops *ops; 106 const struct irq_domain_ops *ops;
@@ -126,6 +123,7 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
126 const struct irq_domain_ops *ops, 123 const struct irq_domain_ops *ops,
127 void *host_data); 124 void *host_data);
128struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, 125struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
126 unsigned int max_irq,
129 const struct irq_domain_ops *ops, 127 const struct irq_domain_ops *ops,
130 void *host_data); 128 void *host_data);
131struct irq_domain *irq_domain_add_tree(struct device_node *of_node, 129struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
@@ -134,7 +132,6 @@ struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
134 132
135extern struct irq_domain *irq_find_host(struct device_node *node); 133extern struct irq_domain *irq_find_host(struct device_node *node);
136extern void irq_set_default_host(struct irq_domain *host); 134extern void irq_set_default_host(struct irq_domain *host);
137extern void irq_set_virq_count(unsigned int count);
138 135
139static inline struct irq_domain *irq_domain_add_legacy_isa( 136static inline struct irq_domain *irq_domain_add_legacy_isa(
140 struct device_node *of_node, 137 struct device_node *of_node,
@@ -146,7 +143,6 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
146} 143}
147extern struct irq_domain *irq_find_host(struct device_node *node); 144extern struct irq_domain *irq_find_host(struct device_node *node);
148extern void irq_set_default_host(struct irq_domain *host); 145extern void irq_set_default_host(struct irq_domain *host);
149extern void irq_set_virq_count(unsigned int count);
150 146
151 147
152extern unsigned int irq_create_mapping(struct irq_domain *host, 148extern unsigned int irq_create_mapping(struct irq_domain *host,
diff --git a/include/linux/kconfig.h b/include/linux/kconfig.h
index 067eda0e4b32..be342b94c640 100644
--- a/include/linux/kconfig.h
+++ b/include/linux/kconfig.h
@@ -4,29 +4,43 @@
4#include <generated/autoconf.h> 4#include <generated/autoconf.h>
5 5
6/* 6/*
7 * Helper macros to use CONFIG_ options in C expressions. Note that 7 * Helper macros to use CONFIG_ options in C/CPP expressions. Note that
8 * these only work with boolean and tristate options. 8 * these only work with boolean and tristate options.
9 */ 9 */
10 10
11/* 11/*
12 * Getting something that works in C and CPP for an arg that may or may
13 * not be defined is tricky. Here, if we have "#define CONFIG_BOOGER 1"
14 * we match on the placeholder define, insert the "0," for arg1 and generate
15 * the triplet (0, 1, 0). Then the last step cherry picks the 2nd arg (a one).
16 * When CONFIG_BOOGER is not defined, we generate a (... 1, 0) pair, and when
17 * the last step cherry picks the 2nd arg, we get a zero.
18 */
19#define __ARG_PLACEHOLDER_1 0,
20#define config_enabled(cfg) _config_enabled(cfg)
21#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
22#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
23#define ___config_enabled(__ignored, val, ...) val
24
25/*
12 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm', 26 * IS_ENABLED(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y' or 'm',
13 * 0 otherwise. 27 * 0 otherwise.
14 * 28 *
15 */ 29 */
16#define IS_ENABLED(option) \ 30#define IS_ENABLED(option) \
17 (__enabled_ ## option || __enabled_ ## option ## _MODULE) 31 (config_enabled(option) || config_enabled(option##_MODULE))
18 32
19/* 33/*
20 * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0 34 * IS_BUILTIN(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'y', 0
21 * otherwise. For boolean options, this is equivalent to 35 * otherwise. For boolean options, this is equivalent to
22 * IS_ENABLED(CONFIG_FOO). 36 * IS_ENABLED(CONFIG_FOO).
23 */ 37 */
24#define IS_BUILTIN(option) __enabled_ ## option 38#define IS_BUILTIN(option) config_enabled(option)
25 39
26/* 40/*
27 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0 41 * IS_MODULE(CONFIG_FOO) evaluates to 1 if CONFIG_FOO is set to 'm', 0
28 * otherwise. 42 * otherwise.
29 */ 43 */
30#define IS_MODULE(option) __enabled_ ## option ## _MODULE 44#define IS_MODULE(option) config_enabled(option##_MODULE)
31 45
32#endif /* __LINUX_KCONFIG_H */ 46#endif /* __LINUX_KCONFIG_H */
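The placeholder trick above works both in C and in the preprocessor, which is why IS_ENABLED() can now appear in #if lines as well as in code. The macros compile unchanged as ordinary userspace C; a standalone demonstration, where CONFIG_BOOGER and CONFIG_SNORT are invented option names:

#include <stdio.h>

/* Verbatim copy of the kconfig.h machinery from the hunk above. */
#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg) _config_enabled(cfg)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define CONFIG_BOOGER 1		/* defined to 1, like a =y option */
/* CONFIG_SNORT intentionally undefined, like an =n option */

int main(void)
{
	printf("CONFIG_BOOGER -> %d\n", config_enabled(CONFIG_BOOGER)); /* 1 */
	printf("CONFIG_SNORT  -> %d\n", config_enabled(CONFIG_SNORT));  /* 0 */
	return 0;
}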
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index f549adccc94c..1bc898b14a80 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -287,7 +287,17 @@ extern unsigned int ip6t_do_table(struct sk_buff *skb,
287 struct xt_table *table); 287 struct xt_table *table);
288 288
289/* Check for an extension */ 289/* Check for an extension */
290extern int ip6t_ext_hdr(u8 nexthdr); 290static inline int
291ip6t_ext_hdr(u8 nexthdr)
292{ return (nexthdr == IPPROTO_HOPOPTS) ||
293 (nexthdr == IPPROTO_ROUTING) ||
294 (nexthdr == IPPROTO_FRAGMENT) ||
295 (nexthdr == IPPROTO_ESP) ||
296 (nexthdr == IPPROTO_AH) ||
297 (nexthdr == IPPROTO_NONE) ||
298 (nexthdr == IPPROTO_DSTOPTS);
299}
300
291/* find specified header and get offset to it */ 301/* find specified header and get offset to it */
292extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, 302extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
293 int target, unsigned short *fragoff); 303 int target, unsigned short *fragoff);
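
Making ip6t_ext_hdr() a static inline lets every caller classify a nexthdr byte without the old module export. A userspace sketch of the same predicate; the header chain below is made up, and the glibc IPPROTO_* constants carry the same values the kernel uses:

#include <stdio.h>
#include <stdint.h>
#include <netinet/in.h>

static inline int ip6t_ext_hdr(uint8_t nexthdr)
{
	return (nexthdr == IPPROTO_HOPOPTS) || (nexthdr == IPPROTO_ROUTING) ||
	       (nexthdr == IPPROTO_FRAGMENT) || (nexthdr == IPPROTO_ESP) ||
	       (nexthdr == IPPROTO_AH) || (nexthdr == IPPROTO_NONE) ||
	       (nexthdr == IPPROTO_DSTOPTS);
}

int main(void)
{
	/* hop-by-hop, routing, then the upper-layer protocol */
	uint8_t chain[] = { IPPROTO_HOPOPTS, IPPROTO_ROUTING, IPPROTO_TCP };

	for (size_t i = 0; i < sizeof(chain); i++)
		printf("%3u -> %s\n", chain[i],
		       ip6t_ext_hdr(chain[i]) ? "extension header"
					      : "upper layer, stop parsing");
	return 0;
}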
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index f51bf2e70c69..2db407a40051 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -357,7 +357,7 @@ struct uart_port {
357#define UPF_CONS_FLOW ((__force upf_t) (1 << 23)) 357#define UPF_CONS_FLOW ((__force upf_t) (1 << 23))
358#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24)) 358#define UPF_SHARE_IRQ ((__force upf_t) (1 << 24))
359#define UPF_EXAR_EFR ((__force upf_t) (1 << 25)) 359#define UPF_EXAR_EFR ((__force upf_t) (1 << 25))
360#define UPF_IIR_ONCE ((__force upf_t) (1 << 26)) 360#define UPF_BUG_THRE ((__force upf_t) (1 << 26))
361/* The exact UART type is known and should not be probed. */ 361/* The exact UART type is known and should not be probed. */
362#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27)) 362#define UPF_FIXED_TYPE ((__force upf_t) (1 << 27))
363#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28)) 363#define UPF_BOOT_AUTOCONF ((__force upf_t) (1 << 28))
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 33370271b8b2..70a3f8d49118 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -481,6 +481,7 @@ struct sk_buff {
481 union { 481 union {
482 __u32 mark; 482 __u32 mark;
483 __u32 dropcount; 483 __u32 dropcount;
484 __u32 avail_size;
484 }; 485 };
485 486
486 sk_buff_data_t transport_header; 487 sk_buff_data_t transport_header;
@@ -1366,6 +1367,18 @@ static inline int skb_tailroom(const struct sk_buff *skb)
1366} 1367}
1367 1368
1368/** 1369/**
1370 * skb_availroom - bytes at buffer end
1371 * @skb: buffer to check
1372 *
1373 * Return the number of bytes of free space at the tail of an sk_buff
1374 * allocated by sk_stream_alloc()
1375 */
1376static inline int skb_availroom(const struct sk_buff *skb)
1377{
1378 return skb_is_nonlinear(skb) ? 0 : skb->avail_size - skb->len;
1379}
1380
1381/**
1369 * skb_reserve - adjust headroom 1382 * skb_reserve - adjust headroom
1370 * @skb: buffer to alter 1383 * @skb: buffer to alter
1371 * @len: bytes to move 1384 * @len: bytes to move
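
skb_availroom() differs from skb_tailroom() in that it reports only the space sk_stream_alloc_skb() promised to the caller, and nothing for nonlinear buffers. A toy model with just the fields the helper touches (mock struct, illustrative numbers):

#include <stdio.h>

struct mock_skb {
	unsigned int len;	/* bytes currently queued in the buffer */
	unsigned int avail_size;/* bytes promised by the allocator */
	int nonlinear;		/* stand-in for skb_is_nonlinear() */
};

static int skb_availroom(const struct mock_skb *skb)
{
	return skb->nonlinear ? 0 : (int)(skb->avail_size - skb->len);
}

int main(void)
{
	struct mock_skb skb = { .len = 100, .avail_size = 1460, .nonlinear = 0 };

	printf("availroom = %d\n", skb_availroom(&skb));	/* 1360 */
	skb.nonlinear = 1;
	printf("availroom = %d\n", skb_availroom(&skb));	/* 0 */
	return 0;
}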
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 6a40c76bdcf1..1747b6787b9e 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -3,14 +3,10 @@
3 3
4#include <linux/compiler.h> 4#include <linux/compiler.h>
5 5
6#ifdef __KERNEL__
7
6#undef NULL 8#undef NULL
7#if defined(__cplusplus)
8#define NULL 0
9#else
10#define NULL ((void *)0) 9#define NULL ((void *)0)
11#endif
12
13#ifdef __KERNEL__
14 10
15enum { 11enum {
16 false = 0, 12 false = 0,
diff --git a/include/linux/types.h b/include/linux/types.h
index e5fa50345516..7f480db60231 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -210,6 +210,12 @@ typedef u32 phys_addr_t;
210 210
211typedef phys_addr_t resource_size_t; 211typedef phys_addr_t resource_size_t;
212 212
213/*
214 * This type is the placeholder for a hardware interrupt number. It has to be
215 * big enough to enclose whatever representation is used by a given platform.
216 */
217typedef unsigned long irq_hw_number_t;
218
213typedef struct { 219typedef struct {
214 int counter; 220 int counter;
215} atomic_t; 221} atomic_t;
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index fbb666b1b670..474283888233 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -28,13 +28,6 @@
28/* parity check flag */ 28/* parity check flag */
29#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) 29#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
30 30
31enum port_dev_state {
32 PORT_UNREGISTERED,
33 PORT_REGISTERING,
34 PORT_REGISTERED,
35 PORT_UNREGISTERING,
36};
37
38/* USB serial flags */ 31/* USB serial flags */
39#define USB_SERIAL_WRITE_BUSY 0 32#define USB_SERIAL_WRITE_BUSY 0
40 33
@@ -124,7 +117,6 @@ struct usb_serial_port {
124 char throttle_req; 117 char throttle_req;
125 unsigned long sysrq; /* sysrq timeout */ 118 unsigned long sysrq; /* sysrq timeout */
126 struct device dev; 119 struct device dev;
127 enum port_dev_state dev_state;
128}; 120};
129#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev) 121#define to_usb_serial_port(d) container_of(d, struct usb_serial_port, dev)
130 122
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index 759a25ba0539..367ab18dccf7 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -48,6 +48,8 @@
48 */ 48 */
49#define VGA_DEFAULT_DEVICE (NULL) 49#define VGA_DEFAULT_DEVICE (NULL)
50 50
51struct pci_dev;
52
51/* For use by clients */ 53/* For use by clients */
52 54
53/** 55/**
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 344b0f972828..d47e523c9d83 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -92,6 +92,7 @@ enum {
92 HCI_SERVICE_CACHE, 92 HCI_SERVICE_CACHE,
93 HCI_LINK_KEYS, 93 HCI_LINK_KEYS,
94 HCI_DEBUG_KEYS, 94 HCI_DEBUG_KEYS,
95 HCI_UNREGISTER,
95 96
96 HCI_LE_SCAN, 97 HCI_LE_SCAN,
97 HCI_SSP_ENABLED, 98 HCI_SSP_ENABLED,
@@ -1327,8 +1328,8 @@ struct sockaddr_hci {
1327#define HCI_DEV_NONE 0xffff 1328#define HCI_DEV_NONE 0xffff
1328 1329
1329#define HCI_CHANNEL_RAW 0 1330#define HCI_CHANNEL_RAW 0
1330#define HCI_CHANNEL_CONTROL 1
1331#define HCI_CHANNEL_MONITOR 2 1331#define HCI_CHANNEL_MONITOR 2
1332#define HCI_CHANNEL_CONTROL 3
1332 1333
1333struct hci_filter { 1334struct hci_filter {
1334 unsigned long type_mask; 1335 unsigned long type_mask;
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index daefaac51131..6822d2595aff 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -427,7 +427,7 @@ enum {
427static inline bool hci_conn_ssp_enabled(struct hci_conn *conn) 427static inline bool hci_conn_ssp_enabled(struct hci_conn *conn)
428{ 428{
429 struct hci_dev *hdev = conn->hdev; 429 struct hci_dev *hdev = conn->hdev;
430 return (test_bit(HCI_SSP_ENABLED, &hdev->flags) && 430 return (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags) &&
431 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags)); 431 test_bit(HCI_CONN_SSP_ENABLED, &conn->flags));
432} 432}
433 433
@@ -907,11 +907,13 @@ static inline void hci_role_switch_cfm(struct hci_conn *conn, __u8 status,
907 907
908static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type) 908static inline bool eir_has_data_type(u8 *data, size_t data_len, u8 type)
909{ 909{
910 u8 field_len; 910 size_t parsed = 0;
911 size_t parsed;
912 911
913 for (parsed = 0; parsed < data_len - 1; parsed += field_len) { 912 if (data_len < 2)
914 field_len = data[0]; 913 return false;
914
915 while (parsed < data_len - 1) {
916 u8 field_len = data[0];
915 917
916 if (field_len == 0) 918 if (field_len == 0)
917 break; 919 break;
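
The rewritten loop bounds-checks before touching each EIR field ([len][type][payload], with len counting the type byte plus payload). The hunk cuts off after the zero-length check; the loop tail below is a plausible reconstruction for illustration, not verbatim kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool eir_has_data_type(const uint8_t *data, size_t data_len, uint8_t type)
{
	size_t parsed = 0;

	if (data_len < 2)
		return false;

	while (parsed < data_len - 1) {
		uint8_t field_len = data[0];

		if (field_len == 0)
			break;

		parsed += field_len + 1;
		if (parsed > data_len)		/* field overruns the buffer */
			break;

		if (data[1] == type)
			return true;

		data += field_len + 1;
	}
	return false;
}

int main(void)
{
	/* one complete-name field (type 0x09) holding "ab" */
	uint8_t eir[] = { 0x03, 0x09, 'a', 'b' };

	printf("%d\n", eir_has_data_type(eir, sizeof(eir), 0x09));	/* 1 */
	printf("%d\n", eir_has_data_type(eir, sizeof(eir), 0x02));	/* 0 */
	return 0;
}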
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index ffc1377e092e..ebfd91fc20f8 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -117,7 +117,7 @@ struct mgmt_mode {
117#define MGMT_OP_SET_DISCOVERABLE 0x0006 117#define MGMT_OP_SET_DISCOVERABLE 0x0006
118struct mgmt_cp_set_discoverable { 118struct mgmt_cp_set_discoverable {
119 __u8 val; 119 __u8 val;
120 __u16 timeout; 120 __le16 timeout;
121} __packed; 121} __packed;
122#define MGMT_SET_DISCOVERABLE_SIZE 3 122#define MGMT_SET_DISCOVERABLE_SIZE 3
123 123
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 87d203ff7a8a..9210bdc7bd8d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1327,7 +1327,7 @@ static inline struct ieee80211_rate *
1327ieee80211_get_tx_rate(const struct ieee80211_hw *hw, 1327ieee80211_get_tx_rate(const struct ieee80211_hw *hw,
1328 const struct ieee80211_tx_info *c) 1328 const struct ieee80211_tx_info *c)
1329{ 1329{
1330 if (WARN_ON(c->control.rates[0].idx < 0)) 1330 if (WARN_ON_ONCE(c->control.rates[0].idx < 0))
1331 return NULL; 1331 return NULL;
1332 return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx]; 1332 return &hw->wiphy->bands[c->band]->bitrates[c->control.rates[0].idx];
1333} 1333}
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 377df4a28512..1e1198546c72 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -134,6 +134,9 @@ struct scsi_cmnd {
134 134
135static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd) 135static inline struct scsi_driver *scsi_cmd_to_driver(struct scsi_cmnd *cmd)
136{ 136{
137 if (!cmd->request->rq_disk)
138 return NULL;
139
137 return *(struct scsi_driver **)cmd->request->rq_disk->private_data; 140 return *(struct scsi_driver **)cmd->request->rq_disk->private_data;
138} 141}
139 142
diff --git a/include/sound/core.h b/include/sound/core.h
index b6e0f57d451d..bc056687f647 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -325,6 +325,13 @@ void release_and_free_resource(struct resource *res);
325 325
326/* --- */ 326/* --- */
327 327
328/* sound printk debug levels */
329enum {
330 SND_PR_ALWAYS,
331 SND_PR_DEBUG,
332 SND_PR_VERBOSE,
333};
334
328#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK) 335#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
329__printf(4, 5) 336__printf(4, 5)
330void __snd_printk(unsigned int level, const char *file, int line, 337void __snd_printk(unsigned int level, const char *file, int line,
@@ -354,6 +361,8 @@ void __snd_printk(unsigned int level, const char *file, int line,
354 */ 361 */
355#define snd_printd(fmt, args...) \ 362#define snd_printd(fmt, args...) \
356 __snd_printk(1, __FILE__, __LINE__, fmt, ##args) 363 __snd_printk(1, __FILE__, __LINE__, fmt, ##args)
364#define _snd_printd(level, fmt, args...) \
365 __snd_printk(level, __FILE__, __LINE__, fmt, ##args)
357 366
358/** 367/**
359 * snd_BUG - give a BUG warning message and stack trace 368 * snd_BUG - give a BUG warning message and stack trace
@@ -383,6 +392,7 @@ void __snd_printk(unsigned int level, const char *file, int line,
383#else /* !CONFIG_SND_DEBUG */ 392#else /* !CONFIG_SND_DEBUG */
384 393
385#define snd_printd(fmt, args...) do { } while (0) 394#define snd_printd(fmt, args...) do { } while (0)
395#define _snd_printd(level, fmt, args...) do { } while (0)
386#define snd_BUG() do { } while (0) 396#define snd_BUG() do { } while (0)
387static inline int __snd_bug_on(int cond) 397static inline int __snd_bug_on(int cond)
388{ 398{
diff --git a/kernel/cred.c b/kernel/cred.c
index 97b36eeca4c9..e70683d9ec32 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -386,6 +386,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
386 struct cred *new; 386 struct cred *new;
387 int ret; 387 int ret;
388 388
389 p->replacement_session_keyring = NULL;
390
389 if ( 391 if (
390#ifdef CONFIG_KEYS 392#ifdef CONFIG_KEYS
391 !p->cred->thread_keyring && 393 !p->cred->thread_keyring &&
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index cf1a4a68ce44..d1a758bc972a 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -62,7 +62,7 @@ config IRQ_DOMAIN_DEBUG
62 help 62 help
63 This option will show the mapping relationship between hardware irq 63 This option will show the mapping relationship between hardware irq
64 numbers and Linux irq numbers. The mapping is exposed via debugfs 64 numbers and Linux irq numbers. The mapping is exposed via debugfs
65 in the file "virq_mapping". 65 in the file "irq_domain_mapping".
66 66
67 If you don't know what this means you don't need it. 67 If you don't know what this means you don't need it.
68 68
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 3601f3fbf67c..0e0ba5f840b2 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -23,7 +23,6 @@ static LIST_HEAD(irq_domain_list);
23static DEFINE_MUTEX(irq_domain_mutex); 23static DEFINE_MUTEX(irq_domain_mutex);
24 24
25static DEFINE_MUTEX(revmap_trees_mutex); 25static DEFINE_MUTEX(revmap_trees_mutex);
26static unsigned int irq_virq_count = NR_IRQS;
27static struct irq_domain *irq_default_domain; 26static struct irq_domain *irq_default_domain;
28 27
29/** 28/**
@@ -184,13 +183,16 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
184} 183}
185 184
186struct irq_domain *irq_domain_add_nomap(struct device_node *of_node, 185struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
186 unsigned int max_irq,
187 const struct irq_domain_ops *ops, 187 const struct irq_domain_ops *ops,
188 void *host_data) 188 void *host_data)
189{ 189{
190 struct irq_domain *domain = irq_domain_alloc(of_node, 190 struct irq_domain *domain = irq_domain_alloc(of_node,
191 IRQ_DOMAIN_MAP_NOMAP, ops, host_data); 191 IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
192 if (domain) 192 if (domain) {
193 domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
193 irq_domain_add(domain); 194 irq_domain_add(domain);
195 }
194 return domain; 196 return domain;
195} 197}
196 198
@@ -262,22 +264,6 @@ void irq_set_default_host(struct irq_domain *domain)
262 irq_default_domain = domain; 264 irq_default_domain = domain;
263} 265}
264 266
265/**
266 * irq_set_virq_count() - Set the maximum number of linux irqs
267 * @count: number of linux irqs, capped with NR_IRQS
268 *
269 * This is mainly for use by platforms like iSeries who want to program
270 * the virtual irq number in the controller to avoid the reverse mapping
271 */
272void irq_set_virq_count(unsigned int count)
273{
274 pr_debug("irq: Trying to set virq count to %d\n", count);
275
276 BUG_ON(count < NUM_ISA_INTERRUPTS);
277 if (count < NR_IRQS)
278 irq_virq_count = count;
279}
280
281static int irq_setup_virq(struct irq_domain *domain, unsigned int virq, 267static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
282 irq_hw_number_t hwirq) 268 irq_hw_number_t hwirq)
283{ 269{
@@ -320,13 +306,12 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
320 pr_debug("irq: create_direct virq allocation failed\n"); 306 pr_debug("irq: create_direct virq allocation failed\n");
321 return 0; 307 return 0;
322 } 308 }
323 if (virq >= irq_virq_count) { 309 if (virq >= domain->revmap_data.nomap.max_irq) {
324 pr_err("ERROR: no free irqs available below %i maximum\n", 310 pr_err("ERROR: no free irqs available below %i maximum\n",
325 irq_virq_count); 311 domain->revmap_data.nomap.max_irq);
326 irq_free_desc(virq); 312 irq_free_desc(virq);
327 return 0; 313 return 0;
328 } 314 }
329
330 pr_debug("irq: create_direct obtained virq %d\n", virq); 315 pr_debug("irq: create_direct obtained virq %d\n", virq);
331 316
332 if (irq_setup_virq(domain, virq, virq)) { 317 if (irq_setup_virq(domain, virq, virq)) {
@@ -350,7 +335,8 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
350unsigned int irq_create_mapping(struct irq_domain *domain, 335unsigned int irq_create_mapping(struct irq_domain *domain,
351 irq_hw_number_t hwirq) 336 irq_hw_number_t hwirq)
352{ 337{
353 unsigned int virq, hint; 338 unsigned int hint;
339 int virq;
354 340
355 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); 341 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
356 342
@@ -377,13 +363,13 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
377 return irq_domain_legacy_revmap(domain, hwirq); 363 return irq_domain_legacy_revmap(domain, hwirq);
378 364
379 /* Allocate a virtual interrupt number */ 365 /* Allocate a virtual interrupt number */
380 hint = hwirq % irq_virq_count; 366 hint = hwirq % nr_irqs;
381 if (hint == 0) 367 if (hint == 0)
382 hint++; 368 hint++;
383 virq = irq_alloc_desc_from(hint, 0); 369 virq = irq_alloc_desc_from(hint, 0);
384 if (!virq) 370 if (virq <= 0)
385 virq = irq_alloc_desc_from(1, 0); 371 virq = irq_alloc_desc_from(1, 0);
386 if (!virq) { 372 if (virq <= 0) {
387 pr_debug("irq: -> virq allocation failed\n"); 373 pr_debug("irq: -> virq allocation failed\n");
388 return 0; 374 return 0;
389 } 375 }
@@ -515,7 +501,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
515 irq_hw_number_t hwirq) 501 irq_hw_number_t hwirq)
516{ 502{
517 unsigned int i; 503 unsigned int i;
518 unsigned int hint = hwirq % irq_virq_count; 504 unsigned int hint = hwirq % nr_irqs;
519 505
520 /* Look for default domain if necessary */ 506
521 if (domain == NULL) 507 if (domain == NULL)
@@ -536,7 +522,7 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
536 if (data && (data->domain == domain) && (data->hwirq == hwirq)) 522 if (data && (data->domain == domain) && (data->hwirq == hwirq))
537 return i; 523 return i;
538 i++; 524 i++;
539 if (i >= irq_virq_count) 525 if (i >= nr_irqs)
540 i = 1; 526 i = 1;
541 } while(i != hint); 527 } while(i != hint);
542 return 0; 528 return 0;
@@ -642,8 +628,9 @@ static int virq_debug_show(struct seq_file *m, void *private)
642 void *data; 628 void *data;
643 int i; 629 int i;
644 630
645 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", 631 seq_printf(m, "%-5s %-7s %-15s %-*s %s\n", "irq", "hwirq",
646 "chip name", "chip data", "domain name"); 632 "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
633 "domain name");
647 634
648 for (i = 1; i < nr_irqs; i++) { 635 for (i = 1; i < nr_irqs; i++) {
649 desc = irq_to_desc(i); 636 desc = irq_to_desc(i);
@@ -666,7 +653,7 @@ static int virq_debug_show(struct seq_file *m, void *private)
666 seq_printf(m, "%-15s ", p); 653 seq_printf(m, "%-15s ", p);
667 654
668 data = irq_desc_get_chip_data(desc); 655 data = irq_desc_get_chip_data(desc);
669 seq_printf(m, "0x%16p ", data); 656 seq_printf(m, data ? "0x%p " : " %p ", data);
670 657
671 if (desc->irq_data.domain && desc->irq_data.domain->of_node) 658 if (desc->irq_data.domain && desc->irq_data.domain->of_node)
672 p = desc->irq_data.domain->of_node->full_name; 659 p = desc->irq_data.domain->of_node->full_name;
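
With irq_virq_count gone, the allocation hint wraps on nr_irqs instead, and virq 0 is skipped because it is never a valid mapping. The arithmetic, with an illustrative nr_irqs:

#include <stdio.h>

int main(void)
{
	unsigned long nr_irqs = 64;			/* illustrative */
	unsigned long hwirqs[] = { 0, 5, 64, 200 };

	for (int i = 0; i < 4; i++) {
		unsigned int hint = hwirqs[i] % nr_irqs;

		if (hint == 0)
			hint++;		/* virq 0 is never handed out */
		printf("hwirq %3lu -> hint %2u\n", hwirqs[i], hint);
	}
	return 0;
}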
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 0c56d44b9fd5..1588e3b2871b 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -11,6 +11,7 @@
11#include <linux/irq_work.h> 11#include <linux/irq_work.h>
12#include <linux/percpu.h> 12#include <linux/percpu.h>
13#include <linux/hardirq.h> 13#include <linux/hardirq.h>
14#include <linux/irqflags.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15 16
16/* 17/*
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 22000c3db0dd..8d262b467573 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -284,8 +284,12 @@ SYSCALL_DEFINE3(setitimer, int, which, struct itimerval __user *, value,
284 if (value) { 284 if (value) {
285 if(copy_from_user(&set_buffer, value, sizeof(set_buffer))) 285 if(copy_from_user(&set_buffer, value, sizeof(set_buffer)))
286 return -EFAULT; 286 return -EFAULT;
287 } else 287 } else {
288 memset((char *) &set_buffer, 0, sizeof(set_buffer)); 288 memset(&set_buffer, 0, sizeof(set_buffer));
289 printk_once(KERN_WARNING "%s calls setitimer() with new_value NULL pointer."
290 " Misfeature support will be removed\n",
291 current->comm);
292 }
289 293
290 error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL); 294 error = do_setitimer(which, &set_buffer, ovalue ? &get_buffer : NULL);
291 if (error || !ovalue) 295 if (error || !ovalue)
diff --git a/kernel/panic.c b/kernel/panic.c
index 80aed44e345a..8ed89a175d79 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -97,7 +97,7 @@ void panic(const char *fmt, ...)
97 /* 97 /*
98 * Avoid nested stack-dumping if a panic occurs during oops processing 98 * Avoid nested stack-dumping if a panic occurs during oops processing
99 */ 99 */
100 if (!oops_in_progress) 100 if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
101 dump_stack(); 101 dump_stack();
102#endif 102#endif
103 103
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 2cf9cc7aa103..a20dc8a3c949 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -1,6 +1,10 @@
1# 1#
2# Timer subsystem related configuration options 2# Timer subsystem related configuration options
3# 3#
4
5# Core internal switch. Selected by NO_HZ / HIGH_RES_TIMERS. This is
6# only related to the tick functionality. Oneshot clockevent devices
7# are supported independent of this.
4config TICK_ONESHOT 8config TICK_ONESHOT
5 bool 9 bool
6 10
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index e883f57a3cd3..bf57abdc7bd0 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -575,10 +575,12 @@ void tick_broadcast_switch_to_oneshot(void)
575 unsigned long flags; 575 unsigned long flags;
576 576
577 raw_spin_lock_irqsave(&tick_broadcast_lock, flags); 577 raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
578
579 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
580
578 if (cpumask_empty(tick_get_broadcast_mask())) 581 if (cpumask_empty(tick_get_broadcast_mask()))
579 goto end; 582 goto end;
580 583
581 tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
582 bc = tick_broadcast_device.evtdev; 584 bc = tick_broadcast_device.evtdev;
583 if (bc) 585 if (bc)
584 tick_broadcast_setup_oneshot(bc); 586 tick_broadcast_setup_oneshot(bc);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 3526038f2836..6a3a5b9ff561 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -534,9 +534,9 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
534 hrtimer_get_expires(&ts->sched_timer), 0)) 534 hrtimer_get_expires(&ts->sched_timer), 0))
535 break; 535 break;
536 } 536 }
537 /* Update jiffies and reread time */ 537 /* Reread time and update jiffies */
538 tick_do_update_jiffies64(now);
539 now = ktime_get(); 538 now = ktime_get();
539 tick_do_update_jiffies64(now);
540 } 540 }
541} 541}
542 542
diff --git a/lib/kobject.c b/lib/kobject.c
index 21dee7c19afd..aeefa8bc8b1c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -192,14 +192,14 @@ static int kobject_add_internal(struct kobject *kobj)
192 192
193 /* be noisy on error issues */ 193 /* be noisy on error issues */
194 if (error == -EEXIST) 194 if (error == -EEXIST)
195 printk(KERN_ERR "%s failed for %s with " 195 WARN(1, "%s failed for %s with "
196 "-EEXIST, don't try to register things with " 196 "-EEXIST, don't try to register things with "
197 "the same name in the same directory.\n", 197 "the same name in the same directory.\n",
198 __func__, kobject_name(kobj)); 198 __func__, kobject_name(kobj));
199 else 199 else
200 printk(KERN_ERR "%s failed for %s (%d)\n", 200 WARN(1, "%s failed for %s (error: %d parent: %s)\n",
201 __func__, kobject_name(kobj), error); 201 __func__, kobject_name(kobj), error,
202 dump_stack(); 202 parent ? kobject_name(parent) : "'none'");
203 } else 203 } else
204 kobj->state_in_sysfs = 1; 204 kobj->state_in_sysfs = 1;
205 205
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b8ce6f450956..cd65cb19c941 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2791,6 +2791,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2791 * so no worry about deadlock. 2791 * so no worry about deadlock.
2792 */ 2792 */
2793 page = pte_page(entry); 2793 page = pte_page(entry);
2794 get_page(page);
2794 if (page != pagecache_page) 2795 if (page != pagecache_page)
2795 lock_page(page); 2796 lock_page(page);
2796 2797
@@ -2822,6 +2823,7 @@ out_page_table_lock:
2822 } 2823 }
2823 if (page != pagecache_page) 2824 if (page != pagecache_page)
2824 unlock_page(page); 2825 unlock_page(page);
2826 put_page(page);
2825 2827
2826out_mutex: 2828out_mutex:
2827 mutex_unlock(&hugetlb_instantiation_mutex); 2829 mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 7d698df4a067..a7165a60d0a7 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2165,7 +2165,7 @@ static int __cpuinit memcg_cpu_hotplug_callback(struct notifier_block *nb,
2165 if (action == CPU_ONLINE) 2165 if (action == CPU_ONLINE)
2166 return NOTIFY_OK; 2166 return NOTIFY_OK;
2167 2167
2168 if ((action != CPU_DEAD) || action != CPU_DEAD_FROZEN) 2168 if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
2169 return NOTIFY_OK; 2169 return NOTIFY_OK;
2170 2170
2171 for_each_mem_cgroup(iter) 2171 for_each_mem_cgroup(iter)
@@ -3763,7 +3763,7 @@ move_account:
3763 goto try_to_free; 3763 goto try_to_free;
3764 cond_resched(); 3764 cond_resched();
3765 /* "ret" should also be checked to ensure all lists are empty. */ 3765 /* "ret" should also be checked to ensure all lists are empty. */
3766 } while (memcg->res.usage > 0 || ret); 3766 } while (res_counter_read_u64(&memcg->res, RES_USAGE) > 0 || ret);
3767out: 3767out:
3768 css_put(&memcg->css); 3768 css_put(&memcg->css);
3769 return ret; 3769 return ret;
@@ -3778,7 +3778,7 @@ try_to_free:
3778 lru_add_drain_all(); 3778 lru_add_drain_all();
3779 /* try to free all pages in this cgroup */ 3779 /* try to free all pages in this cgroup */
3780 shrink = 1; 3780 shrink = 1;
3781 while (nr_retries && memcg->res.usage > 0) { 3781 while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) {
3782 int progress; 3782 int progress;
3783 3783
3784 if (signal_pending(current)) { 3784 if (signal_pending(current)) {
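
The hotplug-callback fix above replaces a condition that was always true: any action differs from at least one of the two constants, so "(action != CPU_DEAD) || action != CPU_DEAD_FROZEN" never filtered anything. A quick truth check with stand-in constant values (the real CPU_DEAD/CPU_DEAD_FROZEN differ):

#include <stdio.h>

enum { CPU_DEAD = 7, CPU_DEAD_FROZEN = 8 };	/* stand-in values */

int main(void)
{
	for (int action = 6; action <= 9; action++) {
		int old = (action != CPU_DEAD) || (action != CPU_DEAD_FROZEN);
		int fix = (action != CPU_DEAD) && (action != CPU_DEAD_FROZEN);

		printf("action=%d old=%d fixed=%d\n", action, old, fix);
	}
	return 0;
}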
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33c332bbab73..1a518684a32f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2107,12 +2107,7 @@ restart:
2107 * with multiple processes reclaiming pages, the total 2107 * with multiple processes reclaiming pages, the total
2108 * freeing target can get unreasonably large. 2108 * freeing target can get unreasonably large.
2109 */ 2109 */
2110 if (nr_reclaimed >= nr_to_reclaim) 2110 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
2111 nr_to_reclaim = 0;
2112 else
2113 nr_to_reclaim -= nr_reclaimed;
2114
2115 if (!nr_to_reclaim && priority < DEF_PRIORITY)
2116 break; 2111 break;
2117 } 2112 }
2118 blk_finish_plug(&plug); 2113 blk_finish_plug(&plug);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index e33af63a884a..92a857e3786d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -665,6 +665,11 @@ int hci_dev_open(__u16 dev)
665 665
666 hci_req_lock(hdev); 666 hci_req_lock(hdev);
667 667
668 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
669 ret = -ENODEV;
670 goto done;
671 }
672
668 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) { 673 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
669 ret = -ERFKILL; 674 ret = -ERFKILL;
670 goto done; 675 goto done;
@@ -1849,6 +1854,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
1849 1854
1850 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); 1855 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1851 1856
1857 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1858
1852 write_lock(&hci_dev_list_lock); 1859 write_lock(&hci_dev_list_lock);
1853 list_del(&hdev->list); 1860 list_del(&hdev->list);
1854 write_unlock(&hci_dev_list_lock); 1861 write_unlock(&hci_dev_list_lock);
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index b8e17e4dac8b..94552b33d528 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1308,6 +1308,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1308 if (chan->retry_count >= chan->remote_max_tx) { 1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED); 1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan); 1310 l2cap_chan_unlock(chan);
1311 l2cap_chan_put(chan);
1311 return; 1312 return;
1312 } 1313 }
1313 1314
@@ -1316,6 +1317,7 @@ static void l2cap_monitor_timeout(struct work_struct *work)
1316 1317
1317 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1318 l2cap_chan_unlock(chan); 1319 l2cap_chan_unlock(chan);
1320 l2cap_chan_put(chan);
1319} 1321}
1320 1322
1321static void l2cap_retrans_timeout(struct work_struct *work) 1323static void l2cap_retrans_timeout(struct work_struct *work)
@@ -1335,6 +1337,7 @@ static void l2cap_retrans_timeout(struct work_struct *work)
1335 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL); 1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1336 1338
1337 l2cap_chan_unlock(chan); 1339 l2cap_chan_unlock(chan);
1340 l2cap_chan_put(chan);
1338} 1341}
1339 1342
1340static void l2cap_drop_acked_frames(struct l2cap_chan *chan) 1343static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index c4fe583b0af6..29122ed28ea9 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -82,7 +82,7 @@ static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
82 } 82 }
83 83
84 if (la.l2_cid) 84 if (la.l2_cid)
85 err = l2cap_add_scid(chan, la.l2_cid); 85 err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid));
86 else 86 else
87 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm); 87 err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm);
88 88
@@ -123,7 +123,8 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int al
123 if (la.l2_cid && la.l2_psm) 123 if (la.l2_cid && la.l2_psm)
124 return -EINVAL; 124 return -EINVAL;
125 125
126 err = l2cap_chan_connect(chan, la.l2_psm, la.l2_cid, &la.l2_bdaddr); 126 err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
127 &la.l2_bdaddr);
127 if (err) 128 if (err)
128 return err; 129 return err;
129 130
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7fcff8887131..4ef275c69675 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2523,13 +2523,18 @@ static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
2523 2523
2524 if (cp->val) { 2524 if (cp->val) {
2525 type = PAGE_SCAN_TYPE_INTERLACED; 2525 type = PAGE_SCAN_TYPE_INTERLACED;
2526 acp.interval = 0x0024; /* 22.5 msec page scan interval */ 2526
2527 /* 22.5 msec page scan interval */
2528 acp.interval = __constant_cpu_to_le16(0x0024);
2527 } else { 2529 } else {
2528 type = PAGE_SCAN_TYPE_STANDARD; /* default */ 2530 type = PAGE_SCAN_TYPE_STANDARD; /* default */
2529 acp.interval = 0x0800; /* default 1.28 sec page scan */ 2531
2532 /* default 1.28 sec page scan */
2533 acp.interval = __constant_cpu_to_le16(0x0800);
2530 } 2534 }
2531 2535
2532 acp.window = 0x0012; /* default 11.25 msec page scan window */ 2536 /* default 11.25 msec page scan window */
2537 acp.window = __constant_cpu_to_le16(0x0012);
2533 2538
2534 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp), 2539 err = hci_send_cmd(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, sizeof(acp),
2535 &acp); 2540 &acp);
@@ -2936,7 +2941,7 @@ int mgmt_device_connected(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
2936 name, name_len); 2941 name, name_len);
2937 2942
2938 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0) 2943 if (dev_class && memcmp(dev_class, "\0\0\0", 3) != 0)
2939 eir_len = eir_append_data(&ev->eir[eir_len], eir_len, 2944 eir_len = eir_append_data(ev->eir, eir_len,
2940 EIR_CLASS_OF_DEV, dev_class, 3); 2945 EIR_CLASS_OF_DEV, dev_class, 3);
2941 2946
2942 put_unaligned_le16(eir_len, &ev->eir_len); 2947 put_unaligned_le16(eir_len, &ev->eir_len);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 702a1ae9220b..27ca25ed7021 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -241,7 +241,6 @@ static void br_multicast_group_expired(unsigned long data)
241 hlist_del_rcu(&mp->hlist[mdb->ver]); 241 hlist_del_rcu(&mp->hlist[mdb->ver]);
242 mdb->size--; 242 mdb->size--;
243 243
244 del_timer(&mp->query_timer);
245 call_rcu_bh(&mp->rcu, br_multicast_free_group); 244 call_rcu_bh(&mp->rcu, br_multicast_free_group);
246 245
247out: 246out:
@@ -271,7 +270,6 @@ static void br_multicast_del_pg(struct net_bridge *br,
271 rcu_assign_pointer(*pp, p->next); 270 rcu_assign_pointer(*pp, p->next);
272 hlist_del_init(&p->mglist); 271 hlist_del_init(&p->mglist);
273 del_timer(&p->timer); 272 del_timer(&p->timer);
274 del_timer(&p->query_timer);
275 call_rcu_bh(&p->rcu, br_multicast_free_pg); 273 call_rcu_bh(&p->rcu, br_multicast_free_pg);
276 274
277 if (!mp->ports && !mp->mglist && 275 if (!mp->ports && !mp->mglist &&
@@ -507,74 +505,6 @@ static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
507 return NULL; 505 return NULL;
508} 506}
509 507
510static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
511{
512 struct net_bridge *br = mp->br;
513 struct sk_buff *skb;
514
515 skb = br_multicast_alloc_query(br, &mp->addr);
516 if (!skb)
517 goto timer;
518
519 netif_rx(skb);
520
521timer:
522 if (++mp->queries_sent < br->multicast_last_member_count)
523 mod_timer(&mp->query_timer,
524 jiffies + br->multicast_last_member_interval);
525}
526
527static void br_multicast_group_query_expired(unsigned long data)
528{
529 struct net_bridge_mdb_entry *mp = (void *)data;
530 struct net_bridge *br = mp->br;
531
532 spin_lock(&br->multicast_lock);
533 if (!netif_running(br->dev) || !mp->mglist ||
534 mp->queries_sent >= br->multicast_last_member_count)
535 goto out;
536
537 br_multicast_send_group_query(mp);
538
539out:
540 spin_unlock(&br->multicast_lock);
541}
542
543static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
544{
545 struct net_bridge_port *port = pg->port;
546 struct net_bridge *br = port->br;
547 struct sk_buff *skb;
548
549 skb = br_multicast_alloc_query(br, &pg->addr);
550 if (!skb)
551 goto timer;
552
553 br_deliver(port, skb);
554
555timer:
556 if (++pg->queries_sent < br->multicast_last_member_count)
557 mod_timer(&pg->query_timer,
558 jiffies + br->multicast_last_member_interval);
559}
560
561static void br_multicast_port_group_query_expired(unsigned long data)
562{
563 struct net_bridge_port_group *pg = (void *)data;
564 struct net_bridge_port *port = pg->port;
565 struct net_bridge *br = port->br;
566
567 spin_lock(&br->multicast_lock);
568 if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
569 pg->queries_sent >= br->multicast_last_member_count)
570 goto out;
571
572 br_multicast_send_port_group_query(pg);
573
574out:
575 spin_unlock(&br->multicast_lock);
576}
577
578static struct net_bridge_mdb_entry *br_multicast_get_group( 508static struct net_bridge_mdb_entry *br_multicast_get_group(
579 struct net_bridge *br, struct net_bridge_port *port, 509 struct net_bridge *br, struct net_bridge_port *port,
580 struct br_ip *group, int hash) 510 struct br_ip *group, int hash)
@@ -690,8 +620,6 @@ rehash:
690 mp->addr = *group; 620 mp->addr = *group;
691 setup_timer(&mp->timer, br_multicast_group_expired, 621 setup_timer(&mp->timer, br_multicast_group_expired,
692 (unsigned long)mp); 622 (unsigned long)mp);
693 setup_timer(&mp->query_timer, br_multicast_group_query_expired,
694 (unsigned long)mp);
695 623
696 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]); 624 hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
697 mdb->size++; 625 mdb->size++;
@@ -746,8 +674,6 @@ static int br_multicast_add_group(struct net_bridge *br,
746 hlist_add_head(&p->mglist, &port->mglist); 674 hlist_add_head(&p->mglist, &port->mglist);
747 setup_timer(&p->timer, br_multicast_port_group_expired, 675 setup_timer(&p->timer, br_multicast_port_group_expired,
748 (unsigned long)p); 676 (unsigned long)p);
749 setup_timer(&p->query_timer, br_multicast_port_group_query_expired,
750 (unsigned long)p);
751 677
752 rcu_assign_pointer(*pp, p); 678 rcu_assign_pointer(*pp, p);
753 679
@@ -1291,9 +1217,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1291 time_after(mp->timer.expires, time) : 1217 time_after(mp->timer.expires, time) :
1292 try_to_del_timer_sync(&mp->timer) >= 0)) { 1218 try_to_del_timer_sync(&mp->timer) >= 0)) {
1293 mod_timer(&mp->timer, time); 1219 mod_timer(&mp->timer, time);
1294
1295 mp->queries_sent = 0;
1296 mod_timer(&mp->query_timer, now);
1297 } 1220 }
1298 1221
1299 goto out; 1222 goto out;
@@ -1310,9 +1233,6 @@ static void br_multicast_leave_group(struct net_bridge *br,
1310 time_after(p->timer.expires, time) : 1233 time_after(p->timer.expires, time) :
1311 try_to_del_timer_sync(&p->timer) >= 0)) { 1234 try_to_del_timer_sync(&p->timer) >= 0)) {
1312 mod_timer(&p->timer, time); 1235 mod_timer(&p->timer, time);
1313
1314 p->queries_sent = 0;
1315 mod_timer(&p->query_timer, now);
1316 } 1236 }
1317 1237
1318 break; 1238 break;
@@ -1681,7 +1601,6 @@ void br_multicast_stop(struct net_bridge *br)
1681 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i], 1601 hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
1682 hlist[ver]) { 1602 hlist[ver]) {
1683 del_timer(&mp->timer); 1603 del_timer(&mp->timer);
1684 del_timer(&mp->query_timer);
1685 call_rcu_bh(&mp->rcu, br_multicast_free_group); 1604 call_rcu_bh(&mp->rcu, br_multicast_free_group);
1686 } 1605 }
1687 } 1606 }
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 0b67a63ad7a8..e1d882257877 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -82,9 +82,7 @@ struct net_bridge_port_group {
82 struct hlist_node mglist; 82 struct hlist_node mglist;
83 struct rcu_head rcu; 83 struct rcu_head rcu;
84 struct timer_list timer; 84 struct timer_list timer;
85 struct timer_list query_timer;
86 struct br_ip addr; 85 struct br_ip addr;
87 u32 queries_sent;
88}; 86};
89 87
90struct net_bridge_mdb_entry 88struct net_bridge_mdb_entry
@@ -94,10 +92,8 @@ struct net_bridge_mdb_entry
94 struct net_bridge_port_group __rcu *ports; 92 struct net_bridge_port_group __rcu *ports;
95 struct rcu_head rcu; 93 struct rcu_head rcu;
96 struct timer_list timer; 94 struct timer_list timer;
97 struct timer_list query_timer;
98 struct br_ip addr; 95 struct br_ip addr;
99 bool mglist; 96 bool mglist;
100 u32 queries_sent;
101}; 97};
102 98
103struct net_bridge_mdb_htable 99struct net_bridge_mdb_htable
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index baf8d281152c..e59840010d45 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -952,9 +952,11 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
952 goto adjust_others; 952 goto adjust_others;
953 } 953 }
954 954
955 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 955 data = kmalloc(size + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
956 gfp_mask);
956 if (!data) 957 if (!data)
957 goto nodata; 958 goto nodata;
959 size = SKB_WITH_OVERHEAD(ksize(data));
958 960
959 /* Copy only real data... and, alas, header. This should be 961 /* Copy only real data... and, alas, header. This should be
960 * optimized for the cases when header is void. 962 * optimized for the cases when header is void.
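
pskb_expand_head() now rounds the shared-info size up to the cacheline before allocating, then recomputes the usable size from what kmalloc() actually returned. The rounding macro below mirrors the kernel's SKB_DATA_ALIGN; the 64-byte SMP_CACHE_BYTES and the shared-info size are assumptions for illustration:

#include <stdio.h>

#define SMP_CACHE_BYTES 64
#define SKB_DATA_ALIGN(x) \
	(((x) + (SMP_CACHE_BYTES - 1)) & ~(SMP_CACHE_BYTES - 1))

int main(void)
{
	unsigned long shinfo = 320;	/* illustrative sizeof(skb_shared_info) */

	printf("aligned(%lu) = %lu\n", shinfo,
	       (unsigned long)SKB_DATA_ALIGN(shinfo));	/* 320 */
	printf("aligned(321) = %lu\n",
	       (unsigned long)SKB_DATA_ALIGN(321UL));	/* 384 */
	return 0;
}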
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index de9da21113a1..cf73cc70ed2d 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -74,16 +74,24 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
74 74
75 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph); 75 iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
76 if (iph == NULL) 76 if (iph == NULL)
77 return -NF_DROP; 77 return -NF_ACCEPT;
78 78
79 /* Conntrack defragments packets, we might still see fragments 79 /* Conntrack defragments packets, we might still see fragments
80 * inside ICMP packets though. */ 80 * inside ICMP packets though. */
81 if (iph->frag_off & htons(IP_OFFSET)) 81 if (iph->frag_off & htons(IP_OFFSET))
82 return -NF_DROP; 82 return -NF_ACCEPT;
83 83
84 *dataoff = nhoff + (iph->ihl << 2); 84 *dataoff = nhoff + (iph->ihl << 2);
85 *protonum = iph->protocol; 85 *protonum = iph->protocol;
86 86
87 /* Check bogus IP headers */
88 if (*dataoff > skb->len) {
89 pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
90 "nhoff %u, ihl %u, skblen %u\n",
91 nhoff, iph->ihl << 2, skb->len);
92 return -NF_ACCEPT;
93 }
94
87 return NF_ACCEPT; 95 return NF_ACCEPT;
88} 96}
89 97
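
The new guard rejects headers whose claimed length runs past the packet, and, like the other failure paths here, now returns -NF_ACCEPT so a malformed packet is passed along untracked instead of being dropped by conntrack. The offset check in isolation, with a deliberately bogus ihl:

#include <stdio.h>

int main(void)
{
	unsigned int nhoff = 0, skblen = 20;
	unsigned int ihl = 15;			/* claims a 60-byte header */
	unsigned int dataoff = nhoff + (ihl << 2);

	if (dataoff > skblen)
		printf("bogus IPv4 packet: nhoff %u, ihl %u, skblen %u\n",
		       nhoff, ihl << 2, skblen);
	return 0;
}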
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 5d54ed30e821..8bb6adeb62c0 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -701,11 +701,12 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
701 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp); 701 skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
702 if (skb) { 702 if (skb) {
703 if (sk_wmem_schedule(sk, skb->truesize)) { 703 if (sk_wmem_schedule(sk, skb->truesize)) {
704 skb_reserve(skb, sk->sk_prot->max_header);
704 /* 705 /*
705 * Make sure that we have exactly size bytes 706 * Make sure that we have exactly size bytes
706 * available to the caller, no more, no less. 707 * available to the caller, no more, no less.
707 */ 708 */
708 skb_reserve(skb, skb_tailroom(skb) - size); 709 skb->avail_size = size;
709 return skb; 710 return skb;
710 } 711 }
711 __kfree_skb(skb); 712 __kfree_skb(skb);
@@ -995,10 +996,9 @@ new_segment:
995 copy = seglen; 996 copy = seglen;
996 997
997 /* Where to copy to? */ 998 /* Where to copy to? */
998 if (skb_tailroom(skb) > 0) { 999 if (skb_availroom(skb) > 0) {
999 /* We have some space in skb head. Superb! */ 1000 /* We have some space in skb head. Superb! */
1000 if (copy > skb_tailroom(skb)) 1001 copy = min_t(int, copy, skb_availroom(skb));
1001 copy = skb_tailroom(skb);
1002 err = skb_add_data_nocache(sk, skb, from, copy); 1002 err = skb_add_data_nocache(sk, skb, from, copy);
1003 if (err) 1003 if (err)
1004 goto do_fault; 1004 goto do_fault;
@@ -1452,7 +1452,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1452 if ((available < target) && 1452 if ((available < target) &&
1453 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && 1453 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1454 !sysctl_tcp_low_latency && 1454 !sysctl_tcp_low_latency &&
1455 dma_find_channel(DMA_MEMCPY)) { 1455 net_dma_find_channel()) {
1456 preempt_enable_no_resched(); 1456 preempt_enable_no_resched();
1457 tp->ucopy.pinned_list = 1457 tp->ucopy.pinned_list =
1458 dma_pin_iovec_pages(msg->msg_iov, len); 1458 dma_pin_iovec_pages(msg->msg_iov, len);
@@ -1667,7 +1667,7 @@ do_prequeue:
1667 if (!(flags & MSG_TRUNC)) { 1667 if (!(flags & MSG_TRUNC)) {
1668#ifdef CONFIG_NET_DMA 1668#ifdef CONFIG_NET_DMA
1669 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1669 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1670 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1670 tp->ucopy.dma_chan = net_dma_find_channel();
1671 1671
1672 if (tp->ucopy.dma_chan) { 1672 if (tp->ucopy.dma_chan) {
1673 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( 1673 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
@@ -3302,8 +3302,7 @@ void __init tcp_init(void)
3302 3302
3303 tcp_init_mem(&init_net); 3303 tcp_init_mem(&init_net);
3304 /* Set per-socket limits to no more than 1/128 the pressure threshold */ 3304 /* Set per-socket limits to no more than 1/128 the pressure threshold */
3305 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 10); 3305 limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3306 limit = max(limit, 128UL);
3307 max_share = min(4UL*1024*1024, limit); 3306 max_share = min(4UL*1024*1024, limit);
3308 3307
3309 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; 3308 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e886e2f7fa8d..9944c1d9a218 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -474,8 +474,11 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
474 if (!win_dep) { 474 if (!win_dep) {
475 m -= (new_sample >> 3); 475 m -= (new_sample >> 3);
476 new_sample += m; 476 new_sample += m;
477 } else if (m < new_sample) 477 } else {
478 new_sample = m << 3; 478 m <<= 3;
479 if (m < new_sample)
480 new_sample = m;
481 }
479 } else { 482 } else {
480 /* No previous measure. */ 483 /* No previous measure. */
481 new_sample = m << 3; 484 new_sample = m << 3;
@@ -5225,7 +5228,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb,
5225 return 0; 5228 return 0;
5226 5229
5227 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 5230 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
5228 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 5231 tp->ucopy.dma_chan = net_dma_find_channel();
5229 5232
5230 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) { 5233 if (tp->ucopy.dma_chan && skb_csum_unnecessary(skb)) {
5231 5234
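
The receive-RTT fix keeps new_sample in its fixed-point form (scaled by 8) on the win_dep path: the candidate m must be shifted before the comparison, otherwise a raw m is compared against a scaled value and, being roughly 8x smaller, nearly always overwrites the sample. Worked numbers:

#include <stdio.h>

int main(void)
{
	long new_sample = 100 << 3;	/* stored scaled: 800 (means 100) */
	long m = 120;			/* new, larger measurement */

	/* old logic: raw 120 < scaled 800, new_sample wrongly became 960 */
	/* fixed logic: scale first; 960 >= 800, so the minimum is kept */
	long scaled = m << 3;
	if (scaled < new_sample)
		new_sample = scaled;

	printf("new_sample = %ld (unscaled %ld)\n",
	       new_sample, new_sample >> 3);	/* 800 (100) */
	return 0;
}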
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3a25cf743f8b..0cb86ceb652f 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1730,7 +1730,7 @@ process:
1730#ifdef CONFIG_NET_DMA 1730#ifdef CONFIG_NET_DMA
1731 struct tcp_sock *tp = tcp_sk(sk); 1731 struct tcp_sock *tp = tcp_sk(sk);
1732 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1732 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1733 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1733 tp->ucopy.dma_chan = net_dma_find_channel();
1734 if (tp->ucopy.dma_chan) 1734 if (tp->ucopy.dma_chan)
1735 ret = tcp_v4_do_rcv(sk, skb); 1735 ret = tcp_v4_do_rcv(sk, skb);
1736 else 1736 else
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 364784a91939..376b2cfbb685 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2060,7 +2060,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
2060 /* Punt if not enough space exists in the first SKB for 2060 /* Punt if not enough space exists in the first SKB for
2061 * the data in the second 2061 * the data in the second
2062 */ 2062 */
2063 if (skb->len > skb_tailroom(to)) 2063 if (skb->len > skb_availroom(to))
2064 break; 2064 break;
2065 2065
2066 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) 2066 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp)))
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 94874b0bdcdc..9d4e15559319 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -78,19 +78,6 @@ EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
78 78
79 Hence the start of any table is given by get_table() below. */ 79 Hence the start of any table is given by get_table() below. */
80 80
81/* Check for an extension */
82int
83ip6t_ext_hdr(u8 nexthdr)
84{
85 return (nexthdr == IPPROTO_HOPOPTS) ||
86 (nexthdr == IPPROTO_ROUTING) ||
87 (nexthdr == IPPROTO_FRAGMENT) ||
88 (nexthdr == IPPROTO_ESP) ||
89 (nexthdr == IPPROTO_AH) ||
90 (nexthdr == IPPROTO_NONE) ||
91 (nexthdr == IPPROTO_DSTOPTS);
92}
93
94/* Returns whether matches rule or not. */ 81/* Returns whether matches rule or not. */
95/* Performance critical - called for every packet */ 82/* Performance critical - called for every packet */
96static inline bool 83static inline bool
@@ -2366,7 +2353,6 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2366EXPORT_SYMBOL(ip6t_register_table); 2353EXPORT_SYMBOL(ip6t_register_table);
2367EXPORT_SYMBOL(ip6t_unregister_table); 2354EXPORT_SYMBOL(ip6t_unregister_table);
2368EXPORT_SYMBOL(ip6t_do_table); 2355EXPORT_SYMBOL(ip6t_do_table);
2369EXPORT_SYMBOL(ip6t_ext_hdr);
2370EXPORT_SYMBOL(ipv6_find_hdr); 2356EXPORT_SYMBOL(ipv6_find_hdr);
2371 2357
2372module_init(ip6_tables_init); 2358module_init(ip6_tables_init);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 12c6ece67f39..86cfe6005f40 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1645,7 +1645,7 @@ process:
1645#ifdef CONFIG_NET_DMA 1645#ifdef CONFIG_NET_DMA
1646 struct tcp_sock *tp = tcp_sk(sk); 1646 struct tcp_sock *tp = tcp_sk(sk);
1647 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) 1647 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1648 tp->ucopy.dma_chan = dma_find_channel(DMA_MEMCPY); 1648 tp->ucopy.dma_chan = net_dma_find_channel();
1649 if (tp->ucopy.dma_chan) 1649 if (tp->ucopy.dma_chan)
1650 ret = tcp_v6_do_rcv(sk, skb); 1650 ret = tcp_v6_do_rcv(sk, skb);
1651 else 1651 else
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 576fb25456dd..f76da5b3f5c5 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -3387,8 +3387,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
3387 */ 3387 */
3388 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n", 3388 printk(KERN_DEBUG "%s: waiting for beacon from %pM\n",
3389 sdata->name, ifmgd->bssid); 3389 sdata->name, ifmgd->bssid);
3390 assoc_data->timeout = jiffies + 3390 assoc_data->timeout = TU_TO_EXP_TIME(req->bss->beacon_interval);
3391 TU_TO_EXP_TIME(req->bss->beacon_interval);
3392 } else { 3391 } else {
3393 assoc_data->have_beacon = true; 3392 assoc_data->have_beacon = true;
3394 assoc_data->sent_assoc = false; 3393 assoc_data->sent_assoc = false;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3cc4487ac349..729f157a0efa 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1592,7 +1592,7 @@ static int nf_conntrack_init_net(struct net *net)
1592 return 0; 1592 return 0;
1593 1593
1594err_timeout: 1594err_timeout:
1595 nf_conntrack_timeout_fini(net); 1595 nf_conntrack_ecache_fini(net);
1596err_ecache: 1596err_ecache:
1597 nf_conntrack_tstamp_fini(net); 1597 nf_conntrack_tstamp_fini(net);
1598err_tstamp: 1598err_tstamp:
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 361eade62a09..0d07a1dcf605 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -584,8 +584,8 @@ static bool tcp_in_window(const struct nf_conn *ct,
584 * Let's try to use the data from the packet. 584 * Let's try to use the data from the packet.
585 */ 585 */
586 sender->td_end = end; 586 sender->td_end = end;
587 win <<= sender->td_scale; 587 swin = win << sender->td_scale;
588 sender->td_maxwin = (win == 0 ? 1 : win); 588 sender->td_maxwin = (swin == 0 ? 1 : swin);
589 sender->td_maxend = end + sender->td_maxwin; 589 sender->td_maxend = end + sender->td_maxwin;
590 /* 590 /*
591 * We haven't seen traffic in the other direction yet 591 * We haven't seen traffic in the other direction yet
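
Scaling the announced window into a separate swin leaves win itself unscaled, presumably so later uses of win inside tcp_in_window() see the original value rather than the in-place-shifted one. The pattern in isolation, with illustrative values:

#include <stdio.h>

int main(void)
{
	unsigned int win = 512, td_scale = 4;

	/* new style: scale into a separate variable */
	unsigned int swin = win << td_scale;
	unsigned int td_maxwin = (swin == 0 ? 1 : swin);

	printf("td_maxwin=%u, win still %u for later checks\n",
	       td_maxwin, win);
	return 0;
}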
diff --git a/net/nfc/llcp/commands.c b/net/nfc/llcp/commands.c
index 7b76eb7192f3..ef10ffcb4b6f 100644
--- a/net/nfc/llcp/commands.c
+++ b/net/nfc/llcp/commands.c
@@ -474,7 +474,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
474 474
475 while (remaining_len > 0) { 475 while (remaining_len > 0) {
476 476
477 frag_len = min_t(u16, local->remote_miu, remaining_len); 477 frag_len = min_t(size_t, local->remote_miu, remaining_len);
478 478
479 pr_debug("Fragment %zd bytes remaining %zd", 479 pr_debug("Fragment %zd bytes remaining %zd",
480 frag_len, remaining_len); 480 frag_len, remaining_len);
@@ -497,7 +497,7 @@ int nfc_llcp_send_i_frame(struct nfc_llcp_sock *sock,
497 release_sock(sk); 497 release_sock(sk);
498 498
499 remaining_len -= frag_len; 499 remaining_len -= frag_len;
500 msg_ptr += len; 500 msg_ptr += frag_len;
501 } 501 }
502 502
503 kfree(msg_data); 503 kfree(msg_data);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index e49da2797022..f432c57af05d 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1294,6 +1294,11 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
1294 goto bad_res; 1294 goto bad_res;
1295 } 1295 }
1296 1296
1297 if (!netif_running(netdev)) {
1298 result = -ENETDOWN;
1299 goto bad_res;
1300 }
1301
1297 nla_for_each_nested(nl_txq_params, 1302 nla_for_each_nested(nl_txq_params,
1298 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], 1303 info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
1299 rem_txq_params) { 1304 rem_txq_params) {
@@ -6384,7 +6389,7 @@ static struct genl_ops nl80211_ops[] = {
6384 .doit = nl80211_get_key, 6389 .doit = nl80211_get_key,
6385 .policy = nl80211_policy, 6390 .policy = nl80211_policy,
6386 .flags = GENL_ADMIN_PERM, 6391 .flags = GENL_ADMIN_PERM,
6387 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6392 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6388 NL80211_FLAG_NEED_RTNL, 6393 NL80211_FLAG_NEED_RTNL,
6389 }, 6394 },
6390 { 6395 {
@@ -6416,7 +6421,7 @@ static struct genl_ops nl80211_ops[] = {
6416 .policy = nl80211_policy, 6421 .policy = nl80211_policy,
6417 .flags = GENL_ADMIN_PERM, 6422 .flags = GENL_ADMIN_PERM,
6418 .doit = nl80211_set_beacon, 6423 .doit = nl80211_set_beacon,
6419 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6424 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6420 NL80211_FLAG_NEED_RTNL, 6425 NL80211_FLAG_NEED_RTNL,
6421 }, 6426 },
6422 { 6427 {
@@ -6424,7 +6429,7 @@ static struct genl_ops nl80211_ops[] = {
6424 .policy = nl80211_policy, 6429 .policy = nl80211_policy,
6425 .flags = GENL_ADMIN_PERM, 6430 .flags = GENL_ADMIN_PERM,
6426 .doit = nl80211_start_ap, 6431 .doit = nl80211_start_ap,
6427 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6432 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6428 NL80211_FLAG_NEED_RTNL, 6433 NL80211_FLAG_NEED_RTNL,
6429 }, 6434 },
6430 { 6435 {
@@ -6432,7 +6437,7 @@ static struct genl_ops nl80211_ops[] = {
6432 .policy = nl80211_policy, 6437 .policy = nl80211_policy,
6433 .flags = GENL_ADMIN_PERM, 6438 .flags = GENL_ADMIN_PERM,
6434 .doit = nl80211_stop_ap, 6439 .doit = nl80211_stop_ap,
6435 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6440 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6436 NL80211_FLAG_NEED_RTNL, 6441 NL80211_FLAG_NEED_RTNL,
6437 }, 6442 },
6438 { 6443 {
@@ -6448,7 +6453,7 @@ static struct genl_ops nl80211_ops[] = {
6448 .doit = nl80211_set_station, 6453 .doit = nl80211_set_station,
6449 .policy = nl80211_policy, 6454 .policy = nl80211_policy,
6450 .flags = GENL_ADMIN_PERM, 6455 .flags = GENL_ADMIN_PERM,
6451 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6456 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6452 NL80211_FLAG_NEED_RTNL, 6457 NL80211_FLAG_NEED_RTNL,
6453 }, 6458 },
6454 { 6459 {
@@ -6464,7 +6469,7 @@ static struct genl_ops nl80211_ops[] = {
6464 .doit = nl80211_del_station, 6469 .doit = nl80211_del_station,
6465 .policy = nl80211_policy, 6470 .policy = nl80211_policy,
6466 .flags = GENL_ADMIN_PERM, 6471 .flags = GENL_ADMIN_PERM,
6467 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6472 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6468 NL80211_FLAG_NEED_RTNL, 6473 NL80211_FLAG_NEED_RTNL,
6469 }, 6474 },
6470 { 6475 {
@@ -6497,7 +6502,7 @@ static struct genl_ops nl80211_ops[] = {
6497 .doit = nl80211_del_mpath, 6502 .doit = nl80211_del_mpath,
6498 .policy = nl80211_policy, 6503 .policy = nl80211_policy,
6499 .flags = GENL_ADMIN_PERM, 6504 .flags = GENL_ADMIN_PERM,
6500 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6505 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6501 NL80211_FLAG_NEED_RTNL, 6506 NL80211_FLAG_NEED_RTNL,
6502 }, 6507 },
6503 { 6508 {
@@ -6505,7 +6510,7 @@ static struct genl_ops nl80211_ops[] = {
6505 .doit = nl80211_set_bss, 6510 .doit = nl80211_set_bss,
6506 .policy = nl80211_policy, 6511 .policy = nl80211_policy,
6507 .flags = GENL_ADMIN_PERM, 6512 .flags = GENL_ADMIN_PERM,
6508 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6513 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6509 NL80211_FLAG_NEED_RTNL, 6514 NL80211_FLAG_NEED_RTNL,
6510 }, 6515 },
6511 { 6516 {
@@ -6531,7 +6536,7 @@ static struct genl_ops nl80211_ops[] = {
6531 .doit = nl80211_get_mesh_config, 6536 .doit = nl80211_get_mesh_config,
6532 .policy = nl80211_policy, 6537 .policy = nl80211_policy,
6533 /* can be retrieved by unprivileged users */ 6538 /* can be retrieved by unprivileged users */
6534 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6539 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6535 NL80211_FLAG_NEED_RTNL, 6540 NL80211_FLAG_NEED_RTNL,
6536 }, 6541 },
6537 { 6542 {
@@ -6664,7 +6669,7 @@ static struct genl_ops nl80211_ops[] = {
6664 .doit = nl80211_setdel_pmksa, 6669 .doit = nl80211_setdel_pmksa,
6665 .policy = nl80211_policy, 6670 .policy = nl80211_policy,
6666 .flags = GENL_ADMIN_PERM, 6671 .flags = GENL_ADMIN_PERM,
6667 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6672 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6668 NL80211_FLAG_NEED_RTNL, 6673 NL80211_FLAG_NEED_RTNL,
6669 }, 6674 },
6670 { 6675 {
@@ -6672,7 +6677,7 @@ static struct genl_ops nl80211_ops[] = {
6672 .doit = nl80211_setdel_pmksa, 6677 .doit = nl80211_setdel_pmksa,
6673 .policy = nl80211_policy, 6678 .policy = nl80211_policy,
6674 .flags = GENL_ADMIN_PERM, 6679 .flags = GENL_ADMIN_PERM,
6675 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6680 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6676 NL80211_FLAG_NEED_RTNL, 6681 NL80211_FLAG_NEED_RTNL,
6677 }, 6682 },
6678 { 6683 {
@@ -6680,7 +6685,7 @@ static struct genl_ops nl80211_ops[] = {
6680 .doit = nl80211_flush_pmksa, 6685 .doit = nl80211_flush_pmksa,
6681 .policy = nl80211_policy, 6686 .policy = nl80211_policy,
6682 .flags = GENL_ADMIN_PERM, 6687 .flags = GENL_ADMIN_PERM,
6683 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6688 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6684 NL80211_FLAG_NEED_RTNL, 6689 NL80211_FLAG_NEED_RTNL,
6685 }, 6690 },
6686 { 6691 {
@@ -6840,7 +6845,7 @@ static struct genl_ops nl80211_ops[] = {
6840 .doit = nl80211_probe_client, 6845 .doit = nl80211_probe_client,
6841 .policy = nl80211_policy, 6846 .policy = nl80211_policy,
6842 .flags = GENL_ADMIN_PERM, 6847 .flags = GENL_ADMIN_PERM,
6843 .internal_flags = NL80211_FLAG_NEED_NETDEV | 6848 .internal_flags = NL80211_FLAG_NEED_NETDEV_UP |
6844 NL80211_FLAG_NEED_RTNL, 6849 NL80211_FLAG_NEED_RTNL,
6845 }, 6850 },
6846 { 6851 {
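
The nl80211 hunks above all swap NL80211_FLAG_NEED_NETDEV for NL80211_FLAG_NEED_NETDEV_UP, so the generic pre-doit hook refuses these commands when the interface exists but is down, instead of every handler having to check (or forget to check) for itself. Below is a minimal sketch of that centralised precondition check, with stand-in names and types; the real logic lives in nl80211's pre_doit callback.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_NEED_NETDEV    (1 << 0)
#define FLAG_NEED_NETDEV_UP (1 << 1)    /* implies NEED_NETDEV */
#define FLAG_NEED_RTNL      (1 << 2)

struct netdev { const char *name; bool up; };

/* one central hook validates preconditions before any .doit runs */
static int pre_doit(unsigned int flags, struct netdev *dev)
{
        if (flags & (FLAG_NEED_NETDEV | FLAG_NEED_NETDEV_UP)) {
                if (!dev)
                        return -ENODEV;
                if ((flags & FLAG_NEED_NETDEV_UP) && !dev->up)
                        return -ENETDOWN;
        }
        return 0;
}

int main(void)
{
        struct netdev wlan0 = { "wlan0", false };

        /* a start_ap-style op now requires the netdev to be up */
        printf("down: %d\n", pre_doit(FLAG_NEED_NETDEV_UP | FLAG_NEED_RTNL, &wlan0));
        wlan0.up = true;
        printf("up:   %d\n", pre_doit(FLAG_NEED_NETDEV_UP | FLAG_NEED_RTNL, &wlan0));
        return 0;
}
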
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 0af7f54e4f61..af648e08e61b 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -780,8 +780,10 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
780 if (cmd == SIOCSIWENCODEEXT) { 780 if (cmd == SIOCSIWENCODEEXT) {
781 struct iw_encode_ext *ee = (void *) extra; 781 struct iw_encode_ext *ee = (void *) extra;
782 782
783 if (iwp->length < sizeof(*ee) + ee->key_len) 783 if (iwp->length < sizeof(*ee) + ee->key_len) {
784 return -EFAULT; 784 err = -EFAULT;
785 goto out;
786 }
785 } 787 }
786 } 788 }
787 789
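
The wext-core change replaces a bare return -EFAULT with err = -EFAULT; goto out: by this point in ioctl_standard_iw_point() the extra buffer has already been allocated, so returning directly leaked it. Here is a self-contained sketch of the single-exit idiom the fix restores (function and helper names are hypothetical).

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* every failure after the allocation funnels through "out",
 * so the buffer is freed on all paths */
static int handle_point(const char *src, size_t len, size_t max)
{
        int err = 0;
        char *extra = malloc(max);

        if (!extra)
                return -ENOMEM;         /* nothing to clean up yet */

        if (len > max) {
                err = -EFAULT;          /* was: return -EFAULT (leak) */
                goto out;
        }
        memcpy(extra, src, len);
        /* ... hand extra to the ioctl handler ... */
out:
        free(extra);
        return err;
}

int main(void)
{
        return handle_point("key", 3, 8);
}
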
diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
index 0586085136d1..52577f052bc1 100644
--- a/scripts/kconfig/confdata.c
+++ b/scripts/kconfig/confdata.c
@@ -540,35 +540,6 @@ static struct conf_printer header_printer_cb =
540}; 540};
541 541
542/* 542/*
543 * Generate the __enabled_CONFIG_* and __enabled_CONFIG_*_MODULE macros for
544 * use by the IS_{ENABLED,BUILTIN,MODULE} macros. The _MODULE variant is
545 * generated even for booleans so that the IS_ENABLED() macro works.
546 */
547static void
548header_print__enabled_symbol(FILE *fp, struct symbol *sym, const char *value, void *arg)
549{
550
551 switch (sym->type) {
552 case S_BOOLEAN:
553 case S_TRISTATE: {
554 fprintf(fp, "#define __enabled_" CONFIG_ "%s %d\n",
555 sym->name, (*value == 'y'));
556 fprintf(fp, "#define __enabled_" CONFIG_ "%s_MODULE %d\n",
557 sym->name, (*value == 'm'));
558 break;
559 }
560 default:
561 break;
562 }
563}
564
565static struct conf_printer header__enabled_printer_cb =
566{
567 .print_symbol = header_print__enabled_symbol,
568 .print_comment = header_print_comment,
569};
570
571/*
572 * Tristate printer 543 * Tristate printer
573 * 544 *
574 * This printer is used when generating the `include/config/tristate.conf' file. 545 * This printer is used when generating the `include/config/tristate.conf' file.
@@ -949,16 +920,11 @@ int conf_write_autoconf(void)
949 conf_write_heading(out_h, &header_printer_cb, NULL); 920 conf_write_heading(out_h, &header_printer_cb, NULL);
950 921
951 for_all_symbols(i, sym) { 922 for_all_symbols(i, sym) {
952 if (!sym->name)
953 continue;
954
955 sym_calc_value(sym); 923 sym_calc_value(sym);
956 924 if (!(sym->flags & SYMBOL_WRITE) || !sym->name)
957 conf_write_symbol(out_h, sym, &header__enabled_printer_cb, NULL);
958
959 if (!(sym->flags & SYMBOL_WRITE))
960 continue; 925 continue;
961 926
927 /* write symbol to auto.conf, tristate and header files */
962 conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1); 928 conf_write_symbol(out, sym, &kconfig_printer_cb, (void *)1);
963 929
964 conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1); 930 conf_write_symbol(tristate, sym, &tristate_printer_cb, (void *)1);
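
This confdata.c hunk deletes the generation of the __enabled_CONFIG_* helper macros, so IS_ENABLED() and friends must work straight from the plain CONFIG_FOO / CONFIG_FOO_MODULE definitions, including when an option is not defined at all. The following is roughly the preprocessor trick that replaced it, reconstructed from memory of include/linux/kconfig.h in this era, so treat the details as an approximation: a symbol defined to 1 pastes into a placeholder macro that smuggles in an extra argument.

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg) _config_enabled(cfg)
#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val

#define IS_BUILTIN(option) config_enabled(option)
#define IS_MODULE(option)  config_enabled(option##_MODULE)
#define IS_ENABLED(option) (IS_BUILTIN(option) || IS_MODULE(option))

#define CONFIG_FOO 1    /* as autoconf.h would define a builtin option */
/* CONFIG_BAR is deliberately never defined */

int main(void)
{
        /* prints "FOO=1 BAR=0": undefined options simply evaluate to 0 */
        printf("FOO=%d BAR=%d\n", IS_ENABLED(CONFIG_FOO), IS_ENABLED(CONFIG_BAR));
        return 0;
}

When CONFIG_FOO is 1, __ARG_PLACEHOLDER_1 expands to "0," and the innermost macro sees three arguments, picking 1; for an undefined option the paste produces an inert token, the comma never appears, and the default 0 is picked.
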
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 3f01fd908730..c4e7d1510f9d 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -132,8 +132,10 @@ static struct module *new_module(char *modname)
132 /* strip trailing .o */ 132 /* strip trailing .o */
133 s = strrchr(p, '.'); 133 s = strrchr(p, '.');
134 if (s != NULL) 134 if (s != NULL)
135 if (strcmp(s, ".o") == 0) 135 if (strcmp(s, ".o") == 0) {
136 *s = '\0'; 136 *s = '\0';
137 mod->is_dot_o = 1;
138 }
137 139
138 /* add to list */ 140 /* add to list */
139 mod->name = p; 141 mod->name = p;
@@ -587,7 +589,8 @@ static void handle_modversions(struct module *mod, struct elf_info *info,
587 unsigned int crc; 589 unsigned int crc;
588 enum export export; 590 enum export export;
589 591
590 if (!is_vmlinux(mod->name) && strncmp(symname, "__ksymtab", 9) == 0) 592 if ((!is_vmlinux(mod->name) || mod->is_dot_o) &&
593 strncmp(symname, "__ksymtab", 9) == 0)
591 export = export_from_secname(info, get_secindex(info, sym)); 594 export = export_from_secname(info, get_secindex(info, sym));
592 else 595 else
593 export = export_from_sec(info, get_secindex(info, sym)); 596 export = export_from_sec(info, get_secindex(info, sym));
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
index 2031119080dc..51207e4d5f8b 100644
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -113,6 +113,7 @@ struct module {
113 int has_cleanup; 113 int has_cleanup;
114 struct buffer dev_table_buf; 114 struct buffer dev_table_buf;
115 char srcversion[25]; 115 char srcversion[25];
116 int is_dot_o;
116}; 117};
117 118
118struct elf_info { 119struct elf_info {
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 81c03a597112..10056f2f6df3 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1939,18 +1939,19 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
1939 char *hostsp; 1939 char *hostsp;
1940 struct socket_smack *ssp = sk->sk_security; 1940 struct socket_smack *ssp = sk->sk_security;
1941 struct smk_audit_info ad; 1941 struct smk_audit_info ad;
1942 struct lsm_network_audit net;
1943 1942
1944 rcu_read_lock(); 1943 rcu_read_lock();
1945 hostsp = smack_host_label(sap); 1944 hostsp = smack_host_label(sap);
1946 if (hostsp != NULL) { 1945 if (hostsp != NULL) {
1947 sk_lbl = SMACK_UNLABELED_SOCKET;
1948#ifdef CONFIG_AUDIT 1946#ifdef CONFIG_AUDIT
1947 struct lsm_network_audit net;
1948
1949 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); 1949 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
1950 ad.a.u.net->family = sap->sin_family; 1950 ad.a.u.net->family = sap->sin_family;
1951 ad.a.u.net->dport = sap->sin_port; 1951 ad.a.u.net->dport = sap->sin_port;
1952 ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr; 1952 ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
1953#endif 1953#endif
1954 sk_lbl = SMACK_UNLABELED_SOCKET;
1954 rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad); 1955 rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
1955 } else { 1956 } else {
1956 sk_lbl = SMACK_CIPSO_SOCKET; 1957 sk_lbl = SMACK_CIPSO_SOCKET;
@@ -2809,11 +2810,14 @@ static int smack_unix_stream_connect(struct sock *sock,
2809 struct socket_smack *osp = other->sk_security; 2810 struct socket_smack *osp = other->sk_security;
2810 struct socket_smack *nsp = newsk->sk_security; 2811 struct socket_smack *nsp = newsk->sk_security;
2811 struct smk_audit_info ad; 2812 struct smk_audit_info ad;
2812 struct lsm_network_audit net;
2813 int rc = 0; 2813 int rc = 0;
2814 2814
2815#ifdef CONFIG_AUDIT
2816 struct lsm_network_audit net;
2817
2815 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); 2818 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
2816 smk_ad_setfield_u_net_sk(&ad, other); 2819 smk_ad_setfield_u_net_sk(&ad, other);
2820#endif
2817 2821
2818 if (!capable(CAP_MAC_OVERRIDE)) 2822 if (!capable(CAP_MAC_OVERRIDE))
2819 rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad); 2823 rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
@@ -2842,11 +2846,14 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
2842 struct socket_smack *ssp = sock->sk->sk_security; 2846 struct socket_smack *ssp = sock->sk->sk_security;
2843 struct socket_smack *osp = other->sk->sk_security; 2847 struct socket_smack *osp = other->sk->sk_security;
2844 struct smk_audit_info ad; 2848 struct smk_audit_info ad;
2845 struct lsm_network_audit net;
2846 int rc = 0; 2849 int rc = 0;
2847 2850
2851#ifdef CONFIG_AUDIT
2852 struct lsm_network_audit net;
2853
2848 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); 2854 smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
2849 smk_ad_setfield_u_net_sk(&ad, other->sk); 2855 smk_ad_setfield_u_net_sk(&ad, other->sk);
2856#endif
2850 2857
2851 if (!capable(CAP_MAC_OVERRIDE)) 2858 if (!capable(CAP_MAC_OVERRIDE))
2852 rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad); 2859 rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
@@ -2993,7 +3000,9 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
2993 char *csp; 3000 char *csp;
2994 int rc; 3001 int rc;
2995 struct smk_audit_info ad; 3002 struct smk_audit_info ad;
3003#ifdef CONFIG_AUDIT
2996 struct lsm_network_audit net; 3004 struct lsm_network_audit net;
3005#endif
2997 if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) 3006 if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
2998 return 0; 3007 return 0;
2999 3008
@@ -3156,7 +3165,9 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
3156 char *sp; 3165 char *sp;
3157 int rc; 3166 int rc;
3158 struct smk_audit_info ad; 3167 struct smk_audit_info ad;
3168#ifdef CONFIG_AUDIT
3159 struct lsm_network_audit net; 3169 struct lsm_network_audit net;
3170#endif
3160 3171
3161 /* handle mapped IPv4 packets arriving via IPv6 sockets */ 3172 /* handle mapped IPv4 packets arriving via IPv6 sockets */
3162 if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) 3173 if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
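
All of the Smack hunks above move the struct lsm_network_audit local under #ifdef CONFIG_AUDIT: with audit compiled out, smk_ad_init_net() collapses to a no-op, leaving the variable as an unused-variable warning and dead stack. A reduced sketch of the pattern with stand-in names (define AUDIT at compile time to get the full build):

#include <stddef.h>

struct audit_info { void *net; };

#ifdef AUDIT
struct net_ctx { int family; };
# define ad_init_net(ad, netp) ((ad)->net = (netp))
#else
# define ad_init_net(ad, netp) do { } while (0)  /* compiles away */
#endif

static int do_check(const struct audit_info *ad)
{
        return ad->net != NULL;
}

static int check_access(void)
{
        struct audit_info ad = { 0 };
#ifdef AUDIT
        struct net_ctx net;     /* exists only in the build that uses it */

        ad_init_net(&ad, &net);
#endif
        return do_check(&ad);
}

int main(void)
{
        return check_access();
}
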
diff --git a/sound/isa/sscape.c b/sound/isa/sscape.c
index b4a6aa960f4b..8490f59709bb 100644
--- a/sound/isa/sscape.c
+++ b/sound/isa/sscape.c
@@ -1019,13 +1019,15 @@ static int __devinit create_sscape(int dev, struct snd_card *card)
1019 irq_cfg = get_irq_config(sscape->type, irq[dev]); 1019 irq_cfg = get_irq_config(sscape->type, irq[dev]);
1020 if (irq_cfg == INVALID_IRQ) { 1020 if (irq_cfg == INVALID_IRQ) {
1021 snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", irq[dev]); 1021 snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", irq[dev]);
1022 return -ENXIO; 1022 err = -ENXIO;
1023 goto _release_dma;
1023 } 1024 }
1024 1025
1025 mpu_irq_cfg = get_irq_config(sscape->type, mpu_irq[dev]); 1026 mpu_irq_cfg = get_irq_config(sscape->type, mpu_irq[dev]);
1026 if (mpu_irq_cfg == INVALID_IRQ) { 1027 if (mpu_irq_cfg == INVALID_IRQ) {
1027 snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", mpu_irq[dev]); 1028 snd_printk(KERN_ERR "sscape: Invalid IRQ %d\n", mpu_irq[dev]);
1028 return -ENXIO; 1029 err = -ENXIO;
1030 goto _release_dma;
1029 } 1031 }
1030 1032
1031 /* 1033 /*
diff --git a/sound/oss/msnd_pinnacle.c b/sound/oss/msnd_pinnacle.c
index 2c79d60a725f..536c4c0514d3 100644
--- a/sound/oss/msnd_pinnacle.c
+++ b/sound/oss/msnd_pinnacle.c
@@ -1294,6 +1294,8 @@ static int __init calibrate_adc(WORD srate)
1294 1294
1295static int upload_dsp_code(void) 1295static int upload_dsp_code(void)
1296{ 1296{
1297 int ret = 0;
1298
1297 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS); 1299 msnd_outb(HPBLKSEL_0, dev.io + HP_BLKS);
1298#ifndef HAVE_DSPCODEH 1300#ifndef HAVE_DSPCODEH
1299 INITCODESIZE = mod_firmware_load(INITCODEFILE, &INITCODE); 1301 INITCODESIZE = mod_firmware_load(INITCODEFILE, &INITCODE);
@@ -1312,7 +1314,8 @@ static int upload_dsp_code(void)
1312 memcpy_toio(dev.base, PERMCODE, PERMCODESIZE); 1314 memcpy_toio(dev.base, PERMCODE, PERMCODESIZE);
1313 if (msnd_upload_host(&dev, INITCODE, INITCODESIZE) < 0) { 1315 if (msnd_upload_host(&dev, INITCODE, INITCODESIZE) < 0) {
1314 printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n"); 1316 printk(KERN_WARNING LOGNAME ": Error uploading to DSP\n");
1315 return -ENODEV; 1317 ret = -ENODEV;
1318 goto out;
1316 } 1319 }
1317#ifdef HAVE_DSPCODEH 1320#ifdef HAVE_DSPCODEH
1318 printk(KERN_INFO LOGNAME ": DSP firmware uploaded (resident)\n"); 1321 printk(KERN_INFO LOGNAME ": DSP firmware uploaded (resident)\n");
@@ -1320,12 +1323,13 @@ static int upload_dsp_code(void)
1320 printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n"); 1323 printk(KERN_INFO LOGNAME ": DSP firmware uploaded\n");
1321#endif 1324#endif
1322 1325
1326out:
1323#ifndef HAVE_DSPCODEH 1327#ifndef HAVE_DSPCODEH
1324 vfree(INITCODE); 1328 vfree(INITCODE);
1325 vfree(PERMCODE); 1329 vfree(PERMCODE);
1326#endif 1330#endif
1327 1331
1328 return 0; 1332 return ret;
1329} 1333}
1330 1334
1331#ifdef MSND_CLASSIC 1335#ifdef MSND_CLASSIC
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 88168044375f..5ca0939e4223 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -2,8 +2,8 @@
2 2
3config SND_TEA575X 3config SND_TEA575X
4 tristate 4 tristate
5 depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 5 depends on SND_FM801_TEA575X_BOOL || SND_ES1968_RADIO || RADIO_SF16FMR2 || RADIO_MAXIRADIO
6 default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 6 default SND_FM801 || SND_ES1968 || RADIO_SF16FMR2 || RADIO_MAXIRADIO
7 7
8menuconfig SND_PCI 8menuconfig SND_PCI
9 bool "PCI sound devices" 9 bool "PCI sound devices"
diff --git a/sound/pci/asihpi/hpi_internal.h b/sound/pci/asihpi/hpi_internal.h
index 8c63200cf339..bc86cb726d79 100644
--- a/sound/pci/asihpi/hpi_internal.h
+++ b/sound/pci/asihpi/hpi_internal.h
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 2
3 AudioScience HPI driver 3 AudioScience HPI driver
4 Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com> 4 Copyright (C) 1997-2012 AudioScience Inc. <support@audioscience.com>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of version 2 of the GNU General Public License as 7 it under the terms of version 2 of the GNU General Public License as
@@ -42,7 +42,7 @@ On error *pLockedMemHandle marked invalid, non-zero returned.
42If this function succeeds, then HpiOs_LockedMem_GetVirtAddr() and 42If this function succeeds, then HpiOs_LockedMem_GetVirtAddr() and
43HpiOs_LockedMem_GetPyhsAddr() will always succeed on the returned handle. 43HpiOs_LockedMem_GetPyhsAddr() will always succeed on the returned handle.
44*/ 44*/
45int hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle, 45u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_locked_mem_handle,
46 /**< memory handle */ 46 /**< memory handle */
47 u32 size, /**< Size in bytes to allocate */ 47 u32 size, /**< Size in bytes to allocate */
48 struct pci_dev *p_os_reference 48 struct pci_dev *p_os_reference
diff --git a/sound/pci/asihpi/hpios.c b/sound/pci/asihpi/hpios.c
index 87f4385fe8c7..5ef4fe964366 100644
--- a/sound/pci/asihpi/hpios.c
+++ b/sound/pci/asihpi/hpios.c
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 2
3 AudioScience HPI driver 3 AudioScience HPI driver
4 Copyright (C) 1997-2011 AudioScience Inc. <support@audioscience.com> 4 Copyright (C) 1997-2012 AudioScience Inc. <support@audioscience.com>
5 5
6 This program is free software; you can redistribute it and/or modify 6 This program is free software; you can redistribute it and/or modify
7 it under the terms of version 2 of the GNU General Public License as 7 it under the terms of version 2 of the GNU General Public License as
@@ -39,11 +39,11 @@ void hpios_delay_micro_seconds(u32 num_micro_sec)
39 39
40} 40}
41 41
42/** Allocated an area of locked memory for bus master DMA operations. 42/** Allocate an area of locked memory for bus master DMA operations.
43 43
44On error, return -ENOMEM, and *pMemArea.size = 0 44If allocation fails, return 1, and *pMemArea.size = 0
45*/ 45*/
46int hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size, 46u16 hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
47 struct pci_dev *pdev) 47 struct pci_dev *pdev)
48{ 48{
49 /*?? any benefit in using managed dmam_alloc_coherent? */ 49 /*?? any benefit in using managed dmam_alloc_coherent? */
@@ -62,7 +62,7 @@ int hpios_locked_mem_alloc(struct consistent_dma_area *p_mem_area, u32 size,
62 HPI_DEBUG_LOG(WARNING, 62 HPI_DEBUG_LOG(WARNING,
63 "failed to allocate %d bytes locked memory\n", size); 63 "failed to allocate %d bytes locked memory\n", size);
64 p_mem_area->size = 0; 64 p_mem_area->size = 0;
65 return -ENOMEM; 65 return 1;
66 } 66 }
67} 67}
68 68
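
The hpios_locked_mem_alloc() signature changes from int (returning -ENOMEM) to u16 (returning 1) because the HPI layer carries its own 16-bit status codes, where 0 means success, and an unsigned return cannot represent a negative errno anyway. A sketch of keeping the two conventions apart and translating only at the kernel boundary (the names and the single status code are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

typedef uint16_t u16;

/* HPI-style status: 0 == success, nonzero == driver-specific code */
static u16 hpi_mem_alloc(void **out, size_t size)
{
        *out = malloc(size);    /* stands in for dma_alloc_coherent() */
        return *out ? 0 : 1;
}

/* boundary code translates the status into an errno exactly once */
static int probe(void)
{
        void *area;

        if (hpi_mem_alloc(&area, 4096))
                return -ENOMEM;
        free(area);
        return 0;
}

int main(void)
{
        return probe();
}
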
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 9a9f372e1be4..56b4f74c0b13 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -851,6 +851,9 @@ struct hda_codec {
851 unsigned int pin_amp_workaround:1; /* pin out-amp takes index 851 unsigned int pin_amp_workaround:1; /* pin out-amp takes index
852 * (e.g. Conexant codecs) 852 * (e.g. Conexant codecs)
853 */ 853 */
854 unsigned int single_adc_amp:1; /* adc in-amp takes no index
855 * (e.g. CX20549 codec)
856 */
854 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */ 857 unsigned int no_sticky_stream:1; /* no sticky-PCM stream assignment */
855 unsigned int pins_shutup:1; /* pins are shut up */ 858 unsigned int pins_shutup:1; /* pins are shut up */
856 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */ 859 unsigned int no_trigger_sense:1; /* don't trigger at pin-sensing */
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index b58b4b1687fa..4c054f4486b9 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -418,7 +418,7 @@ static void hdmi_show_short_audio_desc(struct cea_sad *a)
418 else 418 else
419 buf2[0] = '\0'; 419 buf2[0] = '\0';
420 420
421 printk(KERN_INFO "HDMI: supports coding type %s:" 421 _snd_printd(SND_PR_VERBOSE, "HDMI: supports coding type %s:"
422 " channels = %d, rates =%s%s\n", 422 " channels = %d, rates =%s%s\n",
423 cea_audio_coding_type_names[a->format], 423 cea_audio_coding_type_names[a->format],
424 a->channels, 424 a->channels,
@@ -442,14 +442,14 @@ void snd_hdmi_show_eld(struct hdmi_eld *e)
442{ 442{
443 int i; 443 int i;
444 444
445 printk(KERN_INFO "HDMI: detected monitor %s at connection type %s\n", 445 _snd_printd(SND_PR_VERBOSE, "HDMI: detected monitor %s at connection type %s\n",
446 e->monitor_name, 446 e->monitor_name,
447 eld_connection_type_names[e->conn_type]); 447 eld_connection_type_names[e->conn_type]);
448 448
449 if (e->spk_alloc) { 449 if (e->spk_alloc) {
450 char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE]; 450 char buf[SND_PRINT_CHANNEL_ALLOCATION_ADVISED_BUFSIZE];
451 snd_print_channel_allocation(e->spk_alloc, buf, sizeof(buf)); 451 snd_print_channel_allocation(e->spk_alloc, buf, sizeof(buf));
452 printk(KERN_INFO "HDMI: available speakers:%s\n", buf); 452 _snd_printd(SND_PR_VERBOSE, "HDMI: available speakers:%s\n", buf);
453 } 453 }
454 454
455 for (i = 0; i < e->sad_count; i++) 455 for (i = 0; i < e->sad_count; i++)
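
hda_eld.c demotes its ELD dumps from unconditional KERN_INFO printk() to _snd_printd(SND_PR_VERBOSE, ...), so per-monitor detail only appears when verbose ALSA debugging is enabled. The general shape of such a level-gated print macro, sketched with ordinary stdio and the GNU ##__VA_ARGS__ extension that kernel code also uses; the real macro is config- and kernel-specific.

#include <stdio.h>

enum { PR_NORMAL, PR_VERBOSE };

static int debug_level = PR_NORMAL;     /* e.g. set by a module parameter */

/* emit the message only when its level is currently enabled */
#define dbg_printd(level, fmt, ...)                                 \
        do {                                                        \
                if ((level) <= debug_level)                         \
                        fprintf(stderr, fmt, ##__VA_ARGS__);        \
        } while (0)

int main(void)
{
        dbg_printd(PR_VERBOSE, "HDMI: %d SADs\n", 3);   /* suppressed */
        debug_level = PR_VERBOSE;
        dbg_printd(PR_VERBOSE, "HDMI: %d SADs\n", 3);   /* printed */
        return 0;
}
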
diff --git a/sound/pci/hda/hda_proc.c b/sound/pci/hda/hda_proc.c
index 254ab5204603..e59e2f059b6e 100644
--- a/sound/pci/hda/hda_proc.c
+++ b/sound/pci/hda/hda_proc.c
@@ -651,9 +651,16 @@ static void print_codec_info(struct snd_info_entry *entry,
651 snd_iprintf(buffer, " Amp-In caps: "); 651 snd_iprintf(buffer, " Amp-In caps: ");
652 print_amp_caps(buffer, codec, nid, HDA_INPUT); 652 print_amp_caps(buffer, codec, nid, HDA_INPUT);
653 snd_iprintf(buffer, " Amp-In vals: "); 653 snd_iprintf(buffer, " Amp-In vals: ");
654 print_amp_vals(buffer, codec, nid, HDA_INPUT, 654 if (wid_type == AC_WID_PIN ||
655 wid_caps & AC_WCAP_STEREO, 655 (codec->single_adc_amp &&
656 wid_type == AC_WID_PIN ? 1 : conn_len); 656 wid_type == AC_WID_AUD_IN))
657 print_amp_vals(buffer, codec, nid, HDA_INPUT,
658 wid_caps & AC_WCAP_STEREO,
659 1);
660 else
661 print_amp_vals(buffer, codec, nid, HDA_INPUT,
662 wid_caps & AC_WCAP_STEREO,
663 conn_len);
657 } 664 }
658 if (wid_caps & AC_WCAP_OUT_AMP) { 665 if (wid_caps & AC_WCAP_OUT_AMP) {
659 snd_iprintf(buffer, " Amp-Out caps: "); 666 snd_iprintf(buffer, " Amp-Out caps: ");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 8c6523bbc797..a36488d94aaa 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -141,7 +141,6 @@ struct conexant_spec {
141 unsigned int hp_laptop:1; 141 unsigned int hp_laptop:1;
142 unsigned int asus:1; 142 unsigned int asus:1;
143 unsigned int pin_eapd_ctrls:1; 143 unsigned int pin_eapd_ctrls:1;
144 unsigned int single_adc_amp:1;
145 144
146 unsigned int adc_switching:1; 145 unsigned int adc_switching:1;
147 146
@@ -687,27 +686,26 @@ static const struct hda_channel_mode cxt5045_modes[1] = {
687static const struct hda_input_mux cxt5045_capture_source = { 686static const struct hda_input_mux cxt5045_capture_source = {
688 .num_items = 2, 687 .num_items = 2,
689 .items = { 688 .items = {
690 { "IntMic", 0x1 }, 689 { "Internal Mic", 0x1 },
691 { "ExtMic", 0x2 }, 690 { "Mic", 0x2 },
692 } 691 }
693}; 692};
694 693
695static const struct hda_input_mux cxt5045_capture_source_benq = { 694static const struct hda_input_mux cxt5045_capture_source_benq = {
696 .num_items = 5, 695 .num_items = 4,
697 .items = { 696 .items = {
698 { "IntMic", 0x1 }, 697 { "Internal Mic", 0x1 },
699 { "ExtMic", 0x2 }, 698 { "Mic", 0x2 },
700 { "LineIn", 0x3 }, 699 { "Line", 0x3 },
701 { "CD", 0x4 }, 700 { "Mixer", 0x0 },
702 { "Mixer", 0x0 },
703 } 701 }
704}; 702};
705 703
706static const struct hda_input_mux cxt5045_capture_source_hp530 = { 704static const struct hda_input_mux cxt5045_capture_source_hp530 = {
707 .num_items = 2, 705 .num_items = 2,
708 .items = { 706 .items = {
709 { "ExtMic", 0x1 }, 707 { "Mic", 0x1 },
710 { "IntMic", 0x2 }, 708 { "Internal Mic", 0x2 },
711 } 709 }
712}; 710};
713 711
@@ -798,10 +796,8 @@ static void cxt5045_hp_unsol_event(struct hda_codec *codec,
798} 796}
799 797
800static const struct snd_kcontrol_new cxt5045_mixers[] = { 798static const struct snd_kcontrol_new cxt5045_mixers[] = {
801 HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x01, HDA_INPUT), 799 HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
802 HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x01, HDA_INPUT), 800 HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
803 HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x02, HDA_INPUT),
804 HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x02, HDA_INPUT),
805 HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT), 801 HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
806 HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT), 802 HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
807 HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT), 803 HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x1, HDA_INPUT),
@@ -822,27 +818,15 @@ static const struct snd_kcontrol_new cxt5045_mixers[] = {
822}; 818};
823 819
824static const struct snd_kcontrol_new cxt5045_benq_mixers[] = { 820static const struct snd_kcontrol_new cxt5045_benq_mixers[] = {
825 HDA_CODEC_VOLUME("CD Capture Volume", 0x1a, 0x04, HDA_INPUT), 821 HDA_CODEC_VOLUME("Line Playback Volume", 0x17, 0x3, HDA_INPUT),
826 HDA_CODEC_MUTE("CD Capture Switch", 0x1a, 0x04, HDA_INPUT), 822 HDA_CODEC_MUTE("Line Playback Switch", 0x17, 0x3, HDA_INPUT),
827 HDA_CODEC_VOLUME("CD Playback Volume", 0x17, 0x4, HDA_INPUT),
828 HDA_CODEC_MUTE("CD Playback Switch", 0x17, 0x4, HDA_INPUT),
829
830 HDA_CODEC_VOLUME("Line In Capture Volume", 0x1a, 0x03, HDA_INPUT),
831 HDA_CODEC_MUTE("Line In Capture Switch", 0x1a, 0x03, HDA_INPUT),
832 HDA_CODEC_VOLUME("Line In Playback Volume", 0x17, 0x3, HDA_INPUT),
833 HDA_CODEC_MUTE("Line In Playback Switch", 0x17, 0x3, HDA_INPUT),
834
835 HDA_CODEC_VOLUME("Mixer Capture Volume", 0x1a, 0x0, HDA_INPUT),
836 HDA_CODEC_MUTE("Mixer Capture Switch", 0x1a, 0x0, HDA_INPUT),
837 823
838 {} 824 {}
839}; 825};
840 826
841static const struct snd_kcontrol_new cxt5045_mixers_hp530[] = { 827static const struct snd_kcontrol_new cxt5045_mixers_hp530[] = {
842 HDA_CODEC_VOLUME("Internal Mic Capture Volume", 0x1a, 0x02, HDA_INPUT), 828 HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x00, HDA_INPUT),
843 HDA_CODEC_MUTE("Internal Mic Capture Switch", 0x1a, 0x02, HDA_INPUT), 829 HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
844 HDA_CODEC_VOLUME("Mic Capture Volume", 0x1a, 0x01, HDA_INPUT),
845 HDA_CODEC_MUTE("Mic Capture Switch", 0x1a, 0x01, HDA_INPUT),
846 HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT), 830 HDA_CODEC_VOLUME("PCM Playback Volume", 0x17, 0x0, HDA_INPUT),
847 HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT), 831 HDA_CODEC_MUTE("PCM Playback Switch", 0x17, 0x0, HDA_INPUT),
848 HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT), 832 HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0x2, HDA_INPUT),
@@ -946,10 +930,10 @@ static const struct snd_kcontrol_new cxt5045_test_mixer[] = {
946 /* Output controls */ 930 /* Output controls */
947 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x10, 0x0, HDA_OUTPUT), 931 HDA_CODEC_VOLUME("Speaker Playback Volume", 0x10, 0x0, HDA_OUTPUT),
948 HDA_CODEC_MUTE("Speaker Playback Switch", 0x10, 0x0, HDA_OUTPUT), 932 HDA_CODEC_MUTE("Speaker Playback Switch", 0x10, 0x0, HDA_OUTPUT),
949 HDA_CODEC_VOLUME("Node 11 Playback Volume", 0x11, 0x0, HDA_OUTPUT), 933 HDA_CODEC_VOLUME("HP-OUT Playback Volume", 0x11, 0x0, HDA_OUTPUT),
950 HDA_CODEC_MUTE("Node 11 Playback Switch", 0x11, 0x0, HDA_OUTPUT), 934 HDA_CODEC_MUTE("HP-OUT Playback Switch", 0x11, 0x0, HDA_OUTPUT),
951 HDA_CODEC_VOLUME("Node 12 Playback Volume", 0x12, 0x0, HDA_OUTPUT), 935 HDA_CODEC_VOLUME("LINE1 Playback Volume", 0x12, 0x0, HDA_OUTPUT),
952 HDA_CODEC_MUTE("Node 12 Playback Switch", 0x12, 0x0, HDA_OUTPUT), 936 HDA_CODEC_MUTE("LINE1 Playback Switch", 0x12, 0x0, HDA_OUTPUT),
953 937
954 /* Modes for retasking pin widgets */ 938 /* Modes for retasking pin widgets */
955 CXT_PIN_MODE("HP-OUT pin mode", 0x11, CXT_PIN_DIR_INOUT), 939 CXT_PIN_MODE("HP-OUT pin mode", 0x11, CXT_PIN_DIR_INOUT),
@@ -960,16 +944,16 @@ static const struct snd_kcontrol_new cxt5045_test_mixer[] = {
960 944
961 /* Loopback mixer controls */ 945 /* Loopback mixer controls */
962 946
963 HDA_CODEC_VOLUME("Mixer-1 Volume", 0x17, 0x0, HDA_INPUT), 947 HDA_CODEC_VOLUME("PCM Volume", 0x17, 0x0, HDA_INPUT),
964 HDA_CODEC_MUTE("Mixer-1 Switch", 0x17, 0x0, HDA_INPUT), 948 HDA_CODEC_MUTE("PCM Switch", 0x17, 0x0, HDA_INPUT),
965 HDA_CODEC_VOLUME("Mixer-2 Volume", 0x17, 0x1, HDA_INPUT), 949 HDA_CODEC_VOLUME("MIC1 pin Volume", 0x17, 0x1, HDA_INPUT),
966 HDA_CODEC_MUTE("Mixer-2 Switch", 0x17, 0x1, HDA_INPUT), 950 HDA_CODEC_MUTE("MIC1 pin Switch", 0x17, 0x1, HDA_INPUT),
967 HDA_CODEC_VOLUME("Mixer-3 Volume", 0x17, 0x2, HDA_INPUT), 951 HDA_CODEC_VOLUME("LINE1 pin Volume", 0x17, 0x2, HDA_INPUT),
968 HDA_CODEC_MUTE("Mixer-3 Switch", 0x17, 0x2, HDA_INPUT), 952 HDA_CODEC_MUTE("LINE1 pin Switch", 0x17, 0x2, HDA_INPUT),
969 HDA_CODEC_VOLUME("Mixer-4 Volume", 0x17, 0x3, HDA_INPUT), 953 HDA_CODEC_VOLUME("HP-OUT pin Volume", 0x17, 0x3, HDA_INPUT),
970 HDA_CODEC_MUTE("Mixer-4 Switch", 0x17, 0x3, HDA_INPUT), 954 HDA_CODEC_MUTE("HP-OUT pin Switch", 0x17, 0x3, HDA_INPUT),
971 HDA_CODEC_VOLUME("Mixer-5 Volume", 0x17, 0x4, HDA_INPUT), 955 HDA_CODEC_VOLUME("CD pin Volume", 0x17, 0x4, HDA_INPUT),
972 HDA_CODEC_MUTE("Mixer-5 Switch", 0x17, 0x4, HDA_INPUT), 956 HDA_CODEC_MUTE("CD pin Switch", 0x17, 0x4, HDA_INPUT),
973 { 957 {
974 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, 958 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
975 .name = "Input Source", 959 .name = "Input Source",
@@ -978,16 +962,8 @@ static const struct snd_kcontrol_new cxt5045_test_mixer[] = {
978 .put = conexant_mux_enum_put, 962 .put = conexant_mux_enum_put,
979 }, 963 },
980 /* Audio input controls */ 964 /* Audio input controls */
981 HDA_CODEC_VOLUME("Input-1 Volume", 0x1a, 0x0, HDA_INPUT), 965 HDA_CODEC_VOLUME("Capture Volume", 0x1a, 0x0, HDA_INPUT),
982 HDA_CODEC_MUTE("Input-1 Switch", 0x1a, 0x0, HDA_INPUT), 966 HDA_CODEC_MUTE("Capture Switch", 0x1a, 0x0, HDA_INPUT),
983 HDA_CODEC_VOLUME("Input-2 Volume", 0x1a, 0x1, HDA_INPUT),
984 HDA_CODEC_MUTE("Input-2 Switch", 0x1a, 0x1, HDA_INPUT),
985 HDA_CODEC_VOLUME("Input-3 Volume", 0x1a, 0x2, HDA_INPUT),
986 HDA_CODEC_MUTE("Input-3 Switch", 0x1a, 0x2, HDA_INPUT),
987 HDA_CODEC_VOLUME("Input-4 Volume", 0x1a, 0x3, HDA_INPUT),
988 HDA_CODEC_MUTE("Input-4 Switch", 0x1a, 0x3, HDA_INPUT),
989 HDA_CODEC_VOLUME("Input-5 Volume", 0x1a, 0x4, HDA_INPUT),
990 HDA_CODEC_MUTE("Input-5 Switch", 0x1a, 0x4, HDA_INPUT),
991 { } /* end */ 967 { } /* end */
992}; 968};
993 969
@@ -1009,10 +985,6 @@ static const struct hda_verb cxt5045_test_init_verbs[] = {
1009 {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT}, 985 {0x13, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1010 {0x18, AC_VERB_SET_DIGI_CONVERT_1, 0}, 986 {0x18, AC_VERB_SET_DIGI_CONVERT_1, 0},
1011 987
1012 /* Start with output sum widgets muted and their output gains at min */
1013 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1014 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1015
1016 /* Unmute retasking pin widget output buffers since the default 988 /* Unmute retasking pin widget output buffers since the default
1017 * state appears to be output. As the pin mode is changed by the 989 * state appears to be output. As the pin mode is changed by the
1018 * user the pin mode control will take care of enabling the pin's 990 * user the pin mode control will take care of enabling the pin's
@@ -1027,11 +999,11 @@ static const struct hda_verb cxt5045_test_init_verbs[] = {
1027 /* Set ADC connection select to match default mixer setting (mic1 999 /* Set ADC connection select to match default mixer setting (mic1
1028 * pin) 1000 * pin)
1029 */ 1001 */
1030 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x00}, 1002 {0x1a, AC_VERB_SET_CONNECT_SEL, 0x01},
1031 {0x17, AC_VERB_SET_CONNECT_SEL, 0x00}, 1003 {0x17, AC_VERB_SET_CONNECT_SEL, 0x01},
1032 1004
1033 /* Mute all inputs to mixer widget (even unconnected ones) */ 1005 /* Mute all inputs to mixer widget (even unconnected ones) */
1034 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer pin */ 1006 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)}, /* Mixer */
1035 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, /* Mic1 pin */ 1007 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)}, /* Mic1 pin */
1036 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, /* Line pin */ 1008 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(2)}, /* Line pin */
1037 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, /* HP pin */ 1009 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(3)}, /* HP pin */
@@ -1110,7 +1082,7 @@ static int patch_cxt5045(struct hda_codec *codec)
1110 if (!spec) 1082 if (!spec)
1111 return -ENOMEM; 1083 return -ENOMEM;
1112 codec->spec = spec; 1084 codec->spec = spec;
1113 codec->pin_amp_workaround = 1; 1085 codec->single_adc_amp = 1;
1114 1086
1115 spec->multiout.max_channels = 2; 1087 spec->multiout.max_channels = 2;
1116 spec->multiout.num_dacs = ARRAY_SIZE(cxt5045_dac_nids); 1088 spec->multiout.num_dacs = ARRAY_SIZE(cxt5045_dac_nids);
@@ -4220,7 +4192,7 @@ static int cx_auto_add_capture_volume(struct hda_codec *codec, hda_nid_t nid,
4220 int idx = get_input_connection(codec, adc_nid, nid); 4192 int idx = get_input_connection(codec, adc_nid, nid);
4221 if (idx < 0) 4193 if (idx < 0)
4222 continue; 4194 continue;
4223 if (spec->single_adc_amp) 4195 if (codec->single_adc_amp)
4224 idx = 0; 4196 idx = 0;
4225 return cx_auto_add_volume_idx(codec, label, pfx, 4197 return cx_auto_add_volume_idx(codec, label, pfx,
4226 cidx, adc_nid, HDA_INPUT, idx); 4198 cidx, adc_nid, HDA_INPUT, idx);
@@ -4275,7 +4247,7 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
4275 if (cidx < 0) 4247 if (cidx < 0)
4276 continue; 4248 continue;
4277 input_conn[i] = spec->imux_info[i].adc; 4249 input_conn[i] = spec->imux_info[i].adc;
4278 if (!spec->single_adc_amp) 4250 if (!codec->single_adc_amp)
4279 input_conn[i] |= cidx << 8; 4251 input_conn[i] |= cidx << 8;
4280 if (i > 0 && input_conn[i] != input_conn[0]) 4252 if (i > 0 && input_conn[i] != input_conn[0])
4281 multi_connection = 1; 4253 multi_connection = 1;
@@ -4466,15 +4438,17 @@ static int patch_conexant_auto(struct hda_codec *codec)
4466 if (!spec) 4438 if (!spec)
4467 return -ENOMEM; 4439 return -ENOMEM;
4468 codec->spec = spec; 4440 codec->spec = spec;
4469 codec->pin_amp_workaround = 1;
4470 4441
4471 switch (codec->vendor_id) { 4442 switch (codec->vendor_id) {
4472 case 0x14f15045: 4443 case 0x14f15045:
4473 spec->single_adc_amp = 1; 4444 codec->single_adc_amp = 1;
4474 break; 4445 break;
4475 case 0x14f15051: 4446 case 0x14f15051:
4476 add_cx5051_fake_mutes(codec); 4447 add_cx5051_fake_mutes(codec);
4448 codec->pin_amp_workaround = 1;
4477 break; 4449 break;
4450 default:
4451 codec->pin_amp_workaround = 1;
4478 } 4452 }
4479 4453
4480 apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl); 4454 apply_pin_fixup(codec, cxt_fixups, cxt_pincfg_tbl);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 540cd13f7f15..83f345f3c961 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -757,8 +757,6 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
757 struct hdmi_spec *spec = codec->spec; 757 struct hdmi_spec *spec = codec->spec;
758 int tag = res >> AC_UNSOL_RES_TAG_SHIFT; 758 int tag = res >> AC_UNSOL_RES_TAG_SHIFT;
759 int pin_nid; 759 int pin_nid;
760 int pd = !!(res & AC_UNSOL_RES_PD);
761 int eldv = !!(res & AC_UNSOL_RES_ELDV);
762 int pin_idx; 760 int pin_idx;
763 struct hda_jack_tbl *jack; 761 struct hda_jack_tbl *jack;
764 762
@@ -768,9 +766,10 @@ static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
768 pin_nid = jack->nid; 766 pin_nid = jack->nid;
769 jack->jack_dirty = 1; 767 jack->jack_dirty = 1;
770 768
771 printk(KERN_INFO 769 _snd_printd(SND_PR_VERBOSE,
772 "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 770 "HDMI hot plug event: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
773 codec->addr, pin_nid, pd, eldv); 771 codec->addr, pin_nid,
772 !!(res & AC_UNSOL_RES_PD), !!(res & AC_UNSOL_RES_ELDV));
774 773
775 pin_idx = pin_nid_to_pin_index(spec, pin_nid); 774 pin_idx = pin_nid_to_pin_index(spec, pin_nid);
776 if (pin_idx < 0) 775 if (pin_idx < 0)
@@ -992,7 +991,7 @@ static void hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
992 if (eld->monitor_present) 991 if (eld->monitor_present)
993 eld_valid = !!(present & AC_PINSENSE_ELDV); 992 eld_valid = !!(present & AC_PINSENSE_ELDV);
994 993
995 printk(KERN_INFO 994 _snd_printd(SND_PR_VERBOSE,
996 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n", 995 "HDMI status: Codec=%d Pin=%d Presence_Detect=%d ELD_Valid=%d\n",
997 codec->addr, pin_nid, eld->monitor_present, eld_valid); 996 codec->addr, pin_nid, eld->monitor_present, eld_valid);
998 997
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9917e55d6f11..2508f8109f11 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3398,8 +3398,10 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
3398 for (;;) { 3398 for (;;) {
3399 badness = fill_and_eval_dacs(codec, fill_hardwired, 3399 badness = fill_and_eval_dacs(codec, fill_hardwired,
3400 fill_mio_first); 3400 fill_mio_first);
3401 if (badness < 0) 3401 if (badness < 0) {
3402 kfree(best_cfg);
3402 return badness; 3403 return badness;
3404 }
3403 debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n", 3405 debug_badness("==> lo_type=%d, wired=%d, mio=%d, badness=0x%x\n",
3404 cfg->line_out_type, fill_hardwired, fill_mio_first, 3406 cfg->line_out_type, fill_hardwired, fill_mio_first,
3405 badness); 3407 badness);
@@ -3434,7 +3436,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
3434 cfg->line_out_type = AUTO_PIN_SPEAKER_OUT; 3436 cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
3435 fill_hardwired = true; 3437 fill_hardwired = true;
3436 continue; 3438 continue;
3437 } 3439 }
3438 if (cfg->hp_outs > 0 && 3440 if (cfg->hp_outs > 0 &&
3439 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) { 3441 cfg->line_out_type == AUTO_PIN_SPEAKER_OUT) {
3440 cfg->speaker_outs = cfg->line_outs; 3442 cfg->speaker_outs = cfg->line_outs;
@@ -3448,7 +3450,7 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec)
3448 cfg->line_out_type = AUTO_PIN_HP_OUT; 3450 cfg->line_out_type = AUTO_PIN_HP_OUT;
3449 fill_hardwired = true; 3451 fill_hardwired = true;
3450 continue; 3452 continue;
3451 } 3453 }
3452 break; 3454 break;
3453 } 3455 }
3454 3456
@@ -4423,7 +4425,7 @@ static int alc_parse_auto_config(struct hda_codec *codec,
4423static int alc880_parse_auto_config(struct hda_codec *codec) 4425static int alc880_parse_auto_config(struct hda_codec *codec)
4424{ 4426{
4425 static const hda_nid_t alc880_ignore[] = { 0x1d, 0 }; 4427 static const hda_nid_t alc880_ignore[] = { 0x1d, 0 };
4426 static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 }; 4428 static const hda_nid_t alc880_ssids[] = { 0x15, 0x1b, 0x14, 0 };
4427 return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids); 4429 return alc_parse_auto_config(codec, alc880_ignore, alc880_ssids);
4428} 4430}
4429 4431
@@ -5269,7 +5271,9 @@ static const struct alc_fixup alc882_fixups[] = {
5269 { 0x16, 0x99130111 }, /* CLFE speaker */ 5271 { 0x16, 0x99130111 }, /* CLFE speaker */
5270 { 0x17, 0x99130112 }, /* surround speaker */ 5272 { 0x17, 0x99130112 }, /* surround speaker */
5271 { } 5273 { }
5272 } 5274 },
5275 .chained = true,
5276 .chain_id = ALC882_FIXUP_GPIO1,
5273 }, 5277 },
5274 [ALC882_FIXUP_ACER_ASPIRE_8930G] = { 5278 [ALC882_FIXUP_ACER_ASPIRE_8930G] = {
5275 .type = ALC_FIXUP_PINS, 5279 .type = ALC_FIXUP_PINS,
@@ -5312,7 +5316,9 @@ static const struct alc_fixup alc882_fixups[] = {
5312 { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 }, 5316 { 0x20, AC_VERB_SET_COEF_INDEX, 0x07 },
5313 { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 }, 5317 { 0x20, AC_VERB_SET_PROC_COEF, 0x3050 },
5314 { } 5318 { }
5315 } 5319 },
5320 .chained = true,
5321 .chain_id = ALC882_FIXUP_GPIO1,
5316 }, 5322 },
5317 [ALC885_FIXUP_MACPRO_GPIO] = { 5323 [ALC885_FIXUP_MACPRO_GPIO] = {
5318 .type = ALC_FIXUP_FUNC, 5324 .type = ALC_FIXUP_FUNC,
@@ -5359,6 +5365,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5359 ALC882_FIXUP_ACER_ASPIRE_4930G), 5365 ALC882_FIXUP_ACER_ASPIRE_4930G),
5360 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210), 5366 SND_PCI_QUIRK(0x1025, 0x0155, "Packard-Bell M5120", ALC882_FIXUP_PB_M5210),
5361 SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE), 5367 SND_PCI_QUIRK(0x1025, 0x0259, "Acer Aspire 5935", ALC889_FIXUP_DAC_ROUTE),
5368 SND_PCI_QUIRK(0x1025, 0x026b, "Acer Aspire 8940G", ALC882_FIXUP_ACER_ASPIRE_8930G),
5362 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736), 5369 SND_PCI_QUIRK(0x1025, 0x0296, "Acer Aspire 7736z", ALC882_FIXUP_ACER_ASPIRE_7736),
5363 SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD), 5370 SND_PCI_QUIRK(0x1043, 0x13c2, "Asus A7M", ALC882_FIXUP_EAPD),
5364 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V), 5371 SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
@@ -5384,6 +5391,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5384 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF), 5391 SND_PCI_QUIRK(0x106b, 0x3f00, "Macbook 5,1", ALC889_FIXUP_IMAC91_VREF),
5385 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF), 5392 SND_PCI_QUIRK(0x106b, 0x4000, "MacbookPro 5,1", ALC889_FIXUP_IMAC91_VREF),
5386 SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF), 5393 SND_PCI_QUIRK(0x106b, 0x4100, "Macmini 3,1", ALC889_FIXUP_IMAC91_VREF),
5394 SND_PCI_QUIRK(0x106b, 0x4200, "Mac Pro 5,1", ALC885_FIXUP_MACPRO_GPIO),
5387 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), 5395 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
5388 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), 5396 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
5389 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 5397 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF),
@@ -5399,6 +5407,13 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
5399 {} 5407 {}
5400}; 5408};
5401 5409
5410static const struct alc_model_fixup alc882_fixup_models[] = {
5411 {.id = ALC882_FIXUP_ACER_ASPIRE_4930G, .name = "acer-aspire-4930g"},
5412 {.id = ALC882_FIXUP_ACER_ASPIRE_8930G, .name = "acer-aspire-8930g"},
5413 {.id = ALC883_FIXUP_ACER_EAPD, .name = "acer-aspire"},
5414 {}
5415};
5416
5402/* 5417/*
5403 * BIOS auto configuration 5418 * BIOS auto configuration
5404 */ 5419 */
@@ -5439,7 +5454,8 @@ static int patch_alc882(struct hda_codec *codec)
5439 if (err < 0) 5454 if (err < 0)
5440 goto error; 5455 goto error;
5441 5456
5442 alc_pick_fixup(codec, NULL, alc882_fixup_tbl, alc882_fixups); 5457 alc_pick_fixup(codec, alc882_fixup_models, alc882_fixup_tbl,
5458 alc882_fixups);
5443 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE); 5459 alc_apply_fixup(codec, ALC_FIXUP_ACT_PRE_PROBE);
5444 5460
5445 alc_auto_parse_customize_define(codec); 5461 alc_auto_parse_customize_define(codec);
@@ -6079,7 +6095,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6079 * Basically the device should work as is without the fixup table. 6095 * Basically the device should work as is without the fixup table.
6080 * If BIOS doesn't give a proper info, enable the corresponding 6096 * If BIOS doesn't give a proper info, enable the corresponding
6081 * fixup entry. 6097 * fixup entry.
6082 */ 6098 */
6083 SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A", 6099 SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
6084 ALC269_FIXUP_AMIC), 6100 ALC269_FIXUP_AMIC),
6085 SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC), 6101 SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
@@ -6296,7 +6312,7 @@ static void alc_fixup_no_jack_detect(struct hda_codec *codec,
6296{ 6312{
6297 if (action == ALC_FIXUP_ACT_PRE_PROBE) 6313 if (action == ALC_FIXUP_ACT_PRE_PROBE)
6298 codec->no_jack_detect = 1; 6314 codec->no_jack_detect = 1;
6299} 6315}
6300 6316
6301static const struct alc_fixup alc861_fixups[] = { 6317static const struct alc_fixup alc861_fixups[] = {
6302 [ALC861_FIXUP_FSC_AMILO_PI1505] = { 6318 [ALC861_FIXUP_FSC_AMILO_PI1505] = {
@@ -6714,7 +6730,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6714 * Basically the device should work as is without the fixup table. 6730 * Basically the device should work as is without the fixup table.
6715 * If BIOS doesn't give a proper info, enable the corresponding 6731 * If BIOS doesn't give a proper info, enable the corresponding
6716 * fixup entry. 6732 * fixup entry.
6717 */ 6733 */
6718 SND_PCI_QUIRK(0x1043, 0x1000, "ASUS N50Vm", ALC662_FIXUP_ASUS_MODE1), 6734 SND_PCI_QUIRK(0x1043, 0x1000, "ASUS N50Vm", ALC662_FIXUP_ASUS_MODE1),
6719 SND_PCI_QUIRK(0x1043, 0x1092, "ASUS NB", ALC662_FIXUP_ASUS_MODE3), 6735 SND_PCI_QUIRK(0x1043, 0x1092, "ASUS NB", ALC662_FIXUP_ASUS_MODE3),
6720 SND_PCI_QUIRK(0x1043, 0x1173, "ASUS K73Jn", ALC662_FIXUP_ASUS_MODE1), 6736 SND_PCI_QUIRK(0x1043, 0x1173, "ASUS K73Jn", ALC662_FIXUP_ASUS_MODE1),
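
Two things happen in the patch_realtek.c hunks: an error path in alc_auto_fill_dac_nids() now frees best_cfg before returning (a leak fix), and fixup entries gain .chained/.chain_id so one fixup can pull in another, here ALC882_FIXUP_GPIO1, letting a quirk entry name only the most specific fixup. A rough sketch of how such a chain might be walked, modeled on the table layout above; the walk itself is hypothetical.

#include <stdbool.h>
#include <stdio.h>

struct fixup {
        const char *name;
        bool chained;
        int chain_id;           /* next fixup when chained */
};

enum { FIXUP_GPIO1, FIXUP_ASPIRE_8930G };

static const struct fixup fixups[] = {
        [FIXUP_GPIO1]        = { "gpio1", false, 0 },
        [FIXUP_ASPIRE_8930G] = { "acer-aspire-8930g", true, FIXUP_GPIO1 },
};

/* apply a fixup and then every link it chains to */
static void apply_fixup_chain(const struct fixup *table, int id)
{
        for (int depth = 0; depth < 10; depth++) {      /* loop guard */
                const struct fixup *f = &table[id];

                printf("applying %s\n", f->name);
                if (!f->chained)
                        break;
                id = f->chain_id;
        }
}

int main(void)
{
        apply_fixup_chain(fixups, FIXUP_ASPIRE_8930G);  /* applies both */
        return 0;
}
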
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index f8e10ced244a..b3e24f289421 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -140,7 +140,7 @@
140 * min : 0xFE : -115.0 dB 140 * min : 0xFE : -115.0 dB
141 * mute: 0xFF 141 * mute: 0xFF
142 */ 142 */
143static const DECLARE_TLV_DB_SCALE(out_tlv, -11500, 50, 1); 143static const DECLARE_TLV_DB_SCALE(out_tlv, -11550, 50, 1);
144 144
145static const struct snd_kcontrol_new ak4642_snd_controls[] = { 145static const struct snd_kcontrol_new ak4642_snd_controls[] = {
146 146
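
The ak4642 one-liner moves the TLV minimum from -11500 to -11550 centi-dB. DECLARE_TLV_DB_SCALE(name, min, step, mute) takes hundredths of a dB, and with mute = 1 control value 0 is the mute point; assuming the control inverts the chip's 0x00..0xFF register range, control value 1 must land exactly on the -115.0 dB floor the comment documents, which puts the scale minimum one 0.5 dB step lower at -115.5 dB and the top of the scale at +12.0 dB. Checking the arithmetic:

#include <stdio.h>

/* DECLARE_TLV_DB_SCALE(out_tlv, -11550, 50, 1): min and step in 0.01 dB */
int main(void)
{
        int min = -11550, step = 50;

        printf("ctl   0 -> %+.2f dB (mute point)\n", (min + 0 * step) / 100.0);
        printf("ctl   1 -> %+.2f dB (the documented -115.0 dB floor)\n",
               (min + 1 * step) / 100.0);
        printf("ctl 255 -> %+.2f dB (top of scale)\n", (min + 255 * step) / 100.0);
        return 0;
}

With the old -11500 minimum, every reported level was 0.5 dB too high.
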
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index d1926266fe00..8e92fb88ed09 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -143,11 +143,11 @@ static int mic_bias_event(struct snd_soc_dapm_widget *w,
143} 143}
144 144
145/* 145/*
146 * using codec assist to small pop, hp_powerup or lineout_powerup 146 * As the manual describes, the ADC/DAC only work while VAG is powered up,
147 * should stay setting until vag_powerup is fully ramped down, 147 * so VAG must be enabled before bringing the ADC/DAC up.
148 * vag fully ramped down require 400ms. 148 * On power down, we must wait 400ms for VAG to fully ramp down.
149 */ 149 */
150static int small_pop_event(struct snd_soc_dapm_widget *w, 150static int power_vag_event(struct snd_soc_dapm_widget *w,
151 struct snd_kcontrol *kcontrol, int event) 151 struct snd_kcontrol *kcontrol, int event)
152{ 152{
153 switch (event) { 153 switch (event) {
@@ -156,7 +156,7 @@ static int small_pop_event(struct snd_soc_dapm_widget *w,
156 SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP); 156 SGTL5000_VAG_POWERUP, SGTL5000_VAG_POWERUP);
157 break; 157 break;
158 158
159 case SND_SOC_DAPM_PRE_PMD: 159 case SND_SOC_DAPM_POST_PMD:
160 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER, 160 snd_soc_update_bits(w->codec, SGTL5000_CHIP_ANA_POWER,
161 SGTL5000_VAG_POWERUP, 0); 161 SGTL5000_VAG_POWERUP, 0);
162 msleep(400); 162 msleep(400);
@@ -201,12 +201,8 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
201 mic_bias_event, 201 mic_bias_event,
202 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), 202 SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
203 203
204 SND_SOC_DAPM_PGA_E("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0, 204 SND_SOC_DAPM_PGA("HP", SGTL5000_CHIP_ANA_POWER, 4, 0, NULL, 0),
205 small_pop_event, 205 SND_SOC_DAPM_PGA("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0),
206 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
207 SND_SOC_DAPM_PGA_E("LO", SGTL5000_CHIP_ANA_POWER, 0, 0, NULL, 0,
208 small_pop_event,
209 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD),
210 206
211 SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux), 207 SND_SOC_DAPM_MUX("Capture Mux", SND_SOC_NOPM, 0, 0, &adc_mux),
212 SND_SOC_DAPM_MUX("Headphone Mux", SND_SOC_NOPM, 0, 0, &dac_mux), 208 SND_SOC_DAPM_MUX("Headphone Mux", SND_SOC_NOPM, 0, 0, &dac_mux),
@@ -221,8 +217,11 @@ static const struct snd_soc_dapm_widget sgtl5000_dapm_widgets[] = {
221 0, SGTL5000_CHIP_DIG_POWER, 217 0, SGTL5000_CHIP_DIG_POWER,
222 1, 0), 218 1, 0),
223 219
224 SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0), 220 SND_SOC_DAPM_SUPPLY("VAG_POWER", SGTL5000_CHIP_ANA_POWER, 7, 0,
221 power_vag_event,
222 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
225 223
224 SND_SOC_DAPM_ADC("ADC", "Capture", SGTL5000_CHIP_ANA_POWER, 1, 0),
226 SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0), 225 SND_SOC_DAPM_DAC("DAC", "Playback", SGTL5000_CHIP_ANA_POWER, 3, 0),
227}; 226};
228 227
@@ -231,9 +230,11 @@ static const struct snd_soc_dapm_route sgtl5000_dapm_routes[] = {
231 {"Capture Mux", "LINE_IN", "LINE_IN"}, /* line_in --> adc_mux */ 230 {"Capture Mux", "LINE_IN", "LINE_IN"}, /* line_in --> adc_mux */
232 {"Capture Mux", "MIC_IN", "MIC_IN"}, /* mic_in --> adc_mux */ 231 {"Capture Mux", "MIC_IN", "MIC_IN"}, /* mic_in --> adc_mux */
233 232
233 {"ADC", NULL, "VAG_POWER"},
234 {"ADC", NULL, "Capture Mux"}, /* adc_mux --> adc */ 234 {"ADC", NULL, "Capture Mux"}, /* adc_mux --> adc */
235 {"AIFOUT", NULL, "ADC"}, /* adc --> i2s_out */ 235 {"AIFOUT", NULL, "ADC"}, /* adc --> i2s_out */
236 236
237 {"DAC", NULL, "VAG_POWER"},
237 {"DAC", NULL, "AIFIN"}, /* i2s-->dac,skip audio mux */ 238 {"DAC", NULL, "AIFIN"}, /* i2s-->dac,skip audio mux */
238 {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */ 239 {"Headphone Mux", "DAC", "DAC"}, /* dac --> hp_mux */
239 {"LO", NULL, "DAC"}, /* dac --> line_out */ 240 {"LO", NULL, "DAC"}, /* dac --> line_out */
diff --git a/sound/soc/imx/imx-audmux.c b/sound/soc/imx/imx-audmux.c
index 1765a197acb0..f23700359c67 100644
--- a/sound/soc/imx/imx-audmux.c
+++ b/sound/soc/imx/imx-audmux.c
@@ -73,6 +73,9 @@ static ssize_t audmux_read_file(struct file *file, char __user *user_buf,
73 if (!buf) 73 if (!buf)
74 return -ENOMEM; 74 return -ENOMEM;
75 75
76 if (!audmux_base)
77 return -ENOSYS;
78
76 if (audmux_clk) 79 if (audmux_clk)
77 clk_prepare_enable(audmux_clk); 80 clk_prepare_enable(audmux_clk);
78 81
@@ -152,7 +155,7 @@ static void __init audmux_debugfs_init(void)
152 return; 155 return;
153 } 156 }
154 157
155 for (i = 1; i < 8; i++) { 158 for (i = 0; i < MX31_AUDMUX_PORT6_SSI_PINS_6 + 1; i++) {
156 snprintf(buf, sizeof(buf), "ssi%d", i); 159 snprintf(buf, sizeof(buf), "ssi%d", i);
157 if (!debugfs_create_file(buf, 0444, audmux_debugfs_root, 160 if (!debugfs_create_file(buf, 0444, audmux_debugfs_root,
158 (void *)i, &audmux_debugfs_fops)) 161 (void *)i, &audmux_debugfs_fops))
diff --git a/sound/soc/pxa/pxa2xx-i2s.c b/sound/soc/pxa/pxa2xx-i2s.c
index 609abd51e55f..d08583790d23 100644
--- a/sound/soc/pxa/pxa2xx-i2s.c
+++ b/sound/soc/pxa/pxa2xx-i2s.c
@@ -17,6 +17,7 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/io.h>
20#include <sound/core.h> 21#include <sound/core.h>
21#include <sound/pcm.h> 22#include <sound/pcm.h>
22#include <sound/initval.h> 23#include <sound/initval.h>
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index e19c24ade414..accdcb7d4d9d 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1081,6 +1081,8 @@ static int soc_probe_platform(struct snd_soc_card *card,
1081 snd_soc_dapm_new_controls(&platform->dapm, 1081 snd_soc_dapm_new_controls(&platform->dapm,
1082 driver->dapm_widgets, driver->num_dapm_widgets); 1082 driver->dapm_widgets, driver->num_dapm_widgets);
1083 1083
1084 platform->dapm.idle_bias_off = 1;
1085
1084 if (driver->probe) { 1086 if (driver->probe) {
1085 ret = driver->probe(platform); 1087 ret = driver->probe(platform);
1086 if (ret < 0) { 1088 if (ret < 0) {
diff --git a/sound/soc/tegra/tegra_i2s.c b/sound/soc/tegra/tegra_i2s.c
index 33509de52540..e53349912b2e 100644
--- a/sound/soc/tegra/tegra_i2s.c
+++ b/sound/soc/tegra/tegra_i2s.c
@@ -79,11 +79,15 @@ static int tegra_i2s_show(struct seq_file *s, void *unused)
79 struct tegra_i2s *i2s = s->private; 79 struct tegra_i2s *i2s = s->private;
80 int i; 80 int i;
81 81
82 clk_enable(i2s->clk_i2s);
83
82 for (i = 0; i < ARRAY_SIZE(regs); i++) { 84 for (i = 0; i < ARRAY_SIZE(regs); i++) {
83 u32 val = tegra_i2s_read(i2s, regs[i].offset); 85 u32 val = tegra_i2s_read(i2s, regs[i].offset);
84 seq_printf(s, "%s = %08x\n", regs[i].name, val); 86 seq_printf(s, "%s = %08x\n", regs[i].name, val);
85 } 87 }
86 88
89 clk_disable(i2s->clk_i2s);
90
87 return 0; 91 return 0;
88} 92}
89 93
@@ -112,7 +116,7 @@ static void tegra_i2s_debug_remove(struct tegra_i2s *i2s)
112 debugfs_remove(i2s->debug); 116 debugfs_remove(i2s->debug);
113} 117}
114#else 118#else
115static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s, int id) 119static inline void tegra_i2s_debug_add(struct tegra_i2s *i2s)
116{ 120{
117} 121}
118 122
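
The tegra_i2s (and, just below, tegra_spdif) debugfs dumps now bracket their register reads with clk_enable()/clk_disable(); reading a module's registers while its clock is gated can return garbage or hang the bus on many SoCs. The bracketing pattern, with the clock API reduced to counting stubs:

#include <stdio.h>

struct clk { int enable_count; };

static void clk_enable(struct clk *c)  { c->enable_count++; }
static void clk_disable(struct clk *c) { c->enable_count--; }

/* only touch the hardware while its clock is running */
static void dump_regs(struct clk *clk, const unsigned int *regs, int n)
{
        clk_enable(clk);
        for (int i = 0; i < n; i++)
                printf("reg[%d] = %08x\n", i, regs[i]);  /* readl() stand-in */
        clk_disable(clk);
}

int main(void)
{
        struct clk i2s_clk = { 0 };
        unsigned int regs[2] = { 0x00000300, 0x00000010 };

        dump_regs(&i2s_clk, regs, 2);
        return i2s_clk.enable_count;    /* 0: enables and disables balance */
}
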
diff --git a/sound/soc/tegra/tegra_spdif.c b/sound/soc/tegra/tegra_spdif.c
index 475428cf270e..9ff2c601445f 100644
--- a/sound/soc/tegra/tegra_spdif.c
+++ b/sound/soc/tegra/tegra_spdif.c
@@ -79,11 +79,15 @@ static int tegra_spdif_show(struct seq_file *s, void *unused)
79 struct tegra_spdif *spdif = s->private; 79 struct tegra_spdif *spdif = s->private;
80 int i; 80 int i;
81 81
82 clk_enable(spdif->clk_spdif_out);
83
82 for (i = 0; i < ARRAY_SIZE(regs); i++) { 84 for (i = 0; i < ARRAY_SIZE(regs); i++) {
83 u32 val = tegra_spdif_read(spdif, regs[i].offset); 85 u32 val = tegra_spdif_read(spdif, regs[i].offset);
84 seq_printf(s, "%s = %08x\n", regs[i].name, val); 86 seq_printf(s, "%s = %08x\n", regs[i].name, val);
85 } 87 }
86 88
89 clk_disable(spdif->clk_spdif_out);
90
87 return 0; 91 return 0;
88} 92}
89 93
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index fb8b5f83b4a0..1cad3af4bf4c 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -17,6 +17,7 @@
17#include "util/debug.h" 17#include "util/debug.h"
18 18
19#include <sys/prctl.h> 19#include <sys/prctl.h>
20#include <sys/resource.h>
20 21
21#include <semaphore.h> 22#include <semaphore.h>
22#include <pthread.h> 23#include <pthread.h>
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index e3c63aef8efc..8ef59f8262bb 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -42,6 +42,7 @@
42#include "util/debug.h" 42#include "util/debug.h"
43 43
44#include <assert.h> 44#include <assert.h>
45#include <elf.h>
45#include <fcntl.h> 46#include <fcntl.h>
46 47
47#include <stdio.h> 48#include <stdio.h>
@@ -59,6 +60,7 @@
59#include <sys/prctl.h> 60#include <sys/prctl.h>
60#include <sys/wait.h> 61#include <sys/wait.h>
61#include <sys/uio.h> 62#include <sys/uio.h>
63#include <sys/utsname.h>
62#include <sys/mman.h> 64#include <sys/mman.h>
63 65
64#include <linux/unistd.h> 66#include <linux/unistd.h>
@@ -162,12 +164,40 @@ static void __zero_source_counters(struct hist_entry *he)
162 symbol__annotate_zero_histograms(sym); 164 symbol__annotate_zero_histograms(sym);
163} 165}
164 166
167static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
168{
169 struct utsname uts;
170 int err = uname(&uts);
171
172 ui__warning("Out of bounds address found:\n\n"
173 "Addr: %" PRIx64 "\n"
174 "DSO: %s %c\n"
175 "Map: %" PRIx64 "-%" PRIx64 "\n"
176 "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
177 "Arch: %s\n"
178 "Kernel: %s\n"
179 "Tools: %s\n\n"
180 "Not all samples will be on the annotation output.\n\n"
181 "Please report to linux-kernel@vger.kernel.org\n",
182 ip, map->dso->long_name, dso__symtab_origin(map->dso),
183 map->start, map->end, sym->start, sym->end,
184 sym->binding == STB_GLOBAL ? 'g' :
185 sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
186 err ? "[unknown]" : uts.machine,
187 err ? "[unknown]" : uts.release, perf_version_string);
188 if (use_browser <= 0)
189 sleep(5);
190
191 map->erange_warned = true;
192}
193
165static void perf_top__record_precise_ip(struct perf_top *top, 194static void perf_top__record_precise_ip(struct perf_top *top,
166 struct hist_entry *he, 195 struct hist_entry *he,
167 int counter, u64 ip) 196 int counter, u64 ip)
168{ 197{
169 struct annotation *notes; 198 struct annotation *notes;
170 struct symbol *sym; 199 struct symbol *sym;
200 int err;
171 201
172 if (he == NULL || he->ms.sym == NULL || 202 if (he == NULL || he->ms.sym == NULL ||
173 ((top->sym_filter_entry == NULL || 203 ((top->sym_filter_entry == NULL ||
@@ -189,9 +219,12 @@ static void perf_top__record_precise_ip(struct perf_top *top,
189 } 219 }
190 220
191 ip = he->ms.map->map_ip(he->ms.map, ip); 221 ip = he->ms.map->map_ip(he->ms.map, ip);
192 symbol__inc_addr_samples(sym, he->ms.map, counter, ip); 222 err = symbol__inc_addr_samples(sym, he->ms.map, counter, ip);
193 223
194 pthread_mutex_unlock(&notes->lock); 224 pthread_mutex_unlock(&notes->lock);
225
226 if (err == -ERANGE && !he->ms.map->erange_warned)
227 ui__warn_map_erange(he->ms.map, sym, ip);
195} 228}
196 229
197static void perf_top__show_details(struct perf_top *top) 230static void perf_top__show_details(struct perf_top *top)
@@ -615,6 +648,7 @@ process_hotkey:
615 648
616/* Tag samples to be skipped. */ 649/* Tag samples to be skipped. */
617static const char *skip_symbols[] = { 650static const char *skip_symbols[] = {
651 "intel_idle",
618 "default_idle", 652 "default_idle",
619 "native_safe_halt", 653 "native_safe_halt",
620 "cpu_idle", 654 "cpu_idle",
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 199f69ec656f..08c6d138a655 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -64,8 +64,8 @@ int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 
 	pr_debug3("%s: addr=%#" PRIx64 "\n", __func__, map->unmap_ip(map, addr));
 
-	if (addr > sym->end)
-		return 0;
+	if (addr < sym->start || addr > sym->end)
+		return -ERANGE;
 
 	offset = addr - sym->start;
 	h = annotation__histogram(notes, evidx);
@@ -561,16 +561,12 @@ void symbol__annotate_decay_histogram(struct symbol *sym, int evidx)
 {
 	struct annotation *notes = symbol__annotation(sym);
 	struct sym_hist *h = annotation__histogram(notes, evidx);
-	struct objdump_line *pos;
-	int len = sym->end - sym->start;
+	int len = sym->end - sym->start, offset;
 
 	h->sum = 0;
-
-	list_for_each_entry(pos, &notes->src->source, node) {
-		if (pos->offset != -1 && pos->offset < len) {
-			h->addr[pos->offset] = h->addr[pos->offset] * 7 / 8;
-			h->sum += h->addr[pos->offset];
-		}
+	for (offset = 0; offset < len; ++offset) {
+		h->addr[offset] = h->addr[offset] * 7 / 8;
+		h->sum += h->addr[offset];
 	}
 }
 
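
The rewritten symbol__annotate_decay_histogram() no longer walks the parsed objdump lines; it decays every slot of the histogram directly, so hot addresses fade by a factor of 7/8 per decay tick even when no disassembly has been loaded. Under integer arithmetic the count also reaches exactly zero, as this small self-contained check (not perf code) shows:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long count = 1000;
		int ticks = 0;

		while (count > 0) {
			count = count * 7 / 8;	/* same integer decay as the loop above */
			ticks++;
		}
		/* real-valued decay predicts ~log(1000)/log(8/7) =~ 52 ticks;
		 * integer truncation gets there sooner. */
		printf("1000 samples decay to zero after %d ticks\n", ticks);
		return 0;
	}
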
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 2ec4b60aff6c..9f6d630d5316 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -256,6 +256,18 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
 		if (!cmp) {
 			he->period += period;
 			++he->nr_events;
+
+			/* If the map of an existing hist_entry has
+			 * become out-of-date due to an exec() or
+			 * similar, update it. Otherwise we will
+			 * mis-adjust symbol addresses when computing
+			 * the history counter to increment.
+			 */
+			if (he->ms.map != entry->ms.map) {
+				he->ms.map = entry->ms.map;
+				if (he->ms.map)
+					he->ms.map->referenced = true;
+			}
 			goto out;
 		}
 
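
The hist.c change guards against stale maps: hist entries are reused across samples, and after an exec() the same code may be mapped at a different address, so translating a new sample's IP through the old map lands it on the wrong histogram slot. A simplified, self-contained illustration of the failure mode (toy types, not perf's):

	#include <stdio.h>

	struct toy_map { unsigned long start; };

	/* Absolute IP -> map-relative address, like perf's map_ip(). */
	static unsigned long toy_map_ip(const struct toy_map *m, unsigned long ip)
	{
		return ip - m->start;
	}

	int main(void)
	{
		struct toy_map old_map = { 0x400000 };
		struct toy_map new_map = { 0x500000 };	/* remapped after exec() */
		unsigned long ip = 0x500123;		/* sample from the new mapping */

		printf("fresh map: %#lx (correct slot)\n", toy_map_ip(&new_map, ip));
		printf("stale map: %#lx (wrong slot)\n", toy_map_ip(&old_map, ip));
		return 0;
	}
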
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index dea6d1c1a954..35ae56864e4f 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -38,6 +38,7 @@ void map__init(struct map *self, enum map_type type,
 	RB_CLEAR_NODE(&self->rb_node);
 	self->groups = NULL;
 	self->referenced = false;
+	self->erange_warned = false;
 }
 
 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index b100c20b7f94..81371bad4ef0 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -33,6 +33,7 @@ struct map {
 	u64			end;
 	u8 /* enum map_type */	type;
 	bool			referenced;
+	bool			erange_warned;
 	u32			priv;
 	u64			pgoff;
 
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 9412e3b05f68..00923cda4d9c 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -826,8 +826,16 @@ static struct machine *
 {
 	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest)
-		return perf_session__find_machine(session, event->ip.pid);
+	if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
+		u32 pid;
+
+		if (event->header.type == PERF_RECORD_MMAP)
+			pid = event->mmap.pid;
+		else
+			pid = event->ip.pid;
+
+		return perf_session__find_machine(session, pid);
+	}
 
 	return perf_session__find_host_machine(session);
 }
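
The session.c fix accounts for the layout of perf's event union: sample-shaped records carry their pid in event->ip, while PERF_RECORD_MMAP stores it in event->mmap, so reading event->ip.pid for a guest mmap event picks up whatever bytes happen to overlap that slot. A reduced sketch of dispatching on the record type (toy types, not perf's actual definitions):

	#include <stdint.h>
	#include <stdio.h>

	enum { TOY_RECORD_MMAP = 1, TOY_RECORD_SAMPLE = 9 };

	struct toy_header { uint32_t type; };

	/* Both record shapes start with the header, like perf's union. */
	union toy_event {
		struct { struct toy_header header; uint64_t ip; uint32_t pid; } ip;
		struct { struct toy_header header; uint32_t pid; uint64_t start; } mmap;
	};

	static uint32_t toy_event_pid(const union toy_event *ev)
	{
		/* Dispatch on the record type, as the fixed code does. */
		if (ev->ip.header.type == TOY_RECORD_MMAP)
			return ev->mmap.pid;
		return ev->ip.pid;
	}

	int main(void)
	{
		union toy_event ev = { .mmap = { { TOY_RECORD_MMAP }, 42, 0x1000 } };

		printf("pid = %u\n", toy_event_pid(&ev));	/* 42, not stray bytes */
		return 0;
	}
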
diff --git a/tools/perf/util/ui/browsers/hists.c b/tools/perf/util/ui/browsers/hists.c
index d7a1c4afe28b..2f83e5dc9967 100644
--- a/tools/perf/util/ui/browsers/hists.c
+++ b/tools/perf/util/ui/browsers/hists.c
@@ -125,6 +125,9 @@ static int callchain__count_rows(struct rb_root *chain)
 
 static bool map_symbol__toggle_fold(struct map_symbol *self)
 {
+	if (!self)
+		return false;
+
 	if (!self->has_children)
 		return false;
 
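
Finally, the hists.c guard makes the fold toggle NULL-tolerant, so browser callers can invoke it unconditionally even when no row with a map_symbol is under the cursor. The convention in miniature (illustrative names):

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_node { bool has_children; bool unfolded; };

	/* NULL-tolerant toggle: reports whether the fold state changed. */
	static bool toy_toggle_fold(struct toy_node *node)
	{
		if (node == NULL || !node->has_children)
			return false;
		node->unfolded = !node->unfolded;
		return true;
	}

	int main(void)
	{
		struct toy_node leaf = { false, false };

		printf("%d\n", toy_toggle_fold(NULL));	/* 0: safe on NULL */
		printf("%d\n", toy_toggle_fold(&leaf));	/* 0: no children */
		return 0;
	}
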