author    Dave Airlie <airlied@redhat.com>    2010-12-21 18:48:54 -0500
committer Dave Airlie <airlied@redhat.com>    2010-12-21 18:48:54 -0500
commit    ae09f09e94d755ed45c58b695675636c0ec53f9e (patch)
tree      77cb9bac7d81f5b1250b8638a007e10c17b600af
parent    1d99e5c57255d188773fb437391df24fe8faf575 (diff)
parent    5909a77ac62cc042f94bd262016cf468a2f96022 (diff)
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (771 commits)
  drm/i915: Undo "Uncouple render/power ctx before suspending"
  drm/i915: Allow the application to choose the constant addressing mode
  drm/i915: dynamic render p-state support for Sandy Bridge
  drm/i915: Enable EI mode for RCx decision making on Sandybridge
  drm/i915/sdvo: Border and stall select became test bits in gen5
  drm/i915: Add Guess-o-matic for pageflip timestamping.
  drm/i915: Add support for precise vblank timestamping (v2)
  drm/i915: Add frame buffer compression on Sandybridge
  drm/i915: Add self-refresh support on Sandybridge
  drm/i915: Wait for vblank before unpinning old fb
  Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake"
  drm/i915: Pass clock limits down to PLL matcher
  drm/i915: Poll for seqno completion if IRQ is disabled
  drm/i915/ringbuffer: Make IRQ refcnting atomic
  agp/intel: Fix missed cached memory flags setting in i965_write_entry()
  drm/i915/sdvo: Only use the SDVO pin if it is in the valid range
  drm/i915: Enable RC6 autodownclocking on Sandybridge
  drm/i915: Terminate the FORCE WAKE after we have finished reading
  drm/i915/gtt: Clear the cachelines upon resume
  drm/i915: Restore GTT mapping first upon resume
  ...
-rw-r--r--Documentation/ABI/testing/sysfs-bus-rbd83
-rw-r--r--Documentation/ABI/testing/sysfs-platform-asus-laptop16
-rw-r--r--Documentation/ABI/testing/sysfs-platform-eeepc-wmi10
-rw-r--r--Documentation/DocBook/sh.tmpl4
-rw-r--r--Documentation/driver-model/interface.txt129
-rw-r--r--Documentation/edac.txt8
-rw-r--r--Documentation/fb/00-INDEX32
-rw-r--r--Documentation/filesystems/vfs.txt9
-rw-r--r--Documentation/kernel-parameters.txt5
-rw-r--r--Documentation/networking/ip-sysctl.txt1
-rw-r--r--Documentation/sh/clk.txt32
-rw-r--r--MAINTAINERS15
-rw-r--r--Makefile2
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/boot/Makefile5
-rw-r--r--arch/arm/boot/bootp/init.S2
-rw-r--r--arch/arm/boot/compressed/head.S15
-rw-r--r--arch/arm/boot/compressed/vmlinux.lds.in2
-rw-r--r--arch/arm/common/gic.c40
-rw-r--r--arch/arm/configs/at91rm9200_defconfig341
-rw-r--r--arch/arm/configs/at91rm9200dk_defconfig72
-rw-r--r--arch/arm/configs/at91rm9200ek_defconfig73
-rw-r--r--arch/arm/configs/ateb9200_defconfig131
-rw-r--r--arch/arm/configs/carmeva_defconfig47
-rw-r--r--arch/arm/configs/cpuat91_defconfig112
-rw-r--r--arch/arm/configs/csb337_defconfig104
-rw-r--r--arch/arm/configs/csb637_defconfig98
-rw-r--r--arch/arm/configs/ecbat91_defconfig99
-rw-r--r--arch/arm/configs/kafa_defconfig61
-rw-r--r--arch/arm/configs/kb9202_defconfig127
-rw-r--r--arch/arm/configs/onearm_defconfig80
-rw-r--r--arch/arm/configs/picotux200_defconfig242
-rw-r--r--arch/arm/configs/yl9200_defconfig137
-rw-r--r--arch/arm/include/asm/assembler.h2
-rw-r--r--arch/arm/include/asm/mmu.h4
-rw-r--r--arch/arm/include/asm/pgtable.h3
-rw-r--r--arch/arm/kernel/entry-armv.S2
-rw-r--r--arch/arm/kernel/head.S7
-rw-r--r--arch/arm/kernel/relocate_kernel.S2
-rw-r--r--arch/arm/lib/findbit.S6
-rw-r--r--arch/arm/mach-aaec2000/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-at91/Makefile4
-rw-r--r--arch/arm/mach-at91/at91rm9200_devices.c45
-rw-r--r--arch/arm/mach-at91/board-1arm.c26
-rw-r--r--arch/arm/mach-at91/board-kafa.c21
-rw-r--r--arch/arm/mach-at91/board-picotux200.c53
-rw-r--r--arch/arm/mach-at91/board-rm9200dk.c (renamed from arch/arm/mach-at91/board-dk.c)4
-rw-r--r--arch/arm/mach-at91/board-rm9200ek.c (renamed from arch/arm/mach-at91/board-ek.c)4
-rw-r--r--arch/arm/mach-at91/board-yl-9200.c2
-rw-r--r--arch/arm/mach-at91/include/mach/board.h6
-rw-r--r--arch/arm/mach-bcmring/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-clps711x/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-cns3xxx/pcie.c2
-rw-r--r--arch/arm/mach-davinci/dm355.c6
-rw-r--r--arch/arm/mach-davinci/dm365.c6
-rw-r--r--arch/arm/mach-davinci/dm644x.c4
-rw-r--r--arch/arm/mach-ebsa110/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-footbridge/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-h720x/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-imx/eukrea_mbimx27-baseboard.c6
-rw-r--r--arch/arm/mach-integrator/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-msm/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-mx25/devices-imx25.h4
-rw-r--r--arch/arm/mach-mx3/mach-pcm037_eet.c5
-rw-r--r--arch/arm/mach-netx/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-omap1/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-omap2/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-omap2/pm24xx.c7
-rw-r--r--arch/arm/mach-omap2/pm34xx.c10
-rw-r--r--arch/arm/mach-omap2/serial.c7
-rw-r--r--arch/arm/mach-pnx4008/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-realview/headsmp.S1
-rw-r--r--arch/arm/mach-rpc/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-s3c2410/h1940-bluetooth.c8
-rw-r--r--arch/arm/mach-s3c2412/Kconfig2
-rw-r--r--arch/arm/mach-s3c2416/Kconfig3
-rw-r--r--arch/arm/mach-s3c2416/irq.c5
-rw-r--r--arch/arm/mach-s3c2440/Kconfig4
-rw-r--r--arch/arm/mach-s3c2440/s3c2440.c11
-rw-r--r--arch/arm/mach-s3c2440/s3c2442.c14
-rw-r--r--arch/arm/mach-s3c2443/Kconfig1
-rw-r--r--arch/arm/mach-s3c2443/irq.c5
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c2
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c2
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkc110.c1
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c1
-rw-r--r--arch/arm/mach-shark/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c147
-rw-r--r--arch/arm/mach-shmobile/clock-sh7372.c39
-rw-r--r--arch/arm/mach-tegra/include/mach/debug-macro.S4
-rw-r--r--arch/arm/mach-ux500/cpu.c6
-rw-r--r--arch/arm/mach-versatile/include/mach/vmalloc.h2
-rw-r--r--arch/arm/mach-vexpress/headsmp.S1
-rw-r--r--arch/arm/mm/ioremap.c4
-rw-r--r--arch/arm/mm/proc-v7.S4
-rw-r--r--arch/arm/plat-iop/time.c3
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-dma.c8
-rw-r--r--arch/arm/plat-mxc/devices/platform-spi_imx.c1
-rw-r--r--arch/arm/plat-nomadik/timer.c89
-rw-r--r--arch/arm/plat-pxa/include/plat/sdhci.h3
-rw-r--r--arch/arm/plat-s3c24xx/cpu.c8
-rw-r--r--arch/arm/plat-s3c24xx/gpiolib.c2
-rw-r--r--arch/arm/plat-s3c24xx/include/plat/s3c244x.h7
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c6
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c6
-rw-r--r--arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c6
-rw-r--r--arch/arm/plat-samsung/gpio-config.c47
-rw-r--r--arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h11
-rw-r--r--arch/arm/vfp/vfphw.S1
-rw-r--r--arch/mn10300/include/asm/syscall.h117
-rw-r--r--arch/mn10300/kernel/gdb-io-serial.c3
-rw-r--r--arch/mn10300/kernel/gdb-io-ttysm.c3
-rw-r--r--arch/mn10300/kernel/gdb-stub.c3
-rw-r--r--arch/parisc/kernel/irq.c7
-rw-r--r--arch/parisc/kernel/signal.c9
-rw-r--r--arch/powerpc/mm/pgtable.c2
-rw-r--r--arch/s390/kernel/nmi.c10
-rw-r--r--arch/s390/kernel/vtime.c19
-rw-r--r--arch/s390/lib/delay.c14
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c34
-rw-r--r--arch/sh/boards/mach-se/7724/setup.c47
-rw-r--r--arch/sh/include/asm/cacheflush.h2
-rw-r--r--arch/sh/include/asm/processor_32.h7
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h3
-rw-r--r--arch/sh/kernel/cpu/sh4/clock-sh4-202.c2
-rw-r--r--arch/sh/kernel/cpu/sh4a/clock-sh7724.c44
-rw-r--r--arch/sh/kernel/sys_sh.c2
-rw-r--r--arch/sh/kernel/vsyscall/vsyscall-trapa.S2
-rw-r--r--arch/sh/mm/cache-sh4.c4
-rw-r--r--arch/sh/mm/cache-sh7705.c2
-rw-r--r--arch/sh/mm/cache.c14
-rw-r--r--arch/sh/mm/kmap.c2
-rw-r--r--arch/tile/Kconfig12
-rw-r--r--arch/tile/include/asm/cacheflush.h52
-rw-r--r--arch/tile/include/asm/io.h15
-rw-r--r--arch/tile/include/asm/pci-bridge.h117
-rw-r--r--arch/tile/include/asm/pci.h107
-rw-r--r--arch/tile/include/asm/processor.h10
-rw-r--r--arch/tile/include/hv/drv_xgbe_impl.h300
-rw-r--r--arch/tile/include/hv/drv_xgbe_intf.h615
-rw-r--r--arch/tile/include/hv/netio_errors.h122
-rw-r--r--arch/tile/include/hv/netio_intf.h2975
-rw-r--r--arch/tile/kernel/Makefile1
-rw-r--r--arch/tile/kernel/pci.c621
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/tile/lib/memchr_32.c35
-rw-r--r--arch/tile/lib/spinlock_32.c29
-rw-r--r--arch/um/drivers/line.c5
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/include/asm/fixmap.h4
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/include/asm/paravirt.h10
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/include/asm/uv/uv_hub.h4
-rw-r--r--arch/x86/include/asm/uv/uv_mmrs.h19
-rw-r--r--arch/x86/include/asm/xen/interface.h6
-rw-r--r--arch/x86/include/asm/xen/interface_32.h5
-rw-r--r--arch/x86/include/asm/xen/interface_64.h13
-rw-r--r--arch/x86/include/asm/xen/page.h7
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c7
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c25
-rw-r--r--arch/x86/kernel/cpu/perf_event.c20
-rw-r--r--arch/x86/kernel/entry_32.S2
-rw-r--r--arch/x86/kernel/entry_64.S2
-rw-r--r--arch/x86/kernel/hw_breakpoint.c4
-rw-r--r--arch/x86/kernel/mmconf-fam10h_64.c64
-rw-r--r--arch/x86/kernel/pvclock.c5
-rw-r--r--arch/x86/mm/tlb.c5
-rw-r--r--arch/x86/pci/xen.c27
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2
-rw-r--r--arch/x86/platform/uv/uv_time.c4
-rw-r--r--arch/x86/xen/enlighten.c25
-rw-r--r--arch/x86/xen/mmu.c88
-rw-r--r--arch/x86/xen/platform-pci-unplug.c2
-rw-r--r--arch/x86/xen/setup.c53
-rw-r--r--arch/x86/xen/suspend.c1
-rw-r--r--arch/x86/xen/time.c2
-rw-r--r--arch/x86/xen/xen-ops.h2
-rw-r--r--block/blk-throttle.c2
-rw-r--r--drivers/block/amiflop.c2
-rw-r--r--drivers/block/ataflop.c2
-rw-r--r--drivers/block/cciss.c3
-rw-r--r--drivers/block/rbd.c748
-rw-r--r--drivers/block/xen-blkfront.c55
-rw-r--r--drivers/char/agp/agp.h1
-rw-r--r--drivers/char/agp/compat_ioctl.c1
-rw-r--r--drivers/char/agp/compat_ioctl.h1
-rw-r--r--drivers/char/agp/frontend.c8
-rw-r--r--drivers/char/agp/generic.c27
-rw-r--r--drivers/char/agp/intel-agp.c5
-rw-r--r--drivers/char/agp/intel-agp.h14
-rw-r--r--drivers/char/agp/intel-gtt.c795
-rw-r--r--drivers/char/tpm/tpm_tis.c24
-rw-r--r--drivers/char/virtio_console.c37
-rw-r--r--drivers/dma/shdma.c1
-rw-r--r--drivers/edac/Makefile8
-rw-r--r--drivers/edac/mce_amd_inj.c2
-rw-r--r--drivers/firewire/net.c160
-rw-r--r--drivers/gpio/cs5535-gpio.c16
-rw-r--r--drivers/gpu/drm/drm_agpsupport.c6
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c16
-rw-r--r--drivers/gpu/drm/drm_irq.c19
-rw-r--r--drivers/gpu/drm/drm_mm.c40
-rw-r--r--drivers/gpu/drm/i915/Makefile2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c471
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c792
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c68
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h605
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c3645
-rw-r--r--drivers/gpu/drm/i915/i915_gem_debug.c23
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c125
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c1343
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c99
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c139
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c724
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h194
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c144
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h91
-rw-r--r--drivers/gpu/drm/i915/intel_acpi.c34
-rw-r--r--drivers/gpu/drm/i915/intel_display.c1082
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c207
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h24
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c25
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c21
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c160
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c8
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c116
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c52
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1056
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h141
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c100
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c14
-rw-r--r--drivers/gpu/drm/radeon/atom.c1
-rw-r--r--drivers/gpu/drm/radeon/r600.c6
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c13
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-egalax.c2
-rw-r--r--drivers/hid/hid-input.c21
-rw-r--r--drivers/hid/hid-tmff.c2
-rw-r--r--drivers/hwmon/i5k_amb.c2
-rw-r--r--drivers/hwmon/lis3lv02d_i2c.c10
-rw-r--r--drivers/i2c/Kconfig3
-rw-r--r--drivers/i2c/algos/Kconfig14
-rw-r--r--drivers/infiniband/core/ud_header.c30
-rw-r--r--drivers/infiniband/core/uverbs_marshall.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c4
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c10
-rw-r--r--drivers/input/joystick/turbografx.c1
-rw-r--r--drivers/input/keyboard/Kconfig16
-rw-r--r--drivers/input/keyboard/Makefile1
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c261
-rw-r--r--drivers/input/mouse/synaptics.h3
-rw-r--r--drivers/input/serio/gscps2.c2
-rw-r--r--drivers/input/tablet/wacom_wac.c9
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c1
-rw-r--r--drivers/isdn/icn/icn.c7
-rw-r--r--drivers/leds/Kconfig50
-rw-r--r--drivers/leds/leds-lp5521.c50
-rw-r--r--drivers/leds/leds-lp5523.c44
-rw-r--r--drivers/leds/leds-ss4200.c1
-rw-r--r--drivers/macintosh/Kconfig1
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/raid1.c1
-rw-r--r--drivers/media/common/tuners/Kconfig8
-rw-r--r--drivers/media/dvb/frontends/Kconfig5
-rw-r--r--drivers/media/radio/radio-si4713.c2
-rw-r--r--drivers/media/video/Kconfig2
-rw-r--r--drivers/media/video/au0828/au0828-cards.c4
-rw-r--r--drivers/media/video/bt8xx/bttv-cards.c22
-rw-r--r--drivers/media/video/cafe_ccic.c3
-rw-r--r--drivers/media/video/cx18/cx18-i2c.c8
-rw-r--r--drivers/media/video/cx231xx/cx231xx-cards.c4
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/video/cx23885/cx23885-video.c4
-rw-r--r--drivers/media/video/cx88/cx88-cards.c9
-rw-r--r--drivers/media/video/cx88/cx88-video.c7
-rw-r--r--drivers/media/video/davinci/vpfe_capture.c1
-rw-r--r--drivers/media/video/davinci/vpif_capture.c1
-rw-r--r--drivers/media/video/davinci/vpif_display.c2
-rw-r--r--drivers/media/video/em28xx/em28xx-cards.c18
-rw-r--r--drivers/media/video/fsl-viu.c2
-rw-r--r--drivers/media/video/ivtv/ivtv-i2c.c22
-rw-r--r--drivers/media/video/mxb.c12
-rw-r--r--drivers/media/video/pvrusb2/pvrusb2-hdw.c6
-rw-r--r--drivers/media/video/s5p-fimc/fimc-capture.c2
-rw-r--r--drivers/media/video/saa7134/saa7134-cards.c8
-rw-r--r--drivers/media/video/saa7134/saa7134-core.c4
-rw-r--r--drivers/media/video/sh_vou.c2
-rw-r--r--drivers/media/video/soc_camera.c2
-rw-r--r--drivers/media/video/usbvision/usbvision-i2c.c6
-rw-r--r--drivers/media/video/v4l2-common.c15
-rw-r--r--drivers/media/video/via-camera.c2
-rw-r--r--drivers/media/video/vino.c4
-rw-r--r--drivers/media/video/zoran/zoran_card.c5
-rw-r--r--drivers/misc/isl29020.c4
-rw-r--r--drivers/misc/sgi-xp/xpc_partition.c25
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/mmc.c8
-rw-r--r--drivers/mmc/core/sdio.c51
-rw-r--r--drivers/mmc/core/sdio_bus.c33
-rw-r--r--drivers/mmc/host/omap_hsmmc.c2
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c12
-rw-r--r--drivers/mmc/host/sdhci-pci.c31
-rw-r--r--drivers/mmc/host/sdhci-pxa.c4
-rw-r--r--drivers/mmc/host/sdhci.c54
-rw-r--r--drivers/mmc/host/sdhci.h9
-rw-r--r--drivers/mmc/host/ushc.c30
-rw-r--r--drivers/mtd/ubi/io.c37
-rw-r--r--drivers/mtd/ubi/scan.c20
-rw-r--r--drivers/net/Kconfig18
-rw-r--r--drivers/net/Makefile1
-rw-r--r--drivers/net/atl1c/atl1c_hw.c2
-rw-r--r--drivers/net/au1000_eth.c10
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c73
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c94
-rw-r--r--drivers/net/e1000/e1000_main.c12
-rw-r--r--drivers/net/ehea/ehea_main.c18
-rw-r--r--drivers/net/irda/sh_sir.c2
-rw-r--r--drivers/net/mlx4/fw.c4
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c6
-rw-r--r--drivers/net/pch_gbe/pch_gbe_param.c8
-rw-r--r--drivers/net/phy/marvell.c164
-rw-r--r--drivers/net/ppp_generic.c43
-rw-r--r--drivers/net/qlge/qlge_main.c6
-rw-r--r--drivers/net/tile/Makefile10
-rw-r--r--drivers/net/tile/tilepro.c2406
-rw-r--r--drivers/net/ucc_geth.h3
-rw-r--r--drivers/net/usb/hso.c10
-rw-r--r--drivers/net/wan/x25_asy.c13
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c2
-rw-r--r--drivers/net/wireless/b43/sdio.c1
-rw-r--r--drivers/parisc/dino.c1
-rw-r--r--drivers/parisc/eisa.c3
-rw-r--r--drivers/parisc/gsc.c3
-rw-r--r--drivers/parisc/iosapic.c8
-rw-r--r--drivers/parisc/led.c17
-rw-r--r--drivers/parisc/superio.c3
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/quirks.c18
-rw-r--r--drivers/pcmcia/soc_common.c1
-rw-r--r--drivers/platform/x86/asus-laptop.c97
-rw-r--r--drivers/platform/x86/eeepc-wmi.c4
-rw-r--r--drivers/platform/x86/hp-wmi.c2
-rw-r--r--drivers/platform/x86/ibm_rtl.c34
-rw-r--r--drivers/platform/x86/msi-wmi.c16
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c1
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/platform/x86/wmi.c2
-rw-r--r--drivers/regulator/core.c30
-rw-r--r--drivers/regulator/mc13783-regulator.c4
-rw-r--r--drivers/regulator/twl-regulator.c6
-rw-r--r--drivers/s390/cio/css.c2
-rw-r--r--drivers/s390/cio/qdio_thinint.c2
-rw-r--r--drivers/scsi/arm/fas216.h2
-rw-r--r--drivers/serial/8250.c2
-rw-r--r--drivers/serial/mfd.c24
-rw-r--r--drivers/sh/clk/core.c16
-rw-r--r--drivers/sh/clk/cpg.c7
-rw-r--r--drivers/spi/atmel_spi.c6
-rw-r--r--drivers/ssb/b43_pci_bridge.c1
-rw-r--r--drivers/staging/asus_oled/asus_oled.c8
-rw-r--r--drivers/staging/batman-adv/hard-interface.c19
-rw-r--r--drivers/staging/batman-adv/soft-interface.c14
-rw-r--r--drivers/staging/brcm80211/README8
-rw-r--r--drivers/staging/brcm80211/TODO2
-rw-r--r--drivers/staging/comedi/drivers/usbdux.c4
-rw-r--r--drivers/staging/easycap/easycap.h1
-rw-r--r--drivers/staging/frontier/tranzport.c2
-rw-r--r--drivers/staging/go7007/go7007-driver.c2
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c2
-rw-r--r--drivers/staging/intel_sst/intel_sst_stream_encoded.c2
-rw-r--r--drivers/staging/line6/control.c204
-rw-r--r--drivers/staging/line6/midi.c4
-rw-r--r--drivers/staging/line6/pcm.c4
-rw-r--r--drivers/staging/line6/pod.c32
-rw-r--r--drivers/staging/line6/toneport.c4
-rw-r--r--drivers/staging/line6/variax.c12
-rw-r--r--drivers/staging/quickstart/quickstart.c10
-rw-r--r--drivers/staging/rt2860/usb_main_dev.c1
-rw-r--r--drivers/staging/rtl8187se/r8185b_init.c30
-rw-r--r--drivers/staging/rtl8712/usb_halinit.c2
-rw-r--r--drivers/staging/samsung-laptop/samsung-laptop.c2
-rw-r--r--drivers/staging/speakup/fakekey.c1
-rw-r--r--drivers/staging/spectra/ffsport.c2
-rw-r--r--drivers/staging/tm6000/tm6000-cards.c4
-rw-r--r--drivers/staging/udlfb/udlfb.c2
-rw-r--r--drivers/staging/winbond/sysdef.h3
-rw-r--r--drivers/staging/zram/zram_sysfs.c4
-rw-r--r--drivers/tty/tty_io.c13
-rw-r--r--drivers/tty/tty_ldisc.c2
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/uio/uio_cif.c2
-rw-r--r--drivers/uio/uio_netx.c2
-rw-r--r--drivers/usb/atm/ueagle-atm.c7
-rw-r--r--drivers/usb/core/hcd.c2
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/host/ehci-dbg.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c10
-rw-r--r--drivers/usb/host/ehci-mem.c26
-rw-r--r--drivers/usb/host/ehci-pci.c25
-rw-r--r--drivers/usb/host/ehci-sched.c21
-rw-r--r--drivers/usb/host/ehci.h2
-rw-r--r--drivers/usb/host/isp1362-hcd.c2
-rw-r--r--drivers/usb/host/xhci-hub.c7
-rw-r--r--drivers/usb/host/xhci-mem.c168
-rw-r--r--drivers/usb/host/xhci-ring.c1
-rw-r--r--drivers/usb/host/xhci.c91
-rw-r--r--drivers/usb/host/xhci.h31
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c6
-rw-r--r--drivers/usb/misc/trancevibrator.c2
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c10
-rw-r--r--drivers/usb/misc/yurex.c1
-rw-r--r--drivers/usb/musb/musb_core.c3
-rw-r--r--drivers/usb/musb/musb_gadget.c124
-rw-r--r--drivers/usb/otg/langwell_otg.c9
-rw-r--r--drivers/usb/serial/ftdi_sio.c2
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h4
-rw-r--r--drivers/usb/serial/usb-serial.c3
-rw-r--r--drivers/usb/storage/sierra_ms.c2
-rw-r--r--drivers/vhost/net.c5
-rw-r--r--drivers/video/backlight/backlight.c12
-rw-r--r--drivers/video/da8xx-fb.c14
-rw-r--r--drivers/video/fbcmap.c69
-rw-r--r--drivers/video/geode/lxfb.h4
-rw-r--r--drivers/video/geode/lxfb_ops.c24
-rw-r--r--drivers/video/mx3fb.c4
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/sis/init.c685
-rw-r--r--drivers/video/sis/init.h63
-rw-r--r--drivers/video/sis/init301.c467
-rw-r--r--drivers/video/sis/init301.h43
-rw-r--r--drivers/video/sis/initextlfb.c7
-rw-r--r--drivers/video/sis/osdef.h133
-rw-r--r--drivers/video/sis/sis.h1
-rw-r--r--drivers/video/sis/sis_main.c30
-rw-r--r--drivers/video/sis/vgatypes.h11
-rw-r--r--drivers/video/sis/vstruct.h12
-rw-r--r--drivers/virtio/virtio.c6
-rw-r--r--drivers/virtio/virtio_ring.c3
-rw-r--r--drivers/watchdog/Kconfig3
-rw-r--r--drivers/watchdog/bcm63xx_wdt.c42
-rw-r--r--drivers/watchdog/gef_wdt.c1
-rw-r--r--drivers/watchdog/iTCO_wdt.c8
-rw-r--r--drivers/xen/Makefile5
-rw-r--r--drivers/xen/balloon.c42
-rw-r--r--drivers/xen/events.c122
-rw-r--r--drivers/xen/evtchn.c100
-rw-r--r--drivers/xen/manage.c1
-rw-r--r--drivers/xen/xenfs/privcmd.c13
-rw-r--r--drivers/xen/xenfs/super.c46
-rw-r--r--fs/autofs4/root.c12
-rw-r--r--fs/btrfs/compression.c15
-rw-r--r--fs/btrfs/ctree.h6
-rw-r--r--fs/btrfs/disk-io.c38
-rw-r--r--fs/btrfs/export.c76
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/btrfs/extent_io.c77
-rw-r--r--fs/btrfs/extent_io.h3
-rw-r--r--fs/btrfs/file.c7
-rw-r--r--fs/btrfs/inode.c294
-rw-r--r--fs/btrfs/ioctl.c31
-rw-r--r--fs/btrfs/ordered-data.c67
-rw-r--r--fs/btrfs/ordered-data.h3
-rw-r--r--fs/btrfs/super.c41
-rw-r--r--fs/btrfs/transaction.c5
-rw-r--r--fs/btrfs/tree-log.c21
-rw-r--r--fs/cifs/Kconfig8
-rw-r--r--fs/cifs/cifsacl.c48
-rw-r--r--fs/cifs/cifsfs.c2
-rw-r--r--fs/cifs/cifsproto.h6
-rw-r--r--fs/cifs/connect.c5
-rw-r--r--fs/cifs/dns_resolve.c2
-rw-r--r--fs/cifs/file.c4
-rw-r--r--fs/cifs/fscache.c12
-rw-r--r--fs/cifs/inode.c36
-rw-r--r--fs/cifs/readdir.c29
-rw-r--r--fs/cifs/xattr.c55
-rw-r--r--fs/compat.c28
-rw-r--r--fs/exec.c36
-rw-r--r--fs/fuse/file.c10
-rw-r--r--fs/gfs2/quota.c15
-rw-r--r--fs/ioprio.c31
-rw-r--r--fs/nfs/dir.c68
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/internal.h9
-rw-r--r--fs/nfs/nfs2xdr.c4
-rw-r--r--fs/nfs/nfs3xdr.c4
-rw-r--r--fs/nfs/nfs4xdr.c6
-rw-r--r--fs/nilfs2/dat.c2
-rw-r--r--fs/nilfs2/ioctl.c4
-rw-r--r--fs/ocfs2/cluster/heartbeat.c14
-rw-r--r--fs/ocfs2/dcache.c1
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/ocfs2.h6
-rw-r--r--fs/ocfs2/stack_user.c2
-rw-r--r--fs/pipe.c14
-rw-r--r--fs/proc/base.c2
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/reiserfs/ioctl.c7
-rw-r--r--fs/reiserfs/xattr_acl.c6
-rw-r--r--fs/splice.c24
-rw-r--r--fs/xfs/linux-2.6/xfs_aops.c94
-rw-r--r--fs/xfs/linux-2.6/xfs_buf.c35
-rw-r--r--fs/xfs/xfs_bmap.c85
-rw-r--r--fs/xfs/xfs_bmap.h5
-rw-r--r--fs/xfs/xfs_dfrag.c13
-rw-r--r--fs/xfs/xfs_error.c3
-rw-r--r--fs/xfs/xfs_error.h5
-rw-r--r--fs/xfs/xfs_inode_item.c31
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_mm.h7
-rw-r--r--include/drm/i915_drm.h14
-rw-r--r--include/drm/intel-gtt.h35
-rw-r--r--include/linux/agp_backend.h2
-rw-r--r--include/linux/binfmts.h5
-rw-r--r--include/linux/cpu.h5
-rw-r--r--include/linux/dmar.h17
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/fs.h6
-rw-r--r--include/linux/gfp.h4
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/hw_breakpoint.h4
-rw-r--r--include/linux/input.h25
-rw-r--r--include/linux/intel-gtt.h20
-rw-r--r--include/linux/marvell_phy.h1
-rw-r--r--include/linux/memory_hotplug.h6
-rw-r--r--include/linux/mfd/wm8350/audio.h3
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/nfs_xdr.h1
-rw-r--r--include/linux/node.h5
-rw-r--r--include/linux/page_cgroup.h7
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/perf_event.h30
-rw-r--r--include/linux/pipe_fs_i.h1
-rw-r--r--include/linux/sh_clk.h34
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/linux/usb.h4
-rw-r--r--include/linux/vmalloc.h2
-rw-r--r--include/media/v4l2-common.h16
-rw-r--r--include/net/af_unix.h2
-rw-r--r--include/sound/sh_fsi.h6
-rw-r--r--include/video/da8xx-fb.h1
-rw-r--r--include/xen/events.h7
-rw-r--r--include/xen/interface/memory.h13
-rw-r--r--include/xen/interface/physdev.h10
-rw-r--r--include/xen/page.h7
-rw-r--r--include/xen/privcmd.h5
-rw-r--r--init/Kconfig13
-rw-r--r--kernel/exit.c9
-rw-r--r--kernel/hw_breakpoint.c3
-rw-r--r--kernel/irq/proc.c2
-rw-r--r--kernel/irq_work.c4
-rw-r--r--kernel/module.c12
-rw-r--r--kernel/perf_event.c93
-rw-r--r--kernel/posix-cpu-timers.c12
-rw-r--r--kernel/power/hibernate.c22
-rw-r--r--kernel/power/suspend.c5
-rw-r--r--kernel/power/swap.c53
-rw-r--r--kernel/power/user.c2
-rw-r--r--kernel/printk.c4
-rw-r--r--kernel/sched_fair.c8
-rw-r--r--kernel/trace/trace.c19
-rw-r--r--lib/debug_locks.c2
-rw-r--r--mm/hugetlb.c3
-rw-r--r--mm/ksm.c7
-rw-r--r--mm/memcontrol.c66
-rw-r--r--mm/memory-failure.c8
-rw-r--r--mm/memory_hotplug.c31
-rw-r--r--mm/mempolicy.c3
-rw-r--r--mm/nommu.c1
-rw-r--r--mm/page_alloc.c33
-rw-r--r--mm/pagewalk.c5
-rw-r--r--mm/slub.c4
-rw-r--r--mm/vmalloc.c28
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/ceph/Makefile22
-rw-r--r--net/ceph/buffer.c2
-rw-r--r--net/core/request_sock.c4
-rw-r--r--net/dccp/input.c3
-rw-r--r--net/decnet/af_decnet.c2
-rw-r--r--net/econet/af_econet.c91
-rw-r--r--net/ipv4/fib_trie.c2
-rw-r--r--net/ipv4/inet_hashtables.c3
-rw-r--r--net/ipv4/sysctl_net_ipv4.c6
-rw-r--r--net/ipv4/tcp.c2
-rw-r--r--net/ipv4/tcp_ipv4.c4
-rw-r--r--net/ipv6/addrconf.c6
-rw-r--r--net/mac80211/Kconfig2
-rw-r--r--net/sunrpc/clnt.c24
-rw-r--r--net/unix/af_unix.c37
-rw-r--r--net/unix/garbage.c9
-rw-r--r--net/xfrm/xfrm_hash.c2
-rw-r--r--scripts/gfp-translate7
-rw-r--r--scripts/kconfig/expr.h1
-rw-r--r--scripts/kconfig/lkc.h1
-rw-r--r--scripts/kconfig/menu.c11
-rw-r--r--scripts/kconfig/zconf.gperf1
-rw-r--r--scripts/kconfig/zconf.hash.c_shipped122
-rw-r--r--scripts/kconfig/zconf.tab.c_shipped570
-rw-r--r--scripts/kconfig/zconf.y21
-rw-r--r--sound/atmel/abdac.c4
-rw-r--r--sound/core/oss/pcm_oss.c19
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/oss/dev_table.c6
-rw-r--r--sound/oss/midibuf.c4
-rw-r--r--sound/oss/pss.c6
-rw-r--r--sound/oss/sequencer.c4
-rw-r--r--sound/pci/asihpi/hpioctl.c2
-rw-r--r--sound/pci/azt3328.c26
-rw-r--r--sound/pci/ctxfi/ctpcm.c16
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c85
-rw-r--r--sound/pci/hda/patch_sigmatel.c20
-rw-r--r--sound/pci/intel8x0.c6
-rw-r--r--sound/pci/mixart/mixart_hwdep.h10
-rw-r--r--sound/ppc/pmac.c12
-rw-r--r--sound/soc/atmel/Kconfig5
-rw-r--r--sound/soc/atmel/sam9g20_wm8731.c9
-rw-r--r--sound/soc/atmel/snd-soc-afeb9260.c1
-rw-r--r--sound/soc/codecs/max98088.c14
-rw-r--r--sound/soc/codecs/stac9766.c1
-rw-r--r--sound/soc/codecs/tlv320aic3x.c6
-rw-r--r--sound/soc/codecs/tpa6130a2.c4
-rw-r--r--sound/soc/codecs/uda134x.c1
-rw-r--r--sound/soc/codecs/wm8350.c9
-rw-r--r--sound/soc/codecs/wm8523.c1
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm8776.c1
-rw-r--r--sound/soc/codecs/wm8904.c2
-rw-r--r--sound/soc/codecs/wm8961.c4
-rw-r--r--sound/soc/codecs/wm8962.c5
-rw-r--r--sound/soc/codecs/wm8994.c6
-rw-r--r--sound/soc/davinci/davinci-evm.c40
-rw-r--r--sound/soc/davinci/davinci-i2s.c15
-rw-r--r--sound/soc/davinci/davinci-mcasp.c13
-rw-r--r--sound/soc/davinci/davinci-sffsdr.c2
-rw-r--r--sound/soc/davinci/davinci-vcif.c16
-rw-r--r--sound/soc/ep93xx/simone.c18
-rw-r--r--sound/soc/fsl/efika-audio-fabric.c1
-rw-r--r--sound/soc/fsl/mpc5200_dma.c1
-rw-r--r--sound/soc/fsl/mpc5200_psc_i2s.c2
-rw-r--r--sound/soc/fsl/mpc8610_hpcd.c1
-rw-r--r--sound/soc/fsl/p1022_ds.c1
-rw-r--r--sound/soc/fsl/pcm030-audio-fabric.c1
-rw-r--r--sound/soc/imx/eukrea-tlv320.c8
-rw-r--r--sound/soc/imx/imx-pcm-dma-mx2.c221
-rw-r--r--sound/soc/imx/imx-ssi.c57
-rw-r--r--sound/soc/imx/imx-ssi.h4
-rw-r--r--sound/soc/imx/phycore-ac97.c33
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c17
-rw-r--r--sound/soc/nuc900/nuc900-audio.h2
-rw-r--r--sound/soc/nuc900/nuc900-pcm.c11
-rw-r--r--sound/soc/omap/Kconfig2
-rw-r--r--sound/soc/omap/omap-mcbsp.c8
-rw-r--r--sound/soc/omap/omap3pandora.c1
-rw-r--r--sound/soc/omap/osk5912.c11
-rw-r--r--sound/soc/pxa/Kconfig1
-rw-r--r--sound/soc/pxa/corgi.c5
-rw-r--r--sound/soc/pxa/magician.c4
-rw-r--r--sound/soc/pxa/poodle.c5
-rw-r--r--sound/soc/pxa/spitz.c5
-rw-r--r--sound/soc/pxa/tosa.c5
-rw-r--r--sound/soc/s3c24xx/Kconfig1
-rw-r--r--sound/soc/s3c24xx/rx1950_uda1380.c20
-rw-r--r--sound/soc/s3c24xx/smdk_spdif.c4
-rw-r--r--sound/soc/s6000/s6000-i2s.c2
-rw-r--r--sound/soc/s6000/s6000-pcm.c2
-rw-r--r--sound/soc/s6000/s6105-ipcam.c2
-rw-r--r--sound/soc/sh/fsi.c25
-rw-r--r--sound/soc/sh/ssi.c2
-rw-r--r--sound/soc/soc-core.c9
-rw-r--r--sound/soc/soc-dapm.c4
-rw-r--r--sound/spi/at73c213.c2
-rw-r--r--tools/perf/builtin-record.c23
-rw-r--r--tools/perf/util/header.c11
-rw-r--r--tools/perf/util/symbol.c63
-rw-r--r--usr/initramfs_data.S5
688 files changed, 21876 insertions, 11807 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd
new file mode 100644
index 000000000000..90a87e2a572b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-rbd
@@ -0,0 +1,83 @@
+What:		/sys/bus/rbd/
+Date:		November 2010
+Contact:	Yehuda Sadeh <yehuda@hq.newdream.net>,
+		Sage Weil <sage@newdream.net>
+Description:
+
+Being used for adding and removing rbd block devices.
+
+Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
+
+ $ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add
+
+The snapshot name can be "-" or omitted to map the image read/write. A <dev-id>
+will be assigned for any registered block device. If snapshot is used, it will
+be mapped read-only.
+
+Removal of a device:
+
+ $ echo <dev-id> > /sys/bus/rbd/remove
+
+Entries under /sys/bus/rbd/devices/<dev-id>/
+--------------------------------------------
+
+client_id
+
+	The ceph unique client id that was assigned for this specific session.
+
+major
+
+	The block device major number.
+
+name
+
+	The name of the rbd image.
+
+pool
+
+	The pool where this rbd image resides. The pool-name pair is unique
+	per rados system.
+
+size
+
+	The size (in bytes) of the mapped block device.
+
+refresh
+
+	Writing to this file will reread the image header data and set
+	all relevant datastructures accordingly.
+
+current_snap
+
+	The current snapshot for which the device is mapped.
+
+create_snap
+
+	Create a snapshot:
+
+	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
+
+rollback_snap
+
+	Rolls back data to the specified snapshot. This goes over the entire
+	list of rados blocks and sends a rollback command to each.
+
+	 $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
+
+snap_*
+
+	A directory per each snapshot
+
+
+Entries under /sys/bus/rbd/devices/<dev-id>/snap_<snap-name>
+-------------------------------------------------------------
+
+id
+
+	The rados internal snapshot id assigned for this snapshot
+
+size
+
+	The size of the image when this snapshot was taken.
+
+
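For orientation, a hypothetical end-to-end session against the interface documented above might look as follows; the monitor address, pool, image, and snapshot names are invented for illustration, and the image is assumed to land at <dev-id> 0:

 $ echo "10.0.0.5 name=admin rbd myimage" > /sys/bus/rbd/add
 $ cat /sys/bus/rbd/devices/0/size
 $ echo mysnap > /sys/bus/rbd/devices/0/snap_create
 $ echo 0 > /sys/bus/rbd/remove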
diff --git a/Documentation/ABI/testing/sysfs-platform-asus-laptop b/Documentation/ABI/testing/sysfs-platform-asus-laptop
index 1d775390e856..41ff8ae4dee0 100644
--- a/Documentation/ABI/testing/sysfs-platform-asus-laptop
+++ b/Documentation/ABI/testing/sysfs-platform-asus-laptop
@@ -47,6 +47,20 @@ Date: January 2007
 KernelVersion:	2.6.20
 Contact:	"Corentin Chary" <corentincj@iksaif.net>
 Description:
-		Control the bluetooth device. 1 means on, 0 means off.
+		Control the wlan device. 1 means on, 0 means off.
 		This may control the led, the device or both.
 Users:		Lapsus
+
+What:		/sys/devices/platform/asus_laptop/wimax
+Date:		October 2010
+KernelVersion:	2.6.37
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Control the wimax device. 1 means on, 0 means off.
+
+What:		/sys/devices/platform/asus_laptop/wwan
+Date:		October 2010
+KernelVersion:	2.6.37
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Control the wwan (3G) device. 1 means on, 0 means off.
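A hypothetical shell session exercising the new attributes (paths as documented above; the values follow the 1-on/0-off convention):

 $ echo 1 > /sys/devices/platform/asus_laptop/wimax	# wimax radio on
 $ echo 0 > /sys/devices/platform/asus_laptop/wwan	# wwan (3G) radio off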
diff --git a/Documentation/ABI/testing/sysfs-platform-eeepc-wmi b/Documentation/ABI/testing/sysfs-platform-eeepc-wmi
new file mode 100644
index 000000000000..e4b5fef5fadd
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-eeepc-wmi
@@ -0,0 +1,10 @@
+What:		/sys/devices/platform/eeepc-wmi/cpufv
+Date:		Oct 2010
+KernelVersion:	2.6.37
+Contact:	"Corentin Chary" <corentincj@iksaif.net>
+Description:
+		Change CPU clock configuration (write-only).
+		There are three available clock configuration:
+		    * 0 -> Super Performance Mode
+		    * 1 -> High Performance Mode
+		    * 2 -> Power Saving Mode
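A minimal usage sketch for the new attribute (values per the table above; since the file is write-only, reading it back is not expected to work):

 $ echo 2 > /sys/devices/platform/eeepc-wmi/cpufv	# select Power Saving Mode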
diff --git a/Documentation/DocBook/sh.tmpl b/Documentation/DocBook/sh.tmpl
index d858d92cf6d9..4a38f604fa66 100644
--- a/Documentation/DocBook/sh.tmpl
+++ b/Documentation/DocBook/sh.tmpl
@@ -79,10 +79,6 @@
 	</sect2>
 	</sect1>
   </chapter>
-  <chapter id="clk">
-    <title>Clock Framework Extensions</title>
-!Iinclude/linux/sh_clk.h
-  </chapter>
   <chapter id="mach">
     <title>Machine Specific Interfaces</title>
     <sect1 id="dreamcast">
diff --git a/Documentation/driver-model/interface.txt b/Documentation/driver-model/interface.txt
deleted file mode 100644
index c66912bfe866..000000000000
--- a/Documentation/driver-model/interface.txt
+++ /dev/null
@@ -1,129 +0,0 @@
-
-Device Interfaces
-
-Introduction
-~~~~~~~~~~~~
-
-Device interfaces are the logical interfaces of device classes that correlate
-directly to userspace interfaces, like device nodes.
-
-Each device class may have multiple interfaces through which you can
-access the same device. An input device may support the mouse interface,
-the 'evdev' interface, and the touchscreen interface. A SCSI disk would
-support the disk interface, the SCSI generic interface, and possibly a raw
-device interface.
-
-Device interfaces are registered with the class they belong to. As devices
-are added to the class, they are added to each interface registered with
-the class. The interface is responsible for determining whether the device
-supports the interface or not.
-
-
-Programming Interface
-~~~~~~~~~~~~~~~~~~~~~
-
-struct device_interface {
-	char			* name;
-	rwlock_t		lock;
-	u32			devnum;
-	struct device_class	* devclass;
-
-	struct list_head	node;
-	struct driver_dir_entry	dir;
-
-	int (*add_device)(struct device *);
-	int (*add_device)(struct intf_data *);
-};
-
-int interface_register(struct device_interface *);
-void interface_unregister(struct device_interface *);
-
-
-An interface must specify the device class it belongs to. It is added
-to that class's list of interfaces on registration.
-
-
-Interfaces can be added to a device class at any time. Whenever it is
-added, each device in the class is passed to the interface's
-add_device callback. When an interface is removed, each device is
-removed from the interface.
-
-
-Devices
-~~~~~~~
-Once a device is added to a device class, it is added to each
-interface that is registered with the device class. The class
-is expected to place a class-specific data structure in
-struct device::class_data. The interface can use that (along with
-other fields of struct device) to determine whether or not the driver
-and/or device support that particular interface.
-
-
-Data
-~~~~
-
-struct intf_data {
-	struct list_head	node;
-	struct device_interface	* intf;
-	struct device		* dev;
-	u32			intf_num;
-};
-
-int interface_add_data(struct interface_data *);
-
-The interface is responsible for allocating and initializing a struct
-intf_data and calling interface_add_data() to add it to the device's list
-of interfaces it belongs to. This list will be iterated over when the device
-is removed from the class (instead of all possible interfaces for a class).
-This structure should probably be embedded in whatever per-device data
-structure the interface is allocating anyway.
-
-Devices are enumerated within the interface. This happens in interface_add_data()
-and the enumerated value is stored in the struct intf_data for that device.
-
-sysfs
-~~~~~
-Each interface is given a directory in the directory of the device
-class it belongs to:
-
-Interfaces get a directory in the class's directory as well:
-
-	class/
-	`-- input
-	    |-- devices
-	    |-- drivers
-	    |-- mouse
-	    `-- evdev
-
-When a device is added to the interface, a symlink is created that points
-to the device's directory in the physical hierarchy:
-
-	class/
-	`-- input
-	    |-- devices
-	    |   `-- 1 -> ../../../root/pci0/00:1f.0/usb_bus/00:1f.2-1:0/
-	    |-- drivers
-	    |   `-- usb:usb_mouse -> ../../../bus/drivers/usb_mouse/
-	    |-- mouse
-	    |   `-- 1 -> ../../../root/pci0/00:1f.0/usb_bus/00:1f.2-1:0/
-	    `-- evdev
-	        `-- 1 -> ../../../root/pci0/00:1f.0/usb_bus/00:1f.2-1:0/
-
-
-Future Plans
-~~~~~~~~~~~~
-A device interface is correlated directly with a userspace interface
-for a device, specifically a device node. For instance, a SCSI disk
-exposes at least two interfaces to userspace: the standard SCSI disk
-interface and the SCSI generic interface. It might also export a raw
-device interface.
-
-Many interfaces have a major number associated with them and each
-device gets a minor number. Or, multiple interfaces might share one
-major number, and each will receive a range of minor numbers (like in
-the case of input devices).
-
-These major and minor numbers could be stored in the interface
-structure. Major and minor allocations could happen when the interface
-is registered with the class, or via a helper function.
-
diff --git a/Documentation/edac.txt b/Documentation/edac.txt
index 0b875e8da969..9ee774de57cd 100644
--- a/Documentation/edac.txt
+++ b/Documentation/edac.txt
@@ -196,7 +196,7 @@ csrow3.
 The representation of the above is reflected in the directory tree
 in EDAC's sysfs interface. Starting in directory
 /sys/devices/system/edac/mc each memory controller will be represented
-by its own 'mcX' directory, where 'X" is the index of the MC.
+by its own 'mcX' directory, where 'X' is the index of the MC.
 
 
 	..../edac/mc/
@@ -207,7 +207,7 @@ by its own 'mcX' directory, where 'X" is the index of the MC.
 	....
 
 Under each 'mcX' directory each 'csrowX' is again represented by a
-'csrowX', where 'X" is the csrow index:
+'csrowX', where 'X' is the csrow index:
 
 
 	.../mc/mc0/
@@ -232,7 +232,7 @@ EDAC control and attribute files.
 
 
 In 'mcX' directories are EDAC control and attribute files for
-this 'X" instance of the memory controllers:
+this 'X' instance of the memory controllers:
 
 
 Counter reset control file:
@@ -343,7 +343,7 @@ Sdram memory scrubbing rate:
 'csrowX' DIRECTORIES
 
 In the 'csrowX' directories are EDAC control and attribute files for
-this 'X" instance of csrow:
+this 'X' instance of csrow:
 
 
 Total Uncorrectable Errors count attribute file:
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index a618fd99c9f0..30a70542e823 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -4,33 +4,41 @@ please mail me.
 	Geert Uytterhoeven <geert@linux-m68k.org>
 
 00-INDEX
-	- this file
+	- this file.
 arkfb.txt
 	- info on the fbdev driver for ARK Logic chips.
 aty128fb.txt
 	- info on the ATI Rage128 frame buffer driver.
 cirrusfb.txt
 	- info on the driver for Cirrus Logic chipsets.
+cmap_xfbdev.txt
+	- an introduction to fbdev's cmap structures.
 deferred_io.txt
 	- an introduction to deferred IO.
+efifb.txt
+	- info on the EFI platform driver for Intel based Apple computers.
+ep93xx-fb.txt
+	- info on the driver for EP93xx LCD controller.
 fbcon.txt
 	- intro to and usage guide for the framebuffer console (fbcon).
 framebuffer.txt
 	- introduction to frame buffer devices.
-imacfb.txt
-	- info on the generic EFI platform driver for Intel based Macs.
+gxfb.txt
+	- info on the framebuffer driver for AMD Geode GX2 based processors.
 intel810.txt
 	- documentation for the Intel 810/815 framebuffer driver.
 intelfb.txt
 	- docs for Intel 830M/845G/852GM/855GM/865G/915G/945G fb driver.
 internals.txt
 	- quick overview of frame buffer device internals.
+lxfb.txt
+	- info on the framebuffer driver for AMD Geode LX based processors.
 matroxfb.txt
 	- info on the Matrox framebuffer driver for Alpha, Intel and PPC.
+metronomefb.txt
+	- info on the driver for the Metronome display controller.
 modedb.txt
 	- info on the video mode database.
-matroxfb.txt
-	- info on the Matrox frame buffer driver.
 pvr2fb.txt
 	- info on the PowerVR 2 frame buffer driver.
 pxafb.txt
@@ -39,13 +47,23 @@ s3fb.txt
 	- info on the fbdev driver for S3 Trio/Virge chips.
 sa1100fb.txt
 	- information about the driver for the SA-1100 LCD controller.
+sh7760fb.txt
+	- info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
 sisfb.txt
 	- info on the framebuffer device driver for various SiS chips.
 sstfb.txt
 	- info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
 tgafb.txt
-	- info on the TGA (DECChip 21030) frame buffer driver
+	- info on the TGA (DECChip 21030) frame buffer driver.
+tridentfb.txt
+	info on the framebuffer driver for some Trident chip based cards.
+uvesafb.txt
+	- info on the userspace VESA (VBE2+ compliant) frame buffer device.
 vesafb.txt
-	- info on the VESA frame buffer device
+	- info on the VESA frame buffer device.
+viafb.modes
+	- list of modes for VIA Integration Graphic Chip.
+viafb.txt
+	- info on the VIA Integration Graphic Chip console framebuffer driver.
 vt8623fb.txt
 	- info on the fb driver for the graphics core in VIA VT8623 chipsets.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index ed7e5efc06d8..55c28b79d8dc 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -660,11 +660,10 @@ struct address_space_operations {
 releasepage: releasepage is called on PagePrivate pages to indicate
 	that the page should be freed if possible.  ->releasepage
 	should remove any private data from the page and clear the
-	PagePrivate flag. It may also remove the page from the
-	address_space.  If this fails for some reason, it may indicate
-	failure with a 0 return value.
-	This is used in two distinct though related cases.  The first
-	is when the VM finds a clean page with no active users and
+	PagePrivate flag.  If releasepage() fails for some reason, it must
+	indicate failure with a 0 return value.
+	releasepage() is used in two distinct though related cases.  The
+	first is when the VM finds a clean page with no active users and
 	wants to make it a free page.  If ->releasepage succeeds, the
 	page will be removed from the address_space and become free.
 
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 92e83e53148f..cdd2a6e8a3b7 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2385,6 +2385,11 @@ and is between 256 and 4096 characters. It is defined in the file
 			improve throughput, but will also increase the
 			amount of memory reserved for use by the client.
 
+	swapaccount[=0|1]
+			[KNL] Enable accounting of swap in memory resource
+			controller if no parameter or 1 is given or disable
+			it if 0 is given (See Documentation/cgroups/memory.txt)
+
 	swiotlb=	[IA-64]	Number of I/O TLB slabs
 
 	switches=	[HW,M68k]
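As an illustration (not taken from the patch), the new parameter would be appended to the kernel command line like any other boot option, e.g.:

 root=/dev/sda1 ro quiet swapaccount=0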
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index fe95105992c5..3c5e465296e1 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -144,6 +144,7 @@ tcp_adv_win_scale - INTEGER
 	Count buffering overhead as bytes/2^tcp_adv_win_scale
 	(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 	if it is <= 0.
+	Possible values are [-31, 31], inclusive.
 	Default: 2
 
 tcp_allowed_congestion_control - STRING
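A worked example of the formula above, assuming the default tcp_adv_win_scale of 2 and a 65536-byte buffer; since the scale is positive, the overhead is bytes/2^2 and the remainder is advertised as window:

 $ echo $((65536 / (1 << 2)))		# buffering overhead in bytes
 16384
 $ echo $((65536 - 65536 / (1 << 2)))	# bytes left for the advertised window
 49152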
diff --git a/Documentation/sh/clk.txt b/Documentation/sh/clk.txt
deleted file mode 100644
index 114b595cfa97..000000000000
--- a/Documentation/sh/clk.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Clock framework on SuperH architecture
-
-The framework on SH extends existing API by the function clk_set_rate_ex,
-which prototype is as follows:
-
-	clk_set_rate_ex (struct clk *clk, unsigned long rate, int algo_id)
-
-The algo_id parameter is used to specify algorithm used to recalculate clocks,
-adjanced to clock, specified as first argument. It is assumed that algo_id==0
-means no changes to adjanced clock
-
-Internally, the clk_set_rate_ex forwards request to clk->ops->set_rate method,
-if it is present in ops structure. The method should set the clock rate and adjust
-all needed clocks according to the passed algo_id.
-Exact values for algo_id are machine-dependent. For the sh7722, the following
-values are defined:
-
-	NO_CHANGE	= 0,
-	IUS_N1_N1,	/* I:U = N:1, U:Sh = N:1 */
-	IUS_322,	/* I:U:Sh = 3:2:2 */
-	IUS_522,	/* I:U:Sh = 5:2:2 */
-	IUS_N11,	/* I:U:Sh = N:1:1 */
-	SB_N1,		/* Sh:B = N:1 */
-	SB3_N1,		/* Sh:B3 = N:1 */
-	SB3_32,		/* Sh:B3 = 3:2 */
-	SB3_43,		/* Sh:B3 = 4:3 */
-	SB3_54,		/* Sh:B3 = 5:4 */
-	BP_N1,		/* B:P = N:1 */
-	IP_N1		/* I:P = N:1 */
-
-Each of these constants means relation between clocks that can be set via the FRQCR
-register
diff --git a/MAINTAINERS b/MAINTAINERS
index a92c994ba935..1a1c27b9c557 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1359,7 +1359,7 @@ F: include/net/bluetooth/
 
 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
-L:	bonding-devel@lists.sourceforge.net
+L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
 F:	drivers/net/bonding/
@@ -2060,7 +2060,7 @@ F: Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
 M:	Greg Kroah-Hartman <gregkh@suse.de>
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
 S:	Supported
 F:	Documentation/kobject.txt
 F:	drivers/base/
@@ -2080,7 +2080,7 @@ F: include/drm/
 
 INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
 M:	Chris Wilson <chris@chris-wilson.co.uk>
-L:	intel-gfx@lists.freedesktop.org
+L:	intel-gfx@lists.freedesktop.org (subscribers-only)
 L:	dri-devel@lists.freedesktop.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
 S:	Supported
@@ -2444,10 +2444,12 @@ F: drivers/net/wan/sdla.c
 FRAMEBUFFER LAYER
 L:	linux-fbdev@vger.kernel.org
 W:	http://linux-fbdev.sourceforge.net/
+Q:	http://patchwork.kernel.org/project/linux-fbdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/lethal/fbdev-2.6.git
 S:	Orphan
 F:	Documentation/fb/
-F:	drivers/video/fb*
+F:	drivers/video/
+F:	include/video/
 F:	include/linux/fb.h
 
 FREESCALE DMA DRIVER
@@ -4062,9 +4064,8 @@ F: drivers/scsi/NCR_D700.*
 
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 M:	Faisal Latif <faisal.latif@intel.com>
-M:	Chien Tung <chien.tin.tung@intel.com>
 L:	linux-rdma@vger.kernel.org
-W:	http://www.neteffect.com
+W:	http://www.intel.com/Products/Server/Adapters/Server-Cluster/Server-Cluster-overview.htm
 S:	Supported
 F:	drivers/infiniband/hw/nes/
 
@@ -5837,6 +5838,8 @@ M: Chris Metcalf <cmetcalf@tilera.com>
 W:	http://www.tilera.com/scm/
 S:	Supported
 F:	arch/tile/
+F:	drivers/char/hvc_tile.c
+F:	drivers/net/tile/
 
 TLAN NETWORK DRIVER
 M:	Samuel Chessman <chessman@tux.org>
diff --git a/Makefile b/Makefile
index b31d21377e4c..3d94974542ea 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 37
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc5
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index db524e75c4a2..f1d9297b1050 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -9,7 +9,7 @@ config ARM
 	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
-	select HAVE_KPROBES if (!XIP_KERNEL)
+	select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
 	select HAVE_KRETPROBES if (HAVE_KPROBES)
 	select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
 	select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
diff --git a/arch/arm/boot/Makefile b/arch/arm/boot/Makefile
index 4a590f4113e2..4d26f2c52a75 100644
--- a/arch/arm/boot/Makefile
+++ b/arch/arm/boot/Makefile
@@ -70,12 +70,7 @@ else
 $(obj)/uImage: LOADADDR=$(ZRELADDR)
 endif
 
-ifeq ($(CONFIG_THUMB2_KERNEL),y)
-# Set bit 0 to 1 so that "mov pc, rx" switches to Thumb-2 mode
-$(obj)/uImage: STARTADDR=$(shell echo $(LOADADDR) | sed -e "s/.$$/1/")
-else
 $(obj)/uImage: STARTADDR=$(LOADADDR)
-endif
 
 $(obj)/uImage: $(obj)/zImage FORCE
 	$(call if_changed,uimage)
diff --git a/arch/arm/boot/bootp/init.S b/arch/arm/boot/bootp/init.S
index 8b0de41c3dcb..78b508075161 100644
--- a/arch/arm/boot/bootp/init.S
+++ b/arch/arm/boot/bootp/init.S
@@ -73,6 +73,8 @@ move: ldmia r4!, {r7 - r10} @ move 32-bytes at a time
 
 	.size	_start, . - _start
 
+	.align
+
 	.type	data,#object
 data:	.word	initrd_start		@ source initrd address
 	.word	initrd_phys		@ destination initrd address
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 6825c34646d4..7193884ed8b0 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -125,9 +125,13 @@ wait: mrc p14, 0, pc, c0, c1, 0
  * sort out different calling conventions
  */
 	.align
+	.arm				@ Always enter in ARM state
 start:
 	.type	start,#function
-	.rept	8
+ THUMB(	adr	r12, BSYM(1f)	)
+ THUMB(	bx	r12		)
+ THUMB(	.rept	6		)
+ ARM(	.rept	8		)
 	mov	r0, r0
 	.endr
 
@@ -135,6 +139,7 @@ start:
 		.word	0x016f2818		@ Magic numbers to help the loader
 		.word	start			@ absolute load/run zImage address
 		.word	_edata			@ zImage end address
+ THUMB(		.thumb			)
 1:		mov	r7, r1			@ save architecture ID
 		mov	r8, r2			@ save atags pointer
 
@@ -174,7 +179,8 @@ not_angel:
 		ldr	sp, [r0, #28]
 #ifdef CONFIG_AUTO_ZRELADDR
 		@ determine final kernel image address
-		and	r4, pc, #0xf8000000
+		mov	r4, pc
+		and	r4, r4, #0xf8000000
 		add	r4, r4, #TEXT_OFFSET
 #else
 		ldr	r4, =zreladdr
@@ -445,7 +451,8 @@ __setup_mmu: sub r3, r4, #16384 @ Page directory size
 		 */
 		mov	r1, #0x1e
 		orr	r1, r1, #3 << 10
-		mov	r2, pc, lsr #20
+		mov	r2, pc
+		mov	r2, r2, lsr #20
 		orr	r1, r1, r2, lsl #20
 		add	r0, r3, r2, lsl #2
 		str	r1, [r0], #4
@@ -1084,6 +1091,6 @@ memdump: mov r12, r0
 reloc_end:
 
 		.align
-		.section ".stack", "w"
+		.section ".stack", "aw", %nobits
 user_stack:	.space	4096
 user_stack_end:
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index d08168941bd6..366a924019ac 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -57,7 +57,7 @@ SECTIONS
   .bss			: { *(.bss) }
   _end = .;
 
-  .stack (NOLOAD)	: { *(.stack) }
+  .stack		: { *(.stack) }
 
   .stab 0		: { *(.stab) }
   .stabstr 0		: { *(.stabstr) }
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 772f95f1aecd..e6388dcd8cfa 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -146,9 +146,15 @@ static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
 	unsigned int shift = (irq % 4) * 8;
 	unsigned int cpu = cpumask_first(mask_val);
 	u32 val;
+	struct irq_desc *desc;
 
 	spin_lock(&irq_controller_lock);
-	irq_desc[irq].node = cpu;
+	desc = irq_to_desc(irq);
+	if (desc == NULL) {
+		spin_unlock(&irq_controller_lock);
+		return -EINVAL;
+	}
+	desc->node = cpu;
 	val = readl(reg) & ~(0xff << shift);
 	val |= 1 << (cpu + shift);
 	writel(val, reg);
@@ -210,7 +216,7 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
 void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
 			  unsigned int irq_start)
 {
-	unsigned int max_irq, i;
+	unsigned int gic_irqs, irq_limit, i;
 	u32 cpumask = 1 << smp_processor_id();
 
 	if (gic_nr >= MAX_GIC_NR)
@@ -226,47 +232,49 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
 
 	/*
 	 * Find out how many interrupts are supported.
-	 */
-	max_irq = readl(base + GIC_DIST_CTR) & 0x1f;
-	max_irq = (max_irq + 1) * 32;
-
-	/*
 	 * The GIC only supports up to 1020 interrupt sources.
-	 * Limit this to either the architected maximum, or the
-	 * platform maximum.
 	 */
-	if (max_irq > max(1020, NR_IRQS))
-		max_irq = max(1020, NR_IRQS);
+	gic_irqs = readl(base + GIC_DIST_CTR) & 0x1f;
+	gic_irqs = (gic_irqs + 1) * 32;
+	if (gic_irqs > 1020)
+		gic_irqs = 1020;
 
 	/*
 	 * Set all global interrupts to be level triggered, active low.
 	 */
-	for (i = 32; i < max_irq; i += 16)
+	for (i = 32; i < gic_irqs; i += 16)
 		writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
 
 	/*
 	 * Set all global interrupts to this CPU only.
 	 */
-	for (i = 32; i < max_irq; i += 4)
+	for (i = 32; i < gic_irqs; i += 4)
 		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
 	/*
 	 * Set priority on all global interrupts.
 	 */
-	for (i = 32; i < max_irq; i += 4)
+	for (i = 32; i < gic_irqs; i += 4)
 		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
 	/*
 	 * Disable all interrupts. Leave the PPI and SGIs alone
 	 * as these enables are banked registers.
 	 */
-	for (i = 32; i < max_irq; i += 32)
+	for (i = 32; i < gic_irqs; i += 32)
 		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
 	/*
+	 * Limit number of interrupts registered to the platform maximum
+	 */
+	irq_limit = gic_data[gic_nr].irq_offset + gic_irqs;
+	if (WARN_ON(irq_limit > NR_IRQS))
+		irq_limit = NR_IRQS;
+
+	/*
 	 * Setup the Linux IRQ subsystem.
 	 */
-	for (i = irq_start; i < gic_data[gic_nr].irq_offset + max_irq; i++) {
+	for (i = irq_start; i < irq_limit; i++) {
 		set_irq_chip(i, &gic_chip);
 		set_irq_chip_data(i, &gic_data[gic_nr]);
 		set_irq_handler(i, handle_level_irq);
diff --git a/arch/arm/configs/at91rm9200_defconfig b/arch/arm/configs/at91rm9200_defconfig
new file mode 100644
index 000000000000..38cb7c985426
--- /dev/null
+++ b/arch/arm/configs/at91rm9200_defconfig
@@ -0,0 +1,341 @@
+CONFIG_EXPERIMENTAL=y
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_SWAP is not set
+CONFIG_SYSVIPC=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_SYSFS_DEPRECATED_V2=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_MODULES=y
+CONFIG_MODULE_FORCE_LOAD=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+CONFIG_ARCH_AT91=y
+CONFIG_MACH_ONEARM=y
+CONFIG_ARCH_AT91RM9200DK=y
+CONFIG_MACH_AT91RM9200EK=y
+CONFIG_MACH_CSB337=y
+CONFIG_MACH_CSB637=y
+CONFIG_MACH_CARMEVA=y
+CONFIG_MACH_ATEB9200=y
+CONFIG_MACH_KB9200=y
+CONFIG_MACH_PICOTUX2XX=y
+CONFIG_MACH_KAFA=y
+CONFIG_MACH_ECBAT91=y
+CONFIG_MACH_YL9200=y
+CONFIG_MACH_CPUAT91=y
+CONFIG_MACH_ECO920=y
+CONFIG_MTD_AT91_DATAFLASH_CARD=y
+CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
+CONFIG_AT91_TIMER_HZ=100
+# CONFIG_ARM_THUMB is not set
+CONFIG_PCCARD=y
+CONFIG_AT91_CF=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_AEABI=y
+CONFIG_LEDS=y
+CONFIG_LEDS_CPU=y
+CONFIG_ZBOOT_ROM_TEXT=0x10000000
+CONFIG_ZBOOT_ROM_BSS=0x20040000
+CONFIG_KEXEC=y
+CONFIG_FPE_NWFPE=y
+CONFIG_BINFMT_MISC=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_NET_IPGRE=m
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+CONFIG_INET_XFRM_MODE_TRANSPORT=m
+CONFIG_INET_XFRM_MODE_TUNNEL=m
+CONFIG_INET_XFRM_MODE_BEET=m
+CONFIG_IPV6_PRIVACY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_MIP6=m
+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_BT=m
+CONFIG_BT_L2CAP=m
+CONFIG_BT_SCO=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=m
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=m
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_MTD=y
+CONFIG_MTD_CONCAT=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_AFS_PARTS=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_INTELEXT=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_MTD_PLATRAM=y
+CONFIG_MTD_DATAFLASH=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_ATMEL=y
+CONFIG_MTD_NAND_PLATFORM=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_GLUEBI=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_NBD=y
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_ATMEL_TCLIB=y
+CONFIG_EEPROM_LEGACY=m
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=m
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=m
+CONFIG_SCSI_MULTI_LUN=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+CONFIG_TUN=m
+CONFIG_PHYLIB=y
+CONFIG_DAVICOM_PHY=y
+CONFIG_SMSC_PHY=y
+CONFIG_MICREL_PHY=y
+CONFIG_NET_ETHERNET=y
+CONFIG_ARM_AT91_ETHER=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+CONFIG_USB_CATC=m
+CONFIG_USB_KAWETH=m
+CONFIG_USB_PEGASUS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_DM9601=m
+CONFIG_USB_NET_GL620A=m
+CONFIG_USB_NET_PLUSB=m
+CONFIG_USB_NET_RNDIS_HOST=m
+CONFIG_USB_ALI_M5632=y
+CONFIG_USB_AN2720=y
+CONFIG_USB_EPSON2888=y
+CONFIG_PPP=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPP_FILTER=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_BSDCOMP=y
+CONFIG_PPP_MPPE=m
+CONFIG_PPPOE=m
+CONFIG_SLIP=m
+CONFIG_SLIP_COMPRESSED=y
+CONFIG_SLIP_SMART=y
+CONFIG_SLIP_MODE_SLIP6=y
+# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
+CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
+CONFIG_INPUT_EVDEV=y
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_INPUT_MOUSE is not set
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_SERIAL_ATMEL=y
+CONFIG_SERIAL_ATMEL_CONSOLE=y
+CONFIG_LEGACY_PTY_COUNT=32
+CONFIG_HW_RANDOM=y
+CONFIG_I2C=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_GPIO=y
+CONFIG_SPI=y
+CONFIG_SPI_ATMEL=y
+CONFIG_SPI_BITBANG=y
+CONFIG_GPIO_SYSFS=y
+CONFIG_HWMON=m
+CONFIG_SENSORS_ADM1021=m
+CONFIG_SENSORS_ADM1025=m
+CONFIG_SENSORS_ADM1026=m
+CONFIG_SENSORS_ADM1029=m
+CONFIG_SENSORS_ADM1031=m
+CONFIG_SENSORS_ADM9240=m
+CONFIG_SENSORS_DS1621=m
+CONFIG_SENSORS_GL518SM=m
+CONFIG_SENSORS_GL520SM=m
+CONFIG_SENSORS_IT87=m
+CONFIG_SENSORS_LM63=m
+CONFIG_SENSORS_LM73=m
+CONFIG_SENSORS_LM75=m
+CONFIG_SENSORS_LM77=m
+CONFIG_SENSORS_LM78=m
+CONFIG_SENSORS_LM80=m
+CONFIG_SENSORS_LM83=m
+CONFIG_SENSORS_LM85=m
+CONFIG_SENSORS_LM87=m
+CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM92=m
+CONFIG_SENSORS_MAX1619=m
+CONFIG_SENSORS_PCF8591=m
+CONFIG_SENSORS_SMSC47B397=m
+CONFIG_SENSORS_W83781D=m
+CONFIG_SENSORS_W83791D=m
+CONFIG_SENSORS_W83792D=m
+CONFIG_SENSORS_W83793=m
+CONFIG_SENSORS_W83L785TS=m
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_NOWAYOUT=y
+CONFIG_AT91RM9200_WATCHDOG=y
+CONFIG_FB=y
+CONFIG_FB_MODE_HELPERS=y
+CONFIG_FB_TILEBLITTING=y
+CONFIG_FB_S1D13XXX=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_GENERIC is not set
+CONFIG_DISPLAY_SUPPORT=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_FONTS=y
+CONFIG_FONT_MINI_4x6=y
+CONFIG_LOGO=y
+# CONFIG_LOGO_LINUX_MONO is not set
+# CONFIG_LOGO_LINUX_VGA16 is not set
+CONFIG_USB=y
+CONFIG_USB_DEVICEFS=y
+# CONFIG_USB_DEVICE_CLASS is not set
+CONFIG_USB_MON=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_ACM=m
+CONFIG_USB_PRINTER=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_SERIAL=y
+CONFIG_USB_SERIAL_CONSOLE=y
+CONFIG_USB_SERIAL_GENERIC=y
+CONFIG_USB_SERIAL_FTDI_SIO=y
+CONFIG_USB_SERIAL_KEYSPAN=y
+CONFIG_USB_SERIAL_KEYSPAN_MPR=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
+CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19=y
+CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
+CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
+CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
+CONFIG_USB_SERIAL_MCT_U232=y
+CONFIG_USB_SERIAL_PL2303=y
+CONFIG_USB_GADGET=y
+CONFIG_USB_ETH=m
+CONFIG_USB_MASS_STORAGE=m
+CONFIG_MMC=y
+CONFIG_MMC_AT91=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_GPIO=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_HCTOSYS is not set
+CONFIG_RTC_DRV_DS1307=y
+CONFIG_RTC_DRV_PCF8563=y
+CONFIG_RTC_DRV_AT91RM9200=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_XATTR is not set
+CONFIG_REISERFS_FS=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_ISO9660_FS=y
+CONFIG_JOLIET=y
+CONFIG_ZISOFS=y
+CONFIG_UDF_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_NTFS_FS=m
+CONFIG_TMPFS=y
+CONFIG_CONFIGFS_FS=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+CONFIG_JFFS2_LZO=y
+CONFIG_JFFS2_RUBIN=y
+CONFIG_CRAMFS=y
+CONFIG_MINIX_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_NFS_V3_ACL=y
+CONFIG_NFS_V4=y
+CONFIG_ROOT_NFS=y
+CONFIG_NFSD=y
+CONFIG_SMB_FS=m
+CONFIG_CIFS=m
+CONFIG_PARTITION_ADVANCED=y
+CONFIG_MAC_PARTITION=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=m
+CONFIG_NLS_CODEPAGE_775=m
+CONFIG_NLS_CODEPAGE_850=m
+CONFIG_NLS_CODEPAGE_852=m
+CONFIG_NLS_CODEPAGE_855=m
+CONFIG_NLS_CODEPAGE_857=m
+CONFIG_NLS_CODEPAGE_860=m
+CONFIG_NLS_CODEPAGE_861=m
+CONFIG_NLS_CODEPAGE_862=m
+CONFIG_NLS_CODEPAGE_863=m
+CONFIG_NLS_CODEPAGE_864=m
+CONFIG_NLS_CODEPAGE_865=m
+CONFIG_NLS_CODEPAGE_866=m
+CONFIG_NLS_CODEPAGE_869=m
+CONFIG_NLS_CODEPAGE_936=m
+CONFIG_NLS_CODEPAGE_950=m
+CONFIG_NLS_CODEPAGE_932=m
+CONFIG_NLS_CODEPAGE_949=m
+CONFIG_NLS_CODEPAGE_874=m
+CONFIG_NLS_ISO8859_8=m
+CONFIG_NLS_CODEPAGE_1250=m
+CONFIG_NLS_CODEPAGE_1251=m
+CONFIG_NLS_ASCII=m
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=m
+CONFIG_NLS_ISO8859_3=m
+CONFIG_NLS_ISO8859_4=m
+CONFIG_NLS_ISO8859_5=m
+CONFIG_NLS_ISO8859_6=m
+CONFIG_NLS_ISO8859_7=m
+CONFIG_NLS_ISO8859_9=m
+CONFIG_NLS_ISO8859_13=m
+CONFIG_NLS_ISO8859_14=m
+CONFIG_NLS_ISO8859_15=m
+CONFIG_NLS_KOI8_R=m
+CONFIG_NLS_KOI8_U=m
+CONFIG_NLS_UTF8=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_PCBC=y
+CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/at91rm9200dk_defconfig b/arch/arm/configs/at91rm9200dk_defconfig
deleted file mode 100644
index 4438e64f3bfb..000000000000
--- a/arch/arm/configs/at91rm9200dk_defconfig
+++ /dev/null
@@ -1,72 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91RM9200DK=y
-CONFIG_MACH_ECO920=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/at91rm9200ek_defconfig b/arch/arm/configs/at91rm9200ek_defconfig
deleted file mode 100644
index ccd517c64bc7..000000000000
--- a/arch/arm/configs/at91rm9200ek_defconfig
+++ /dev/null
@@ -1,73 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_AT91RM9200EK=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_S1D13XXX=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/ateb9200_defconfig b/arch/arm/configs/ateb9200_defconfig
deleted file mode 100644
index 1b0e9a1689bb..000000000000
--- a/arch/arm/configs/ateb9200_defconfig
+++ /dev/null
@@ -1,131 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_PROFILING=y
-CONFIG_OPROFILE=m
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_ATEB9200=y
-CONFIG_PCCARD=m
-CONFIG_AT91_CF=m
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_PM=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-# CONFIG_IPV6 is not set
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK_RO=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_BLK_DEV_NBD=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_NETDEVICES=y
-CONFIG_DUMMY=m
-CONFIG_TUN=m
-CONFIG_PHYLIB=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-CONFIG_USB_USBNET=y
-CONFIG_USB_NET_GL620A=y
-CONFIG_USB_NET_PLUSB=y
-CONFIG_USB_NET_RNDIS_HOST=y
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_PPP=m
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_SYNC_TTY=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPPOE=m
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_GPIO=m
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_SOUND=y
-CONFIG_USB_HID=m
-CONFIG_HID_PID=y
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=m
-CONFIG_USB_STORAGE_DATAFAB=m
-CONFIG_USB_STORAGE_FREECOM=m
-CONFIG_USB_STORAGE_USBAT=m
-CONFIG_USB_STORAGE_SDDR09=m
-CONFIG_USB_STORAGE_SDDR55=m
-CONFIG_USB_STORAGE_JUMPSHOT=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=m
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_USB_GADGET=m
-CONFIG_USB_ETH=m
-CONFIG_USB_GADGETFS=m
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_MMC=m
-CONFIG_MMC_DEBUG=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-CONFIG_REISERFS_FS=m
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NTFS_FS=m
-CONFIG_NTFS_RW=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V4=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_BSD_DISKLABEL=y
-CONFIG_MINIX_SUBPARTITION=y
-CONFIG_SOLARIS_X86_PARTITION=y
-CONFIG_UNIXWARE_DISKLABEL=y
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_UTF8=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRC16=m
-CONFIG_LIBCRC32C=m
diff --git a/arch/arm/configs/carmeva_defconfig b/arch/arm/configs/carmeva_defconfig
deleted file mode 100644
index ac64dbd8a49c..000000000000
--- a/arch/arm/configs/carmeva_defconfig
+++ /dev/null
@@ -1,47 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-# CONFIG_HOTPLUG is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CARMEVA=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_PNP=y
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_SERIO=m
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_MMC=m
-CONFIG_MMC_DEBUG=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-# CONFIG_DNOTIFY is not set
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_NFSD=y
diff --git a/arch/arm/configs/cpuat91_defconfig b/arch/arm/configs/cpuat91_defconfig
deleted file mode 100644
index 022aeb55b676..000000000000
--- a/arch/arm/configs/cpuat91_defconfig
+++ /dev/null
@@ -1,112 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CPUAT91=y
-CONFIG_AT91_TIMER_HZ=100
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-# CONFIG_MISC_DEVICES is not set
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_SCSI_MULTI_LUN=y
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_PPP=y
-CONFIG_PPP_ASYNC=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_BSDCOMP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-CONFIG_GPIO_SYSFS=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_ETH=m
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_GPIO=y
-CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_PCF8563=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
-CONFIG_AUTOFS4_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_CRAMFS=y
-CONFIG_MINIX_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_NLS_UTF8=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/csb337_defconfig b/arch/arm/configs/csb337_defconfig
deleted file mode 100644
index a24c448840c4..000000000000
--- a/arch/arm/configs/csb337_defconfig
+++ /dev/null
@@ -1,104 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CSB337=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=y
-CONFIG_USB_SERIAL_KEYSPAN=y
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
-CONFIG_USB_SERIAL_MCT_U232=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_HCTOSYS_DEVICE="rtc1"
-# CONFIG_RTC_INTF_SYSFS is not set
-CONFIG_RTC_DRV_DS1307=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/csb637_defconfig b/arch/arm/configs/csb637_defconfig
deleted file mode 100644
index 98552adac5fb..000000000000
--- a/arch/arm/configs/csb637_defconfig
+++ /dev/null
@@ -1,98 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SYSFS_DEPRECATED_V2=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_CSB637=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,38400 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_SCSI=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_SERIAL=y
-CONFIG_USB_SERIAL_CONSOLE=y
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_FTDI_SIO=y
-CONFIG_USB_SERIAL_KEYSPAN=y
-CONFIG_USB_SERIAL_KEYSPAN_MPR=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y
-CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19=y
-CONFIG_USB_SERIAL_KEYSPAN_USA18X=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y
-CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49W=y
-CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y
-CONFIG_USB_SERIAL_MCT_U232=y
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/ecbat91_defconfig b/arch/arm/configs/ecbat91_defconfig
deleted file mode 100644
index 6bb6abdcea8c..000000000000
--- a/arch/arm/configs/ecbat91_defconfig
+++ /dev/null
@@ -1,99 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_ECBAT91=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_PREEMPT=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="rootfstype=reiserfs root=/dev/mmcblk0p1 console=ttyS0,115200n8 rootdelay=1"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_IPV6 is not set
-CONFIG_CFG80211=y
-CONFIG_MAC80211=y
-# CONFIG_STANDALONE is not set
-# CONFIG_PREVENT_FIRMWARE_BUILD is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_AFS_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_DATAFLASH=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-CONFIG_PPP=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_SPI=y
-CONFIG_SPI_BITBANG=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-# CONFIG_VGA_CONSOLE is not set
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_PRINTER=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_DEBUG_USER=y
-CONFIG_CRYPTO_PCBC=y
-CONFIG_CRYPTO_SHA1=y
diff --git a/arch/arm/configs/kafa_defconfig b/arch/arm/configs/kafa_defconfig
deleted file mode 100644
index 896dbe00dc6e..000000000000
--- a/arch/arm/configs/kafa_defconfig
+++ /dev/null
@@ -1,61 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_KAFA=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PREEMPT=y
-CONFIG_LEDS=y
-CONFIG_LEDS_CPU=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20800000,10M root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK_RO=y
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-CONFIG_LEGACY_PTY_COUNT=32
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_GPIO=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_RTC_CLASS=y
-# CONFIG_RTC_HCTOSYS is not set
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT3_FS=y
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3=y
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_DES=y
diff --git a/arch/arm/configs/kb9202_defconfig b/arch/arm/configs/kb9202_defconfig
deleted file mode 100644
index 9f906a85f5c2..000000000000
--- a/arch/arm/configs/kb9202_defconfig
+++ /dev/null
@@ -1,127 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_AUDIT=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_KALLSYMS_EXTRA_PASS=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_KB9200=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x10000000
-CONFIG_ZBOOT_ROM_BSS=0x20040000
-CONFIG_CMDLINE="noinitrd root=/dev/mtdblock0 rootfstype=jffs2 mem=64M"
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_UBI=y
-CONFIG_MTD_UBI_GLUEBI=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=16384
-CONFIG_ATMEL_TCLIB=y
-CONFIG_ATMEL_SSC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_MULTI_LUN=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SPI_ATTRS=m
-# CONFIG_SCSI_LOWLEVEL is not set
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_HW_RANDOM is not set
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_AT91RM9200_WATCHDOG=y
-CONFIG_FB=y
-CONFIG_FB_MODE_HELPERS=y
-CONFIG_FB_TILEBLITTING=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-# CONFIG_LCD_CLASS_DEVICE is not set
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-# CONFIG_BACKLIGHT_GENERIC is not set
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_FONTS=y
-CONFIG_FONT_MINI_4x6=y
-# CONFIG_HID_SUPPORT is not set
-CONFIG_USB=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_LIBUSUAL=y
-CONFIG_MMC=y
-CONFIG_MMC_AT91=m
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT3_FS=y
-# CONFIG_DNOTIFY is not set
-CONFIG_INOTIFY=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_CONFIGFS_FS=y
-CONFIG_JFFS2_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_ROOT_NFS=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_UTF8=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_SCHED_DEBUG is not set
-# CONFIG_DEBUG_PREEMPT is not set
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/arm/configs/onearm_defconfig b/arch/arm/configs/onearm_defconfig
deleted file mode 100644
index 1579857aeeaa..000000000000
--- a/arch/arm/configs/onearm_defconfig
+++ /dev/null
@@ -1,80 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EMBEDDED=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_ONEARM=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_PCCARD=y
-CONFIG_AT91_CF=y
-CONFIG_LEDS=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=ttyS0,115200 root=/dev/nfs ip=bootp mem=64M"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IPV6=y
-# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET6_XFRM_MODE_BEET is not set
-# CONFIG_IPV6_SIT is not set
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_NBD=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_NETDEVICES=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=y
-# CONFIG_USB_HID is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_GADGET=y
-CONFIG_MMC=y
-CONFIG_EXT2_FS=y
-CONFIG_INOTIFY=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V3_ACL=y
-CONFIG_ROOT_NFS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/configs/picotux200_defconfig b/arch/arm/configs/picotux200_defconfig
deleted file mode 100644
index 4c9afa478d57..000000000000
--- a/arch/arm/configs/picotux200_defconfig
+++ /dev/null
@@ -1,242 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=m
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EMBEDDED=y
-# CONFIG_KALLSYMS is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_MACH_PICOTUX2XX=y
-CONFIG_AT91_PROGRAMMABLE_CLOCKS=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_KEXEC=y
-CONFIG_FPE_NWFPE=y
-CONFIG_BINFMT_MISC=m
-CONFIG_NET=y
-CONFIG_PACKET=m
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=m
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_NET_IPIP=m
-CONFIG_NET_IPGRE=m
-CONFIG_INET_AH=m
-CONFIG_INET_ESP=m
-CONFIG_INET_IPCOMP=m
-CONFIG_INET_XFRM_MODE_TRANSPORT=m
-CONFIG_INET_XFRM_MODE_TUNNEL=m
-CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_INET_DIAG=m
-CONFIG_IPV6_PRIVACY=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_MIP6=m
-CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_BRIDGE=m
-CONFIG_VLAN_8021Q=m
-CONFIG_BT=m
-CONFIG_BT_L2CAP=m
-CONFIG_BT_SCO=m
-CONFIG_BT_RFCOMM=m
-CONFIG_BT_RFCOMM_TTY=y
-CONFIG_BT_BNEP=m
-CONFIG_BT_BNEP_MC_FILTER=y
-CONFIG_BT_BNEP_PROTO_FILTER=y
-CONFIG_BT_HIDP=m
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_CFI_AMDSTD=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_BLK_DEV_LOOP=m
-CONFIG_EEPROM_LEGACY=m
-CONFIG_SCSI=m
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_BLK_DEV_SR_VENDOR=y
-CONFIG_CHR_DEV_SG=m
-CONFIG_NETDEVICES=y
-CONFIG_TUN=m
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-CONFIG_USB_CATC=m
-CONFIG_USB_KAWETH=m
-CONFIG_USB_PEGASUS=m
-CONFIG_USB_RTL8150=m
-CONFIG_USB_USBNET=m
-CONFIG_USB_NET_DM9601=m
-CONFIG_USB_NET_GL620A=m
-CONFIG_USB_NET_PLUSB=m
-CONFIG_USB_NET_MCS7830=m
-CONFIG_USB_NET_RNDIS_HOST=m
-CONFIG_USB_ALI_M5632=y
-CONFIG_USB_AN2720=y
-CONFIG_USB_EPSON2888=y
-CONFIG_USB_KC2190=y
-CONFIG_PPP=m
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_ASYNC=m
-CONFIG_PPP_DEFLATE=m
-CONFIG_PPP_BSDCOMP=m
-CONFIG_PPP_MPPE=m
-CONFIG_PPPOE=m
-CONFIG_SLIP=m
-CONFIG_SLIP_COMPRESSED=y
-CONFIG_SLIP_SMART=y
-CONFIG_SLIP_MODE_SLIP6=y
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_I2C=m
-CONFIG_I2C_CHARDEV=m
-CONFIG_I2C_GPIO=m
-CONFIG_HWMON=m
-CONFIG_SENSORS_ADM1021=m
-CONFIG_SENSORS_ADM1025=m
-CONFIG_SENSORS_ADM1026=m
-CONFIG_SENSORS_ADM1029=m
-CONFIG_SENSORS_ADM1031=m
-CONFIG_SENSORS_ADM9240=m
-CONFIG_SENSORS_DS1621=m
-CONFIG_SENSORS_GL518SM=m
-CONFIG_SENSORS_GL520SM=m
-CONFIG_SENSORS_IT87=m
-CONFIG_SENSORS_LM63=m
-CONFIG_SENSORS_LM75=m
-CONFIG_SENSORS_LM77=m
-CONFIG_SENSORS_LM78=m
-CONFIG_SENSORS_LM80=m
-CONFIG_SENSORS_LM83=m
-CONFIG_SENSORS_LM85=m
-CONFIG_SENSORS_LM87=m
-CONFIG_SENSORS_LM90=m
-CONFIG_SENSORS_LM92=m
-CONFIG_SENSORS_MAX1619=m
-CONFIG_SENSORS_PCF8591=m
-CONFIG_SENSORS_SMSC47B397=m
-CONFIG_SENSORS_W83781D=m
-CONFIG_SENSORS_W83791D=m
-CONFIG_SENSORS_W83792D=m
-CONFIG_SENSORS_W83793=m
-CONFIG_SENSORS_W83L785TS=m
-CONFIG_WATCHDOG=y
-CONFIG_WATCHDOG_NOWAYOUT=y
-CONFIG_AT91RM9200_WATCHDOG=m
-CONFIG_HID=m
-CONFIG_USB=m
-CONFIG_USB_DEVICEFS=y
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_ACM=m
-CONFIG_USB_PRINTER=m
-CONFIG_USB_STORAGE=m
-CONFIG_USB_SERIAL=m
-CONFIG_USB_SERIAL_GENERIC=y
-CONFIG_USB_SERIAL_PL2303=m
-CONFIG_MMC=m
-CONFIG_MMC_AT91=m
-CONFIG_RTC_CLASS=m
-CONFIG_RTC_DRV_AT91RM9200=m
-CONFIG_EXT2_FS=m
-CONFIG_EXT3_FS=m
-# CONFIG_EXT3_FS_XATTR is not set
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_NTFS_FS=m
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_SUMMARY=y
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_NFS_FS=m
-CONFIG_SMB_FS=m
-CONFIG_CIFS=m
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_AMIGA_PARTITION=y
-CONFIG_NLS_DEFAULT="utf-8"
-CONFIG_NLS_CODEPAGE_437=m
-CONFIG_NLS_CODEPAGE_737=m
-CONFIG_NLS_CODEPAGE_775=m
-CONFIG_NLS_CODEPAGE_850=m
-CONFIG_NLS_CODEPAGE_852=m
-CONFIG_NLS_CODEPAGE_855=m
-CONFIG_NLS_CODEPAGE_857=m
-CONFIG_NLS_CODEPAGE_860=m
-CONFIG_NLS_CODEPAGE_861=m
-CONFIG_NLS_CODEPAGE_862=m
-CONFIG_NLS_CODEPAGE_863=m
-CONFIG_NLS_CODEPAGE_864=m
-CONFIG_NLS_CODEPAGE_865=m
-CONFIG_NLS_CODEPAGE_866=m
-CONFIG_NLS_CODEPAGE_869=m
-CONFIG_NLS_CODEPAGE_936=m
-CONFIG_NLS_CODEPAGE_950=m
-CONFIG_NLS_CODEPAGE_932=m
-CONFIG_NLS_CODEPAGE_949=m
-CONFIG_NLS_CODEPAGE_874=m
-CONFIG_NLS_ISO8859_8=m
-CONFIG_NLS_CODEPAGE_1250=m
-CONFIG_NLS_CODEPAGE_1251=m
-CONFIG_NLS_ASCII=m
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_NLS_KOI8_R=m
-CONFIG_NLS_KOI8_U=m
-CONFIG_NLS_UTF8=m
-CONFIG_DEBUG_KERNEL=y
-# CONFIG_DEBUG_BUGVERBOSE is not set
-CONFIG_DEBUG_LL=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_LRW=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XCBC=m
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAMELLIA=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_FCRYPT=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-CONFIG_LIBCRC32C=m
diff --git a/arch/arm/configs/yl9200_defconfig b/arch/arm/configs/yl9200_defconfig
deleted file mode 100644
index 30c537f61089..000000000000
--- a/arch/arm/configs/yl9200_defconfig
+++ /dev/null
@@ -1,137 +0,0 @@
-# CONFIG_SWAP is not set
-CONFIG_SYSVIPC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
-CONFIG_ARCH_AT91=y
-CONFIG_ARCH_AT91RM9200DK=y
-CONFIG_MACH_YL9200=y
-# CONFIG_ARM_THUMB is not set
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="mem=32M console=ttyS0,115200 initrd=0x20410000,3145728 root=/dev/ram0 rw"
-CONFIG_FPE_NWFPE=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_MTD=y
-CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_JEDECPROBE=y
-CONFIG_MTD_CFI_INTELEXT=y
-CONFIG_MTD_COMPLEX_MAPPINGS=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_PLATRAM=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_ATMEL=y
-CONFIG_MTD_NAND_PLATFORM=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_COUNT=3
-CONFIG_BLK_DEV_RAM_SIZE=8192
-# CONFIG_MISC_DEVICES is not set
-CONFIG_BLK_DEV_SD=y
-CONFIG_ATA=y
-CONFIG_NETDEVICES=y
-CONFIG_PHYLIB=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_ARM_AT91_ETHER=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-CONFIG_INPUT_MOUSEDEV_SCREEN_X=640
-CONFIG_INPUT_MOUSEDEV_SCREEN_Y=480
-CONFIG_INPUT_EVDEV=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_TOUCHSCREEN_ADS7846=y
-# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIAL_ATMEL=y
-CONFIG_SERIAL_ATMEL_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_SPI=y
-CONFIG_SPI_DEBUG=y
-CONFIG_SPI_ATMEL=y
-CONFIG_FB=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_LCD_CLASS_DEVICE=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
-CONFIG_DISPLAY_SUPPORT=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_LOGO=y
-# CONFIG_LOGO_LINUX_MONO is not set
-# CONFIG_LOGO_LINUX_VGA16 is not set
-CONFIG_USB=y
-CONFIG_USB_DEBUG=y
-CONFIG_USB_DEVICEFS=y
-# CONFIG_USB_DEVICE_CLASS is not set
-CONFIG_USB_MON=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_M66592=y
-CONFIG_USB_FILE_STORAGE=m
-CONFIG_MMC=y
-CONFIG_MMC_DEBUG=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
-CONFIG_MMC_AT91=m
-CONFIG_NEW_LEDS=y
-CONFIG_LEDS_CLASS=y
-CONFIG_LEDS_GPIO=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_AT91RM9200=y
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT3_FS=y
-CONFIG_REISERFS_FS=y
-CONFIG_INOTIFY=y
-CONFIG_ISO9660_FS=y
-CONFIG_JOLIET=y
-CONFIG_ZISOFS=y
-CONFIG_UDF_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_JFFS2_FS_DEBUG=1
-CONFIG_JFFS2_COMPRESSION_OPTIONS=y
-CONFIG_JFFS2_RUBIN=y
-CONFIG_CRAMFS=y
-CONFIG_PARTITION_ADVANCED=y
-CONFIG_MAC_PARTITION=y
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_SLUB_DEBUG_ON=y
-CONFIG_DEBUG_KOBJECT=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_USER=y
-CONFIG_DEBUG_ERRORS=y
-CONFIG_DEBUG_LL=y
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 062b58c029ab..749bb6622404 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -238,7 +238,7 @@
 	@ Slightly optimised to avoid incrementing the pointer twice
 	usraccoff \instr, \reg, \ptr, \inc, 0, \cond, \abort
 	.if	\rept == 2
-	usraccoff \instr, \reg, \ptr, \inc, 4, \cond, \abort
+	usraccoff \instr, \reg, \ptr, \inc, \inc, \cond, \abort
 	.endif
 
 	add\cond \ptr, #\rept * \inc
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 68870c776671..b4ffe9d5b526 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -13,6 +13,10 @@ typedef struct {
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID(mm)	((mm)->context.id & 255)
+
+/* init_mm.context.id_lock should be initialized. */
+#define INIT_MM_CONTEXT(name)						\
+	.context.id_lock    = __SPIN_LOCK_UNLOCKED(name.context.id_lock),
 #else
 #define ASID(mm)	(0)
 #endif
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index b155414192da..53d1d5deb111 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -374,6 +374,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd)
374 374
375#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd))) 375#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd)))
376 376
377/* we don't need complex calculations here as the pmd is folded into the pgd */
378#define pmd_addr_end(addr,end) (end)
379
377/* 380/*
378 * Conversion functions: convert a page and protection to a page entry, 381 * Conversion functions: convert a page and protection to a page entry,
379 * and a page entry and page directory to the page they refer to. 382 * and a page entry and page directory to the page they refer to.
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index c09e3573c5de..bb96a7d4bbf5 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -911,7 +911,7 @@ __kuser_cmpxchg: @ 0xffff0fc0
911 * A special ghost syscall is used for that (see traps.c). 911 * A special ghost syscall is used for that (see traps.c).
912 */ 912 */
913 stmfd sp!, {r7, lr} 913 stmfd sp!, {r7, lr}
914 ldr r7, =1f @ it's 20 bits 914 ldr r7, 1f @ it's 20 bits
915 swi __ARM_NR_cmpxchg 915 swi __ARM_NR_cmpxchg
916 ldmfd sp!, {r7, pc} 916 ldmfd sp!, {r7, pc}
9171: .word __ARM_NR_cmpxchg 9171: .word __ARM_NR_cmpxchg
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index dd6b369ac69c..6bd82d25683c 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -85,9 +85,11 @@ ENTRY(stext)
85 mrc p15, 0, r9, c0, c0 @ get processor id 85 mrc p15, 0, r9, c0, c0 @ get processor id
86 bl __lookup_processor_type @ r5=procinfo r9=cpuid 86 bl __lookup_processor_type @ r5=procinfo r9=cpuid
87 movs r10, r5 @ invalid processor (r5=0)? 87 movs r10, r5 @ invalid processor (r5=0)?
88 THUMB( it eq ) @ force fixup-able long branch encoding
88 beq __error_p @ yes, error 'p' 89 beq __error_p @ yes, error 'p'
89 bl __lookup_machine_type @ r5=machinfo 90 bl __lookup_machine_type @ r5=machinfo
90 movs r8, r5 @ invalid machine (r5=0)? 91 movs r8, r5 @ invalid machine (r5=0)?
92 THUMB( it eq ) @ force fixup-able long branch encoding
91 beq __error_a @ yes, error 'a' 93 beq __error_a @ yes, error 'a'
92 bl __vet_atags 94 bl __vet_atags
93#ifdef CONFIG_SMP_ON_UP 95#ifdef CONFIG_SMP_ON_UP
@@ -262,6 +264,7 @@ __create_page_tables:
 	mov	pc, lr
 ENDPROC(__create_page_tables)
 	.ltorg
+	.align
 __enable_mmu_loc:
 	.long	.
 	.long	__enable_mmu
@@ -282,6 +285,7 @@ ENTRY(secondary_startup)
 	bl	__lookup_processor_type
 	movs	r10, r5				@ invalid processor?
 	moveq	r0, #'p'			@ yes, error 'p'
+ THUMB( it	eq )		@ force fixup-able long branch encoding
 	beq	__error_p
 
 	/*
@@ -308,6 +312,8 @@ ENTRY(__secondary_switched)
 	b	secondary_start_kernel
 ENDPROC(__secondary_switched)
 
+	.align
+
 	.type	__secondary_data, %object
 __secondary_data:
 	.long	.
@@ -413,6 +419,7 @@ __fixup_smp_on_up:
 	mov	pc, lr
 ENDPROC(__fixup_smp)
 
+	.align
 1:	.word	.
 	.word	__smpalt_begin
 	.word	__smpalt_end
diff --git a/arch/arm/kernel/relocate_kernel.S b/arch/arm/kernel/relocate_kernel.S
index fd26f8d65151..9cf4cbf8f95b 100644
--- a/arch/arm/kernel/relocate_kernel.S
+++ b/arch/arm/kernel/relocate_kernel.S
@@ -59,6 +59,8 @@ relocate_new_kernel:
59 ldr r2,kexec_boot_atags 59 ldr r2,kexec_boot_atags
60 mov pc,lr 60 mov pc,lr
61 61
62 .align
63
62 .globl kexec_start_address 64 .globl kexec_start_address
63kexec_start_address: 65kexec_start_address:
64 .long 0x0 66 .long 0x0
diff --git a/arch/arm/lib/findbit.S b/arch/arm/lib/findbit.S
index 1e4cbd4e7be9..64f6bc1a9132 100644
--- a/arch/arm/lib/findbit.S
+++ b/arch/arm/lib/findbit.S
@@ -174,8 +174,8 @@ ENDPROC(_find_next_bit_be)
  */
 .L_found:
 #if __LINUX_ARM_ARCH__ >= 5
-	rsb	r1, r3, #0
-	and	r3, r3, r1
+	rsb	r0, r3, #0
+	and	r3, r3, r0
 	clz	r3, r3
 	rsb	r3, r3, #31
 	add	r0, r2, r3
@@ -190,5 +190,7 @@ ENDPROC(_find_next_bit_be)
 	addeq	r2, r2, #1
 	mov	r0, r2
 #endif
+	cmp	r1, r0			@ Clamp to maxbit
+	movlo	r0, r1
 	mov	pc, lr
 
diff --git a/arch/arm/mach-aaec2000/include/mach/vmalloc.h b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
index cff4e0a996ce..a6299e8321bd 100644
--- a/arch/arm/mach-aaec2000/include/mach/vmalloc.h
+++ b/arch/arm/mach-aaec2000/include/mach/vmalloc.h
@@ -11,6 +11,6 @@
 #ifndef __ASM_ARCH_VMALLOC_H
 #define __ASM_ARCH_VMALLOC_H
 
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
 
 #endif /* __ASM_ARCH_VMALLOC_H */
diff --git a/arch/arm/mach-at91/Makefile b/arch/arm/mach-at91/Makefile
index 821eb842795f..62d686f0b426 100644
--- a/arch/arm/mach-at91/Makefile
+++ b/arch/arm/mach-at91/Makefile
@@ -24,8 +24,8 @@ obj-$(CONFIG_ARCH_AT91X40) += at91x40.o at91x40_time.o
 
 # AT91RM9200 board-specific support
 obj-$(CONFIG_MACH_ONEARM)	+= board-1arm.o
-obj-$(CONFIG_ARCH_AT91RM9200DK)	+= board-dk.o
-obj-$(CONFIG_MACH_AT91RM9200EK)	+= board-ek.o
+obj-$(CONFIG_ARCH_AT91RM9200DK)	+= board-rm9200dk.o
+obj-$(CONFIG_MACH_AT91RM9200EK)	+= board-rm9200ek.o
 obj-$(CONFIG_MACH_CSB337)	+= board-csb337.o
 obj-$(CONFIG_MACH_CSB637)	+= board-csb637.o
 obj-$(CONFIG_MACH_CARMEVA)	+= board-carmeva.o
diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c
index 9338825cfcd7..7b539228e0ef 100644
--- a/arch/arm/mach-at91/at91rm9200_devices.c
+++ b/arch/arm/mach-at91/at91rm9200_devices.c
@@ -1106,51 +1106,6 @@ static inline void configure_usart3_pins(unsigned pins)
 static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART];	/* the UARTs to use */
 struct platform_device *atmel_default_console_device;	/* the serial console device */
 
-void __init __deprecated at91_init_serial(struct at91_uart_config *config)
-{
-	int i;
-
-	/* Fill in list of supported UARTs */
-	for (i = 0; i < config->nr_tty; i++) {
-		switch (config->tty_map[i]) {
-			case 0:
-				configure_usart0_pins(ATMEL_UART_CTS | ATMEL_UART_RTS);
-				at91_uarts[i] = &at91rm9200_uart0_device;
-				at91_clock_associate("usart0_clk", &at91rm9200_uart0_device.dev, "usart");
-				break;
-			case 1:
-				configure_usart1_pins(ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DSR | ATMEL_UART_DTR | ATMEL_UART_DCD | ATMEL_UART_RI);
-				at91_uarts[i] = &at91rm9200_uart1_device;
-				at91_clock_associate("usart1_clk", &at91rm9200_uart1_device.dev, "usart");
-				break;
-			case 2:
-				configure_usart2_pins(0);
-				at91_uarts[i] = &at91rm9200_uart2_device;
-				at91_clock_associate("usart2_clk", &at91rm9200_uart2_device.dev, "usart");
-				break;
-			case 3:
-				configure_usart3_pins(0);
-				at91_uarts[i] = &at91rm9200_uart3_device;
-				at91_clock_associate("usart3_clk", &at91rm9200_uart3_device.dev, "usart");
-				break;
-			case 4:
-				configure_dbgu_pins();
-				at91_uarts[i] = &at91rm9200_dbgu_device;
-				at91_clock_associate("mck", &at91rm9200_dbgu_device.dev, "usart");
-				break;
-			default:
-				continue;
-		}
-		at91_uarts[i]->id = i;		/* update ID number to mapped ID */
-	}
-
-	/* Set serial console device */
-	if (config->console_tty < ATMEL_MAX_UART)
-		atmel_default_console_device = at91_uarts[config->console_tty];
-	if (!atmel_default_console_device)
-		printk(KERN_INFO "AT91: No default serial console defined.\n");
-}
-
 void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
 {
 	struct platform_device *pdev;
diff --git a/arch/arm/mach-at91/board-1arm.c b/arch/arm/mach-at91/board-1arm.c
index 46bdc82d3fbf..8a3fc84847c1 100644
--- a/arch/arm/mach-at91/board-1arm.c
+++ b/arch/arm/mach-at91/board-1arm.c
@@ -39,24 +39,24 @@
 #include "generic.h"
 
 
-/*
- * Serial port configuration.
- *    0 .. 3 = USART0 .. USART3
- *    4      = DBGU
- */
-static struct at91_uart_config __initdata onearm_uart_config = {
-	.console_tty	= 0,				/* ttyS0 */
-	.nr_tty		= 3,
-	.tty_map	= { 4, 0, 1, -1, -1 },		/* ttyS0, ..., ttyS4 */
-};
-
 static void __init onearm_map_io(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91rm9200_initialize(18432000, AT91RM9200_PQFP);
 
-	/* Setup the serial ports and console */
-	at91_init_serial(&onearm_uart_config);
+	/* DBGU on ttyS0. (Rx & Tx only) */
+	at91_register_uart(0, 0, 0);
+
+	/* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
+	at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+	/* USART1 on ttyS2 (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+	at91_register_uart(AT91RM9200_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS
+			   | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
+			   | ATMEL_UART_RI);
+
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
 }
 
 static void __init onearm_init_irq(void)
diff --git a/arch/arm/mach-at91/board-kafa.c b/arch/arm/mach-at91/board-kafa.c
index c0ce79d431a0..d2e1f4ec1fcc 100644
--- a/arch/arm/mach-at91/board-kafa.c
+++ b/arch/arm/mach-at91/board-kafa.c
@@ -39,17 +39,6 @@
 #include "generic.h"
 
 
-/*
- * Serial port configuration.
- *    0 .. 3 = USART0 .. USART3
- *    4      = DBGU
- */
-static struct at91_uart_config __initdata kafa_uart_config = {
-	.console_tty	= 0,				/* ttyS0 */
-	.nr_tty		= 2,
-	.tty_map	= { 4, 0, -1, -1, -1 }		/* ttyS0, ..., ttyS4 */
-};
-
 static void __init kafa_map_io(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
@@ -58,8 +47,14 @@ static void __init kafa_map_io(void)
 	/* Set up the LEDs */
 	at91_init_leds(AT91_PIN_PB4, AT91_PIN_PB4);
 
-	/* Setup the serial ports and console */
-	at91_init_serial(&kafa_uart_config);
+	/* DBGU on ttyS0. (Rx & Tx only) */
+	at91_register_uart(0, 0, 0);
+
+	/* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
+	at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);
+
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
 }
 
 static void __init kafa_init_irq(void)
diff --git a/arch/arm/mach-at91/board-picotux200.c b/arch/arm/mach-at91/board-picotux200.c
index 9d833bbc592d..55dad3a46547 100644
--- a/arch/arm/mach-at91/board-picotux200.c
+++ b/arch/arm/mach-at91/board-picotux200.c
@@ -43,24 +43,21 @@
 #include "generic.h"
 
 
-/*
- * Serial port configuration.
- *    0 .. 3 = USART0 .. USART3
- *    4      = DBGU
- */
-static struct at91_uart_config __initdata picotux200_uart_config = {
-	.console_tty	= 0,				/* ttyS0 */
-	.nr_tty		= 2,
-	.tty_map	= { 4, 1, -1, -1, -1 }		/* ttyS0, ..., ttyS4 */
-};
-
 static void __init picotux200_map_io(void)
 {
 	/* Initialize processor: 18.432 MHz crystal */
 	at91rm9200_initialize(18432000, AT91RM9200_BGA);
 
-	/* Setup the serial ports and console */
-	at91_init_serial(&picotux200_uart_config);
+	/* DBGU on ttyS0. (Rx & Tx only) */
+	at91_register_uart(0, 0, 0);
+
+	/* USART1 on ttyS1. (Rx, Tx, CTS, RTS, DTR, DSR, DCD, RI) */
+	at91_register_uart(AT91RM9200_ID_US1, 1, ATMEL_UART_CTS | ATMEL_UART_RTS
+			   | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD
+			   | ATMEL_UART_RI);
+
+	/* set serial console to ttyS0 (ie, DBGU) */
+	at91_set_serial_console(0);
 }
 
 static void __init picotux200_init_irq(void)
@@ -77,11 +74,6 @@ static struct at91_usbh_data __initdata picotux200_usbh_data = {
 	.ports		= 1,
 };
 
-// static struct at91_udc_data __initdata picotux200_udc_data = {
-//	.vbus_pin	= AT91_PIN_PD4,
-//	.pullup_pin	= AT91_PIN_PD5,
-// };
-
 static struct at91_mmc_data __initdata picotux200_mmc_data = {
 	.det_pin	= AT91_PIN_PB27,
 	.slot_b		= 0,
@@ -89,21 +81,6 @@ static struct at91_mmc_data __initdata picotux200_mmc_data = {
 	.wp_pin		= AT91_PIN_PA17,
 };
 
-// static struct spi_board_info picotux200_spi_devices[] = {
-//	{	/* DataFlash chip */
-//		.modalias	= "mtd_dataflash",
-//		.chip_select	= 0,
-//		.max_speed_hz	= 15 * 1000 * 1000,
-//	},
-// #ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
-//	{	/* DataFlash card */
-//		.modalias	= "mtd_dataflash",
-//		.chip_select	= 3,
-//		.max_speed_hz	= 15 * 1000 * 1000,
-//	},
-// #endif
-// };
-
 #define PICOTUX200_FLASH_BASE	AT91_CHIPSELECT_0
 #define PICOTUX200_FLASH_SIZE	SZ_4M
 
@@ -135,21 +112,11 @@ static void __init picotux200_board_init(void)
 	at91_add_device_eth(&picotux200_eth_data);
 	/* USB Host */
 	at91_add_device_usbh(&picotux200_usbh_data);
-	/* USB Device */
-	// at91_add_device_udc(&picotux200_udc_data);
-	// at91_set_multi_drive(picotux200_udc_data.pullup_pin, 1);	/* pullup_pin is connected to reset */
 	/* I2C */
 	at91_add_device_i2c(NULL, 0);
-	/* SPI */
-	// at91_add_device_spi(picotux200_spi_devices, ARRAY_SIZE(picotux200_spi_devices));
-#ifdef CONFIG_MTD_AT91_DATAFLASH_CARD
-	/* DataFlash card */
-	at91_set_gpio_output(AT91_PIN_PB22, 0);
-#else
 	/* MMC */
 	at91_set_gpio_output(AT91_PIN_PB22, 1);	/* this MMC card slot can optionally use SPI signaling (CS3). */
 	at91_add_device_mmc(0, &picotux200_mmc_data);
-#endif
 	/* NOR Flash */
 	platform_device_register(&picotux200_flash);
 }
diff --git a/arch/arm/mach-at91/board-dk.c b/arch/arm/mach-at91/board-rm9200dk.c
index e14f0e165680..4c1047c8200d 100644
--- a/arch/arm/mach-at91/board-dk.c
+++ b/arch/arm/mach-at91/board-rm9200dk.c
@@ -1,5 +1,5 @@
 /*
- * linux/arch/arm/mach-at91/board-dk.c
+ * linux/arch/arm/mach-at91/board-rm9200dk.c
  *
  * Copyright (C) 2005 SAN People
  *
@@ -91,10 +91,12 @@ static struct at91_cf_data __initdata dk_cf_data = {
 	// .vcc_pin	= ...		always powered
 };
 
+#ifndef CONFIG_MTD_AT91_DATAFLASH_CARD
 static struct at91_mmc_data __initdata dk_mmc_data = {
 	.slot_b		= 0,
 	.wire4		= 1,
 };
+#endif
 
 static struct spi_board_info dk_spi_devices[] = {
 	{	/* DataFlash chip */
diff --git a/arch/arm/mach-at91/board-ek.c b/arch/arm/mach-at91/board-rm9200ek.c
index 56e92c4bbc2a..9df1be8818c0 100644
--- a/arch/arm/mach-at91/board-ek.c
+++ b/arch/arm/mach-at91/board-rm9200ek.c
@@ -1,5 +1,5 @@
 /*
- * linux/arch/arm/mach-at91/board-ek.c
+ * linux/arch/arm/mach-at91/board-rm9200ek.c
  *
  * Copyright (C) 2005 SAN People
  *
@@ -84,12 +84,14 @@ static struct at91_udc_data __initdata ek_udc_data = {
 	.pullup_pin	= AT91_PIN_PD5,
 };
 
+#ifndef CONFIG_MTD_AT91_DATAFLASH_CARD
 static struct at91_mmc_data __initdata ek_mmc_data = {
 	.det_pin	= AT91_PIN_PB27,
 	.slot_b		= 0,
 	.wire4		= 1,
 	.wp_pin		= AT91_PIN_PA17,
 };
+#endif
 
 static struct spi_board_info ek_spi_devices[] = {
 	{	/* DataFlash chip */
diff --git a/arch/arm/mach-at91/board-yl-9200.c b/arch/arm/mach-at91/board-yl-9200.c
index 89df00a9d2f7..e0f0080eb639 100644
--- a/arch/arm/mach-at91/board-yl-9200.c
+++ b/arch/arm/mach-at91/board-yl-9200.c
@@ -387,7 +387,7 @@ static struct spi_board_info yl9200_spi_devices[] = {
  * EPSON S1D13806 FB (discontinued chip)
  * EPSON S1D13506 FB
  */
-#if defined(CONFIG_FB_S1D135XX) || defined(CONFIG_FB_S1D13XXX_MODULE)
+#if defined(CONFIG_FB_S1D13XXX) || defined(CONFIG_FB_S1D13XXX_MODULE)
 #include <video/s1d13xxxfb.h>
 
 
diff --git a/arch/arm/mach-at91/include/mach/board.h b/arch/arm/mach-at91/include/mach/board.h
index 58528aa9c8a8..2b499eb343a1 100644
--- a/arch/arm/mach-at91/include/mach/board.h
+++ b/arch/arm/mach-at91/include/mach/board.h
@@ -137,13 +137,7 @@ extern void __init at91_add_device_spi(struct spi_board_info *devices, int nr_de
 extern void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins);
 extern void __init at91_set_serial_console(unsigned portnr);
 
-struct at91_uart_config {
-	unsigned short	console_tty;	/* tty number of serial console */
-	unsigned short	nr_tty;		/* number of serial tty's */
-	short		tty_map[];	/* map UART to tty number */
-};
 extern struct platform_device *atmel_default_console_device;
-extern void __init __deprecated at91_init_serial(struct at91_uart_config *config);
 
 struct atmel_uart_data {
 	short		use_dma_tx;	/* use transmit DMA? */
diff --git a/arch/arm/mach-bcmring/include/mach/vmalloc.h b/arch/arm/mach-bcmring/include/mach/vmalloc.h
index 3db3a09fd398..7397bd7817d9 100644
--- a/arch/arm/mach-bcmring/include/mach/vmalloc.h
+++ b/arch/arm/mach-bcmring/include/mach/vmalloc.h
@@ -22,4 +22,4 @@
  * 0xe0000000 to 0xefffffff. This gives us 256 MB of vm space and handles
  * larger physical memory designs better.
  */
-#define VMALLOC_END	0xf0000000
+#define VMALLOC_END	0xf0000000UL
diff --git a/arch/arm/mach-clps711x/include/mach/vmalloc.h b/arch/arm/mach-clps711x/include/mach/vmalloc.h
index 30b3a287ed88..467b96137e47 100644
--- a/arch/arm/mach-clps711x/include/mach/vmalloc.h
+++ b/arch/arm/mach-clps711x/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
diff --git a/arch/arm/mach-cns3xxx/pcie.c b/arch/arm/mach-cns3xxx/pcie.c
index 38088c36936c..78defd71a829 100644
--- a/arch/arm/mach-cns3xxx/pcie.c
+++ b/arch/arm/mach-cns3xxx/pcie.c
@@ -369,7 +369,7 @@ static int __init cns3xxx_pcie_init(void)
 {
 	int i;
 
-	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS,
+	hook_fault_code(16 + 6, cns3xxx_pcie_abort_handler, SIGBUS, 0,
 			"imprecise external abort");
 
 	for (i = 0; i < ARRAY_SIZE(cns3xxx_pcie); i++) {
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index 9be261beae7d..2652af124acd 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -359,8 +359,8 @@ static struct clk_lookup dm355_clks[] = {
 	CLK(NULL, "uart1", &uart1_clk),
 	CLK(NULL, "uart2", &uart2_clk),
 	CLK("i2c_davinci.1", NULL, &i2c_clk),
-	CLK("davinci-asp.0", NULL, &asp0_clk),
-	CLK("davinci-asp.1", NULL, &asp1_clk),
+	CLK("davinci-mcbsp.0", NULL, &asp0_clk),
+	CLK("davinci-mcbsp.1", NULL, &asp1_clk),
 	CLK("davinci_mmc.0", NULL, &mmcsd0_clk),
 	CLK("davinci_mmc.1", NULL, &mmcsd1_clk),
 	CLK("spi_davinci.0", NULL, &spi0_clk),
@@ -664,7 +664,7 @@ static struct resource dm355_asp1_resources[] = {
 };
 
 static struct platform_device dm355_asp1_device = {
-	.name		= "davinci-asp",
+	.name		= "davinci-mcbsp",
 	.id		= 1,
 	.num_resources	= ARRAY_SIZE(dm355_asp1_resources),
 	.resource	= dm355_asp1_resources,
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index a12065e87266..c466d710d3c1 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -459,7 +459,7 @@ static struct clk_lookup dm365_clks[] = {
 	CLK(NULL, "usb", &usb_clk),
 	CLK("davinci_emac.1", NULL, &emac_clk),
 	CLK("davinci_voicecodec", NULL, &voicecodec_clk),
-	CLK("davinci-asp.0", NULL, &asp0_clk),
+	CLK("davinci-mcbsp", NULL, &asp0_clk),
 	CLK(NULL, "rto", &rto_clk),
 	CLK(NULL, "mjcp", &mjcp_clk),
 	CLK(NULL, NULL, NULL),
@@ -922,8 +922,8 @@ static struct resource dm365_asp_resources[] = {
 };
 
 static struct platform_device dm365_asp_device = {
-	.name		= "davinci-asp",
-	.id		= 0,
+	.name		= "davinci-mcbsp",
+	.id		= -1,
 	.num_resources	= ARRAY_SIZE(dm365_asp_resources),
 	.resource	= dm365_asp_resources,
 };
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 0608dd776a16..9a2376b3137c 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -302,7 +302,7 @@ static struct clk_lookup dm644x_clks[] = {
 	CLK("davinci_emac.1", NULL, &emac_clk),
 	CLK("i2c_davinci.1", NULL, &i2c_clk),
 	CLK("palm_bk3710", NULL, &ide_clk),
-	CLK("davinci-asp", NULL, &asp_clk),
+	CLK("davinci-mcbsp", NULL, &asp_clk),
 	CLK("davinci_mmc.0", NULL, &mmcsd_clk),
 	CLK(NULL, "spi", &spi_clk),
 	CLK(NULL, "gpio", &gpio_clk),
@@ -580,7 +580,7 @@ static struct resource dm644x_asp_resources[] = {
 };
 
 static struct platform_device dm644x_asp_device = {
-	.name		= "davinci-asp",
+	.name		= "davinci-mcbsp",
 	.id		= -1,
 	.num_resources	= ARRAY_SIZE(dm644x_asp_resources),
 	.resource	= dm644x_asp_resources,
diff --git a/arch/arm/mach-ebsa110/include/mach/vmalloc.h b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
index 60bde56fba4c..ea141b7a3e03 100644
--- a/arch/arm/mach-ebsa110/include/mach/vmalloc.h
+++ b/arch/arm/mach-ebsa110/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define VMALLOC_END	0xdf000000
+#define VMALLOC_END	0xdf000000UL
diff --git a/arch/arm/mach-footbridge/include/mach/vmalloc.h b/arch/arm/mach-footbridge/include/mach/vmalloc.h
index 0ffbb7c85e59..40ba78e5782b 100644
--- a/arch/arm/mach-footbridge/include/mach/vmalloc.h
+++ b/arch/arm/mach-footbridge/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
  */
 
 
-#define VMALLOC_END	0xf0000000
+#define VMALLOC_END	0xf0000000UL
diff --git a/arch/arm/mach-h720x/include/mach/vmalloc.h b/arch/arm/mach-h720x/include/mach/vmalloc.h
index a45915b88756..8520b4a4d4e6 100644
--- a/arch/arm/mach-h720x/include/mach/vmalloc.h
+++ b/arch/arm/mach-h720x/include/mach/vmalloc.h
@@ -5,6 +5,6 @@
 #ifndef __ARCH_ARM_VMALLOC_H
 #define __ARCH_ARM_VMALLOC_H
 
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
 
 #endif
diff --git a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
index 026263c665ca..7e1e9dc2c8fc 100644
--- a/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
+++ b/arch/arm/mach-imx/eukrea_mbimx27-baseboard.c
@@ -250,9 +250,6 @@ static const struct imxuart_platform_data uart_pdata __initconst = {
 	.flags = IMXUART_HAVE_RTSCTS,
 };
 
-#if defined(CONFIG_TOUCHSCREEN_ADS7846) \
-	|| defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE)
-
 #define ADS7846_PENDOWN (GPIO_PORTD | 25)
 
 static void ads7846_dev_init(void)
@@ -273,9 +270,7 @@ static struct ads7846_platform_data ads7846_config __initdata = {
 	.get_pendown_state	= ads7846_get_pendown_state,
 	.keep_vref_on		= 1,
 };
-#endif
 
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
 static struct spi_board_info eukrea_mbimx27_spi_board_info[] __initdata = {
 	[0] = {
 		.modalias	= "ads7846",
@@ -294,7 +289,6 @@ static const struct spi_imx_master eukrea_mbimx27_spi0_data __initconst = {
 	.chipselect	= eukrea_mbimx27_spi_cs,
 	.num_chipselect	= ARRAY_SIZE(eukrea_mbimx27_spi_cs),
 };
-#endif
 
 static struct i2c_board_info eukrea_mbimx27_i2c_devices[] = {
 	{
diff --git a/arch/arm/mach-integrator/include/mach/vmalloc.h b/arch/arm/mach-integrator/include/mach/vmalloc.h
index e056e7cf5645..2f5a2bafb11f 100644
--- a/arch/arm/mach-integrator/include/mach/vmalloc.h
+++ b/arch/arm/mach-integrator/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
diff --git a/arch/arm/mach-msm/include/mach/vmalloc.h b/arch/arm/mach-msm/include/mach/vmalloc.h
index 31a32ad062dc..d138448eff16 100644
--- a/arch/arm/mach-msm/include/mach/vmalloc.h
+++ b/arch/arm/mach-msm/include/mach/vmalloc.h
@@ -16,7 +16,7 @@
 #ifndef __ASM_ARCH_MSM_VMALLOC_H
 #define __ASM_ARCH_MSM_VMALLOC_H
 
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
 
 #endif
 
diff --git a/arch/arm/mach-mx25/devices-imx25.h b/arch/arm/mach-mx25/devices-imx25.h
index 93afa10b13cf..d94d282fa676 100644
--- a/arch/arm/mach-mx25/devices-imx25.h
+++ b/arch/arm/mach-mx25/devices-imx25.h
@@ -42,9 +42,9 @@ extern const struct imx_mxc_nand_data imx25_mxc_nand_data __initconst;
 #define imx25_add_mxc_nand(pdata)	\
 	imx_add_mxc_nand(&imx25_mxc_nand_data, pdata)
 
-extern const struct imx_spi_imx_data imx25_spi_imx_data[] __initconst;
+extern const struct imx_spi_imx_data imx25_cspi_data[] __initconst;
 #define imx25_add_spi_imx(id, pdata)	\
-	imx_add_spi_imx(&imx25_spi_imx_data[id], pdata)
+	imx_add_spi_imx(&imx25_cspi_data[id], pdata)
 #define imx25_add_spi_imx0(pdata)	imx25_add_spi_imx(0, pdata)
 #define imx25_add_spi_imx1(pdata)	imx25_add_spi_imx(1, pdata)
 #define imx25_add_spi_imx2(pdata)	imx25_add_spi_imx(2, pdata)
diff --git a/arch/arm/mach-mx3/mach-pcm037_eet.c b/arch/arm/mach-mx3/mach-pcm037_eet.c
index 99e0894e07db..fda56545d2fd 100644
--- a/arch/arm/mach-mx3/mach-pcm037_eet.c
+++ b/arch/arm/mach-mx3/mach-pcm037_eet.c
@@ -14,6 +14,7 @@
 
 #include <mach/common.h>
 #include <mach/iomux-mx3.h>
+#include <mach/spi.h>
 
 #include <asm/mach-types.h>
 
@@ -59,14 +60,12 @@ static struct spi_board_info pcm037_spi_dev[] = {
 };
 
 /* Platform Data for MXC CSPI */
-#if defined(CONFIG_SPI_IMX) || defined(CONFIG_SPI_IMX_MODULE)
 static int pcm037_spi1_cs[] = {MXC_SPI_CS(1), IOMUX_TO_GPIO(MX31_PIN_KEY_COL7)};
 
 static const struct spi_imx_master pcm037_spi1_pdata __initconst = {
 	.chipselect = pcm037_spi1_cs,
 	.num_chipselect = ARRAY_SIZE(pcm037_spi1_cs),
 };
-#endif
 
 /* GPIO-keys input device */
 static struct gpio_keys_button pcm037_gpio_keys[] = {
@@ -171,7 +170,7 @@ static struct platform_device pcm037_gpio_keys_device = {
 	},
 };
 
-static int eet_init_devices(void)
+static int __init eet_init_devices(void)
 {
 	if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET)
 		return 0;
diff --git a/arch/arm/mach-netx/include/mach/vmalloc.h b/arch/arm/mach-netx/include/mach/vmalloc.h
index 7cca3574308f..871f1ef7bff5 100644
--- a/arch/arm/mach-netx/include/mach/vmalloc.h
+++ b/arch/arm/mach-netx/include/mach/vmalloc.h
@@ -16,4 +16,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
diff --git a/arch/arm/mach-omap1/include/mach/vmalloc.h b/arch/arm/mach-omap1/include/mach/vmalloc.h
index b001f67d695b..22ec4a479577 100644
--- a/arch/arm/mach-omap1/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap1/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#define VMALLOC_END	0xd8000000
+#define VMALLOC_END	0xd8000000UL
diff --git a/arch/arm/mach-omap2/include/mach/vmalloc.h b/arch/arm/mach-omap2/include/mach/vmalloc.h
index 4da31e997efe..866319947760 100644
--- a/arch/arm/mach-omap2/include/mach/vmalloc.h
+++ b/arch/arm/mach-omap2/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
-#define VMALLOC_END	0xf8000000
+#define VMALLOC_END	0xf8000000UL
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index a40457d81927..c85923e56b85 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -30,6 +30,7 @@
 #include <linux/irq.h>
 #include <linux/time.h>
 #include <linux/gpio.h>
+#include <linux/console.h>
 
 #include <asm/mach/time.h>
 #include <asm/mach/irq.h>
@@ -118,6 +119,10 @@ static void omap2_enter_full_retention(void)
 	if (omap_irq_pending())
 		goto no_sleep;
 
+	/* Block console output in case it is on one of the OMAP UARTs */
+	if (try_acquire_console_sem())
+		goto no_sleep;
+
 	omap_uart_prepare_idle(0);
 	omap_uart_prepare_idle(1);
 	omap_uart_prepare_idle(2);
@@ -131,6 +136,8 @@ static void omap2_enter_full_retention(void)
 	omap_uart_resume_idle(1);
 	omap_uart_resume_idle(0);
 
+	release_console_sem();
+
 no_sleep:
 	if (omap2_pm_debug) {
 		unsigned long long tmp;
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 75c0cd13ad8e..0ec8a04b7473 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -28,6 +28,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/console.h>
 
 #include <plat/sram.h>
 #include <plat/clockdomain.h>
@@ -385,6 +386,12 @@ void omap_sram_idle(void)
 		omap3_enable_io_chain();
 	}
 
+	/* Block console output in case it is on one of the OMAP UARTs */
+	if (per_next_state < PWRDM_POWER_ON ||
+	    core_next_state < PWRDM_POWER_ON)
+		if (try_acquire_console_sem())
+			goto console_still_active;
+
 	/* PER */
 	if (per_next_state < PWRDM_POWER_ON) {
 		omap_uart_prepare_idle(2);
@@ -463,6 +470,9 @@ void omap_sram_idle(void)
 		omap_uart_resume_idle(3);
 	}
 
+	release_console_sem();
+
+console_still_active:
 	/* Disable IO-PAD and IO-CHAIN wakeup */
 	if (omap3_has_io_wakeup() &&
 	    (per_next_state < PWRDM_POWER_ON ||
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index becf0e38ef7e..d17960a1be25 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/serial_8250.h>
 #include <linux/pm_runtime.h>
+#include <linux/console.h>
 
 #ifdef CONFIG_SERIAL_OMAP
 #include <plat/omap-serial.h>
@@ -406,7 +407,7 @@ void omap_uart_resume_idle(int num)
 	struct omap_uart_state *uart;
 
 	list_for_each_entry(uart, &uart_list, node) {
-		if (num == uart->num) {
+		if (num == uart->num && uart->can_sleep) {
 			omap_uart_enable_clocks(uart);
 
 			/* Check for IO pad wakeup */
@@ -807,6 +808,8 @@ void __init omap_serial_init_port(int port)
 
 	oh->dev_attr = uart;
 
+	acquire_console_sem(); /* in case the earlycon is on the UART */
+
 	/*
 	 * Because of early UART probing, UART did not get idled
 	 * on init. Now that omap_device is ready, ensure full idle
@@ -831,6 +834,8 @@ void __init omap_serial_init_port(int port)
 	omap_uart_block_sleep(uart);
 	uart->timeout = DEFAULT_TIMEOUT;
 
+	release_console_sem();
+
 	if ((cpu_is_omap34xx() && uart->padconf) ||
 	    (uart->wk_en && uart->wk_mask)) {
 		device_init_wakeup(&od->pdev.dev, true);
diff --git a/arch/arm/mach-pnx4008/include/mach/vmalloc.h b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
index 31b65ee07b0b..184913c71141 100644
--- a/arch/arm/mach-pnx4008/include/mach/vmalloc.h
+++ b/arch/arm/mach-pnx4008/include/mach/vmalloc.h
@@ -17,4 +17,4 @@
  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
  * area for the same reason. ;)
  */
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
diff --git a/arch/arm/mach-realview/headsmp.S b/arch/arm/mach-realview/headsmp.S
index 4075473cf68a..b34be4554d40 100644
--- a/arch/arm/mach-realview/headsmp.S
+++ b/arch/arm/mach-realview/headsmp.S
@@ -35,5 +35,6 @@ pen: ldr r7, [r6]
  */
 	b	secondary_startup
 
+	.align
 1:	.long	.
 	.long	pen_release
diff --git a/arch/arm/mach-rpc/include/mach/vmalloc.h b/arch/arm/mach-rpc/include/mach/vmalloc.h
index 3bcd86fadb81..fb700228637a 100644
--- a/arch/arm/mach-rpc/include/mach/vmalloc.h
+++ b/arch/arm/mach-rpc/include/mach/vmalloc.h
@@ -7,4 +7,4 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
-#define VMALLOC_END	0xdc000000
+#define VMALLOC_END	0xdc000000UL
diff --git a/arch/arm/mach-s3c2410/h1940-bluetooth.c b/arch/arm/mach-s3c2410/h1940-bluetooth.c
index 8aa2f1902a94..6b86a722a7db 100644
--- a/arch/arm/mach-s3c2410/h1940-bluetooth.c
+++ b/arch/arm/mach-s3c2410/h1940-bluetooth.c
@@ -77,13 +77,13 @@ static int __devinit h1940bt_probe(struct platform_device *pdev)
 
 	/* Configures BT serial port GPIOs */
 	s3c_gpio_cfgpin(S3C2410_GPH(0), S3C2410_GPH0_nCTS0);
-	s3c_gpio_cfgpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
+	s3c_gpio_setpull(S3C2410_GPH(0), S3C_GPIO_PULL_NONE);
 	s3c_gpio_cfgpin(S3C2410_GPH(1), S3C2410_GPIO_OUTPUT);
-	s3c_gpio_cfgpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
+	s3c_gpio_setpull(S3C2410_GPH(1), S3C_GPIO_PULL_NONE);
 	s3c_gpio_cfgpin(S3C2410_GPH(2), S3C2410_GPH2_TXD0);
-	s3c_gpio_cfgpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
+	s3c_gpio_setpull(S3C2410_GPH(2), S3C_GPIO_PULL_NONE);
 	s3c_gpio_cfgpin(S3C2410_GPH(3), S3C2410_GPH3_RXD0);
-	s3c_gpio_cfgpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
+	s3c_gpio_setpull(S3C2410_GPH(3), S3C_GPIO_PULL_NONE);
 
 
 	rfk = rfkill_alloc(DRV_NAME, &pdev->dev, RFKILL_TYPE_BLUETOOTH,
diff --git a/arch/arm/mach-s3c2412/Kconfig b/arch/arm/mach-s3c2412/Kconfig
index cef6a65637bd..fa2e5bffbb8e 100644
--- a/arch/arm/mach-s3c2412/Kconfig
+++ b/arch/arm/mach-s3c2412/Kconfig
@@ -16,7 +16,7 @@ config CPU_S3C2412
 config CPU_S3C2412_ONLY
 	bool
 	depends on ARCH_S3C2410 && !CPU_S3C2400 && !CPU_S3C2410 && \
-		   !CPU_2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
+		   !CPU_S3C2416 && !CPU_S3C2440 && !CPU_S3C2442 && \
 		   !CPU_S3C2443 && CPU_S3C2412
 	default y if CPU_S3C2412
 
diff --git a/arch/arm/mach-s3c2416/Kconfig b/arch/arm/mach-s3c2416/Kconfig
index 87b9c9f003bd..27b3e7c9d613 100644
--- a/arch/arm/mach-s3c2416/Kconfig
+++ b/arch/arm/mach-s3c2416/Kconfig
@@ -35,9 +35,12 @@ menu "S3C2416 Machines"
 config MACH_SMDK2416
 	bool "SMDK2416"
 	select CPU_S3C2416
+	select MACH_SMDK
 	select S3C_DEV_FB
 	select S3C_DEV_HSMMC
 	select S3C_DEV_HSMMC1
+	select S3C_DEV_NAND
+	select S3C_DEV_USB_HOST
 	select S3C2416_PM if PM
 	help
 	  Say Y here if you are using an SMDK2416
diff --git a/arch/arm/mach-s3c2416/irq.c b/arch/arm/mach-s3c2416/irq.c
index 084d121f368c..00174daf1526 100644
--- a/arch/arm/mach-s3c2416/irq.c
+++ b/arch/arm/mach-s3c2416/irq.c
@@ -168,12 +168,11 @@ static struct irq_chip s3c2416_irq_dma = {
 
 static void s3c2416_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
 {
-	s3c2416_irq_demux(IRQ_S3C2443_UART3, 3);
+	s3c2416_irq_demux(IRQ_S3C2443_RX3, 3);
 }
 
 #define INTMSK_UART3	(1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
-#define SUBMSK_UART3	(0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
-
+#define SUBMSK_UART3	(0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
 static void s3c2416_irq_uart3_mask(unsigned int irqno)
 {
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index ff024a6c0f85..a0cb2581894f 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -18,6 +18,7 @@ config CPU_S3C2440
 config CPU_S3C2442
 	bool
 	select CPU_ARM920T
+	select S3C_GPIO_PULL_DOWN
 	select S3C2410_CLOCK
 	select S3C2410_GPIO
 	select S3C2410_PM if PM
@@ -178,6 +179,9 @@ config MACH_MINI2440
 	bool "MINI2440 development board"
 	select CPU_S3C2440
 	select EEPROM_AT24
+	select NEW_LEDS
+	select LEDS_CLASS
+	select LEDS_TRIGGER
 	select LEDS_TRIGGER_BACKLIGHT
 	select S3C_DEV_NAND
 	select S3C_DEV_USB_HOST
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index d50f3ae6173d..f7663f731ea0 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -46,9 +46,6 @@ int __init s3c2440_init(void)
 {
 	printk("S3C2440: Initialising architecture\n");
 
-	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
-	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
-
 	/* change irq for watchdog */
 
 	s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT;
@@ -58,3 +55,11 @@ int __init s3c2440_init(void)
 
 	return sysdev_register(&s3c2440_sysdev);
 }
+
+void __init s3c2440_map_io(void)
+{
+	s3c244x_map_io();
+
+	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1up;
+	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1up;
+}
diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 188ad1e57dc0..ecf813546554 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -32,6 +32,7 @@
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
 #include <linux/mutex.h>
+#include <linux/gpio.h>
 #include <linux/clk.h>
 #include <linux/io.h>
 
@@ -43,6 +44,11 @@
 
 #include <plat/clock.h>
 #include <plat/cpu.h>
+#include <plat/s3c244x.h>
+
+#include <plat/gpio-core.h>
+#include <plat/gpio-cfg.h>
+#include <plat/gpio-cfg-helpers.h>
 
 /* S3C2442 extended clock support */
 
@@ -163,3 +169,11 @@ int __init s3c2442_init(void)
 
 	return sysdev_register(&s3c2442_sysdev);
 }
+
+void __init s3c2442_map_io(void)
+{
+	s3c244x_map_io();
+
+	s3c24xx_gpiocfg_default.set_pull = s3c_gpio_setpull_1down;
+	s3c24xx_gpiocfg_default.get_pull = s3c_gpio_getpull_1down;
+}
diff --git a/arch/arm/mach-s3c2443/Kconfig b/arch/arm/mach-s3c2443/Kconfig
index 4fef723126fa..31babec90cec 100644
--- a/arch/arm/mach-s3c2443/Kconfig
+++ b/arch/arm/mach-s3c2443/Kconfig
@@ -5,6 +5,7 @@
 config CPU_S3C2443
 	bool
 	depends on ARCH_S3C2410
+	select CPU_ARM920T
 	select S3C2443_DMA if S3C2410_DMA
 	select CPU_LLSERIAL_S3C2440
 	select SAMSUNG_CLKSRC
diff --git a/arch/arm/mach-s3c2443/irq.c b/arch/arm/mach-s3c2443/irq.c
index 0e0d693f3974..893424767ce1 100644
--- a/arch/arm/mach-s3c2443/irq.c
+++ b/arch/arm/mach-s3c2443/irq.c
@@ -166,12 +166,11 @@ static struct irq_chip s3c2443_irq_dma = {
 
 static void s3c2443_irq_demux_uart3(unsigned int irq, struct irq_desc *desc)
 {
-	s3c2443_irq_demux(IRQ_S3C2443_UART3, 3);
+	s3c2443_irq_demux(IRQ_S3C2443_RX3, 3);
 }
 
 #define INTMSK_UART3	(1UL << (IRQ_S3C2443_UART3 - IRQ_EINT0))
-#define SUBMSK_UART3	(0xf << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
-
+#define SUBMSK_UART3	(0x7 << (IRQ_S3C2443_RX3 - S3C2410_IRQSUB(0)))
 
 static void s3c2443_irq_uart3_mask(unsigned int irqno)
 {
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index 249c62956471..89f35e02e883 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -45,7 +45,7 @@
 
 #include <video/platform_lcd.h>
 
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index f9ef9b5c5f5a..4957ab0a0d4a 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -46,7 +46,7 @@
 
 #include <video/platform_lcd.h>
 
-#define UCON (S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK)
+#define UCON S3C2410_UCON_DEFAULT
 #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE | S3C2410_LCON_STOPB)
 #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE)
 
diff --git a/arch/arm/mach-s5pv210/mach-smdkc110.c b/arch/arm/mach-s5pv210/mach-smdkc110.c
index 0ad7924fe62e..5dd1681c069e 100644
--- a/arch/arm/mach-s5pv210/mach-smdkc110.c
+++ b/arch/arm/mach-s5pv210/mach-smdkc110.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/serial_core.h>
 #include <linux/i2c.h>
+#include <linux/sysdev.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index bcd7a5d53401..1fbc45b2a432 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -13,6 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/init.h>
 #include <linux/serial_core.h>
+#include <linux/sysdev.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
diff --git a/arch/arm/mach-shark/include/mach/vmalloc.h b/arch/arm/mach-shark/include/mach/vmalloc.h
index 8e845b6a7cb5..b10df988526d 100644
--- a/arch/arm/mach-shark/include/mach/vmalloc.h
+++ b/arch/arm/mach-shark/include/mach/vmalloc.h
@@ -1,4 +1,4 @@
 /*
  * arch/arm/mach-shark/include/mach/vmalloc.h
  */
-#define VMALLOC_END	0xd0000000
+#define VMALLOC_END	0xd0000000UL
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index d3260542b943..d440e5f456ad 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -567,38 +567,127 @@ static struct platform_device *qhd_devices[] __initdata = {
 
 /* FSI */
 #define IRQ_FSI evt2irq(0x1840)
+static int __fsi_set_rate(struct clk *clk, long rate, int enable)
+{
+	int ret = 0;
+
+	if (rate <= 0)
+		return ret;
+
+	if (enable) {
+		ret = clk_set_rate(clk, rate);
+		if (0 == ret)
+			ret = clk_enable(clk);
+	} else {
+		clk_disable(clk);
+	}
+
+	return ret;
+}
+
+static int __fsi_set_round_rate(struct clk *clk, long rate, int enable)
+{
+	return __fsi_set_rate(clk, clk_round_rate(clk, rate), enable);
+}
 
-static int fsi_set_rate(int is_porta, int rate)
+static int fsi_ak4642_set_rate(struct device *dev, int rate, int enable)
+{
+	struct clk *fsia_ick;
+	struct clk *fsiack;
+	int ret = -EIO;
+
+	fsia_ick = clk_get(dev, "icka");
+	if (IS_ERR(fsia_ick))
+		return PTR_ERR(fsia_ick);
+
+	/*
+	 * FSIACK is connected to AK4642,
+	 * and use external clock pin from it.
+	 * it is parent of fsia_ick now.
+	 */
+	fsiack = clk_get_parent(fsia_ick);
+	if (!fsiack)
+		goto fsia_ick_out;
+
+	/*
+	 * we get 1/1 divided clock by setting same rate to fsiack and fsia_ick
+	 *
+	 ** FIXME **
+	 * Because the freq_table of external clk (fsiack) are all 0,
+	 * the return value of clk_round_rate became 0.
+	 * So, it use __fsi_set_rate here.
+	 */
+	ret = __fsi_set_rate(fsiack, rate, enable);
+	if (ret < 0)
+		goto fsiack_out;
+
+	ret = __fsi_set_round_rate(fsia_ick, rate, enable);
+	if ((ret < 0) && enable)
+		__fsi_set_round_rate(fsiack, rate, 0); /* disable FSI ACK */
+
+fsiack_out:
+	clk_put(fsiack);
+
+fsia_ick_out:
+	clk_put(fsia_ick);
+
+	return 0;
+}
+
+static int fsi_hdmi_set_rate(struct device *dev, int rate, int enable)
 {
 	struct clk *fsib_clk;
 	struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+	long fsib_rate = 0;
+	long fdiv_rate = 0;
+	int ackmd_bpfmd;
 	int ret;
 
-	/* set_rate is not needed if port A */
-	if (is_porta)
-		return 0;
-
-	fsib_clk = clk_get(NULL, "fsib_clk");
-	if (IS_ERR(fsib_clk))
-		return -EINVAL;
-
 	switch (rate) {
 	case 44100:
-		clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 11283000));
-		ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		fsib_rate = rate * 256;
+		ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
 		break;
 	case 48000:
-		clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
-		clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
-		ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		fsib_rate = 85428000; /* around 48kHz x 256 x 7 */
+		fdiv_rate = rate * 256;
+		ackmd_bpfmd = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
 		break;
 	default:
 		pr_err("unsupported rate in FSI2 port B\n");
-		ret = -EINVAL;
-		break;
+		return -EINVAL;
 	}
 
+	/* FSI B setting */
+	fsib_clk = clk_get(dev, "ickb");
+	if (IS_ERR(fsib_clk))
+		return -EIO;
+
+	ret = __fsi_set_round_rate(fsib_clk, fsib_rate, enable);
 	clk_put(fsib_clk);
+	if (ret < 0)
+		return ret;
+
+	/* FSI DIV setting */
+	ret = __fsi_set_round_rate(fdiv_clk, fdiv_rate, enable);
+	if (ret < 0) {
+		/* disable FSI B */
+		if (enable)
+			__fsi_set_round_rate(fsib_clk, fsib_rate, 0);
+		return ret;
+	}
+
+	return ackmd_bpfmd;
+}
+
+static int fsi_set_rate(struct device *dev, int is_porta, int rate, int enable)
+{
+	int ret;
+
+	if (is_porta)
+		ret = fsi_ak4642_set_rate(dev, rate, enable);
+	else
+		ret = fsi_hdmi_set_rate(dev, rate, enable);
 
 	return ret;
 }
@@ -880,6 +969,11 @@ static int __init hdmi_init_pm_clock(void)
 		goto out;
 	}
 
+	ret = clk_enable(&sh7372_pllc2_clk);
+	if (ret < 0) {
+		pr_err("Cannot enable pllc2 clock\n");
+		goto out;
+	}
 	pr_debug("PLLC2 set frequency %lu\n", rate);
 
 	ret = clk_set_parent(hdmi_ick, &sh7372_pllc2_clk);
@@ -896,23 +990,11 @@ out:
 
 device_initcall(hdmi_init_pm_clock);
 
-#define FSIACK_DUMMY_RATE 48000
 static int __init fsi_init_pm_clock(void)
 {
 	struct clk *fsia_ick;
 	int ret;
 
-	/*
-	 * FSIACK is connected to AK4642,
-	 * and the rate is depend on playing sound rate.
-	 * So, set dummy rate (= 48k) here
-	 */
-	ret = clk_set_rate(&sh7372_fsiack_clk, FSIACK_DUMMY_RATE);
-	if (ret < 0) {
-		pr_err("Cannot set FSIACK dummy rate: %d\n", ret);
-		return ret;
-	}
-
 	fsia_ick = clk_get(&fsi_device.dev, "icka");
 	if (IS_ERR(fsia_ick)) {
 		ret = PTR_ERR(fsia_ick);
@@ -921,16 +1003,9 @@ static int __init fsi_init_pm_clock(void)
 	}
 
 	ret = clk_set_parent(fsia_ick, &sh7372_fsiack_clk);
-	if (ret < 0) {
-		pr_err("Cannot set FSI-A parent: %d\n", ret);
-		goto out;
-	}
-
-	ret = clk_set_rate(fsia_ick, FSIACK_DUMMY_RATE);
 	if (ret < 0)
-		pr_err("Cannot set FSI-A rate: %d\n", ret);
+		pr_err("Cannot set FSI-A parent: %d\n", ret);
 
-out:
 	clk_put(fsia_ick);
 
 	return ret;
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 7db31e6c6bf2..3aa026069435 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -220,8 +220,7 @@ static void pllc2_disable(struct clk *clk)
 	__raw_writel(__raw_readl(PLLC2CR) & ~0x80000000, PLLC2CR);
 }
 
-static int pllc2_set_rate(struct clk *clk,
-			  unsigned long rate, int algo_id)
+static int pllc2_set_rate(struct clk *clk, unsigned long rate)
 {
 	unsigned long value;
 	int idx;
@@ -230,21 +229,13 @@ static int pllc2_set_rate(struct clk *clk,
 	if (idx < 0)
 		return idx;
 
-	if (rate == clk->parent->rate) {
-		pllc2_disable(clk);
-		return 0;
-	}
+	if (rate == clk->parent->rate)
+		return -EINVAL;
 
 	value = __raw_readl(PLLC2CR) & ~(0x3f << 24);
 
-	if (value & 0x80000000)
-		pllc2_disable(clk);
-
 	__raw_writel((value & ~0x80000000) | ((idx + 19) << 24), PLLC2CR);
 
-	if (value & 0x80000000)
-		return pllc2_enable(clk);
-
 	return 0;
 }
 
@@ -453,32 +444,24 @@ static int fsidiv_enable(struct clk *clk)
 	unsigned long value;
 
 	value = __raw_readl(clk->mapping->base) >> 16;
-	if (value < 2) {
-		fsidiv_disable(clk);
-		return -ENOENT;
-	}
+	if (value < 2)
+		return -EIO;
 
 	__raw_writel((value << 16) | 0x3, clk->mapping->base);
 
 	return 0;
 }
 
-static int fsidiv_set_rate(struct clk *clk,
-			   unsigned long rate, int algo_id)
+static int fsidiv_set_rate(struct clk *clk, unsigned long rate)
 {
 	int idx;
 
-	if (clk->parent->rate == rate) {
-		fsidiv_disable(clk);
-		return 0;
-	}
-
 	idx = (clk->parent->rate / rate) & 0xffff;
 	if (idx < 2)
-		return -ENOENT;
+		return -EINVAL;
 
 	__raw_writel(idx << 16, clk->mapping->base);
-	return fsidiv_enable(clk);
+	return 0;
 }
 
 static struct clk_ops fsidiv_clk_ops = {
@@ -609,8 +592,6 @@ static struct clk_lookup lookups[] = {
 	CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
 	CLKDEV_CON_ID("fmsi_clk", &div6_clks[DIV6_FMSI]),
 	CLKDEV_CON_ID("fmso_clk", &div6_clks[DIV6_FMSO]),
-	CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FSIA]),
-	CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FSIB]),
 	CLKDEV_CON_ID("sub_clk", &div6_clks[DIV6_SUB]),
 	CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_SPU]),
 	CLKDEV_CON_ID("vou_clk", &div6_clks[DIV6_VOU]),
@@ -647,8 +628,8 @@ static struct clk_lookup lookups[] = {
647 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */ 628 CLKDEV_DEV_ID("sh_cmt.10", &mstp_clks[MSTP329]), /* CMT10 */
648 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */ 629 CLKDEV_DEV_ID("sh_fsi2", &mstp_clks[MSTP328]), /* FSI2 */
649 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */ 630 CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[MSTP323]), /* IIC1 */
650 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP323]), /* USB0 */ 631 CLKDEV_DEV_ID("r8a66597_hcd.0", &mstp_clks[MSTP322]), /* USB0 */
651 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP323]), /* USB0 */ 632 CLKDEV_DEV_ID("r8a66597_udc.0", &mstp_clks[MSTP322]), /* USB0 */
652 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */ 633 CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[MSTP314]), /* SDHI0 */
653 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */ 634 CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
654 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */ 635 CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMC */
diff --git a/arch/arm/mach-tegra/include/mach/debug-macro.S b/arch/arm/mach-tegra/include/mach/debug-macro.S
index 8ea3bffb4e00..a0e7c12868bd 100644
--- a/arch/arm/mach-tegra/include/mach/debug-macro.S
+++ b/arch/arm/mach-tegra/include/mach/debug-macro.S
@@ -21,8 +21,8 @@
21#include <mach/io.h> 21#include <mach/io.h>
22 22
23 .macro addruart, rp, rv 23 .macro addruart, rp, rv
24 ldreq \rp, =IO_APB_PHYS @ physical 24 ldr \rp, =IO_APB_PHYS @ physical
25 ldrne \rv, =IO_APB_VIRT @ virtual 25 ldr \rv, =IO_APB_VIRT @ virtual
26#if defined(CONFIG_TEGRA_DEBUG_UART_NONE) 26#if defined(CONFIG_TEGRA_DEBUG_UART_NONE)
27#error "A debug UART must be selected in the kernel config to use DEBUG_LL" 27#error "A debug UART must be selected in the kernel config to use DEBUG_LL"
28#elif defined(CONFIG_TEGRA_DEBUG_UARTA) 28#elif defined(CONFIG_TEGRA_DEBUG_UARTA)
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index 73fb1a551ec6..608a1372b172 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -75,14 +75,14 @@ void __init ux500_init_irq(void)
75static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask) 75static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
76{ 76{
77 /* wait for the operation to complete */ 77 /* wait for the operation to complete */
78 while (readl(reg) & mask) 78 while (readl_relaxed(reg) & mask)
79 ; 79 ;
80} 80}
81 81
82static inline void ux500_cache_sync(void) 82static inline void ux500_cache_sync(void)
83{ 83{
84 void __iomem *base = __io_address(UX500_L2CC_BASE); 84 void __iomem *base = __io_address(UX500_L2CC_BASE);
85 writel(0, base + L2X0_CACHE_SYNC); 85 writel_relaxed(0, base + L2X0_CACHE_SYNC);
86 ux500_cache_wait(base + L2X0_CACHE_SYNC, 1); 86 ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
87} 87}
88 88
@@ -107,7 +107,7 @@ static void ux500_l2x0_inv_all(void)
107 uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */ 107 uint32_t l2x0_way_mask = (1<<16) - 1; /* Bitmask of active ways */
108 108
109 /* invalidate all ways */ 109 /* invalidate all ways */
110 writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); 110 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
111 ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); 111 ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
112 ux500_cache_sync(); 112 ux500_cache_sync();
113} 113}
diff --git a/arch/arm/mach-versatile/include/mach/vmalloc.h b/arch/arm/mach-versatile/include/mach/vmalloc.h
index ebd8a2543d3b..7d8e069ad51b 100644
--- a/arch/arm/mach-versatile/include/mach/vmalloc.h
+++ b/arch/arm/mach-versatile/include/mach/vmalloc.h
@@ -18,4 +18,4 @@
18 * along with this program; if not, write to the Free Software 18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21#define VMALLOC_END 0xd8000000 21#define VMALLOC_END 0xd8000000UL
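
The UL suffix matters because 0xd8000000 does not fit in a signed 32-bit int, so the unsuffixed constant silently has type unsigned int, while VMALLOC_END is compared against unsigned long addresses; the explicit suffix keeps the types consistent. A small C11 demonstration of the type difference (this motivation is inferred from the change, not spelled out by the patch):

#include <stdio.h>

#define TYPE_NAME(x) _Generic((x),		\
	unsigned int:  "unsigned int",		\
	unsigned long: "unsigned long",		\
	default:       "something else")

int main(void)
{
	/* 0xd8000000 overflows a 32-bit signed int, so the unsuffixed
	 * hex constant is typed unsigned int; UL makes it explicit. */
	printf("0xd8000000   is %s\n", TYPE_NAME(0xd8000000));
	printf("0xd8000000UL is %s\n", TYPE_NAME(0xd8000000UL));
	return 0;
}
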
diff --git a/arch/arm/mach-vexpress/headsmp.S b/arch/arm/mach-vexpress/headsmp.S
index 8a78ff68e1ee..7a3f0632947c 100644
--- a/arch/arm/mach-vexpress/headsmp.S
+++ b/arch/arm/mach-vexpress/headsmp.S
@@ -35,5 +35,6 @@ pen: ldr r7, [r6]
35 */ 35 */
36 b secondary_startup 36 b secondary_startup
37 37
38 .align
381: .long . 391: .long .
39 .long pen_release 40 .long pen_release
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 17e7b0b57e49..55c17a6fb22f 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -206,8 +206,8 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
206 */ 206 */
207 if (pfn_valid(pfn)) { 207 if (pfn_valid(pfn)) {
208 printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n" 208 printk(KERN_WARNING "BUG: Your driver calls ioremap() on system memory. This leads\n"
209 KERN_WARNING "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n" 209 "to architecturally unpredictable behaviour on ARMv6+, and ioremap()\n"
210 KERN_WARNING "will fail in the next kernel release. Please fix your driver.\n"); 210 "will fail in the next kernel release. Please fix your driver.\n");
211 WARN_ON(1); 211 WARN_ON(1);
212 } 212 }
213 213
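
The printk() fix above relies on C string-literal concatenation: a multi-line message is a single literal, so only the leading KERN_WARNING acts as a log level, and a repeated KERN_WARNING inside the message would be printed as literal text rather than interpreted. A user-space sketch of the concatenation behaviour (the "<4>" string is the conventional expansion of KERN_WARNING):

#include <stdio.h>

#define KERN_WARNING "<4>"	/* the log-level marker the logger scans for */

int main(void)
{
	/* Adjacent string literals are concatenated into one string, so
	 * only the first KERN_WARNING is seen as a log level; repeating
	 * it mid-message would emit a literal "<4>" instead. */
	printf(KERN_WARNING "BUG: first line of the warning\n"
	       "second line of the same message\n");
	return 0;
}
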
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 53cbe2225153..9b9ff5d949fd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -381,7 +381,7 @@ __v7_ca9mp_proc_info:
381 PMD_SECT_XN | \ 381 PMD_SECT_XN | \
382 PMD_SECT_AP_WRITE | \ 382 PMD_SECT_AP_WRITE | \
383 PMD_SECT_AP_READ 383 PMD_SECT_AP_READ
384 b __v7_ca9mp_setup 384 W(b) __v7_ca9mp_setup
385 .long cpu_arch_name 385 .long cpu_arch_name
386 .long cpu_elf_name 386 .long cpu_elf_name
387 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS 387 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
@@ -413,7 +413,7 @@ __v7_proc_info:
413 PMD_SECT_XN | \ 413 PMD_SECT_XN | \
414 PMD_SECT_AP_WRITE | \ 414 PMD_SECT_AP_WRITE | \
415 PMD_SECT_AP_READ 415 PMD_SECT_AP_READ
416 b __v7_setup 416 W(b) __v7_setup
417 .long cpu_arch_name 417 .long cpu_arch_name
418 .long cpu_elf_name 418 .long cpu_elf_name
419 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS 419 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 85d3e55ca4a9..558cdfaf76b6 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -18,6 +18,7 @@
18#include <linux/time.h> 18#include <linux/time.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/timex.h> 20#include <linux/timex.h>
21#include <linux/sched.h>
21#include <linux/io.h> 22#include <linux/io.h>
22#include <linux/clocksource.h> 23#include <linux/clocksource.h>
23#include <linux/clockchips.h> 24#include <linux/clockchips.h>
@@ -36,7 +37,7 @@
36/* 37/*
37 * IOP clocksource (free-running timer 1). 38 * IOP clocksource (free-running timer 1).
38 */ 39 */
39static cycle_t iop_clocksource_read(struct clocksource *unused) 40static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
40{ 41{
41 return 0xffffffffu - read_tcr1(); 42 return 0xffffffffu - read_tcr1();
42} 43}
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 02d989018059..3a705c7877dd 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -12,15 +12,7 @@
12 12
13#include <mach/hardware.h> 13#include <mach/hardware.h>
14#include <mach/devices-common.h> 14#include <mach/devices-common.h>
15#ifdef SDMA_IS_MERGED
16#include <mach/sdma.h> 15#include <mach/sdma.h>
17#else
18struct sdma_platform_data {
19 int sdma_version;
20 char *cpu_name;
21 int to_version;
22};
23#endif
24 16
25struct imx_imx_sdma_data { 17struct imx_imx_sdma_data {
26 resource_size_t iobase; 18 resource_size_t iobase;
diff --git a/arch/arm/plat-mxc/devices/platform-spi_imx.c b/arch/arm/plat-mxc/devices/platform-spi_imx.c
index e48340ec331e..17f724c9452d 100644
--- a/arch/arm/plat-mxc/devices/platform-spi_imx.c
+++ b/arch/arm/plat-mxc/devices/platform-spi_imx.c
@@ -27,6 +27,7 @@ const struct imx_spi_imx_data imx21_cspi_data[] __initconst = {
27 imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K) 27 imx_spi_imx_data_entry(MX21, CSPI, "imx21-cspi", _id, _hwid, SZ_4K)
28 imx21_cspi_data_entry(0, 1), 28 imx21_cspi_data_entry(0, 1),
29 imx21_cspi_data_entry(1, 2), 29 imx21_cspi_data_entry(1, 2),
30};
30#endif 31#endif
31 32
32#ifdef CONFIG_ARCH_MX25 33#ifdef CONFIG_ARCH_MX25
diff --git a/arch/arm/plat-nomadik/timer.c b/arch/arm/plat-nomadik/timer.c
index aedf9c1d645e..63cdc6025bd7 100644
--- a/arch/arm/plat-nomadik/timer.c
+++ b/arch/arm/plat-nomadik/timer.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Copyright (C) 2008 STMicroelectronics 4 * Copyright (C) 2008 STMicroelectronics
5 * Copyright (C) 2010 Alessandro Rubini 5 * Copyright (C) 2010 Alessandro Rubini
6 * Copyright (C) 2010 Linus Walleij for ST-Ericsson
6 * 7 *
7 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2, as 9 * it under the terms of the GNU General Public License version 2, as
@@ -16,11 +17,13 @@
16#include <linux/clk.h> 17#include <linux/clk.h>
17#include <linux/jiffies.h> 18#include <linux/jiffies.h>
18#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/cnt32_to_63.h>
21#include <linux/timer.h>
19#include <asm/mach/time.h> 22#include <asm/mach/time.h>
20 23
21#include <plat/mtu.h> 24#include <plat/mtu.h>
22 25
23void __iomem *mtu_base; /* ssigned by machine code */ 26void __iomem *mtu_base; /* Assigned by machine code */
24 27
25/* 28/*
26 * Kernel assumes that sched_clock can be called early 29 * Kernel assumes that sched_clock can be called early
@@ -48,16 +51,82 @@ static struct clocksource nmdk_clksrc = {
48/* 51/*
49 * Override the global weak sched_clock symbol with this 52 * Override the global weak sched_clock symbol with this
50 * local implementation which uses the clocksource to get some 53 * local implementation which uses the clocksource to get some
51 * better resolution when scheduling the kernel. We accept that 54 * better resolution when scheduling the kernel.
52 * this wraps around for now, since it is just a relative time 55 *
53 * stamp. (Inspired by OMAP implementation.) 56 * Because the hardware timer period may be quite short
57 * (32.3 secs on the 133 MHz MTU timer selection on ux500)
58 * and because cnt32_to_63() needs to be called at least once per
59 * half period to work properly, a kernel keepwarm() timer is set up
60 * to ensure this requirement is always met.
61 *
 62 * Also the sched_clock timer will wrap around at some point,
 63 * here we set it to run continuously for a year.
54 */ 64 */
65#define SCHED_CLOCK_MIN_WRAP 3600*24*365
66static struct timer_list cnt32_to_63_keepwarm_timer;
67static u32 sched_mult;
68static u32 sched_shift;
69
55unsigned long long notrace sched_clock(void) 70unsigned long long notrace sched_clock(void)
56{ 71{
57 return clocksource_cyc2ns(nmdk_clksrc.read( 72 u64 cycles;
58 &nmdk_clksrc), 73
59 nmdk_clksrc.mult, 74 if (unlikely(!mtu_base))
60 nmdk_clksrc.shift); 75 return 0;
76
77 cycles = cnt32_to_63(-readl(mtu_base + MTU_VAL(0)));
78 /*
 79 * sched_mult is guaranteed to be even, so it will
 80 * shift out bit 63
81 */
82 return (cycles * sched_mult) >> sched_shift;
83}
84
85/* Just kick sched_clock every so often */
86static void cnt32_to_63_keepwarm(unsigned long data)
87{
88 mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + data));
89 (void) sched_clock();
90}
91
92/*
 93 * Set up a timer to keep sched_clock()'s cnt32_to_63 algorithm warm
94 * once in half a 32bit timer wrap interval.
95 */
96static void __init nmdk_sched_clock_init(unsigned long rate)
97{
98 u32 v;
99 unsigned long delta;
100 u64 days;
101
 102 /* Find the appropriate mult and shift factors */
103 clocks_calc_mult_shift(&sched_mult, &sched_shift,
104 rate, NSEC_PER_SEC, SCHED_CLOCK_MIN_WRAP);
105 /* We need to multiply by an even number to get rid of bit 63 */
106 if (sched_mult & 1)
107 sched_mult++;
108
109 /* Let's see what we get, take max counter and scale it */
110 days = (0xFFFFFFFFFFFFFFFFLLU * sched_mult) >> sched_shift;
111 do_div(days, NSEC_PER_SEC);
112 do_div(days, (3600*24));
113
114 pr_info("sched_clock: using %d bits @ %lu Hz wrap in %lu days\n",
115 (64 - sched_shift), rate, (unsigned long) days);
116
117 /*
118 * Program a timer to kick us at half 32bit wraparound
119 * Formula: seconds per wrap = (2^32) / f
120 */
121 v = 0xFFFFFFFFUL / rate;
122 /* We want half of the wrap time to keep cnt32_to_63 warm */
123 v /= 2;
124 pr_debug("sched_clock: prescaled timer rate: %lu Hz, "
125 "initialize keepwarm timer every %d seconds\n", rate, v);
126 /* Convert seconds to jiffies */
127 delta = msecs_to_jiffies(v*1000);
128 setup_timer(&cnt32_to_63_keepwarm_timer, cnt32_to_63_keepwarm, delta);
129 mod_timer(&cnt32_to_63_keepwarm_timer, round_jiffies(jiffies + delta));
61} 130}
62 131
63/* Clockevent device: use one-shot mode */ 132/* Clockevent device: use one-shot mode */
@@ -161,13 +230,15 @@ void __init nmdk_timer_init(void)
161 writel(0, mtu_base + MTU_BGLR(0)); 230 writel(0, mtu_base + MTU_BGLR(0));
162 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0)); 231 writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));
163 232
164 /* Now the scheduling clock is ready */ 233 /* Now the clock source is ready */
165 nmdk_clksrc.read = nmdk_read_timer; 234 nmdk_clksrc.read = nmdk_read_timer;
166 235
167 if (clocksource_register(&nmdk_clksrc)) 236 if (clocksource_register(&nmdk_clksrc))
168 pr_err("timer: failed to initialize clock source %s\n", 237 pr_err("timer: failed to initialize clock source %s\n",
169 nmdk_clksrc.name); 238 nmdk_clksrc.name);
170 239
240 nmdk_sched_clock_init(rate);
241
171 /* Timer 1 is used for events */ 242 /* Timer 1 is used for events */
172 243
173 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE); 244 clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);
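
The scheme above has two moving parts: cnt32_to_63() extends a free-running 32-bit counter to 63 bits by tracking most-significant-bit flips, which is exactly why something (the keepwarm timer) must sample it at least once per half wrap, and the result is scaled to nanoseconds with a mult/shift pair, where the kernel forces mult even so a stale bit 63 shifts out. A self-contained model of both ideas, simplified from the kernel's macro and not identical to it; the mult/shift values are illustrative, not what clocks_calc_mult_shift() would return:

#include <stdio.h>
#include <stdint.h>

/* Extend a 32-bit counter to 63 bits by counting half-wraps.  The
 * hardware MSB flips once per half period, so as long as we are called
 * at least once per half wrap, no flip is missed.  A simplified model
 * of the cnt32_to_63 idea, not the kernel's exact macro. */
static uint64_t cnt32_to_63_model(uint32_t lo)
{
	static uint32_t half_wraps;		/* completed half-wraps so far */

	if ((lo >> 31) != (half_wraps & 1))	/* MSB flipped since last call */
		half_wraps++;
	return ((uint64_t)half_wraps << 31) + (lo & 0x7fffffffu);
}

int main(void)
{
	/* ns = cycles * mult >> shift for a 133 MHz timer; the kernel
	 * additionally forces mult even so a stale bit 63 shifts out. */
	uint32_t mult = 7699, shift = 10;	/* ~(1e9 / 133e6) * 2^10 */
	if (mult & 1)
		mult++;

	uint32_t hw = 0;
	for (int i = 0; i < 6; i++) {
		hw += 0x60000000u;		/* advance under half a wrap */
		uint64_t cycles = cnt32_to_63_model(hw);
		printf("hw=%#010x cycles=%llu ns=%llu\n", (unsigned)hw,
		       (unsigned long long)cycles,
		       (unsigned long long)((cycles * mult) >> shift));
	}
	return 0;
}
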
diff --git a/arch/arm/plat-pxa/include/plat/sdhci.h b/arch/arm/plat-pxa/include/plat/sdhci.h
index e49c5b6fc4e2..1ab332e37d7d 100644
--- a/arch/arm/plat-pxa/include/plat/sdhci.h
+++ b/arch/arm/plat-pxa/include/plat/sdhci.h
@@ -17,6 +17,9 @@
17/* Require clock free running */ 17/* Require clock free running */
18#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0) 18#define PXA_FLAG_DISABLE_CLOCK_GATING (1<<0)
19 19
20/* Board design supports 8-bit data on SD/SDIO BUS */
21#define PXA_FLAG_SD_8_BIT_CAPABLE_SLOT (1<<2)
22
20/* 23/*
21 * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI 24 * struct pxa_sdhci_platdata() - Platform device data for PXA SDHCI
22 * @max_speed: the maximum speed supported 25 * @max_speed: the maximum speed supported
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/plat-s3c24xx/cpu.c
index 76d0858c3cbb..4a10c0f684b2 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/plat-s3c24xx/cpu.c
@@ -88,7 +88,7 @@ static struct cpu_table cpu_ids[] __initdata = {
88 { 88 {
89 .idcode = 0x32440000, 89 .idcode = 0x32440000,
90 .idmask = 0xffffffff, 90 .idmask = 0xffffffff,
91 .map_io = s3c244x_map_io, 91 .map_io = s3c2440_map_io,
92 .init_clocks = s3c244x_init_clocks, 92 .init_clocks = s3c244x_init_clocks,
93 .init_uarts = s3c244x_init_uarts, 93 .init_uarts = s3c244x_init_uarts,
94 .init = s3c2440_init, 94 .init = s3c2440_init,
@@ -97,7 +97,7 @@ static struct cpu_table cpu_ids[] __initdata = {
97 { 97 {
98 .idcode = 0x32440001, 98 .idcode = 0x32440001,
99 .idmask = 0xffffffff, 99 .idmask = 0xffffffff,
100 .map_io = s3c244x_map_io, 100 .map_io = s3c2440_map_io,
101 .init_clocks = s3c244x_init_clocks, 101 .init_clocks = s3c244x_init_clocks,
102 .init_uarts = s3c244x_init_uarts, 102 .init_uarts = s3c244x_init_uarts,
103 .init = s3c2440_init, 103 .init = s3c2440_init,
@@ -106,7 +106,7 @@ static struct cpu_table cpu_ids[] __initdata = {
106 { 106 {
107 .idcode = 0x32440aaa, 107 .idcode = 0x32440aaa,
108 .idmask = 0xffffffff, 108 .idmask = 0xffffffff,
109 .map_io = s3c244x_map_io, 109 .map_io = s3c2442_map_io,
110 .init_clocks = s3c244x_init_clocks, 110 .init_clocks = s3c244x_init_clocks,
111 .init_uarts = s3c244x_init_uarts, 111 .init_uarts = s3c244x_init_uarts,
112 .init = s3c2442_init, 112 .init = s3c2442_init,
@@ -115,7 +115,7 @@ static struct cpu_table cpu_ids[] __initdata = {
115 { 115 {
116 .idcode = 0x32440aab, 116 .idcode = 0x32440aab,
117 .idmask = 0xffffffff, 117 .idmask = 0xffffffff,
118 .map_io = s3c244x_map_io, 118 .map_io = s3c2442_map_io,
119 .init_clocks = s3c244x_init_clocks, 119 .init_clocks = s3c244x_init_clocks,
120 .init_uarts = s3c244x_init_uarts, 120 .init_uarts = s3c244x_init_uarts,
121 .init = s3c2442_init, 121 .init = s3c2442_init,
diff --git a/arch/arm/plat-s3c24xx/gpiolib.c b/arch/arm/plat-s3c24xx/gpiolib.c
index 24c6f5a30596..243b6411050d 100644
--- a/arch/arm/plat-s3c24xx/gpiolib.c
+++ b/arch/arm/plat-s3c24xx/gpiolib.c
@@ -82,8 +82,6 @@ static struct s3c_gpio_cfg s3c24xx_gpiocfg_banka = {
82struct s3c_gpio_cfg s3c24xx_gpiocfg_default = { 82struct s3c_gpio_cfg s3c24xx_gpiocfg_default = {
83 .set_config = s3c_gpio_setcfg_s3c24xx, 83 .set_config = s3c_gpio_setcfg_s3c24xx,
84 .get_config = s3c_gpio_getcfg_s3c24xx, 84 .get_config = s3c_gpio_getcfg_s3c24xx,
85 .set_pull = s3c_gpio_setpull_1up,
86 .get_pull = s3c_gpio_getpull_1up,
87}; 85};
88 86
89struct s3c_gpio_chip s3c24xx_gpios[] = { 87struct s3c_gpio_chip s3c24xx_gpios[] = {
diff --git a/arch/arm/plat-s3c24xx/include/plat/s3c244x.h b/arch/arm/plat-s3c24xx/include/plat/s3c244x.h
index 307248d1ccbb..89e8d0a25f87 100644
--- a/arch/arm/plat-s3c24xx/include/plat/s3c244x.h
+++ b/arch/arm/plat-s3c24xx/include/plat/s3c244x.h
@@ -21,17 +21,22 @@ extern void s3c244x_init_clocks(int xtal);
21#else 21#else
22#define s3c244x_init_clocks NULL 22#define s3c244x_init_clocks NULL
23#define s3c244x_init_uarts NULL 23#define s3c244x_init_uarts NULL
24#define s3c244x_map_io NULL
25#endif 24#endif
26 25
27#ifdef CONFIG_CPU_S3C2440 26#ifdef CONFIG_CPU_S3C2440
28extern int s3c2440_init(void); 27extern int s3c2440_init(void);
28
29extern void s3c2440_map_io(void);
29#else 30#else
30#define s3c2440_init NULL 31#define s3c2440_init NULL
32#define s3c2440_map_io NULL
31#endif 33#endif
32 34
33#ifdef CONFIG_CPU_S3C2442 35#ifdef CONFIG_CPU_S3C2442
34extern int s3c2442_init(void); 36extern int s3c2442_init(void);
37
38extern void s3c2442_map_io(void);
35#else 39#else
36#define s3c2442_init NULL 40#define s3c2442_init NULL
41#define s3c2442_map_io NULL
37#endif 42#endif
diff --git a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
index 9793544a6ace..704175b0573f 100644
--- a/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
+++ b/arch/arm/plat-s3c24xx/spi-bus0-gpe11_12_13.c
@@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus0_gpe11_12_13(struct s3c2410_spi_info *spi,
29 } else { 29 } else {
30 s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT); 30 s3c_gpio_cfgpin(S3C2410_GPE(13), S3C2410_GPIO_INPUT);
31 s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT); 31 s3c_gpio_cfgpin(S3C2410_GPE(11), S3C2410_GPIO_INPUT);
32 s3c_gpio_cfgpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE); 32 s3c_gpio_setpull(S3C2410_GPE(11), S3C_GPIO_PULL_NONE);
33 s3c_gpio_cfgpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE); 33 s3c_gpio_setpull(S3C2410_GPE(12), S3C_GPIO_PULL_NONE);
34 s3c_gpio_cfgpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE); 34 s3c_gpio_setpull(S3C2410_GPE(13), S3C_GPIO_PULL_NONE);
35 } 35 }
36} 36}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
index db9e9e477ec1..72457afd6255 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpd8_9_10.c
@@ -31,8 +31,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpd8_9_10(struct s3c2410_spi_info *spi,
31 } else { 31 } else {
32 s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT); 32 s3c_gpio_cfgpin(S3C2410_GPD(8), S3C2410_GPIO_INPUT);
33 s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT); 33 s3c_gpio_cfgpin(S3C2410_GPD(9), S3C2410_GPIO_INPUT);
34 s3c_gpio_cfgpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE); 34 s3c_gpio_setpull(S3C2410_GPD(10), S3C_GPIO_PULL_NONE);
35 s3c_gpio_cfgpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE); 35 s3c_gpio_setpull(S3C2410_GPD(9), S3C_GPIO_PULL_NONE);
36 s3c_gpio_cfgpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE); 36 s3c_gpio_setpull(S3C2410_GPD(8), S3C_GPIO_PULL_NONE);
37 } 37 }
38} 38}
diff --git a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
index 8ea663a438bb..c3972b645d13 100644
--- a/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
+++ b/arch/arm/plat-s3c24xx/spi-bus1-gpg5_6_7.c
@@ -29,8 +29,8 @@ void s3c24xx_spi_gpiocfg_bus1_gpg5_6_7(struct s3c2410_spi_info *spi,
29 } else { 29 } else {
30 s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT); 30 s3c_gpio_cfgpin(S3C2410_GPG(7), S3C2410_GPIO_INPUT);
31 s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT); 31 s3c_gpio_cfgpin(S3C2410_GPG(5), S3C2410_GPIO_INPUT);
32 s3c_gpio_cfgpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE); 32 s3c_gpio_setpull(S3C2410_GPG(5), S3C_GPIO_PULL_NONE);
33 s3c_gpio_cfgpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE); 33 s3c_gpio_setpull(S3C2410_GPG(6), S3C_GPIO_PULL_NONE);
34 s3c_gpio_cfgpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE); 34 s3c_gpio_setpull(S3C2410_GPG(7), S3C_GPIO_PULL_NONE);
35 } 35 }
36} 36}
diff --git a/arch/arm/plat-samsung/gpio-config.c b/arch/arm/plat-samsung/gpio-config.c
index b732b773b9af..0aa32f242ee4 100644
--- a/arch/arm/plat-samsung/gpio-config.c
+++ b/arch/arm/plat-samsung/gpio-config.c
@@ -280,18 +280,17 @@ s3c_gpio_pull_t s3c_gpio_getpull_updown(struct s3c_gpio_chip *chip,
280} 280}
281#endif 281#endif
282 282
283#ifdef CONFIG_S3C_GPIO_PULL_UP 283#if defined(CONFIG_S3C_GPIO_PULL_UP) || defined(CONFIG_S3C_GPIO_PULL_DOWN)
284int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip, 284static int s3c_gpio_setpull_1(struct s3c_gpio_chip *chip,
285 unsigned int off, s3c_gpio_pull_t pull) 285 unsigned int off, s3c_gpio_pull_t pull,
286 s3c_gpio_pull_t updown)
286{ 287{
287 void __iomem *reg = chip->base + 0x08; 288 void __iomem *reg = chip->base + 0x08;
288 u32 pup = __raw_readl(reg); 289 u32 pup = __raw_readl(reg);
289 290
290 pup = __raw_readl(reg); 291 if (pull == updown)
291
292 if (pup == S3C_GPIO_PULL_UP)
293 pup &= ~(1 << off); 292 pup &= ~(1 << off);
294 else if (pup == S3C_GPIO_PULL_NONE) 293 else if (pull == S3C_GPIO_PULL_NONE)
295 pup |= (1 << off); 294 pup |= (1 << off);
296 else 295 else
297 return -EINVAL; 296 return -EINVAL;
@@ -300,17 +299,45 @@ int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
300 return 0; 299 return 0;
301} 300}
302 301
303s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip, 302static s3c_gpio_pull_t s3c_gpio_getpull_1(struct s3c_gpio_chip *chip,
304 unsigned int off) 303 unsigned int off, s3c_gpio_pull_t updown)
305{ 304{
306 void __iomem *reg = chip->base + 0x08; 305 void __iomem *reg = chip->base + 0x08;
307 u32 pup = __raw_readl(reg); 306 u32 pup = __raw_readl(reg);
308 307
309 pup &= (1 << off); 308 pup &= (1 << off);
310 return pup ? S3C_GPIO_PULL_NONE : S3C_GPIO_PULL_UP; 309 return pup ? S3C_GPIO_PULL_NONE : updown;
310}
311#endif /* CONFIG_S3C_GPIO_PULL_UP || CONFIG_S3C_GPIO_PULL_DOWN */
312
313#ifdef CONFIG_S3C_GPIO_PULL_UP
314s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
315 unsigned int off)
316{
317 return s3c_gpio_getpull_1(chip, off, S3C_GPIO_PULL_UP);
318}
319
320int s3c_gpio_setpull_1up(struct s3c_gpio_chip *chip,
321 unsigned int off, s3c_gpio_pull_t pull)
322{
323 return s3c_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_UP);
311} 324}
312#endif /* CONFIG_S3C_GPIO_PULL_UP */ 325#endif /* CONFIG_S3C_GPIO_PULL_UP */
313 326
327#ifdef CONFIG_S3C_GPIO_PULL_DOWN
328s3c_gpio_pull_t s3c_gpio_getpull_1down(struct s3c_gpio_chip *chip,
329 unsigned int off)
330{
331 return s3c_gpio_getpull_1(chip, off, S3C_GPIO_PULL_DOWN);
332}
333
334int s3c_gpio_setpull_1down(struct s3c_gpio_chip *chip,
335 unsigned int off, s3c_gpio_pull_t pull)
336{
337 return s3c_gpio_setpull_1(chip, off, pull, S3C_GPIO_PULL_DOWN);
338}
339#endif /* CONFIG_S3C_GPIO_PULL_DOWN */
340
314#ifdef CONFIG_S5P_GPIO_DRVSTR 341#ifdef CONFIG_S5P_GPIO_DRVSTR
315s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin) 342s5p_gpio_drvstr_t s5p_gpio_get_drvstr(unsigned int pin)
316{ 343{
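
The consolidation above folds the pull-up and pull-down variants into one helper parameterized by which single resistor the bank actually has, and in passing fixes the old code, which tested the register value rather than the requested pull. A user-space model of the shared helper logic (register and enum names are stand-ins, not the kernel's):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

typedef enum { PULL_NONE, PULL_DOWN, PULL_UP } pull_t;

/* One bit per pin: 0 = resistor connected, 1 = resistor disabled. */
static uint32_t pud_reg;

/* Shared helper: 'updown' names the only resistor this bank has. */
static int setpull_1(unsigned off, pull_t pull, pull_t updown)
{
	if (pull == updown)
		pud_reg &= ~(1u << off);	/* enable the resistor */
	else if (pull == PULL_NONE)
		pud_reg |= (1u << off);		/* float the pin */
	else
		return -EINVAL;			/* this bank lacks that flavour */
	return 0;
}

static pull_t getpull_1(unsigned off, pull_t updown)
{
	return (pud_reg & (1u << off)) ? PULL_NONE : updown;
}

int main(void)
{
	setpull_1(3, PULL_UP, PULL_UP);				/* pull-up bank */
	printf("pin3: %d\n", getpull_1(3, PULL_UP));		/* 2 = PULL_UP */
	printf("bad: %d\n", setpull_1(3, PULL_DOWN, PULL_UP));	/* -22 */
	return 0;
}
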
diff --git a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
index 8fd65d8b5863..0d2c5703f1ee 100644
--- a/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
+++ b/arch/arm/plat-samsung/include/plat/gpio-cfg-helpers.h
@@ -210,6 +210,17 @@ extern s3c_gpio_pull_t s3c_gpio_getpull_1up(struct s3c_gpio_chip *chip,
210 unsigned int off); 210 unsigned int off);
211 211
212/** 212/**
213 * s3c_gpio_getpull_1down() - Get configuration for choice of down or none
214 * @chip: The gpio chip that the GPIO pin belongs to
215 * @off: The offset to the pin to get the configuration of.
216 *
217 * This helper function reads the state of the pull-down resistor for the
218 * given GPIO in the same case as s3c_gpio_setpull_1down.
219*/
220extern s3c_gpio_pull_t s3c_gpio_getpull_1down(struct s3c_gpio_chip *chip,
221 unsigned int off);
222
223/**
213 * s3c_gpio_setpull_s3c2443() - Pull configuration for s3c2443. 224 * s3c_gpio_setpull_s3c2443() - Pull configuration for s3c2443.
214 * @chip: The gpio chip that is being configured. 225 * @chip: The gpio chip that is being configured.
215 * @off: The offset for the GPIO being configured. 226 * @off: The offset for the GPIO being configured.
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index d66cead97d28..9897dcfc16d6 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -206,6 +206,7 @@ ENTRY(vfp_save_state)
206 mov pc, lr 206 mov pc, lr
207ENDPROC(vfp_save_state) 207ENDPROC(vfp_save_state)
208 208
209 .align
209last_VFP_context_address: 210last_VFP_context_address:
210 .word last_VFP_context 211 .word last_VFP_context
211 212
diff --git a/arch/mn10300/include/asm/syscall.h b/arch/mn10300/include/asm/syscall.h
new file mode 100644
index 000000000000..b44b0bb75a01
--- /dev/null
+++ b/arch/mn10300/include/asm/syscall.h
@@ -0,0 +1,117 @@
1/* Access to user system call parameters and results
2 *
3 * See asm-generic/syscall.h for function descriptions.
4 *
5 * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
6 * Written by David Howells (dhowells@redhat.com)
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public Licence
10 * as published by the Free Software Foundation; either version
11 * 2 of the Licence, or (at your option) any later version.
12 */
13
14#ifndef _ASM_SYSCALL_H
15#define _ASM_SYSCALL_H
16
17#include <linux/sched.h>
18#include <linux/err.h>
19
20extern const unsigned long sys_call_table[];
21
22static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
23{
24 return regs->orig_d0;
25}
26
27static inline void syscall_rollback(struct task_struct *task,
28 struct pt_regs *regs)
29{
30 regs->d0 = regs->orig_d0;
31}
32
33static inline long syscall_get_error(struct task_struct *task,
34 struct pt_regs *regs)
35{
36 unsigned long error = regs->d0;
37 return IS_ERR_VALUE(error) ? error : 0;
38}
39
40static inline long syscall_get_return_value(struct task_struct *task,
41 struct pt_regs *regs)
42{
43 return regs->d0;
44}
45
46static inline void syscall_set_return_value(struct task_struct *task,
47 struct pt_regs *regs,
48 int error, long val)
49{
50 regs->d0 = (long) error ?: val;
51}
52
53static inline void syscall_get_arguments(struct task_struct *task,
54 struct pt_regs *regs,
55 unsigned int i, unsigned int n,
56 unsigned long *args)
57{
58 switch (i) {
59 case 0:
60 if (!n--) break;
61 *args++ = regs->a0;
62 case 1:
63 if (!n--) break;
64 *args++ = regs->d1;
65 case 2:
66 if (!n--) break;
67 *args++ = regs->a3;
68 case 3:
69 if (!n--) break;
70 *args++ = regs->a2;
71 case 4:
72 if (!n--) break;
73 *args++ = regs->d3;
74 case 5:
75 if (!n--) break;
76 *args++ = regs->d2;
77 case 6:
78 if (!n--) break;
79 default:
80 BUG();
81 break;
82 }
83}
84
85static inline void syscall_set_arguments(struct task_struct *task,
86 struct pt_regs *regs,
87 unsigned int i, unsigned int n,
88 const unsigned long *args)
89{
90 switch (i) {
91 case 0:
92 if (!n--) break;
93 regs->a0 = *args++;
94 case 1:
95 if (!n--) break;
96 regs->d1 = *args++;
97 case 2:
98 if (!n--) break;
99 regs->a3 = *args++;
100 case 3:
101 if (!n--) break;
102 regs->a2 = *args++;
103 case 4:
104 if (!n--) break;
105 regs->d3 = *args++;
106 case 5:
107 if (!n--) break;
108 regs->d2 = *args++;
109 case 6:
110 if (!n--) break;
111 default:
112 BUG();
113 break;
114 }
115}
116
117#endif /* _ASM_SYSCALL_H */
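
syscall_get_arguments()/syscall_set_arguments() above use deliberate switch fall-through: entering at case i and copying until n reaches zero handles every (i, n) window with a single switch. A runnable model of the same pattern, with the register file reduced to an array and the BUG() on overrun simplified away:

#include <stdio.h>

/* Copy syscall arguments i..i+n-1 out of fixed register slots; each
 * case deliberately falls through to the next. */
static void get_args(const long regs[6], unsigned i, unsigned n, long *args)
{
	switch (i) {
	case 0: if (!n--) break; *args++ = regs[0]; /* fall through */
	case 1: if (!n--) break; *args++ = regs[1]; /* fall through */
	case 2: if (!n--) break; *args++ = regs[2]; /* fall through */
	case 3: if (!n--) break; *args++ = regs[3]; /* fall through */
	case 4: if (!n--) break; *args++ = regs[4]; /* fall through */
	case 5: if (!n--) break; *args++ = regs[5]; /* fall through */
	default: break;
	}
}

int main(void)
{
	long regs[6] = { 10, 11, 12, 13, 14, 15 };
	long out[3];

	get_args(regs, 2, 3, out);	/* grab arguments 2, 3 and 4 */
	printf("%ld %ld %ld\n", out[0], out[1], out[2]);	/* 12 13 14 */
	return 0;
}
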
diff --git a/arch/mn10300/kernel/gdb-io-serial.c b/arch/mn10300/kernel/gdb-io-serial.c
index 0d5d63c91dc3..f28dc99c6f72 100644
--- a/arch/mn10300/kernel/gdb-io-serial.c
+++ b/arch/mn10300/kernel/gdb-io-serial.c
@@ -73,7 +73,8 @@ void gdbstub_io_init(void)
73 GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI; 73 GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
74 74
75 /* permit level 0 IRQs to take place */ 75 /* permit level 0 IRQs to take place */
76 local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1)); 76 arch_local_change_intr_mask_level(
77 NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
77} 78}
78 79
79/* 80/*
diff --git a/arch/mn10300/kernel/gdb-io-ttysm.c b/arch/mn10300/kernel/gdb-io-ttysm.c
index 97dfda23342c..abdeea153c89 100644
--- a/arch/mn10300/kernel/gdb-io-ttysm.c
+++ b/arch/mn10300/kernel/gdb-io-ttysm.c
@@ -87,7 +87,8 @@ void __init gdbstub_io_init(void)
87 tmp = *gdbstub_port->_control; 87 tmp = *gdbstub_port->_control;
88 88
89 /* permit level 0 IRQs only */ 89 /* permit level 0 IRQs only */
90 local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1)); 90 arch_local_change_intr_mask_level(
91 NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
91} 92}
92 93
93/* 94/*
diff --git a/arch/mn10300/kernel/gdb-stub.c b/arch/mn10300/kernel/gdb-stub.c
index a5fc3f05309b..b169d99d9f20 100644
--- a/arch/mn10300/kernel/gdb-stub.c
+++ b/arch/mn10300/kernel/gdb-stub.c
@@ -1194,7 +1194,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
1194 1194
1195 asm volatile("mov mdr,%0" : "=d"(mdr)); 1195 asm volatile("mov mdr,%0" : "=d"(mdr));
1196 local_save_flags(epsw); 1196 local_save_flags(epsw);
1197 local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1)); 1197 arch_local_change_intr_mask_level(
1198 NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
1198 1199
1199 gdbstub_store_fpu(); 1200 gdbstub_store_fpu();
1200 1201
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 5024f643b3b1..d7d94b845dc2 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -75,9 +75,6 @@ static void cpu_unmask_irq(unsigned int irq)
75 smp_send_all_nop(); 75 smp_send_all_nop();
76} 76}
77 77
78void no_ack_irq(unsigned int irq) { }
79void no_end_irq(unsigned int irq) { }
80
81void cpu_ack_irq(unsigned int irq) 78void cpu_ack_irq(unsigned int irq)
82{ 79{
83 unsigned long mask = EIEM_MASK(irq); 80 unsigned long mask = EIEM_MASK(irq);
@@ -241,7 +238,7 @@ int cpu_claim_irq(unsigned int irq, struct irq_chip *type, void *data)
241 238
242 /* for iosapic interrupts */ 239 /* for iosapic interrupts */
243 if (type) { 240 if (type) {
244 set_irq_chip_and_handler(irq, type, handle_level_irq); 241 set_irq_chip_and_handler(irq, type, handle_percpu_irq);
245 set_irq_chip_data(irq, data); 242 set_irq_chip_data(irq, data);
246 cpu_unmask_irq(irq); 243 cpu_unmask_irq(irq);
247 } 244 }
@@ -392,7 +389,7 @@ static void claim_cpu_irqs(void)
392 int i; 389 int i;
393 for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) { 390 for (i = CPU_IRQ_BASE; i <= CPU_IRQ_MAX; i++) {
394 set_irq_chip_and_handler(i, &cpu_interrupt_type, 391 set_irq_chip_and_handler(i, &cpu_interrupt_type,
395 handle_level_irq); 392 handle_percpu_irq);
396 } 393 }
397 394
398 set_irq_handler(TIMER_IRQ, handle_percpu_irq); 395 set_irq_handler(TIMER_IRQ, handle_percpu_irq);
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 35c827e94e31..609a331878e7 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -98,7 +98,6 @@ void
98sys_rt_sigreturn(struct pt_regs *regs, int in_syscall) 98sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
99{ 99{
100 struct rt_sigframe __user *frame; 100 struct rt_sigframe __user *frame;
101 struct siginfo si;
102 sigset_t set; 101 sigset_t set;
103 unsigned long usp = (regs->gr[30] & ~(0x01UL)); 102 unsigned long usp = (regs->gr[30] & ~(0x01UL));
104 unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE; 103 unsigned long sigframe_size = PARISC_RT_SIGFRAME_SIZE;
@@ -178,13 +177,7 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
178 177
179give_sigsegv: 178give_sigsegv:
180 DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n"); 179 DBG(1,"sys_rt_sigreturn: Sending SIGSEGV\n");
181 si.si_signo = SIGSEGV; 180 force_sig(SIGSEGV, current);
182 si.si_errno = 0;
183 si.si_code = SI_KERNEL;
184 si.si_pid = task_pid_vnr(current);
185 si.si_uid = current_uid();
186 si.si_addr = &frame->uc;
187 force_sig_info(SIGSEGV, &si, current);
188 return; 181 return;
189} 182}
190 183
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 2c7e801ab20b..6a3997f98dfb 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -92,7 +92,7 @@ static void pte_free_rcu_callback(struct rcu_head *head)
92 92
93static void pte_free_submit(struct pte_freelist_batch *batch) 93static void pte_free_submit(struct pte_freelist_batch *batch)
94{ 94{
95 call_rcu(&batch->rcu, pte_free_rcu_callback); 95 call_rcu_sched(&batch->rcu, pte_free_rcu_callback);
96} 96}
97 97
98void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) 98void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift)
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index ac151399ef34..1995c1712fc8 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -95,7 +95,6 @@ EXPORT_SYMBOL_GPL(s390_handle_mcck);
95static int notrace s390_revalidate_registers(struct mci *mci) 95static int notrace s390_revalidate_registers(struct mci *mci)
96{ 96{
97 int kill_task; 97 int kill_task;
98 u64 tmpclock;
99 u64 zero; 98 u64 zero;
100 void *fpt_save_area, *fpt_creg_save_area; 99 void *fpt_save_area, *fpt_creg_save_area;
101 100
@@ -214,11 +213,10 @@ static int notrace s390_revalidate_registers(struct mci *mci)
214 : "0", "cc"); 213 : "0", "cc");
215#endif 214#endif
216 /* Revalidate clock comparator register */ 215 /* Revalidate clock comparator register */
217 asm volatile( 216 if (S390_lowcore.clock_comparator == -1)
218 " stck 0(%1)\n" 217 set_clock_comparator(S390_lowcore.mcck_clock);
219 " sckc 0(%1)" 218 else
220 : "=m" (tmpclock) : "a" (&(tmpclock)) : "cc", "memory"); 219 set_clock_comparator(S390_lowcore.clock_comparator);
221
222 /* Check if old PSW is valid */ 220 /* Check if old PSW is valid */
223 if (!mci->wp) 221 if (!mci->wp)
224 /* 222 /*
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 56c8687b29b3..7eff9b7347c0 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -19,6 +19,7 @@
19#include <linux/kernel_stat.h> 19#include <linux/kernel_stat.h>
20#include <linux/rcupdate.h> 20#include <linux/rcupdate.h>
21#include <linux/posix-timers.h> 21#include <linux/posix-timers.h>
22#include <linux/cpu.h>
22 23
23#include <asm/s390_ext.h> 24#include <asm/s390_ext.h>
24#include <asm/timer.h> 25#include <asm/timer.h>
@@ -566,6 +567,23 @@ void init_cpu_vtimer(void)
566 __ctl_set_bit(0,10); 567 __ctl_set_bit(0,10);
567} 568}
568 569
570static int __cpuinit s390_nohz_notify(struct notifier_block *self,
571 unsigned long action, void *hcpu)
572{
573 struct s390_idle_data *idle;
574 long cpu = (long) hcpu;
575
576 idle = &per_cpu(s390_idle, cpu);
577 switch (action) {
578 case CPU_DYING:
579 case CPU_DYING_FROZEN:
580 idle->nohz_delay = 0;
581 default:
582 break;
583 }
584 return NOTIFY_OK;
585}
586
569void __init vtime_init(void) 587void __init vtime_init(void)
570{ 588{
571 /* request the cpu timer external interrupt */ 589 /* request the cpu timer external interrupt */
@@ -574,5 +592,6 @@ void __init vtime_init(void)
574 592
575 /* Enable cpu timer interrupts on the boot cpu. */ 593 /* Enable cpu timer interrupts on the boot cpu. */
576 init_cpu_vtimer(); 594 init_cpu_vtimer();
595 cpu_notifier(s390_nohz_notify, 0);
577} 596}
578 597
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 752b362bf651..7c37ec359ec2 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -29,17 +29,21 @@ static void __udelay_disabled(unsigned long long usecs)
29{ 29{
30 unsigned long mask, cr0, cr0_saved; 30 unsigned long mask, cr0, cr0_saved;
31 u64 clock_saved; 31 u64 clock_saved;
32 u64 end;
32 33
34 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
35 end = get_clock() + (usecs << 12);
33 clock_saved = local_tick_disable(); 36 clock_saved = local_tick_disable();
34 set_clock_comparator(get_clock() + (usecs << 12));
35 __ctl_store(cr0_saved, 0, 0); 37 __ctl_store(cr0_saved, 0, 0);
36 cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; 38 cr0 = (cr0_saved & 0xffff00e0) | 0x00000800;
37 __ctl_load(cr0 , 0, 0); 39 __ctl_load(cr0 , 0, 0);
38 mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT;
39 lockdep_off(); 40 lockdep_off();
40 trace_hardirqs_on(); 41 do {
41 __load_psw_mask(mask); 42 set_clock_comparator(end);
42 local_irq_disable(); 43 trace_hardirqs_on();
44 __load_psw_mask(mask);
45 local_irq_disable();
46 } while (get_clock() < end);
43 lockdep_on(); 47 lockdep_on();
44 __ctl_load(cr0_saved, 0, 0); 48 __ctl_load(cr0_saved, 0, 0);
45 local_tick_enable(clock_saved); 49 local_tick_enable(clock_saved);
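
The __udelay_disabled() change converts a single "program the clock comparator and wait once" into "compute the absolute end time, then re-arm and wait in a loop until the clock has really passed it", so a wakeup by some other interrupt can no longer end the delay early. A user-space analogue of the same deadline loop, with nanosleep standing in for the PSW wait state:

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t now_ns(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

/* Take the end time once, then keep waiting until the clock really
 * passed it: an early wakeup cannot cut the delay short. */
static void delay_us(uint64_t usecs)
{
	uint64_t end = now_ns() + usecs * 1000;

	do {
		struct timespec ts = { 0, 100000 };	/* may wake early */
		nanosleep(&ts, NULL);
	} while (now_ns() < end);
}

int main(void)
{
	uint64_t t0 = now_ns();
	delay_us(5000);		/* 5 ms */
	printf("waited %llu us\n",
	       (unsigned long long)((now_ns() - t0) / 1000));
	return 0;
}
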
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 2eaeb9e59585..f48c492a68d3 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -720,32 +720,6 @@ static struct platform_device camera_devices[] = {
720}; 720};
721 721
722/* FSI */ 722/* FSI */
723/*
724 * FSI-B use external clock which came from da7210.
725 * So, we should change parent of fsi
726 */
727#define FCLKBCR 0xa415000c
728static void fsimck_init(struct clk *clk)
729{
730 u32 status = __raw_readl(clk->enable_reg);
731
732 /* use external clock */
733 status &= ~0x000000ff;
734 status |= 0x00000080;
735
736 __raw_writel(status, clk->enable_reg);
737}
738
739static struct clk_ops fsimck_clk_ops = {
740 .init = fsimck_init,
741};
742
743static struct clk fsimckb_clk = {
744 .ops = &fsimck_clk_ops,
745 .enable_reg = (void __iomem *)FCLKBCR,
746 .rate = 0, /* unknown */
747};
748
749static struct sh_fsi_platform_info fsi_info = { 723static struct sh_fsi_platform_info fsi_info = {
750 .portb_flags = SH_FSI_BRS_INV | 724 .portb_flags = SH_FSI_BRS_INV |
751 SH_FSI_OUT_SLAVE_MODE | 725 SH_FSI_OUT_SLAVE_MODE |
@@ -1264,10 +1238,10 @@ static int __init arch_setup(void)
1264 /* change parent of FSI B */ 1238 /* change parent of FSI B */
1265 clk = clk_get(NULL, "fsib_clk"); 1239 clk = clk_get(NULL, "fsib_clk");
1266 if (!IS_ERR(clk)) { 1240 if (!IS_ERR(clk)) {
1267 clk_register(&fsimckb_clk); 1241 /* use a 48kHz dummy rate to ensure a 1/1 divider */
1268 clk_set_parent(clk, &fsimckb_clk); 1242 clk_set_rate(&sh7724_fsimckb_clk, 48000);
1269 clk_set_rate(clk, 11000); 1243 clk_set_parent(clk, &sh7724_fsimckb_clk);
1270 clk_set_rate(&fsimckb_clk, 11000); 1244 clk_set_rate(clk, 48000);
1271 clk_put(clk); 1245 clk_put(clk);
1272 } 1246 }
1273 1247
diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c
index c31d228fdfc6..527a0cd956b5 100644
--- a/arch/sh/boards/mach-se/7724/setup.c
+++ b/arch/sh/boards/mach-se/7724/setup.c
@@ -283,31 +283,6 @@ static struct platform_device ceu1_device = {
283}; 283};
284 284
285/* FSI */ 285/* FSI */
286/*
287 * FSI-A use external clock which came from ak464x.
288 * So, we should change parent of fsi
289 */
290#define FCLKACR 0xa4150008
291static void fsimck_init(struct clk *clk)
292{
293 u32 status = __raw_readl(clk->enable_reg);
294
295 /* use external clock */
296 status &= ~0x000000ff;
297 status |= 0x00000080;
298 __raw_writel(status, clk->enable_reg);
299}
300
301static struct clk_ops fsimck_clk_ops = {
302 .init = fsimck_init,
303};
304
305static struct clk fsimcka_clk = {
306 .ops = &fsimck_clk_ops,
307 .enable_reg = (void __iomem *)FCLKACR,
308 .rate = 0, /* unknown */
309};
310
311/* change J20, J21, J22 pin to 1-2 connection to use slave mode */ 286/* change J20, J21, J22 pin to 1-2 connection to use slave mode */
312static struct sh_fsi_platform_info fsi_info = { 287static struct sh_fsi_platform_info fsi_info = {
313 .porta_flags = SH_FSI_BRS_INV | 288 .porta_flags = SH_FSI_BRS_INV |
@@ -852,37 +827,29 @@ static int __init devices_setup(void)
852 gpio_request(GPIO_FN_KEYOUT0, NULL); 827 gpio_request(GPIO_FN_KEYOUT0, NULL);
853 828
854 /* enable FSI */ 829 /* enable FSI */
855 gpio_request(GPIO_FN_FSIMCKB, NULL);
856 gpio_request(GPIO_FN_FSIMCKA, NULL); 830 gpio_request(GPIO_FN_FSIMCKA, NULL);
831 gpio_request(GPIO_FN_FSIIASD, NULL);
857 gpio_request(GPIO_FN_FSIOASD, NULL); 832 gpio_request(GPIO_FN_FSIOASD, NULL);
858 gpio_request(GPIO_FN_FSIIABCK, NULL); 833 gpio_request(GPIO_FN_FSIIABCK, NULL);
859 gpio_request(GPIO_FN_FSIIALRCK, NULL); 834 gpio_request(GPIO_FN_FSIIALRCK, NULL);
860 gpio_request(GPIO_FN_FSIOABCK, NULL); 835 gpio_request(GPIO_FN_FSIOABCK, NULL);
861 gpio_request(GPIO_FN_FSIOALRCK, NULL); 836 gpio_request(GPIO_FN_FSIOALRCK, NULL);
862 gpio_request(GPIO_FN_CLKAUDIOAO, NULL); 837 gpio_request(GPIO_FN_CLKAUDIOAO, NULL);
863 gpio_request(GPIO_FN_FSIIBSD, NULL);
864 gpio_request(GPIO_FN_FSIOBSD, NULL);
865 gpio_request(GPIO_FN_FSIIBBCK, NULL);
866 gpio_request(GPIO_FN_FSIIBLRCK, NULL);
867 gpio_request(GPIO_FN_FSIOBBCK, NULL);
868 gpio_request(GPIO_FN_FSIOBLRCK, NULL);
869 gpio_request(GPIO_FN_CLKAUDIOBO, NULL);
870 gpio_request(GPIO_FN_FSIIASD, NULL);
871 838
872 /* set SPU2 clock to 83.4 MHz */ 839 /* set SPU2 clock to 83.4 MHz */
873 clk = clk_get(NULL, "spu_clk"); 840 clk = clk_get(NULL, "spu_clk");
874 if (clk) { 841 if (!IS_ERR(clk)) {
875 clk_set_rate(clk, clk_round_rate(clk, 83333333)); 842 clk_set_rate(clk, clk_round_rate(clk, 83333333));
876 clk_put(clk); 843 clk_put(clk);
877 } 844 }
878 845
879 /* change parent of FSI A */ 846 /* change parent of FSI A */
880 clk = clk_get(NULL, "fsia_clk"); 847 clk = clk_get(NULL, "fsia_clk");
881 if (clk) { 848 if (!IS_ERR(clk)) {
882 clk_register(&fsimcka_clk); 849 /* use a 48kHz dummy rate to ensure a 1/1 divider */
883 clk_set_parent(clk, &fsimcka_clk); 850 clk_set_rate(&sh7724_fsimcka_clk, 48000);
884 clk_set_rate(clk, 11000); 851 clk_set_parent(clk, &sh7724_fsimcka_clk);
885 clk_set_rate(&fsimcka_clk, 11000); 852 clk_set_rate(clk, 48000);
886 clk_put(clk); 853 clk_put(clk);
887 } 854 }
888 855
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 1f4e562c5e8c..82e1eabeac98 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -96,7 +96,7 @@ void kmap_coherent_init(void);
96void *kmap_coherent(struct page *page, unsigned long addr); 96void *kmap_coherent(struct page *page, unsigned long addr);
97void kunmap_coherent(void *kvaddr); 97void kunmap_coherent(void *kvaddr);
98 98
99#define PG_dcache_dirty PG_arch_1 99#define PG_dcache_clean PG_arch_1
100 100
101void cpu_cache_init(void); 101void cpu_cache_init(void);
102 102
diff --git a/arch/sh/include/asm/processor_32.h b/arch/sh/include/asm/processor_32.h
index 46d5179c9f49..e3c73cdd8c90 100644
--- a/arch/sh/include/asm/processor_32.h
+++ b/arch/sh/include/asm/processor_32.h
@@ -199,10 +199,13 @@ extern unsigned long get_wchan(struct task_struct *p);
199#define ARCH_HAS_PREFETCHW 199#define ARCH_HAS_PREFETCHW
200static inline void prefetch(void *x) 200static inline void prefetch(void *x)
201{ 201{
202 __asm__ __volatile__ ("pref @%0\n\t" : : "r" (x) : "memory"); 202 __builtin_prefetch(x, 0, 3);
203} 203}
204 204
205#define prefetchw(x) prefetch(x) 205static inline void prefetchw(void *x)
206{
207 __builtin_prefetch(x, 1, 3);
208}
206#endif 209#endif
207 210
208#endif /* __KERNEL__ */ 211#endif /* __KERNEL__ */
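
__builtin_prefetch(addr, rw, locality) lets the compiler emit the target's prefetch instruction (pref/prefw on SH-4A) instead of hard-coding inline assembly, and degrades to a no-op where none exists; rw is 0 for read, 1 for write, and locality 3 asks to keep the line in all cache levels. A small example of the builtin in a streaming loop (the look-ahead distance of 16 is an arbitrary illustration):

#include <stdio.h>
#include <stddef.h>

static long sum(const long *a, size_t n)
{
	long s = 0;
	for (size_t i = 0; i < n; i++) {
		if (i + 16 < n)
			__builtin_prefetch(&a[i + 16], 0, 3);	/* read ahead */
		s += a[i];
	}
	return s;
}

int main(void)
{
	long a[1024];
	for (size_t i = 0; i < 1024; i++)
		a[i] = (long)i;
	printf("%ld\n", sum(a, 1024));	/* 523776 */
	return 0;
}
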
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 4c27b68789b3..7eb435999426 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -303,4 +303,7 @@ enum {
303 SHDMA_SLAVE_SDHI1_RX, 303 SHDMA_SLAVE_SDHI1_RX,
304}; 304};
305 305
306extern struct clk sh7724_fsimcka_clk;
307extern struct clk sh7724_fsimckb_clk;
308
306#endif /* __ASM_SH7724_H__ */ 309#endif /* __ASM_SH7724_H__ */
diff --git a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
index 4eabc68cd753..b601fa3978d1 100644
--- a/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
+++ b/arch/sh/kernel/cpu/sh4/clock-sh4-202.c
@@ -110,7 +110,7 @@ static int shoc_clk_verify_rate(struct clk *clk, unsigned long rate)
110 return 0; 110 return 0;
111} 111}
112 112
113static int shoc_clk_set_rate(struct clk *clk, unsigned long rate, int algo_id) 113static int shoc_clk_set_rate(struct clk *clk, unsigned long rate)
114{ 114{
115 unsigned long frqcr3; 115 unsigned long frqcr3;
116 unsigned int tmp; 116 unsigned int tmp;
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 0fe2e9329cb2..271c0b325a9a 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -111,12 +111,21 @@ static struct clk div3_clk = {
111 .parent = &pll_clk, 111 .parent = &pll_clk,
112}; 112};
113 113
114/* External input clock (pin name: FSIMCKA/FSIMCKB ) */
115struct clk sh7724_fsimcka_clk = {
116};
117
118struct clk sh7724_fsimckb_clk = {
119};
120
114static struct clk *main_clks[] = { 121static struct clk *main_clks[] = {
115 &r_clk, 122 &r_clk,
116 &extal_clk, 123 &extal_clk,
117 &fll_clk, 124 &fll_clk,
118 &pll_clk, 125 &pll_clk,
119 &div3_clk, 126 &div3_clk,
127 &sh7724_fsimcka_clk,
128 &sh7724_fsimckb_clk,
120}; 129};
121 130
122static void div4_kick(struct clk *clk) 131static void div4_kick(struct clk *clk)
@@ -154,16 +163,38 @@ struct clk div4_clks[DIV4_NR] = {
154 [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT), 163 [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT),
155}; 164};
156 165
157enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR }; 166enum { DIV6_V, DIV6_I, DIV6_S, DIV6_NR };
158 167
159static struct clk div6_clks[DIV6_NR] = { 168static struct clk div6_clks[DIV6_NR] = {
160 [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0), 169 [DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
161 [DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
162 [DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
163 [DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0), 170 [DIV6_I] = SH_CLK_DIV6(&div3_clk, IRDACLKCR, 0),
164 [DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT), 171 [DIV6_S] = SH_CLK_DIV6(&div3_clk, SPUCLKCR, CLK_ENABLE_ON_INIT),
165}; 172};
166 173
174enum { DIV6_FA, DIV6_FB, DIV6_REPARENT_NR };
175
176/* Indices are important - they are the actual src selecting values */
177static struct clk *fclkacr_parent[] = {
178 [0] = &div3_clk,
179 [1] = NULL,
180 [2] = &sh7724_fsimcka_clk,
181 [3] = NULL,
182};
183
184static struct clk *fclkbcr_parent[] = {
185 [0] = &div3_clk,
186 [1] = NULL,
187 [2] = &sh7724_fsimckb_clk,
188 [3] = NULL,
189};
190
191static struct clk div6_reparent_clks[DIV6_REPARENT_NR] = {
192 [DIV6_FA] = SH_CLK_DIV6_EXT(&div3_clk, FCLKACR, 0,
193 fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2),
194 [DIV6_FB] = SH_CLK_DIV6_EXT(&div3_clk, FCLKBCR, 0,
195 fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2),
196};
197
167static struct clk mstp_clks[HWBLK_NR] = { 198static struct clk mstp_clks[HWBLK_NR] = {
168 SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), 199 SH_HWBLK_CLK(HWBLK_TLB, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
169 SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT), 200 SH_HWBLK_CLK(HWBLK_IC, &div4_clks[DIV4_I], CLK_ENABLE_ON_INIT),
@@ -240,8 +271,8 @@ static struct clk_lookup lookups[] = {
240 271
241 /* DIV6 clocks */ 272 /* DIV6 clocks */
242 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), 273 CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]),
243 CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]), 274 CLKDEV_CON_ID("fsia_clk", &div6_reparent_clks[DIV6_FA]),
244 CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]), 275 CLKDEV_CON_ID("fsib_clk", &div6_reparent_clks[DIV6_FB]),
245 CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]), 276 CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]),
246 CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]), 277 CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]),
247 278
@@ -376,6 +407,9 @@ int __init arch_clk_init(void)
376 ret = sh_clk_div6_register(div6_clks, DIV6_NR); 407 ret = sh_clk_div6_register(div6_clks, DIV6_NR);
377 408
378 if (!ret) 409 if (!ret)
410 ret = sh_clk_div6_reparent_register(div6_reparent_clks, DIV6_REPARENT_NR);
411
412 if (!ret)
379 ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR); 413 ret = sh_hwblk_clk_register(mstp_clks, HWBLK_NR);
380 414
381 return ret; 415 return ret;
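
The fclkacr_parent[]/fclkbcr_parent[] tables above encode the mux directly: the array index is the value written to the 2-bit source-select field, and NULL marks reserved encodings, so reparenting reduces to a table search. A user-space sketch of that index-equals-field-value convention, with the clk structure pared down to a name:

#include <stdio.h>

struct clk { const char *name; };

static struct clk div3    = { "div3"    };
static struct clk fsimcka = { "fsimcka" };

/* Indices are important - they are the actual source-select values
 * written to the 2-bit field; NULL marks reserved encodings. */
static struct clk *fclkacr_parent[4] = {
	[0] = &div3,
	[2] = &fsimcka,
};

/* A reparent operation maps the desired parent back to a field value. */
static int parent_to_sel(struct clk *parent)
{
	for (int i = 0; i < 4; i++)
		if (fclkacr_parent[i] == parent)
			return i;
	return -1;		/* not a valid parent for this clock */
}

int main(void)
{
	int sel = parent_to_sel(&fsimcka);
	printf("FSIMCKA -> field value %d (%s)\n", sel,
	       fclkacr_parent[sel]->name);			/* 2 */
	printf("div3    -> field value %d\n", parent_to_sel(&div3));	/* 0 */
	return 0;
}
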
diff --git a/arch/sh/kernel/sys_sh.c b/arch/sh/kernel/sys_sh.c
index 81f58371613d..8c6a350df751 100644
--- a/arch/sh/kernel/sys_sh.c
+++ b/arch/sh/kernel/sys_sh.c
@@ -88,7 +88,7 @@ asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len, int op)
88 } 88 }
89 89
90 if (op & CACHEFLUSH_I) 90 if (op & CACHEFLUSH_I)
91 flush_cache_all(); 91 flush_icache_range(addr, addr+len);
92 92
93 up_read(&current->mm->mmap_sem); 93 up_read(&current->mm->mmap_sem);
94 return 0; 94 return 0;
diff --git a/arch/sh/kernel/vsyscall/vsyscall-trapa.S b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
index 3b6eb34c43fa..3e70f851cdc6 100644
--- a/arch/sh/kernel/vsyscall/vsyscall-trapa.S
+++ b/arch/sh/kernel/vsyscall/vsyscall-trapa.S
@@ -8,9 +8,9 @@ __kernel_vsyscall:
8 * fill out .eh_frame -- PFM. */ 8 * fill out .eh_frame -- PFM. */
9.LEND_vsyscall: 9.LEND_vsyscall:
10 .size __kernel_vsyscall,.-.LSTART_vsyscall 10 .size __kernel_vsyscall,.-.LSTART_vsyscall
11 .previous
12 11
13 .section .eh_frame,"a",@progbits 12 .section .eh_frame,"a",@progbits
13 .previous
14.LCIE: 14.LCIE:
15 .ualong .LCIE_end - .LCIE_start 15 .ualong .LCIE_end - .LCIE_start
16.LCIE_start: 16.LCIE_start:
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 2cfae81914aa..92eb98633ab0 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -114,7 +114,7 @@ static void sh4_flush_dcache_page(void *arg)
114 struct address_space *mapping = page_mapping(page); 114 struct address_space *mapping = page_mapping(page);
115 115
116 if (mapping && !mapping_mapped(mapping)) 116 if (mapping && !mapping_mapped(mapping))
117 set_bit(PG_dcache_dirty, &page->flags); 117 clear_bit(PG_dcache_clean, &page->flags);
118 else 118 else
119#endif 119#endif
120 flush_cache_one(CACHE_OC_ADDRESS_ARRAY | 120 flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
@@ -239,7 +239,7 @@ static void sh4_flush_cache_page(void *args)
239 * another ASID than the current one. 239 * another ASID than the current one.
240 */ 240 */
241 map_coherent = (current_cpu_data.dcache.n_aliases && 241 map_coherent = (current_cpu_data.dcache.n_aliases &&
242 !test_bit(PG_dcache_dirty, &page->flags) && 242 test_bit(PG_dcache_clean, &page->flags) &&
243 page_mapped(page)); 243 page_mapped(page));
244 if (map_coherent) 244 if (map_coherent)
245 vaddr = kmap_coherent(page, address); 245 vaddr = kmap_coherent(page, address);
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index f498da1cce7a..7729cca727eb 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -139,7 +139,7 @@ static void sh7705_flush_dcache_page(void *arg)
139 struct address_space *mapping = page_mapping(page); 139 struct address_space *mapping = page_mapping(page);
140 140
141 if (mapping && !mapping_mapped(mapping)) 141 if (mapping && !mapping_mapped(mapping))
142 set_bit(PG_dcache_dirty, &page->flags); 142 clear_bit(PG_dcache_clean, &page->flags);
143 else 143 else
144 __flush_dcache_page(__pa(page_address(page))); 144 __flush_dcache_page(__pa(page_address(page)));
145} 145}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index ba401d137bb9..88d3dc3d30d5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -60,14 +60,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
60 unsigned long len) 60 unsigned long len)
61{ 61{
62 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && 62 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
63 !test_bit(PG_dcache_dirty, &page->flags)) { 63 test_bit(PG_dcache_clean, &page->flags)) {
64 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); 64 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
65 memcpy(vto, src, len); 65 memcpy(vto, src, len);
66 kunmap_coherent(vto); 66 kunmap_coherent(vto);
67 } else { 67 } else {
68 memcpy(dst, src, len); 68 memcpy(dst, src, len);
69 if (boot_cpu_data.dcache.n_aliases) 69 if (boot_cpu_data.dcache.n_aliases)
70 set_bit(PG_dcache_dirty, &page->flags); 70 clear_bit(PG_dcache_clean, &page->flags);
71 } 71 }
72 72
73 if (vma->vm_flags & VM_EXEC) 73 if (vma->vm_flags & VM_EXEC)
@@ -79,14 +79,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
79 unsigned long len) 79 unsigned long len)
80{ 80{
81 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && 81 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
82 !test_bit(PG_dcache_dirty, &page->flags)) { 82 test_bit(PG_dcache_clean, &page->flags)) {
83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); 83 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
84 memcpy(dst, vfrom, len); 84 memcpy(dst, vfrom, len);
85 kunmap_coherent(vfrom); 85 kunmap_coherent(vfrom);
86 } else { 86 } else {
87 memcpy(dst, src, len); 87 memcpy(dst, src, len);
88 if (boot_cpu_data.dcache.n_aliases) 88 if (boot_cpu_data.dcache.n_aliases)
89 set_bit(PG_dcache_dirty, &page->flags); 89 clear_bit(PG_dcache_clean, &page->flags);
90 } 90 }
91} 91}
92 92
@@ -98,7 +98,7 @@ void copy_user_highpage(struct page *to, struct page *from,
98 vto = kmap_atomic(to, KM_USER1); 98 vto = kmap_atomic(to, KM_USER1);
99 99
100 if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && 100 if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
101 !test_bit(PG_dcache_dirty, &from->flags)) { 101 test_bit(PG_dcache_clean, &from->flags)) {
102 vfrom = kmap_coherent(from, vaddr); 102 vfrom = kmap_coherent(from, vaddr);
103 copy_page(vto, vfrom); 103 copy_page(vto, vfrom);
104 kunmap_coherent(vfrom); 104 kunmap_coherent(vfrom);
@@ -141,7 +141,7 @@ void __update_cache(struct vm_area_struct *vma,
141 141
142 page = pfn_to_page(pfn); 142 page = pfn_to_page(pfn);
143 if (pfn_valid(pfn)) { 143 if (pfn_valid(pfn)) {
144 int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags); 144 int dirty = !test_and_set_bit(PG_dcache_clean, &page->flags);
145 if (dirty) 145 if (dirty)
146 __flush_purge_region(page_address(page), PAGE_SIZE); 146 __flush_purge_region(page_address(page), PAGE_SIZE);
147 } 147 }
@@ -153,7 +153,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
153 153
154 if (pages_do_alias(addr, vmaddr)) { 154 if (pages_do_alias(addr, vmaddr)) {
155 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) && 155 if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
156 !test_bit(PG_dcache_dirty, &page->flags)) { 156 test_bit(PG_dcache_clean, &page->flags)) {
157 void *kaddr; 157 void *kaddr;
158 158
159 kaddr = kmap_coherent(page, vmaddr); 159 kaddr = kmap_coherent(page, vmaddr);
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 15d74ea42094..ec29e14ec5a8 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -34,7 +34,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
34 enum fixed_addresses idx; 34 enum fixed_addresses idx;
35 unsigned long vaddr; 35 unsigned long vaddr;
36 36
37 BUG_ON(test_bit(PG_dcache_dirty, &page->flags)); 37 BUG_ON(!test_bit(PG_dcache_clean, &page->flags));
38 38
39 pagefault_disable(); 39 pagefault_disable();
40 40
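
The sh hunks above invert the page-flag convention: PG_dcache_dirty ("this page may have stale dcache lines") becomes PG_dcache_clean ("this page is known clean"), so every test flips polarity and each set_bit() becomes a clear_bit(). A minimal user-space sketch of the inversion, with the kernel bit helpers mocked and struct page reduced to a flags word (illustration only, not part of the patch):

#include <stdbool.h>
#include <stdio.h>

#define PG_dcache_clean 0   /* bit index standing in for the kernel page flag */

struct page { unsigned long flags; };

static bool test_bit(int nr, const unsigned long *w) { return (*w >> nr) & 1; }
static void set_bit(int nr, unsigned long *w)        { *w |=  1UL << nr; }
static void clear_bit(int nr, unsigned long *w)      { *w &= ~(1UL << nr); }

int main(void)
{
    struct page pg = { .flags = 1UL << PG_dcache_clean };

    /* Old test: !test_bit(PG_dcache_dirty, ...)  ->  new: test_bit(PG_dcache_clean, ...) */
    if (test_bit(PG_dcache_clean, &pg.flags))
        printf("coherent kmap path is safe\n");

    /* Old update: set_bit(PG_dcache_dirty, ...)  ->  new: clear_bit(PG_dcache_clean, ...) */
    clear_bit(PG_dcache_clean, &pg.flags);
    set_bit(PG_dcache_clean, &pg.flags);   /* e.g. after the page has been purged */
    return 0;
}
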
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 07ec8a865c1d..e11b5fcb70eb 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -329,6 +329,18 @@ endmenu # Tilera-specific configuration
329 329
330menu "Bus options" 330menu "Bus options"
331 331
332config PCI
333 bool "PCI support"
334 default y
335 select PCI_DOMAINS
336 ---help---
337 Enable PCI root complex support, so PCIe endpoint devices can
338 be attached to the Tile chip. Many, but not all, PCI devices
339 are supported under Tilera's root complex driver.
340
341config PCI_DOMAINS
342 bool
343
332config NO_IOMEM 344config NO_IOMEM
333 def_bool !PCI 345 def_bool !PCI
334 346
diff --git a/arch/tile/include/asm/cacheflush.h b/arch/tile/include/asm/cacheflush.h
index c5741da4eeac..14a3f8556ace 100644
--- a/arch/tile/include/asm/cacheflush.h
+++ b/arch/tile/include/asm/cacheflush.h
@@ -137,4 +137,56 @@ static inline void finv_buffer(void *buffer, size_t size)
137 mb_incoherent(); 137 mb_incoherent();
138} 138}
139 139
140/*
141 * Flush & invalidate a VA range that is homed remotely on a single core,
142 * waiting until the memory controller holds the flushed values.
143 */
144static inline void finv_buffer_remote(void *buffer, size_t size)
145{
146 char *p;
147 int i;
148
149 /*
150 * Flush and invalidate the buffer out of the local L1/L2
151 * and request the home cache to flush and invalidate as well.
152 */
153 __finv_buffer(buffer, size);
154
155 /*
156 * Wait for the home cache to acknowledge that it has processed
157 * all the flush-and-invalidate requests. This does not mean
158 * that the flushed data has reached the memory controller yet,
159 * but it does mean the home cache is processing the flushes.
160 */
161 __insn_mf();
162
163 /*
164 * Issue a load to the last cache line, which can't complete
165 * until all the previously-issued flushes to the same memory
166 * controller have also completed. If we weren't striping
167 * memory, that one load would be sufficient, but since we may
168 * be, we also need to back up to the last load issued to
169 * another memory controller, which would be the point where
170 * we crossed an 8KB boundary (the granularity of striping
171 * across memory controllers). Keep backing up and doing this
172 * until we are before the beginning of the buffer, or have
173 * hit all the controllers.
174 */
175 for (i = 0, p = (char *)buffer + size - 1;
176 i < (1 << CHIP_LOG_NUM_MSHIMS()) && p >= (char *)buffer;
177 ++i) {
178 const unsigned long STRIPE_WIDTH = 8192;
179
180 /* Force a load instruction to issue. */
181 *(volatile char *)p;
182
183 /* Jump to end of previous stripe. */
184 p -= STRIPE_WIDTH;
185 p = (char *)((unsigned long)p | (STRIPE_WIDTH - 1));
186 }
187
188 /* Wait for the loads (and thus flushes) to have completed. */
189 __insn_mf();
190}
191
140#endif /* _ASM_TILE_CACHEFLUSH_H */ 192#endif /* _ASM_TILE_CACHEFLUSH_H */
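
The backoff loop in finv_buffer_remote() touches one cache line per 8KB stripe, newest stripe first, so it issues at most one load per memory controller. The stand-alone sketch below reproduces the address walk (stripe width from the comment above; the controller count and buffer size are invented example values):

#include <stdio.h>

int main(void)
{
    const long STRIPE_WIDTH = 8192;  /* striping granularity across controllers */
    const int num_mshims = 4;        /* assumed controller count */
    const long size = 20000;         /* arbitrary example buffer size */
    long off;
    int i;

    /* Start at the last byte, then back up to the last byte of each
     * preceding stripe, exactly as the kernel loop does with pointers. */
    for (i = 0, off = size - 1; i < num_mshims && off >= 0; ++i) {
        printf("probe byte offset %ld\n", off);
        off -= STRIPE_WIDTH;
        off |= STRIPE_WIDTH - 1;
    }
    return 0;   /* prints 19999, 16383, 8191: one probe per stripe touched */
}
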
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index ee43328713ab..d3cbb9b14cbe 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -55,9 +55,6 @@ extern void iounmap(volatile void __iomem *addr);
55#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size) 55#define ioremap_writethrough(physaddr, size) ioremap(physaddr, size)
56#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size) 56#define ioremap_fullcache(physaddr, size) ioremap(physaddr, size)
57 57
58void __iomem *ioport_map(unsigned long port, unsigned int len);
59extern inline void ioport_unmap(void __iomem *addr) {}
60
61#define mmiowb() 58#define mmiowb()
62 59
63/* Conversion between virtual and physical mappings. */ 60/* Conversion between virtual and physical mappings. */
@@ -189,12 +186,22 @@ static inline void memcpy_toio(volatile void __iomem *dst, const void *src,
189 * we never run, uses them unconditionally. 186 * we never run, uses them unconditionally.
190 */ 187 */
191 188
192static inline int ioport_panic(void) 189static inline long ioport_panic(void)
193{ 190{
194 panic("inb/outb and friends do not exist on tile"); 191 panic("inb/outb and friends do not exist on tile");
195 return 0; 192 return 0;
196} 193}
197 194
195static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
196{
197 return (void __iomem *) ioport_panic();
198}
199
200static inline void ioport_unmap(void __iomem *addr)
201{
202 ioport_panic();
203}
204
198static inline u8 inb(unsigned long addr) 205static inline u8 inb(unsigned long addr)
199{ 206{
200 return ioport_panic(); 207 return ioport_panic();
diff --git a/arch/tile/include/asm/pci-bridge.h b/arch/tile/include/asm/pci-bridge.h
deleted file mode 100644
index e853b0e2793b..000000000000
--- a/arch/tile/include/asm/pci-bridge.h
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _ASM_TILE_PCI_BRIDGE_H
16#define _ASM_TILE_PCI_BRIDGE_H
17
18#include <linux/ioport.h>
19#include <linux/pci.h>
20
21struct device_node;
22struct pci_controller;
23
24/*
25 * pci_io_base returns the memory address at which you can access
26 * the I/O space for PCI bus number `bus' (or NULL on error).
27 */
28extern void __iomem *pci_bus_io_base(unsigned int bus);
29extern unsigned long pci_bus_io_base_phys(unsigned int bus);
30extern unsigned long pci_bus_mem_base_phys(unsigned int bus);
31
32/* Allocate a new PCI host bridge structure */
33extern struct pci_controller *pcibios_alloc_controller(void);
34
35/* Helper function for setting up resources */
36extern void pci_init_resource(struct resource *res, unsigned long start,
37 unsigned long end, int flags, char *name);
38
39/* Get the PCI host controller for a bus */
40extern struct pci_controller *pci_bus_to_hose(int bus);
41
42/*
43 * Structure of a PCI controller (host bridge)
44 */
45struct pci_controller {
46 int index; /* PCI domain number */
47 struct pci_bus *root_bus;
48
49 int first_busno;
50 int last_busno;
51
52 int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
53 int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
54
55 struct pci_ops *ops;
56
57 int irq_base; /* Base IRQ from the Hypervisor */
58 int plx_gen1; /* flag for PLX Gen 1 configuration */
59
60 /* Address ranges that are routed to this controller/bridge. */
61 struct resource mem_resources[3];
62};
63
64static inline struct pci_controller *pci_bus_to_host(struct pci_bus *bus)
65{
66 return bus->sysdata;
67}
68
69extern void setup_indirect_pci_nomap(struct pci_controller *hose,
70 void __iomem *cfg_addr, void __iomem *cfg_data);
71extern void setup_indirect_pci(struct pci_controller *hose,
72 u32 cfg_addr, u32 cfg_data);
73extern void setup_grackle(struct pci_controller *hose);
74
75extern unsigned char common_swizzle(struct pci_dev *, unsigned char *);
76
77/*
78 * The following code swizzles for exactly one bridge. The routine
79 * common_swizzle below handles multiple bridges. But there are
80 * some boards that don't follow the PCI spec's suggestion, so we
81 * break this piece out separately.
82 */
83static inline unsigned char bridge_swizzle(unsigned char pin,
84 unsigned char idsel)
85{
86 return (((pin-1) + idsel) % 4) + 1;
87}
88
89/*
90 * The following macro is used to lookup irqs in a standard table
91 * format for those PPC systems that do not already have PCI
92 * interrupts properly routed.
93 */
94/* FIXME - double check this */
95#define PCI_IRQ_TABLE_LOOKUP ({ \
96 long _ctl_ = -1; \
97 if (idsel >= min_idsel && idsel <= max_idsel && pin <= irqs_per_slot) \
98 _ctl_ = pci_irq_table[idsel - min_idsel][pin-1]; \
99 _ctl_; \
100})
101
102/*
103 * Scan the buses below a given PCI host bridge and assign suitable
104 * resources to all devices found.
105 */
106extern int pciauto_bus_scan(struct pci_controller *, int);
107
108#ifdef CONFIG_PCI
109extern unsigned long pci_address_to_pio(phys_addr_t address);
110#else
111static inline unsigned long pci_address_to_pio(phys_addr_t address)
112{
113 return (unsigned long)-1;
114}
115#endif
116
117#endif /* _ASM_TILE_PCI_BRIDGE_H */
diff --git a/arch/tile/include/asm/pci.h b/arch/tile/include/asm/pci.h
index b0c15da2d5d5..c3fc458a0d32 100644
--- a/arch/tile/include/asm/pci.h
+++ b/arch/tile/include/asm/pci.h
@@ -15,7 +15,29 @@
15#ifndef _ASM_TILE_PCI_H 15#ifndef _ASM_TILE_PCI_H
16#define _ASM_TILE_PCI_H 16#define _ASM_TILE_PCI_H
17 17
18#include <asm/pci-bridge.h> 18#include <linux/pci.h>
19
20/*
21 * Structure of a PCI controller (host bridge)
22 */
23struct pci_controller {
24 int index; /* PCI domain number */
25 struct pci_bus *root_bus;
26
27 int first_busno;
28 int last_busno;
29
30 int hv_cfg_fd[2]; /* config{0,1} fds for this PCIe controller */
31 int hv_mem_fd; /* fd to Hypervisor for MMIO operations */
32
33 struct pci_ops *ops;
34
35 int irq_base; /* Base IRQ from the Hypervisor */
36 int plx_gen1; /* flag for PLX Gen 1 configuration */
37
38 /* Address ranges that are routed to this controller/bridge. */
39 struct resource mem_resources[3];
40};
19 41
20/* 42/*
21 * The hypervisor maps the entirety of CPA-space as bus addresses, so 43 * The hypervisor maps the entirety of CPA-space as bus addresses, so
@@ -24,56 +46,12 @@
24 */ 46 */
25#define PCI_DMA_BUS_IS_PHYS 1 47#define PCI_DMA_BUS_IS_PHYS 1
26 48
27struct pci_controller *pci_bus_to_hose(int bus);
28unsigned char __init common_swizzle(struct pci_dev *dev, unsigned char *pinp);
29int __init tile_pci_init(void); 49int __init tile_pci_init(void);
30void pci_iounmap(struct pci_dev *dev, void __iomem *addr);
31void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
32void __devinit pcibios_fixup_bus(struct pci_bus *bus);
33 50
34int __devinit _tile_cfg_read(struct pci_controller *hose, 51void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
35 int bus, 52static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
36 int slot,
37 int function,
38 int offset,
39 int size,
40 u32 *val);
41int __devinit _tile_cfg_write(struct pci_controller *hose,
42 int bus,
43 int slot,
44 int function,
45 int offset,
46 int size,
47 u32 val);
48 53
49/* 54void __devinit pcibios_fixup_bus(struct pci_bus *bus);
50 * These are used to do config reads and writes in the early stages of
51 * setup before the driver infrastructure has been set up enough to be
52 * able to do config reads and writes.
53 */
54#define early_cfg_read(where, size, value) \
55 _tile_cfg_read(controller, \
56 current_bus, \
57 pci_slot, \
58 pci_fn, \
59 where, \
60 size, \
61 value)
62
63#define early_cfg_write(where, size, value) \
64 _tile_cfg_write(controller, \
65 current_bus, \
66 pci_slot, \
67 pci_fn, \
68 where, \
69 size, \
70 value)
71
72
73
74#define PCICFG_BYTE 1
75#define PCICFG_WORD 2
76#define PCICFG_DWORD 4
77 55
78#define TILE_NUM_PCIE 2 56#define TILE_NUM_PCIE 2
79 57
@@ -88,33 +66,33 @@ static inline int pci_proc_domain(struct pci_bus *bus)
88} 66}
89 67
90/* 68/*
91 * I/O space is currently not supported. 69 * pcibios_assign_all_busses() tells whether or not the bus numbers
70 * should be reassigned, in case the BIOS didn't do it correctly, or
71 * in case we don't have a BIOS and we want to let Linux do it.
92 */ 72 */
73static inline int pcibios_assign_all_busses(void)
74{
75 return 1;
76}
93 77
94#define TILE_PCIE_LOWER_IO 0x0 78/*
95#define TILE_PCIE_UPPER_IO 0x10000 79 * No special bus mastering setup handling.
96#define TILE_PCIE_PCIE_IO_SIZE 0x0000FFFF 80 */
97
98#define _PAGE_NO_CACHE 0
99#define _PAGE_GUARDED 0
100
101
102#define pcibios_assign_all_busses() pci_assign_all_buses
103extern int pci_assign_all_buses;
104
105static inline void pcibios_set_master(struct pci_dev *dev) 81static inline void pcibios_set_master(struct pci_dev *dev)
106{ 82{
107 /* No special bus mastering setup handling */
108} 83}
109 84
110#define PCIBIOS_MIN_MEM 0 85#define PCIBIOS_MIN_MEM 0
111#define PCIBIOS_MIN_IO TILE_PCIE_LOWER_IO 86#define PCIBIOS_MIN_IO 0
112 87
113/* 88/*
114 * This flag tells if the platform is TILEmpower, which needs 89 * This flag tells if the platform is TILEmpower, which needs
115 * special configuration for the PLX switch chip. 90 * special configuration for the PLX switch chip.
116 */ 91 */
117extern int blade_pci; 92extern int tile_plx_gen1;
93
94/* Use any cpu for PCI. */
95#define cpumask_of_pcibus(bus) cpu_online_mask
118 96
119/* implement the pci_ DMA API in terms of the generic device dma_ one */ 97/* implement the pci_ DMA API in terms of the generic device dma_ one */
120#include <asm-generic/pci-dma-compat.h> 98#include <asm-generic/pci-dma-compat.h>
@@ -122,7 +100,4 @@ extern int blade_pci;
122/* generic pci stuff */ 100/* generic pci stuff */
123#include <asm-generic/pci.h> 101#include <asm-generic/pci.h>
124 102
125/* Use any cpu for PCI. */
126#define cpumask_of_pcibus(bus) cpu_online_mask
127
128#endif /* _ASM_TILE_PCI_H */ 103#endif /* _ASM_TILE_PCI_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 1747ff3946b2..a9e7c8760334 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -292,8 +292,18 @@ extern int kstack_hash;
292/* Are we using huge pages in the TLB for kernel data? */ 292/* Are we using huge pages in the TLB for kernel data? */
293extern int kdata_huge; 293extern int kdata_huge;
294 294
295/* Support standard Linux prefetching. */
296#define ARCH_HAS_PREFETCH
297#define prefetch(x) __builtin_prefetch(x)
295#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE() 298#define PREFETCH_STRIDE CHIP_L2_LINE_SIZE()
296 299
300/* Bring a value into the L1D, faulting the TLB if necessary. */
301#ifdef __tilegx__
302#define prefetch_L1(x) __insn_prefetch_l1_fault((void *)(x))
303#else
304#define prefetch_L1(x) __insn_prefetch_L1((void *)(x))
305#endif
306
297#else /* __ASSEMBLY__ */ 307#else /* __ASSEMBLY__ */
298 308
299/* Do some slow action (e.g. read a slow SPR). */ 309/* Do some slow action (e.g. read a slow SPR). */
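
Since prefetch() above is defined as __builtin_prefetch(), the usual software-pipelined traversal idiom applies unchanged; a generic C sketch (list type invented; prefetch_L1() is omitted because it requires the Tile prefetch intrinsics):

#include <stddef.h>

struct node {
    struct node *next;
    long payload;
};

/* Prefetch the next node while the current one is being processed. */
static long sum_list(const struct node *n)
{
    long sum = 0;

    while (n) {
        if (n->next)
            __builtin_prefetch(n->next);   /* what prefetch(x) expands to */
        sum += n->payload;
        n = n->next;
    }
    return sum;
}

int main(void)
{
    struct node b = { NULL, 2 }, a = { &b, 1 };
    return (int)sum_list(&a) - 3;   /* returns 0 */
}
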
diff --git a/arch/tile/include/hv/drv_xgbe_impl.h b/arch/tile/include/hv/drv_xgbe_impl.h
new file mode 100644
index 000000000000..3a73b2b44913
--- /dev/null
+++ b/arch/tile/include/hv/drv_xgbe_impl.h
@@ -0,0 +1,300 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/**
16 * @file drivers/xgbe/impl.h
17 * Implementation details for the NetIO library.
18 */
19
20#ifndef __DRV_XGBE_IMPL_H__
21#define __DRV_XGBE_IMPL_H__
22
23#include <hv/netio_errors.h>
24#include <hv/netio_intf.h>
25#include <hv/drv_xgbe_intf.h>
26
27
28/** How many groups we have (log2). */
29#define LOG2_NUM_GROUPS (12)
30/** How many groups we have. */
31#define NUM_GROUPS (1 << LOG2_NUM_GROUPS)
32
33/** Number of output requests we'll buffer per tile. */
34#define EPP_REQS_PER_TILE (32)
35
36/** Words used in an eDMA command without checksum acceleration. */
37#define EDMA_WDS_NO_CSUM 8
38/** Words used in an eDMA command with checksum acceleration. */
39#define EDMA_WDS_CSUM 10
40/** Total available words in the eDMA command FIFO. */
41#define EDMA_WDS_TOTAL 128
42
43
44/*
45 * FIXME: These definitions are internal and should have underscores!
46 * NOTE: The actual numeric values here are intentional and allow us to
47 * optimize the concept "if small ... else if large ... else ...", by
48 * checking for the low bit being set, and then for non-zero.
49 * These are used as array indices, so they must have the values (0, 1, 2)
50 * in some order.
51 */
52#define SIZE_SMALL (1) /**< Small packet queue. */
53#define SIZE_LARGE (2) /**< Large packet queue. */
54#define SIZE_JUMBO (0) /**< Jumbo packet queue. */
55
56/** The number of "SIZE_xxx" values. */
57#define NETIO_NUM_SIZES 3
58
59
60/*
61 * Default numbers of packets for IPP drivers. These values are chosen
62 * such that CIPP1 will not overflow its L2 cache.
63 */
64
65/** The default number of small packets. */
66#define NETIO_DEFAULT_SMALL_PACKETS 2750
67/** The default number of large packets. */
68#define NETIO_DEFAULT_LARGE_PACKETS 2500
69/** The default number of jumbo packets. */
70#define NETIO_DEFAULT_JUMBO_PACKETS 250
71
72
73/** Log2 of the size of a memory arena. */
74#define NETIO_ARENA_SHIFT 24 /* 16 MB */
75/** Size of a memory arena. */
76#define NETIO_ARENA_SIZE (1 << NETIO_ARENA_SHIFT)
77
78
79/** A queue of packets.
80 *
81 * This structure partially defines a queue of packets waiting to be
82 * processed. The queue as a whole is written to by an interrupt handler and
83 * read by non-interrupt code; this data structure is what's touched by the
84 * interrupt handler. The other part of the queue state, the read offset, is
85 * kept in user space, not in hypervisor space, so it is in a separate data
86 * structure.
87 *
88 * The read offset (__packet_receive_read in the user part of the queue
89 * structure) points to the next packet to be read. When the read offset is
90 * equal to the write offset, the queue is empty; therefore the queue must
91 * contain one more slot than the required maximum queue size.
92 *
93 * Here's an example of all 3 state variables and what they mean. All
94 * pointers move left to right.
95 *
96 * @code
97 * I I V V V V I I I I
98 * 0 1 2 3 4 5 6 7 8 9 10
99 * ^ ^ ^ ^
100 * | | |
101 * | | __last_packet_plus_one
102 * | __packet_write
103 * __packet_receive_read
104 * @endcode
105 *
106 * This queue has 10 slots, and thus can hold 9 packets (_last_packet_plus_one
107 * = 10). The read pointer is at 2, and the write pointer is at 6; thus,
108 * there are valid, unread packets in slots 2, 3, 4, and 5. The remaining
109 * slots are invalid (do not contain a packet).
110 */
111typedef struct {
112 /** Byte offset of the next notify packet to be written: zero for the first
113 * packet on the queue, sizeof (netio_pkt_t) for the second packet on the
114 * queue, etc. */
115 volatile uint32_t __packet_write;
116
117 /** Offset of the packet after the last valid packet (i.e., when any
118 * pointer is incremented to this value, it wraps back to zero). */
119 uint32_t __last_packet_plus_one;
120}
121__netio_packet_queue_t;
122
123
124/** A queue of buffers.
125 *
126 * This structure partially defines a queue of empty buffers which have been
127 * obtained via requests to the IPP. (The elements of the queue are packet
128 * handles, which are transformed into a full netio_pkt_t when the buffer is
129 * retrieved.) The queue as a whole is written to by an interrupt handler and
130 * read by non-interrupt code; this data structure is what's touched by the
131 * interrupt handler. The other parts of the queue state, the read offset and
132 * requested write offset, are kept in user space, not in hypervisor space, so
133 * they are in a separate data structure.
134 *
135 * The read offset (__buffer_read in the user part of the queue structure)
136 * points to the next buffer to be read. When the read offset is equal to the
137 * write offset, the queue is empty; therefore the queue must contain one more
138 * slot than the required maximum queue size.
139 *
140 * The requested write offset (__buffer_requested_write in the user part of
141 * the queue structure) points to the slot which will hold the next buffer we
142 * request from the IPP, once we get around to sending such a request. When
143 * the requested write offset is equal to the write offset, no requests for
144 * new buffers are outstanding; when the requested write offset is one greater
145 * than the read offset, no more requests may be sent.
146 *
147 * Note that, unlike the packet_queue, the buffer_queue places incoming
148 * buffers at decreasing addresses. This makes the check for "is it time to
149 * wrap the buffer pointer" cheaper in the assembly code which receives new
150 * buffers, and means that the value which defines the queue size,
151 * __last_buffer, is different than in the packet queue. Also, the offset
152 * used in the packet_queue is already scaled by the size of a packet; here we
153 * use unscaled slot indices for the offsets. (These differences are
154 * historical, and in the future it's possible that the packet_queue will look
155 * more like this queue.)
156 *
157 * Here's an example of all 4 state variables and what they mean. Remember:
158 * all pointers move right to left.
159 *
160 * @code
161 * V V V I I R R V V V
162 * 0 1 2 3 4 5 6 7 8 9
163 * ^ ^ ^ ^
164 * | | | |
165 * | | | __last_buffer
166 * | | __buffer_write
167 * | __buffer_requested_write
168 * __buffer_read
169 * @endcode
170 *
171 * This queue has 10 slots, and thus can hold 9 buffers (_last_buffer = 9).
172 * The read pointer is at 2, and the write pointer is at 6; thus, there are
173 * valid, unread buffers in slots 2, 1, 0, 9, 8, and 7. The requested write
174 * pointer is at 4; thus, requests have been made to the IPP for buffers which
175 * will be placed in slots 6 and 5 when they arrive. Finally, the remaining
176 * slots are invalid (do not contain a buffer).
177 */
178typedef struct
179{
180 /** Ordinal number of the next buffer to be written: 0 for the first slot in
181 * the queue, 1 for the second slot in the queue, etc. */
182 volatile uint32_t __buffer_write;
183
184 /** Ordinal number of the last buffer (i.e., when any pointer is decremented
185 * below zero, it is reloaded with this value). */
186 uint32_t __last_buffer;
187}
188__netio_buffer_queue_t;
189
190
191/**
192 * An object for providing Ethernet packets to a process.
193 */
194typedef struct __netio_queue_impl_t
195{
196 /** The queue of packets waiting to be received. */
197 __netio_packet_queue_t __packet_receive_queue;
198 /** The intr bit mask that IDs this device. */
199 unsigned int __intr_id;
200 /** Offset to queues of empty buffers, one per size. */
201 uint32_t __buffer_queue[NETIO_NUM_SIZES];
202 /** The address of the first EPP tile, or -1 if no EPP. */
203 /* ISSUE: Actually this is always "0" or "~0". */
204 uint32_t __epp_location;
205 /** The queue ID that this queue represents. */
206 unsigned int __queue_id;
207 /** Number of acknowledgements received. */
208 volatile uint32_t __acks_received;
209 /** Last completion number received for packet_sendv. */
210 volatile uint32_t __last_completion_rcv;
211 /** Number of packets allowed to be outstanding. */
212 uint32_t __max_outstanding;
213 /** First VA available for packets. */
214 void* __va_0;
215 /** First VA in second range available for packets. */
216 void* __va_1;
217 /** Padding to align the "__packets" field to the size of a netio_pkt_t. */
218 uint32_t __padding[3];
219 /** The packets themselves. */
220 netio_pkt_t __packets[0];
221}
222netio_queue_impl_t;
223
224
225/**
226 * An object for managing the user end of a NetIO queue.
227 */
228typedef struct __netio_queue_user_impl_t
229{
230 /** The next incoming packet to be read. */
231 uint32_t __packet_receive_read;
232 /** The next empty buffers to be read, one index per size. */
233 uint8_t __buffer_read[NETIO_NUM_SIZES];
234 /** Where the empty buffer we next request from the IPP will go, one index
235 * per size. */
236 uint8_t __buffer_requested_write[NETIO_NUM_SIZES];
237 /** PCIe interface flag. */
238 uint8_t __pcie;
239 /** Number of packets left to be received before we send a credit update. */
240 uint32_t __receive_credit_remaining;
241 /** Value placed in __receive_credit_remaining when it reaches zero. */
242 uint32_t __receive_credit_interval;
243 /** First fast I/O routine index. */
244 uint32_t __fastio_index;
245 /** Number of acknowledgements expected. */
246 uint32_t __acks_outstanding;
247 /** Last completion number requested. */
248 uint32_t __last_completion_req;
249 /** File descriptor for driver. */
250 int __fd;
251}
252netio_queue_user_impl_t;
253
254
255#define NETIO_GROUP_CHUNK_SIZE 64 /**< Max # groups in one IPP request */
256#define NETIO_BUCKET_CHUNK_SIZE 64 /**< Max # buckets in one IPP request */
257
258
259/** Internal structure used to convey packet send information to the
260 * hypervisor. FIXME: Actually, it's not used for that anymore, but
261 * netio_packet_send() still uses it internally.
262 */
263typedef struct
264{
265 uint16_t flags; /**< Packet flags (__NETIO_SEND_FLG_xxx) */
266 uint16_t transfer_size; /**< Size of packet */
267 uint32_t va; /**< VA of start of packet */
268 __netio_pkt_handle_t handle; /**< Packet handle */
269 uint32_t csum0; /**< First checksum word */
270 uint32_t csum1; /**< Second checksum word */
271}
272__netio_send_cmd_t;
273
274
275/** Flags used in two contexts:
276 * - As the "flags" member in the __netio_send_cmd_t, above; used only
277 * for netio_pkt_send_{prepare,commit}.
278 * - As part of the flags passed to the various send packet fast I/O calls.
279 */
280
281/** Need acknowledgement on this packet. Note that some code in the
282 * normal send_pkt fast I/O handler assumes that this is equal to 1. */
283#define __NETIO_SEND_FLG_ACK 0x1
284
285/** Do checksum on this packet. (Only used with the __netio_send_cmd_t;
286 * normal packet sends use a special fast I/O index to denote checksumming,
287 * and multi-segment sends test the checksum descriptor.) */
288#define __NETIO_SEND_FLG_CSUM 0x2
289
290/** Get a completion on this packet. Only used with multi-segment sends. */
291#define __NETIO_SEND_FLG_COMPLETION 0x4
292
293/** Position of the number-of-extra-segments value in the flags word.
294 Only used with multi-segment sends. */
295#define __NETIO_SEND_FLG_XSEG_SHIFT 3
296
297/** Width of the number-of-extra-segments value in the flags word. */
298#define __NETIO_SEND_FLG_XSEG_WIDTH 2
299
300#endif /* __DRV_XGBE_IMPL_H__ */
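
Per the packet-queue documentation above, the number of unread packets is the byte distance from the reader's offset to __packet_write, wrapped at __last_packet_plus_one and divided by sizeof(netio_pkt_t). A sketch of that arithmetic (the packet size is stubbed, and the read offset is a parameter because it lives in the user half of the queue state):

#include <stdint.h>

typedef struct { unsigned char raw[64]; } netio_pkt_t;  /* size stubbed for the sketch */

typedef struct {
    volatile uint32_t __packet_write;     /* byte offset of the next write */
    uint32_t __last_packet_plus_one;      /* wrap point, in bytes */
} packet_queue_t;                         /* mirrors __netio_packet_queue_t */

/* Unread packets between the reader's byte offset and the writer's. */
static uint32_t packets_pending(const packet_queue_t *q, uint32_t read_off)
{
    uint32_t write = q->__packet_write;
    uint32_t bytes = (write >= read_off)
                         ? write - read_off
                         : write + q->__last_packet_plus_one - read_off;

    return bytes / sizeof(netio_pkt_t);
}

With the documentation's example (10 slots, read pointer at slot 2, write pointer at slot 6) this returns 4, matching the four valid packets shown there.
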
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
new file mode 100644
index 000000000000..146e47d5334b
--- /dev/null
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -0,0 +1,615 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/**
16 * @file drv_xgbe_intf.h
17 * Interface to the hypervisor XGBE driver.
18 */
19
20#ifndef __DRV_XGBE_INTF_H__
21#define __DRV_XGBE_INTF_H__
22
23/**
24 * An object for forwarding VAs and PAs to the hypervisor.
25 * @ingroup types
26 *
27 * This allows the supervisor to specify a number of areas of memory to
28 * store packet buffers.
29 */
30typedef struct
31{
32 /** The physical address of the memory. */
33 HV_PhysAddr pa;
34 /** Page table entry for the memory. This is only used to derive the
35 * memory's caching mode; the PA bits are ignored. */
36 HV_PTE pte;
37 /** The virtual address of the memory. */
38 HV_VirtAddr va;
39 /** Size (in bytes) of the memory area. */
40 int size;
41
42}
43netio_ipp_address_t;
44
45/** The various pread/pwrite offsets into the hypervisor-level driver.
46 * @ingroup types
47 */
48typedef enum
49{
50 /** Inform the Linux driver of the address of the NetIO arena memory.
51 * This offset is actually only used to convey information from netio
52 * to the Linux driver; it never makes it from there to the hypervisor.
53 * Write-only; takes a uint32_t specifying the VA address. */
54 NETIO_FIXED_ADDR = 0x5000000000000000ULL,
55
56 /** Inform the Linux driver of the size of the NetIO arena memory.
57 * This offset is actually only used to convey information from netio
58 * to the Linux driver; it never makes it from there to the hypervisor.
59 * Write-only; takes a uint32_t specifying the VA size. */
60 NETIO_FIXED_SIZE = 0x5100000000000000ULL,
61
62 /** Register current tile with IPP. Write then read: write, takes a
63 * netio_input_config_t, read returns a pointer to a netio_queue_impl_t. */
64 NETIO_IPP_INPUT_REGISTER_OFF = 0x6000000000000000ULL,
65
66 /** Unregister current tile from IPP. Write-only, takes a dummy argument. */
67 NETIO_IPP_INPUT_UNREGISTER_OFF = 0x6100000000000000ULL,
68
69 /** Start packets flowing. Write-only, takes a dummy argument. */
70 NETIO_IPP_INPUT_INIT_OFF = 0x6200000000000000ULL,
71
72 /** Stop packets flowing. Write-only, takes a dummy argument. */
73 NETIO_IPP_INPUT_UNINIT_OFF = 0x6300000000000000ULL,
74
75 /** Configure group (typically we group on VLAN). Write-only: takes an
76 * array of netio_group_t's, low 24 bits of the offset is the base group
77 * number times the size of a netio_group_t. */
78 NETIO_IPP_INPUT_GROUP_CFG_OFF = 0x6400000000000000ULL,
79
80 /** Configure bucket. Write-only: takes an array of netio_bucket_t's, low
81 * 24 bits of the offset is the base bucket number times the size of a
82 * netio_bucket_t. */
83 NETIO_IPP_INPUT_BUCKET_CFG_OFF = 0x6500000000000000ULL,
84
85 /** Get/set a parameter. Read or write: read or write data is the parameter
86 * value, low 32 bits of the offset is a __netio_getset_offset_t. */
87 NETIO_IPP_PARAM_OFF = 0x6600000000000000ULL,
88
89 /** Get fast I/O index. Read-only; returns a 4-byte base index value. */
90 NETIO_IPP_GET_FASTIO_OFF = 0x6700000000000000ULL,
91
92 /** Configure hijack IP address. Packets with this IPv4 dest address
93 * go to bucket NETIO_NUM_BUCKETS - 1. Write-only: takes an IP address
94 * in some standard form. FIXME: Define the form! */
95 NETIO_IPP_INPUT_HIJACK_CFG_OFF = 0x6800000000000000ULL,
96
97 /**
98 * Offsets beyond this point are reserved for the supervisor (although that
99 * enforcement must be done by the supervisor driver itself).
100 */
101 NETIO_IPP_USER_MAX_OFF = 0x6FFFFFFFFFFFFFFFULL,
102
103 /** Register I/O memory. Write-only, takes a netio_ipp_address_t. */
104 NETIO_IPP_IOMEM_REGISTER_OFF = 0x7000000000000000ULL,
105
106 /** Unregister I/O memory. Write-only, takes a netio_ipp_address_t. */
107 NETIO_IPP_IOMEM_UNREGISTER_OFF = 0x7100000000000000ULL,
108
109 /* Offsets greater than 0x7FFFFFFF can't be used directly from Linux
110 * userspace code due to limitations in the pread/pwrite syscalls. */
111
112 /** Drain LIPP buffers. */
113 NETIO_IPP_DRAIN_OFF = 0xFA00000000000000ULL,
114
115 /** Supply a netio_ipp_address_t to be used as shared memory for the
116 * LEPP command queue. */
117 NETIO_EPP_SHM_OFF = 0xFB00000000000000ULL,
118
119 /* 0xFC... is currently unused. */
120
121 /** Stop IPP/EPP tiles. Write-only, takes a dummy argument. */
122 NETIO_IPP_STOP_SHIM_OFF = 0xFD00000000000000ULL,
123
124 /** Start IPP/EPP tiles. Write-only, takes a dummy argument. */
125 NETIO_IPP_START_SHIM_OFF = 0xFE00000000000000ULL,
126
127 /** Supply packet arena. Write-only, takes an array of
128 * netio_ipp_address_t values. */
129 NETIO_IPP_ADDRESS_OFF = 0xFF00000000000000ULL,
130} netio_hv_offset_t;
131
132/** Extract the base offset from an offset */
133#define NETIO_BASE_OFFSET(off) ((off) & 0xFF00000000000000ULL)
134/** Extract the local offset from an offset */
135#define NETIO_LOCAL_OFFSET(off) ((off) & 0x00FFFFFFFFFFFFFFULL)
136
137
138/**
139 * Get/set offset.
140 */
141typedef union
142{
143 struct
144 {
145 uint64_t addr:48; /**< Class-specific address */
146 unsigned int class:8; /**< Class (e.g., NETIO_PARAM) */
147 unsigned int opcode:8; /**< High 8 bits of NETIO_IPP_PARAM_OFF */
148 }
149 bits; /**< Bitfields */
150 uint64_t word; /**< Aggregated value to use as the offset */
151}
152__netio_getset_offset_t;
153
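
For illustration, a parameter access offset is assembled by filling the bit-fields and reading back .word; a sketch (field values invented; "class" is renamed since it would collide with a C++ keyword, and the usual little-endian bit-field packing is assumed):

#include <stdint.h>
#include <stdio.h>

typedef union {
    struct {
        uint64_t addr:48;          /* class-specific address */
        unsigned int class_:8;     /* class, e.g. NETIO_PARAM */
        unsigned int opcode:8;     /* high 8 bits of NETIO_IPP_PARAM_OFF */
    } bits;
    uint64_t word;                 /* value to use as the pread/pwrite offset */
} getset_offset_t;                 /* mirrors __netio_getset_offset_t */

int main(void)
{
    getset_offset_t off = { .bits = { .addr = 0x1234, .class_ = 7, .opcode = 0x66 } };

    printf("offset word = 0x%016llx\n", (unsigned long long)off.word);
    return 0;
}
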
154/**
155 * Fast I/O index offsets (must be contiguous).
156 */
157typedef enum
158{
159 NETIO_FASTIO_ALLOCATE = 0, /**< Get empty packet buffer */
160 NETIO_FASTIO_FREE_BUFFER = 1, /**< Give buffer back to IPP */
161 NETIO_FASTIO_RETURN_CREDITS = 2, /**< Give credits to IPP */
162 NETIO_FASTIO_SEND_PKT_NOCK = 3, /**< Send a packet, no checksum */
163 NETIO_FASTIO_SEND_PKT_CK = 4, /**< Send a packet, with checksum */
164 NETIO_FASTIO_SEND_PKT_VEC = 5, /**< Send a vector of packets */
165 NETIO_FASTIO_SENDV_PKT = 6, /**< Sendv one packet */
166 NETIO_FASTIO_NUM_INDEX = 7, /**< Total number of fast I/O indices */
167} netio_fastio_index_t;
168
169/** 3-word return type for Fast I/O call. */
170typedef struct
171{
172 int err; /**< Error code. */
173 uint32_t val0; /**< Value. Meaning depends upon the specific call. */
174 uint32_t val1; /**< Value. Meaning depends upon the specific call. */
175} netio_fastio_rv3_t;
176
177/** 0-argument fast I/O call */
178int __netio_fastio0(uint32_t fastio_index);
179/** 1-argument fast I/O call */
180int __netio_fastio1(uint32_t fastio_index, uint32_t arg0);
181/** 3-argument fast I/O call, 3-word return value */
182netio_fastio_rv3_t __netio_fastio3_rv3(uint32_t fastio_index, uint32_t arg0,
183 uint32_t arg1, uint32_t arg2);
184/** 4-argument fast I/O call */
185int __netio_fastio4(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
186 uint32_t arg2, uint32_t arg3);
187/** 6-argument fast I/O call */
188int __netio_fastio6(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
189 uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5);
190/** 9-argument fast I/O call */
191int __netio_fastio9(uint32_t fastio_index, uint32_t arg0, uint32_t arg1,
192 uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5,
193 uint32_t arg6, uint32_t arg7, uint32_t arg8);
194
195/** Allocate an empty packet.
196 * @param fastio_index Fast I/O index.
197 * @param size Size of the packet to allocate.
198 */
199#define __netio_fastio_allocate(fastio_index, size) \
200 __netio_fastio1((fastio_index) + NETIO_FASTIO_ALLOCATE, size)
201
202/** Free a buffer.
203 * @param fastio_index Fast I/O index.
204 * @param handle Handle for the packet to free.
205 */
206#define __netio_fastio_free_buffer(fastio_index, handle) \
207 __netio_fastio1((fastio_index) + NETIO_FASTIO_FREE_BUFFER, handle)
208
209/** Increment our receive credits.
210 * @param fastio_index Fast I/O index.
211 * @param credits Number of credits to add.
212 */
213#define __netio_fastio_return_credits(fastio_index, credits) \
214 __netio_fastio1((fastio_index) + NETIO_FASTIO_RETURN_CREDITS, credits)
215
216/** Send packet, no checksum.
217 * @param fastio_index Fast I/O index.
218 * @param ackflag Nonzero if we want an ack.
219 * @param size Size of the packet.
220 * @param va Virtual address of start of packet.
221 * @param handle Packet handle.
222 */
223#define __netio_fastio_send_pkt_nock(fastio_index, ackflag, size, va, handle) \
224 __netio_fastio4((fastio_index) + NETIO_FASTIO_SEND_PKT_NOCK, ackflag, \
225 size, va, handle)
226
227/** Send packet, calculate checksum.
228 * @param fastio_index Fast I/O index.
229 * @param ackflag Nonzero if we want an ack.
230 * @param size Size of the packet.
231 * @param va Virtual address of start of packet.
232 * @param handle Packet handle.
233 * @param csum0 Shim checksum header.
234 * @param csum1 Checksum seed.
235 */
236#define __netio_fastio_send_pkt_ck(fastio_index, ackflag, size, va, handle, \
237 csum0, csum1) \
238 __netio_fastio6((fastio_index) + NETIO_FASTIO_SEND_PKT_CK, ackflag, \
239 size, va, handle, csum0, csum1)
240
241
242/** Format for the "csum0" argument to the __netio_fastio_send routines
243 * and LEPP. Note that this is currently exactly identical to the
244 * ShimProtocolOffloadHeader.
245 */
246typedef union
247{
248 struct
249 {
250 unsigned int start_byte:7; /**< The first byte to be checksummed */
251 unsigned int count:14; /**< Number of bytes to be checksummed. */
252 unsigned int destination_byte:7; /**< The byte to write the checksum to. */
253 unsigned int reserved:4; /**< Reserved. */
254 } bits; /**< Decomposed method of access. */
255 unsigned int word; /**< To send out the IDN. */
256} __netio_checksum_header_t;
257
258
259/** Sendv packet with 1 or 2 segments.
260 * @param fastio_index Fast I/O index.
261 * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
262 * 1 in next 2 bits; expected checksum in high 16 bits.
263 * @param confno Confirmation number to request, if notify flag set.
264 * @param csum0 Checksum descriptor; if zero, no checksum.
265 * @param va_F Virtual address of first segment.
266 * @param va_L Virtual address of last segment, if 2 segments.
267 * @param len_F_L Length of first segment in low 16 bits; length of last
268 * segment, if 2 segments, in high 16 bits.
269 */
270#define __netio_fastio_sendv_pkt_1_2(fastio_index, flags, confno, csum0, \
271 va_F, va_L, len_F_L) \
272 __netio_fastio6((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
273 csum0, va_F, va_L, len_F_L)
274
275/** Send packet on PCIe interface.
276 * @param fastio_index Fast I/O index.
277 * @param flags Ack/csum/notify flags in low 3 bits.
278 * @param confno Confirmation number to request, if notify flag set.
279 * @param csum0 Checksum descriptor; Hard wired 0, not needed for PCIe.
280 * @param va_F Virtual address of the packet buffer.
281 * @param va_L Virtual address of last segment, if 2 segments. Hard wired 0.
282 * @param len_F_L Length of the packet buffer in low 16 bits.
283 */
284#define __netio_fastio_send_pcie_pkt(fastio_index, flags, confno, csum0, \
285 va_F, va_L, len_F_L) \
286 __netio_fastio6((fastio_index) + PCIE_FASTIO_SENDV_PKT, flags, confno, \
287 csum0, va_F, va_L, len_F_L)
288
289/** Sendv packet with 3 or 4 segments.
290 * @param fastio_index Fast I/O index.
291 * @param flags Ack/csum/notify flags in low 3 bits; number of segments minus
292 * 1 in next 2 bits; expected checksum in high 16 bits.
293 * @param confno Confirmation number to request, if notify flag set.
294 * @param csum0 Checksum descriptor; if zero, no checksum.
295 * @param va_F Virtual address of first segment.
296 * @param va_L Virtual address of last segment (third segment if 3 segments,
297 * fourth segment if 4 segments).
298 * @param len_F_L Length of first segment in low 16 bits; length of last
299 * segment in high 16 bits.
300 * @param va_M0 Virtual address of "middle 0" segment; this segment is sent
301 * second when there are three segments, and third if there are four.
302 * @param va_M1 Virtual address of "middle 1" segment; this segment is sent
303 * second when there are four segments.
304 * @param len_M0_M1 Length of middle 0 segment in low 16 bits; length of middle
305 * 1 segment, if 4 segments, in high 16 bits.
306 */
307#define __netio_fastio_sendv_pkt_3_4(fastio_index, flags, confno, csum0, va_F, \
308 va_L, len_F_L, va_M0, va_M1, len_M0_M1) \
309 __netio_fastio9((fastio_index) + NETIO_FASTIO_SENDV_PKT, flags, confno, \
310 csum0, va_F, va_L, len_F_L, va_M0, va_M1, len_M0_M1)
311
312/** Send vector of packets.
313 * @param fastio_index Fast I/O index.
314 * @param seqno Number of packets transmitted so far on this interface;
315 * used to decide which packets should be acknowledged.
316 * @param nentries Number of entries in vector.
317 * @param va Virtual address of start of vector entry array.
318 * @return 3-word netio_fastio_rv3_t structure. The structure's err member
319 * is an error code, or zero if no error. The val0 member is the
320 * updated value of seqno; it has been incremented by 1 for each
321 * packet sent. That increment may be less than nentries if an
322 * error occurred, or if some of the entries in the vector contain
323 * handles equal to NETIO_PKT_HANDLE_NONE. The val1 member is the
324 * updated value of nentries; it has been decremented by 1 for each
325 * vector entry processed. Again, that decrement may be less than
326 * nentries (leaving the returned value positive) if an error
327 * occurred.
328 */
329#define __netio_fastio_send_pkt_vec(fastio_index, seqno, nentries, va) \
330 __netio_fastio3_rv3((fastio_index) + NETIO_FASTIO_SEND_PKT_VEC, seqno, \
331 nentries, va)
332
333
334/** An egress DMA command for LEPP. */
335typedef struct
336{
337 /** Is this a TSO transfer?
338 *
339 * NOTE: This field is always 0, to distinguish it from
340 * lepp_tso_cmd_t. It must come first!
341 */
342 uint8_t tso : 1;
343
344 /** Unused padding bits. */
345 uint8_t _unused : 3;
346
347 /** Should this packet be sent directly from caches instead of DRAM,
348 * using hash-for-home to locate the packet data?
349 */
350 uint8_t hash_for_home : 1;
351
352 /** Should we compute a checksum? */
353 uint8_t compute_checksum : 1;
354
355 /** Is this the final buffer for this packet?
356 *
357 * A single packet can be split over several input buffers (a "gather"
358 * operation). This flag indicates that this is the last buffer
359 * in a packet.
360 */
361 uint8_t end_of_packet : 1;
362
363 /** Should LEPP advance 'comp_busy' when this DMA is fully finished? */
364 uint8_t send_completion : 1;
365
366 /** High bits of Client Physical Address of the start of the buffer
367 * to be egressed.
368 *
369 * NOTE: Only 6 bits are actually needed here, as CPAs are
370 * currently 38 bits. So two bits could be scavenged from this.
371 */
372 uint8_t cpa_hi;
373
374 /** The number of bytes to be egressed. */
375 uint16_t length;
376
377 /** Low 32 bits of Client Physical Address of the start of the buffer
378 * to be egressed.
379 */
380 uint32_t cpa_lo;
381
382 /** Checksum information (only used if 'compute_checksum'). */
383 __netio_checksum_header_t checksum_data;
384
385} lepp_cmd_t;
386
387
388/** A chunk of physical memory for a TSO egress. */
389typedef struct
390{
391 /** The low bits of the CPA. */
392 uint32_t cpa_lo;
393 /** The high bits of the CPA. */
394 uint16_t cpa_hi : 15;
395 /** Should this packet be sent directly from caches instead of DRAM,
396 * using hash-for-home to locate the packet data?
397 */
398 uint16_t hash_for_home : 1;
399 /** The length in bytes. */
400 uint16_t length;
401} lepp_frag_t;
402
403
404/** An LEPP command that handles TSO. */
405typedef struct
406{
407 /** Is this a TSO transfer?
408 *
409 * NOTE: This field is always 1, to distinguish it from
410 * lepp_cmd_t. It must come first!
411 */
412 uint8_t tso : 1;
413
414 /** Unused padding bits. */
415 uint8_t _unused : 7;
416
417 /** Size of the header[] array in bytes. It must be in the range
418 * [40, 127], which are the smallest header for a TCP packet over
419 * Ethernet and the maximum possible prepend size supported by
420 * hardware, respectively. Note that the array storage must be
421 * padded out to a multiple of four bytes so that the following
422 * LEPP command is aligned properly.
423 */
424 uint8_t header_size;
425
426 /** Byte offset of the IP header in header[]. */
427 uint8_t ip_offset;
428
429 /** Byte offset of the TCP header in header[]. */
430 uint8_t tcp_offset;
431
432 /** The number of bytes to use for the payload of each packet,
433 * except of course the last one, which may not have enough bytes.
434 * This means that each Ethernet packet except the last will have a
435 * size of header_size + payload_size.
436 */
437 uint16_t payload_size;
438
439 /** The length of the 'frags' array that follows this struct. */
440 uint16_t num_frags;
441
442 /** The actual frags. */
443 lepp_frag_t frags[0 /* Variable-sized; num_frags entries. */];
444
445 /*
446 * The packet header template logically follows frags[],
447 * but you can't declare that in C.
448 *
449 * uint32_t header[header_size_in_words_rounded_up];
450 */
451
452} lepp_tso_cmd_t;
453
454
455/** An LEPP completion ring entry. */
456typedef void* lepp_comp_t;
457
458
459/** Maximum number of frags for one TSO command. This is adapted from
460 * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
461 * our page size of exactly 65536. We add one for a "body" fragment.
462 */
463#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1)
464
465/** Total number of bytes needed for an lepp_tso_cmd_t. */
466#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
467 (sizeof(lepp_tso_cmd_t) + \
468 (num_frags) * sizeof(lepp_frag_t) + \
469 (((header_size) + 3) & -4))
470
471/** The size of the lepp "cmd" queue. */
472#define LEPP_CMD_QUEUE_BYTES \
473 (((CHIP_L2_CACHE_SIZE() - 2 * CHIP_L2_LINE_SIZE()) / \
474 (sizeof(lepp_cmd_t) + sizeof(lepp_comp_t))) * sizeof(lepp_cmd_t))
475
476/** The largest possible command that can go in lepp_queue_t::cmds[]. */
477#define LEPP_MAX_CMD_SIZE LEPP_TSO_CMD_SIZE(LEPP_MAX_FRAGS, 128)
478
479/** The largest possible value of lepp_queue_t::cmd_{head, tail} (inclusive).
480 */
481#define LEPP_CMD_LIMIT \
482 (LEPP_CMD_QUEUE_BYTES - LEPP_MAX_CMD_SIZE)
483
484/** The maximum number of completions in an LEPP queue. */
485#define LEPP_COMP_QUEUE_SIZE \
486 ((LEPP_CMD_LIMIT + sizeof(lepp_cmd_t) - 1) / sizeof(lepp_cmd_t))
487
488/** Increment an index modulo the queue size. */
489#define LEPP_QINC(var) \
490 (var = __insn_mnz(var - (LEPP_COMP_QUEUE_SIZE - 1), var + 1))
491
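
__insn_mnz(a, b) is the Tile "move if non-zero" operation: it yields b when a is non-zero and 0 otherwise. Assuming that semantics, LEPP_QINC above is a branch-free form of this plain-C update (sketch, not part of the header):

#define LEPP_QINC_PLAIN(var) \
    ((var) = ((var) == LEPP_COMP_QUEUE_SIZE - 1) ? 0 : (var) + 1)
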
492/** A queue used to convey egress commands from the client to LEPP. */
493typedef struct
494{
495 /** Index of first completion not yet processed by user code.
496 * If this is equal to comp_busy, there are no such completions.
497 *
498 * NOTE: This is only read/written by the user.
499 */
500 unsigned int comp_head;
501
502 /** Index of first completion record not yet completed.
503 * If this is equal to comp_tail, there are no such completions.
504 * This index gets advanced (modulo LEPP_COMP_QUEUE_SIZE) whenever
505 * a command with the 'completion' bit set is finished.
506 *
507 * NOTE: This is only written by LEPP, only read by the user.
508 */
509 volatile unsigned int comp_busy;
510
511 /** Index of the first empty slot in the completion ring.
512 * Entries from this up to but not including comp_head (in ring order)
513 * can be filled in with completion data.
514 *
515 * NOTE: This is only read/written by the user.
516 */
517 unsigned int comp_tail;
518
519 /** Byte index of first command enqueued for LEPP but not yet processed.
520 *
521 * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
522 *
523 * NOTE: LEPP advances this counter as soon as it no longer needs
524 * the cmds[] storage for this entry, but the transfer is not actually
525 * complete (i.e. the buffer pointed to by the command is no longer
526 * needed) until comp_busy advances.
527 *
528 * If this is equal to cmd_tail, the ring is empty.
529 *
530 * NOTE: This is only written by LEPP, only read by the user.
531 */
532 volatile unsigned int cmd_head;
533
534 /** Byte index of first empty slot in the command ring. This field can
535 * be incremented up to but not equal to cmd_head (because that would
536 * mean the ring is empty).
537 *
538 * This is always divisible by sizeof(void*) and always <= LEPP_CMD_LIMIT.
539 *
540 * NOTE: This is read/written by the user, only read by LEPP.
541 */
542 volatile unsigned int cmd_tail;
543
544 /** A ring of variable-sized egress DMA commands.
545 *
546 * NOTE: Only written by the user, only read by LEPP.
547 */
548 char cmds[LEPP_CMD_QUEUE_BYTES]
549 __attribute__((aligned(CHIP_L2_LINE_SIZE())));
550
551 /** A ring of user completion data.
552 * NOTE: Only read/written by the user.
553 */
554 lepp_comp_t comps[LEPP_COMP_QUEUE_SIZE]
555 __attribute__((aligned(CHIP_L2_LINE_SIZE())));
556} lepp_queue_t;
557
558
559/** An internal helper function for determining the number of entries
560 * available in a ring buffer, given that there is one sentinel.
561 */
562static inline unsigned int
563_lepp_num_free_slots(unsigned int head, unsigned int tail)
564{
565 /*
566 * One entry is reserved for use as a sentinel, to distinguish
567 * "empty" from "full". So we compute
568 * (head - tail - 1) % LEPP_COMP_QUEUE_SIZE, but without using a slow % operation.
569 */
570 return (head - tail - 1) + ((head <= tail) ? LEPP_COMP_QUEUE_SIZE : 0);
571}
572
573
574/** Returns how many new comp entries can be enqueued. */
575static inline unsigned int
576lepp_num_free_comp_slots(const lepp_queue_t* q)
577{
578 return _lepp_num_free_slots(q->comp_head, q->comp_tail);
579}
580
581static inline int
582lepp_qsub(int v1, int v2)
583{
584 int delta = v1 - v2;
585 return delta + ((delta >> 31) & LEPP_COMP_QUEUE_SIZE);
586}
587
588
589/** FIXME: Check this from linux, via a new "pwrite()" call. */
590#define LIPP_VERSION 1
591
592
593/** We use exactly two bytes of alignment padding. */
594#define LIPP_PACKET_PADDING 2
595
596/** The minimum size of a "small" buffer (including the padding). */
597#define LIPP_SMALL_PACKET_SIZE 128
598
599/*
600 * NOTE: The following two values should total to less than around
601 * 13582, to keep the total size used for "lipp_state_t" below 64K.
602 */
603
604/** The maximum number of "small" buffers.
605 * This is enough for 53 network cpus with 128 credits. Note that
606 * if these are exhausted, we will fall back to using large buffers.
607 */
608#define LIPP_SMALL_BUFFERS 6785
609
610/** The maximum number of "large" buffers.
611 * This is enough for 53 network cpus with 128 credits.
612 */
613#define LIPP_LARGE_BUFFERS 6785
614
615#endif /* __DRV_XGBE_INTF_H__ */
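
The sentinel slot in _lepp_num_free_slots() means an empty ring reports size - 1 free entries, never the full size. A stand-alone check of that arithmetic (queue size shrunk to 8 for readability):

#include <assert.h>

#define QUEUE_SIZE 8   /* stand-in for LEPP_COMP_QUEUE_SIZE */

/* Same arithmetic as _lepp_num_free_slots(): one slot is a sentinel. */
static unsigned int num_free_slots(unsigned int head, unsigned int tail)
{
    return (head - tail - 1) + ((head <= tail) ? QUEUE_SIZE : 0);
}

int main(void)
{
    assert(num_free_slots(0, 0) == QUEUE_SIZE - 1);  /* empty ring */
    assert(num_free_slots(3, 2) == 0);               /* full: tail just behind head */
    assert(num_free_slots(5, 2) == 2);               /* two slots still free */
    return 0;
}
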
diff --git a/arch/tile/include/hv/netio_errors.h b/arch/tile/include/hv/netio_errors.h
new file mode 100644
index 000000000000..e1591bff61b5
--- /dev/null
+++ b/arch/tile/include/hv/netio_errors.h
@@ -0,0 +1,122 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/**
16 * Error codes returned from NetIO routines.
17 */
18
19#ifndef __NETIO_ERRORS_H__
20#define __NETIO_ERRORS_H__
21
22/**
23 * @addtogroup error
24 *
25 * @brief The error codes returned by NetIO functions.
26 *
27 * NetIO functions return 0 (defined as ::NETIO_NO_ERROR) on success, and
28 * a negative value if an error occurs.
29 *
30 * In cases where a NetIO function fails due to an error reported by
31 * system libraries, the error code will be the negation of the
32 * system errno at the time of failure. The @ref netio_strerror()
33 * function will deliver error strings for both NetIO and system error
34 * codes.
35 *
36 * @{
37 */
38
39/** The set of all NetIO errors. */
40typedef enum
41{
42 /** Operation successfully completed. */
43 NETIO_NO_ERROR = 0,
44
45 /** A packet was successfully retrieved from an input queue. */
46 NETIO_PKT = 0,
47
48 /** Largest NetIO error number. */
49 NETIO_ERR_MAX = -701,
50
51 /** The tile is not registered with the IPP. */
52 NETIO_NOT_REGISTERED = -701,
53
54 /** No packet was available to retrieve from the input queue. */
55 NETIO_NOPKT = -702,
56
57 /** The requested function is not implemented. */
58 NETIO_NOT_IMPLEMENTED = -703,
59
60 /** On a registration operation, the target queue already has the maximum
61 * number of tiles registered for it, and no more may be added. On a
62 * packet send operation, the output queue is full and nothing more can
63 * be queued until some of the queued packets are actually transmitted. */
64 NETIO_QUEUE_FULL = -704,
65
66 /** The calling process or thread is not bound to exactly one CPU. */
67 NETIO_BAD_AFFINITY = -705,
68
69 /** Cannot allocate memory on requested controllers. */
70 NETIO_CANNOT_HOME = -706,
71
72 /** On a registration operation, the IPP specified is not configured
73 * to support the options requested; for instance, the application
74 * wants a specific type of tagged headers which the configured IPP
75 * doesn't support. Or, the supplied configuration information is
76 * not self-consistent, or is out of range; for instance, specifying
77 * both NETIO_RECV and NETIO_NO_RECV, or asking for more than
78 * NETIO_MAX_SEND_BUFFERS to be preallocated. On a VLAN or bucket
79 * configure operation, the number of items, or the base item, was
80 * out of range.
81 */
82 NETIO_BAD_CONFIG = -707,
83
84 /** Too many tiles have registered to transmit packets. */
85 NETIO_TOOMANY_XMIT = -708,
86
87 /** Packet transmission was attempted on a queue which was registered
88 with transmit disabled. */
89 NETIO_UNREG_XMIT = -709,
90
91 /** This tile is already registered with the IPP. */
92 NETIO_ALREADY_REGISTERED = -710,
93
94 /** The Ethernet link is down. The application should try again later. */
95 NETIO_LINK_DOWN = -711,
96
97 /** An invalid memory buffer has been specified. This may be an unmapped
98 * virtual address, or one which does not meet alignment requirements.
99 * For netio_input_register(), this error may be returned when multiple
100 * processes specify different memory regions to be used for NetIO
101 * buffers. That can happen if these processes specify explicit memory
102 * regions with the ::NETIO_FIXED_BUFFER_VA flag, or if tmc_cmem_init()
103 * has not been called by a common ancestor of the processes.
104 */
105 NETIO_FAULT = -712,
106
107 /** Cannot combine user-managed shared memory and cache coherence. */
108 NETIO_BAD_CACHE_CONFIG = -713,
109
110 /** Smallest NetIO error number. */
111 NETIO_ERR_MIN = -713,
112
113#ifndef __DOXYGEN__
114 /** Used internally to mean that no response is needed; never returned to
115 * an application. */
116 NETIO_NO_RESPONSE = 1
117#endif
118} netio_error_t;
119
120/** @} */
121
122#endif /* __NETIO_ERRORS_H__ */
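
Because NetIO-specific codes occupy [NETIO_ERR_MIN, NETIO_ERR_MAX] while system failures come back as negated errno values, a caller can triage a return code by range alone; netio_strerror() (mentioned above) covers both, but a sketch using just the ranges (constants copied from this header):

#include <stdio.h>
#include <string.h>

#define NETIO_NO_ERROR 0
#define NETIO_ERR_MAX (-701)   /* largest (closest to zero) NetIO error */
#define NETIO_ERR_MIN (-713)   /* smallest NetIO error */

static void report(int err)
{
    if (err == NETIO_NO_ERROR)
        printf("ok\n");
    else if (err >= NETIO_ERR_MIN && err <= NETIO_ERR_MAX)
        printf("NetIO error %d\n", err);                 /* e.g. NETIO_NOPKT */
    else if (err < 0)
        printf("system error: %s\n", strerror(-err));    /* negated errno */
    else
        printf("unexpected value %d\n", err);
}

int main(void)
{
    report(0);       /* ok */
    report(-702);    /* NetIO error -702 (NETIO_NOPKT) */
    report(-2);      /* system error (negated ENOENT) */
    return 0;
}
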
diff --git a/arch/tile/include/hv/netio_intf.h b/arch/tile/include/hv/netio_intf.h
new file mode 100644
index 000000000000..8d20972aba2c
--- /dev/null
+++ b/arch/tile/include/hv/netio_intf.h
@@ -0,0 +1,2975 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15/**
16 * NetIO interface structures and macros.
17 */
18
19#ifndef __NETIO_INTF_H__
20#define __NETIO_INTF_H__
21
22#include <hv/netio_errors.h>
23
24#ifdef __KERNEL__
25#include <linux/types.h>
26#else
27#include <stdint.h>
28#endif
29
30#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__)
31#include <assert.h>
32#define netio_assert assert /**< Enable assertions from macros */
33#else
34#define netio_assert(...) ((void)(0)) /**< Disable assertions from macros */
35#endif
36
37/*
38 * If none of these symbols are defined, we're building libnetio in an
39 * environment where we have pthreads, so we'll enable locking.
40 */
41#if !defined(__HV__) && !defined(__BOGUX__) && !defined(__KERNEL__) && \
42 !defined(__NEWLIB__)
43#define _NETIO_PTHREAD /**< Include a mutex in netio_queue_t below */
44
45/*
46 * If NETIO_UNLOCKED is defined, we don't use per-cpu locks on
47 * per-packet NetIO operations. We still do pthread locking on things
48 * like netio_input_register, though. This is used for building
49 * libnetio_unlocked.
50 */
51#ifndef NETIO_UNLOCKED
52
53/* Avoid PLT overhead by using our own inlined per-cpu lock. */
54#include <sched.h>
55typedef int _netio_percpu_mutex_t;
56
57static __inline int
58_netio_percpu_mutex_init(_netio_percpu_mutex_t* lock)
59{
60 *lock = 0;
61 return 0;
62}
63
64static __inline int
65_netio_percpu_mutex_lock(_netio_percpu_mutex_t* lock)
66{
67 while (__builtin_expect(__insn_tns(lock), 0))
68 sched_yield();
69 return 0;
70}
71
72static __inline int
73_netio_percpu_mutex_unlock(_netio_percpu_mutex_t* lock)
74{
75 *lock = 0;
76 return 0;
77}
78
79#else /* NETIO_UNLOCKED */
80
81/* Don't do any locking for per-packet NetIO operations. */
82typedef int _netio_percpu_mutex_t;
83#define _netio_percpu_mutex_init(L)
84#define _netio_percpu_mutex_lock(L)
85#define _netio_percpu_mutex_unlock(L)
86
87#endif /* NETIO_UNLOCKED */
88#endif /* !__HV__, !__BOGUX__, !__KERNEL__, !__NEWLIB__ */
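A minimal sketch of how these primitives pair up in the userspace build where they exist (the lock name and the guarded operation are hypothetical; libnetio itself keeps one lock per CPU rather than a single global one):

static _netio_percpu_mutex_t example_lock;  /* static zero-initialization
                                               does what _init() does */

static __inline void
example_locked_op(void)
{
  _netio_percpu_mutex_lock(&example_lock);
  /* ... per-packet bookkeeping ... */
  _netio_percpu_mutex_unlock(&example_lock);
}

In the NETIO_UNLOCKED build the lock and unlock calls expand to nothing, so the same code compiles down to the bare operation.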
89
90/** How many tiles can register for a given queue.
91 * @ingroup setup */
92#define NETIO_MAX_TILES_PER_QUEUE 64
93
94
95/** Largest permissible queue identifier.
96 * @ingroup setup */
97#define NETIO_MAX_QUEUE_ID 255
98
99
100#ifndef __DOXYGEN__
101
102/* Metadata packet checksum/ethertype flags. */
103
104/** The L4 checksum has not been calculated. */
105#define _NETIO_PKT_NO_L4_CSUM_SHIFT 0
106#define _NETIO_PKT_NO_L4_CSUM_RMASK 1
107#define _NETIO_PKT_NO_L4_CSUM_MASK \
108 (_NETIO_PKT_NO_L4_CSUM_RMASK << _NETIO_PKT_NO_L4_CSUM_SHIFT)
109
110/** The L3 checksum has not been calculated. */
111#define _NETIO_PKT_NO_L3_CSUM_SHIFT 1
112#define _NETIO_PKT_NO_L3_CSUM_RMASK 1
113#define _NETIO_PKT_NO_L3_CSUM_MASK \
114 (_NETIO_PKT_NO_L3_CSUM_RMASK << _NETIO_PKT_NO_L3_CSUM_SHIFT)
115
116/** The L3 checksum is incorrect (or perhaps has not been calculated). */
117#define _NETIO_PKT_BAD_L3_CSUM_SHIFT 2
118#define _NETIO_PKT_BAD_L3_CSUM_RMASK 1
119#define _NETIO_PKT_BAD_L3_CSUM_MASK \
120 (_NETIO_PKT_BAD_L3_CSUM_RMASK << _NETIO_PKT_BAD_L3_CSUM_SHIFT)
121
122/** The Ethernet packet type is unrecognized. */
123#define _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT 3
124#define _NETIO_PKT_TYPE_UNRECOGNIZED_RMASK 1
125#define _NETIO_PKT_TYPE_UNRECOGNIZED_MASK \
126 (_NETIO_PKT_TYPE_UNRECOGNIZED_RMASK << \
127 _NETIO_PKT_TYPE_UNRECOGNIZED_SHIFT)
128
129/* Metadata packet type flags. */
130
131/** Where the packet type bits are; this field is the index into
132 * _netio_pkt_info. */
133#define _NETIO_PKT_TYPE_SHIFT 4
134#define _NETIO_PKT_TYPE_RMASK 0x3F
135
136/** How many VLAN tags the packet has, and, if we have two, which one we
137 * actually grouped on. A VLAN within a proprietary (Marvell or Broadcom)
138 * tag is counted here. */
139#define _NETIO_PKT_VLAN_SHIFT 4
140#define _NETIO_PKT_VLAN_RMASK 0x3
141#define _NETIO_PKT_VLAN_MASK \
142 (_NETIO_PKT_VLAN_RMASK << _NETIO_PKT_VLAN_SHIFT)
143#define _NETIO_PKT_VLAN_NONE 0 /* No VLAN tag. */
144#define _NETIO_PKT_VLAN_ONE 1 /* One VLAN tag. */
145#define _NETIO_PKT_VLAN_TWO_OUTER 2 /* Two VLAN tags, outer one used. */
146#define _NETIO_PKT_VLAN_TWO_INNER 3 /* Two VLAN tags, inner one used. */
147
148/** Which proprietary tags the packet has. */
149#define _NETIO_PKT_TAG_SHIFT 6
150#define _NETIO_PKT_TAG_RMASK 0x3
151#define _NETIO_PKT_TAG_MASK \
152 (_NETIO_PKT_TAG_RMASK << _NETIO_PKT_TAG_SHIFT)
153#define _NETIO_PKT_TAG_NONE 0 /* No proprietary tags. */
154#define _NETIO_PKT_TAG_MRVL 1 /* Marvell HyperG.Stack tags. */
155#define _NETIO_PKT_TAG_MRVL_EXT 2 /* HyperG.Stack extended tags. */
156#define _NETIO_PKT_TAG_BRCM 3 /* Broadcom HiGig tags. */
157
158/** Whether a packet has an LLC + SNAP header. */
159#define _NETIO_PKT_SNAP_SHIFT 8
160#define _NETIO_PKT_SNAP_RMASK 0x1
161#define _NETIO_PKT_SNAP_MASK \
162 (_NETIO_PKT_SNAP_RMASK << _NETIO_PKT_SNAP_SHIFT)
163
164/* NOTE: Bits 9 and 10 are unused. */
165
166/** Length of any custom data before the L2 header, in words. */
167#define _NETIO_PKT_CUSTOM_LEN_SHIFT 11
168#define _NETIO_PKT_CUSTOM_LEN_RMASK 0x1F
169#define _NETIO_PKT_CUSTOM_LEN_MASK \
170 (_NETIO_PKT_CUSTOM_LEN_RMASK << _NETIO_PKT_CUSTOM_LEN_SHIFT)
171
172/** The L4 checksum is incorrect (or perhaps has not been calculated). */
173#define _NETIO_PKT_BAD_L4_CSUM_SHIFT 16
174#define _NETIO_PKT_BAD_L4_CSUM_RMASK 0x1
175#define _NETIO_PKT_BAD_L4_CSUM_MASK \
176 (_NETIO_PKT_BAD_L4_CSUM_RMASK << _NETIO_PKT_BAD_L4_CSUM_SHIFT)
177
178/** Length of the L2 header, in words. */
179#define _NETIO_PKT_L2_LEN_SHIFT 17
180#define _NETIO_PKT_L2_LEN_RMASK 0x1F
181#define _NETIO_PKT_L2_LEN_MASK \
182 (_NETIO_PKT_L2_LEN_RMASK << _NETIO_PKT_L2_LEN_SHIFT)
183
184
185/* Flags in minimal packet metadata. */
186
187/** We need an eDMA checksum on this packet. */
188#define _NETIO_PKT_NEED_EDMA_CSUM_SHIFT 0
189#define _NETIO_PKT_NEED_EDMA_CSUM_RMASK 1
190#define _NETIO_PKT_NEED_EDMA_CSUM_MASK \
191 (_NETIO_PKT_NEED_EDMA_CSUM_RMASK << _NETIO_PKT_NEED_EDMA_CSUM_SHIFT)
192
193/* Data within the packet information table. */
194
195/* Note that, for efficiency, code which uses these fields assumes that none
196 * of the shift values below are zero. See uses below for an explanation. */
197
198/** Offset within the L2 header of the innermost ethertype (in halfwords). */
199#define _NETIO_PKT_INFO_ETYPE_SHIFT 6
200#define _NETIO_PKT_INFO_ETYPE_RMASK 0x1F
201
202/** Offset within the L2 header of the VLAN tag (in halfwords). */
203#define _NETIO_PKT_INFO_VLAN_SHIFT 11
204#define _NETIO_PKT_INFO_VLAN_RMASK 0x1F
205
206#endif
207
208
209/** The size of a memory buffer representing a small packet.
210 * @ingroup egress */
211#define SMALL_PACKET_SIZE 256
212
213/** The size of a memory buffer representing a large packet.
214 * @ingroup egress */
215#define LARGE_PACKET_SIZE 2048
216
217/** The size of a memory buffer representing a jumbo packet.
218 * @ingroup egress */
219#define JUMBO_PACKET_SIZE (12 * 1024)
220
221
222/* Common ethertypes.
223 * @ingroup ingress */
224/** @{ */
225/** The ethertype of IPv4. */
226#define ETHERTYPE_IPv4 (0x0800)
227/** The ethertype of ARP. */
228#define ETHERTYPE_ARP (0x0806)
229/** The ethertype of VLANs. */
230#define ETHERTYPE_VLAN (0x8100)
231/** The ethertype of a Q-in-Q header. */
232#define ETHERTYPE_Q_IN_Q (0x9100)
233/** The ethertype of IPv6. */
234#define ETHERTYPE_IPv6 (0x86DD)
235/** The ethertype of MPLS. */
236#define ETHERTYPE_MPLS (0x8847)
237/** @} */
238
239
240/** The possible return values of NETIO_PKT_STATUS.
241 * @ingroup ingress
242 */
243typedef enum
244{
245 /** No problems were detected with this packet. */
246 NETIO_PKT_STATUS_OK,
247 /** The packet is undersized; this is expected behavior if the packet's
248 * ethertype is unrecognized, but otherwise the packet is likely corrupt. */
249 NETIO_PKT_STATUS_UNDERSIZE,
250 /** The packet is oversized and some trailing bytes have been discarded.
251 This is expected behavior for short packets, since it's impossible to
252 precisely determine the amount of padding which may have been added to
253 them to make them meet the minimum Ethernet packet size. */
254 NETIO_PKT_STATUS_OVERSIZE,
255 /** The packet was judged to be corrupt by hardware (for instance, it had
256 a bad CRC, or part of it was discarded due to lack of buffer space in
257 the I/O shim) and should be discarded. */
258 NETIO_PKT_STATUS_BAD
259} netio_pkt_status_t;
260
261
262/** Log2 of how many buckets we have. */
263#define NETIO_LOG2_NUM_BUCKETS (10)
264
265/** How many buckets we have.
266 * @ingroup ingress */
267#define NETIO_NUM_BUCKETS (1 << NETIO_LOG2_NUM_BUCKETS)
268
269
270/**
271 * @brief A group-to-bucket identifier.
272 *
273 * @ingroup setup
274 *
275 * This tells us what to do with a given group.
276 */
277typedef union {
278 /** The header broken down into bits. */
279 struct {
280 /** Whether we should balance on L4, if available */
281 unsigned int __balance_on_l4:1;
282 /** Whether we should balance on L3, if available */
283 unsigned int __balance_on_l3:1;
284 /** Whether we should balance on L2, if available */
285 unsigned int __balance_on_l2:1;
286 /** Reserved for future use */
287 unsigned int __reserved:1;
288 /** The base bucket to use to send traffic */
289 unsigned int __bucket_base:NETIO_LOG2_NUM_BUCKETS;
290 /** The mask to apply to the balancing value. This must be one less
291 * than a power of two, e.g. 0x3 or 0xFF.
292 */
293 unsigned int __bucket_mask:NETIO_LOG2_NUM_BUCKETS;
294 /** Pad to 32 bits */
295 unsigned int __padding:(32 - 4 - 2 * NETIO_LOG2_NUM_BUCKETS);
296 } bits;
297 /** The whole word, to send out over the IDN. */
298 unsigned int word;
299}
300netio_group_t;
301
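A rough illustration of filling in this union (field values only; the setup call that would consume the resulting word is declared elsewhere):

static __inline unsigned int
example_group_word(void)
{
  netio_group_t g;
  g.bits.__balance_on_l4 = 1;   /* balance on L4 if available */
  g.bits.__balance_on_l3 = 1;   /* balance on L3 if available */
  g.bits.__balance_on_l2 = 0;
  g.bits.__reserved = 0;
  g.bits.__bucket_base = 0;     /* use buckets 0..7 */
  g.bits.__bucket_mask = 0x7;   /* one less than a power of two */
  g.bits.__padding = 0;
  return g.word;
}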
302
303/**
304 * @brief A VLAN-to-bucket identifier.
305 *
306 * @ingroup setup
307 *
308 * This tells us what to do with a given VLAN.
309 */
310typedef netio_group_t netio_vlan_t;
311
312
313/**
314 * A bucket-to-queue mapping.
315 * @ingroup setup
316 */
317typedef unsigned char netio_bucket_t;
318
319
320/**
321 * A packet size can always fit in a netio_size_t.
322 * @ingroup setup
323 */
324typedef unsigned int netio_size_t;
325
326
327/**
328 * @brief Ethernet standard (ingress) packet metadata.
329 *
330 * @ingroup ingress
331 *
332 * This is additional data associated with each packet.
333 * This structure is opaque and accessed through the @ref ingress.
334 *
335 * Also, the buffer population operation currently assumes that standard
336 * metadata is at least as large as minimal metadata, and will need to be
337 * modified if that is no longer the case.
338 */
339typedef struct
340{
341#ifdef __DOXYGEN__
342 /** This structure is opaque. */
343 unsigned char opaque[24];
344#else
345 /** The overall ordinal of the packet */
346 unsigned int __packet_ordinal;
347 /** The ordinal of the packet within the group */
348 unsigned int __group_ordinal;
349 /** The best flow hash IPP could compute. */
350 unsigned int __flow_hash;
351 /** Flags pertaining to checksum calculation, packet type, etc. */
352 unsigned int __flags;
353 /** The first word of "user data". */
354 unsigned int __user_data_0;
355 /** The second word of "user data". */
356 unsigned int __user_data_1;
357#endif
358}
359netio_pkt_metadata_t;
360
361
362/** To ensure that the L3 header is aligned mod 4, the L2 header should be
363 * aligned mod 4 plus 2, since every supported L2 header is 4n + 2 bytes
364 * long. The standard way to do this is to simply add 2 bytes of padding
365 * before the L2 header.
366 */
367#define NETIO_PACKET_PADDING 2
368
369
370
371/**
372 * @brief Ethernet minimal (egress) packet metadata.
373 *
374 * @ingroup egress
375 *
376 * This structure represents information about packets which have
377 * been processed by @ref netio_populate_buffer() or
378 * @ref netio_populate_prepend_buffer(). This structure is opaque
379 * and accessed through the @ref egress.
380 *
381 * @internal This structure is actually copied into the memory used by
382 * standard metadata, which is assumed to be large enough.
383 */
384typedef struct
385{
386#ifdef __DOXYGEN__
387 /** This structure is opaque. */
388 unsigned char opaque[14];
389#else
390 /** The offset of the L2 header from the start of the packet data. */
391 unsigned short l2_offset;
392 /** The offset of the L3 header from the start of the packet data. */
393 unsigned short l3_offset;
394 /** Where to write the checksum. */
395 unsigned char csum_location;
396 /** Where to start checksumming from. */
397 unsigned char csum_start;
398 /** Flags pertaining to checksum calculation etc. */
399 unsigned short flags;
400 /** The L2 length of the packet. */
401 unsigned short l2_length;
402 /** The checksum with which to seed the checksum generator. */
403 unsigned short csum_seed;
404 /** How much to checksum. */
405 unsigned short csum_length;
406#endif
407}
408netio_pkt_minimal_metadata_t;
409
410
411#ifndef __DOXYGEN__
412
413/**
414 * @brief An I/O notification header.
415 *
416 * This is the first word of data received from an I/O shim in a notification
417 * packet. It contains framing and status information.
418 */
419typedef union
420{
421 unsigned int word; /**< The whole word. */
422 /** The various fields. */
423 struct
424 {
425 unsigned int __channel:7; /**< Resource channel. */
426 unsigned int __type:4; /**< Type. */
427 unsigned int __ack:1; /**< Whether an acknowledgement is needed. */
428 unsigned int __reserved:1; /**< Reserved. */
429 unsigned int __protocol:1; /**< A protocol-specific word is added. */
430 unsigned int __status:2; /**< Status of the transfer. */
431 unsigned int __framing:2; /**< Framing of the transfer. */
432 unsigned int __transfer_size:14; /**< Transfer size in bytes (total). */
433 } bits;
434}
435__netio_pkt_notif_t;
436
437
438/**
439 * Returns the base address of the packet.
440 */
441#define _NETIO_PKT_HANDLE_BASE(p) \
442 ((unsigned char*)((p).word & 0xFFFFFFC0))
443
444/**
445 * Returns the base address of the packet.
446 */
447#define _NETIO_PKT_BASE(p) \
448 _NETIO_PKT_HANDLE_BASE(p->__packet)
449
450/**
451 * @brief An I/O notification packet (second word)
452 *
453 * This is the second word of data received from an I/O shim in a notification
454 * packet. This is the virtual address of the packet buffer, plus some flag
455 * bits. (The virtual address of the packet is always 256-byte aligned so we
456 * have room for 8 bits' worth of flags in the low 8 bits.)
457 *
458 * @internal
459 * NOTE: The low two bits must contain "__queue", so the "packet size"
460 * (SIZE_SMALL, SIZE_LARGE, or SIZE_JUMBO) can be determined quickly.
461 *
462 * If __addr or __offset are moved, _NETIO_PKT_BASE
463 * (defined just above) must be changed.
464 */
465typedef union
466{
467 unsigned int word; /**< The whole word. */
468 /** The various fields. */
469 struct
470 {
471 /** Which queue the packet will be returned to once it is sent back to
472 the IPP. This is one of the SIZE_xxx values. */
473 unsigned int __queue:2;
474
475 /** The IPP handle of the sending IPP. */
476 unsigned int __ipp_handle:2;
477
478 /** Reserved for future use. */
479 unsigned int __reserved:1;
480
481 /** If 1, this packet has minimal (egress) metadata; otherwise, it
482 has standard (ingress) metadata. */
483 unsigned int __minimal:1;
484
485 /** Offset of the metadata within the packet. This value is multiplied
486 * by 64 and added to the base packet address to get the metadata
487 * address. Note that this field is aligned within the word such that
488 * you can easily extract the metadata address with a 26-bit mask. */
489 unsigned int __offset:2;
490
491 /** The top 24 bits of the packet's virtual address. */
492 unsigned int __addr:24;
493 } bits;
494}
495__netio_pkt_handle_t;
496
497#endif /* !__DOXYGEN__ */
498
499
500/**
501 * @brief A handle for an I/O packet's storage.
502 * @ingroup ingress
503 *
504 * netio_pkt_handle_t encodes the concept of a ::netio_pkt_t with its
505 * packet metadata removed. It is a much smaller type that exists to
506 * facilitate applications where the full ::netio_pkt_t type is too
507 * large, such as those that cache enormous numbers of packets or wish
508 * to transmit packet descriptors over the UDN.
509 *
510 * Because there is no metadata, most ::netio_pkt_t operations cannot be
511 * performed on a netio_pkt_handle_t. It supports only
512 * netio_free_handle() (to free the buffer) and
513 * NETIO_PKT_CUSTOM_DATA_H() (to access a pointer to its contents).
514 * The application must acquire any additional metadata it wants from the
515 * original ::netio_pkt_t and record it separately.
516 *
517 * A netio_pkt_handle_t can be extracted from a ::netio_pkt_t by calling
518 * NETIO_PKT_HANDLE(). An invalid handle (analogous to NULL) can be
519 * created by assigning the value ::NETIO_PKT_HANDLE_NONE. A handle can
520 * be tested for validity with NETIO_PKT_HANDLE_IS_VALID().
521 */
522typedef struct
523{
524 unsigned int word; /**< Opaque bits. */
525} netio_pkt_handle_t;
526
527/**
528 * @brief A packet descriptor.
529 *
530 * @ingroup ingress
531 * @ingroup egress
532 *
533 * This data structure represents a packet. The structure is manipulated
534 * through the @ref ingress and the @ref egress.
535 *
536 * While the contents of a netio_pkt_t are opaque, the structure itself is
537 * portable. This means that it may be shared between all tiles which have
538 * done a netio_input_register() call for the interface on which the pkt_t
539 * was initially received (via netio_get_packet()) or retrieved (via
540 * netio_get_buffer()). The contents of a netio_pkt_t can be transmitted to
541 * another tile via shared memory, or via a UDN message, or by other means.
542 * The destination tile may then use the pkt_t as if it had originally been
543 * received locally; it may read or write the packet's data, read its
544 * metadata, free the packet, send the packet, transfer the netio_pkt_t to
545 * yet another tile, and so forth.
546 *
547 * Once a netio_pkt_t has been transferred to a second tile, the first tile
548 * should not reference the original copy; in particular, if more than one
549 * tile frees or sends the same netio_pkt_t, the IPP's packet free lists will
550 * become corrupted. Note also that each tile which reads or modifies
551 * packet data must obey the memory coherency rules outlined in @ref input.
552 */
553typedef struct
554{
555#ifdef __DOXYGEN__
556 /** This structure is opaque. */
557 unsigned char opaque[32];
558#else
559 /** For an ingress packet (one with standard metadata), this is the
560 * notification header we got from the I/O shim. For an egress packet
561 * (one with minimal metadata), this word is zero if the packet has not
562 * been populated, and nonzero if it has. */
563 __netio_pkt_notif_t __notif_header;
564
565 /** Virtual address of the packet buffer, plus state flags. */
566 __netio_pkt_handle_t __packet;
567
568 /** Metadata associated with the packet. */
569 netio_pkt_metadata_t __metadata;
570#endif
571}
572netio_pkt_t;
573
574
575#ifndef __DOXYGEN__
576
577#define __NETIO_PKT_NOTIF_HEADER(pkt) ((pkt)->__notif_header)
578#define __NETIO_PKT_IPP_HANDLE(pkt) ((pkt)->__packet.bits.__ipp_handle)
579#define __NETIO_PKT_QUEUE(pkt) ((pkt)->__packet.bits.__queue)
580#define __NETIO_PKT_NOTIF_HEADER_M(mda, pkt) ((pkt)->__notif_header)
581#define __NETIO_PKT_IPP_HANDLE_M(mda, pkt) ((pkt)->__packet.bits.__ipp_handle)
582#define __NETIO_PKT_MINIMAL(pkt) ((pkt)->__packet.bits.__minimal)
583#define __NETIO_PKT_QUEUE_M(mda, pkt) ((pkt)->__packet.bits.__queue)
584#define __NETIO_PKT_FLAGS_M(mda, pkt) ((mda)->__flags)
585
586/* Packet information table, used by the attribute access functions below. */
587extern const uint16_t _netio_pkt_info[];
588
589#endif /* __DOXYGEN__ */
590
591
592#ifndef __DOXYGEN__
593/* These macros are deprecated and will disappear in a future MDE release. */
594#define NETIO_PKT_GOOD_CHECKSUM(pkt) \
595 NETIO_PKT_L4_CSUM_CORRECT(pkt)
596#define NETIO_PKT_GOOD_CHECKSUM_M(mda, pkt) \
597 NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt)
598#endif /* __DOXYGEN__ */
599
600
601/* Packet attribute access functions. */
602
603/** Return a pointer to the metadata for a packet.
604 * @ingroup ingress
605 *
606 * Calling this function once and passing the result to other retrieval
607 * functions with a "_M" suffix usually improves performance. This
608 * function must be called on an 'ingress' packet (i.e. one retrieved
609 * by @ref netio_get_packet(), on which neither @ref netio_populate_buffer()
610 * nor @ref netio_populate_prepend_buffer() has been called). Use of this
611 * function on an 'egress' packet will cause an assertion failure.
612 *
613 * @param[in] pkt Packet on which to operate.
614 * @return A pointer to the packet's standard metadata.
615 */
616static __inline netio_pkt_metadata_t*
617NETIO_PKT_METADATA(netio_pkt_t* pkt)
618{
619 netio_assert(!pkt->__packet.bits.__minimal);
620 return &pkt->__metadata;
621}
622
623
624/** Return a pointer to the minimal metadata for a packet.
625 * @ingroup egress
626 *
627 * Calling this function once and passing the result to other retrieval
628 * functions with a "_MM" suffix usually improves performance. This
629 * function must be called on an 'egress' packet (i.e. one on which
630 * @ref netio_populate_buffer() or @ref netio_populate_prepend_buffer()
631 * has been called, or one retrieved by @ref netio_get_buffer()). Use of
632 * this function on an 'ingress' packet will cause an assertion failure.
633 *
634 * @param[in] pkt Packet on which to operate.
635 * @return A pointer to the packet's minimal metadata.
636 */
637static __inline netio_pkt_minimal_metadata_t*
638NETIO_PKT_MINIMAL_METADATA(netio_pkt_t* pkt)
639{
640 netio_assert(pkt->__packet.bits.__minimal);
641 return (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
642}
643
644
645/** Determine whether a packet has 'minimal' metadata.
646 * @ingroup pktfuncs
647 *
648 * This function will return nonzero if the packet is an 'egress'
649 * packet (i.e. one on which @ref netio_populate_buffer() or
650 * @ref netio_populate_prepend_buffer() has been called, or one
651 * retrieved by @ref netio_get_buffer()), and zero if the packet
652 * is an 'ingress' packet (i.e. one retrieved by @ref netio_get_packet(),
653 * which has not been converted into an 'egress' packet).
654 *
655 * @param[in] pkt Packet on which to operate.
656 * @return Nonzero if the packet has minimal metadata.
657 */
658static __inline unsigned int
659NETIO_PKT_IS_MINIMAL(netio_pkt_t* pkt)
660{
661 return pkt->__packet.bits.__minimal;
662}
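Taken together, the intended pattern is to test NETIO_PKT_IS_MINIMAL() (or know the packet's provenance), fetch the metadata pointer once, and hand it to the "_M" accessors; a sketch, using accessors defined later in this header:

static __inline void
example_inspect(netio_pkt_t* pkt)
{
  if (!NETIO_PKT_IS_MINIMAL(pkt))
  {
    netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
    unsigned int hash = NETIO_PKT_FLOW_HASH_M(mda, pkt);
    unsigned char* l3 = NETIO_PKT_L3_DATA_M(mda, pkt);
    /* ... examine the packet ... */
    (void)hash; (void)l3;
  }
}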
663
664
665/** Return a handle for a packet's storage.
666 * @ingroup pktfuncs
667 *
668 * @param[in] pkt Packet on which to operate.
669 * @return A handle for the packet's storage.
670 */
671static __inline netio_pkt_handle_t
672NETIO_PKT_HANDLE(netio_pkt_t* pkt)
673{
674 netio_pkt_handle_t h;
675 h.word = pkt->__packet.word;
676 return h;
677}
678
679
680/** A special reserved value indicating the absence of a packet handle.
681 *
682 * @ingroup pktfuncs
683 */
684#define NETIO_PKT_HANDLE_NONE ((netio_pkt_handle_t) { 0 })
685
686
687/** Test whether a packet handle is valid.
688 *
689 * Applications may wish to use the reserved value NETIO_PKT_HANDLE_NONE
690 * to indicate no packet at all. This function tests to see if a packet
691 * handle is a real handle, not this special reserved value.
692 *
693 * @ingroup pktfuncs
694 *
695 * @param[in] handle Handle on which to operate.
696 * @return One if the packet handle is valid, else zero.
697 */
698static __inline unsigned int
699NETIO_PKT_HANDLE_IS_VALID(netio_pkt_handle_t handle)
700{
701 return handle.word != 0;
702}
703
704
705
706/** Return a pointer to the start of the packet's custom header.
707 * A custom header may or may not be present, depending upon the IPP; its
708 * contents and alignment are also IPP-dependent. Currently, none of the
709 * standard IPPs supplied by Tilera produce a custom header. If present,
710 * the custom header precedes the L2 header in the packet buffer.
711 * @ingroup ingress
712 *
713 * @param[in] handle Handle on which to operate.
714 * @return A pointer to the start of the packet.
715 */
716static __inline unsigned char*
717NETIO_PKT_CUSTOM_DATA_H(netio_pkt_handle_t handle)
718{
719 return _NETIO_PKT_HANDLE_BASE(handle) + NETIO_PACKET_PADDING;
720}
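A sketch of the caching pattern described at netio_pkt_handle_t above: stash only the handle, use NETIO_PKT_HANDLE_NONE for empty slots, and recover the data pointer on demand (the eventual netio_free_handle() call is declared elsewhere):

static __inline unsigned char*
example_peek(netio_pkt_handle_t slot)
{
  if (!NETIO_PKT_HANDLE_IS_VALID(slot))   /* slot may hold ..._NONE */
    return 0;
  return NETIO_PKT_CUSTOM_DATA_H(slot);
}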
721
722
723/** Return the length of the packet's custom header.
724 * A custom header may or may not be present, depending upon the IPP; its
725 * contents and alignment are also IPP-dependent. Currently, none of the
726 * standard IPPs supplied by Tilera produce a custom header. If present,
727 * the custom header precedes the L2 header in the packet buffer.
728 *
729 * @ingroup ingress
730 *
731 * @param[in] mda Pointer to packet's standard metadata.
732 * @param[in] pkt Packet on which to operate.
733 * @return The length of the packet's custom header, in bytes.
734 */
735static __inline netio_size_t
736NETIO_PKT_CUSTOM_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
737{
738 /*
739 * Note that we effectively need to extract a quantity from the flags word
740 * which is measured in words, and then turn it into bytes by shifting
741 * it left by 2. We do this all at once by just shifting right two less
742 * bits, and shifting the mask up two bits.
743 */
744 return ((mda->__flags >> (_NETIO_PKT_CUSTOM_LEN_SHIFT - 2)) &
745 (_NETIO_PKT_CUSTOM_LEN_RMASK << 2));
746}
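Concretely, with _NETIO_PKT_CUSTOM_LEN_SHIFT == 11: a 3-word custom header sits in __flags as 3 << 11 = 0x1800; shifting right by 11 - 2 = 9 gives 0xC = 12 bytes, and the widened mask (0x1F << 2 = 0x7C) passes that value through unchanged.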
747
748
749/** Return the length of the packet, starting with the custom header.
750 * A custom header may or may not be present, depending upon the IPP; its
751 * contents and alignment are also IPP-dependent. Currently, none of the
752 * standard IPPs supplied by Tilera produce a custom header. If present,
753 * the custom header precedes the L2 header in the packet buffer.
754 * @ingroup ingress
755 *
756 * @param[in] mda Pointer to packet's standard metadata.
757 * @param[in] pkt Packet on which to operate.
758 * @return The length of the packet, in bytes.
759 */
760static __inline netio_size_t
761NETIO_PKT_CUSTOM_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
762{
763 return (__NETIO_PKT_NOTIF_HEADER(pkt).bits.__transfer_size -
764 NETIO_PACKET_PADDING);
765}
766
767
768/** Return a pointer to the start of the packet's custom header.
769 * A custom header may or may not be present, depending upon the IPP; its
770 * contents and alignment are also IPP-dependent. Currently, none of the
771 * standard IPPs supplied by Tilera produce a custom header. If present,
772 * the custom header precedes the L2 header in the packet buffer.
773 * @ingroup ingress
774 *
775 * @param[in] mda Pointer to packet's standard metadata.
776 * @param[in] pkt Packet on which to operate.
777 * @return A pointer to the start of the packet.
778 */
779static __inline unsigned char*
780NETIO_PKT_CUSTOM_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
781{
782 return NETIO_PKT_CUSTOM_DATA_H(NETIO_PKT_HANDLE(pkt));
783}
784
785
786/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
787 * @ingroup ingress
788 *
789 * @param[in] mda Pointer to packet's standard metadata.
790 * @param[in] pkt Packet on which to operate.
791 * @return The length of the packet's L2 header, in bytes.
792 */
793static __inline netio_size_t
794NETIO_PKT_L2_HEADER_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
795{
796 /*
797 * Note that we effectively need to extract a quantity from the flags word
798 * which is measured in words, and then turn it into bytes by shifting
799 * it left by 2. We do this all at once by just shifting right two less
800 * bits, and shifting the mask up two bits. We then add two bytes.
801 */
802 return ((mda->__flags >> (_NETIO_PKT_L2_LEN_SHIFT - 2)) &
803 (_NETIO_PKT_L2_LEN_RMASK << 2)) + 2;
804}
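(Concretely: a plain 14-byte Ethernet header is stored as 3 words, since 3 * 4 + 2 = 14, and a VLAN-tagged 18-byte header as 4 words, since 4 * 4 + 2 = 18, matching the 4n + 2 rule noted at NETIO_PACKET_PADDING.)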
805
806
807/** Return the length of the packet, starting with the L2 (Ethernet) header.
808 * @ingroup ingress
809 *
810 * @param[in] mda Pointer to packet's standard metadata.
811 * @param[in] pkt Packet on which to operate.
812 * @return The length of the packet, in bytes.
813 */
814static __inline netio_size_t
815NETIO_PKT_L2_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
816{
817 return (NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt) -
818 NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda,pkt));
819}
820
821
822/** Return a pointer to the start of the packet's L2 (Ethernet) header.
823 * @ingroup ingress
824 *
825 * @param[in] mda Pointer to packet's standard metadata.
826 * @param[in] pkt Packet on which to operate.
827 * @return A pointer to the start of the packet's L2 header.
828 */
829static __inline unsigned char*
830NETIO_PKT_L2_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
831{
832 return (NETIO_PKT_CUSTOM_DATA_M(mda, pkt) +
833 NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt));
834}
835
836
837/** Retrieve the length of the packet, starting with the L3 (generally,
838 * the IP) header.
839 * @ingroup ingress
840 *
841 * @param[in] mda Pointer to packet's standard metadata.
842 * @param[in] pkt Packet on which to operate.
843 * @return Length of the packet's L3 header and data, in bytes.
844 */
845static __inline netio_size_t
846NETIO_PKT_L3_LENGTH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
847{
848 return (NETIO_PKT_L2_LENGTH_M(mda, pkt) -
849 NETIO_PKT_L2_HEADER_LENGTH_M(mda,pkt));
850}
851
852
853/** Return a pointer to the packet's L3 (generally, the IP) header.
854 * @ingroup ingress
855 *
856 * Note that we guarantee word alignment of the L3 header.
857 *
858 * @param[in] mda Pointer to packet's standard metadata.
859 * @param[in] pkt Packet on which to operate.
860 * @return A pointer to the packet's L3 header.
861 */
862static __inline unsigned char*
863NETIO_PKT_L3_DATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
864{
865 return (NETIO_PKT_L2_DATA_M(mda, pkt) +
866 NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt));
867}
868
869
870/** Return the ordinal of the packet.
871 * @ingroup ingress
872 *
873 * Each packet is given an ordinal number when it is delivered by the IPP.
874 * In the medium term, the ordinal is unique and monotonically increasing,
875 * being incremented by 1 for each packet; the ordinal of the first packet
876 * delivered after the IPP starts is zero. (Since the ordinal is of finite
877 * size, given enough input packets, it will eventually wrap around to zero;
878 * in the long term, therefore, ordinals are not unique.) The ordinals
879 * handed out by different IPPs are not disjoint, so two packets from
880 * different IPPs may have identical ordinals. Packets dropped by the
881 * IPP or by the I/O shim are not assigned ordinals.
882 *
883 * @param[in] mda Pointer to packet's standard metadata.
884 * @param[in] pkt Packet on which to operate.
885 * @return The packet's per-IPP packet ordinal.
886 */
887static __inline unsigned int
888NETIO_PKT_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
889{
890 return mda->__packet_ordinal;
891}
892
893
894/** Return the per-group ordinal of the packet.
895 * @ingroup ingress
896 *
897 * Each packet is given a per-group ordinal number when it is
898 * delivered by the IPP. By default, the group is the packet's VLAN,
899 * although IPP can be recompiled to use different values. In
900 * the medium term, the ordinal is unique and monotonically
901 * increasing, being incremented by 1 for each packet; the ordinal of
902 * the first packet distributed to a particular group is zero.
903 * (Since the ordinal is of finite size, given enough input packets,
904 * it will eventually wrap around to zero; in the long term,
905 * therefore, ordinals are not unique.) The ordinals handed out by
906 * different IPPs are not disjoint, so two packets from different IPPs
907 * may have identical ordinals; similarly, packets distributed to
908 * different groups may have identical ordinals. Packets dropped by
909 * the IPP or by the I/O shim are not assigned ordinals.
910 *
911 * @param[in] mda Pointer to packet's standard metadata.
912 * @param[in] pkt Packet on which to operate.
913 * @return The packet's per-IPP, per-group ordinal.
914 */
915static __inline unsigned int
916NETIO_PKT_GROUP_ORDINAL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
917{
918 return mda->__group_ordinal;
919}
920
921
922/** Return the VLAN ID assigned to the packet.
923 * @ingroup ingress
924 *
925 * This value is usually contained within the packet header.
926 *
927 * This value will be zero if the packet does not have a VLAN tag, or if
928 * this value was not extracted from the packet.
929 *
930 * @param[in] mda Pointer to packet's standard metadata.
931 * @param[in] pkt Packet on which to operate.
932 * @return The packet's VLAN ID.
933 */
934static __inline unsigned short
935NETIO_PKT_VLAN_ID_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
936{
937 int vl = (mda->__flags >> _NETIO_PKT_VLAN_SHIFT) & _NETIO_PKT_VLAN_RMASK;
938 unsigned short* pkt_p;
939 int index;
940 unsigned short val;
941
942 if (vl == _NETIO_PKT_VLAN_NONE)
943 return 0;
944
945 pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
946 index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
947
948 val = pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_VLAN_SHIFT) &
949 _NETIO_PKT_INFO_VLAN_RMASK];
950
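  /* val holds the halfword containing the VLAN ID, exactly as it appeared
   * on the wire (network byte order); byte-swap it and keep the low 12
   * bits of the result, which are the VLAN ID. */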
951#ifdef __TILECC__
952 return (__insn_bytex(val) >> 16) & 0xFFF;
953#else
954 return (__builtin_bswap32(val) >> 16) & 0xFFF;
955#endif
956}
957
958
959/** Return the ethertype of the packet.
960 * @ingroup ingress
961 *
962 * This value is usually contained within the packet header.
963 *
964 * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED_M()
965 * returns true; otherwise, it may not be well defined.
966 *
967 * @param[in] mda Pointer to packet's standard metadata.
968 * @param[in] pkt Packet on which to operate.
969 * @return The packet's ethertype.
970 */
971static __inline unsigned short
972NETIO_PKT_ETHERTYPE_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
973{
974 unsigned short* pkt_p = (unsigned short*) NETIO_PKT_L2_DATA_M(mda, pkt);
975 int index = (mda->__flags >> _NETIO_PKT_TYPE_SHIFT) & _NETIO_PKT_TYPE_RMASK;
976
977 unsigned short val =
978 pkt_p[(_netio_pkt_info[index] >> _NETIO_PKT_INFO_ETYPE_SHIFT) &
979 _NETIO_PKT_INFO_ETYPE_RMASK];
980
981 return __builtin_bswap32(val) >> 16;
982}
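A sketch of coarse dispatch on this value, guarded as the comment above requires (NETIO_PKT_ETHERTYPE_RECOGNIZED_M() is defined further below):

static __inline int
example_is_ip(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  if (!NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt))
    return 0;                           /* ethertype not meaningful */
  switch (NETIO_PKT_ETHERTYPE_M(mda, pkt))
  {
  case ETHERTYPE_IPv4:
  case ETHERTYPE_IPv6:
    return 1;
  default:
    return 0;
  }
}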
983
984
985/** Return the flow hash computed on the packet.
986 * @ingroup ingress
987 *
988 * For TCP and UDP packets, this hash is calculated by hashing together
989 * the "5-tuple" values, specifically the source IP address, destination
990 * IP address, protocol type, source port and destination port.
991 * The hash value is intended to be helpful for millions of distinct
992 * flows.
993 *
994 * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
995 * derived by hashing together the source and destination IP addresses.
996 *
997 * For MPLS-encapsulated packets, the flow hash is derived by hashing
998 * the first MPLS label.
999 *
1000 * For all other packets the flow hash is computed from the source
1001 * and destination Ethernet addresses.
1002 *
1003 * The hash is symmetric, meaning it produces the same value if the
1004 * source and destination are swapped. The only exceptions are
1005 * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
1006 * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
1007 * (Encap Security Payload), which use only the destination address
1008 * since the source address is not meaningful.
1009 *
1010 * @param[in] mda Pointer to packet's standard metadata.
1011 * @param[in] pkt Packet on which to operate.
1012 * @return The packet's 32-bit flow hash.
1013 */
1014static __inline unsigned int
1015NETIO_PKT_FLOW_HASH_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1016{
1017 return mda->__flow_hash;
1018}
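Because the hash is stable for a given flow, a common use is affinitizing flows to workers; a sketch with a hypothetical power-of-two worker count:

#define EXAMPLE_NUM_WORKERS 8   /* hypothetical */

static __inline unsigned int
example_pick_worker(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  return NETIO_PKT_FLOW_HASH_M(mda, pkt) & (EXAMPLE_NUM_WORKERS - 1);
}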
1019
1020
1021/** Return the first word of "user data" for the packet.
1022 *
1023 * The contents of the user data words depend on the IPP.
1024 *
1025 * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
1026 * word of user data contains the least significant bits of the 64-bit
1027 * arrival cycle count (see @c get_cycle_count_low()).
1028 *
1029 * See the <em>System Programmer's Guide</em> for details.
1030 *
1031 * @ingroup ingress
1032 *
1033 * @param[in] mda Pointer to packet's standard metadata.
1034 * @param[in] pkt Packet on which to operate.
1035 * @return The packet's first word of "user data".
1036 */
1037static __inline unsigned int
1038NETIO_PKT_USER_DATA_0_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1039{
1040 return mda->__user_data_0;
1041}
1042
1043
1044/** Return the second word of "user data" for the packet.
1045 *
1046 * The contents of the user data words depend on the IPP.
1047 *
1048 * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
1049 * word of user data contains the most significant bits of the 64-bit
1050 * arrival cycle count (see @c get_cycle_count_high()).
1051 *
1052 * See the <em>System Programmer's Guide</em> for details.
1053 *
1054 * @ingroup ingress
1055 *
1056 * @param[in] mda Pointer to packet's standard metadata.
1057 * @param[in] pkt Packet on which to operate.
1058 * @return The packet's second word of "user data".
1059 */
1060static __inline unsigned int
1061NETIO_PKT_USER_DATA_1_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1062{
1063 return mda->__user_data_1;
1064}
1065
1066
1067/** Determine whether the L4 (TCP/UDP) checksum was calculated.
1068 * @ingroup ingress
1069 *
1070 * @param[in] mda Pointer to packet's standard metadata.
1071 * @param[in] pkt Packet on which to operate.
1072 * @return Nonzero if the L4 checksum was calculated.
1073 */
1074static __inline unsigned int
1075NETIO_PKT_L4_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1076{
1077 return !(mda->__flags & _NETIO_PKT_NO_L4_CSUM_MASK);
1078}
1079
1080
1081/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
1082 * be correct.
1083 * @ingroup ingress
1084 *
1085 * @param[in] mda Pointer to packet's standard metadata.
1086 * @param[in] pkt Packet on which to operate.
1087 * @return Nonzero if the checksum was calculated and is correct.
1088 */
1089static __inline unsigned int
1090NETIO_PKT_L4_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1091{
1092 return !(mda->__flags &
1093 (_NETIO_PKT_BAD_L4_CSUM_MASK | _NETIO_PKT_NO_L4_CSUM_MASK));
1094}
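A sketch of how the two L4 predicates combine: trust the IPP's verdict when the checksum was calculated, and fall back to software only otherwise (example_sw_verify_l4() is hypothetical, not part of this API):

extern int example_sw_verify_l4(netio_pkt_t* pkt);  /* hypothetical */

static __inline int
example_l4_ok(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  if (NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt))
    return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt);
  return example_sw_verify_l4(pkt);
}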
1095
1096
1097/** Determine whether the L3 (IP) checksum was calculated.
1098 * @ingroup ingress
1099 *
1100 * @param[in] mda Pointer to packet's standard metadata.
1101 * @param[in] pkt Packet on which to operate.
1102 * @return Nonzero if the L3 (IP) checksum was calculated.
1103 */
1104static __inline unsigned int
1105NETIO_PKT_L3_CSUM_CALCULATED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1106{
1107 return !(mda->__flags & _NETIO_PKT_NO_L3_CSUM_MASK);
1108}
1109
1110
1111/** Determine whether the L3 (IP) checksum was calculated and found to be
1112 * correct.
1113 * @ingroup ingress
1114 *
1115 * @param[in] mda Pointer to packet's standard metadata.
1116 * @param[in] pkt Packet on which to operate.
1117 * @return Nonzero if the checksum was calculated and is correct.
1118 */
1119static __inline unsigned int
1120NETIO_PKT_L3_CSUM_CORRECT_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1121{
1122 return !(mda->__flags &
1123 (_NETIO_PKT_BAD_L3_CSUM_MASK | _NETIO_PKT_NO_L3_CSUM_MASK));
1124}
1125
1126
1127/** Determine whether the ethertype was recognized and L3 packet data was
1128 * processed.
1129 * @ingroup ingress
1130 *
1131 * @param[in] mda Pointer to packet's standard metadata.
1132 * @param[in] pkt Packet on which to operate.
1133 * @return Nonzero if the ethertype was recognized and L3 packet data was
1134 * processed.
1135 */
1136static __inline unsigned int
1137NETIO_PKT_ETHERTYPE_RECOGNIZED_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1138{
1139 return !(mda->__flags & _NETIO_PKT_TYPE_UNRECOGNIZED_MASK);
1140}
1141
1142
1143/** Retrieve the status of a packet and any errors that may have occurred
1144 * during ingress processing (length mismatches, CRC errors, etc.).
1145 * @ingroup ingress
1146 *
1147 * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
1148 * returns zero are always reported as underlength, as there is no a priori
1149 * means to determine their length. Normally, applications should use
1150 * @ref NETIO_PKT_BAD_M() instead of explicitly checking status with this
1151 * function.
1152 *
1153 * @param[in] mda Pointer to packet's standard metadata.
1154 * @param[in] pkt Packet on which to operate.
1155 * @return The packet's status.
1156 */
1157static __inline netio_pkt_status_t
1158NETIO_PKT_STATUS_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1159{
1160 return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
1161}
1162
1163
1164/** Report whether a packet is bad (i.e., was shorter than expected based on
1165 * its headers, or had a bad CRC).
1166 * @ingroup ingress
1167 *
1168 * Note that this function does not verify L3 or L4 checksums.
1169 *
1170 * @param[in] mda Pointer to packet's standard metadata.
1171 * @param[in] pkt Packet on which to operate.
1172 * @return Nonzero if the packet is bad and should be discarded.
1173 */
1174static __inline unsigned int
1175NETIO_PKT_BAD_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1176{
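  /* NETIO_PKT_STATUS_UNDERSIZE (1) and NETIO_PKT_STATUS_BAD (3) both have
   * bit 0 set, while _OK (0) and _OVERSIZE (2) do not, so "status & 1"
   * picks out exactly the two problem cases; the second clause then
   * excuses UNDERSIZE when the ethertype was not recognized. */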
1177 return ((NETIO_PKT_STATUS_M(mda, pkt) & 1) &&
1178 (NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt) ||
1179 NETIO_PKT_STATUS_M(mda, pkt) == NETIO_PKT_STATUS_BAD));
1180}
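In practice one call therefore suffices on the receive path; a sketch (the receive and buffer-free calls are declared elsewhere):

static __inline int
example_should_process(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
{
  /* NETIO_PKT_BAD_M() already folds in the "undersize is acceptable when
   * the ethertype is unrecognized" rule, so no extra status test is
   * needed before handing the packet on. */
  return !NETIO_PKT_BAD_M(mda, pkt);
}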
1181
1182
1183/** Return the length of the packet, starting with the L2 (Ethernet) header.
1184 * @ingroup egress
1185 *
1186 * @param[in] mmd Pointer to packet's minimal metadata.
1187 * @param[in] pkt Packet on which to operate.
1188 * @return The length of the packet, in bytes.
1189 */
1190static __inline netio_size_t
1191NETIO_PKT_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
1192{
1193 return mmd->l2_length;
1194}
1195
1196
1197/** Return the length of the L2 (Ethernet) header.
1198 * @ingroup egress
1199 *
1200 * @param[in] mmd Pointer to packet's minimal metadata.
1201 * @param[in] pkt Packet on which to operate.
1202 * @return The length of the packet's L2 header, in bytes.
1203 */
1204static __inline netio_size_t
1205NETIO_PKT_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
1206 netio_pkt_t* pkt)
1207{
1208 return mmd->l3_offset - mmd->l2_offset;
1209}
1210
1211
1212/** Return the length of the packet, starting with the L3 (IP) header.
1213 * @ingroup egress
1214 *
1215 * @param[in] mmd Pointer to packet's minimal metadata.
1216 * @param[in] pkt Packet on which to operate.
1217 * @return Length of the packet's L3 header and data, in bytes.
1218 */
1219static __inline netio_size_t
1220NETIO_PKT_L3_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
1221{
1222 return (NETIO_PKT_L2_LENGTH_MM(mmd, pkt) -
1223 NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt));
1224}
1225
1226
1227/** Return a pointer to the packet's L3 (generally, the IP) header.
1228 * @ingroup egress
1229 *
1230 * Note that we guarantee word alignment of the L3 header.
1231 *
1232 * @param[in] mmd Pointer to packet's minimal metadata.
1233 * @param[in] pkt Packet on which to operate.
1234 * @return A pointer to the packet's L3 header.
1235 */
1236static __inline unsigned char*
1237NETIO_PKT_L3_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
1238{
1239 return _NETIO_PKT_BASE(pkt) + mmd->l3_offset;
1240}
1241
1242
1243/** Return a pointer to the packet's L2 (Ethernet) header.
1244 * @ingroup egress
1245 *
1246 * @param[in] mmd Pointer to packet's minimal metadata.
1247 * @param[in] pkt Packet on which to operate.
1248 * @return A pointer to the start of the packet's L2 header.
1249 */
1250static __inline unsigned char*
1251NETIO_PKT_L2_DATA_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
1252{
1253 return _NETIO_PKT_BASE(pkt) + mmd->l2_offset;
1254}
1255
1256
1257/** Retrieve the status of a packet and any errors that may have occurred
1258 * during ingress processing (length mismatches, CRC errors, etc.).
1259 * @ingroup ingress
1260 *
1261 * Note that packets for which @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
1262 * returns zero are always reported as underlength, as there is no a priori
1263 * means to determine their length. Normally, applications should use
1264 * @ref NETIO_PKT_BAD() instead of explicitly checking status with this
1265 * function.
1266 *
1267 * @param[in] pkt Packet on which to operate.
1268 * @return The packet's status.
1269 */
1270static __inline netio_pkt_status_t
1271NETIO_PKT_STATUS(netio_pkt_t* pkt)
1272{
1273 netio_assert(!pkt->__packet.bits.__minimal);
1274
1275 return (netio_pkt_status_t) __NETIO_PKT_NOTIF_HEADER(pkt).bits.__status;
1276}
1277
1278
1279/** Report whether a packet is bad (i.e., was shorter than expected based on
1280 * its headers, or had a bad CRC).
1281 * @ingroup ingress
1282 *
1283 * Note that this function does not verify L3 or L4 checksums.
1284 *
1285 * @param[in] pkt Packet on which to operate.
1286 * @return Nonzero if the packet is bad and should be discarded.
1287 */
1288static __inline unsigned int
1289NETIO_PKT_BAD(netio_pkt_t* pkt)
1290{
1291 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1292
1293 return NETIO_PKT_BAD_M(mda, pkt);
1294}
1295
1296
1297/** Return the length of the packet's custom header.
1298 * A custom header may or may not be present, depending upon the IPP; its
1299 * contents and alignment are also IPP-dependent. Currently, none of the
1300 * standard IPPs supplied by Tilera produce a custom header. If present,
1301 * the custom header precedes the L2 header in the packet buffer.
1302 * @ingroup pktfuncs
1303 *
1304 * @param[in] pkt Packet on which to operate.
1305 * @return The length of the packet's custom header, in bytes.
1306 */
1307static __inline netio_size_t
1308NETIO_PKT_CUSTOM_HEADER_LENGTH(netio_pkt_t* pkt)
1309{
1310 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1311
1312 return NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
1313}
1314
1315
1316/** Return the length of the packet, starting with the custom header.
1317 * A custom header may or may not be present, depending upon the IPP; its
1318 * contents and alignment are also IPP-dependent. Currently, none of the
1319 * standard IPPs supplied by Tilera produce a custom header. If present,
1320 * the custom header precedes the L2 header in the packet buffer.
1321 * @ingroup pktfuncs
1322 *
1323 * @param[in] pkt Packet on which to operate.
1324 * @return The length of the packet, in bytes.
1325 */
1326static __inline netio_size_t
1327NETIO_PKT_CUSTOM_LENGTH(netio_pkt_t* pkt)
1328{
1329 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1330
1331 return NETIO_PKT_CUSTOM_LENGTH_M(mda, pkt);
1332}
1333
1334
1335/** Return a pointer to the packet's custom header.
1336 * A custom header may or may not be present, depending upon the IPP; its
1337 * contents and alignment are also IPP-dependent. Currently, none of the
1338 * standard IPPs supplied by Tilera produce a custom header. If present,
1339 * the custom header precedes the L2 header in the packet buffer.
1340 * @ingroup pktfuncs
1341 *
1342 * @param[in] pkt Packet on which to operate.
1343 * @return A pointer to the start of the packet.
1344 */
1345static __inline unsigned char*
1346NETIO_PKT_CUSTOM_DATA(netio_pkt_t* pkt)
1347{
1348 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1349
1350 return NETIO_PKT_CUSTOM_DATA_M(mda, pkt);
1351}
1352
1353
1354/** Return the length of the packet's L2 (Ethernet plus VLAN or SNAP) header.
1355 * @ingroup pktfuncs
1356 *
1357 * @param[in] pkt Packet on which to operate.
1358 * @return The length of the packet's L2 header, in bytes.
1359 */
1360static __inline netio_size_t
1361NETIO_PKT_L2_HEADER_LENGTH(netio_pkt_t* pkt)
1362{
1363 if (NETIO_PKT_IS_MINIMAL(pkt))
1364 {
1365 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1366
1367 return NETIO_PKT_L2_HEADER_LENGTH_MM(mmd, pkt);
1368 }
1369 else
1370 {
1371 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1372
1373 return NETIO_PKT_L2_HEADER_LENGTH_M(mda, pkt);
1374 }
1375}
1376
1377
1378/** Return the length of the packet, starting with the L2 (Ethernet) header.
1379 * @ingroup pktfuncs
1380 *
1381 * @param[in] pkt Packet on which to operate.
1382 * @return The length of the packet, in bytes.
1383 */
1384static __inline netio_size_t
1385NETIO_PKT_L2_LENGTH(netio_pkt_t* pkt)
1386{
1387 if (NETIO_PKT_IS_MINIMAL(pkt))
1388 {
1389 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1390
1391 return NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
1392 }
1393 else
1394 {
1395 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1396
1397 return NETIO_PKT_L2_LENGTH_M(mda, pkt);
1398 }
1399}
1400
1401
1402/** Return a pointer to the packet's L2 (Ethernet) header.
1403 * @ingroup pktfuncs
1404 *
1405 * @param[in] pkt Packet on which to operate.
1406 * @return A pointer to the start of the packet's L2 header.
1407 */
1408static __inline unsigned char*
1409NETIO_PKT_L2_DATA(netio_pkt_t* pkt)
1410{
1411 if (NETIO_PKT_IS_MINIMAL(pkt))
1412 {
1413 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1414
1415 return NETIO_PKT_L2_DATA_MM(mmd, pkt);
1416 }
1417 else
1418 {
1419 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1420
1421 return NETIO_PKT_L2_DATA_M(mda, pkt);
1422 }
1423}
1424
1425
1426/** Retrieve the length of the packet, starting with the L3 (generally, the IP)
1427 * header.
1428 * @ingroup pktfuncs
1429 *
1430 * @param[in] pkt Packet on which to operate.
1431 * @return Length of the packet's L3 header and data, in bytes.
1432 */
1433static __inline netio_size_t
1434NETIO_PKT_L3_LENGTH(netio_pkt_t* pkt)
1435{
1436 if (NETIO_PKT_IS_MINIMAL(pkt))
1437 {
1438 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1439
1440 return NETIO_PKT_L3_LENGTH_MM(mmd, pkt);
1441 }
1442 else
1443 {
1444 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1445
1446 return NETIO_PKT_L3_LENGTH_M(mda, pkt);
1447 }
1448}
1449
1450
1451/** Return a pointer to the packet's L3 (generally, the IP) header.
1452 * @ingroup pktfuncs
1453 *
1454 * Note that we guarantee word alignment of the L3 header.
1455 *
1456 * @param[in] pkt Packet on which to operate.
1457 * @return A pointer to the packet's L3 header.
1458 */
1459static __inline unsigned char*
1460NETIO_PKT_L3_DATA(netio_pkt_t* pkt)
1461{
1462 if (NETIO_PKT_IS_MINIMAL(pkt))
1463 {
1464 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1465
1466 return NETIO_PKT_L3_DATA_MM(mmd, pkt);
1467 }
1468 else
1469 {
1470 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1471
1472 return NETIO_PKT_L3_DATA_M(mda, pkt);
1473 }
1474}
1475
1476
1477/** Return the ordinal of the packet.
1478 * @ingroup ingress
1479 *
1480 * Each packet is given an ordinal number when it is delivered by the IPP.
1481 * In the medium term, the ordinal is unique and monotonically increasing,
1482 * being incremented by 1 for each packet; the ordinal of the first packet
1483 * delivered after the IPP starts is zero. (Since the ordinal is of finite
1484 * size, given enough input packets, it will eventually wrap around to zero;
1485 * in the long term, therefore, ordinals are not unique.) The ordinals
1486 * handed out by different IPPs are not disjoint, so two packets from
1487 * different IPPs may have identical ordinals. Packets dropped by the
1488 * IPP or by the I/O shim are not assigned ordinals.
1489 *
1490 *
1491 * @param[in] pkt Packet on which to operate.
1492 * @return The packet's per-IPP packet ordinal.
1493 */
1494static __inline unsigned int
1495NETIO_PKT_ORDINAL(netio_pkt_t* pkt)
1496{
1497 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1498
1499 return NETIO_PKT_ORDINAL_M(mda, pkt);
1500}
1501
1502
1503/** Return the per-group ordinal of the packet.
1504 * @ingroup ingress
1505 *
1506 * Each packet is given a per-group ordinal number when it is
1507 * delivered by the IPP. By default, the group is the packet's VLAN,
1508 * although IPP can be recompiled to use different values. In
1509 * the medium term, the ordinal is unique and monotonically
1510 * increasing, being incremented by 1 for each packet; the ordinal of
1511 * the first packet distributed to a particular group is zero.
1512 * (Since the ordinal is of finite size, given enough input packets,
1513 * it will eventually wrap around to zero; in the long term,
1514 * therefore, ordinals are not unique.) The ordinals handed out by
1515 * different IPPs are not disjoint, so two packets from different IPPs
1516 * may have identical ordinals; similarly, packets distributed to
1517 * different groups may have identical ordinals. Packets dropped by
1518 * the IPP or by the I/O shim are not assigned ordinals.
1519 *
1520 * @param[in] pkt Packet on which to operate.
1521 * @return The packet's per-IPP, per-group ordinal.
1522 */
1523static __inline unsigned int
1524NETIO_PKT_GROUP_ORDINAL(netio_pkt_t* pkt)
1525{
1526 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1527
1528 return NETIO_PKT_GROUP_ORDINAL_M(mda, pkt);
1529}
1530
1531
1532/** Return the VLAN ID assigned to the packet.
1533 * @ingroup ingress
1534 *
1535 * This is usually also contained within the packet header. If the packet
1536 * does not have a VLAN tag, the VLAN ID returned by this function is zero.
1537 *
1538 * @param[in] pkt Packet on which to operate.
1539 * @return The packet's VLAN ID.
1540 */
1541static __inline unsigned short
1542NETIO_PKT_VLAN_ID(netio_pkt_t* pkt)
1543{
1544 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1545
1546 return NETIO_PKT_VLAN_ID_M(mda, pkt);
1547}
1548
1549
1550/** Return the ethertype of the packet.
1551 * @ingroup ingress
1552 *
1553 * This value is reliable if @ref NETIO_PKT_ETHERTYPE_RECOGNIZED()
1554 * returns true; otherwise, it may not be well defined.
1555 *
1556 * @param[in] pkt Packet on which to operate.
1557 * @return The packet's ethertype.
1558 */
1559static __inline unsigned short
1560NETIO_PKT_ETHERTYPE(netio_pkt_t* pkt)
1561{
1562 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1563
1564 return NETIO_PKT_ETHERTYPE_M(mda, pkt);
1565}
1566
1567
1568/** Return the flow hash computed on the packet.
1569 * @ingroup ingress
1570 *
1571 * For TCP and UDP packets, this hash is calculated by hashing together
1572 * the "5-tuple" values, specifically the source IP address, destination
1573 * IP address, protocol type, source port and destination port.
1574 * The hash value is intended to be helpful for millions of distinct
1575 * flows.
1576 *
1577 * For IPv4 or IPv6 packets which are neither TCP nor UDP, the flow hash is
1578 * derived by hashing together the source and destination IP addresses.
1579 *
1580 * For MPLS-encapsulated packets, the flow hash is derived by hashing
1581 * the first MPLS label.
1582 *
1583 * For all other packets the flow hash is computed from the source
1584 * and destination Ethernet addresses.
1585 *
1586 * The hash is symmetric, meaning it produces the same value if the
1587 * source and destination are swapped. The only exceptions are
1588 * tunneling protocols 0x04 (IP in IP Encapsulation), 0x29 (Simple
1589 * Internet Protocol), 0x2F (General Routing Encapsulation) and 0x32
1590 * (Encap Security Payload), which use only the destination address
1591 * since the source address is not meaningful.
1592 *
1593 * @param[in] pkt Packet on which to operate.
1594 * @return The packet's 32-bit flow hash.
1595 */
1596static __inline unsigned int
1597NETIO_PKT_FLOW_HASH(netio_pkt_t* pkt)
1598{
1599 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1600
1601 return NETIO_PKT_FLOW_HASH_M(mda, pkt);
1602}
1603
1604
1605/** Return the first word of "user data" for the packet.
1606 *
1607 * The contents of the user data words depend on the IPP.
1608 *
1609 * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the first
1610 * word of user data contains the least significant bits of the 64-bit
1611 * arrival cycle count (see @c get_cycle_count_low()).
1612 *
1613 * See the <em>System Programmer's Guide</em> for details.
1614 *
1615 * @ingroup ingress
1616 *
1617 * @param[in] pkt Packet on which to operate.
1618 * @return The packet's first word of "user data".
1619 */
1620static __inline unsigned int
1621NETIO_PKT_USER_DATA_0(netio_pkt_t* pkt)
1622{
1623 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1624
1625 return NETIO_PKT_USER_DATA_0_M(mda, pkt);
1626}
1627
1628
1629/** Return the second word of "user data" for the packet.
1630 *
1631 * The contents of the user data words depend on the IPP.
1632 *
1633 * When using the standard ipp1, ipp2, or ipp4 sub-drivers, the second
1634 * word of user data contains the most significant bits of the 64-bit
1635 * arrival cycle count (see @c get_cycle_count_high()).
1636 *
1637 * See the <em>System Programmer's Guide</em> for details.
1638 *
1639 * @ingroup ingress
1640 *
1641 * @param[in] pkt Packet on which to operate.
1642 * @return The packet's second word of "user data".
1643 */
1644static __inline unsigned int
1645NETIO_PKT_USER_DATA_1(netio_pkt_t* pkt)
1646{
1647 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1648
1649 return NETIO_PKT_USER_DATA_1_M(mda, pkt);
1650}
1651
1652
1653/** Determine whether the L4 (TCP/UDP) checksum was calculated.
1654 * @ingroup ingress
1655 *
1656 * @param[in] pkt Packet on which to operate.
1657 * @return Nonzero if the L4 checksum was calculated.
1658 */
1659static __inline unsigned int
1660NETIO_PKT_L4_CSUM_CALCULATED(netio_pkt_t* pkt)
1661{
1662 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1663
1664 return NETIO_PKT_L4_CSUM_CALCULATED_M(mda, pkt);
1665}
1666
1667
1668/** Determine whether the L4 (TCP/UDP) checksum was calculated and found to
1669 * be correct.
1670 * @ingroup ingress
1671 *
1672 * @param[in] pkt Packet on which to operate.
1673 * @return Nonzero if the checksum was calculated and is correct.
1674 */
1675static __inline unsigned int
1676NETIO_PKT_L4_CSUM_CORRECT(netio_pkt_t* pkt)
1677{
1678 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1679
1680 return NETIO_PKT_L4_CSUM_CORRECT_M(mda, pkt);
1681}
1682
1683
1684/** Determine whether the L3 (IP) checksum was calculated.
1685 * @ingroup ingress
1686 *
1687 * @param[in] pkt Packet on which to operate.
1688 * @return Nonzero if the L3 (IP) checksum was calculated.
1689 */
1690static __inline unsigned int
1691NETIO_PKT_L3_CSUM_CALCULATED(netio_pkt_t* pkt)
1692{
1693 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1694
1695 return NETIO_PKT_L3_CSUM_CALCULATED_M(mda, pkt);
1696}
1697
1698
1699/** Determine whether the L3 (IP) checksum was calculated and found to be
1700 * correct.
1701 * @ingroup ingress
1702 *
1703 * @param[in] pkt Packet on which to operate.
1704 * @return Nonzero if the checksum was calculated and is correct.
1705 */
1706static __inline unsigned int
1707NETIO_PKT_L3_CSUM_CORRECT(netio_pkt_t* pkt)
1708{
1709 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1710
1711 return NETIO_PKT_L3_CSUM_CORRECT_M(mda, pkt);
1712}
1713
1714
1715/** Determine whether the Ethertype was recognized and L3 packet data was
1716 * processed.
1717 * @ingroup ingress
1718 *
1719 * @param[in] pkt Packet on which to operate.
1720 * @return Nonzero if the Ethertype was recognized and L3 packet data was
1721 * processed.
1722 */
1723static __inline unsigned int
1724NETIO_PKT_ETHERTYPE_RECOGNIZED(netio_pkt_t* pkt)
1725{
1726 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1727
1728 return NETIO_PKT_ETHERTYPE_RECOGNIZED_M(mda, pkt);
1729}
1730
1731
1732/** Set an egress packet's L2 length, using a metadata pointer to speed the
1733 * computation.
1734 * @ingroup egress
1735 *
1736 * @param[in,out] mmd Pointer to packet's minimal metadata.
1737 * @param[in] pkt Packet on which to operate.
1738 * @param[in] len Packet L2 length, in bytes.
1739 */
1740static __inline void
1741NETIO_PKT_SET_L2_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt,
1742 int len)
1743{
1744 mmd->l2_length = len;
1745}
1746
1747
1748/** Set an egress packet's L2 length.
1749 * @ingroup egress
1750 *
1751 * @param[in,out] pkt Packet on which to operate.
1752 * @param[in] len Packet L2 length, in bytes.
1753 */
1754static __inline void
1755NETIO_PKT_SET_L2_LENGTH(netio_pkt_t* pkt, int len)
1756{
1757 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1758
1759 NETIO_PKT_SET_L2_LENGTH_MM(mmd, pkt, len);
1760}
1761
1762
1763/** Set an egress packet's L2 header length, using a metadata pointer to
1764 * speed the computation.
1765 * @ingroup egress
1766 *
1767 * It is not normally necessary to call this routine; only the L2 length,
1768 * not the header length, is needed to transmit a packet. It may be useful if
1769 * the egress packet will later be processed by code which expects to use
1770 * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
1771 *
1772 * @param[in,out] mmd Pointer to packet's minimal metadata.
1773 * @param[in] pkt Packet on which to operate.
1774 * @param[in] len Packet L2 header length, in bytes.
1775 */
1776static __inline void
1777NETIO_PKT_SET_L2_HEADER_LENGTH_MM(netio_pkt_minimal_metadata_t* mmd,
1778 netio_pkt_t* pkt, int len)
1779{
1780 mmd->l3_offset = mmd->l2_offset + len;
1781}
1782
1783
1784/** Set an egress packet's L2 header length.
1785 * @ingroup egress
1786 *
1787 * It is not normally necessary to call this routine; only the L2 length,
1788 * not the header length, is needed to transmit a packet. It may be useful if
1789 * the egress packet will later be processed by code which expects to use
1790 * functions like @ref NETIO_PKT_L3_DATA() to get a pointer to the L3 payload.
1791 *
1792 * @param[in,out] pkt Packet on which to operate.
1793 * @param[in] len Packet L2 header length, in bytes.
1794 */
1795static __inline void
1796NETIO_PKT_SET_L2_HEADER_LENGTH(netio_pkt_t* pkt, int len)
1797{
1798 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1799
1800 NETIO_PKT_SET_L2_HEADER_LENGTH_MM(mmd, pkt, len);
1801}
1802
1803
1804/** Set up an egress packet for hardware checksum computation, using a
1805 * metadata pointer to speed the operation.
1806 * @ingroup egress
1807 *
1808 * NetIO provides the ability to automatically calculate a standard
1809 * 16-bit Internet checksum on transmitted packets. The application
1810 * may specify the point in the packet where the checksum starts, the
1811 * number of bytes to be checksummed, and the two bytes in the packet
1812 * which will be replaced with the completed checksum. (If the range
1813 * of bytes to be checksummed includes the bytes to be replaced, the
1814 * initial values of those bytes will be included in the checksum.)
1815 *
1816 * For some protocols, the packet checksum covers data which is not present
1817 * in the packet, or is at least not contiguous to the main data payload.
1818 * For instance, the TCP checksum includes a "pseudo-header" which includes
1819 * the source and destination IP addresses of the packet. To accommodate
1820 * this, the checksum engine may be "seeded" with an initial value, which
1821 * the application would need to compute based on the specific protocol's
1822 * requirements. Note that the seed is given in host byte order (little-
1823 * endian), not network byte order (big-endian); code written to compute a
1824 * pseudo-header checksum in network byte order will need to byte-swap it
1825 * before use as the seed.
1826 *
1827 * Note that the checksum is computed as part of the transmission process,
1828 * so it will not be present in the packet upon completion of this routine.
1829 *
1830 * @param[in,out] mmd Pointer to packet's minimal metadata.
1831 * @param[in] pkt Packet on which to operate.
1832 * @param[in] start Offset within L2 packet of the first byte to include in
1833 * the checksum.
1834 * @param[in] length Number of bytes to include in the checksum.
1836 * @param[in] location Offset within L2 packet of the first of the two bytes
1837 * to be replaced with the calculated checksum.
1838 * @param[in] seed Initial value of the running checksum before any of the
1839 * packet data is added.
1840 */
1841static __inline void
1842NETIO_PKT_DO_EGRESS_CSUM_MM(netio_pkt_minimal_metadata_t* mmd,
1843 netio_pkt_t* pkt, int start, int length,
1844 int location, uint16_t seed)
1845{
1846 mmd->csum_start = start;
1847 mmd->csum_length = length;
1848 mmd->csum_location = location;
1849 mmd->csum_seed = seed;
1850 mmd->flags |= _NETIO_PKT_NEED_EDMA_CSUM_MASK;
1851}
1852
1853
1854/** Set up an egress packet for hardware checksum computation.
1855 * @ingroup egress
1856 *
1857 * NetIO provides the ability to automatically calculate a standard
1858 * 16-bit Internet checksum on transmitted packets. The application
1859 * may specify the point in the packet where the checksum starts, the
1860 * number of bytes to be checksummed, and the two bytes in the packet
1861 * which will be replaced with the completed checksum. (If the range
1862 * of bytes to be checksummed includes the bytes to be replaced, the
1863 * initial values of those bytes will be included in the checksum.)
1864 *
1865 * For some protocols, the packet checksum covers data which is not present
1866 * in the packet, or is at least not contiguous to the main data payload.
1867 * For instance, the TCP checksum includes a "pseudo-header" which includes
1868 * the source and destination IP addresses of the packet. To accommodate
1869 * this, the checksum engine may be "seeded" with an initial value, which
1870 * the application would need to compute based on the specific protocol's
1871 * requirements. Note that the seed is given in host byte order (little-
1872 * endian), not network byte order (big-endian); code written to compute a
1873 * pseudo-header checksum in network byte order will need to byte-swap it
1874 * before use as the seed.
1875 *
1876 * Note that the checksum is computed as part of the transmission process,
1877 * so it will not be present in the packet upon completion of this routine.
1878 *
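 * For example, a sketch which checksums a UDP datagram carried in an
 * IPv4 frame with a 14-byte Ethernet header and a 20-byte IP header
 * (the offsets and the precomputed pseudo-header sum are illustrative
 * assumptions, not fixed by NetIO):
 *
 * @code
 * void csum_udp(netio_pkt_t* pkt, int udp_len, uint16_t pseudo_hdr_sum)
 * {
 *   int udp_off = 14 + 20;  // start of the UDP header within the L2 packet
 *   NETIO_PKT_DO_EGRESS_CSUM(pkt, udp_off, udp_len,
 *                            udp_off + 6, pseudo_hdr_sum);  // csum field at +6
 * }
 * @endcode
 *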
1879 * @param[in,out] pkt Packet on which to operate.
1880 * @param[in] start Offset within L2 packet of the first byte to include in
1881 * the checksum.
1882 * @param[in] length Number of bytes to include in the checksum.
1884 * @param[in] location Offset within L2 packet of the first of the two bytes
1885 * to be replaced with the calculated checksum.
1886 * @param[in] seed Initial value of the running checksum before any of the
1887 * packet data is added.
1888 */
1889static __inline void
1890NETIO_PKT_DO_EGRESS_CSUM(netio_pkt_t* pkt, int start, int length,
1891 int location, uint16_t seed)
1892{
1893 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1894
1895 NETIO_PKT_DO_EGRESS_CSUM_MM(mmd, pkt, start, length, location, seed);
1896}
1897
1898
1899/** Return the number of bytes which could be prepended to a packet, using a
1900 * metadata pointer to speed the operation.
1901 * See @ref netio_populate_prepend_buffer() to get a full description of
1902 * prepending.
1903 *
1904 * @param[in,out] mda Pointer to packet's standard metadata.
1905 * @param[in] pkt Packet on which to operate.
1906 */
1907static __inline int
1908NETIO_PKT_PREPEND_AVAIL_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
1909{
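  /* __offset is evidently kept in 64-byte units, hence the shift by 6. */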
1910 return (pkt->__packet.bits.__offset << 6) +
1911 NETIO_PKT_CUSTOM_HEADER_LENGTH_M(mda, pkt);
1912}
1913
1914
1915/** Return the number of bytes which could be prepended to a packet, using a
1916 * metadata pointer to speed the operation.
1917 * See @ref netio_populate_prepend_buffer() to get a full description of
1918 * prepending.
1919 * @ingroup egress
1920 *
1921 * @param[in,out] mmd Pointer to packet's minimal metadata.
1922 * @param[in] pkt Packet on which to operate.
1923 */
1924static __inline int
1925NETIO_PKT_PREPEND_AVAIL_MM(netio_pkt_minimal_metadata_t* mmd, netio_pkt_t* pkt)
1926{
1927 return (pkt->__packet.bits.__offset << 6) + mmd->l2_offset;
1928}
1929
1930
1931/** Return the number of bytes which could be prepended to a packet.
1932 * See @ref netio_populate_prepend_buffer() to get a full description of
1933 * prepending.
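 *
 * For example, a sketch which checks for room before prepending a
 * hypothetical 4-byte encapsulation header:
 *
 * @code
 * if (NETIO_PKT_PREPEND_AVAIL(pkt) >= 4)
 * {
 *   // room exists; fill it via netio_populate_prepend_buffer()
 * }
 * @endcode
 *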
1934 * @ingroup egress
1935 *
1936 * @param[in] pkt Packet on which to operate.
1937 */
1938static __inline int
1939NETIO_PKT_PREPEND_AVAIL(netio_pkt_t* pkt)
1940{
1941 if (NETIO_PKT_IS_MINIMAL(pkt))
1942 {
1943 netio_pkt_minimal_metadata_t* mmd = NETIO_PKT_MINIMAL_METADATA(pkt);
1944
1945 return NETIO_PKT_PREPEND_AVAIL_MM(mmd, pkt);
1946 }
1947 else
1948 {
1949 netio_pkt_metadata_t* mda = NETIO_PKT_METADATA(pkt);
1950
1951 return NETIO_PKT_PREPEND_AVAIL_M(mda, pkt);
1952 }
1953}
1954
1955
1956/** Flush a packet's minimal metadata from the cache, using a metadata pointer
1957 * to speed the operation.
1958 * @ingroup egress
1959 *
1960 * @param[in] mmd Pointer to packet's minimal metadata.
1961 * @param[in] pkt Packet on which to operate.
1962 */
1963static __inline void
1964NETIO_PKT_FLUSH_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
1965 netio_pkt_t* pkt)
1966{
1967}
1968
1969
1970/** Invalidate a packet's minimal metadata from the cache, using a metadata
1971 * pointer to speed the operation.
1972 * @ingroup egress
1973 *
1974 * @param[in] mmd Pointer to packet's minimal metadata.
1975 * @param[in] pkt Packet on which to operate.
1976 */
1977static __inline void
1978NETIO_PKT_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
1979 netio_pkt_t* pkt)
1980{
1981}
1982
1983
1984/** Flush and then invalidate a packet's minimal metadata from the cache,
1985 * using a metadata pointer to speed the operation.
1986 * @ingroup egress
1987 *
1988 * @param[in] mmd Pointer to packet's minimal metadata.
1989 * @param[in] pkt Packet on which to operate.
1990 */
1991static __inline void
1992NETIO_PKT_FLUSH_INV_MINIMAL_METADATA_MM(netio_pkt_minimal_metadata_t* mmd,
1993 netio_pkt_t* pkt)
1994{
1995}
1996
1997
1998/** Flush a packet's metadata from the cache, using a metadata pointer
1999 * to speed the operation.
2000 * @ingroup ingress
2001 *
 2002 * @param[in] mda Pointer to packet's metadata.
2003 * @param[in] pkt Packet on which to operate.
2004 */
2005static __inline void
2006NETIO_PKT_FLUSH_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
2007{
2008}
2009
2010
2011/** Invalidate a packet's metadata from the cache, using a metadata
2012 * pointer to speed the operation.
2013 * @ingroup ingress
2014 *
2015 * @param[in] mda Pointer to packet's metadata.
2016 * @param[in] pkt Packet on which to operate.
2017 */
2018static __inline void
2019NETIO_PKT_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
2020{
2021}
2022
2023
2024/** Flush and then invalidate a packet's metadata from the cache,
2025 * using a metadata pointer to speed the operation.
2026 * @ingroup ingress
2027 *
2028 * @param[in] mda Pointer to packet's metadata.
2029 * @param[in] pkt Packet on which to operate.
2030 */
2031static __inline void
2032NETIO_PKT_FLUSH_INV_METADATA_M(netio_pkt_metadata_t* mda, netio_pkt_t* pkt)
2033{
2034}
2035
2036
2037/** Flush a packet's minimal metadata from the cache.
2038 * @ingroup egress
2039 *
2040 * @param[in] pkt Packet on which to operate.
2041 */
2042static __inline void
2043NETIO_PKT_FLUSH_MINIMAL_METADATA(netio_pkt_t* pkt)
2044{
2045}
2046
2047
2048/** Invalidate a packet's minimal metadata from the cache.
2049 * @ingroup egress
2050 *
2051 * @param[in] pkt Packet on which to operate.
2052 */
2053static __inline void
2054NETIO_PKT_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
2055{
2056}
2057
2058
2059/** Flush and then invalidate a packet's minimal metadata from the cache.
2060 * @ingroup egress
2061 *
2062 * @param[in] pkt Packet on which to operate.
2063 */
2064static __inline void
2065NETIO_PKT_FLUSH_INV_MINIMAL_METADATA(netio_pkt_t* pkt)
2066{
2067}
2068
2069
2070/** Flush a packet's metadata from the cache.
2071 * @ingroup ingress
2072 *
2073 * @param[in] pkt Packet on which to operate.
2074 */
2075static __inline void
2076NETIO_PKT_FLUSH_METADATA(netio_pkt_t* pkt)
2077{
2078}
2079
2080
2081/** Invalidate a packet's metadata from the cache.
2082 * @ingroup ingress
2083 *
2084 * @param[in] pkt Packet on which to operate.
2085 */
2086static __inline void
2087NETIO_PKT_INV_METADATA(netio_pkt_t* pkt)
2088{
2089}
2090
2091
2092/** Flush and then invalidate a packet's metadata from the cache.
2093 * @ingroup ingress
2094 *
2095 * @param[in] pkt Packet on which to operate.
2096 */
2097static __inline void
2098NETIO_PKT_FLUSH_INV_METADATA(netio_pkt_t* pkt)
2099{
2100}
2101
2102/** Number of NUMA nodes we can distribute buffers to.
2103 * @ingroup setup */
2104#define NETIO_NUM_NODE_WEIGHTS 16
2105
2106/**
 2107 * @brief An object for specifying the characteristics of a NetIO communication
2108 * endpoint.
2109 *
2110 * @ingroup setup
2111 *
2112 * The @ref netio_input_register() function uses this structure to define
2113 * how an application tile will communicate with an IPP.
2114 *
2115 *
2116 * Future updates to NetIO may add new members to this structure,
2117 * which can affect the success of the registration operation. Thus,
2118 * if dynamically initializing the structure, applications are urged to
2119 * zero it out first, for example:
2120 *
2121 * @code
2122 * netio_input_config_t config;
2123 * memset(&config, 0, sizeof (config));
2124 * config.flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE;
2125 * config.num_receive_packets = NETIO_MAX_RECEIVE_PKTS;
2126 * config.queue_id = 0;
2127 * .
2128 * .
2129 * .
2130 * @endcode
2131 *
2132 * since that guarantees that any unused structure members, including
2133 * members which did not exist when the application was first developed,
2134 * will not have unexpected values.
2135 *
2136 * If statically initializing the structure, we strongly recommend use of
2137 * C99-style named initializers, for example:
2138 *
2139 * @code
2140 * netio_input_config_t config = {
2141 * .flags = NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE,
2142 * .num_receive_packets = NETIO_MAX_RECEIVE_PKTS,
2143 * .queue_id = 0,
 2144 * };
2145 * @endcode
2146 *
2147 * instead of the old-style structure initialization:
2148 *
2149 * @code
2150 * // Bad example! Currently equivalent to the above, but don't do this.
2151 * netio_input_config_t config = {
2152 * NETIO_RECV | NETIO_XMIT_CSUM | NETIO_TAG_NONE, NETIO_MAX_RECEIVE_PKTS, 0
 2153 * };
2154 * @endcode
2155 *
2156 * since the C99 style requires no changes to the code if elements of the
2157 * config structure are rearranged. (It also makes the initialization much
2158 * easier to understand.)
2159 *
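 * Once the structure is initialized, it is passed to
 * @ref netio_input_register(). A registration sketch (the
 * error-handling idiom shown is an assumption; see that function's
 * documentation):
 *
 * @code
 * netio_queue_t queue;
 * netio_error_t err = netio_input_register(&config, &queue);
 * if (err != NETIO_NO_ERROR)
 *   handle_error(err);  // hypothetical application routine
 * @endcode
 *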
2160 * Except for items which address a particular tile's transmit or receive
2161 * characteristics, such as the ::NETIO_RECV flag, applications are advised
2162 * to specify the same set of configuration data on all registrations.
2163 * This prevents differing results if multiple tiles happen to do their
2164 * registration operations in a different order on different invocations of
2165 * the application. This is particularly important for things like link
2166 * management flags, and buffer size and homing specifications.
2167 *
2168 * Unless the ::NETIO_FIXED_BUFFER_VA flag is specified in flags, the NetIO
2169 * buffer pool is automatically created and mapped into the application's
2170 * virtual address space at an address chosen by the operating system,
2171 * using the common memory (cmem) facility in the Tilera Multicore
2172 * Components library. The cmem facility allows multiple processes to gain
2173 * access to shared memory which is mapped into each process at an
2174 * identical virtual address. In order for this to work, the processes
2175 * must have a common ancestor, which must create the common memory using
2176 * tmc_cmem_init().
2177 *
2178 * In programs using the iLib process creation API, or in programs which use
2179 * only one process (which include programs using the pthreads library),
2180 * tmc_cmem_init() is called automatically. All other applications
2181 * must call it explicitly, before any child processes which might call
2182 * netio_input_register() are created.
2183 */
2184typedef struct
2185{
2186 /** Registration characteristics.
2187
2188 This value determines several characteristics of the registration;
2189 flags for different types of behavior are ORed together to make the
2190 final flag value. Generally applications should specify exactly
2191 one flag from each of the following categories:
2192
2193 - Whether the application will be receiving packets on this queue
2194 (::NETIO_RECV or ::NETIO_NO_RECV).
2195
2196 - Whether the application will be transmitting packets on this queue,
2197 and if so, whether it will request egress checksum calculation
2198 (::NETIO_XMIT, ::NETIO_XMIT_CSUM, or ::NETIO_NO_XMIT). It is
2199 legal to call netio_get_buffer() without one of the XMIT flags,
2200 as long as ::NETIO_RECV is specified; in this case, the retrieved
2201 buffers must be passed to another tile for transmission.
2202
2203 - Whether the application expects any vendor-specific tags in
2204 its packets' L2 headers (::NETIO_TAG_NONE, ::NETIO_TAG_BRCM,
2205 or ::NETIO_TAG_MRVL). This must match the configuration of the
2206 target IPP.
2207
2208 To accommodate applications written to previous versions of the NetIO
2209 interface, none of the flags above are currently required; if omitted,
2210 NetIO behaves more or less as if ::NETIO_RECV | ::NETIO_XMIT_CSUM |
2211 ::NETIO_TAG_NONE were used. However, explicit specification of
2212 the relevant flags allows NetIO to do a better job of resource
2213 allocation, allows earlier detection of certain configuration errors,
2214 and may enable advanced features or higher performance in the future,
2215 so their use is strongly recommended.
2216
2217 Note that specifying ::NETIO_NO_RECV along with ::NETIO_NO_XMIT
2218 is a special case, intended primarily for use by programs which
2219 retrieve network statistics or do link management operations.
2220 When these flags are both specified, the resulting queue may not
2221 be used with NetIO routines other than netio_get(), netio_set(),
2222 and netio_input_unregister(). See @ref link for more information
2223 on link management.
2224
2225 Other flags are optional; their use is described below.
2226 */
2227 int flags;
2228
2229 /** Interface name. This is a string which identifies the specific
2230 Ethernet controller hardware to be used. The format of the string
2231 is a device type and a device index, separated by a slash; so,
2232 the first 10 Gigabit Ethernet controller is named "xgbe/0", while
2233 the second 10/100/1000 Megabit Ethernet controller is named "gbe/1".
2234 */
2235 const char* interface;
2236
2237 /** Receive packet queue size. This specifies the maximum number
2238 of ingress packets that can be received on this queue without
2239 being retrieved by @ref netio_get_packet(). If the IPP's distribution
2240 algorithm calls for a packet to be sent to this queue, and this
2241 number of packets are already pending there, the new packet
2242 will either be discarded, or sent to another tile registered
2243 for the same queue_id (see @ref drops). This value must
 2244 be at least ::NETIO_MIN_RECEIVE_PKTS; values up to
 2245 ::NETIO_MAX_RECEIVE_PKTS are always supported, and still larger
 2246 values may be supported on certain interfaces.
2247 */
2248 int num_receive_packets;
2249
2250 /** The queue ID being requested. Legal values for this range from 0
2251 to ::NETIO_MAX_QUEUE_ID, inclusive. ::NETIO_MAX_QUEUE_ID is always
2252 greater than or equal to the number of tiles; this allows one queue
2253 for each tile, plus at least one additional queue. Some applications
2254 may wish to use the additional queue as a destination for unwanted
2255 packets, since packets delivered to queues for which no tiles have
2256 registered are discarded.
2257 */
2258 unsigned int queue_id;
2259
2260 /** Maximum number of small send buffers to be held in the local empty
2261 buffer cache. This specifies the size of the area which holds
2262 empty small egress buffers requested from the IPP but not yet
2263 retrieved via @ref netio_get_buffer(). This value must be greater
2264 than zero if the application will ever use @ref netio_get_buffer()
2265 to allocate empty small egress buffers; it may be no larger than
2266 ::NETIO_MAX_SEND_BUFFERS. See @ref epp for more details on empty
2267 buffer caching.
2268 */
2269 int num_send_buffers_small_total;
2270
2271 /** Number of small send buffers to be preallocated at registration.
2272 If this value is nonzero, the specified number of empty small egress
2273 buffers will be requested from the IPP during the netio_input_register
2274 operation; this may speed the execution of @ref netio_get_buffer().
2275 This may be no larger than @ref num_send_buffers_small_total. See @ref
2276 epp for more details on empty buffer caching.
2277 */
2278 int num_send_buffers_small_prealloc;
2279
2280 /** Maximum number of large send buffers to be held in the local empty
2281 buffer cache. This specifies the size of the area which holds empty
2282 large egress buffers requested from the IPP but not yet retrieved via
2283 @ref netio_get_buffer(). This value must be greater than zero if the
2284 application will ever use @ref netio_get_buffer() to allocate empty
2285 large egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
2286 See @ref epp for more details on empty buffer caching.
2287 */
2288 int num_send_buffers_large_total;
2289
2290 /** Number of large send buffers to be preallocated at registration.
2291 If this value is nonzero, the specified number of empty large egress
2292 buffers will be requested from the IPP during the netio_input_register
2293 operation; this may speed the execution of @ref netio_get_buffer().
2294 This may be no larger than @ref num_send_buffers_large_total. See @ref
2295 epp for more details on empty buffer caching.
2296 */
2297 int num_send_buffers_large_prealloc;
2298
2299 /** Maximum number of jumbo send buffers to be held in the local empty
2300 buffer cache. This specifies the size of the area which holds empty
2301 jumbo egress buffers requested from the IPP but not yet retrieved via
2302 @ref netio_get_buffer(). This value must be greater than zero if the
2303 application will ever use @ref netio_get_buffer() to allocate empty
2304 jumbo egress buffers; it may be no larger than ::NETIO_MAX_SEND_BUFFERS.
2305 See @ref epp for more details on empty buffer caching.
2306 */
2307 int num_send_buffers_jumbo_total;
2308
2309 /** Number of jumbo send buffers to be preallocated at registration.
2310 If this value is nonzero, the specified number of empty jumbo egress
2311 buffers will be requested from the IPP during the netio_input_register
2312 operation; this may speed the execution of @ref netio_get_buffer().
2313 This may be no larger than @ref num_send_buffers_jumbo_total. See @ref
2314 epp for more details on empty buffer caching.
2315 */
2316 int num_send_buffers_jumbo_prealloc;
2317
2318 /** Total packet buffer size. This determines the total size, in bytes,
2319 of the NetIO buffer pool. Note that the maximum number of available
2320 buffers of each size is determined during hypervisor configuration
2321 (see the <em>System Programmer's Guide</em> for details); this just
2322 influences how much host memory is allocated for those buffers.
2323
2324 The buffer pool is allocated from common memory, which will be
2325 automatically initialized if needed. If your buffer pool is larger
2326 than 240 MB, you might need to explicitly call @c tmc_cmem_init(),
2327 as described in the Application Libraries Reference Manual (UG227).
2328
2329 Packet buffers are currently allocated in chunks of 16 MB; this
2330 value will be rounded up to the next larger multiple of 16 MB.
2331 If this value is zero, a default of 32 MB will be used; this was
2332 the value used by previous versions of NetIO. Note that taking this
2333 default also affects the placement of buffers on Linux NUMA nodes.
2334 See @ref buffer_node_weights for an explanation of buffer placement.
2335
2336 In order to successfully allocate packet buffers, Linux must have
2337 available huge pages on the relevant Linux NUMA nodes. See the
2338 <em>System Programmer's Guide</em> for information on configuring
2339 huge page support in Linux.
2340 */
2341 uint64_t total_buffer_size;
2342
2343 /** Buffer placement weighting factors.
2344
2345 This array specifies the relative amount of buffering to place
2346 on each of the available Linux NUMA nodes. This array is
2347 indexed by the NUMA node, and the values in the array are
2348 proportional to the amount of buffer space to allocate on that
2349 node.
2350
2351 If memory striping is enabled in the Hypervisor, then there is
2352 only one logical NUMA node (node 0). In that case, NetIO will by
2353 default ignore the suggested buffer node weights, and buffers
2354 will be striped across the physical memory controllers. See
2355 UG209 System Programmer's Guide for a description of the
2356 hypervisor option that controls memory striping.
2357
2358 If memory striping is disabled, then there are up to four NUMA
2359 nodes, corresponding to the four DDRAM controllers in the TILE
2360 processor architecture. See UG100 Tile Processor Architecture
2361 Overview for a diagram showing the location of each of the DDRAM
2362 controllers relative to the tile array.
2363
2364 For instance, if memory striping is disabled, the following
 2365 configuration structure:
2366
2367 @code
2368 netio_input_config_t config = {
2369 .
2370 .
2371 .
 2372 .total_buffer_size = 4 * 16 * 1024 * 1024,
 2373 .buffer_node_weights = { 1, 0, 1, 0 },
 2374 };
2375 @endcode
2376
2377 would result in 32 MB of buffers being placed on controller 0, and
2378 32 MB on controller 2. (Since buffers are allocated in units of
2379 16 MB, some sets of weights will not be able to be matched exactly.)
2380
2381 For the weights to be effective, @ref total_buffer_size must be
2382 nonzero. If @ref total_buffer_size is zero, causing the default
2383 32 MB of buffer space to be used, then any specified weights will
 2384 be ignored, and buffers will be positioned as they were in previous
2385 versions of NetIO:
2386
2387 - For xgbe/0 and gbe/0, 16 MB of buffers will be placed on controller 1,
2388 and the other 16 MB will be placed on controller 2.
2389
2390 - For xgbe/1 and gbe/1, 16 MB of buffers will be placed on controller 2,
2391 and the other 16 MB will be placed on controller 3.
2392
2393 If @ref total_buffer_size is nonzero, but all weights are zero,
2394 then all buffer space will be allocated on Linux NUMA node zero.
2395
2396 By default, the specified buffer placement is treated as a hint;
2397 if sufficient free memory is not available on the specified
2398 controllers, the buffers will be allocated elsewhere. However,
2399 if the ::NETIO_STRICT_HOMING flag is specified in @ref flags, then a
2400 failure to allocate buffer space exactly as requested will cause the
2401 registration operation to fail with an error of ::NETIO_CANNOT_HOME.
2402
2403 Note that maximal network performance cannot be achieved with
2404 only one memory controller.
2405 */
2406 uint8_t buffer_node_weights[NETIO_NUM_NODE_WEIGHTS];
2407
2408 /** Fixed virtual address for packet buffers. Only valid when
2409 ::NETIO_FIXED_BUFFER_VA is specified in @ref flags; see the
2410 description of that flag for details.
2411 */
2412 void* fixed_buffer_va;
2413
2414 /**
2415 Maximum number of outstanding send packet requests. This value is
2416 only relevant when an EPP is in use; it determines the number of
2417 slots in the EPP's outgoing packet queue which this tile is allowed
2418 to consume, and thus the number of packets which may be sent before
2419 the sending tile must wait for an acknowledgment from the EPP.
2420 Modifying this value is generally only helpful when using @ref
2421 netio_send_packet_vector(), where it can help improve performance by
2422 allowing a single vector send operation to process more packets.
2423 Typically it is not specified, and the default, which divides the
2424 outgoing packet slots evenly between all tiles on the chip, is used.
2425
2426 If a registration asks for more outgoing packet queue slots than are
2427 available, ::NETIO_TOOMANY_XMIT will be returned. The total number
2428 of packet queue slots which are available for all tiles for each EPP
2429 is subject to change, but is currently ::NETIO_TOTAL_SENDS_OUTSTANDING.
2430
2431
2432 This value is ignored if ::NETIO_XMIT is not specified in flags.
2433 If you want to specify a large value here for a specific tile, you are
2434 advised to specify NETIO_NO_XMIT on other, non-transmitting tiles so
2435 that they do not consume a default number of packet slots. Any tile
2436 transmitting is required to have at least ::NETIO_MIN_SENDS_OUTSTANDING
2437 slots allocated to it; values less than that will be silently
2438 increased by the NetIO library.
2439 */
2440 int num_sends_outstanding;
2441}
2442netio_input_config_t;
2443
2444
2445/** Registration flags; used in the @ref netio_input_config_t structure.
2446 * @addtogroup setup
2447 */
2448/** @{ */
2449
2450/** Fail a registration request if we can't put packet buffers
2451 on the specified memory controllers. */
2452#define NETIO_STRICT_HOMING 0x00000002
2453
2454/** This application expects no tags on its L2 headers. */
2455#define NETIO_TAG_NONE 0x00000004
2456
2457/** This application expects Marvell extended tags on its L2 headers. */
2458#define NETIO_TAG_MRVL 0x00000008
2459
2460/** This application expects Broadcom tags on its L2 headers. */
2461#define NETIO_TAG_BRCM 0x00000010
2462
2463/** This registration may call routines which receive packets. */
2464#define NETIO_RECV 0x00000020
2465
2466/** This registration may not call routines which receive packets. */
2467#define NETIO_NO_RECV 0x00000040
2468
2469/** This registration may call routines which transmit packets. */
2470#define NETIO_XMIT 0x00000080
2471
2472/** This registration may call routines which transmit packets with
2473 checksum acceleration. */
2474#define NETIO_XMIT_CSUM 0x00000100
2475
2476/** This registration may not call routines which transmit packets. */
2477#define NETIO_NO_XMIT 0x00000200
2478
2479/** This registration wants NetIO buffers mapped at an application-specified
2480 virtual address.
2481
2482 NetIO buffers are by default created by the TMC common memory facility,
2483 which must be configured by a common ancestor of all processes sharing
2484 a network interface. When this flag is specified, NetIO buffers are
2485 instead mapped at an address chosen by the application (and specified
2486 in @ref netio_input_config_t::fixed_buffer_va). This allows multiple
2487 unrelated but cooperating processes to share a NetIO interface.
2488 All processes sharing the same interface must specify this flag,
2489 and all must specify the same fixed virtual address.
2490
2491 @ref netio_input_config_t::fixed_buffer_va must be a
2492 multiple of 16 MB, and the packet buffers will occupy @ref
2493 netio_input_config_t::total_buffer_size bytes of virtual address
2494 space, beginning at that address. If any of those virtual addresses
2495 are currently occupied by other memory objects, like application or
2496 shared library code or data, @ref netio_input_register() will return
2497 ::NETIO_FAULT. While it is impossible to provide a fixed_buffer_va
2498 which will work for all applications, a good first guess might be to
2499 use 0xb0000000 minus @ref netio_input_config_t::total_buffer_size.
2500 If that fails, it might be helpful to consult the running application's
2501 virtual address description file (/proc/<em>pid</em>/maps) to see
2502 which regions of virtual address space are available.
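
    A placement sketch following the first-guess suggestion above:

    @code
    config.flags |= NETIO_FIXED_BUFFER_VA;
    config.total_buffer_size = 64 * 1024 * 1024;
    config.fixed_buffer_va =
      (void*) (0xb0000000 - config.total_buffer_size);
    @endcode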
2503 */
2504#define NETIO_FIXED_BUFFER_VA 0x00000400
2505
2506/** This registration call will not complete unless the network link
2507 is up. The process will wait several seconds for this to happen (the
2508 precise interval is link-dependent), but if the link does not come up,
2509 ::NETIO_LINK_DOWN will be returned. This flag is the default if
2510 ::NETIO_NOREQUIRE_LINK_UP is not specified. Note that this flag by
2511 itself does not request that the link be brought up; that can be done
2512 with the ::NETIO_AUTO_LINK_UPDN or ::NETIO_AUTO_LINK_UP flags (the
2513 latter is the default if no NETIO_AUTO_LINK_xxx flags are specified),
2514 or by explicitly setting the link's desired state via netio_set().
2515 If the link is not brought up by one of those methods, and this flag
2516 is specified, the registration operation will return ::NETIO_LINK_DOWN.
2517 This flag is ignored if it is specified along with ::NETIO_NO_XMIT and
2518 ::NETIO_NO_RECV. See @ref link for more information on link
2519 management.
2520 */
2521#define NETIO_REQUIRE_LINK_UP 0x00000800
2522
2523/** This registration call will complete even if the network link is not up.
2524 Whenever the link is not up, packets will not be sent or received:
2525 netio_get_packet() will return ::NETIO_NOPKT once all queued packets
2526 have been drained, and netio_send_packet() and similar routines will
2527 return NETIO_QUEUE_FULL once the outgoing packet queue in the EPP
2528 or the I/O shim is full. See @ref link for more information on link
2529 management.
2530 */
2531#define NETIO_NOREQUIRE_LINK_UP 0x00001000
2532
2533#ifndef __DOXYGEN__
2534/*
2535 * These are part of the implementation of the NETIO_AUTO_LINK_xxx flags,
2536 * but should not be used directly by applications, and are thus not
2537 * documented.
2538 */
2539#define _NETIO_AUTO_UP 0x00002000
2540#define _NETIO_AUTO_DN 0x00004000
2541#define _NETIO_AUTO_PRESENT 0x00008000
2542#endif
2543
2544/** Set the desired state of the link to up, allowing any speeds which are
2545 supported by the link hardware, as part of this registration operation.
2546 Do not take down the link automatically. This is the default if
2547 no other NETIO_AUTO_LINK_xxx flags are specified. This flag is ignored
2548 if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
2549 See @ref link for more information on link management.
2550 */
2551#define NETIO_AUTO_LINK_UP (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP)
2552
2553/** Set the desired state of the link to up, allowing any speeds which are
2554 supported by the link hardware, as part of this registration operation.
2555 Set the desired state of the link to down the next time no tiles are
2556 registered for packet reception or transmission. This flag is ignored
2557 if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
2558 See @ref link for more information on link management.
2559 */
2560#define NETIO_AUTO_LINK_UPDN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_UP | \
2561 _NETIO_AUTO_DN)
2562
2563/** Set the desired state of the link to down the next time no tiles are
2564 registered for packet reception or transmission. This flag is ignored
2565 if it is specified along with ::NETIO_NO_XMIT and ::NETIO_NO_RECV.
2566 See @ref link for more information on link management.
2567 */
2568#define NETIO_AUTO_LINK_DN (_NETIO_AUTO_PRESENT | _NETIO_AUTO_DN)
2569
2570/** Do not bring up the link automatically as part of this registration
2571 operation. Do not take down the link automatically. This flag
2572 is ignored if it is specified along with ::NETIO_NO_XMIT and
2573 ::NETIO_NO_RECV. See @ref link for more information on link management.
2574 */
2575#define NETIO_AUTO_LINK_NONE _NETIO_AUTO_PRESENT
2576
2577
2578/** Minimum number of receive packets. */
2579#define NETIO_MIN_RECEIVE_PKTS 16
2580
2581/** Lower bound on the maximum number of receive packets; may be higher
2582 than this on some interfaces. */
2583#define NETIO_MAX_RECEIVE_PKTS 128
2584
2585/** Maximum number of send buffers, per packet size. */
2586#define NETIO_MAX_SEND_BUFFERS 16
2587
2588/** Number of EPP queue slots, and thus outstanding sends, per EPP. */
2589#define NETIO_TOTAL_SENDS_OUTSTANDING 2015
2590
2591/** Minimum number of EPP queue slots, and thus outstanding sends, per
2592 * transmitting tile. */
2593#define NETIO_MIN_SENDS_OUTSTANDING 16
2594
2595
2596/**@}*/
2597
2598#ifndef __DOXYGEN__
2599
2600/**
2601 * An object for providing Ethernet packets to a process.
2602 */
2603struct __netio_queue_impl_t;
2604
2605/**
2606 * An object for managing the user end of a NetIO queue.
2607 */
2608struct __netio_queue_user_impl_t;
2609
2610#endif /* !__DOXYGEN__ */
2611
2612
2613/** A netio_queue_t describes a NetIO communications endpoint.
2614 * @ingroup setup
2615 */
2616typedef struct
2617{
2618#ifdef __DOXYGEN__
2619 uint8_t opaque[8]; /**< This is an opaque structure. */
2620#else
2621 struct __netio_queue_impl_t* __system_part; /**< The system part. */
2622 struct __netio_queue_user_impl_t* __user_part; /**< The user part. */
2623#ifdef _NETIO_PTHREAD
2624 _netio_percpu_mutex_t lock; /**< Queue lock. */
2625#endif
2626#endif
2627}
2628netio_queue_t;
2629
2630
2631/**
2632 * @brief Packet send context.
2633 *
2634 * @ingroup egress
2635 *
2636 * Packet send context for use with netio_send_packet_prepare and _commit.
2637 */
2638typedef struct
2639{
2640#ifdef __DOXYGEN__
2641 uint8_t opaque[44]; /**< This is an opaque structure. */
2642#else
2643 uint8_t flags; /**< Defined below */
2644 uint8_t datalen; /**< Number of valid words pointed to by data. */
2645 uint32_t request[9]; /**< Request to be sent to the EPP or shim. Note
2646 that this is smaller than the 11-word maximum
2647 request size, since some constant values are
2648 not saved in the context. */
2649 uint32_t *data; /**< Data to be sent to the EPP or shim via IDN. */
2650#endif
2651}
2652netio_send_pkt_context_t;
2653
2654
2655#ifndef __DOXYGEN__
2656#define SEND_PKT_CTX_USE_EPP 1 /**< We're sending to an EPP. */
2657#define SEND_PKT_CTX_SEND_CSUM 2 /**< Request includes a checksum. */
2658#endif
2659
2660/**
2661 * @brief Packet vector entry.
2662 *
2663 * @ingroup egress
2664 *
2665 * This data structure is used with netio_send_packet_vector() to send multiple
2666 * packets with one NetIO call. The structure should be initialized by
2667 * calling netio_pkt_vector_set(), rather than by setting the fields
2668 * directly.
2669 *
2670 * This structure is guaranteed to be a power of two in size, no
2671 * bigger than one L2 cache line, and to be aligned modulo its size.
2672 */
2673typedef struct
2674#ifndef __DOXYGEN__
2675__attribute__((aligned(8)))
2676#endif
2677{
2678 /** Reserved for use by the user application. When initialized with
2679 * the netio_set_pkt_vector_entry() function, this field is guaranteed
2680 * to be visible to readers only after all other fields are already
2681 * visible. This way it can be used as a valid flag or generation
2682 * counter. */
2683 uint8_t user_data;
2684
2685 /* Structure members below this point should not be accessed directly by
2686 * applications, as they may change in the future. */
2687
2688 /** Low 8 bits of the packet address to send. The high bits are
2689 * acquired from the 'handle' field. */
2690 uint8_t buffer_address_low;
2691
2692 /** Number of bytes to transmit. */
2693 uint16_t size;
2694
2695 /** The raw handle from a netio_pkt_t. If this is NETIO_PKT_HANDLE_NONE,
2696 * this vector entry will be skipped and no packet will be transmitted. */
2697 netio_pkt_handle_t handle;
2698}
2699netio_pkt_vector_entry_t;
2700
2701
2702/**
2703 * @brief Initialize fields in a packet vector entry.
2704 *
2705 * @ingroup egress
2706 *
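 * For example, a sketch which fills a four-entry vector and hands it
 * to netio_send_packet_vector() (the exact argument order of that
 * call is an assumption; see its documentation):
 *
 * @code
 * void send_four(netio_queue_t* queue, netio_pkt_t* pkts[4])
 * {
 *   netio_pkt_vector_entry_t vec[4];
 *   int i;
 *   for (i = 0; i < 4; i++)
 *     netio_pkt_vector_set(&vec[i], pkts[i], 1);
 *   netio_send_packet_vector(queue, vec, 4);
 * }
 * @endcode
 *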
2707 * @param[out] v Pointer to the vector entry to be initialized.
2708 * @param[in] pkt Packet to be transmitted when the vector entry is passed to
2709 * netio_send_packet_vector(). Note that the packet's attributes
2710 * (e.g., its L2 offset and length) are captured at the time this
2711 * routine is called; subsequent changes in those attributes will not
2712 * be reflected in the packet which is actually transmitted.
2713 * Changes in the packet's contents, however, will be so reflected.
2714 * If this is NULL, no packet will be transmitted.
2715 * @param[in] user_data User data to be set in the vector entry.
2716 * This function guarantees that the "user_data" field will become
2717 * visible to a reader only after all other fields have become visible.
2718 * This allows a structure in a ring buffer to be written and read
2719 * by a polling reader without any locks or other synchronization.
2720 */
2721static __inline void
2722netio_pkt_vector_set(volatile netio_pkt_vector_entry_t* v, netio_pkt_t* pkt,
2723 uint8_t user_data)
2724{
2725 if (pkt)
2726 {
2727 if (NETIO_PKT_IS_MINIMAL(pkt))
2728 {
2729 netio_pkt_minimal_metadata_t* mmd =
2730 (netio_pkt_minimal_metadata_t*) &pkt->__metadata;
2731 v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_MM(mmd, pkt) & 0xFF;
2732 v->size = NETIO_PKT_L2_LENGTH_MM(mmd, pkt);
2733 }
2734 else
2735 {
2736 netio_pkt_metadata_t* mda = &pkt->__metadata;
2737 v->buffer_address_low = (uintptr_t) NETIO_PKT_L2_DATA_M(mda, pkt) & 0xFF;
2738 v->size = NETIO_PKT_L2_LENGTH_M(mda, pkt);
2739 }
2740 v->handle.word = pkt->__packet.word;
2741 }
2742 else
2743 {
2744 v->handle.word = 0; /* Set handle to NETIO_PKT_HANDLE_NONE. */
2745 }
2746
2747 __asm__("" : : : "memory");
2748
2749 v->user_data = user_data;
2750}
2751
2752
2753/**
2754 * Flags and structures for @ref netio_get() and @ref netio_set().
2755 * @ingroup config
2756 */
2757
2758/** @{ */
2759/** Parameter class; addr is a NETIO_PARAM_xxx value. */
2760#define NETIO_PARAM 0
2761/** Interface MAC address. This address is only valid with @ref netio_get().
2762 * The value is a 6-byte MAC address. Depending upon the overall system
2763 * design, a MAC address may or may not be available for each interface. */
2764#define NETIO_PARAM_MAC 0
2765
2766/** Determine whether to suspend output on the receipt of pause frames.
2767 * If the value is nonzero, the I/O shim will suspend output when a pause
2768 * frame is received. If the value is zero, pause frames will be ignored. */
2769#define NETIO_PARAM_PAUSE_IN 1
2770
2771/** Determine whether to send pause frames if the I/O shim packet FIFOs are
2772 * nearly full. If the value is zero, pause frames are not sent. If
2773 * the value is nonzero, it is the delay value which will be sent in any
2774 * pause frames which are output, in units of 512 bit times. */
2775#define NETIO_PARAM_PAUSE_OUT 2
2776
2777/** Jumbo frame support. The value is a 4-byte integer. If the value is
2778 * nonzero, the MAC will accept frames of up to 10240 bytes. If the value
2779 * is zero, the MAC will only accept frames of up to 1544 bytes. */
2780#define NETIO_PARAM_JUMBO 3
2781
2782/** I/O shim's overflow statistics register. The value is two 16-bit integers.
2783 * The first 16-bit value (or the low 16 bits, if the value is treated as a
2784 * 32-bit number) is the count of packets which were completely dropped and
2785 * not delivered by the shim. The second 16-bit value (or the high 16 bits,
2786 * if the value is treated as a 32-bit number) is the count of packets
2787 * which were truncated and thus only partially delivered by the shim. This
2788 * register is automatically reset to zero after it has been read.
2789 */
2790#define NETIO_PARAM_OVERFLOW 4
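
/* For example, splitting a value retrieved for ::NETIO_PARAM_OVERFLOW when
 * it is treated as a single 32-bit number (a sketch):
 *
 *   uint32_t overflow;  // filled in via netio_get()
 *   uint16_t dropped   = overflow & 0xFFFF;  // completely dropped
 *   uint16_t truncated = overflow >> 16;     // truncated, partially delivered
 */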
2791
2792/** IPP statistics. This address is only valid with @ref netio_get(). The
2793 * value is a netio_stat_t structure. Unlike the I/O shim statistics, the
2794 * IPP statistics are not all reset to zero on read; see the description
2795 * of the netio_stat_t for details. */
2796#define NETIO_PARAM_STAT 5
2797
2798/** Possible link state. The value is a combination of "NETIO_LINK_xxx"
2799 * flags. With @ref netio_get(), this will indicate which flags are
2800 * actually supported by the hardware.
2801 *
2802 * For historical reasons, specifying this value to netio_set() will have
2803 * the same behavior as using ::NETIO_PARAM_LINK_CONFIG, but this usage is
2804 * discouraged.
2805 */
2806#define NETIO_PARAM_LINK_POSSIBLE_STATE 6
2807
2808/** Link configuration. The value is a combination of "NETIO_LINK_xxx" flags.
2809 * With @ref netio_set(), this will attempt to immediately bring up the
2810 * link using whichever of the requested flags are supported by the
2811 * hardware, or take down the link if the flags are zero; if this is
2812 * not possible, an error will be returned. Many programs will want
2813 * to use ::NETIO_PARAM_LINK_DESIRED_STATE instead.
2814 *
2815 * For historical reasons, specifying this value to netio_get() will
2816 * have the same behavior as using ::NETIO_PARAM_LINK_POSSIBLE_STATE,
2817 * but this usage is discouraged.
2818 */
2819#define NETIO_PARAM_LINK_CONFIG NETIO_PARAM_LINK_POSSIBLE_STATE
2820
2821/** Current link state. This address is only valid with @ref netio_get().
2822 * The value is zero or more of the "NETIO_LINK_xxx" flags, ORed together.
2823 * If the link is down, the value ANDed with NETIO_LINK_SPEED will be
2824 * zero; if the link is up, the value ANDed with NETIO_LINK_SPEED will
2825 * result in exactly one of the NETIO_LINK_xxx values, indicating the
2826 * current speed. */
2827#define NETIO_PARAM_LINK_CURRENT_STATE 7
2828
2829/** Variant symbol for current state, retained for compatibility with
2830 * pre-MDE-2.1 programs. */
2831#define NETIO_PARAM_LINK_STATUS NETIO_PARAM_LINK_CURRENT_STATE
2832
2833/** Packet Coherence protocol. This address is only valid with @ref netio_get().
2834 * The value is nonzero if the interface is configured for cache-coherent DMA.
2835 */
2836#define NETIO_PARAM_COHERENT 8
2837
 2838/** Desired link state. The value is a combination of "NETIO_LINK_xxx"
2839 * flags, which specify the desired state for the link. With @ref
2840 * netio_set(), this will, in the background, attempt to bring up the link
2841 * using whichever of the requested flags are reasonable, or take down the
2842 * link if the flags are zero. The actual link up or down operation may
2843 * happen after this call completes. If the link state changes in the
2844 * future, the system will continue to try to get back to the desired link
2845 * state; for instance, if the link is brought up successfully, and then
2846 * the network cable is disconnected, the link will go down. However, the
2847 * desired state of the link is still up, so if the cable is reconnected,
2848 * the link will be brought up again.
2849 *
2850 * With @ref netio_get(), this will indicate the desired state for the
2851 * link, as set with a previous netio_set() call, or implicitly by a
2852 * netio_input_register() or netio_input_unregister() operation. This may
2853 * not reflect the current state of the link; to get that, use
2854 * ::NETIO_PARAM_LINK_CURRENT_STATE. */
2855#define NETIO_PARAM_LINK_DESIRED_STATE 9
2856
2857/** NetIO statistics structure. Retrieved using the ::NETIO_PARAM_STAT
2858 * address passed to @ref netio_get(). */
2859typedef struct
2860{
2861 /** Number of packets which have been received by the IPP and forwarded
2862 * to a tile's receive queue for processing. This value wraps at its
2863 * maximum, and is not cleared upon read. */
2864 uint32_t packets_received;
2865
2866 /** Number of packets which have been dropped by the IPP, because they could
2867 * not be received, or could not be forwarded to a tile. The former happens
2868 * when the IPP does not have a free packet buffer of suitable size for an
2869 * incoming frame. The latter happens when all potential destination tiles
2870 * for a packet, as defined by the group, bucket, and queue configuration,
2871 * have full receive queues. This value wraps at its maximum, and is not
2872 * cleared upon read. */
2873 uint32_t packets_dropped;
2874
2875 /*
2876 * Note: the #defines after each of the following four one-byte values
2877 * denote their location within the third word of the netio_stat_t. They
2878 * are intended for use only by the IPP implementation and are thus omitted
2879 * from the Doxygen output.
2880 */
2881
2882 /** Number of packets dropped because no worker was able to accept a new
2883 * packet. This value saturates at its maximum, and is cleared upon
2884 * read. */
2885 uint8_t drops_no_worker;
2886#ifndef __DOXYGEN__
2887#define NETIO_STAT_DROPS_NO_WORKER 0
2888#endif
2889
2890 /** Number of packets dropped because no small buffers were available.
2891 * This value saturates at its maximum, and is cleared upon read. */
2892 uint8_t drops_no_smallbuf;
2893#ifndef __DOXYGEN__
2894#define NETIO_STAT_DROPS_NO_SMALLBUF 1
2895#endif
2896
2897 /** Number of packets dropped because no large buffers were available.
2898 * This value saturates at its maximum, and is cleared upon read. */
2899 uint8_t drops_no_largebuf;
2900#ifndef __DOXYGEN__
2901#define NETIO_STAT_DROPS_NO_LARGEBUF 2
2902#endif
2903
2904 /** Number of packets dropped because no jumbo buffers were available.
2905 * This value saturates at its maximum, and is cleared upon read. */
2906 uint8_t drops_no_jumbobuf;
2907#ifndef __DOXYGEN__
2908#define NETIO_STAT_DROPS_NO_JUMBOBUF 3
2909#endif
2910}
2911netio_stat_t;
2912
2913
2914/** Link can run, should run, or is running at 10 Mbps. */
2915#define NETIO_LINK_10M 0x01
2916
2917/** Link can run, should run, or is running at 100 Mbps. */
2918#define NETIO_LINK_100M 0x02
2919
2920/** Link can run, should run, or is running at 1 Gbps. */
2921#define NETIO_LINK_1G 0x04
2922
2923/** Link can run, should run, or is running at 10 Gbps. */
2924#define NETIO_LINK_10G 0x08
2925
2926/** Link should run at the highest speed supported by the link and by
2927 * the device connected to the link. Only usable as a value for
2928 * the link's desired state; never returned as a value for the current
2929 * or possible states. */
2930#define NETIO_LINK_ANYSPEED 0x10
2931
2932/** All legal link speeds. */
2933#define NETIO_LINK_SPEED (NETIO_LINK_10M | \
2934 NETIO_LINK_100M | \
2935 NETIO_LINK_1G | \
2936 NETIO_LINK_10G | \
2937 NETIO_LINK_ANYSPEED)
2938
2939
2940/** MAC register class. Addr is a register offset within the MAC.
2941 * Registers within the XGbE and GbE MACs are documented in the Tile
2942 * Processor I/O Device Guide (UG104). MAC registers start at address
2943 * 0x4000, and do not include the MAC_INTERFACE registers. */
2944#define NETIO_MAC 1
2945
2946/** MDIO register class (IEEE 802.3 clause 22 format). Addr is the "addr"
2947 * member of a netio_mdio_addr_t structure. */
2948#define NETIO_MDIO 2
2949
2950/** MDIO register class (IEEE 802.3 clause 45 format). Addr is the "addr"
2951 * member of a netio_mdio_addr_t structure. */
2952#define NETIO_MDIO_CLAUSE45 3
2953
2954/** NetIO MDIO address type. Retrieved or provided using the ::NETIO_MDIO
2955 * address passed to @ref netio_get() or @ref netio_set(). */
2956typedef union
2957{
2958 struct
2959 {
2960 unsigned int reg:16; /**< MDIO register offset. For clause 22 access,
2961 must be less than 32. */
2962 unsigned int phy:5; /**< Which MDIO PHY to access. */
2963 unsigned int dev:5; /**< Which MDIO device to access within that PHY.
2964 Applicable for clause 45 access only; ignored
2965 for clause 22 access. */
2966 }
2967 bits; /**< Container for bitfields. */
2968 uint64_t addr; /**< Value to pass to @ref netio_get() or
2969 * @ref netio_set(). */
2970}
2971netio_mdio_addr_t;
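
/* For example, a sketch which forms the address for clause-22 register 1
 * of PHY 0; mdio.addr is then the value to pass, with the ::NETIO_MDIO
 * class, to @ref netio_get():
 *
 *   netio_mdio_addr_t mdio = { .bits = { .reg = 1, .phy = 0, .dev = 0 } };
 *   // use NETIO_MDIO and mdio.addr with netio_get()
 */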
2972
2973/** @} */
2974
2975#endif /* __NETIO_INTF_H__ */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 112b1e248f05..b4c8e8ec45dc 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -15,3 +15,4 @@ obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
 15obj-$(CONFIG_MODULES) += module.o
 16obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 17obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
18obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/pci.c b/arch/tile/kernel/pci.c
new file mode 100644
index 000000000000..a1ee25be9ad9
--- /dev/null
+++ b/arch/tile/kernel/pci.c
@@ -0,0 +1,621 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/pci.h>
17#include <linux/delay.h>
18#include <linux/string.h>
19#include <linux/init.h>
20#include <linux/capability.h>
21#include <linux/sched.h>
22#include <linux/errno.h>
23#include <linux/bootmem.h>
24#include <linux/irq.h>
25#include <linux/io.h>
26#include <linux/uaccess.h>
27
28#include <asm/processor.h>
29#include <asm/sections.h>
30#include <asm/byteorder.h>
31#include <asm/hv_driver.h>
32#include <hv/drv_pcie_rc_intf.h>
33
34
35/*
36 * Initialization flow and process
37 * -------------------------------
38 *
 39 * This file contains the routines to search for PCI buses,
40 * enumerate the buses, and configure any attached devices.
41 *
42 * There are two entry points here:
43 * 1) tile_pci_init
44 * This sets up the pci_controller structs, and opens the
45 * FDs to the hypervisor. This is called from setup_arch() early
46 * in the boot process.
47 * 2) pcibios_init
48 * This probes the PCI bus(es) for any attached hardware. It's
49 * called by subsys_initcall. All of the real work is done by the
50 * generic Linux PCI layer.
51 *
52 */
53
54/*
 55 * This flag tells whether the platform is TILEmpower, which needs
 56 * special configuration for the PLX switch chip.
57 */
58int __write_once tile_plx_gen1;
59
60static struct pci_controller controllers[TILE_NUM_PCIE];
61static int num_controllers;
62
63static struct pci_ops tile_cfg_ops;
64
65
66/*
67 * We don't need to worry about the alignment of resources.
68 */
69resource_size_t pcibios_align_resource(void *data, const struct resource *res,
70 resource_size_t size, resource_size_t align)
71{
72 return res->start;
73}
74EXPORT_SYMBOL(pcibios_align_resource);
75
76/*
77 * Open a FD to the hypervisor PCI device.
78 *
 79 * controller_id is the controller number; config_type is 0 or 1 for
80 * config0 or config1 operations.
81 */
82static int __init tile_pcie_open(int controller_id, int config_type)
83{
84 char filename[32];
85 int fd;
86
87 sprintf(filename, "pcie/%d/config%d", controller_id, config_type);
88
89 fd = hv_dev_open((HV_VirtAddr)filename, 0);
90
91 return fd;
92}
93
94
95/*
96 * Get the IRQ numbers from the HV and set up the handlers for them.
97 */
98static int __init tile_init_irqs(int controller_id,
99 struct pci_controller *controller)
100{
101 char filename[32];
102 int fd;
103 int ret;
104 int x;
105 struct pcie_rc_config rc_config;
106
107 sprintf(filename, "pcie/%d/ctl", controller_id);
108 fd = hv_dev_open((HV_VirtAddr)filename, 0);
109 if (fd < 0) {
110 pr_err("PCI: hv_dev_open(%s) failed\n", filename);
111 return -1;
112 }
113 ret = hv_dev_pread(fd, 0, (HV_VirtAddr)(&rc_config),
114 sizeof(rc_config), PCIE_RC_CONFIG_MASK_OFF);
115 hv_dev_close(fd);
116 if (ret != sizeof(rc_config)) {
117 pr_err("PCI: wanted %zd bytes, got %d\n",
118 sizeof(rc_config), ret);
119 return -1;
120 }
121 /* Record irq_base so that we can map INTx to IRQ # later. */
122 controller->irq_base = rc_config.intr;
123
124 for (x = 0; x < 4; x++)
125 tile_irq_activate(rc_config.intr + x,
126 TILE_IRQ_HW_CLEAR);
127
128 if (rc_config.plx_gen1)
129 controller->plx_gen1 = 1;
130
131 return 0;
132}
133
134/*
135 * First initialization entry point, called from setup_arch().
136 *
137 * Find valid controllers and fill in pci_controller structs for each
138 * of them.
139 *
140 * Returns the number of controllers discovered.
141 */
142int __init tile_pci_init(void)
143{
144 int i;
145
146 pr_info("PCI: Searching for controllers...\n");
147
148 /* Do any configuration we need before using the PCIe */
149
150 for (i = 0; i < TILE_NUM_PCIE; i++) {
151 int hv_cfg_fd0 = -1;
152 int hv_cfg_fd1 = -1;
153 int hv_mem_fd = -1;
154 char name[32];
155 struct pci_controller *controller;
156
157 /*
158 * Open the fd to the HV. If it fails then this
159 * device doesn't exist.
160 */
161 hv_cfg_fd0 = tile_pcie_open(i, 0);
162 if (hv_cfg_fd0 < 0)
163 continue;
164 hv_cfg_fd1 = tile_pcie_open(i, 1);
165 if (hv_cfg_fd1 < 0) {
166 pr_err("PCI: Couldn't open config fd to HV "
167 "for controller %d\n", i);
168 goto err_cont;
169 }
170
171 sprintf(name, "pcie/%d/mem", i);
172 hv_mem_fd = hv_dev_open((HV_VirtAddr)name, 0);
173 if (hv_mem_fd < 0) {
174 pr_err("PCI: Could not open mem fd to HV!\n");
175 goto err_cont;
176 }
177
178 pr_info("PCI: Found PCI controller #%d\n", i);
179
180 controller = &controllers[num_controllers];
181
182 if (tile_init_irqs(i, controller)) {
183 pr_err("PCI: Could not initialize "
184 "IRQs, aborting.\n");
185 goto err_cont;
186 }
187
188 controller->index = num_controllers;
189 controller->hv_cfg_fd[0] = hv_cfg_fd0;
190 controller->hv_cfg_fd[1] = hv_cfg_fd1;
191 controller->hv_mem_fd = hv_mem_fd;
192 controller->first_busno = 0;
193 controller->last_busno = 0xff;
194 controller->ops = &tile_cfg_ops;
195
196 num_controllers++;
197 continue;
198
199err_cont:
200 if (hv_cfg_fd0 >= 0)
201 hv_dev_close(hv_cfg_fd0);
202 if (hv_cfg_fd1 >= 0)
203 hv_dev_close(hv_cfg_fd1);
204 if (hv_mem_fd >= 0)
205 hv_dev_close(hv_mem_fd);
206 continue;
207 }
208
209 /*
210 * Before using the PCIe, see if we need to do any platform-specific
211 * configuration, such as the PLX switch Gen 1 issue on TILEmpower.
212 */
213 for (i = 0; i < num_controllers; i++) {
214 struct pci_controller *controller = &controllers[i];
215
216 if (controller->plx_gen1)
217 tile_plx_gen1 = 1;
218 }
219
220 return num_controllers;
221}
222
223/*
224 * (pin - 1) converts from the PCI standard's [1:4] convention to
225 * a normal [0:3] range.
226 */
227static int tile_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
228{
229 struct pci_controller *controller =
230 (struct pci_controller *)dev->sysdata;
231 return (pin - 1) + controller->irq_base;
232}
233
234
235static void __init fixup_read_and_payload_sizes(void)
236{
237 struct pci_dev *dev = NULL;
238 int smallest_max_payload = 0x1; /* Tile maxes out at 256 bytes. */
239 int max_read_size = 0x2; /* Limit to 512 byte reads. */
240 u16 new_values;
241
242 /* Scan for the smallest maximum payload size. */
243 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
244 int pcie_caps_offset;
245 u32 devcap;
246 int max_payload;
247
248 pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
249 if (pcie_caps_offset == 0)
250 continue;
251
252 pci_read_config_dword(dev, pcie_caps_offset + PCI_EXP_DEVCAP,
253 &devcap);
254 max_payload = devcap & PCI_EXP_DEVCAP_PAYLOAD;
255 if (max_payload < smallest_max_payload)
256 smallest_max_payload = max_payload;
257 }
258
259 /* Now set the read request size and the smallest payload size on all devices. */
260 new_values = (max_read_size << 12) | (smallest_max_payload << 5);
261 while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
262 int pcie_caps_offset;
263 u16 devctl;
264
265 pcie_caps_offset = pci_find_capability(dev, PCI_CAP_ID_EXP);
266 if (pcie_caps_offset == 0)
267 continue;
268
269 pci_read_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
270 &devctl);
271 devctl &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);
272 devctl |= new_values;
273 pci_write_config_word(dev, pcie_caps_offset + PCI_EXP_DEVCTL,
274 devctl);
275 }
276}
277
278
279/*
280 * Second PCI initialization entry point, called by subsys_initcall.
281 *
282 * The controllers have been set up by the time we get here, by a call to
283 * tile_pci_init.
284 */
285static int __init pcibios_init(void)
286{
287 int i;
288
289 pr_info("PCI: Probing PCI hardware\n");
290
291 /*
292 * Delay a bit in case devices aren't ready. Some devices are
293 * known to require at least 20ms here, but we use a more
294 * conservative value.
295 */
296 mdelay(250);
297
298 /* Scan all of the recorded PCI controllers. */
299 for (i = 0; i < num_controllers; i++) {
300 struct pci_controller *controller = &controllers[i];
301 struct pci_bus *bus;
302
303 pr_info("PCI: initializing controller #%d\n", i);
304
305 /*
306 * This comes from the generic Linux PCI driver.
307 *
308 * It reads the PCI tree for this bus into the Linux
309 * data structures.
310 *
311 * This is inlined in linux/pci.h and calls into
312 * pci_scan_bus_parented() in probe.c.
313 */
314 bus = pci_scan_bus(0, controller->ops, controller);
315 controller->root_bus = bus;
316 controller->last_busno = bus->subordinate;
317
318 }
319
320 /* Do machine dependent PCI interrupt routing */
321 pci_fixup_irqs(pci_common_swizzle, tile_map_irq);
322
323 /*
324 * This comes from the generic Linux PCI driver.
325 *
326 * It allocates all of the resources (I/O memory, etc)
327 * associated with the devices read in above.
328 */
329
330 pci_assign_unassigned_resources();
331
332 /* Configure the max_read_size and max_payload_size values. */
333 fixup_read_and_payload_sizes();
334
335 /* Record the I/O resources in the PCI controller structure. */
336 for (i = 0; i < num_controllers; i++) {
337 struct pci_bus *root_bus = controllers[i].root_bus;
338 struct pci_bus *next_bus;
339 struct pci_dev *dev;
340
341 list_for_each_entry(dev, &root_bus->devices, bus_list) {
342 /* Find the PCI host controller, i.e. the first bridge. */
343 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI &&
344 (PCI_SLOT(dev->devfn) == 0)) {
345 next_bus = dev->subordinate;
346 controllers[i].mem_resources[0] =
347 *next_bus->resource[0];
348 controllers[i].mem_resources[1] =
349 *next_bus->resource[1];
350 controllers[i].mem_resources[2] =
351 *next_bus->resource[2];
352
353 break;
354 }
355 }
356
357 }
358
359 return 0;
360}
361subsys_initcall(pcibios_init);
362
363/*
364 * No bus fixups needed.
365 */
366void __devinit pcibios_fixup_bus(struct pci_bus *bus)
367{
368 /* Nothing needs to be done. */
369}
370
371/*
372 * This can be called from the generic PCI layer, but doesn't need to
373 * do anything.
374 */
375char __devinit *pcibios_setup(char *str)
376{
377 /* Nothing needs to be done. */
378 return str;
379}
380
381/*
382 * This is called from the generic Linux layer.
383 */
384void __init pcibios_update_irq(struct pci_dev *dev, int irq)
385{
386 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
387}
388
389/*
390 * Enable memory and/or I/O decoding, as appropriate, for the
391 * device described by the 'dev' struct.
392 *
393 * This is called from the generic PCI layer, and can be called
394 * for bridges or endpoints.
395 */
396int pcibios_enable_device(struct pci_dev *dev, int mask)
397{
398 u16 cmd, old_cmd;
399 u8 header_type;
400 int i;
401 struct resource *r;
402
403 pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
404
405 pci_read_config_word(dev, PCI_COMMAND, &cmd);
406 old_cmd = cmd;
407 if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
408 /*
409 * For bridges, we enable both memory and I/O decoding
410 * in all cases.
411 */
412 cmd |= PCI_COMMAND_IO;
413 cmd |= PCI_COMMAND_MEMORY;
414 } else {
415 /*
416 * For endpoints, we enable memory and/or I/O decoding
417 * only if they have a resource of that type.
418 */
419 for (i = 0; i < 6; i++) {
420 r = &dev->resource[i];
421 if (r->flags & IORESOURCE_UNSET) {
422 pr_err("PCI: Device %s not available "
423 "because of resource collisions\n",
424 pci_name(dev));
425 return -EINVAL;
426 }
427 if (r->flags & IORESOURCE_IO)
428 cmd |= PCI_COMMAND_IO;
429 if (r->flags & IORESOURCE_MEM)
430 cmd |= PCI_COMMAND_MEMORY;
431 }
432 }
433
434 /*
435 * We only write the command if it changed.
436 */
437 if (cmd != old_cmd)
438 pci_write_config_word(dev, PCI_COMMAND, cmd);
439 return 0;
440}
441
442void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
443{
444 unsigned long start = pci_resource_start(dev, bar);
445 unsigned long len = pci_resource_len(dev, bar);
446 unsigned long flags = pci_resource_flags(dev, bar);
447
448 if (!len)
449 return NULL;
450 if (max && len > max)
451 len = max;
452
453 if (!(flags & IORESOURCE_MEM)) {
454 pr_info("PCI: Trying to map invalid resource %#lx\n", flags);
455 start = 0;
456 }
457
458 return (void __iomem *)start;
459}
460EXPORT_SYMBOL(pci_iomap);
461
462
463/****************************************************************
464 *
465 * Tile PCI config space read/write routines
466 *
467 ****************************************************************/
468
469/*
470 * These are the normal config-space read and write ops.
471 * They are reached via macro wrappers such as pci_bus_read_config_byte().
472 *
473 * devfn is the combined PCI slot & function.
474 *
475 * offset is in bytes, from the start of config space for the
476 * specified bus & slot.
477 */
478
479static int __devinit tile_cfg_read(struct pci_bus *bus,
480 unsigned int devfn,
481 int offset,
482 int size,
483 u32 *val)
484{
485 struct pci_controller *controller = bus->sysdata;
486 int busnum = bus->number & 0xff;
487 int slot = (devfn >> 3) & 0x1f;
488 int function = devfn & 0x7;
489 u32 addr;
490 int config_mode = 1;
491
492 /*
493 * There is no bridge between the Tile and bus 0, so we
494 * use config0 to talk to bus 0.
495 *
496 * If we're talking to a bus other than zero then we
497 * must have found a bridge.
498 */
499 if (busnum == 0) {
500 /*
501 * We fake an empty slot for (busnum == 0) && (slot > 0),
502 * since there is only one slot on bus 0.
503 */
504 if (slot) {
505 *val = 0xFFFFFFFF;
506 return 0;
507 }
508 config_mode = 0;
509 }
510
511 addr = busnum << 20; /* Bus in 27:20 */
512 addr |= slot << 15; /* Slot (device) in 19:15 */
513 addr |= function << 12; /* Function in 14:12 */
514 addr |= (offset & 0xFFF); /* Byte address in 11:0 */
515
516 return hv_dev_pread(controller->hv_cfg_fd[config_mode], 0,
517 (HV_VirtAddr)(val), size, addr);
518}
519
520
521/*
522 * See tile_cfg_read() for relevant comments.
523 * Note that "val" is the value to write, not a pointer to that value.
524 */
525static int __devinit tile_cfg_write(struct pci_bus *bus,
526 unsigned int devfn,
527 int offset,
528 int size,
529 u32 val)
530{
531 struct pci_controller *controller = bus->sysdata;
532 int busnum = bus->number & 0xff;
533 int slot = (devfn >> 3) & 0x1f;
534 int function = devfn & 0x7;
535 u32 addr;
536 int config_mode = 1;
537 HV_VirtAddr valp = (HV_VirtAddr)&val;
538
539 /*
540 * For bus 0 slot 0 we use config 0 accesses.
541 */
542 if (busnum == 0) {
543 /*
544 * We fake an empty slot for (busnum == 0) && (slot > 0),
545 * since there is only one slot on bus 0.
546 */
547 if (slot)
548 return 0;
549 config_mode = 0;
550 }
551
552 addr = busnum << 20; /* Bus in 27:20 */
553 addr |= slot << 15; /* Slot (device) in 19:15 */
554 addr |= function << 12; /* Function in 14:12 */
555 addr |= (offset & 0xFFF); /* Byte address in 11:0 */
556
557#ifdef __BIG_ENDIAN
558 /* Point to the correct part of the 32-bit "val". */
559 valp += 4 - size;
560#endif
561
562 return hv_dev_pwrite(controller->hv_cfg_fd[config_mode], 0,
563 valp, size, addr);
564}
565
566
567static struct pci_ops tile_cfg_ops = {
568 .read = tile_cfg_read,
569 .write = tile_cfg_write,
570};
571
572
573/*
574 * In the following, each PCI controller's mem_resources[1]
575 * represents its (non-prefetchable) PCI memory resource.
576 * mem_resources[0] and mem_resources[2] refer to its PCI I/O and
577 * prefetchable PCI memory resources, respectively.
578 * For more details, see pci_setup_bridge() in setup-bus.c.
579 * By comparing the target PCI memory address against the
580 * end address of controller 0, we can determine the controller
581 * that should accept the PCI memory access.
582 */
583#define TILE_READ(size, type) \
584type _tile_read##size(unsigned long addr) \
585{ \
586 type val; \
587 int idx = 0; \
588 if (addr > controllers[0].mem_resources[1].end && \
589 addr > controllers[0].mem_resources[2].end) \
590 idx = 1; \
591 if (hv_dev_pread(controllers[idx].hv_mem_fd, 0, \
592 (HV_VirtAddr)(&val), sizeof(type), addr)) \
593 pr_err("PCI: read %zd bytes at 0x%lX failed\n", \
594 sizeof(type), addr); \
595 return val; \
596} \
597EXPORT_SYMBOL(_tile_read##size)
598
599TILE_READ(b, u8);
600TILE_READ(w, u16);
601TILE_READ(l, u32);
602TILE_READ(q, u64);
603
604#define TILE_WRITE(size, type) \
605void _tile_write##size(type val, unsigned long addr) \
606{ \
607 int idx = 0; \
608 if (addr > controllers[0].mem_resources[1].end && \
609 addr > controllers[0].mem_resources[2].end) \
610 idx = 1; \
611 if (hv_dev_pwrite(controllers[idx].hv_mem_fd, 0, \
612 (HV_VirtAddr)(&val), sizeof(type), addr)) \
613 pr_err("PCI: write %zd bytes at 0x%lX failed\n", \
614 sizeof(type), addr); \
615} \
616EXPORT_SYMBOL(_tile_write##size)
617
618TILE_WRITE(b, u8);
619TILE_WRITE(w, u16);
620TILE_WRITE(l, u32);
621TILE_WRITE(q, u64);
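The config accessors above (tile_cfg_read/tile_cfg_write) pack bus, device, function, and register offset into a single word before handing it to the hypervisor, much like the standard PCIe ECAM layout. A minimal standalone sketch of that packing; the helper name make_cfg_addr is illustrative, not part of the file:

#include <stdint.h>

/* Pack bus/slot/function/offset the way the Tile config ops do:
 * bus in bits 27:20, device in 19:15, function in 14:12, and the
 * byte offset in 11:0.
 */
static uint32_t make_cfg_addr(unsigned int bus, unsigned int slot,
                              unsigned int func, unsigned int offset)
{
        uint32_t addr;

        addr  = (bus  & 0xff) << 20;
        addr |= (slot & 0x1f) << 15;
        addr |= (func & 0x07) << 12;
        addr |= offset & 0xfff;
        return addr;
}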
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index fb0b3cbeae14..f18573643ed1 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -840,7 +840,7 @@ static int __init topology_init(void)
840 for_each_online_node(i) 840 for_each_online_node(i)
841 register_one_node(i); 841 register_one_node(i);
842 842
843 for_each_present_cpu(i) 843 for (i = 0; i < smp_height * smp_width; ++i)
844 register_cpu(&cpu_devices[i], i); 844 register_cpu(&cpu_devices[i], i);
845 845
846 return 0; 846 return 0;
diff --git a/arch/tile/lib/memchr_32.c b/arch/tile/lib/memchr_32.c
index 6235283b4859..cc3d9badf030 100644
--- a/arch/tile/lib/memchr_32.c
+++ b/arch/tile/lib/memchr_32.c
@@ -18,12 +18,24 @@
18 18
19void *memchr(const void *s, int c, size_t n) 19void *memchr(const void *s, int c, size_t n)
20{ 20{
21 const uint32_t *last_word_ptr;
22 const uint32_t *p;
23 const char *last_byte_ptr;
24 uintptr_t s_int;
25 uint32_t goal, before_mask, v, bits;
26 char *ret;
27
28 if (__builtin_expect(n == 0, 0)) {
29 /* Don't dereference any memory if the array is empty. */
30 return NULL;
31 }
32
21 /* Get an aligned pointer. */ 33 /* Get an aligned pointer. */
22 const uintptr_t s_int = (uintptr_t) s; 34 s_int = (uintptr_t) s;
23 const uint32_t *p = (const uint32_t *)(s_int & -4); 35 p = (const uint32_t *)(s_int & -4);
24 36
25 /* Create four copies of the byte for which we are looking. */ 37 /* Create four copies of the byte for which we are looking. */
26 const uint32_t goal = 0x01010101 * (uint8_t) c; 38 goal = 0x01010101 * (uint8_t) c;
27 39
28 /* Read the first word, but munge it so that bytes before the array 40 /* Read the first word, but munge it so that bytes before the array
29 * will not match goal. 41 * will not match goal.
@@ -31,23 +43,14 @@ void *memchr(const void *s, int c, size_t n)
31 * Note that this shift count expression works because we know 43 * Note that this shift count expression works because we know
32 * shift counts are taken mod 32. 44 * shift counts are taken mod 32.
33 */ 45 */
34 const uint32_t before_mask = (1 << (s_int << 3)) - 1; 46 before_mask = (1 << (s_int << 3)) - 1;
35 uint32_t v = (*p | before_mask) ^ (goal & before_mask); 47 v = (*p | before_mask) ^ (goal & before_mask);
36 48
37 /* Compute the address of the last byte. */ 49 /* Compute the address of the last byte. */
38 const char *const last_byte_ptr = (const char *)s + n - 1; 50 last_byte_ptr = (const char *)s + n - 1;
39 51
40 /* Compute the address of the word containing the last byte. */ 52 /* Compute the address of the word containing the last byte. */
41 const uint32_t *const last_word_ptr = 53 last_word_ptr = (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
42 (const uint32_t *)((uintptr_t) last_byte_ptr & -4);
43
44 uint32_t bits;
45 char *ret;
46
47 if (__builtin_expect(n == 0, 0)) {
48 /* Don't dereference any memory if the array is empty. */
49 return NULL;
50 }
51 54
52 while ((bits = __insn_seqb(v, goal)) == 0) { 55 while ((bits = __insn_seqb(v, goal)) == 0) {
53 if (__builtin_expect(p == last_word_ptr, 0)) { 56 if (__builtin_expect(p == last_word_ptr, 0)) {
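The memchr_32.c change is purely an ordering fix: the n == 0 test must run before the first word load, since the old code computed and dereferenced an aligned pointer even for an empty buffer. A portable sketch of the same word-at-a-time idea, assuming 32-bit words, a byte-wise head/tail in place of the masked first-word trick, and the classic zero-byte test instead of the Tile-specific __insn_seqb instruction:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define ONES  0x01010101u
#define HIGHS 0x80808080u

/* Nonzero iff some byte of v equals the byte replicated in goal. */
static uint32_t has_goal_byte(uint32_t v, uint32_t goal)
{
        uint32_t x = v ^ goal;          /* matching bytes become 0 */
        return (x - ONES) & ~x & HIGHS;
}

void *memchr_sketch(const void *s, int c, size_t n)
{
        const unsigned char *p = s;
        uint32_t goal = ONES * (unsigned char)c;

        if (n == 0)
                return NULL;            /* the fix: test before any load */

        /* Head: advance to 4-byte alignment one byte at a time. */
        while (((uintptr_t)p & 3) && n) {
                if (*p == (unsigned char)c)
                        return (void *)p;
                p++, n--;
        }
        /* Body: scan a word at a time. */
        while (n >= 4) {
                uint32_t v;

                memcpy(&v, p, 4);       /* aligned load, no aliasing UB */
                if (has_goal_byte(v, goal))
                        break;          /* the match is in this word */
                p += 4, n -= 4;
        }
        /* Tail (and the matched word): finish byte-wise. */
        while (n--) {
                if (*p == (unsigned char)c)
                        return (void *)p;
                p++;
        }
        return NULL;
}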
diff --git a/arch/tile/lib/spinlock_32.c b/arch/tile/lib/spinlock_32.c
index 485e24d62c6b..5cd1c4004eca 100644
--- a/arch/tile/lib/spinlock_32.c
+++ b/arch/tile/lib/spinlock_32.c
@@ -167,23 +167,30 @@ void arch_write_lock_slow(arch_rwlock_t *rwlock, u32 val)
167 * when we compare them. 167 * when we compare them.
168 */ 168 */
169 u32 my_ticket_; 169 u32 my_ticket_;
170 u32 iterations = 0;
170 171
171 /* Take out the next ticket; this will also stop would-be readers. */ 172 /*
172 if (val & 1) 173 * Wait until there are no readers, then bump up the next
173 val = get_rwlock(rwlock); 174 * field and capture the ticket value.
174 rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT); 175 */
176 for (;;) {
177 if (!(val & 1)) {
178 if ((val >> RD_COUNT_SHIFT) == 0)
179 break;
180 rwlock->lock = val;
181 }
182 delay_backoff(iterations++);
183 val = __insn_tns((int *)&rwlock->lock);
184 }
175 185
176 /* Extract my ticket value from the original word. */ 186 /* Take out the next ticket and extract my ticket value. */
187 rwlock->lock = __insn_addb(val, 1 << WR_NEXT_SHIFT);
177 my_ticket_ = val >> WR_NEXT_SHIFT; 188 my_ticket_ = val >> WR_NEXT_SHIFT;
178 189
179 /* 190 /* Wait until the "current" field matches our ticket. */
180 * Wait until the "current" field matches our ticket, and
181 * there are no remaining readers.
182 */
183 for (;;) { 191 for (;;) {
184 u32 curr_ = val >> WR_CURR_SHIFT; 192 u32 curr_ = val >> WR_CURR_SHIFT;
185 u32 readers = val >> RD_COUNT_SHIFT; 193 u32 delta = ((my_ticket_ - curr_) & WR_MASK);
186 u32 delta = ((my_ticket_ - curr_) & WR_MASK) + !!readers;
187 if (likely(delta == 0)) 194 if (likely(delta == 0))
188 break; 195 break;
189 196
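The rewritten arch_write_lock_slow() above first spins (with backoff) until all readers have drained, and only then takes a write ticket, so the later wait compares ticket counters alone. Because the counters are narrow and wrap, the comparison is done modulo their width; a small sketch of that test, with an assumed 8-bit field width:

#include <stdint.h>

#define WR_MASK 0xffu   /* assumed 8-bit ticket counters */

/* A writer may proceed once the lock's "current" counter has caught
 * up with its ticket.  Taking the difference modulo 2^8 keeps the
 * test correct even after the counters wrap around.
 */
static int my_turn(uint8_t my_ticket, uint8_t curr)
{
        return (((uint32_t)(my_ticket - curr)) & WR_MASK) == 0;
}

static void wait_for_turn(volatile uint8_t *curr, uint8_t my_ticket)
{
        while (!my_turn(my_ticket, *curr))
                ;       /* spin; the real code adds delay_backoff() */
}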
diff --git a/arch/um/drivers/line.c b/arch/um/drivers/line.c
index 7f7338c90784..1664cce7b0ac 100644
--- a/arch/um/drivers/line.c
+++ b/arch/um/drivers/line.c
@@ -727,6 +727,9 @@ struct winch {
727 727
728static void free_winch(struct winch *winch, int free_irq_ok) 728static void free_winch(struct winch *winch, int free_irq_ok)
729{ 729{
730 if (free_irq_ok)
731 free_irq(WINCH_IRQ, winch);
732
730 list_del(&winch->list); 733 list_del(&winch->list);
731 734
732 if (winch->pid != -1) 735 if (winch->pid != -1)
@@ -735,8 +738,6 @@ static void free_winch(struct winch *winch, int free_irq_ok)
735 os_close_file(winch->fd); 738 os_close_file(winch->fd);
736 if (winch->stack != 0) 739 if (winch->stack != 0)
737 free_stack(winch->stack, 0); 740 free_stack(winch->stack, 0);
738 if (free_irq_ok)
739 free_irq(WINCH_IRQ, winch);
740 kfree(winch); 741 kfree(winch);
741} 742}
742 743
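The line.c hunk reorders teardown: free_irq() now runs before the winch is unlinked and freed, so a late interrupt can no longer race with the structure being dismantled. The general shape of that pattern, sketched with an illustrative structure rather than the driver's real one:

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/slab.h>

struct widget {                 /* illustrative only */
        int irq;
        struct list_head list;
};

static void widget_teardown(struct widget *w)
{
        /* Quiesce first: once free_irq() returns, the handler can no
         * longer run against this widget.
         */
        free_irq(w->irq, w);
        /* Only then unlink and free; doing these first would let a
         * late interrupt dereference freed memory.
         */
        list_del(&w->list);
        kfree(w);
}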
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e8327686d3c5..e330da21b84f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -21,7 +21,7 @@ config X86
21 select HAVE_UNSTABLE_SCHED_CLOCK 21 select HAVE_UNSTABLE_SCHED_CLOCK
22 select HAVE_IDE 22 select HAVE_IDE
23 select HAVE_OPROFILE 23 select HAVE_OPROFILE
24 select HAVE_PERF_EVENTS if (!M386 && !M486) 24 select HAVE_PERF_EVENTS
25 select HAVE_IRQ_WORK 25 select HAVE_IRQ_WORK
26 select HAVE_IOREMAP_PROT 26 select HAVE_IOREMAP_PROT
27 select HAVE_KPROBES 27 select HAVE_KPROBES
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 4d293dced62f..9479a037419f 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -216,8 +216,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
216} 216}
217 217
218/* Return a pointer with offset calculated */ 218
219static inline unsigned long __set_fixmap_offset(enum fixed_addresses idx, 219static __always_inline unsigned long
220 phys_addr_t phys, pgprot_t flags) 220__set_fixmap_offset(enum fixed_addresses idx, phys_addr_t phys, pgprot_t flags)
221{ 221{
222 __set_fixmap(idx, phys, flags); 222 __set_fixmap(idx, phys, flags);
223 return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1)); 223 return fix_to_virt(idx) + (phys & (PAGE_SIZE - 1));
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 3ea3dc487047..6b89f5e86021 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -128,7 +128,7 @@
128#define FAM10H_MMIO_CONF_ENABLE (1<<0) 128#define FAM10H_MMIO_CONF_ENABLE (1<<0)
129#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf 129#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
130#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2 130#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
131#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffff 131#define FAM10H_MMIO_CONF_BASE_MASK 0xfffffffULL
132#define FAM10H_MMIO_CONF_BASE_SHIFT 20 132#define FAM10H_MMIO_CONF_BASE_SHIFT 20
133#define MSR_FAM10H_NODE_ID 0xc001100c 133#define MSR_FAM10H_NODE_ID 0xc001100c
134 134
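The ULL suffix matters because FAM10H_MMIO_CONF_BASE_MASK is shifted left by FAM10H_MMIO_CONF_BASE_SHIFT (20) before being applied to a 64-bit MSR value; as a 32-bit constant the shift silently drops the mask's high bits. A two-value illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* 32-bit shift wraps: only the low bits survive. */
        uint64_t wrong = 0xfffffffu << 20;      /* 0xfff00000 */
        /* 64-bit shift keeps the whole mask. */
        uint64_t right = 0xfffffffULL << 20;    /* 0xfffffff00000 */

        printf("%#llx vs %#llx\n",
               (unsigned long long)wrong, (unsigned long long)right);
        return 0;
}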
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 18e3b8a8709f..ef9975812c77 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -824,27 +824,27 @@ static __always_inline void arch_spin_unlock(struct arch_spinlock *lock)
824#define __PV_IS_CALLEE_SAVE(func) \ 824#define __PV_IS_CALLEE_SAVE(func) \
825 ((struct paravirt_callee_save) { func }) 825 ((struct paravirt_callee_save) { func })
826 826
827static inline unsigned long arch_local_save_flags(void) 827static inline notrace unsigned long arch_local_save_flags(void)
828{ 828{
829 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl); 829 return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
830} 830}
831 831
832static inline void arch_local_irq_restore(unsigned long f) 832static inline notrace void arch_local_irq_restore(unsigned long f)
833{ 833{
834 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f); 834 PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
835} 835}
836 836
837static inline void arch_local_irq_disable(void) 837static inline notrace void arch_local_irq_disable(void)
838{ 838{
839 PVOP_VCALLEE0(pv_irq_ops.irq_disable); 839 PVOP_VCALLEE0(pv_irq_ops.irq_disable);
840} 840}
841 841
842static inline void arch_local_irq_enable(void) 842static inline notrace void arch_local_irq_enable(void)
843{ 843{
844 PVOP_VCALLEE0(pv_irq_ops.irq_enable); 844 PVOP_VCALLEE0(pv_irq_ops.irq_enable);
845} 845}
846 846
847static inline unsigned long arch_local_irq_save(void) 847static inline notrace unsigned long arch_local_irq_save(void)
848{ 848{
849 unsigned long f; 849 unsigned long f;
850 850
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 7f7e577a0e39..31d84acc1512 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -11,6 +11,7 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
11void pvclock_read_wallclock(struct pvclock_wall_clock *wall, 11void pvclock_read_wallclock(struct pvclock_wall_clock *wall,
12 struct pvclock_vcpu_time_info *vcpu, 12 struct pvclock_vcpu_time_info *vcpu,
13 struct timespec *ts); 13 struct timespec *ts);
14void pvclock_resume(void);
14 15
15/* 16/*
16 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction, 17 * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
diff --git a/arch/x86/include/asm/uv/uv_hub.h b/arch/x86/include/asm/uv/uv_hub.h
index e969f691cbfd..a501741c2335 100644
--- a/arch/x86/include/asm/uv/uv_hub.h
+++ b/arch/x86/include/asm/uv/uv_hub.h
@@ -199,6 +199,8 @@ union uvh_apicid {
199#define UVH_APICID 0x002D0E00L 199#define UVH_APICID 0x002D0E00L
200#define UV_APIC_PNODE_SHIFT 6 200#define UV_APIC_PNODE_SHIFT 6
201 201
202#define UV_APICID_HIBIT_MASK 0xffff0000
203
202/* Local Bus from cpu's perspective */ 204/* Local Bus from cpu's perspective */
203#define LOCAL_BUS_BASE 0x1c00000 205#define LOCAL_BUS_BASE 0x1c00000
204#define LOCAL_BUS_SIZE (4 * 1024 * 1024) 206#define LOCAL_BUS_SIZE (4 * 1024 * 1024)
@@ -491,8 +493,10 @@ static inline void uv_set_cpu_scir_bits(int cpu, unsigned char value)
491 } 493 }
492} 494}
493 495
496extern unsigned int uv_apicid_hibits;
494static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode) 497static unsigned long uv_hub_ipi_value(int apicid, int vector, int mode)
495{ 498{
499 apicid |= uv_apicid_hibits;
496 return (1UL << UVH_IPI_INT_SEND_SHFT) | 500 return (1UL << UVH_IPI_INT_SEND_SHFT) |
497 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) | 501 ((apicid) << UVH_IPI_INT_APIC_ID_SHFT) |
498 (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) | 502 (mode << UVH_IPI_INT_DELIVERY_MODE_SHFT) |
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index 6d90adf4428a..20cafeac7455 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -5,7 +5,7 @@
5 * 5 *
6 * SGI UV MMR definitions 6 * SGI UV MMR definitions
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10 10
11#ifndef _ASM_X86_UV_UV_MMRS_H 11#ifndef _ASM_X86_UV_UV_MMRS_H
@@ -754,6 +754,23 @@ union uvh_lb_bau_sb_descriptor_base_u {
754}; 754};
755 755
756/* ========================================================================= */ 756/* ========================================================================= */
757/* UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK */
758/* ========================================================================= */
759#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK 0x320130UL
760#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_32 0x009f0
761
762#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_SHFT 0
763#define UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK_BIT_ENABLES_MASK 0x00000000ffffffffUL
764
765union uvh_lb_target_physical_apic_id_mask_u {
766 unsigned long v;
767 struct uvh_lb_target_physical_apic_id_mask_s {
768 unsigned long bit_enables : 32; /* RW */
769 unsigned long rsvd_32_63 : 32; /* */
770 } s;
771};
772
773/* ========================================================================= */
757/* UVH_NODE_ID */ 774/* UVH_NODE_ID */
758/* ========================================================================= */ 775/* ========================================================================= */
759#define UVH_NODE_ID 0x0UL 776#define UVH_NODE_ID 0x0UL
diff --git a/arch/x86/include/asm/xen/interface.h b/arch/x86/include/asm/xen/interface.h
index e8506c1f0c55..1c10c88ee4e1 100644
--- a/arch/x86/include/asm/xen/interface.h
+++ b/arch/x86/include/asm/xen/interface.h
@@ -61,9 +61,9 @@ DEFINE_GUEST_HANDLE(void);
61#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START) 61#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
62#endif 62#endif
63 63
64#ifndef machine_to_phys_mapping 64#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
65#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START) 65#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
66#endif 66#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>__MACH2PHYS_SHIFT)
67 67
68/* Maximum number of virtual CPUs in multi-processor guests. */ 68/* Maximum number of virtual CPUs in multi-processor guests. */
69#define MAX_VIRT_CPUS 32 69#define MAX_VIRT_CPUS 32
diff --git a/arch/x86/include/asm/xen/interface_32.h b/arch/x86/include/asm/xen/interface_32.h
index 42a7e004ae5c..8413688b2571 100644
--- a/arch/x86/include/asm/xen/interface_32.h
+++ b/arch/x86/include/asm/xen/interface_32.h
@@ -32,6 +32,11 @@
32/* And the trap vector is... */ 32/* And the trap vector is... */
33#define TRAP_INSTR "int $0x82" 33#define TRAP_INSTR "int $0x82"
34 34
35#define __MACH2PHYS_VIRT_START 0xF5800000
36#define __MACH2PHYS_VIRT_END 0xF6800000
37
38#define __MACH2PHYS_SHIFT 2
39
35/* 40/*
36 * Virtual addresses beyond this are not modifiable by guest OSes. The 41 * Virtual addresses beyond this are not modifiable by guest OSes. The
37 * machine->physical mapping table starts at this address, read-only. 42 * machine->physical mapping table starts at this address, read-only.
diff --git a/arch/x86/include/asm/xen/interface_64.h b/arch/x86/include/asm/xen/interface_64.h
index 100d2662b97c..839a4811cf98 100644
--- a/arch/x86/include/asm/xen/interface_64.h
+++ b/arch/x86/include/asm/xen/interface_64.h
@@ -39,18 +39,7 @@
39#define __HYPERVISOR_VIRT_END 0xFFFF880000000000 39#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
40#define __MACH2PHYS_VIRT_START 0xFFFF800000000000 40#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
41#define __MACH2PHYS_VIRT_END 0xFFFF804000000000 41#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
42 42#define __MACH2PHYS_SHIFT 3
43#ifndef HYPERVISOR_VIRT_START
44#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
45#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
46#endif
47
48#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
49#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
50#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
51#ifndef machine_to_phys_mapping
52#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
53#endif
54 43
55/* 44/*
56 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base) 45 * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index dd8c1414b3d5..8760cc60a21c 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -5,6 +5,7 @@
5#include <linux/types.h> 5#include <linux/types.h>
6#include <linux/spinlock.h> 6#include <linux/spinlock.h>
7#include <linux/pfn.h> 7#include <linux/pfn.h>
8#include <linux/mm.h>
8 9
9#include <asm/uaccess.h> 10#include <asm/uaccess.h>
10#include <asm/page.h> 11#include <asm/page.h>
@@ -35,6 +36,8 @@ typedef struct xpaddr {
35#define MAX_DOMAIN_PAGES \ 36#define MAX_DOMAIN_PAGES \
36 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE)) 37 ((unsigned long)((u64)CONFIG_XEN_MAX_DOMAIN_MEMORY * 1024 * 1024 * 1024 / PAGE_SIZE))
37 38
39extern unsigned long *machine_to_phys_mapping;
40extern unsigned int machine_to_phys_order;
38 41
39extern unsigned long get_phys_to_machine(unsigned long pfn); 42extern unsigned long get_phys_to_machine(unsigned long pfn);
40extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); 43extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
@@ -69,10 +72,8 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
69 if (xen_feature(XENFEAT_auto_translated_physmap)) 72 if (xen_feature(XENFEAT_auto_translated_physmap))
70 return mfn; 73 return mfn;
71 74
72#if 0
73 if (unlikely((mfn >> machine_to_phys_order) != 0)) 75 if (unlikely((mfn >> machine_to_phys_order) != 0))
74 return max_mapnr; 76 return ~0;
75#endif
76 77
77 pfn = 0; 78 pfn = 0;
78 /* 79 /*
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index cefd6942f0e9..62f6e1e55b90 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -17,15 +17,16 @@
17#include <linux/nmi.h> 17#include <linux/nmi.h>
18#include <linux/module.h> 18#include <linux/module.h>
19 19
20/* For reliability, we're prepared to waste bits here. */
21static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
22
23u64 hw_nmi_get_sample_period(void) 20u64 hw_nmi_get_sample_period(void)
24{ 21{
25 return (u64)(cpu_khz) * 1000 * 60; 22 return (u64)(cpu_khz) * 1000 * 60;
26} 23}
27 24
28#ifdef ARCH_HAS_NMI_WATCHDOG 25#ifdef ARCH_HAS_NMI_WATCHDOG
26
27/* For reliability, we're prepared to waste bits here. */
28static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
29
29void arch_trigger_all_cpu_backtrace(void) 30void arch_trigger_all_cpu_backtrace(void)
30{ 31{
31 int i; 32 int i;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 194539aea175..c1c52c341f40 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -44,6 +44,8 @@ static u64 gru_start_paddr, gru_end_paddr;
44static union uvh_apicid uvh_apicid; 44static union uvh_apicid uvh_apicid;
45int uv_min_hub_revision_id; 45int uv_min_hub_revision_id;
46EXPORT_SYMBOL_GPL(uv_min_hub_revision_id); 46EXPORT_SYMBOL_GPL(uv_min_hub_revision_id);
47unsigned int uv_apicid_hibits;
48EXPORT_SYMBOL_GPL(uv_apicid_hibits);
47static DEFINE_SPINLOCK(uv_nmi_lock); 49static DEFINE_SPINLOCK(uv_nmi_lock);
48 50
49static inline bool is_GRU_range(u64 start, u64 end) 51static inline bool is_GRU_range(u64 start, u64 end)
@@ -85,6 +87,23 @@ static void __init early_get_apic_pnode_shift(void)
85 uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT; 87 uvh_apicid.s.pnode_shift = UV_APIC_PNODE_SHIFT;
86} 88}
87 89
90/*
91 * Add an extra bit as dictated by the BIOS to the destination apicid of
92 * interrupts potentially passing through the UV HUB. This prevents
93 * a deadlock between interrupts and IO port operations.
94 */
95static void __init uv_set_apicid_hibit(void)
96{
97 union uvh_lb_target_physical_apic_id_mask_u apicid_mask;
98 unsigned long *mmr;
99
100 mmr = early_ioremap(UV_LOCAL_MMR_BASE |
101 UVH_LB_TARGET_PHYSICAL_APIC_ID_MASK, sizeof(*mmr));
102 apicid_mask.v = *mmr;
103 early_iounmap(mmr, sizeof(*mmr));
104 uv_apicid_hibits = apicid_mask.s.bit_enables & UV_APICID_HIBIT_MASK;
105}
106
88static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 107static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
89{ 108{
90 int nodeid; 109 int nodeid;
@@ -102,6 +121,7 @@ static int __init uv_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
102 __get_cpu_var(x2apic_extra_bits) = 121 __get_cpu_var(x2apic_extra_bits) =
103 nodeid << (uvh_apicid.s.pnode_shift - 1); 122 nodeid << (uvh_apicid.s.pnode_shift - 1);
104 uv_system_type = UV_NON_UNIQUE_APIC; 123 uv_system_type = UV_NON_UNIQUE_APIC;
124 uv_set_apicid_hibit();
105 return 1; 125 return 1;
106 } 126 }
107 } 127 }
@@ -155,6 +175,7 @@ static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_ri
155 int pnode; 175 int pnode;
156 176
157 pnode = uv_apicid_to_pnode(phys_apicid); 177 pnode = uv_apicid_to_pnode(phys_apicid);
178 phys_apicid |= uv_apicid_hibits;
158 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 179 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
159 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 180 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
160 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 181 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
@@ -236,7 +257,7 @@ static unsigned int uv_cpu_mask_to_apicid(const struct cpumask *cpumask)
236 int cpu = cpumask_first(cpumask); 257 int cpu = cpumask_first(cpumask);
237 258
238 if ((unsigned)cpu < nr_cpu_ids) 259 if ((unsigned)cpu < nr_cpu_ids)
239 return per_cpu(x86_cpu_to_apicid, cpu); 260 return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
240 else 261 else
241 return BAD_APICID; 262 return BAD_APICID;
242} 263}
@@ -255,7 +276,7 @@ uv_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
255 if (cpumask_test_cpu(cpu, cpu_online_mask)) 276 if (cpumask_test_cpu(cpu, cpu_online_mask))
256 break; 277 break;
257 } 278 }
258 return per_cpu(x86_cpu_to_apicid, cpu); 279 return per_cpu(x86_cpu_to_apicid, cpu) | uv_apicid_hibits;
259} 280}
260 281
261static unsigned int x2apic_get_apic_id(unsigned long x) 282static unsigned int x2apic_get_apic_id(unsigned long x)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index ed6310183efb..6d75b9145b13 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -381,6 +381,20 @@ static void release_pmc_hardware(void) {}
381 381
382#endif 382#endif
383 383
384static bool check_hw_exists(void)
385{
386 u64 val, val_new = 0;
387 int ret = 0;
388
389 val = 0xabcdUL;
390 ret |= checking_wrmsrl(x86_pmu.perfctr, val);
391 ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
392 if (ret || val != val_new)
393 return false;
394
395 return true;
396}
397
384static void reserve_ds_buffers(void); 398static void reserve_ds_buffers(void);
385static void release_ds_buffers(void); 399static void release_ds_buffers(void);
386 400
@@ -1372,6 +1386,12 @@ void __init init_hw_perf_events(void)
1372 1386
1373 pmu_check_apic(); 1387 pmu_check_apic();
1374 1388
1389 /* sanity check that the hardware exists or is emulated */
1390 if (!check_hw_exists()) {
1391 pr_cont("Broken PMU hardware detected, software events only.\n");
1392 return;
1393 }
1394
1375 pr_cont("%s PMU driver.\n", x86_pmu.name); 1395 pr_cont("%s PMU driver.\n", x86_pmu.name);
1376 1396
1377 if (x86_pmu.quirks) 1397 if (x86_pmu.quirks)
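check_hw_exists() above probes for a working PMU by writing a recognizable marker to the first counter MSR with the fault-tolerant wrmsr/rdmsr variants and reading it back; a fault, or a value that does not stick, means the counters are absent or not emulated (common under hypervisors). The same write/read-back probe in a hedged, standalone form, with a simulated register standing in for the MSR:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated register file standing in for wrmsr/rdmsr (illustrative). */
static uint64_t fake_reg;
static bool reg_present = true;     /* flip to false to simulate no PMU */

static int checked_write_reg(uint64_t val)
{
        if (!reg_present)
                return -1;          /* a real wrmsr would fault */
        fake_reg = val;
        return 0;
}

static int checked_read_reg(uint64_t *val)
{
        if (!reg_present)
                return -1;
        *val = fake_reg;
        return 0;
}

/* Probe: write a marker, read it back; any fault or mismatch means
 * the hardware behind the register is missing or not emulated.
 */
static bool reg_exists(void)
{
        const uint64_t marker = 0xabcdULL;
        uint64_t readback = 0;

        if (checked_write_reg(marker))
                return false;
        if (checked_read_reg(&readback))
                return false;
        return readback == marker;
}

int main(void)
{
        printf("PMU %s\n", reg_exists() ? "present" : "absent");
        return 0;
}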
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 59e175e89599..591e60104278 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -395,7 +395,7 @@ sysenter_past_esp:
395 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words 395 * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
396 * pushed above; +8 corresponds to copy_thread's esp0 setting. 396 * pushed above; +8 corresponds to copy_thread's esp0 setting.
397 */ 397 */
398 pushl_cfi (TI_sysenter_return-THREAD_SIZE_asm+8+4*4)(%esp) 398 pushl_cfi ((TI_sysenter_return)-THREAD_SIZE_asm+8+4*4)(%esp)
399 CFI_REL_OFFSET eip, 0 399 CFI_REL_OFFSET eip, 0
400 400
401 pushl_cfi %eax 401 pushl_cfi %eax
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index fe2690d71c0c..e3ba417e8697 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -295,6 +295,7 @@ ENDPROC(native_usergs_sysret64)
295 .endm 295 .endm
296 296
297/* save partial stack frame */ 297/* save partial stack frame */
298 .pushsection .kprobes.text, "ax"
298ENTRY(save_args) 299ENTRY(save_args)
299 XCPT_FRAME 300 XCPT_FRAME
300 cld 301 cld
@@ -334,6 +335,7 @@ ENTRY(save_args)
334 ret 335 ret
335 CFI_ENDPROC 336 CFI_ENDPROC
336END(save_args) 337END(save_args)
338 .popsection
337 339
338ENTRY(save_rest) 340ENTRY(save_rest)
339 PARTIAL_FRAME 1 REST_SKIP+8 341 PARTIAL_FRAME 1 REST_SKIP+8
diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c
index ff15c9dcc25d..42c594254507 100644
--- a/arch/x86/kernel/hw_breakpoint.c
+++ b/arch/x86/kernel/hw_breakpoint.c
@@ -433,6 +433,10 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args)
433 dr6_p = (unsigned long *)ERR_PTR(args->err); 433 dr6_p = (unsigned long *)ERR_PTR(args->err);
434 dr6 = *dr6_p; 434 dr6 = *dr6_p;
435 435
436 /* If it's a single step, TRAP bits are random */
437 if (dr6 & DR_STEP)
438 return NOTIFY_DONE;
439
436 /* Do an early return if no trap bits are set in DR6 */ 440 /* Do an early return if no trap bits are set in DR6 */
437 if ((dr6 & DR_TRAP_BITS) == 0) 441 if ((dr6 & DR_TRAP_BITS) == 0)
438 return NOTIFY_DONE; 442 return NOTIFY_DONE;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 6da143c2a6b8..ac861b8348e2 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -25,7 +25,6 @@ struct pci_hostbridge_probe {
25}; 25};
26 26
27static u64 __cpuinitdata fam10h_pci_mmconf_base; 27static u64 __cpuinitdata fam10h_pci_mmconf_base;
28static int __cpuinitdata fam10h_pci_mmconf_base_status;
29 28
30static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { 29static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
31 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, 30 { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
@@ -44,10 +43,12 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
44 return start1 - start2; 43 return start1 - start2;
45} 44}
46 45
47/*[47:0] */ 46#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
48/* need to avoid (0xfd<<32) and (0xfe<<32), ht used space */ 47#define MMCONF_MASK (~(MMCONF_UNIT - 1))
48#define MMCONF_SIZE (MMCONF_UNIT << 8)
49/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
49#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) 50#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
50#define BASE_VALID(b) ((b != (0xfdULL << 32)) && (b != (0xfeULL << 32))) 51#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
51static void __cpuinit get_fam10h_pci_mmconf_base(void) 52static void __cpuinit get_fam10h_pci_mmconf_base(void)
52{ 53{
53 int i; 54 int i;
@@ -64,12 +65,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
64 struct range range[8]; 65 struct range range[8];
65 66
66 /* only try to get setting from BSP */ 67 /* only try to get setting from BSP */
67 /* -1 or 1 */ 68 if (fam10h_pci_mmconf_base)
68 if (fam10h_pci_mmconf_base_status)
69 return; 69 return;
70 70
71 if (!early_pci_allowed()) 71 if (!early_pci_allowed())
72 goto fail; 72 return;
73 73
74 found = 0; 74 found = 0;
75 for (i = 0; i < ARRAY_SIZE(pci_probes); i++) { 75 for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
@@ -91,7 +91,7 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
91 } 91 }
92 92
93 if (!found) 93 if (!found)
94 goto fail; 94 return;
95 95
96 /* SYS_CFG */ 96 /* SYS_CFG */
97 address = MSR_K8_SYSCFG; 97 address = MSR_K8_SYSCFG;
@@ -99,16 +99,16 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
99 99
100 /* TOP_MEM2 is not enabled? */ 100 /* TOP_MEM2 is not enabled? */
101 if (!(val & (1<<21))) { 101 if (!(val & (1<<21))) {
102 tom2 = 0; 102 tom2 = 1ULL << 32;
103 } else { 103 } else {
104 /* TOP_MEM2 */ 104 /* TOP_MEM2 */
105 address = MSR_K8_TOP_MEM2; 105 address = MSR_K8_TOP_MEM2;
106 rdmsrl(address, val); 106 rdmsrl(address, val);
107 tom2 = val & (0xffffULL<<32); 107 tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
108 } 108 }
109 109
110 if (base <= tom2) 110 if (base <= tom2)
111 base = tom2 + (1ULL<<32); 111 base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;
112 112
113 /* 113 /*
114 * need to check if the range is in the high mmio range that is 114 * need to check if the range is in the high mmio range that is
@@ -123,11 +123,11 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
123 if (!(reg & 3)) 123 if (!(reg & 3))
124 continue; 124 continue;
125 125
126 start = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/ 126 start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/
127 reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3)); 127 reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
128 end = (((u64)reg) << 8) & (0xffULL << 32); /* 39:16 on 31:8*/ 128 end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/
129 129
130 if (!end) 130 if (end < tom2)
131 continue; 131 continue;
132 132
133 range[hi_mmio_num].start = start; 133 range[hi_mmio_num].start = start;
@@ -143,32 +143,27 @@ static void __cpuinit get_fam10h_pci_mmconf_base(void)
143 143
144 if (range[hi_mmio_num - 1].end < base) 144 if (range[hi_mmio_num - 1].end < base)
145 goto out; 145 goto out;
146 if (range[0].start > base) 146 if (range[0].start > base + MMCONF_SIZE)
147 goto out; 147 goto out;
148 148
149 /* need to find one window */ 149 /* need to find one window */
150 base = range[0].start - (1ULL << 32); 150 base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
151 if ((base > tom2) && BASE_VALID(base)) 151 if ((base > tom2) && BASE_VALID(base))
152 goto out; 152 goto out;
153 base = range[hi_mmio_num - 1].end + (1ULL << 32); 153 base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
154 if ((base > tom2) && BASE_VALID(base)) 154 if (BASE_VALID(base))
155 goto out; 155 goto out;
156 /* need to find window between ranges */ 156 /* need to find window between ranges */
157 if (hi_mmio_num > 1) 157 for (i = 1; i < hi_mmio_num; i++) {
158 for (i = 0; i < hi_mmio_num - 1; i++) { 158 base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
159 if (range[i + 1].start > (range[i].end + (1ULL << 32))) { 159 val = range[i].start & MMCONF_MASK;
160 base = range[i].end + (1ULL << 32); 160 if (val >= base + MMCONF_SIZE && BASE_VALID(base))
161 if ((base > tom2) && BASE_VALID(base)) 161 goto out;
162 goto out;
163 }
164 } 162 }
165
166fail:
167 fam10h_pci_mmconf_base_status = -1;
168 return; 163 return;
164
169out: 165out:
170 fam10h_pci_mmconf_base = base; 166 fam10h_pci_mmconf_base = base;
171 fam10h_pci_mmconf_base_status = 1;
172} 167}
173 168
174void __cpuinit fam10h_check_enable_mmcfg(void) 169void __cpuinit fam10h_check_enable_mmcfg(void)
@@ -190,11 +185,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
190 185
191 /* only trust the one handle 256 buses, if acpi=off */ 186 /* only trust the one handle 256 buses, if acpi=off */
192 if (!acpi_pci_disabled || busnbits >= 8) { 187 if (!acpi_pci_disabled || busnbits >= 8) {
193 u64 base; 188 u64 base = val & MMCONF_MASK;
194 base = val & (0xffffULL << 32); 189
195 if (fam10h_pci_mmconf_base_status <= 0) { 190 if (!fam10h_pci_mmconf_base) {
196 fam10h_pci_mmconf_base = base; 191 fam10h_pci_mmconf_base = base;
197 fam10h_pci_mmconf_base_status = 1;
198 return; 192 return;
199 } else if (fam10h_pci_mmconf_base == base) 193 } else if (fam10h_pci_mmconf_base == base)
200 return; 194 return;
@@ -206,8 +200,10 @@ void __cpuinit fam10h_check_enable_mmcfg(void)
206 * with 256 buses 200 * with 256 buses
207 */ 201 */
208 get_fam10h_pci_mmconf_base(); 202 get_fam10h_pci_mmconf_base();
209 if (fam10h_pci_mmconf_base_status <= 0) 203 if (!fam10h_pci_mmconf_base) {
204 pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF;
210 return; 205 return;
206 }
211 207
212 printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n"); 208 printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
213 val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) | 209 val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
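The rewritten base search replaces ad-hoc (1ULL << 32) stepping with unit-based arithmetic: MMCONF_UNIT is the 1 MiB of config space one bus decodes, MMCONF_SIZE covers all 256 buses, and candidate bases are aligned with MMCONF_MASK. The rounding idioms in isolation, under those same definitions:

#include <stdint.h>

#define MMCONF_UNIT (1ULL << 20)            /* config space per bus */
#define MMCONF_MASK (~(MMCONF_UNIT - 1))    /* align to a unit */
#define MMCONF_SIZE (MMCONF_UNIT << 8)      /* 256 buses */

/* Round up to the next unit boundary at or above addr. */
static uint64_t round_up_unit(uint64_t addr)
{
        return (addr + MMCONF_UNIT - 1) & MMCONF_MASK;
}

/* Round down to the unit boundary at or below addr. */
static uint64_t round_down_unit(uint64_t addr)
{
        return addr & MMCONF_MASK;
}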
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 008b91eefa18..42eb3300dfc6 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -83,6 +83,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
83 83
84static atomic64_t last_value = ATOMIC64_INIT(0); 84static atomic64_t last_value = ATOMIC64_INIT(0);
85 85
86void pvclock_resume(void)
87{
88 atomic64_set(&last_value, 0);
89}
90
86cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src) 91cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
87{ 92{
88 struct pvclock_shadow_time shadow; 93 struct pvclock_shadow_time shadow;
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 12cdbb17ad18..6acc724d5d8f 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -223,7 +223,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
223 223
224static void __cpuinit calculate_tlb_offset(void) 224static void __cpuinit calculate_tlb_offset(void)
225{ 225{
226 int cpu, node, nr_node_vecs; 226 int cpu, node, nr_node_vecs, idx = 0;
227 /* 227 /*
228 * we are changing tlb_vector_offset for each CPU in runtime, but this 228 * we are changing tlb_vector_offset for each CPU in runtime, but this
229 * will not cause inconsistency, as the write is atomic under X86. we 229 * will not cause inconsistency, as the write is atomic under X86. we
@@ -239,7 +239,7 @@ static void __cpuinit calculate_tlb_offset(void)
239 nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes; 239 nr_node_vecs = NUM_INVALIDATE_TLB_VECTORS/nr_online_nodes;
240 240
241 for_each_online_node(node) { 241 for_each_online_node(node) {
242 int node_offset = (node % NUM_INVALIDATE_TLB_VECTORS) * 242 int node_offset = (idx % NUM_INVALIDATE_TLB_VECTORS) *
243 nr_node_vecs; 243 nr_node_vecs;
244 int cpu_offset = 0; 244 int cpu_offset = 0;
245 for_each_cpu(cpu, cpumask_of_node(node)) { 245 for_each_cpu(cpu, cpumask_of_node(node)) {
@@ -248,6 +248,7 @@ static void __cpuinit calculate_tlb_offset(void)
248 cpu_offset++; 248 cpu_offset++;
249 cpu_offset = cpu_offset % nr_node_vecs; 249 cpu_offset = cpu_offset % nr_node_vecs;
250 } 250 }
251 idx++;
251 } 252 }
252} 253}
253 254
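The calculate_tlb_offset() fix matters when online node numbers are sparse: indexing the vector blocks by the raw node number can run past NUM_INVALIDATE_TLB_VECTORS, while a dense running index spreads the nodes evenly. A toy illustration (the vector count and node layout are made up):

#include <stdio.h>

#define NUM_VECTORS 32

int main(void)
{
        /* Suppose the online nodes are 0 and 4 (sparse numbering)
         * and each node gets NUM_VECTORS / nr_nodes vectors.
         */
        int nodes[] = { 0, 4 };
        int nr_nodes = 2;
        int per_node = NUM_VECTORS / nr_nodes;   /* 16 */
        int idx = 0;

        for (int i = 0; i < nr_nodes; i++) {
                int by_node = (nodes[i] % NUM_VECTORS) * per_node; /* 0, 64! */
                int by_idx  = (idx++ % NUM_VECTORS) * per_node;    /* 0, 16 */

                printf("node %d: by_node=%d by_idx=%d\n",
                       nodes[i], by_node, by_idx);
        }
        return 0;
}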
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index d7b5109f7a9c..25cd4a07d09f 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -70,6 +70,9 @@ static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi,
70struct xen_pci_frontend_ops *xen_pci_frontend; 70struct xen_pci_frontend_ops *xen_pci_frontend;
71EXPORT_SYMBOL_GPL(xen_pci_frontend); 71EXPORT_SYMBOL_GPL(xen_pci_frontend);
72 72
73#define XEN_PIRQ_MSI_DATA (MSI_DATA_TRIGGER_EDGE | \
74 MSI_DATA_LEVEL_ASSERT | (3 << 8) | MSI_DATA_VECTOR(0))
75
73static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, 76static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
74 struct msi_msg *msg) 77 struct msi_msg *msg)
75{ 78{
@@ -83,12 +86,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq,
83 MSI_ADDR_REDIRECTION_CPU | 86 MSI_ADDR_REDIRECTION_CPU |
84 MSI_ADDR_DEST_ID(pirq); 87 MSI_ADDR_DEST_ID(pirq);
85 88
86 msg->data = 89 msg->data = XEN_PIRQ_MSI_DATA;
87 MSI_DATA_TRIGGER_EDGE |
88 MSI_DATA_LEVEL_ASSERT |
89 /* delivery mode reserved */
90 (3 << 8) |
91 MSI_DATA_VECTOR(0);
92} 90}
93 91
94static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) 92static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
@@ -98,8 +96,23 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
98 struct msi_msg msg; 96 struct msi_msg msg;
99 97
100 list_for_each_entry(msidesc, &dev->msi_list, list) { 98 list_for_each_entry(msidesc, &dev->msi_list, list) {
99 __read_msi_msg(msidesc, &msg);
100 pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) |
101 ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff);
102 if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) {
103 xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
104 "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ);
105 if (irq < 0)
106 goto error;
107 ret = set_irq_msi(irq, msidesc);
108 if (ret < 0)
109 goto error_while;
110 printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d"
111 " pirq=%d\n", irq, pirq);
112 return 0;
113 }
101 xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? 114 xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ?
102 "msi-x" : "msi", &irq, &pirq); 115 "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ));
103 if (irq < 0 || pirq < 0) 116 if (irq < 0 || pirq < 0)
104 goto error; 117 goto error;
105 printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq); 118 printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq);
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index a318194002b5..ba9caa808a9c 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1455,7 +1455,7 @@ static void __init uv_init_uvhub(int uvhub, int vector)
1455 * the below initialization can't be in firmware because the 1455 * the below initialization can't be in firmware because the
1456 * messaging IRQ will be determined by the OS 1456 * messaging IRQ will be determined by the OS
1457 */ 1457 */
1458 apicid = uvhub_to_first_apicid(uvhub); 1458 apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
1459 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG, 1459 uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
1460 ((apicid << 32) | vector)); 1460 ((apicid << 32) | vector));
1461} 1461}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index 56e421bc379b..9daf5d1af9f1 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -89,6 +89,7 @@ static void uv_rtc_send_IPI(int cpu)
89 89
90 apicid = cpu_physical_id(cpu); 90 apicid = cpu_physical_id(cpu);
91 pnode = uv_apicid_to_pnode(apicid); 91 pnode = uv_apicid_to_pnode(apicid);
92 apicid |= uv_apicid_hibits;
92 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 93 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
93 (apicid << UVH_IPI_INT_APIC_ID_SHFT) | 94 (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
94 (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT); 95 (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
@@ -107,6 +108,7 @@ static int uv_intr_pending(int pnode)
107static int uv_setup_intr(int cpu, u64 expires) 108static int uv_setup_intr(int cpu, u64 expires)
108{ 109{
109 u64 val; 110 u64 val;
111 unsigned long apicid = cpu_physical_id(cpu) | uv_apicid_hibits;
110 int pnode = uv_cpu_to_pnode(cpu); 112 int pnode = uv_cpu_to_pnode(cpu);
111 113
112 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, 114 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
@@ -117,7 +119,7 @@ static int uv_setup_intr(int cpu, u64 expires)
117 UVH_EVENT_OCCURRED0_RTC1_MASK); 119 UVH_EVENT_OCCURRED0_RTC1_MASK);
118 120
119 val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) | 121 val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
120 ((u64)cpu_physical_id(cpu) << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT); 122 ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
121 123
122 /* Set configuration */ 124 /* Set configuration */
123 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val); 125 uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 235c0f4d3861..44dcad43989d 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -75,6 +75,11 @@ DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
75enum xen_domain_type xen_domain_type = XEN_NATIVE; 75enum xen_domain_type xen_domain_type = XEN_NATIVE;
76EXPORT_SYMBOL_GPL(xen_domain_type); 76EXPORT_SYMBOL_GPL(xen_domain_type);
77 77
78unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
79EXPORT_SYMBOL(machine_to_phys_mapping);
80unsigned int machine_to_phys_order;
81EXPORT_SYMBOL(machine_to_phys_order);
82
78struct start_info *xen_start_info; 83struct start_info *xen_start_info;
79EXPORT_SYMBOL_GPL(xen_start_info); 84EXPORT_SYMBOL_GPL(xen_start_info);
80 85
@@ -1016,10 +1021,6 @@ static void xen_reboot(int reason)
1016{ 1021{
1017 struct sched_shutdown r = { .reason = reason }; 1022 struct sched_shutdown r = { .reason = reason };
1018 1023
1019#ifdef CONFIG_SMP
1020 stop_other_cpus();
1021#endif
1022
1023 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) 1024 if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
1024 BUG(); 1025 BUG();
1025} 1026}
@@ -1090,6 +1091,8 @@ static void __init xen_setup_stackprotector(void)
1090/* First C function to be called on Xen boot */ 1091/* First C function to be called on Xen boot */
1091asmlinkage void __init xen_start_kernel(void) 1092asmlinkage void __init xen_start_kernel(void)
1092{ 1093{
1094 struct physdev_set_iopl set_iopl;
1095 int rc;
1093 pgd_t *pgd; 1096 pgd_t *pgd;
1094 1097
1095 if (!xen_start_info) 1098 if (!xen_start_info)
@@ -1097,6 +1100,8 @@ asmlinkage void __init xen_start_kernel(void)
1097 1100
1098 xen_domain_type = XEN_PV_DOMAIN; 1101 xen_domain_type = XEN_PV_DOMAIN;
1099 1102
1103 xen_setup_machphys_mapping();
1104
1100 /* Install Xen paravirt ops */ 1105 /* Install Xen paravirt ops */
1101 pv_info = xen_info; 1106 pv_info = xen_info;
1102 pv_init_ops = xen_init_ops; 1107 pv_init_ops = xen_init_ops;
@@ -1191,8 +1196,6 @@ asmlinkage void __init xen_start_kernel(void)
1191 /* Allocate and initialize top and mid mfn levels for p2m structure */ 1196 /* Allocate and initialize top and mid mfn levels for p2m structure */
1192 xen_build_mfn_list_list(); 1197 xen_build_mfn_list_list();
1193 1198
1194 init_mm.pgd = pgd;
1195
1196 /* keep using Xen gdt for now; no urgent need to change it */ 1199 /* keep using Xen gdt for now; no urgent need to change it */
1197 1200
1198#ifdef CONFIG_X86_32 1201#ifdef CONFIG_X86_32
@@ -1202,10 +1205,18 @@ asmlinkage void __init xen_start_kernel(void)
1202#else 1205#else
1203 pv_info.kernel_rpl = 0; 1206 pv_info.kernel_rpl = 0;
1204#endif 1207#endif
1205
1206 /* set the limit of our address space */ 1208 /* set the limit of our address space */
1207 xen_reserve_top(); 1209 xen_reserve_top();
1208 1210
1211 /* We used to do this in xen_arch_setup, but that is too late on AMD,
1212 * where early_cpu_init (run before ->arch_setup()) calls early_amd_init,
1213 * which pokes the 0xcf8 port.
1214 */
1215 set_iopl.iopl = 1;
1216 rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
1217 if (rc != 0)
1218 xen_raw_printk("physdev_op failed %d\n", rc);
1219
1209#ifdef CONFIG_X86_32 1220#ifdef CONFIG_X86_32
1210 /* set up basic CPUID stuff */ 1221 /* set up basic CPUID stuff */
1211 cpu_detect(&new_cpu_data); 1222 cpu_detect(&new_cpu_data);
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 21ed8d7f75a5..44924e551fde 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2034,6 +2034,20 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
2034 set_page_prot(pmd, PAGE_KERNEL_RO); 2034 set_page_prot(pmd, PAGE_KERNEL_RO);
2035} 2035}
2036 2036
2037void __init xen_setup_machphys_mapping(void)
2038{
2039 struct xen_machphys_mapping mapping;
2040 unsigned long machine_to_phys_nr_ents;
2041
2042 if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
2043 machine_to_phys_mapping = (unsigned long *)mapping.v_start;
2044 machine_to_phys_nr_ents = mapping.max_mfn + 1;
2045 } else {
2046 machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
2047 }
2048 machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
2049}
2050
2037#ifdef CONFIG_X86_64 2051#ifdef CONFIG_X86_64
2038static void convert_pfn_mfn(void *v) 2052static void convert_pfn_mfn(void *v)
2039{ 2053{
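
The new xen_setup_machphys_mapping() above derives machine_to_phys_order
with fls(): for n table entries, fls(n - 1) is the number of bits needed to
index them, i.e. ceil(log2(n)). A minimal standalone sketch (fls_generic()
and the entry count are illustrative stand-ins, not kernel code):

	#include <stdio.h>

	/* behaves like the kernel's fls(): 1-based index of the highest
	 * set bit; fls(0) == 0 */
	static int fls_generic(unsigned long x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned long nr_ents = 1UL << 20;	/* say, 1M m2p entries */

		printf("order = %d\n", fls_generic(nr_ents - 1));	/* 20 */
		return 0;
	}
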
@@ -2119,44 +2133,83 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2119 return pgd; 2133 return pgd;
2120} 2134}
2121#else /* !CONFIG_X86_64 */ 2135#else /* !CONFIG_X86_64 */
2122static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD); 2136static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD);
2137static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD);
2138
2139static __init void xen_write_cr3_init(unsigned long cr3)
2140{
2141 unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir));
2142
2143 BUG_ON(read_cr3() != __pa(initial_page_table));
2144 BUG_ON(cr3 != __pa(swapper_pg_dir));
2145
2146 /*
2147 * We are switching to swapper_pg_dir for the first time (from
2148 * initial_page_table) and therefore need to mark that page
2149 * read-only and then pin it.
2150 *
2151 * Xen disallows sharing of kernel PMDs for PAE
2152 * guests. Therefore we must copy the kernel PMD from
2153 * initial_page_table into a new kernel PMD to be used in
2154 * swapper_pg_dir.
2155 */
2156 swapper_kernel_pmd =
2157 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2158 memcpy(swapper_kernel_pmd, initial_kernel_pmd,
2159 sizeof(pmd_t) * PTRS_PER_PMD);
2160 swapper_pg_dir[KERNEL_PGD_BOUNDARY] =
2161 __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT);
2162 set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
2163
2164 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);
2165 xen_write_cr3(cr3);
2166 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn);
2167
2168 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE,
2169 PFN_DOWN(__pa(initial_page_table)));
2170 set_page_prot(initial_page_table, PAGE_KERNEL);
2171 set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
2172
2173 pv_mmu_ops.write_cr3 = &xen_write_cr3;
2174}
2123 2175
2124__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, 2176__init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
2125 unsigned long max_pfn) 2177 unsigned long max_pfn)
2126{ 2178{
2127 pmd_t *kernel_pmd; 2179 pmd_t *kernel_pmd;
2128 2180
2129 level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 2181 initial_kernel_pmd =
2182 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
2130 2183
2131 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + 2184 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
2132 xen_start_info->nr_pt_frames * PAGE_SIZE + 2185 xen_start_info->nr_pt_frames * PAGE_SIZE +
2133 512*1024); 2186 512*1024);
2134 2187
2135 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 2188 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
2136 memcpy(level2_kernel_pgt, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); 2189 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
2137 2190
2138 xen_map_identity_early(level2_kernel_pgt, max_pfn); 2191 xen_map_identity_early(initial_kernel_pmd, max_pfn);
2139 2192
2140 memcpy(swapper_pg_dir, pgd, sizeof(pgd_t) * PTRS_PER_PGD); 2193 memcpy(initial_page_table, pgd, sizeof(pgd_t) * PTRS_PER_PGD);
2141 set_pgd(&swapper_pg_dir[KERNEL_PGD_BOUNDARY], 2194 initial_page_table[KERNEL_PGD_BOUNDARY] =
2142 __pgd(__pa(level2_kernel_pgt) | _PAGE_PRESENT)); 2195 __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT);
2143 2196
2144 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); 2197 set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO);
2145 set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); 2198 set_page_prot(initial_page_table, PAGE_KERNEL_RO);
2146 set_page_prot(empty_zero_page, PAGE_KERNEL_RO); 2199 set_page_prot(empty_zero_page, PAGE_KERNEL_RO);
2147 2200
2148 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); 2201 pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
2149 2202
2150 xen_write_cr3(__pa(swapper_pg_dir)); 2203 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE,
2151 2204 PFN_DOWN(__pa(initial_page_table)));
2152 pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir))); 2205 xen_write_cr3(__pa(initial_page_table));
2153 2206
2154 memblock_x86_reserve_range(__pa(xen_start_info->pt_base), 2207 memblock_x86_reserve_range(__pa(xen_start_info->pt_base),
2155 __pa(xen_start_info->pt_base + 2208 __pa(xen_start_info->pt_base +
2156 xen_start_info->nr_pt_frames * PAGE_SIZE), 2209 xen_start_info->nr_pt_frames * PAGE_SIZE),
2157 "XEN PAGETABLES"); 2210 "XEN PAGETABLES");
2158 2211
2159 return swapper_pg_dir; 2212 return initial_page_table;
2160} 2213}
2161#endif /* CONFIG_X86_64 */ 2214#endif /* CONFIG_X86_64 */
2162 2215
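
The PAE-specific xen_write_cr3_init() above has to respect Xen's pinning
protocol. Condensed into the bare ordering it relies on (same helpers as in
the hunk; a sketch of the sequence, not a drop-in replacement):

	/* 1. Xen only runs on page tables it has validated, so the new
	 *    root and its kernel PMD must be read-only before pinning. */
	set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO);
	set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO);

	/* 2. Switch to the new root, then pin it so Xen keeps it
	 *    validated across future updates. */
	xen_write_cr3(__pa(swapper_pg_dir));
	pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(swapper_pg_dir)));

	/* 3. Only now can the old root be unpinned and its pages handed
	 *    back as ordinary writable memory. */
	pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(initial_page_table)));
	set_page_prot(initial_page_table, PAGE_KERNEL);
	set_page_prot(initial_kernel_pmd, PAGE_KERNEL);
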
@@ -2290,7 +2343,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
2290 .write_cr2 = xen_write_cr2, 2343 .write_cr2 = xen_write_cr2,
2291 2344
2292 .read_cr3 = xen_read_cr3, 2345 .read_cr3 = xen_read_cr3,
2346#ifdef CONFIG_X86_32
2347 .write_cr3 = xen_write_cr3_init,
2348#else
2293 .write_cr3 = xen_write_cr3, 2349 .write_cr3 = xen_write_cr3,
2350#endif
2294 2351
2295 .flush_tlb_user = xen_flush_tlb, 2352 .flush_tlb_user = xen_flush_tlb,
2296 .flush_tlb_kernel = xen_flush_tlb, 2353 .flush_tlb_kernel = xen_flush_tlb,
@@ -2358,8 +2415,6 @@ void __init xen_init_mmu_ops(void)
2358 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; 2415 x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
2359 pv_mmu_ops = xen_mmu_ops; 2416 pv_mmu_ops = xen_mmu_ops;
2360 2417
2361 vmap_lazy_unmap = false;
2362
2363 memset(dummy_mapping, 0xff, PAGE_SIZE); 2418 memset(dummy_mapping, 0xff, PAGE_SIZE);
2364} 2419}
2365 2420
@@ -2627,7 +2682,8 @@ int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
2627 2682
2628 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); 2683 prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP);
2629 2684
2630 vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP; 2685 BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) ==
2686 (VM_PFNMAP | VM_RESERVED | VM_IO)));
2631 2687
2632 rmd.mfn = mfn; 2688 rmd.mfn = mfn;
2633 rmd.prot = prot; 2689 rmd.prot = prot;
diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c
index 0f456386cce5..25c52f94a27c 100644
--- a/arch/x86/xen/platform-pci-unplug.c
+++ b/arch/x86/xen/platform-pci-unplug.c
@@ -68,7 +68,7 @@ static int __init check_platform_magic(void)
68 return 0; 68 return 0;
69} 69}
70 70
71void __init xen_unplug_emulated_devices(void) 71void xen_unplug_emulated_devices(void)
72{ 72{
73 int r; 73 int r;
74 74
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 769c4b01fa32..b5a7f928234b 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -23,7 +23,6 @@
23#include <xen/interface/callback.h> 23#include <xen/interface/callback.h>
24#include <xen/interface/memory.h> 24#include <xen/interface/memory.h>
25#include <xen/interface/physdev.h> 25#include <xen/interface/physdev.h>
26#include <xen/interface/memory.h>
27#include <xen/features.h> 26#include <xen/features.h>
28 27
29#include "xen-ops.h" 28#include "xen-ops.h"
@@ -182,24 +181,21 @@ char * __init xen_memory_setup(void)
182 for (i = 0; i < memmap.nr_entries; i++) { 181 for (i = 0; i < memmap.nr_entries; i++) {
183 unsigned long long end = map[i].addr + map[i].size; 182 unsigned long long end = map[i].addr + map[i].size;
184 183
185 if (map[i].type == E820_RAM) { 184 if (map[i].type == E820_RAM && end > mem_end) {
186 if (map[i].addr < mem_end && end > mem_end) { 185 /* RAM off the end - may be partially included */
187 /* Truncate region to max_mem. */ 186 u64 delta = min(map[i].size, end - mem_end);
188 u64 delta = end - mem_end;
189 187
190 map[i].size -= delta; 188 map[i].size -= delta;
191 extra_pages += PFN_DOWN(delta); 189 end -= delta;
192 190
193 end = mem_end; 191 extra_pages += PFN_DOWN(delta);
194 }
195 } 192 }
196 193
197 if (end > xen_extra_mem_start) 194 if (map[i].size > 0 && end > xen_extra_mem_start)
198 xen_extra_mem_start = end; 195 xen_extra_mem_start = end;
199 196
200 /* If region is non-RAM or below mem_end, add what remains */ 197 /* Add region if any remains */
201 if ((map[i].type != E820_RAM || map[i].addr < mem_end) && 198 if (map[i].size > 0)
202 map[i].size > 0)
203 e820_add_region(map[i].addr, map[i].size, map[i].type); 199 e820_add_region(map[i].addr, map[i].size, map[i].type);
204 } 200 }
205 201
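
The rewritten clamp above is easier to check with concrete numbers.
Assuming mem_end = 1 GiB (0x40000000) and a RAM entry
[0x3fc00000, 0x50000000):

	end   = 0x50000000                      /* > mem_end, so clamp */
	delta = min(size, end - mem_end)
	      = min(0x10400000, 0x10000000)     /* = 0x10000000 */
	size -= delta                           /* region now ends at mem_end */
	extra_pages += PFN_DOWN(0x10000000)     /* 65536 4 KiB pages */

The min() matters for entries that start entirely beyond mem_end: there
end - mem_end exceeds size, so delta is capped at size and the whole entry
is converted to extra pages instead of underflowing map[i].size.
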
@@ -248,26 +244,11 @@ char * __init xen_memory_setup(void)
248 else 244 else
249 extra_pages = 0; 245 extra_pages = 0;
250 246
251 if (!xen_initial_domain()) 247 xen_add_extra_mem(extra_pages);
252 xen_add_extra_mem(extra_pages);
253 248
254 return "Xen"; 249 return "Xen";
255} 250}
256 251
257static void xen_idle(void)
258{
259 local_irq_disable();
260
261 if (need_resched())
262 local_irq_enable();
263 else {
264 current_thread_info()->status &= ~TS_POLLING;
265 smp_mb__after_clear_bit();
266 safe_halt();
267 current_thread_info()->status |= TS_POLLING;
268 }
269}
270
271/* 252/*
272 * Set the bit indicating "nosegneg" library variants should be used. 253 * Set the bit indicating "nosegneg" library variants should be used.
273 * We only need to bother in pure 32-bit mode; compat 32-bit processes 254 * We only need to bother in pure 32-bit mode; compat 32-bit processes
@@ -337,9 +318,6 @@ void __cpuinit xen_enable_syscall(void)
337 318
338void __init xen_arch_setup(void) 319void __init xen_arch_setup(void)
339{ 320{
340 struct physdev_set_iopl set_iopl;
341 int rc;
342
343 xen_panic_handler_init(); 321 xen_panic_handler_init();
344 322
345 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments); 323 HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -356,11 +334,6 @@ void __init xen_arch_setup(void)
356 xen_enable_sysenter(); 334 xen_enable_sysenter();
357 xen_enable_syscall(); 335 xen_enable_syscall();
358 336
359 set_iopl.iopl = 1;
360 rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
361 if (rc != 0)
362 printk(KERN_INFO "physdev_op failed %d\n", rc);
363
364#ifdef CONFIG_ACPI 337#ifdef CONFIG_ACPI
365 if (!(xen_start_info->flags & SIF_INITDOMAIN)) { 338 if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
366 printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); 339 printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
@@ -372,7 +345,11 @@ void __init xen_arch_setup(void)
372 MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ? 345 MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
373 COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); 346 COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
374 347
375 pm_idle = xen_idle; 348 /* Set up idle, making sure it calls safe_halt() pvop */
349#ifdef CONFIG_X86_32
350 boot_cpu_data.hlt_works_ok = 1;
351#endif
352 pm_idle = default_idle;
376 353
377 fiddle_vdso(); 354 fiddle_vdso();
378} 355}
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index 1d789d56877c..9bbd63a129b5 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -31,6 +31,7 @@ void xen_hvm_post_suspend(int suspend_cancelled)
31 int cpu; 31 int cpu;
32 xen_hvm_init_shared_info(); 32 xen_hvm_init_shared_info();
33 xen_callback_vector(); 33 xen_callback_vector();
34 xen_unplug_emulated_devices();
34 if (xen_feature(XENFEAT_hvm_safe_pvclock)) { 35 if (xen_feature(XENFEAT_hvm_safe_pvclock)) {
35 for_each_online_cpu(cpu) { 36 for_each_online_cpu(cpu) {
36 xen_setup_runstate_info(cpu); 37 xen_setup_runstate_info(cpu);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index b2bb5aa3b054..5da5e53fb94c 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -426,6 +426,8 @@ void xen_timer_resume(void)
426{ 426{
427 int cpu; 427 int cpu;
428 428
429 pvclock_resume();
430
429 if (xen_clockevent != &xen_vcpuop_clockevent) 431 if (xen_clockevent != &xen_vcpuop_clockevent)
430 return; 432 return;
431 433
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 64044747348e..9d41bf985757 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -43,7 +43,7 @@ void xen_vcpu_restore(void);
43 43
44void xen_callback_vector(void); 44void xen_callback_vector(void);
45void xen_hvm_init_shared_info(void); 45void xen_hvm_init_shared_info(void);
46void __init xen_unplug_emulated_devices(void); 46void xen_unplug_emulated_devices(void);
47 47
48void __init xen_build_dynamic_phys_to_machine(void); 48void __init xen_build_dynamic_phys_to_machine(void);
49 49
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 56ad4531b412..004be80fd894 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -645,7 +645,7 @@ static int throtl_dispatch_tg(struct throtl_data *td, struct throtl_grp *tg,
645{ 645{
646 unsigned int nr_reads = 0, nr_writes = 0; 646 unsigned int nr_reads = 0, nr_writes = 0;
647 unsigned int max_nr_reads = throtl_grp_quantum*3/4; 647 unsigned int max_nr_reads = throtl_grp_quantum*3/4;
648 unsigned int max_nr_writes = throtl_grp_quantum - nr_reads; 648 unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
649 struct bio *bio; 649 struct bio *bio;
650 650
651 /* Try to dispatch 75% READS and 25% WRITES */ 651 /* Try to dispatch 75% READS and 25% WRITES */
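
The one-character fix above restores the 75/25 split the comment promises.
Assuming throtl_grp_quantum is 8 (illustrative value):

	max_nr_reads  = 8 * 3 / 4         /* = 6 */
	max_nr_writes = 8 - max_nr_reads  /* = 2 -> 75% reads, 25% writes */

The old expression subtracted nr_reads, which is still 0 at that point, so
max_nr_writes silently came out as the full quantum.
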
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
index a1725e6488d3..7888501ad9ee 100644
--- a/drivers/block/amiflop.c
+++ b/drivers/block/amiflop.c
@@ -1341,7 +1341,7 @@ static struct request *set_next_request(void)
1341{ 1341{
1342 struct request_queue *q; 1342 struct request_queue *q;
1343 int cnt = FD_MAX_UNITS; 1343 int cnt = FD_MAX_UNITS;
1344 struct request *rq; 1344 struct request *rq = NULL;
1345 1345
1346 /* Find next queue we can dispatch from */ 1346 /* Find next queue we can dispatch from */
1347 fdc_queue = fdc_queue + 1; 1347 fdc_queue = fdc_queue + 1;
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
index 4e4cc6c828cb..605a67e40bbf 100644
--- a/drivers/block/ataflop.c
+++ b/drivers/block/ataflop.c
@@ -1399,7 +1399,7 @@ static struct request *set_next_request(void)
1399{ 1399{
1400 struct request_queue *q; 1400 struct request_queue *q;
1401 int old_pos = fdc_queue; 1401 int old_pos = fdc_queue;
1402 struct request *rq; 1402 struct request *rq = NULL;
1403 1403
1404 do { 1404 do {
1405 q = unit[fdc_queue].disk->queue; 1405 q = unit[fdc_queue].disk->queue;
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a67d0a611a8a..f291587d753e 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -66,6 +66,7 @@ MODULE_VERSION("3.6.26");
66MODULE_LICENSE("GPL"); 66MODULE_LICENSE("GPL");
67 67
68static DEFINE_MUTEX(cciss_mutex); 68static DEFINE_MUTEX(cciss_mutex);
69static struct proc_dir_entry *proc_cciss;
69 70
70#include "cciss_cmd.h" 71#include "cciss_cmd.h"
71#include "cciss.h" 72#include "cciss.h"
@@ -363,8 +364,6 @@ static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
363#define ENG_GIG_FACTOR (ENG_GIG/512) 364#define ENG_GIG_FACTOR (ENG_GIG/512)
364#define ENGAGE_SCSI "engage scsi" 365#define ENGAGE_SCSI "engage scsi"
365 366
366static struct proc_dir_entry *proc_cciss;
367
368static void cciss_seq_show_header(struct seq_file *seq) 367static void cciss_seq_show_header(struct seq_file *seq)
369{ 368{
370 ctlr_info_t *h = seq->private; 369 ctlr_info_t *h = seq->private;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 6ec9d53806c5..008d4a00b50d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -21,80 +21,9 @@
21 21
22 22
23 23
24 Instructions for use 24 For usage instructions, please refer to:
25 --------------------
26 25
27 1) Map a Linux block device to an existing rbd image. 26 Documentation/ABI/testing/sysfs-bus-rbd
28
29 Usage: <mon ip addr> <options> <pool name> <rbd image name> [snap name]
30
31 $ echo "192.168.0.1 name=admin rbd foo" > /sys/class/rbd/add
32
33 The snapshot name can be "-" or omitted to map the image read/write.
34
35 2) List all active blkdev<->object mappings.
36
37 In this example, we have performed step #1 twice, creating two blkdevs,
38 mapped to two separate rados objects in the rados rbd pool
39
40 $ cat /sys/class/rbd/list
41 #id major client_name pool name snap KB
42 0 254 client4143 rbd foo - 1024000
43
44 The columns, in order, are:
45 - blkdev unique id
46 - blkdev assigned major
47 - rados client id
48 - rados pool name
49 - rados block device name
50 - mapped snapshot ("-" if none)
51 - device size in KB
52
53
54 3) Create a snapshot.
55
56 Usage: <blkdev id> <snapname>
57
58 $ echo "0 mysnap" > /sys/class/rbd/snap_create
59
60
61 4) Listing a snapshot.
62
63 $ cat /sys/class/rbd/snaps_list
64 #id snap KB
65 0 - 1024000 (*)
66 0 foo 1024000
67
68 The columns, in order, are:
69 - blkdev unique id
70 - snapshot name, '-' means none (active read/write version)
71 - size of device at time of snapshot
72 - the (*) indicates this is the active version
73
74 5) Rollback to snapshot.
75
76 Usage: <blkdev id> <snapname>
77
78 $ echo "0 mysnap" > /sys/class/rbd/snap_rollback
79
80
81 6) Mapping an image using snapshot.
82
83 A snapshot mapping is read-only. This is being done by passing
84 snap=<snapname> to the options when adding a device.
85
86 $ echo "192.168.0.1 name=admin,snap=mysnap rbd foo" > /sys/class/rbd/add
87
88
89 7) Remove an active blkdev<->rbd image mapping.
90
91 In this example, we remove the mapping with blkdev unique id 1.
92
93 $ echo 1 > /sys/class/rbd/remove
94
95
96 NOTE: The actual creation and deletion of rados objects is outside the scope
97 of this driver.
98 27
99 */ 28 */
100 29
@@ -163,6 +92,14 @@ struct rbd_request {
163 u64 len; 92 u64 len;
164}; 93};
165 94
95struct rbd_snap {
96 struct device dev;
97 const char *name;
98 size_t size;
99 struct list_head node;
100 u64 id;
101};
102
166/* 103/*
167 * a single device 104 * a single device
168 */ 105 */
@@ -193,21 +130,60 @@ struct rbd_device {
193 int read_only; 130 int read_only;
194 131
195 struct list_head node; 132 struct list_head node;
133
134 /* list of snapshots */
135 struct list_head snaps;
136
137 /* sysfs related */
138 struct device dev;
139};
140
141static struct bus_type rbd_bus_type = {
142 .name = "rbd",
196}; 143};
197 144
198static spinlock_t node_lock; /* protects client get/put */ 145static spinlock_t node_lock; /* protects client get/put */
199 146
200static struct class *class_rbd; /* /sys/class/rbd */
201static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */ 147static DEFINE_MUTEX(ctl_mutex); /* Serialize open/close/setup/teardown */
202static LIST_HEAD(rbd_dev_list); /* devices */ 148static LIST_HEAD(rbd_dev_list); /* devices */
203static LIST_HEAD(rbd_client_list); /* clients */ 149static LIST_HEAD(rbd_client_list); /* clients */
204 150
151static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
152static void rbd_dev_release(struct device *dev);
153static ssize_t rbd_snap_rollback(struct device *dev,
154 struct device_attribute *attr,
155 const char *buf,
156 size_t size);
157static ssize_t rbd_snap_add(struct device *dev,
158 struct device_attribute *attr,
159 const char *buf,
160 size_t count);
161static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
 162 struct rbd_snap *snap);
163
164
165static struct rbd_device *dev_to_rbd(struct device *dev)
166{
167 return container_of(dev, struct rbd_device, dev);
168}
169
170static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
171{
172 return get_device(&rbd_dev->dev);
173}
174
175static void rbd_put_dev(struct rbd_device *rbd_dev)
176{
177 put_device(&rbd_dev->dev);
178}
205 179
206static int rbd_open(struct block_device *bdev, fmode_t mode) 180static int rbd_open(struct block_device *bdev, fmode_t mode)
207{ 181{
208 struct gendisk *disk = bdev->bd_disk; 182 struct gendisk *disk = bdev->bd_disk;
209 struct rbd_device *rbd_dev = disk->private_data; 183 struct rbd_device *rbd_dev = disk->private_data;
210 184
185 rbd_get_dev(rbd_dev);
186
211 set_device_ro(bdev, rbd_dev->read_only); 187 set_device_ro(bdev, rbd_dev->read_only);
212 188
213 if ((mode & FMODE_WRITE) && rbd_dev->read_only) 189 if ((mode & FMODE_WRITE) && rbd_dev->read_only)
@@ -216,9 +192,19 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
216 return 0; 192 return 0;
217} 193}
218 194
195static int rbd_release(struct gendisk *disk, fmode_t mode)
196{
197 struct rbd_device *rbd_dev = disk->private_data;
198
199 rbd_put_dev(rbd_dev);
200
201 return 0;
202}
203
219static const struct block_device_operations rbd_bd_ops = { 204static const struct block_device_operations rbd_bd_ops = {
220 .owner = THIS_MODULE, 205 .owner = THIS_MODULE,
221 .open = rbd_open, 206 .open = rbd_open,
207 .release = rbd_release,
222}; 208};
223 209
224/* 210/*
@@ -361,7 +347,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
361 int ret = -ENOMEM; 347 int ret = -ENOMEM;
362 348
363 init_rwsem(&header->snap_rwsem); 349 init_rwsem(&header->snap_rwsem);
364
365 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len); 350 header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
366 header->snapc = kmalloc(sizeof(struct ceph_snap_context) + 351 header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
367 snap_count * 352 snap_count *
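
The snapc allocation in rbd_header_from_disk() uses the usual
trailing-array sizing idiom: one allocation for the header plus snap_count
array slots. A hypothetical standalone illustration (names invented for the
sketch):

	#include <stdlib.h>

	struct snap_context {
		unsigned long seq;
		unsigned int num_snaps;
		unsigned long snaps[];	/* flexible array member */
	};

	static struct snap_context *alloc_snapc(unsigned int snap_count)
	{
		struct snap_context *sc;

		/* header plus snap_count trailing elements in one block */
		sc = malloc(sizeof(*sc) + snap_count * sizeof(sc->snaps[0]));
		if (sc)
			sc->num_snaps = snap_count;
		return sc;
	}
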
@@ -1256,10 +1241,20 @@ bad:
1256 return -ERANGE; 1241 return -ERANGE;
1257} 1242}
1258 1243
1244static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
1245{
1246 struct rbd_snap *snap;
1247
1248 while (!list_empty(&rbd_dev->snaps)) {
1249 snap = list_first_entry(&rbd_dev->snaps, struct rbd_snap, node);
1250 __rbd_remove_snap_dev(rbd_dev, snap);
1251 }
1252}
1253
1259/* 1254/*
1260 * only read the first part of the ondisk header, without the snaps info 1255 * only read the first part of the ondisk header, without the snaps info
1261 */ 1256 */
1262static int rbd_update_snaps(struct rbd_device *rbd_dev) 1257static int __rbd_update_snaps(struct rbd_device *rbd_dev)
1263{ 1258{
1264 int ret; 1259 int ret;
1265 struct rbd_image_header h; 1260 struct rbd_image_header h;
@@ -1280,12 +1275,15 @@ static int rbd_update_snaps(struct rbd_device *rbd_dev)
1280 rbd_dev->header.total_snaps = h.total_snaps; 1275 rbd_dev->header.total_snaps = h.total_snaps;
1281 rbd_dev->header.snapc = h.snapc; 1276 rbd_dev->header.snapc = h.snapc;
1282 rbd_dev->header.snap_names = h.snap_names; 1277 rbd_dev->header.snap_names = h.snap_names;
1278 rbd_dev->header.snap_names_len = h.snap_names_len;
1283 rbd_dev->header.snap_sizes = h.snap_sizes; 1279 rbd_dev->header.snap_sizes = h.snap_sizes;
1284 rbd_dev->header.snapc->seq = snap_seq; 1280 rbd_dev->header.snapc->seq = snap_seq;
1285 1281
1282 ret = __rbd_init_snaps_header(rbd_dev);
1283
1286 up_write(&rbd_dev->header.snap_rwsem); 1284 up_write(&rbd_dev->header.snap_rwsem);
1287 1285
1288 return 0; 1286 return ret;
1289} 1287}
1290 1288
1291static int rbd_init_disk(struct rbd_device *rbd_dev) 1289static int rbd_init_disk(struct rbd_device *rbd_dev)
@@ -1300,6 +1298,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
1300 if (rc) 1298 if (rc)
1301 return rc; 1299 return rc;
1302 1300
1301 /* no need to lock here, as rbd_dev is not registered yet */
1302 rc = __rbd_init_snaps_header(rbd_dev);
1303 if (rc)
1304 return rc;
1305
1303 rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size); 1306 rc = rbd_header_set_snap(rbd_dev, rbd_dev->snap_name, &total_size);
1304 if (rc) 1307 if (rc)
1305 return rc; 1308 return rc;
@@ -1343,54 +1346,360 @@ out:
1343 return rc; 1346 return rc;
1344} 1347}
1345 1348
1346/******************************************************************** 1349/*
1347 * /sys/class/rbd/ 1350 sysfs
1348 * add map rados objects to blkdev 1351*/
1349 * remove unmap rados objects 1352
1350 * list show mappings 1353static ssize_t rbd_size_show(struct device *dev,
1351 *******************************************************************/ 1354 struct device_attribute *attr, char *buf)
1355{
1356 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1357
1358 return sprintf(buf, "%llu\n", (unsigned long long)rbd_dev->header.image_size);
1359}
1360
1361static ssize_t rbd_major_show(struct device *dev,
1362 struct device_attribute *attr, char *buf)
1363{
1364 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1352 1365
1353static void class_rbd_release(struct class *cls) 1366 return sprintf(buf, "%d\n", rbd_dev->major);
1367}
1368
1369static ssize_t rbd_client_id_show(struct device *dev,
1370 struct device_attribute *attr, char *buf)
1354{ 1371{
1355 kfree(cls); 1372 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1373
1374 return sprintf(buf, "client%lld\n", ceph_client_id(rbd_dev->client));
1356} 1375}
1357 1376
1358static ssize_t class_rbd_list(struct class *c, 1377static ssize_t rbd_pool_show(struct device *dev,
1359 struct class_attribute *attr, 1378 struct device_attribute *attr, char *buf)
1360 char *data)
1361{ 1379{
1362 int n = 0; 1380 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1363 struct list_head *tmp; 1381
1364 int max = PAGE_SIZE; 1382 return sprintf(buf, "%s\n", rbd_dev->pool_name);
1383}
1384
1385static ssize_t rbd_name_show(struct device *dev,
1386 struct device_attribute *attr, char *buf)
1387{
1388 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1389
1390 return sprintf(buf, "%s\n", rbd_dev->obj);
1391}
1392
1393static ssize_t rbd_snap_show(struct device *dev,
1394 struct device_attribute *attr,
1395 char *buf)
1396{
1397 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1398
1399 return sprintf(buf, "%s\n", rbd_dev->snap_name);
1400}
1401
1402static ssize_t rbd_image_refresh(struct device *dev,
1403 struct device_attribute *attr,
1404 const char *buf,
1405 size_t size)
1406{
1407 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1408 int rc;
1409 int ret = size;
1365 1410
1366 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1411 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1367 1412
1368 n += snprintf(data, max, 1413 rc = __rbd_update_snaps(rbd_dev);
1369 "#id\tmajor\tclient_name\tpool\tname\tsnap\tKB\n"); 1414 if (rc < 0)
1415 ret = rc;
1370 1416
1371 list_for_each(tmp, &rbd_dev_list) { 1417 mutex_unlock(&ctl_mutex);
1372 struct rbd_device *rbd_dev; 1418 return ret;
1419}
1373 1420
1374 rbd_dev = list_entry(tmp, struct rbd_device, node); 1421static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
1375 n += snprintf(data+n, max-n, 1422static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
1376 "%d\t%d\tclient%lld\t%s\t%s\t%s\t%lld\n", 1423static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
1377 rbd_dev->id, 1424static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
1378 rbd_dev->major, 1425static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
1379 ceph_client_id(rbd_dev->client), 1426static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
1380 rbd_dev->pool_name, 1427static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
1381 rbd_dev->obj, rbd_dev->snap_name, 1428static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
1382 rbd_dev->header.image_size >> 10); 1429static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
1383 if (n == max) 1430
1431static struct attribute *rbd_attrs[] = {
1432 &dev_attr_size.attr,
1433 &dev_attr_major.attr,
1434 &dev_attr_client_id.attr,
1435 &dev_attr_pool.attr,
1436 &dev_attr_name.attr,
1437 &dev_attr_current_snap.attr,
1438 &dev_attr_refresh.attr,
1439 &dev_attr_create_snap.attr,
1440 &dev_attr_rollback_snap.attr,
1441 NULL
1442};
1443
1444static struct attribute_group rbd_attr_group = {
1445 .attrs = rbd_attrs,
1446};
1447
1448static const struct attribute_group *rbd_attr_groups[] = {
1449 &rbd_attr_group,
1450 NULL
1451};
1452
1453static void rbd_sysfs_dev_release(struct device *dev)
1454{
1455}
1456
1457static struct device_type rbd_device_type = {
1458 .name = "rbd",
1459 .groups = rbd_attr_groups,
1460 .release = rbd_sysfs_dev_release,
1461};
1462
1463
1464/*
1465 sysfs - snapshots
1466*/
1467
1468static ssize_t rbd_snap_size_show(struct device *dev,
1469 struct device_attribute *attr,
1470 char *buf)
1471{
1472 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1473
1474 return sprintf(buf, "%lld\n", (long long)snap->size);
1475}
1476
1477static ssize_t rbd_snap_id_show(struct device *dev,
1478 struct device_attribute *attr,
1479 char *buf)
1480{
1481 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1482
1483 return sprintf(buf, "%lld\n", (long long)snap->id);
1484}
1485
1486static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
1487static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
1488
1489static struct attribute *rbd_snap_attrs[] = {
1490 &dev_attr_snap_size.attr,
1491 &dev_attr_snap_id.attr,
1492 NULL,
1493};
1494
1495static struct attribute_group rbd_snap_attr_group = {
1496 .attrs = rbd_snap_attrs,
1497};
1498
1499static void rbd_snap_dev_release(struct device *dev)
1500{
1501 struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);
1502 kfree(snap->name);
1503 kfree(snap);
1504}
1505
1506static const struct attribute_group *rbd_snap_attr_groups[] = {
1507 &rbd_snap_attr_group,
1508 NULL
1509};
1510
1511static struct device_type rbd_snap_device_type = {
1512 .groups = rbd_snap_attr_groups,
1513 .release = rbd_snap_dev_release,
1514};
1515
1516static void __rbd_remove_snap_dev(struct rbd_device *rbd_dev,
1517 struct rbd_snap *snap)
1518{
1519 list_del(&snap->node);
1520 device_unregister(&snap->dev);
1521}
1522
1523static int rbd_register_snap_dev(struct rbd_device *rbd_dev,
1524 struct rbd_snap *snap,
1525 struct device *parent)
1526{
1527 struct device *dev = &snap->dev;
1528 int ret;
1529
1530 dev->type = &rbd_snap_device_type;
1531 dev->parent = parent;
1532 dev->release = rbd_snap_dev_release;
1533 dev_set_name(dev, "snap_%s", snap->name);
1534 ret = device_register(dev);
1535
1536 return ret;
1537}
1538
1539static int __rbd_add_snap_dev(struct rbd_device *rbd_dev,
1540 int i, const char *name,
1541 struct rbd_snap **snapp)
1542{
1543 int ret;
1544 struct rbd_snap *snap = kzalloc(sizeof(*snap), GFP_KERNEL);
1545 if (!snap)
1546 return -ENOMEM;
1547 snap->name = kstrdup(name, GFP_KERNEL);
1548 snap->size = rbd_dev->header.snap_sizes[i];
1549 snap->id = rbd_dev->header.snapc->snaps[i];
1550 if (device_is_registered(&rbd_dev->dev)) {
1551 ret = rbd_register_snap_dev(rbd_dev, snap,
1552 &rbd_dev->dev);
1553 if (ret < 0)
1554 goto err;
1555 }
1556 *snapp = snap;
1557 return 0;
1558err:
1559 kfree(snap->name);
1560 kfree(snap);
1561 return ret;
1562}
1563
1564/*
1565 * search for the previous snap in a NUL-delimited string list
1566 */
1567const char *rbd_prev_snap_name(const char *name, const char *start)
1568{
1569 if (name < start + 2)
1570 return NULL;
1571
1572 name -= 2;
1573 while (*name) {
1574 if (name == start)
1575 return start;
1576 name--;
1577 }
1578 return name + 1;
1579}
1580
1581/*
1582 * compare the old list of snapshots that we have to what's in the header
1583 * and update it accordingly. Note that the header holds the snapshots
1584 * in reverse order (from newest to oldest), so we walk from
1585 * oldest to newest to avoid seeing a duplicate snap name while
1586 * updating (e.g., a snapshot that was removed and then recreated
1587 * with the same name).
1588 */
1589static int __rbd_init_snaps_header(struct rbd_device *rbd_dev)
1590{
1591 const char *name, *first_name;
1592 int i = rbd_dev->header.total_snaps;
1593 struct rbd_snap *snap, *old_snap = NULL;
1594 int ret;
1595 struct list_head *p, *n;
1596
1597 first_name = rbd_dev->header.snap_names;
1598 name = first_name + rbd_dev->header.snap_names_len;
1599
1600 list_for_each_prev_safe(p, n, &rbd_dev->snaps) {
1601 u64 cur_id;
1602
1603 old_snap = list_entry(p, struct rbd_snap, node);
1604
1605 if (i)
1606 cur_id = rbd_dev->header.snapc->snaps[i - 1];
1607
1608 if (!i || old_snap->id < cur_id) {
1609 /* old_snap->id was skipped, thus was removed */
1610 __rbd_remove_snap_dev(rbd_dev, old_snap);
1611 continue;
1612 }
1613 if (old_snap->id == cur_id) {
1614 /* we have this snapshot already */
1615 i--;
1616 name = rbd_prev_snap_name(name, first_name);
1617 continue;
1618 }
1619 for (; i > 0;
1620 i--, name = rbd_prev_snap_name(name, first_name)) {
1621 if (!name) {
1622 WARN_ON(1);
1623 return -EINVAL;
1624 }
1625 cur_id = rbd_dev->header.snapc->snaps[i];
1626 /* snapshot removal? handle it above */
1627 if (cur_id >= old_snap->id)
1628 break;
1629 /* a new snapshot */
1630 ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
1631 if (ret < 0)
1632 return ret;
1633
1634 /* note that we add it backwards, so we use n and not p */
1635 list_add(&snap->node, n);
1636 p = &snap->node;
1637 }
1638 }
1639 /* we're done going over the old snap list, just add what's left */
1640 for (; i > 0; i--) {
1641 name = rbd_prev_snap_name(name, first_name);
1642 if (!name) {
1643 WARN_ON(1);
1644 return -EINVAL;
1645 }
1646 ret = __rbd_add_snap_dev(rbd_dev, i - 1, name, &snap);
1647 if (ret < 0)
1648 return ret;
1649 list_add(&snap->node, &rbd_dev->snaps);
1650 }
1651
1652 return 0;
1653}
1654
1655
1656static void rbd_root_dev_release(struct device *dev)
1657{
1658}
1659
1660static struct device rbd_root_dev = {
1661 .init_name = "rbd",
1662 .release = rbd_root_dev_release,
1663};
1664
1665static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
1666{
1667 int ret = -ENOMEM;
1668 struct device *dev;
1669 struct rbd_snap *snap;
1670
1671 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1672 dev = &rbd_dev->dev;
1673
1674 dev->bus = &rbd_bus_type;
1675 dev->type = &rbd_device_type;
1676 dev->parent = &rbd_root_dev;
1677 dev->release = rbd_dev_release;
1678 dev_set_name(dev, "%d", rbd_dev->id);
1679 ret = device_register(dev);
1680 if (ret < 0)
1681 goto done_free;
1682
1683 list_for_each_entry(snap, &rbd_dev->snaps, node) {
1684 ret = rbd_register_snap_dev(rbd_dev, snap,
1685 &rbd_dev->dev);
1686 if (ret < 0)
1384 break; 1687 break;
1385 } 1688 }
1386 1689
1387 mutex_unlock(&ctl_mutex); 1690 mutex_unlock(&ctl_mutex);
1388 return n; 1691 return 0;
1692done_free:
1693 mutex_unlock(&ctl_mutex);
1694 return ret;
1389} 1695}
1390 1696
1391static ssize_t class_rbd_add(struct class *c, 1697static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
1392 struct class_attribute *attr, 1698{
1393 const char *buf, size_t count) 1699 device_unregister(&rbd_dev->dev);
1700}
1701
1702static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
1394{ 1703{
1395 struct ceph_osd_client *osdc; 1704 struct ceph_osd_client *osdc;
1396 struct rbd_device *rbd_dev; 1705 struct rbd_device *rbd_dev;
@@ -1419,6 +1728,7 @@ static ssize_t class_rbd_add(struct class *c,
1419 /* static rbd_device initialization */ 1728 /* static rbd_device initialization */
1420 spin_lock_init(&rbd_dev->lock); 1729 spin_lock_init(&rbd_dev->lock);
1421 INIT_LIST_HEAD(&rbd_dev->node); 1730 INIT_LIST_HEAD(&rbd_dev->node);
1731 INIT_LIST_HEAD(&rbd_dev->snaps);
1422 1732
1423 /* generate unique id: find highest unique id, add one */ 1733 /* generate unique id: find highest unique id, add one */
1424 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1734 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1478,6 +1788,9 @@ static ssize_t class_rbd_add(struct class *c,
1478 } 1788 }
1479 rbd_dev->major = irc; 1789 rbd_dev->major = irc;
1480 1790
1791 rc = rbd_bus_add_dev(rbd_dev);
1792 if (rc)
1793 goto err_out_disk;
1481 /* set up and announce blkdev mapping */ 1794 /* set up and announce blkdev mapping */
1482 rc = rbd_init_disk(rbd_dev); 1795 rc = rbd_init_disk(rbd_dev);
1483 if (rc) 1796 if (rc)
@@ -1487,6 +1800,8 @@ static ssize_t class_rbd_add(struct class *c,
1487 1800
1488err_out_blkdev: 1801err_out_blkdev:
1489 unregister_blkdev(rbd_dev->major, rbd_dev->name); 1802 unregister_blkdev(rbd_dev->major, rbd_dev->name);
1803err_out_disk:
1804 rbd_free_disk(rbd_dev);
1490err_out_client: 1805err_out_client:
1491 rbd_put_client(rbd_dev); 1806 rbd_put_client(rbd_dev);
1492 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1807 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
@@ -1518,35 +1833,10 @@ static struct rbd_device *__rbd_get_dev(unsigned long id)
1518 return NULL; 1833 return NULL;
1519} 1834}
1520 1835
1521static ssize_t class_rbd_remove(struct class *c, 1836static void rbd_dev_release(struct device *dev)
1522 struct class_attribute *attr,
1523 const char *buf,
1524 size_t count)
1525{ 1837{
1526 struct rbd_device *rbd_dev = NULL; 1838 struct rbd_device *rbd_dev =
1527 int target_id, rc; 1839 container_of(dev, struct rbd_device, dev);
1528 unsigned long ul;
1529
1530 rc = strict_strtoul(buf, 10, &ul);
1531 if (rc)
1532 return rc;
1533
1534 /* convert to int; abort if we lost anything in the conversion */
1535 target_id = (int) ul;
1536 if (target_id != ul)
1537 return -EINVAL;
1538
1539 /* remove object from list immediately */
1540 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1541
1542 rbd_dev = __rbd_get_dev(target_id);
1543 if (rbd_dev)
1544 list_del_init(&rbd_dev->node);
1545
1546 mutex_unlock(&ctl_mutex);
1547
1548 if (!rbd_dev)
1549 return -ENOENT;
1550 1840
1551 rbd_put_client(rbd_dev); 1841 rbd_put_client(rbd_dev);
1552 1842
@@ -1557,67 +1847,11 @@ static ssize_t class_rbd_remove(struct class *c,
1557 1847
1558 /* release module ref */ 1848 /* release module ref */
1559 module_put(THIS_MODULE); 1849 module_put(THIS_MODULE);
1560
1561 return count;
1562} 1850}
1563 1851
1564static ssize_t class_rbd_snaps_list(struct class *c, 1852static ssize_t rbd_remove(struct bus_type *bus,
1565 struct class_attribute *attr, 1853 const char *buf,
1566 char *data) 1854 size_t count)
1567{
1568 struct rbd_device *rbd_dev = NULL;
1569 struct list_head *tmp;
1570 struct rbd_image_header *header;
1571 int i, n = 0, max = PAGE_SIZE;
1572 int ret;
1573
1574 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1575
1576 n += snprintf(data, max, "#id\tsnap\tKB\n");
1577
1578 list_for_each(tmp, &rbd_dev_list) {
1579 char *names, *p;
1580 struct ceph_snap_context *snapc;
1581
1582 rbd_dev = list_entry(tmp, struct rbd_device, node);
1583 header = &rbd_dev->header;
1584
1585 down_read(&header->snap_rwsem);
1586
1587 names = header->snap_names;
1588 snapc = header->snapc;
1589
1590 n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
1591 rbd_dev->id, RBD_SNAP_HEAD_NAME,
1592 header->image_size >> 10,
1593 (!rbd_dev->cur_snap ? " (*)" : ""));
1594 if (n == max)
1595 break;
1596
1597 p = names;
1598 for (i = 0; i < header->total_snaps; i++, p += strlen(p) + 1) {
1599 n += snprintf(data + n, max - n, "%d\t%s\t%lld%s\n",
1600 rbd_dev->id, p, header->snap_sizes[i] >> 10,
1601 (rbd_dev->cur_snap &&
1602 (snap_index(header, i) == rbd_dev->cur_snap) ?
1603 " (*)" : ""));
1604 if (n == max)
1605 break;
1606 }
1607
1608 up_read(&header->snap_rwsem);
1609 }
1610
1611
1612 ret = n;
1613 mutex_unlock(&ctl_mutex);
1614 return ret;
1615}
1616
1617static ssize_t class_rbd_snaps_refresh(struct class *c,
1618 struct class_attribute *attr,
1619 const char *buf,
1620 size_t count)
1621{ 1855{
1622 struct rbd_device *rbd_dev = NULL; 1856 struct rbd_device *rbd_dev = NULL;
1623 int target_id, rc; 1857 int target_id, rc;
@@ -1641,95 +1875,70 @@ static ssize_t class_rbd_snaps_refresh(struct class *c,
1641 goto done; 1875 goto done;
1642 } 1876 }
1643 1877
1644 rc = rbd_update_snaps(rbd_dev); 1878 list_del_init(&rbd_dev->node);
1645 if (rc < 0) 1879
1646 ret = rc; 1880 __rbd_remove_all_snaps(rbd_dev);
1881 rbd_bus_del_dev(rbd_dev);
1647 1882
1648done: 1883done:
1649 mutex_unlock(&ctl_mutex); 1884 mutex_unlock(&ctl_mutex);
1650 return ret; 1885 return ret;
1651} 1886}
1652 1887
1653static ssize_t class_rbd_snap_create(struct class *c, 1888static ssize_t rbd_snap_add(struct device *dev,
1654 struct class_attribute *attr, 1889 struct device_attribute *attr,
1655 const char *buf, 1890 const char *buf,
1656 size_t count) 1891 size_t count)
1657{ 1892{
1658 struct rbd_device *rbd_dev = NULL; 1893 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1659 int target_id, ret; 1894 int ret;
1660 char *name; 1895 char *name = kmalloc(count + 1, GFP_KERNEL);
1661
1662 name = kmalloc(RBD_MAX_SNAP_NAME_LEN + 1, GFP_KERNEL);
1663 if (!name) 1896 if (!name)
1664 return -ENOMEM; 1897 return -ENOMEM;
1665 1898
1666 /* parse snaps add command */ 1899 snprintf(name, count, "%s", buf);
1667 if (sscanf(buf, "%d "
1668 "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
1669 &target_id,
1670 name) != 2) {
1671 ret = -EINVAL;
1672 goto done;
1673 }
1674 1900
1675 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1901 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1676 1902
1677 rbd_dev = __rbd_get_dev(target_id);
1678 if (!rbd_dev) {
1679 ret = -ENOENT;
1680 goto done_unlock;
1681 }
1682
1683 ret = rbd_header_add_snap(rbd_dev, 1903 ret = rbd_header_add_snap(rbd_dev,
1684 name, GFP_KERNEL); 1904 name, GFP_KERNEL);
1685 if (ret < 0) 1905 if (ret < 0)
1686 goto done_unlock; 1906 goto done_unlock;
1687 1907
1688 ret = rbd_update_snaps(rbd_dev); 1908 ret = __rbd_update_snaps(rbd_dev);
1689 if (ret < 0) 1909 if (ret < 0)
1690 goto done_unlock; 1910 goto done_unlock;
1691 1911
1692 ret = count; 1912 ret = count;
1693done_unlock: 1913done_unlock:
1694 mutex_unlock(&ctl_mutex); 1914 mutex_unlock(&ctl_mutex);
1695done:
1696 kfree(name); 1915 kfree(name);
1697 return ret; 1916 return ret;
1698} 1917}
1699 1918
1700static ssize_t class_rbd_rollback(struct class *c, 1919static ssize_t rbd_snap_rollback(struct device *dev,
1701 struct class_attribute *attr, 1920 struct device_attribute *attr,
1702 const char *buf, 1921 const char *buf,
1703 size_t count) 1922 size_t count)
1704{ 1923{
1705 struct rbd_device *rbd_dev = NULL; 1924 struct rbd_device *rbd_dev = dev_to_rbd(dev);
1706 int target_id, ret; 1925 int ret;
1707 u64 snapid; 1926 u64 snapid;
1708 char snap_name[RBD_MAX_SNAP_NAME_LEN];
1709 u64 cur_ofs; 1927 u64 cur_ofs;
1710 char *seg_name; 1928 char *seg_name = NULL;
1929 char *snap_name = kmalloc(count + 1, GFP_KERNEL);
1930 ret = -ENOMEM;
1931 if (!snap_name)
1932 return ret;
1711 1933
1712 /* parse snaps add command */ 1934 /* parse snaps add command */
1713 if (sscanf(buf, "%d " 1935 snprintf(snap_name, count, "%s", buf);
1714 "%" __stringify(RBD_MAX_SNAP_NAME_LEN) "s",
1715 &target_id,
1716 snap_name) != 2) {
1717 return -EINVAL;
1718 }
1719
1720 ret = -ENOMEM;
1721 seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO); 1936 seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
1722 if (!seg_name) 1937 if (!seg_name)
1723 return ret; 1938 goto done;
1724 1939
1725 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 1940 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
1726 1941
1727 rbd_dev = __rbd_get_dev(target_id);
1728 if (!rbd_dev) {
1729 ret = -ENOENT;
1730 goto done_unlock;
1731 }
1732
1733 ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL); 1942 ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
1734 if (ret < 0) 1943 if (ret < 0)
1735 goto done_unlock; 1944 goto done_unlock;
@@ -1750,7 +1959,7 @@ static ssize_t class_rbd_rollback(struct class *c,
1750 seg_name, ret); 1959 seg_name, ret);
1751 } 1960 }
1752 1961
1753 ret = rbd_update_snaps(rbd_dev); 1962 ret = __rbd_update_snaps(rbd_dev);
1754 if (ret < 0) 1963 if (ret < 0)
1755 goto done_unlock; 1964 goto done_unlock;
1756 1965
@@ -1758,57 +1967,42 @@ static ssize_t class_rbd_rollback(struct class *c,
1758 1967
1759done_unlock: 1968done_unlock:
1760 mutex_unlock(&ctl_mutex); 1969 mutex_unlock(&ctl_mutex);
1970done:
1761 kfree(seg_name); 1971 kfree(seg_name);
1972 kfree(snap_name);
1762 1973
1763 return ret; 1974 return ret;
1764} 1975}
1765 1976
1766static struct class_attribute class_rbd_attrs[] = { 1977static struct bus_attribute rbd_bus_attrs[] = {
1767 __ATTR(add, 0200, NULL, class_rbd_add), 1978 __ATTR(add, S_IWUSR, NULL, rbd_add),
1768 __ATTR(remove, 0200, NULL, class_rbd_remove), 1979 __ATTR(remove, S_IWUSR, NULL, rbd_remove),
1769 __ATTR(list, 0444, class_rbd_list, NULL),
1770 __ATTR(snaps_refresh, 0200, NULL, class_rbd_snaps_refresh),
1771 __ATTR(snap_create, 0200, NULL, class_rbd_snap_create),
1772 __ATTR(snaps_list, 0444, class_rbd_snaps_list, NULL),
1773 __ATTR(snap_rollback, 0200, NULL, class_rbd_rollback),
1774 __ATTR_NULL 1980 __ATTR_NULL
1775}; 1981};
1776 1982
1777/* 1983/*
1778 * create control files in sysfs 1984 * create control files in sysfs
1779 * /sys/class/rbd/... 1985 * /sys/bus/rbd/...
1780 */ 1986 */
1781static int rbd_sysfs_init(void) 1987static int rbd_sysfs_init(void)
1782{ 1988{
1783 int ret = -ENOMEM; 1989 int ret;
1784 1990
1785 class_rbd = kzalloc(sizeof(*class_rbd), GFP_KERNEL); 1991 rbd_bus_type.bus_attrs = rbd_bus_attrs;
1786 if (!class_rbd)
1787 goto out;
1788 1992
1789 class_rbd->name = DRV_NAME; 1993 ret = bus_register(&rbd_bus_type);
1790 class_rbd->owner = THIS_MODULE; 1994 if (ret < 0)
1791 class_rbd->class_release = class_rbd_release; 1995 return ret;
1792 class_rbd->class_attrs = class_rbd_attrs;
1793 1996
1794 ret = class_register(class_rbd); 1997 ret = device_register(&rbd_root_dev);
1795 if (ret)
1796 goto out_class;
1797 return 0;
1798 1998
1799out_class:
1800 kfree(class_rbd);
1801 class_rbd = NULL;
1802 pr_err(DRV_NAME ": failed to create class rbd\n");
1803out:
1804 return ret; 1999 return ret;
1805} 2000}
1806 2001
1807static void rbd_sysfs_cleanup(void) 2002static void rbd_sysfs_cleanup(void)
1808{ 2003{
1809 if (class_rbd) 2004 device_unregister(&rbd_root_dev);
1810 class_destroy(class_rbd); 2005 bus_unregister(&rbd_bus_type);
1811 class_rbd = NULL;
1812} 2006}
1813 2007
1814int __init rbd_init(void) 2008int __init rbd_init(void)
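
The snapshot names in the on-disk rbd header are a single NUL-delimited
buffer, newest first, and rbd_prev_snap_name() walks it backwards. A
hypothetical standalone demo of the same walk (prev_name() mirrors the
kernel helper; the buffer contents are invented):

	#include <stdio.h>
	#include <stddef.h>

	static const char *prev_name(const char *name, const char *start)
	{
		if (name < start + 2)
			return NULL;		/* nothing before us */
		name -= 2;			/* skip our own terminating NUL */
		while (*name) {
			if (name == start)
				return start;	/* first name in the buffer */
			name--;
		}
		return name + 1;		/* just past the previous NUL */
	}

	int main(void)
	{
		const char buf[] = "new\0mid\0old";	/* sizeof includes final NUL */
		const char *p = buf + sizeof(buf);	/* one past the last NUL */

		while ((p = prev_name(p, buf)))
			puts(p);			/* prints old, mid, new */
		return 0;
	}
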
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 255035cfc88a..4f9e22f29138 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -65,7 +65,7 @@ enum blkif_state {
65 65
66struct blk_shadow { 66struct blk_shadow {
67 struct blkif_request req; 67 struct blkif_request req;
68 unsigned long request; 68 struct request *request;
69 unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST]; 69 unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
70}; 70};
71 71
@@ -136,7 +136,7 @@ static void add_id_to_freelist(struct blkfront_info *info,
136 unsigned long id) 136 unsigned long id)
137{ 137{
138 info->shadow[id].req.id = info->shadow_free; 138 info->shadow[id].req.id = info->shadow_free;
139 info->shadow[id].request = 0; 139 info->shadow[id].request = NULL;
140 info->shadow_free = id; 140 info->shadow_free = id;
141} 141}
142 142
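
The shadow bookkeeping changed here stores a struct request pointer instead
of casting through unsigned long; the free-slot list itself is untouched:
unused ids are threaded, LIFO, through the id field of each idle shadow
entry. A hypothetical standalone sketch of that free list (field and
function names invented):

	#include <assert.h>
	#include <stddef.h>

	#define RING_SIZE 8

	struct shadow {
		unsigned long next_free;	/* plays the role of req.id */
		void *request;			/* NULL while the slot is free */
	};

	static struct shadow shadow[RING_SIZE];
	static unsigned long free_head;

	static void freelist_init(void)
	{
		for (unsigned long i = 0; i < RING_SIZE; i++)
			shadow[i].next_free = i + 1;	/* RING_SIZE == "none" */
		free_head = 0;
	}

	static unsigned long get_id(void)	/* cf. get_id_from_freelist() */
	{
		unsigned long id = free_head;

		assert(id < RING_SIZE);		/* caller ensures space */
		free_head = shadow[id].next_free;
		return id;
	}

	static void put_id(unsigned long id)	/* cf. add_id_to_freelist() */
	{
		shadow[id].next_free = free_head;
		shadow[id].request = NULL;
		free_head = id;
	}
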
@@ -245,14 +245,11 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
245} 245}
246 246
247/* 247/*
248 * blkif_queue_request 248 * Generate a Xen blkfront IO request from a blk layer request. Reads
249 * and writes are handled as expected. Since we lack a loose flush
250 * request, we map flushes into a full ordered barrier.
249 * 251 *
250 * request block io 252 * @req: a request struct
251 *
252 * id: for guest use only.
253 * operation: BLKIF_OP_{READ,WRITE,PROBE}
254 * buffer: buffer to read/write into. this should be a
255 * virtual address in the guest os.
256 */ 253 */
257static int blkif_queue_request(struct request *req) 254static int blkif_queue_request(struct request *req)
258{ 255{
@@ -281,7 +278,7 @@ static int blkif_queue_request(struct request *req)
281 /* Fill out a communications ring structure. */ 278 /* Fill out a communications ring structure. */
282 ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt); 279 ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
283 id = get_id_from_freelist(info); 280 id = get_id_from_freelist(info);
284 info->shadow[id].request = (unsigned long)req; 281 info->shadow[id].request = req;
285 282
286 ring_req->id = id; 283 ring_req->id = id;
287 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); 284 ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
@@ -290,6 +287,18 @@ static int blkif_queue_request(struct request *req)
290 ring_req->operation = rq_data_dir(req) ? 287 ring_req->operation = rq_data_dir(req) ?
291 BLKIF_OP_WRITE : BLKIF_OP_READ; 288 BLKIF_OP_WRITE : BLKIF_OP_READ;
292 289
290 if (req->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
291 /*
292 * Ideally we could just do an unordered
293 * flush-to-disk, but all we have is a full write
294 * barrier at the moment. However, a barrier write is
295 * a superset of FUA, so we can implement it the same
296 * way. (It's also a FLUSH+FUA, since it is
297 * guaranteed ordered WRT previous writes.)
298 */
299 ring_req->operation = BLKIF_OP_WRITE_BARRIER;
300 }
301
293 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); 302 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
294 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST); 303 BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
295 304
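
The REQ_FLUSH/REQ_FUA handling added above collapses every flush-like
request onto the one primitive the ring protocol offers. Summarized
(operations as in the patch):

	blk layer request             ring_req->operation
	----------------------------  -----------------------
	read                          BLKIF_OP_READ
	write                         BLKIF_OP_WRITE
	write + REQ_FLUSH or REQ_FUA  BLKIF_OP_WRITE_BARRIER

A barrier write is ordered against all earlier writes and commits its own
data, so it is a strict superset of both FLUSH and FUA semantics: correct,
just stronger (and potentially slower) than strictly necessary.
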
@@ -634,7 +643,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
634 643
635 bret = RING_GET_RESPONSE(&info->ring, i); 644 bret = RING_GET_RESPONSE(&info->ring, i);
636 id = bret->id; 645 id = bret->id;
637 req = (struct request *)info->shadow[id].request; 646 req = info->shadow[id].request;
638 647
639 blkif_completion(&info->shadow[id]); 648 blkif_completion(&info->shadow[id]);
640 649
@@ -647,6 +656,16 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
647 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", 656 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n",
648 info->gd->disk_name); 657 info->gd->disk_name);
649 error = -EOPNOTSUPP; 658 error = -EOPNOTSUPP;
659 }
660 if (unlikely(bret->status == BLKIF_RSP_ERROR &&
661 info->shadow[id].req.nr_segments == 0)) {
662 printk(KERN_WARNING "blkfront: %s: empty write barrier op failed\n",
663 info->gd->disk_name);
664 error = -EOPNOTSUPP;
665 }
666 if (unlikely(error)) {
667 if (error == -EOPNOTSUPP)
668 error = 0;
650 info->feature_flush = 0; 669 info->feature_flush = 0;
651 xlvbd_flush(info); 670 xlvbd_flush(info);
652 } 671 }
@@ -899,7 +918,7 @@ static int blkif_recover(struct blkfront_info *info)
899 /* Stage 3: Find pending requests and requeue them. */ 918 /* Stage 3: Find pending requests and requeue them. */
900 for (i = 0; i < BLK_RING_SIZE; i++) { 919 for (i = 0; i < BLK_RING_SIZE; i++) {
901 /* Not in use? */ 920 /* Not in use? */
902 if (copy[i].request == 0) 921 if (!copy[i].request)
903 continue; 922 continue;
904 923
905 /* Grab a request slot and copy shadow state into it. */ 924 /* Grab a request slot and copy shadow state into it. */
@@ -916,9 +935,7 @@ static int blkif_recover(struct blkfront_info *info)
916 req->seg[j].gref, 935 req->seg[j].gref,
917 info->xbdev->otherend_id, 936 info->xbdev->otherend_id,
918 pfn_to_mfn(info->shadow[req->id].frame[j]), 937 pfn_to_mfn(info->shadow[req->id].frame[j]),
919 rq_data_dir( 938 rq_data_dir(info->shadow[req->id].request));
920 (struct request *)
921 info->shadow[req->id].request));
922 info->shadow[req->id].req = *req; 939 info->shadow[req->id].req = *req;
923 940
924 info->ring.req_prod_pvt++; 941 info->ring.req_prod_pvt++;
@@ -1067,14 +1084,8 @@ static void blkfront_connect(struct blkfront_info *info)
1067 */ 1084 */
1068 info->feature_flush = 0; 1085 info->feature_flush = 0;
1069 1086
1070 /*
1071 * The driver doesn't properly handled empty flushes, so
1072 * lets disable barrier support for now.
1073 */
1074#if 0
1075 if (!err && barrier) 1087 if (!err && barrier)
1076 info->feature_flush = REQ_FLUSH; 1088 info->feature_flush = REQ_FLUSH | REQ_FUA;
1077#endif
1078 1089
1079 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1090 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size);
1080 if (err) { 1091 if (err) {
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 5259065f3c79..3e67ddde9e16 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -120,7 +120,6 @@ struct agp_bridge_driver {
120 void (*agp_destroy_page)(struct page *, int flags); 120 void (*agp_destroy_page)(struct page *, int flags);
121 void (*agp_destroy_pages)(struct agp_memory *); 121 void (*agp_destroy_pages)(struct agp_memory *);
122 int (*agp_type_to_mask_type) (struct agp_bridge_data *, int); 122 int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
123 void (*chipset_flush)(struct agp_bridge_data *);
124}; 123};
125 124
126struct agp_bridge_data { 125struct agp_bridge_data {
diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
index 9d2c97a69cdd..a48e05b31593 100644
--- a/drivers/char/agp/compat_ioctl.c
+++ b/drivers/char/agp/compat_ioctl.c
@@ -276,7 +276,6 @@ long compat_agp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
276 break; 276 break;
277 277
278 case AGPIOC_CHIPSET_FLUSH32: 278 case AGPIOC_CHIPSET_FLUSH32:
279 ret_val = agpioc_chipset_flush_wrap(curr_priv);
280 break; 279 break;
281 } 280 }
282 281
diff --git a/drivers/char/agp/compat_ioctl.h b/drivers/char/agp/compat_ioctl.h
index 0c9678ac0371..f30e0fd97963 100644
--- a/drivers/char/agp/compat_ioctl.h
+++ b/drivers/char/agp/compat_ioctl.h
@@ -102,6 +102,5 @@ void agp_free_memory_wrap(struct agp_memory *memory);
102struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type); 102struct agp_memory *agp_allocate_memory_wrap(size_t pg_count, u32 type);
103struct agp_memory *agp_find_mem_by_key(int key); 103struct agp_memory *agp_find_mem_by_key(int key);
104struct agp_client *agp_find_client_by_pid(pid_t id); 104struct agp_client *agp_find_client_by_pid(pid_t id);
105int agpioc_chipset_flush_wrap(struct agp_file_private *priv);
106 105
107#endif /* _AGP_COMPAT_H */ 106#endif /* _AGP_COMPAT_H */
diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
index 3cb4539a98b2..2e044338753c 100644
--- a/drivers/char/agp/frontend.c
+++ b/drivers/char/agp/frontend.c
@@ -957,13 +957,6 @@ static int agpioc_unbind_wrap(struct agp_file_private *priv, void __user *arg)
957 return agp_unbind_memory(memory); 957 return agp_unbind_memory(memory);
958} 958}
959 959
960int agpioc_chipset_flush_wrap(struct agp_file_private *priv)
961{
962 DBG("");
963 agp_flush_chipset(agp_bridge);
964 return 0;
965}
966
967static long agp_ioctl(struct file *file, 960static long agp_ioctl(struct file *file,
968 unsigned int cmd, unsigned long arg) 961 unsigned int cmd, unsigned long arg)
969{ 962{
@@ -1039,7 +1032,6 @@ static long agp_ioctl(struct file *file,
1039 break; 1032 break;
1040 1033
1041 case AGPIOC_CHIPSET_FLUSH: 1034 case AGPIOC_CHIPSET_FLUSH:
1042 ret_val = agpioc_chipset_flush_wrap(curr_priv);
1043 break; 1035 break;
1044 } 1036 }
1045 1037
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 4956f1c8f9d5..012cba0d6d96 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -81,13 +81,6 @@ static int agp_get_key(void)
81 return -1; 81 return -1;
82} 82}
83 83
84void agp_flush_chipset(struct agp_bridge_data *bridge)
85{
86 if (bridge->driver->chipset_flush)
87 bridge->driver->chipset_flush(bridge);
88}
89EXPORT_SYMBOL(agp_flush_chipset);
90
91/* 84/*
92 * Use kmalloc if possible for the page list. Otherwise fall back to 85 * Use kmalloc if possible for the page list. Otherwise fall back to
93 * vmalloc. This speeds things up and also saves memory for small AGP 86 * vmalloc. This speeds things up and also saves memory for small AGP
@@ -487,26 +480,6 @@ int agp_unbind_memory(struct agp_memory *curr)
487} 480}
488EXPORT_SYMBOL(agp_unbind_memory); 481EXPORT_SYMBOL(agp_unbind_memory);
489 482
490/**
491 * agp_rebind_memory - Rewrite the entire GATT, useful on resume
492 */
493int agp_rebind_memory(void)
494{
495 struct agp_memory *curr;
496 int ret_val = 0;
497
498 spin_lock(&agp_bridge->mapped_lock);
499 list_for_each_entry(curr, &agp_bridge->mapped_list, mapped_list) {
500 ret_val = curr->bridge->driver->insert_memory(curr,
501 curr->pg_start,
502 curr->type);
503 if (ret_val != 0)
504 break;
505 }
506 spin_unlock(&agp_bridge->mapped_lock);
507 return ret_val;
508}
509EXPORT_SYMBOL(agp_rebind_memory);
510 483
511/* End - Routines for handling swapping of agp_memory into the GATT */ 484/* End - Routines for handling swapping of agp_memory into the GATT */
512 485
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index e72f49d52202..07e9796fead7 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -828,14 +828,9 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
828static int agp_intel_resume(struct pci_dev *pdev) 828static int agp_intel_resume(struct pci_dev *pdev)
829{ 829{
830 struct agp_bridge_data *bridge = pci_get_drvdata(pdev); 830 struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
831 int ret_val;
832 831
833 bridge->driver->configure(); 832 bridge->driver->configure();
834 833
835 ret_val = agp_rebind_memory();
836 if (ret_val != 0)
837 return ret_val;
838
839 return 0; 834 return 0;
840} 835}
841#endif 836#endif
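With agp_rebind_memory() gone, agp_intel_resume() only re-runs configure(); re-enabling the page table is handled by intel_enable_gtt() (below), which rewrites PGETBL_CTL from the saved PGETBL_save value, after which the owner of the mappings re-inserts its own pages. A rough sketch of that division of labour, using illustrative names rather than the patch's literal code:

	static int example_resume(void)
	{
		if (!intel_enable_gtt())	/* restores PGETBL_CTL from PGETBL_save */
			return -EIO;
		/* ... then whoever owns the mappings rebinds them, e.g. via
		 * intel_gtt_insert_pages() / intel_gtt_insert_sg_entries() */
		return 0;
	}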
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index 90539df02504..010e3defd6c3 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -75,6 +75,8 @@
75#define I810_GMS_DISABLE 0x00000000 75#define I810_GMS_DISABLE 0x00000000
76#define I810_PGETBL_CTL 0x2020 76#define I810_PGETBL_CTL 0x2020
77#define I810_PGETBL_ENABLED 0x00000001 77#define I810_PGETBL_ENABLED 0x00000001
78/* Note: PGETBL_CTL2 has a different offset on G33. */
79#define I965_PGETBL_CTL2 0x20c4
78#define I965_PGETBL_SIZE_MASK 0x0000000e 80#define I965_PGETBL_SIZE_MASK 0x0000000e
79#define I965_PGETBL_SIZE_512KB (0 << 1) 81#define I965_PGETBL_SIZE_512KB (0 << 1)
80#define I965_PGETBL_SIZE_256KB (1 << 1) 82#define I965_PGETBL_SIZE_256KB (1 << 1)
@@ -82,9 +84,15 @@
82#define I965_PGETBL_SIZE_1MB (3 << 1) 84#define I965_PGETBL_SIZE_1MB (3 << 1)
83#define I965_PGETBL_SIZE_2MB (4 << 1) 85#define I965_PGETBL_SIZE_2MB (4 << 1)
84#define I965_PGETBL_SIZE_1_5MB (5 << 1) 86#define I965_PGETBL_SIZE_1_5MB (5 << 1)
85#define G33_PGETBL_SIZE_MASK (3 << 8) 87#define G33_GMCH_SIZE_MASK (3 << 8)
86#define G33_PGETBL_SIZE_1M (1 << 8) 88#define G33_GMCH_SIZE_1M (1 << 8)
87#define G33_PGETBL_SIZE_2M (2 << 8) 89#define G33_GMCH_SIZE_2M (2 << 8)
90#define G4x_GMCH_SIZE_MASK (0xf << 8)
91#define G4x_GMCH_SIZE_1M (0x1 << 8)
92#define G4x_GMCH_SIZE_2M (0x3 << 8)
93#define G4x_GMCH_SIZE_VT_1M (0x9 << 8)
94#define G4x_GMCH_SIZE_VT_1_5M (0xa << 8)
95#define G4x_GMCH_SIZE_VT_2M (0xc << 8)
88 96
89#define I810_DRAM_CTL 0x3000 97#define I810_DRAM_CTL 0x3000
90#define I810_DRAM_ROW_0 0x00000001 98#define I810_DRAM_ROW_0 0x00000001
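The new G4x_GMCH_SIZE_* values decode bits 11:8 of the GMCH control word; the VT_* encodings are presumably the sizes reported when VT-d is enabled. The gen5 decode performed by i965_gtt_total_entries() in intel-gtt.c reduces to:

	/* gmch_ctl & G4x_GMCH_SIZE_MASK:
	 *   G4x_GMCH_SIZE_1M  or G4x_GMCH_SIZE_VT_1M  -> I965_PGETBL_SIZE_1MB
	 *   G4x_GMCH_SIZE_VT_1_5M                      -> I965_PGETBL_SIZE_1_5MB
	 *   G4x_GMCH_SIZE_2M  or G4x_GMCH_SIZE_VT_2M  -> I965_PGETBL_SIZE_2MB
	 */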
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 9272c38dd3c6..356f73e0d17e 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -24,7 +24,6 @@
24#include <asm/smp.h> 24#include <asm/smp.h>
25#include "agp.h" 25#include "agp.h"
26#include "intel-agp.h" 26#include "intel-agp.h"
27#include <linux/intel-gtt.h>
28#include <drm/intel-gtt.h> 27#include <drm/intel-gtt.h>
29 28
30/* 29/*
@@ -39,40 +38,12 @@
39#define USE_PCI_DMA_API 0 38#define USE_PCI_DMA_API 0
40#endif 39#endif
41 40
42/* Max amount of stolen space, anything above will be returned to Linux */
43int intel_max_stolen = 32 * 1024 * 1024;
44
45static const struct aper_size_info_fixed intel_i810_sizes[] =
46{
47 {64, 16384, 4},
48 /* The 32M mode still requires a 64k gatt */
49 {32, 8192, 4}
50};
51
52#define AGP_DCACHE_MEMORY 1
53#define AGP_PHYS_MEMORY 2
54#define INTEL_AGP_CACHED_MEMORY 3
55
56static struct gatt_mask intel_i810_masks[] =
57{
58 {.mask = I810_PTE_VALID, .type = 0},
59 {.mask = (I810_PTE_VALID | I810_PTE_LOCAL), .type = AGP_DCACHE_MEMORY},
60 {.mask = I810_PTE_VALID, .type = 0},
61 {.mask = I810_PTE_VALID | I830_PTE_SYSTEM_CACHED,
62 .type = INTEL_AGP_CACHED_MEMORY}
63};
64
65#define INTEL_AGP_UNCACHED_MEMORY 0
66#define INTEL_AGP_CACHED_MEMORY_LLC 1
67#define INTEL_AGP_CACHED_MEMORY_LLC_GFDT 2
68#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
69#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
70
71struct intel_gtt_driver { 41struct intel_gtt_driver {
72 unsigned int gen : 8; 42 unsigned int gen : 8;
73 unsigned int is_g33 : 1; 43 unsigned int is_g33 : 1;
74 unsigned int is_pineview : 1; 44 unsigned int is_pineview : 1;
75 unsigned int is_ironlake : 1; 45 unsigned int is_ironlake : 1;
46 unsigned int has_pgtbl_enable : 1;
76 unsigned int dma_mask_size : 8; 47 unsigned int dma_mask_size : 8;
77 /* Chipset specific GTT setup */ 48 /* Chipset specific GTT setup */
78 int (*setup)(void); 49 int (*setup)(void);
@@ -95,13 +66,14 @@ static struct _intel_private {
95 u8 __iomem *registers; 66 u8 __iomem *registers;
96 phys_addr_t gtt_bus_addr; 67 phys_addr_t gtt_bus_addr;
97 phys_addr_t gma_bus_addr; 68 phys_addr_t gma_bus_addr;
98 phys_addr_t pte_bus_addr; 69 u32 PGETBL_save;
99 u32 __iomem *gtt; /* I915G */ 70 u32 __iomem *gtt; /* I915G */
100 int num_dcache_entries; 71 int num_dcache_entries;
101 union { 72 union {
102 void __iomem *i9xx_flush_page; 73 void __iomem *i9xx_flush_page;
103 void *i8xx_flush_page; 74 void *i8xx_flush_page;
104 }; 75 };
76 char *i81x_gtt_table;
105 struct page *i8xx_page; 77 struct page *i8xx_page;
106 struct resource ifp_resource; 78 struct resource ifp_resource;
107 int resource_valid; 79 int resource_valid;
@@ -113,42 +85,31 @@ static struct _intel_private {
113#define IS_G33 intel_private.driver->is_g33 85#define IS_G33 intel_private.driver->is_g33
114#define IS_PINEVIEW intel_private.driver->is_pineview 86#define IS_PINEVIEW intel_private.driver->is_pineview
115#define IS_IRONLAKE intel_private.driver->is_ironlake 87#define IS_IRONLAKE intel_private.driver->is_ironlake
88#define HAS_PGTBL_EN intel_private.driver->has_pgtbl_enable
116 89
117static void intel_agp_free_sglist(struct agp_memory *mem) 90int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
118{ 91 struct scatterlist **sg_list, int *num_sg)
119 struct sg_table st;
120
121 st.sgl = mem->sg_list;
122 st.orig_nents = st.nents = mem->page_count;
123
124 sg_free_table(&st);
125
126 mem->sg_list = NULL;
127 mem->num_sg = 0;
128}
129
130static int intel_agp_map_memory(struct agp_memory *mem)
131{ 92{
132 struct sg_table st; 93 struct sg_table st;
133 struct scatterlist *sg; 94 struct scatterlist *sg;
134 int i; 95 int i;
135 96
136 if (mem->sg_list) 97 if (*sg_list)
137	return 0; /* already mapped (e.g. for resume) */ 98	return 0; /* already mapped (e.g. for resume) */
138 99
139 DBG("try mapping %lu pages\n", (unsigned long)mem->page_count); 100 DBG("try mapping %lu pages\n", (unsigned long)num_entries);
140 101
141 if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL)) 102 if (sg_alloc_table(&st, num_entries, GFP_KERNEL))
142 goto err; 103 goto err;
143 104
144 mem->sg_list = sg = st.sgl; 105 *sg_list = sg = st.sgl;
145 106
146 for (i = 0 ; i < mem->page_count; i++, sg = sg_next(sg)) 107 for (i = 0 ; i < num_entries; i++, sg = sg_next(sg))
147 sg_set_page(sg, mem->pages[i], PAGE_SIZE, 0); 108 sg_set_page(sg, pages[i], PAGE_SIZE, 0);
148 109
149 mem->num_sg = pci_map_sg(intel_private.pcidev, mem->sg_list, 110 *num_sg = pci_map_sg(intel_private.pcidev, *sg_list,
150 mem->page_count, PCI_DMA_BIDIRECTIONAL); 111 num_entries, PCI_DMA_BIDIRECTIONAL);
151 if (unlikely(!mem->num_sg)) 112 if (unlikely(!*num_sg))
152 goto err; 113 goto err;
153 114
154 return 0; 115 return 0;
@@ -157,90 +118,22 @@ err:
157 sg_free_table(&st); 118 sg_free_table(&st);
158 return -ENOMEM; 119 return -ENOMEM;
159} 120}
121EXPORT_SYMBOL(intel_gtt_map_memory);
160 122
161static void intel_agp_unmap_memory(struct agp_memory *mem) 123void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg)
162{ 124{
125 struct sg_table st;
163	DBG("try unmapping %lu pages\n", (unsigned long)mem->page_count); 126	DBG("try unmapping %lu pages\n", (unsigned long)num_sg);
164 127
165 pci_unmap_sg(intel_private.pcidev, mem->sg_list, 128 pci_unmap_sg(intel_private.pcidev, sg_list,
166 mem->page_count, PCI_DMA_BIDIRECTIONAL); 129 num_sg, PCI_DMA_BIDIRECTIONAL);
167 intel_agp_free_sglist(mem);
168}
169
170static int intel_i810_fetch_size(void)
171{
172 u32 smram_miscc;
173 struct aper_size_info_fixed *values;
174
175 pci_read_config_dword(intel_private.bridge_dev,
176 I810_SMRAM_MISCC, &smram_miscc);
177 values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
178
179 if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
180 dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
181 return 0;
182 }
183 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
184 agp_bridge->current_size = (void *) (values + 1);
185 agp_bridge->aperture_size_idx = 1;
186 return values[1].size;
187 } else {
188 agp_bridge->current_size = (void *) (values);
189 agp_bridge->aperture_size_idx = 0;
190 return values[0].size;
191 }
192
193 return 0;
194}
195
196static int intel_i810_configure(void)
197{
198 struct aper_size_info_fixed *current_size;
199 u32 temp;
200 int i;
201
202 current_size = A_SIZE_FIX(agp_bridge->current_size);
203
204 if (!intel_private.registers) {
205 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
206 temp &= 0xfff80000;
207 130
208 intel_private.registers = ioremap(temp, 128 * 4096); 131 st.sgl = sg_list;
209 if (!intel_private.registers) { 132 st.orig_nents = st.nents = num_sg;
210 dev_err(&intel_private.pcidev->dev,
211 "can't remap memory\n");
212 return -ENOMEM;
213 }
214 }
215 133
216 if ((readl(intel_private.registers+I810_DRAM_CTL) 134 sg_free_table(&st);
217 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
218 /* This will need to be dynamically assigned */
219 dev_info(&intel_private.pcidev->dev,
220 "detected 4MB dedicated video ram\n");
221 intel_private.num_dcache_entries = 1024;
222 }
223 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
224 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
225 writel(agp_bridge->gatt_bus_addr | I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
226 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
227
228 if (agp_bridge->driver->needs_scratch_page) {
229 for (i = 0; i < current_size->num_entries; i++) {
230 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
231 }
232 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI posting. */
233 }
234 global_cache_flush();
235 return 0;
236}
237
238static void intel_i810_cleanup(void)
239{
240 writel(0, intel_private.registers+I810_PGETBL_CTL);
241 readl(intel_private.registers); /* PCI Posting. */
242 iounmap(intel_private.registers);
243} 135}
136EXPORT_SYMBOL(intel_gtt_unmap_memory);
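intel_gtt_map_memory()/intel_gtt_unmap_memory() are exported so that a GTT client can do the scatterlist bookkeeping itself instead of going through struct agp_memory. A minimal sketch of the intended call pattern, mirroring the signatures above (the wrapper name is hypothetical):

	static int example_bind_pages(struct page **pages, unsigned int n,
				      unsigned int first_entry, unsigned int flags)
	{
		struct scatterlist *sg_list;
		int num_sg, ret;

		ret = intel_gtt_map_memory(pages, n, &sg_list, &num_sg);
		if (ret)
			return ret;
		intel_gtt_insert_sg_entries(sg_list, num_sg, first_entry, flags);
		return 0;
	}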
244 137
245static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode) 138static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
246{ 139{
@@ -277,80 +170,64 @@ static void i8xx_destroy_pages(struct page *page)
277 atomic_dec(&agp_bridge->current_memory_agp); 170 atomic_dec(&agp_bridge->current_memory_agp);
278} 171}
279 172
280static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start, 173#define I810_GTT_ORDER 4
281 int type) 174static int i810_setup(void)
282{ 175{
283 int i, j, num_entries; 176 u32 reg_addr;
284 void *temp; 177 char *gtt_table;
285 int ret = -EINVAL;
286 int mask_type;
287
288 if (mem->page_count == 0)
289 goto out;
290
291 temp = agp_bridge->current_size;
292 num_entries = A_SIZE_FIX(temp)->num_entries;
293 178
294 if ((pg_start + mem->page_count) > num_entries) 179 /* i81x does not preallocate the gtt. It's always 64kb in size. */
295 goto out_err; 180 gtt_table = alloc_gatt_pages(I810_GTT_ORDER);
181 if (gtt_table == NULL)
182 return -ENOMEM;
183 intel_private.i81x_gtt_table = gtt_table;
296 184
185 pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
186 reg_addr &= 0xfff80000;
297 187
298 for (j = pg_start; j < (pg_start + mem->page_count); j++) { 188 intel_private.registers = ioremap(reg_addr, KB(64));
299 if (!PGE_EMPTY(agp_bridge, readl(agp_bridge->gatt_table+j))) { 189 if (!intel_private.registers)
300 ret = -EBUSY; 190 return -ENOMEM;
301 goto out_err;
302 }
303 }
304 191
305 if (type != mem->type) 192 writel(virt_to_phys(gtt_table) | I810_PGETBL_ENABLED,
306 goto out_err; 193 intel_private.registers+I810_PGETBL_CTL);
307 194
308 mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type); 195 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
309 196
310 switch (mask_type) { 197 if ((readl(intel_private.registers+I810_DRAM_CTL)
311 case AGP_DCACHE_MEMORY: 198 & I810_DRAM_ROW_0) == I810_DRAM_ROW_0_SDRAM) {
312 if (!mem->is_flushed) 199 dev_info(&intel_private.pcidev->dev,
313 global_cache_flush(); 200 "detected 4MB dedicated video ram\n");
314 for (i = pg_start; i < (pg_start + mem->page_count); i++) { 201 intel_private.num_dcache_entries = 1024;
315 writel((i*4096)|I810_PTE_LOCAL|I810_PTE_VALID,
316 intel_private.registers+I810_PTE_BASE+(i*4));
317 }
318 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
319 break;
320 case AGP_PHYS_MEMORY:
321 case AGP_NORMAL_MEMORY:
322 if (!mem->is_flushed)
323 global_cache_flush();
324 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
325 writel(agp_bridge->driver->mask_memory(agp_bridge,
326 page_to_phys(mem->pages[i]), mask_type),
327 intel_private.registers+I810_PTE_BASE+(j*4));
328 }
329 readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
330 break;
331 default:
332 goto out_err;
333 } 202 }
334 203
335out: 204 return 0;
336 ret = 0; 205}
337out_err: 206
338 mem->is_flushed = true; 207static void i810_cleanup(void)
339 return ret; 208{
209 writel(0, intel_private.registers+I810_PGETBL_CTL);
210 free_gatt_pages(intel_private.i81x_gtt_table, I810_GTT_ORDER);
340} 211}
341 212
342static int intel_i810_remove_entries(struct agp_memory *mem, off_t pg_start, 213static int i810_insert_dcache_entries(struct agp_memory *mem, off_t pg_start,
343 int type) 214 int type)
344{ 215{
345 int i; 216 int i;
346 217
347 if (mem->page_count == 0) 218 if ((pg_start + mem->page_count)
348 return 0; 219 > intel_private.num_dcache_entries)
220 return -EINVAL;
221
222 if (!mem->is_flushed)
223 global_cache_flush();
349 224
350 for (i = pg_start; i < (mem->page_count + pg_start); i++) { 225 for (i = pg_start; i < (pg_start + mem->page_count); i++) {
351 writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4)); 226 dma_addr_t addr = i << PAGE_SHIFT;
227 intel_private.driver->write_entry(addr,
228 i, type);
352 } 229 }
353 readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); 230 readl(intel_private.gtt+i-1);
354 231
355 return 0; 232 return 0;
356} 233}
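i810_insert_dcache_entries() maps the i810's dedicated on-board video RAM: dcache page i lives at physical offset i << PAGE_SHIFT, so the PTE address is derived from the entry index itself rather than from a struct page. Traced through i810_write_entry() for one entry:

	/* for GTT index i:
	 *   addr = i << PAGE_SHIFT
	 *   i810_write_entry(addr, i, AGP_DCACHE_MEMORY)
	 *     -> writel(addr | I810_PTE_LOCAL | I810_PTE_VALID, gtt + i)
	 */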
@@ -397,29 +274,6 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
397 return new; 274 return new;
398} 275}
399 276
400static struct agp_memory *intel_i810_alloc_by_type(size_t pg_count, int type)
401{
402 struct agp_memory *new;
403
404 if (type == AGP_DCACHE_MEMORY) {
405 if (pg_count != intel_private.num_dcache_entries)
406 return NULL;
407
408 new = agp_create_memory(1);
409 if (new == NULL)
410 return NULL;
411
412 new->type = AGP_DCACHE_MEMORY;
413 new->page_count = pg_count;
414 new->num_scratch_pages = 0;
415 agp_free_page_array(new);
416 return new;
417 }
418 if (type == AGP_PHYS_MEMORY)
419 return alloc_agpphysmem_i8xx(pg_count, type);
420 return NULL;
421}
422
423static void intel_i810_free_by_type(struct agp_memory *curr) 277static void intel_i810_free_by_type(struct agp_memory *curr)
424{ 278{
425 agp_free_key(curr->key); 279 agp_free_key(curr->key);
@@ -437,13 +291,6 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
437 kfree(curr); 291 kfree(curr);
438} 292}
439 293
440static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
441 dma_addr_t addr, int type)
442{
443 /* Type checking must be done elsewhere */
444 return addr | bridge->driver->masks[type].mask;
445}
446
447static int intel_gtt_setup_scratch_page(void) 294static int intel_gtt_setup_scratch_page(void)
448{ 295{
449 struct page *page; 296 struct page *page;
@@ -455,7 +302,7 @@ static int intel_gtt_setup_scratch_page(void)
455 get_page(page); 302 get_page(page);
456 set_pages_uc(page, 1); 303 set_pages_uc(page, 1);
457 304
458 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { 305 if (intel_private.base.needs_dmar) {
459 dma_addr = pci_map_page(intel_private.pcidev, page, 0, 306 dma_addr = pci_map_page(intel_private.pcidev, page, 0,
460 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); 307 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
461 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr)) 308 if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
@@ -470,34 +317,45 @@ static int intel_gtt_setup_scratch_page(void)
470 return 0; 317 return 0;
471} 318}
472 319
473static const struct aper_size_info_fixed const intel_fake_agp_sizes[] = { 320static void i810_write_entry(dma_addr_t addr, unsigned int entry,
321 unsigned int flags)
322{
323 u32 pte_flags = I810_PTE_VALID;
324
325 switch (flags) {
326 case AGP_DCACHE_MEMORY:
327 pte_flags |= I810_PTE_LOCAL;
328 break;
329 case AGP_USER_CACHED_MEMORY:
330 pte_flags |= I830_PTE_SYSTEM_CACHED;
331 break;
332 }
333
334 writel(addr | pte_flags, intel_private.gtt + entry);
335}
336
337static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
338 {32, 8192, 3},
339 {64, 16384, 4},
474 {128, 32768, 5}, 340 {128, 32768, 5},
475 /* The 64M mode still requires a 128k gatt */
476 {64, 16384, 5},
477 {256, 65536, 6}, 341 {256, 65536, 6},
478 {512, 131072, 7}, 342 {512, 131072, 7},
479}; 343};
480 344
481static unsigned int intel_gtt_stolen_entries(void) 345static unsigned int intel_gtt_stolen_size(void)
482{ 346{
483 u16 gmch_ctrl; 347 u16 gmch_ctrl;
484 u8 rdct; 348 u8 rdct;
485 int local = 0; 349 int local = 0;
486 static const int ddt[4] = { 0, 16, 32, 64 }; 350 static const int ddt[4] = { 0, 16, 32, 64 };
487 unsigned int overhead_entries, stolen_entries;
488 unsigned int stolen_size = 0; 351 unsigned int stolen_size = 0;
489 352
353 if (INTEL_GTT_GEN == 1)
354 return 0; /* no stolen mem on i81x */
355
490 pci_read_config_word(intel_private.bridge_dev, 356 pci_read_config_word(intel_private.bridge_dev,
491 I830_GMCH_CTRL, &gmch_ctrl); 357 I830_GMCH_CTRL, &gmch_ctrl);
492 358
493 if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
494 overhead_entries = 0;
495 else
496 overhead_entries = intel_private.base.gtt_mappable_entries
497 / 1024;
498
499 overhead_entries += 1; /* BIOS popup */
500
501 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB || 359 if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
502 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) { 360 intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
503 switch (gmch_ctrl & I830_GMCH_GMS_MASK) { 361 switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
@@ -623,12 +481,7 @@ static unsigned int intel_gtt_stolen_entries(void)
623 } 481 }
624 } 482 }
625 483
626 if (!local && stolen_size > intel_max_stolen) { 484 if (stolen_size > 0) {
627 dev_info(&intel_private.bridge_dev->dev,
628 "detected %dK stolen memory, trimming to %dK\n",
629 stolen_size / KB(1), intel_max_stolen / KB(1));
630 stolen_size = intel_max_stolen;
631 } else if (stolen_size > 0) {
632 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n", 485 dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
633 stolen_size / KB(1), local ? "local" : "stolen"); 486 stolen_size / KB(1), local ? "local" : "stolen");
634 } else { 487 } else {
@@ -637,46 +490,88 @@ static unsigned int intel_gtt_stolen_entries(void)
637 stolen_size = 0; 490 stolen_size = 0;
638 } 491 }
639 492
640 stolen_entries = stolen_size/KB(4) - overhead_entries; 493 return stolen_size;
494}
641 495
642 return stolen_entries; 496static void i965_adjust_pgetbl_size(unsigned int size_flag)
497{
498 u32 pgetbl_ctl, pgetbl_ctl2;
499
500 /* ensure that ppgtt is disabled */
501 pgetbl_ctl2 = readl(intel_private.registers+I965_PGETBL_CTL2);
502 pgetbl_ctl2 &= ~I810_PGETBL_ENABLED;
503 writel(pgetbl_ctl2, intel_private.registers+I965_PGETBL_CTL2);
504
505 /* write the new ggtt size */
506 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
507 pgetbl_ctl &= ~I965_PGETBL_SIZE_MASK;
508 pgetbl_ctl |= size_flag;
509 writel(pgetbl_ctl, intel_private.registers+I810_PGETBL_CTL);
643} 510}
644 511
645static unsigned int intel_gtt_total_entries(void) 512static unsigned int i965_gtt_total_entries(void)
646{ 513{
647 int size; 514 int size;
515 u32 pgetbl_ctl;
516 u16 gmch_ctl;
648 517
649 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) { 518 pci_read_config_word(intel_private.bridge_dev,
650 u32 pgetbl_ctl; 519 I830_GMCH_CTRL, &gmch_ctl);
651 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
652 520
653 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) { 521 if (INTEL_GTT_GEN == 5) {
654 case I965_PGETBL_SIZE_128KB: 522 switch (gmch_ctl & G4x_GMCH_SIZE_MASK) {
655 size = KB(128); 523 case G4x_GMCH_SIZE_1M:
656 break; 524 case G4x_GMCH_SIZE_VT_1M:
657 case I965_PGETBL_SIZE_256KB: 525 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1MB);
658 size = KB(256);
659 break;
660 case I965_PGETBL_SIZE_512KB:
661 size = KB(512);
662 break; 526 break;
663 case I965_PGETBL_SIZE_1MB: 527 case G4x_GMCH_SIZE_VT_1_5M:
664 size = KB(1024); 528 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_1_5MB);
665 break; 529 break;
666 case I965_PGETBL_SIZE_2MB: 530 case G4x_GMCH_SIZE_2M:
667 size = KB(2048); 531 case G4x_GMCH_SIZE_VT_2M:
532 i965_adjust_pgetbl_size(I965_PGETBL_SIZE_2MB);
668 break; 533 break;
669 case I965_PGETBL_SIZE_1_5MB:
670 size = KB(1024 + 512);
671 break;
672 default:
673 dev_info(&intel_private.pcidev->dev,
674 "unknown page table size, assuming 512KB\n");
675 size = KB(512);
676 } 534 }
535 }
677 536
678 return size/4; 537 pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
679 } else if (INTEL_GTT_GEN == 6) { 538
539 switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
540 case I965_PGETBL_SIZE_128KB:
541 size = KB(128);
542 break;
543 case I965_PGETBL_SIZE_256KB:
544 size = KB(256);
545 break;
546 case I965_PGETBL_SIZE_512KB:
547 size = KB(512);
548 break;
549 /* GTT pagetable sizes bigger than 512KB are not possible on G33! */
550 case I965_PGETBL_SIZE_1MB:
551 size = KB(1024);
552 break;
553 case I965_PGETBL_SIZE_2MB:
554 size = KB(2048);
555 break;
556 case I965_PGETBL_SIZE_1_5MB:
557 size = KB(1024 + 512);
558 break;
559 default:
560 dev_info(&intel_private.pcidev->dev,
561 "unknown page table size, assuming 512KB\n");
562 size = KB(512);
563 }
564
565 return size/4;
566}
567
568static unsigned int intel_gtt_total_entries(void)
569{
570 int size;
571
572 if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5)
573 return i965_gtt_total_entries();
574 else if (INTEL_GTT_GEN == 6) {
680 u16 snb_gmch_ctl; 575 u16 snb_gmch_ctl;
681 576
682 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl); 577 pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
@@ -706,7 +601,18 @@ static unsigned int intel_gtt_mappable_entries(void)
706{ 601{
707 unsigned int aperture_size; 602 unsigned int aperture_size;
708 603
709 if (INTEL_GTT_GEN == 2) { 604 if (INTEL_GTT_GEN == 1) {
605 u32 smram_miscc;
606
607 pci_read_config_dword(intel_private.bridge_dev,
608 I810_SMRAM_MISCC, &smram_miscc);
609
610 if ((smram_miscc & I810_GFX_MEM_WIN_SIZE)
611 == I810_GFX_MEM_WIN_32M)
612 aperture_size = MB(32);
613 else
614 aperture_size = MB(64);
615 } else if (INTEL_GTT_GEN == 2) {
710 u16 gmch_ctrl; 616 u16 gmch_ctrl;
711 617
712 pci_read_config_word(intel_private.bridge_dev, 618 pci_read_config_word(intel_private.bridge_dev,
@@ -739,7 +645,7 @@ static void intel_gtt_cleanup(void)
739 645
740 iounmap(intel_private.gtt); 646 iounmap(intel_private.gtt);
741 iounmap(intel_private.registers); 647 iounmap(intel_private.registers);
742 648
743 intel_gtt_teardown_scratch_page(); 649 intel_gtt_teardown_scratch_page();
744} 650}
745 651
@@ -755,6 +661,14 @@ static int intel_gtt_init(void)
755 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries(); 661 intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
756 intel_private.base.gtt_total_entries = intel_gtt_total_entries(); 662 intel_private.base.gtt_total_entries = intel_gtt_total_entries();
757 663
664 /* save the PGETBL reg for resume */
665 intel_private.PGETBL_save =
666 readl(intel_private.registers+I810_PGETBL_CTL)
667 & ~I810_PGETBL_ENABLED;
668 /* we only ever restore the register when enabling the PGTBL... */
669 if (HAS_PGTBL_EN)
670 intel_private.PGETBL_save |= I810_PGETBL_ENABLED;
671
758 dev_info(&intel_private.bridge_dev->dev, 672 dev_info(&intel_private.bridge_dev->dev,
759 "detected gtt size: %dK total, %dK mappable\n", 673 "detected gtt size: %dK total, %dK mappable\n",
760 intel_private.base.gtt_total_entries * 4, 674 intel_private.base.gtt_total_entries * 4,
@@ -772,14 +686,7 @@ static int intel_gtt_init(void)
772 686
773 global_cache_flush(); /* FIXME: ? */ 687 global_cache_flush(); /* FIXME: ? */
774 688
775 /* we have to call this as early as possible after the MMIO base address is known */ 689 intel_private.base.stolen_size = intel_gtt_stolen_size();
776 intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
777 if (intel_private.base.gtt_stolen_entries == 0) {
778 intel_private.driver->cleanup();
779 iounmap(intel_private.registers);
780 iounmap(intel_private.gtt);
781 return -ENOMEM;
782 }
783 690
784 ret = intel_gtt_setup_scratch_page(); 691 ret = intel_gtt_setup_scratch_page();
785 if (ret != 0) { 692 if (ret != 0) {
@@ -787,6 +694,8 @@ static int intel_gtt_init(void)
787 return ret; 694 return ret;
788 } 695 }
789 696
697 intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
698
790 return 0; 699 return 0;
791} 700}
792 701
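intel_gtt_init() snapshots PGETBL_CTL (minus the enable bit) into PGETBL_save; intel_enable_gtt() later writes the snapshot back and, on parts with a working enable bit, verifies that it stuck. Together they give a suspend/resume-safe restore of the page-table base without re-deriving it. Condensed from the two functions in this file:

	/* init-time snapshot */
	PGETBL_save = readl(regs + I810_PGETBL_CTL) & ~I810_PGETBL_ENABLED;
	if (HAS_PGTBL_EN)
		PGETBL_save |= I810_PGETBL_ENABLED;

	/* enable-time restore and verification */
	writel(PGETBL_save, regs + I810_PGETBL_CTL);
	if (HAS_PGTBL_EN && !(readl(regs + I810_PGETBL_CTL) & I810_PGETBL_ENABLED))
		return false;	/* the enable bit did not stick */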
@@ -812,8 +721,10 @@ static int intel_fake_agp_fetch_size(void)
812 721
813static void i830_cleanup(void) 722static void i830_cleanup(void)
814{ 723{
815 kunmap(intel_private.i8xx_page); 724 if (intel_private.i8xx_flush_page) {
816 intel_private.i8xx_flush_page = NULL; 725 kunmap(intel_private.i8xx_flush_page);
726 intel_private.i8xx_flush_page = NULL;
727 }
817 728
818 __free_page(intel_private.i8xx_page); 729 __free_page(intel_private.i8xx_page);
819 intel_private.i8xx_page = NULL; 730 intel_private.i8xx_page = NULL;
@@ -860,25 +771,19 @@ static void i830_write_entry(dma_addr_t addr, unsigned int entry,
860 unsigned int flags) 771 unsigned int flags)
861{ 772{
862 u32 pte_flags = I810_PTE_VALID; 773 u32 pte_flags = I810_PTE_VALID;
863 774
864 switch (flags) { 775 if (flags == AGP_USER_CACHED_MEMORY)
865 case AGP_DCACHE_MEMORY:
866 pte_flags |= I810_PTE_LOCAL;
867 break;
868 case AGP_USER_CACHED_MEMORY:
869 pte_flags |= I830_PTE_SYSTEM_CACHED; 776 pte_flags |= I830_PTE_SYSTEM_CACHED;
870 break;
871 }
872 777
873 writel(addr | pte_flags, intel_private.gtt + entry); 778 writel(addr | pte_flags, intel_private.gtt + entry);
874} 779}
875 780
876static void intel_enable_gtt(void) 781static bool intel_enable_gtt(void)
877{ 782{
878 u32 gma_addr; 783 u32 gma_addr;
879 u16 gmch_ctrl; 784 u8 __iomem *reg;
880 785
881 if (INTEL_GTT_GEN == 2) 786 if (INTEL_GTT_GEN <= 2)
882 pci_read_config_dword(intel_private.pcidev, I810_GMADDR, 787 pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
883 &gma_addr); 788 &gma_addr);
884 else 789 else
@@ -887,13 +792,38 @@ static void intel_enable_gtt(void)
887 792
888 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK); 793 intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
889 794
890 pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl); 795 if (INTEL_GTT_GEN >= 6)
891 gmch_ctrl |= I830_GMCH_ENABLED; 796 return true;
892 pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
893 797
894 writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED, 798 if (INTEL_GTT_GEN == 2) {
895 intel_private.registers+I810_PGETBL_CTL); 799 u16 gmch_ctrl;
896 readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */ 800
801 pci_read_config_word(intel_private.bridge_dev,
802 I830_GMCH_CTRL, &gmch_ctrl);
803 gmch_ctrl |= I830_GMCH_ENABLED;
804 pci_write_config_word(intel_private.bridge_dev,
805 I830_GMCH_CTRL, gmch_ctrl);
806
807 pci_read_config_word(intel_private.bridge_dev,
808 I830_GMCH_CTRL, &gmch_ctrl);
809 if ((gmch_ctrl & I830_GMCH_ENABLED) == 0) {
810 dev_err(&intel_private.pcidev->dev,
811 "failed to enable the GTT: GMCH_CTRL=%x\n",
812 gmch_ctrl);
813 return false;
814 }
815 }
816
817 reg = intel_private.registers+I810_PGETBL_CTL;
818 writel(intel_private.PGETBL_save, reg);
819 if (HAS_PGTBL_EN && (readl(reg) & I810_PGETBL_ENABLED) == 0) {
820 dev_err(&intel_private.pcidev->dev,
821 "failed to enable the GTT: PGETBL=%x [expected %x]\n",
822 readl(reg), intel_private.PGETBL_save);
823 return false;
824 }
825
826 return true;
897} 827}
898 828
899static int i830_setup(void) 829static int i830_setup(void)
@@ -908,8 +838,6 @@ static int i830_setup(void)
908 return -ENOMEM; 838 return -ENOMEM;
909 839
910 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; 840 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
911 intel_private.pte_bus_addr =
912 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
913 841
914 intel_i830_setup_flush(); 842 intel_i830_setup_flush();
915 843
@@ -934,12 +862,12 @@ static int intel_fake_agp_configure(void)
934{ 862{
935 int i; 863 int i;
936 864
937 intel_enable_gtt(); 865 if (!intel_enable_gtt())
866 return -EIO;
938 867
939 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr; 868 agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
940 869
941 for (i = intel_private.base.gtt_stolen_entries; 870 for (i = 0; i < intel_private.base.gtt_total_entries; i++) {
942 i < intel_private.base.gtt_total_entries; i++) {
943 intel_private.driver->write_entry(intel_private.scratch_page_dma, 871 intel_private.driver->write_entry(intel_private.scratch_page_dma,
944 i, 0); 872 i, 0);
945 } 873 }
@@ -963,10 +891,10 @@ static bool i830_check_flags(unsigned int flags)
963 return false; 891 return false;
964} 892}
965 893
966static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list, 894void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
967 unsigned int sg_len, 895 unsigned int sg_len,
968 unsigned int pg_start, 896 unsigned int pg_start,
969 unsigned int flags) 897 unsigned int flags)
970{ 898{
971 struct scatterlist *sg; 899 struct scatterlist *sg;
972 unsigned int len, m; 900 unsigned int len, m;
@@ -987,27 +915,34 @@ static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
987 } 915 }
988 readl(intel_private.gtt+j-1); 916 readl(intel_private.gtt+j-1);
989} 917}
918EXPORT_SYMBOL(intel_gtt_insert_sg_entries);
919
920void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
921 struct page **pages, unsigned int flags)
922{
923 int i, j;
924
925 for (i = 0, j = first_entry; i < num_entries; i++, j++) {
926 dma_addr_t addr = page_to_phys(pages[i]);
927 intel_private.driver->write_entry(addr,
928 j, flags);
929 }
930 readl(intel_private.gtt+j-1);
931}
932EXPORT_SYMBOL(intel_gtt_insert_pages);
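Both insert helpers finish with a readl() of the last PTE written; as the "PCI Posting." comments elsewhere in this file note, the read-back forces the posted writel()s out to the hardware before the caller proceeds. The idiom in isolation:

	/* posted-write flush: reading back any GTT slot drains the
	 * preceding writel()s to the device before we continue */
	writel(pte, intel_private.gtt + last);
	readl(intel_private.gtt + last);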
990 933
991static int intel_fake_agp_insert_entries(struct agp_memory *mem, 934static int intel_fake_agp_insert_entries(struct agp_memory *mem,
992 off_t pg_start, int type) 935 off_t pg_start, int type)
993{ 936{
994 int i, j;
995 int ret = -EINVAL; 937 int ret = -EINVAL;
996 938
939 if (INTEL_GTT_GEN == 1 && type == AGP_DCACHE_MEMORY)
940 return i810_insert_dcache_entries(mem, pg_start, type);
941
997 if (mem->page_count == 0) 942 if (mem->page_count == 0)
998 goto out; 943 goto out;
999 944
1000 if (pg_start < intel_private.base.gtt_stolen_entries) { 945 if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
1001 dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
1002 "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
1003 pg_start, intel_private.base.gtt_stolen_entries);
1004
1005 dev_info(&intel_private.pcidev->dev,
1006 "trying to insert into local/stolen memory\n");
1007 goto out_err;
1008 }
1009
1010 if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
1011 goto out_err; 946 goto out_err;
1012 947
1013 if (type != mem->type) 948 if (type != mem->type)
@@ -1019,21 +954,17 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
1019 if (!mem->is_flushed) 954 if (!mem->is_flushed)
1020 global_cache_flush(); 955 global_cache_flush();
1021 956
1022 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) { 957 if (intel_private.base.needs_dmar) {
1023 ret = intel_agp_map_memory(mem); 958 ret = intel_gtt_map_memory(mem->pages, mem->page_count,
959 &mem->sg_list, &mem->num_sg);
1024 if (ret != 0) 960 if (ret != 0)
1025 return ret; 961 return ret;
1026 962
1027 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg, 963 intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
1028 pg_start, type); 964 pg_start, type);
1029 } else { 965 } else
1030 for (i = 0, j = pg_start; i < mem->page_count; i++, j++) { 966 intel_gtt_insert_pages(pg_start, mem->page_count, mem->pages,
1031 dma_addr_t addr = page_to_phys(mem->pages[i]); 967 type);
1032 intel_private.driver->write_entry(addr,
1033 j, type);
1034 }
1035 readl(intel_private.gtt+j-1);
1036 }
1037 968
1038out: 969out:
1039 ret = 0; 970 ret = 0;
@@ -1042,40 +973,54 @@ out_err:
1042 return ret; 973 return ret;
1043} 974}
1044 975
976void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
977{
978 unsigned int i;
979
980 for (i = first_entry; i < (first_entry + num_entries); i++) {
981 intel_private.driver->write_entry(intel_private.scratch_page_dma,
982 i, 0);
983 }
984 readl(intel_private.gtt+i-1);
985}
986EXPORT_SYMBOL(intel_gtt_clear_range);
987
1045static int intel_fake_agp_remove_entries(struct agp_memory *mem, 988static int intel_fake_agp_remove_entries(struct agp_memory *mem,
1046 off_t pg_start, int type) 989 off_t pg_start, int type)
1047{ 990{
1048 int i;
1049
1050 if (mem->page_count == 0) 991 if (mem->page_count == 0)
1051 return 0; 992 return 0;
1052 993
1053 if (pg_start < intel_private.base.gtt_stolen_entries) { 994 if (intel_private.base.needs_dmar) {
1054 dev_info(&intel_private.pcidev->dev, 995 intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
1055 "trying to disable local/stolen memory\n"); 996 mem->sg_list = NULL;
1056 return -EINVAL; 997 mem->num_sg = 0;
1057 } 998 }
1058 999
1059 if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) 1000 intel_gtt_clear_range(pg_start, mem->page_count);
1060 intel_agp_unmap_memory(mem);
1061
1062 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
1063 intel_private.driver->write_entry(intel_private.scratch_page_dma,
1064 i, 0);
1065 }
1066 readl(intel_private.gtt+i-1);
1067 1001
1068 return 0; 1002 return 0;
1069} 1003}
1070 1004
1071static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
1072{
1073 intel_private.driver->chipset_flush();
1074}
1075
1076static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count, 1005static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
1077 int type) 1006 int type)
1078{ 1007{
1008 struct agp_memory *new;
1009
1010 if (type == AGP_DCACHE_MEMORY && INTEL_GTT_GEN == 1) {
1011 if (pg_count != intel_private.num_dcache_entries)
1012 return NULL;
1013
1014 new = agp_create_memory(1);
1015 if (new == NULL)
1016 return NULL;
1017
1018 new->type = AGP_DCACHE_MEMORY;
1019 new->page_count = pg_count;
1020 new->num_scratch_pages = 0;
1021 agp_free_page_array(new);
1022 return new;
1023 }
1079 if (type == AGP_PHYS_MEMORY) 1024 if (type == AGP_PHYS_MEMORY)
1080 return alloc_agpphysmem_i8xx(pg_count, type); 1025 return alloc_agpphysmem_i8xx(pg_count, type);
1081 /* always return NULL for other allocation types for now */ 1026 /* always return NULL for other allocation types for now */
@@ -1190,12 +1135,19 @@ static void i9xx_chipset_flush(void)
1190 writel(1, intel_private.i9xx_flush_page); 1135 writel(1, intel_private.i9xx_flush_page);
1191} 1136}
1192 1137
1193static void i965_write_entry(dma_addr_t addr, unsigned int entry, 1138static void i965_write_entry(dma_addr_t addr,
1139 unsigned int entry,
1194 unsigned int flags) 1140 unsigned int flags)
1195{ 1141{
1142 u32 pte_flags;
1143
1144 pte_flags = I810_PTE_VALID;
1145 if (flags == AGP_USER_CACHED_MEMORY)
1146 pte_flags |= I830_PTE_SYSTEM_CACHED;
1147
1196 /* Shift high bits down */ 1148 /* Shift high bits down */
1197 addr |= (addr >> 28) & 0xf0; 1149 addr |= (addr >> 28) & 0xf0;
1198 writel(addr | I810_PTE_VALID, intel_private.gtt + entry); 1150 writel(addr | pte_flags, intel_private.gtt + entry);
1199} 1151}
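i965_write_entry() folds a 36-bit physical address into a 32-bit PTE: address bits 35:32 land in PTE bits 7:4. A worked example, assuming a 36-bit dma_addr_t value:

	/* addr = 0x9_2345_6000 (bits 35:32 = 0x9)
	 *   (addr >> 28) & 0xf0 == 0x90              -> high bits moved to 7:4
	 *   writel() then stores the low 32 bits:
	 *   pte = 0x23456090 | I810_PTE_VALID (| I830_PTE_SYSTEM_CACHED if cached)
	 */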
1200 1152
1201static bool gen6_check_flags(unsigned int flags) 1153static bool gen6_check_flags(unsigned int flags)
@@ -1265,40 +1217,11 @@ static int i9xx_setup(void)
1265 intel_private.gtt_bus_addr = reg_addr + gtt_offset; 1217 intel_private.gtt_bus_addr = reg_addr + gtt_offset;
1266 } 1218 }
1267 1219
1268 intel_private.pte_bus_addr =
1269 readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
1270
1271 intel_i9xx_setup_flush(); 1220 intel_i9xx_setup_flush();
1272 1221
1273 return 0; 1222 return 0;
1274} 1223}
1275 1224
1276static const struct agp_bridge_driver intel_810_driver = {
1277 .owner = THIS_MODULE,
1278 .aperture_sizes = intel_i810_sizes,
1279 .size_type = FIXED_APER_SIZE,
1280 .num_aperture_sizes = 2,
1281 .needs_scratch_page = true,
1282 .configure = intel_i810_configure,
1283 .fetch_size = intel_i810_fetch_size,
1284 .cleanup = intel_i810_cleanup,
1285 .mask_memory = intel_i810_mask_memory,
1286 .masks = intel_i810_masks,
1287 .agp_enable = intel_fake_agp_enable,
1288 .cache_flush = global_cache_flush,
1289 .create_gatt_table = agp_generic_create_gatt_table,
1290 .free_gatt_table = agp_generic_free_gatt_table,
1291 .insert_memory = intel_i810_insert_entries,
1292 .remove_memory = intel_i810_remove_entries,
1293 .alloc_by_type = intel_i810_alloc_by_type,
1294 .free_by_type = intel_i810_free_by_type,
1295 .agp_alloc_page = agp_generic_alloc_page,
1296 .agp_alloc_pages = agp_generic_alloc_pages,
1297 .agp_destroy_page = agp_generic_destroy_page,
1298 .agp_destroy_pages = agp_generic_destroy_pages,
1299 .agp_type_to_mask_type = agp_generic_type_to_mask_type,
1300};
1301
1302static const struct agp_bridge_driver intel_fake_agp_driver = { 1225static const struct agp_bridge_driver intel_fake_agp_driver = {
1303 .owner = THIS_MODULE, 1226 .owner = THIS_MODULE,
1304 .size_type = FIXED_APER_SIZE, 1227 .size_type = FIXED_APER_SIZE,
@@ -1319,15 +1242,20 @@ static const struct agp_bridge_driver intel_fake_agp_driver = {
1319 .agp_alloc_pages = agp_generic_alloc_pages, 1242 .agp_alloc_pages = agp_generic_alloc_pages,
1320 .agp_destroy_page = agp_generic_destroy_page, 1243 .agp_destroy_page = agp_generic_destroy_page,
1321 .agp_destroy_pages = agp_generic_destroy_pages, 1244 .agp_destroy_pages = agp_generic_destroy_pages,
1322 .chipset_flush = intel_fake_agp_chipset_flush,
1323}; 1245};
1324 1246
1325static const struct intel_gtt_driver i81x_gtt_driver = { 1247static const struct intel_gtt_driver i81x_gtt_driver = {
1326 .gen = 1, 1248 .gen = 1,
1249 .has_pgtbl_enable = 1,
1327 .dma_mask_size = 32, 1250 .dma_mask_size = 32,
1251 .setup = i810_setup,
1252 .cleanup = i810_cleanup,
1253 .check_flags = i830_check_flags,
1254 .write_entry = i810_write_entry,
1328}; 1255};
1329static const struct intel_gtt_driver i8xx_gtt_driver = { 1256static const struct intel_gtt_driver i8xx_gtt_driver = {
1330 .gen = 2, 1257 .gen = 2,
1258 .has_pgtbl_enable = 1,
1331 .setup = i830_setup, 1259 .setup = i830_setup,
1332 .cleanup = i830_cleanup, 1260 .cleanup = i830_cleanup,
1333 .write_entry = i830_write_entry, 1261 .write_entry = i830_write_entry,
@@ -1337,10 +1265,11 @@ static const struct intel_gtt_driver i8xx_gtt_driver = {
1337}; 1265};
1338static const struct intel_gtt_driver i915_gtt_driver = { 1266static const struct intel_gtt_driver i915_gtt_driver = {
1339 .gen = 3, 1267 .gen = 3,
1268 .has_pgtbl_enable = 1,
1340 .setup = i9xx_setup, 1269 .setup = i9xx_setup,
1341 .cleanup = i9xx_cleanup, 1270 .cleanup = i9xx_cleanup,
1342 /* i945 is the last gpu to need phys mem (for overlay and cursors). */ 1271 /* i945 is the last gpu to need phys mem (for overlay and cursors). */
1343 .write_entry = i830_write_entry, 1272 .write_entry = i830_write_entry,
1344 .dma_mask_size = 32, 1273 .dma_mask_size = 32,
1345 .check_flags = i830_check_flags, 1274 .check_flags = i830_check_flags,
1346 .chipset_flush = i9xx_chipset_flush, 1275 .chipset_flush = i9xx_chipset_flush,
@@ -1367,6 +1296,7 @@ static const struct intel_gtt_driver pineview_gtt_driver = {
1367}; 1296};
1368static const struct intel_gtt_driver i965_gtt_driver = { 1297static const struct intel_gtt_driver i965_gtt_driver = {
1369 .gen = 4, 1298 .gen = 4,
1299 .has_pgtbl_enable = 1,
1370 .setup = i9xx_setup, 1300 .setup = i9xx_setup,
1371 .cleanup = i9xx_cleanup, 1301 .cleanup = i9xx_cleanup,
1372 .write_entry = i965_write_entry, 1302 .write_entry = i965_write_entry,
@@ -1410,93 +1340,92 @@ static const struct intel_gtt_driver sandybridge_gtt_driver = {
1410static const struct intel_gtt_driver_description { 1340static const struct intel_gtt_driver_description {
1411 unsigned int gmch_chip_id; 1341 unsigned int gmch_chip_id;
1412 char *name; 1342 char *name;
1413 const struct agp_bridge_driver *gmch_driver;
1414 const struct intel_gtt_driver *gtt_driver; 1343 const struct intel_gtt_driver *gtt_driver;
1415} intel_gtt_chipsets[] = { 1344} intel_gtt_chipsets[] = {
1416 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810", &intel_810_driver, 1345 { PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
1417 &i81x_gtt_driver}, 1346 &i81x_gtt_driver},
1418 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810", &intel_810_driver, 1347 { PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
1419 &i81x_gtt_driver}, 1348 &i81x_gtt_driver},
1420 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810", &intel_810_driver, 1349 { PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
1421 &i81x_gtt_driver}, 1350 &i81x_gtt_driver},
1422 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815", &intel_810_driver, 1351 { PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
1423 &i81x_gtt_driver}, 1352 &i81x_gtt_driver},
1424 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M", 1353 { PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
1425 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1354 &i8xx_gtt_driver},
1426 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M", 1355 { PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
1427 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1356 &i8xx_gtt_driver},
1428 { PCI_DEVICE_ID_INTEL_82854_IG, "854", 1357 { PCI_DEVICE_ID_INTEL_82854_IG, "854",
1429 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1358 &i8xx_gtt_driver},
1430 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM", 1359 { PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
1431 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1360 &i8xx_gtt_driver},
1432 { PCI_DEVICE_ID_INTEL_82865_IG, "865", 1361 { PCI_DEVICE_ID_INTEL_82865_IG, "865",
1433 &intel_fake_agp_driver, &i8xx_gtt_driver}, 1362 &i8xx_gtt_driver},
1434 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)", 1363 { PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
1435 &intel_fake_agp_driver, &i915_gtt_driver }, 1364 &i915_gtt_driver },
1436 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G", 1365 { PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
1437 &intel_fake_agp_driver, &i915_gtt_driver }, 1366 &i915_gtt_driver },
1438 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM", 1367 { PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
1439 &intel_fake_agp_driver, &i915_gtt_driver }, 1368 &i915_gtt_driver },
1440 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G", 1369 { PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
1441 &intel_fake_agp_driver, &i915_gtt_driver }, 1370 &i915_gtt_driver },
1442 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM", 1371 { PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
1443 &intel_fake_agp_driver, &i915_gtt_driver }, 1372 &i915_gtt_driver },
1444 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME", 1373 { PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
1445 &intel_fake_agp_driver, &i915_gtt_driver }, 1374 &i915_gtt_driver },
1446 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ", 1375 { PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
1447 &intel_fake_agp_driver, &i965_gtt_driver }, 1376 &i965_gtt_driver },
1448 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35", 1377 { PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
1449 &intel_fake_agp_driver, &i965_gtt_driver }, 1378 &i965_gtt_driver },
1450 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q", 1379 { PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
1451 &intel_fake_agp_driver, &i965_gtt_driver }, 1380 &i965_gtt_driver },
1452 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G", 1381 { PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
1453 &intel_fake_agp_driver, &i965_gtt_driver }, 1382 &i965_gtt_driver },
1454 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM", 1383 { PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
1455 &intel_fake_agp_driver, &i965_gtt_driver }, 1384 &i965_gtt_driver },
1456 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE", 1385 { PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
1457 &intel_fake_agp_driver, &i965_gtt_driver }, 1386 &i965_gtt_driver },
1458 { PCI_DEVICE_ID_INTEL_G33_IG, "G33", 1387 { PCI_DEVICE_ID_INTEL_G33_IG, "G33",
1459 &intel_fake_agp_driver, &g33_gtt_driver }, 1388 &g33_gtt_driver },
1460 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35", 1389 { PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
1461 &intel_fake_agp_driver, &g33_gtt_driver }, 1390 &g33_gtt_driver },
1462 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33", 1391 { PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
1463 &intel_fake_agp_driver, &g33_gtt_driver }, 1392 &g33_gtt_driver },
1464 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150", 1393 { PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
1465 &intel_fake_agp_driver, &pineview_gtt_driver }, 1394 &pineview_gtt_driver },
1466 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150", 1395 { PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
1467 &intel_fake_agp_driver, &pineview_gtt_driver }, 1396 &pineview_gtt_driver },
1468 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45", 1397 { PCI_DEVICE_ID_INTEL_GM45_IG, "GM45",
1469 &intel_fake_agp_driver, &g4x_gtt_driver }, 1398 &g4x_gtt_driver },
1470 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake", 1399 { PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, "Eaglelake",
1471 &intel_fake_agp_driver, &g4x_gtt_driver }, 1400 &g4x_gtt_driver },
1472 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43", 1401 { PCI_DEVICE_ID_INTEL_Q45_IG, "Q45/Q43",
1473 &intel_fake_agp_driver, &g4x_gtt_driver }, 1402 &g4x_gtt_driver },
1474 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43", 1403 { PCI_DEVICE_ID_INTEL_G45_IG, "G45/G43",
1475 &intel_fake_agp_driver, &g4x_gtt_driver }, 1404 &g4x_gtt_driver },
1476 { PCI_DEVICE_ID_INTEL_B43_IG, "B43", 1405 { PCI_DEVICE_ID_INTEL_B43_IG, "B43",
1477 &intel_fake_agp_driver, &g4x_gtt_driver }, 1406 &g4x_gtt_driver },
1478 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43", 1407 { PCI_DEVICE_ID_INTEL_B43_1_IG, "B43",
1479 &intel_fake_agp_driver, &g4x_gtt_driver }, 1408 &g4x_gtt_driver },
1480 { PCI_DEVICE_ID_INTEL_G41_IG, "G41", 1409 { PCI_DEVICE_ID_INTEL_G41_IG, "G41",
1481 &intel_fake_agp_driver, &g4x_gtt_driver }, 1410 &g4x_gtt_driver },
1482 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 1411 { PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
1483 "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, 1412 "HD Graphics", &ironlake_gtt_driver },
1484 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 1413 { PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
1485 "HD Graphics", &intel_fake_agp_driver, &ironlake_gtt_driver }, 1414 "HD Graphics", &ironlake_gtt_driver },
1486 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG, 1415 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
1487 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1416 "Sandybridge", &sandybridge_gtt_driver },
1488 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG, 1417 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
1489 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1418 "Sandybridge", &sandybridge_gtt_driver },
1490 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG, 1419 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
1491 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1420 "Sandybridge", &sandybridge_gtt_driver },
1492 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG, 1421 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
1493 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1422 "Sandybridge", &sandybridge_gtt_driver },
1494 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG, 1423 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
1495 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1424 "Sandybridge", &sandybridge_gtt_driver },
1496 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG, 1425 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
1497 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1426 "Sandybridge", &sandybridge_gtt_driver },
1498 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG, 1427 { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
1499 "Sandybridge", &intel_fake_agp_driver, &sandybridge_gtt_driver }, 1428 "Sandybridge", &sandybridge_gtt_driver },
1500 { 0, NULL, NULL } 1429 { 0, NULL, NULL }
1501}; 1430};
1502 1431
@@ -1521,21 +1450,20 @@ int intel_gmch_probe(struct pci_dev *pdev,
1521 struct agp_bridge_data *bridge) 1450 struct agp_bridge_data *bridge)
1522{ 1451{
1523 int i, mask; 1452 int i, mask;
1524 bridge->driver = NULL; 1453 intel_private.driver = NULL;
1525 1454
1526 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) { 1455 for (i = 0; intel_gtt_chipsets[i].name != NULL; i++) {
1527 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) { 1456 if (find_gmch(intel_gtt_chipsets[i].gmch_chip_id)) {
1528 bridge->driver = 1457 intel_private.driver =
1529 intel_gtt_chipsets[i].gmch_driver;
1530 intel_private.driver =
1531 intel_gtt_chipsets[i].gtt_driver; 1458 intel_gtt_chipsets[i].gtt_driver;
1532 break; 1459 break;
1533 } 1460 }
1534 } 1461 }
1535 1462
1536 if (!bridge->driver) 1463 if (!intel_private.driver)
1537 return 0; 1464 return 0;
1538 1465
1466 bridge->driver = &intel_fake_agp_driver;
1539 bridge->dev_private_data = &intel_private; 1467 bridge->dev_private_data = &intel_private;
1540 bridge->dev = pdev; 1468 bridge->dev = pdev;
1541 1469
@@ -1551,8 +1479,8 @@ int intel_gmch_probe(struct pci_dev *pdev,
1551 pci_set_consistent_dma_mask(intel_private.pcidev, 1479 pci_set_consistent_dma_mask(intel_private.pcidev,
1552 DMA_BIT_MASK(mask)); 1480 DMA_BIT_MASK(mask));
1553 1481
1554 if (bridge->driver == &intel_810_driver) 1482 /*if (bridge->driver == &intel_810_driver)
1555 return 1; 1483 return 1;*/
1556 1484
1557 if (intel_gtt_init() != 0) 1485 if (intel_gtt_init() != 0)
1558 return 0; 1486 return 0;
@@ -1561,12 +1489,19 @@ int intel_gmch_probe(struct pci_dev *pdev,
1561} 1489}
1562EXPORT_SYMBOL(intel_gmch_probe); 1490EXPORT_SYMBOL(intel_gmch_probe);
1563 1491
1564struct intel_gtt *intel_gtt_get(void) 1492const struct intel_gtt *intel_gtt_get(void)
1565{ 1493{
1566 return &intel_private.base; 1494 return &intel_private.base;
1567} 1495}
1568EXPORT_SYMBOL(intel_gtt_get); 1496EXPORT_SYMBOL(intel_gtt_get);
1569 1497
1498void intel_gtt_chipset_flush(void)
1499{
1500 if (intel_private.driver->chipset_flush)
1501 intel_private.driver->chipset_flush();
1502}
1503EXPORT_SYMBOL(intel_gtt_chipset_flush);
1504
1570void intel_gmch_remove(struct pci_dev *pdev) 1505void intel_gmch_remove(struct pci_dev *pdev)
1571{ 1506{
1572 if (intel_private.pcidev) 1507 if (intel_private.pcidev)
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 1030f8420137..c17a305ecb28 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/wait.h> 27#include <linux/wait.h>
28#include <linux/acpi.h>
28#include "tpm.h" 29#include "tpm.h"
29 30
30#define TPM_HEADER_SIZE 10 31#define TPM_HEADER_SIZE 10
@@ -78,6 +79,26 @@ enum tis_defaults {
78static LIST_HEAD(tis_chips); 79static LIST_HEAD(tis_chips);
79static DEFINE_SPINLOCK(tis_lock); 80static DEFINE_SPINLOCK(tis_lock);
80 81
82#ifdef CONFIG_ACPI
83static int is_itpm(struct pnp_dev *dev)
84{
85 struct acpi_device *acpi = pnp_acpi_device(dev);
86 struct acpi_hardware_id *id;
87
88 list_for_each_entry(id, &acpi->pnp.ids, list) {
89 if (!strcmp("INTC0102", id->id))
90 return 1;
91 }
92
93 return 0;
94}
95#else
96static int is_itpm(struct pnp_dev *dev)
97{
98 return 0;
99}
100#endif
101
81static int check_locality(struct tpm_chip *chip, int l) 102static int check_locality(struct tpm_chip *chip, int l)
82{ 103{
83 if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) & 104 if ((ioread8(chip->vendor.iobase + TPM_ACCESS(l)) &
@@ -472,6 +493,9 @@ static int tpm_tis_init(struct device *dev, resource_size_t start,
472 "1.2 TPM (device-id 0x%X, rev-id %d)\n", 493 "1.2 TPM (device-id 0x%X, rev-id %d)\n",
473 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0))); 494 vendor >> 16, ioread8(chip->vendor.iobase + TPM_RID(0)));
474 495
496 if (is_itpm(to_pnp_dev(dev)))
497 itpm = 1;
498
475 if (itpm) 499 if (itpm)
476 dev_info(dev, "Intel iTPM workaround enabled\n"); 500 dev_info(dev, "Intel iTPM workaround enabled\n");
477 501
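The iTPM quirk is now auto-detected from the device's ACPI hardware ID (INTC0102) instead of relying solely on the manual override, so affected systems get the workaround without extra configuration; setting the existing itpm flag (apparently a module parameter) still forces it on. A purely hypothetical sketch of extending the match to a table of IDs:

	#ifdef CONFIG_ACPI
	/* hypothetical extension: match several known iTPM hardware IDs */
	static const char * const itpm_hids[] = { "INTC0102" /* , further IDs */ };

	static int is_itpm(struct pnp_dev *dev)
	{
		struct acpi_device *acpi = pnp_acpi_device(dev);
		struct acpi_hardware_id *id;
		unsigned int i;

		list_for_each_entry(id, &acpi->pnp.ids, list)
			for (i = 0; i < ARRAY_SIZE(itpm_hids); i++)
				if (!strcmp(itpm_hids[i], id->id))
					return 1;
		return 0;
	}
	#endif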
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 6c1b676643a9..896a2ced1d27 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1547,31 +1547,16 @@ static int init_vqs(struct ports_device *portdev)
1547 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2; 1547 nr_queues = use_multiport(portdev) ? (nr_ports + 1) * 2 : 2;
1548 1548
1549 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL); 1549 vqs = kmalloc(nr_queues * sizeof(struct virtqueue *), GFP_KERNEL);
1550 if (!vqs) {
1551 err = -ENOMEM;
1552 goto fail;
1553 }
1554 io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL); 1550 io_callbacks = kmalloc(nr_queues * sizeof(vq_callback_t *), GFP_KERNEL);
1555 if (!io_callbacks) {
1556 err = -ENOMEM;
1557 goto free_vqs;
1558 }
1559 io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL); 1551 io_names = kmalloc(nr_queues * sizeof(char *), GFP_KERNEL);
1560 if (!io_names) {
1561 err = -ENOMEM;
1562 goto free_callbacks;
1563 }
1564 portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), 1552 portdev->in_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1565 GFP_KERNEL); 1553 GFP_KERNEL);
1566 if (!portdev->in_vqs) {
1567 err = -ENOMEM;
1568 goto free_names;
1569 }
1570 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *), 1554 portdev->out_vqs = kmalloc(nr_ports * sizeof(struct virtqueue *),
1571 GFP_KERNEL); 1555 GFP_KERNEL);
1572 if (!portdev->out_vqs) { 1556 if (!vqs || !io_callbacks || !io_names || !portdev->in_vqs ||
1557 !portdev->out_vqs) {
1573 err = -ENOMEM; 1558 err = -ENOMEM;
1574 goto free_invqs; 1559 goto free;
1575 } 1560 }
1576 1561
1577 /* 1562 /*
@@ -1605,7 +1590,7 @@ static int init_vqs(struct ports_device *portdev)
1605 io_callbacks, 1590 io_callbacks,
1606 (const char **)io_names); 1591 (const char **)io_names);
1607 if (err) 1592 if (err)
1608 goto free_outvqs; 1593 goto free;
1609 1594
1610 j = 0; 1595 j = 0;
1611 portdev->in_vqs[0] = vqs[0]; 1596 portdev->in_vqs[0] = vqs[0];
@@ -1621,23 +1606,19 @@ static int init_vqs(struct ports_device *portdev)
1621 portdev->out_vqs[i] = vqs[j + 1]; 1606 portdev->out_vqs[i] = vqs[j + 1];
1622 } 1607 }
1623 } 1608 }
1624 kfree(io_callbacks);
1625 kfree(io_names); 1609 kfree(io_names);
1610 kfree(io_callbacks);
1626 kfree(vqs); 1611 kfree(vqs);
1627 1612
1628 return 0; 1613 return 0;
1629 1614
1630free_names: 1615free:
1631 kfree(io_names);
1632free_callbacks:
1633 kfree(io_callbacks);
1634free_outvqs:
1635 kfree(portdev->out_vqs); 1616 kfree(portdev->out_vqs);
1636free_invqs:
1637 kfree(portdev->in_vqs); 1617 kfree(portdev->in_vqs);
1638free_vqs: 1618 kfree(io_names);
1619 kfree(io_callbacks);
1639 kfree(vqs); 1620 kfree(vqs);
1640fail: 1621
1641 return err; 1622 return err;
1642} 1623}
1643 1624
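The rewritten init_vqs() leans on kfree(NULL) being a no-op: all five buffers are allocated first, checked once, and released through a single "free" label no matter which allocation failed. The shape of the idiom, reduced to two buffers:

	a = kmalloc(sizeof(*a), GFP_KERNEL);
	b = kmalloc(sizeof(*b), GFP_KERNEL);
	if (!a || !b) {			/* one check for all allocations */
		err = -ENOMEM;
		goto free;
	}
	/* ... use a and b ... */
free:
	kfree(b);			/* kfree(NULL) is harmless, so no */
	kfree(a);			/* per-allocation labels are needed */
	return err;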
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index eb6b54dbb806..85ffd5e38c50 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1213,3 +1213,4 @@ module_exit(sh_dmae_exit);
1213MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); 1213MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
1214MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); 1214MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
1215MODULE_LICENSE("GPL"); 1215MODULE_LICENSE("GPL");
1216MODULE_ALIAS("platform:sh-dma-engine");
diff --git a/drivers/edac/Makefile b/drivers/edac/Makefile
index b3781399b38a..ba2898b3639b 100644
--- a/drivers/edac/Makefile
+++ b/drivers/edac/Makefile
@@ -10,16 +10,16 @@ obj-$(CONFIG_EDAC) := edac_stub.o
10obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o 10obj-$(CONFIG_EDAC_MM_EDAC) += edac_core.o
11obj-$(CONFIG_EDAC_MCE) += edac_mce.o 11obj-$(CONFIG_EDAC_MCE) += edac_mce.o
12 12
13edac_core-objs := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o 13edac_core-y := edac_mc.o edac_device.o edac_mc_sysfs.o edac_pci_sysfs.o
14edac_core-objs += edac_module.o edac_device_sysfs.o 14edac_core-y += edac_module.o edac_device_sysfs.o
15 15
16ifdef CONFIG_PCI 16ifdef CONFIG_PCI
17edac_core-objs += edac_pci.o edac_pci_sysfs.o 17edac_core-y += edac_pci.o edac_pci_sysfs.o
18endif 18endif
19 19
20obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o 20obj-$(CONFIG_EDAC_MCE_INJ) += mce_amd_inj.o
21 21
22edac_mce_amd-objs := mce_amd.o 22edac_mce_amd-y := mce_amd.o
23obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o 23obj-$(CONFIG_EDAC_DECODE_MCE) += edac_mce_amd.o
24 24
25obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o 25obj-$(CONFIG_EDAC_AMD76X) += amd76x_edac.o
diff --git a/drivers/edac/mce_amd_inj.c b/drivers/edac/mce_amd_inj.c
index 8d0688f36d4c..39faded3cadd 100644
--- a/drivers/edac/mce_amd_inj.c
+++ b/drivers/edac/mce_amd_inj.c
@@ -139,7 +139,7 @@ static int __init edac_init_mce_inject(void)
139 return 0; 139 return 0;
140 140
141err_sysfs_create: 141err_sysfs_create:
142 while (i-- >= 0) 142 while (--i >= 0)
143 sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr); 143 sysfs_remove_file(mce_kobj, &sysfs_attrs[i]->attr);
144 144
145 kobject_del(mce_kobj); 145 kobject_del(mce_kobj);
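
The one-character fix swaps post-decrement for pre-decrement in the unwind loop. With "while (i-- >= 0)", entry at i == 0 still runs the body once with i already at -1, indexing sysfs_attrs[-1] and also re-removing the entry whose creation just failed; "while (--i >= 0)" unwinds exactly entries 0..i-1. A standalone illustration (hypothetical array, not the driver code):

    #include <stdio.h>

    int main(void)
    {
            const char *attrs[3] = { "bank", "status", "addr" };
            int i = 0;      /* suppose creating attrs[0] itself failed */

            /* Buggy: "while (i-- >= 0)" is true for i == 0, so the body
             * would run once with i == -1 and index attrs[-1]. */

            /* Fixed: nothing was created, so nothing is removed. */
            while (--i >= 0)
                    printf("removing %s\n", attrs[i]);

            return 0;
    }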
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 18fdd9703b48..1a467a91fb0b 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9#include <linux/bug.h> 9#include <linux/bug.h>
10#include <linux/delay.h>
10#include <linux/device.h> 11#include <linux/device.h>
11#include <linux/firewire.h> 12#include <linux/firewire.h>
12#include <linux/firewire-constants.h> 13#include <linux/firewire-constants.h>
@@ -26,8 +27,14 @@
26#include <asm/unaligned.h> 27#include <asm/unaligned.h>
27#include <net/arp.h> 28#include <net/arp.h>
28 29
29#define FWNET_MAX_FRAGMENTS 25 /* arbitrary limit */ 30/* rx limits */
30#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16 * 1024 ? 4 : 2) 31#define FWNET_MAX_FRAGMENTS 30 /* arbitrary, > TX queue depth */
32#define FWNET_ISO_PAGE_COUNT (PAGE_SIZE < 16*1024 ? 4 : 2)
33
34/* tx limits */
35#define FWNET_MAX_QUEUED_DATAGRAMS 20 /* < 64 = number of tlabels */
36#define FWNET_MIN_QUEUED_DATAGRAMS 10 /* should keep AT DMA busy enough */
37#define FWNET_TX_QUEUE_LEN FWNET_MAX_QUEUED_DATAGRAMS /* ? */
31 38
32#define IEEE1394_BROADCAST_CHANNEL 31 39#define IEEE1394_BROADCAST_CHANNEL 31
33#define IEEE1394_ALL_NODES (0xffc0 | 0x003f) 40#define IEEE1394_ALL_NODES (0xffc0 | 0x003f)
@@ -169,15 +176,8 @@ struct fwnet_device {
169 struct fw_address_handler handler; 176 struct fw_address_handler handler;
170 u64 local_fifo; 177 u64 local_fifo;
171 178
172 /* List of packets to be sent */ 179 /* Number of tx datagrams that have been queued but not yet acked */
173 struct list_head packet_list; 180 int queued_datagrams;
174 /*
175 * List of packets that were broadcasted. When we get an ISO interrupt
176 * one of them has been sent
177 */
178 struct list_head broadcasted_list;
179 /* List of packets that have been sent but not yet acked */
180 struct list_head sent_list;
181 181
182 struct list_head peer_list; 182 struct list_head peer_list;
183 struct fw_card *card; 183 struct fw_card *card;
@@ -195,7 +195,7 @@ struct fwnet_peer {
195 unsigned pdg_size; /* pd_list size */ 195 unsigned pdg_size; /* pd_list size */
196 196
197 u16 datagram_label; /* outgoing datagram label */ 197 u16 datagram_label; /* outgoing datagram label */
198 unsigned max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */ 198 u16 max_payload; /* includes RFC2374_FRAG_HDR_SIZE overhead */
199 int node_id; 199 int node_id;
200 int generation; 200 int generation;
201 unsigned speed; 201 unsigned speed;
@@ -203,22 +203,18 @@ struct fwnet_peer {
203 203
204/* This is our task struct. It's used for the packet complete callback. */ 204/* This is our task struct. It's used for the packet complete callback. */
205struct fwnet_packet_task { 205struct fwnet_packet_task {
206 /*
207 * ptask can actually be on dev->packet_list, dev->broadcasted_list,
208 * or dev->sent_list depending on its current state.
209 */
210 struct list_head pt_link;
211 struct fw_transaction transaction; 206 struct fw_transaction transaction;
212 struct rfc2734_header hdr; 207 struct rfc2734_header hdr;
213 struct sk_buff *skb; 208 struct sk_buff *skb;
214 struct fwnet_device *dev; 209 struct fwnet_device *dev;
215 210
216 int outstanding_pkts; 211 int outstanding_pkts;
217 unsigned max_payload;
218 u64 fifo_addr; 212 u64 fifo_addr;
219 u16 dest_node; 213 u16 dest_node;
214 u16 max_payload;
220 u8 generation; 215 u8 generation;
221 u8 speed; 216 u8 speed;
217 u8 enqueued;
222}; 218};
223 219
224/* 220/*
@@ -650,8 +646,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
650 net->stats.rx_packets++; 646 net->stats.rx_packets++;
651 net->stats.rx_bytes += skb->len; 647 net->stats.rx_bytes += skb->len;
652 } 648 }
653 if (netif_queue_stopped(net))
654 netif_wake_queue(net);
655 649
656 return 0; 650 return 0;
657 651
@@ -660,8 +654,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
660 net->stats.rx_dropped++; 654 net->stats.rx_dropped++;
661 655
662 dev_kfree_skb_any(skb); 656 dev_kfree_skb_any(skb);
663 if (netif_queue_stopped(net))
664 netif_wake_queue(net);
665 657
666 return -ENOENT; 658 return -ENOENT;
667} 659}
@@ -793,15 +785,10 @@ static int fwnet_incoming_packet(struct fwnet_device *dev, __be32 *buf, int len,
793 * Datagram is not complete, we're done for the 785 * Datagram is not complete, we're done for the
794 * moment. 786 * moment.
795 */ 787 */
796 spin_unlock_irqrestore(&dev->lock, flags); 788 retval = 0;
797
798 return 0;
799 fail: 789 fail:
800 spin_unlock_irqrestore(&dev->lock, flags); 790 spin_unlock_irqrestore(&dev->lock, flags);
801 791
802 if (netif_queue_stopped(net))
803 netif_wake_queue(net);
804
805 return retval; 792 return retval;
806} 793}
807 794
@@ -901,11 +888,19 @@ static void fwnet_free_ptask(struct fwnet_packet_task *ptask)
901 kmem_cache_free(fwnet_packet_task_cache, ptask); 888 kmem_cache_free(fwnet_packet_task_cache, ptask);
902} 889}
903 890
891/* Caller must hold dev->lock. */
892static void dec_queued_datagrams(struct fwnet_device *dev)
893{
894 if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
895 netif_wake_queue(dev->netdev);
896}
897
904static int fwnet_send_packet(struct fwnet_packet_task *ptask); 898static int fwnet_send_packet(struct fwnet_packet_task *ptask);
905 899
906static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask) 900static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
907{ 901{
908 struct fwnet_device *dev = ptask->dev; 902 struct fwnet_device *dev = ptask->dev;
903 struct sk_buff *skb = ptask->skb;
909 unsigned long flags; 904 unsigned long flags;
910 bool free; 905 bool free;
911 906
@@ -914,10 +909,14 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
914 ptask->outstanding_pkts--; 909 ptask->outstanding_pkts--;
915 910
916 /* Check whether we or the networking TX soft-IRQ is last user. */ 911 /* Check whether we or the networking TX soft-IRQ is last user. */
917 free = (ptask->outstanding_pkts == 0 && !list_empty(&ptask->pt_link)); 912 free = (ptask->outstanding_pkts == 0 && ptask->enqueued);
913 if (free)
914 dec_queued_datagrams(dev);
918 915
919 if (ptask->outstanding_pkts == 0) 916 if (ptask->outstanding_pkts == 0) {
920 list_del(&ptask->pt_link); 917 dev->netdev->stats.tx_packets++;
918 dev->netdev->stats.tx_bytes += skb->len;
919 }
921 920
922 spin_unlock_irqrestore(&dev->lock, flags); 921 spin_unlock_irqrestore(&dev->lock, flags);
923 922
@@ -926,7 +925,6 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
926 u16 fg_off; 925 u16 fg_off;
927 u16 datagram_label; 926 u16 datagram_label;
928 u16 lf; 927 u16 lf;
929 struct sk_buff *skb;
930 928
931 /* Update the ptask to point to the next fragment and send it */ 929 /* Update the ptask to point to the next fragment and send it */
932 lf = fwnet_get_hdr_lf(&ptask->hdr); 930 lf = fwnet_get_hdr_lf(&ptask->hdr);
@@ -953,7 +951,7 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
953 datagram_label = fwnet_get_hdr_dgl(&ptask->hdr); 951 datagram_label = fwnet_get_hdr_dgl(&ptask->hdr);
954 break; 952 break;
955 } 953 }
956 skb = ptask->skb; 954
957 skb_pull(skb, ptask->max_payload); 955 skb_pull(skb, ptask->max_payload);
958 if (ptask->outstanding_pkts > 1) { 956 if (ptask->outstanding_pkts > 1) {
959 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG, 957 fwnet_make_sf_hdr(&ptask->hdr, RFC2374_HDR_INTFRAG,
@@ -970,6 +968,31 @@ static void fwnet_transmit_packet_done(struct fwnet_packet_task *ptask)
970 fwnet_free_ptask(ptask); 968 fwnet_free_ptask(ptask);
971} 969}
972 970
971static void fwnet_transmit_packet_failed(struct fwnet_packet_task *ptask)
972{
973 struct fwnet_device *dev = ptask->dev;
974 unsigned long flags;
975 bool free;
976
977 spin_lock_irqsave(&dev->lock, flags);
978
979 /* One fragment failed; don't try to send remaining fragments. */
980 ptask->outstanding_pkts = 0;
981
982 /* Check whether we or the networking TX soft-IRQ is last user. */
983 free = ptask->enqueued;
984 if (free)
985 dec_queued_datagrams(dev);
986
987 dev->netdev->stats.tx_dropped++;
988 dev->netdev->stats.tx_errors++;
989
990 spin_unlock_irqrestore(&dev->lock, flags);
991
992 if (free)
993 fwnet_free_ptask(ptask);
994}
995
973static void fwnet_write_complete(struct fw_card *card, int rcode, 996static void fwnet_write_complete(struct fw_card *card, int rcode,
974 void *payload, size_t length, void *data) 997 void *payload, size_t length, void *data)
975{ 998{
@@ -977,11 +1000,12 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
977 1000
978 ptask = data; 1001 ptask = data;
979 1002
980 if (rcode == RCODE_COMPLETE) 1003 if (rcode == RCODE_COMPLETE) {
981 fwnet_transmit_packet_done(ptask); 1004 fwnet_transmit_packet_done(ptask);
982 else 1005 } else {
983 fw_error("fwnet_write_complete: failed: %x\n", rcode); 1006 fw_error("fwnet_write_complete: failed: %x\n", rcode);
984 /* ??? error recovery */ 1007 fwnet_transmit_packet_failed(ptask);
1008 }
985} 1009}
986 1010
987static int fwnet_send_packet(struct fwnet_packet_task *ptask) 1011static int fwnet_send_packet(struct fwnet_packet_task *ptask)
@@ -1039,9 +1063,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1039 spin_lock_irqsave(&dev->lock, flags); 1063 spin_lock_irqsave(&dev->lock, flags);
1040 1064
1041 /* If the AT tasklet already ran, we may be last user. */ 1065 /* If the AT tasklet already ran, we may be last user. */
1042 free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); 1066 free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
1043 if (!free) 1067 if (!free)
1044 list_add_tail(&ptask->pt_link, &dev->broadcasted_list); 1068 ptask->enqueued = true;
1069 else
1070 dec_queued_datagrams(dev);
1045 1071
1046 spin_unlock_irqrestore(&dev->lock, flags); 1072 spin_unlock_irqrestore(&dev->lock, flags);
1047 1073
@@ -1056,9 +1082,11 @@ static int fwnet_send_packet(struct fwnet_packet_task *ptask)
1056 spin_lock_irqsave(&dev->lock, flags); 1082 spin_lock_irqsave(&dev->lock, flags);
1057 1083
1058 /* If the AT tasklet already ran, we may be last user. */ 1084 /* If the AT tasklet already ran, we may be last user. */
1059 free = (ptask->outstanding_pkts == 0 && list_empty(&ptask->pt_link)); 1085 free = (ptask->outstanding_pkts == 0 && !ptask->enqueued);
1060 if (!free) 1086 if (!free)
1061 list_add_tail(&ptask->pt_link, &dev->sent_list); 1087 ptask->enqueued = true;
1088 else
1089 dec_queued_datagrams(dev);
1062 1090
1063 spin_unlock_irqrestore(&dev->lock, flags); 1091 spin_unlock_irqrestore(&dev->lock, flags);
1064 1092
@@ -1224,6 +1252,15 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1224 struct fwnet_peer *peer; 1252 struct fwnet_peer *peer;
1225 unsigned long flags; 1253 unsigned long flags;
1226 1254
1255 spin_lock_irqsave(&dev->lock, flags);
1256
1257 /* Can this happen? */
1258 if (netif_queue_stopped(dev->netdev)) {
1259 spin_unlock_irqrestore(&dev->lock, flags);
1260
1261 return NETDEV_TX_BUSY;
1262 }
1263
1227 ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC); 1264 ptask = kmem_cache_alloc(fwnet_packet_task_cache, GFP_ATOMIC);
1228 if (ptask == NULL) 1265 if (ptask == NULL)
1229 goto fail; 1266 goto fail;
@@ -1242,9 +1279,6 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1242 proto = hdr_buf.h_proto; 1279 proto = hdr_buf.h_proto;
1243 dg_size = skb->len; 1280 dg_size = skb->len;
1244 1281
1245 /* serialize access to peer, including peer->datagram_label */
1246 spin_lock_irqsave(&dev->lock, flags);
1247
1248 /* 1282 /*
1249 * Set the transmission type for the packet. ARP packets and IP 1283 * Set the transmission type for the packet. ARP packets and IP
1250 * broadcast packets are sent via GASP. 1284 * broadcast packets are sent via GASP.
@@ -1266,7 +1300,7 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1266 1300
1267 peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid)); 1301 peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
1268 if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR) 1302 if (!peer || peer->fifo == FWNET_NO_FIFO_ADDR)
1269 goto fail_unlock; 1303 goto fail;
1270 1304
1271 generation = peer->generation; 1305 generation = peer->generation;
1272 dest_node = peer->node_id; 1306 dest_node = peer->node_id;
@@ -1320,18 +1354,21 @@ static netdev_tx_t fwnet_tx(struct sk_buff *skb, struct net_device *net)
1320 max_payload += RFC2374_FRAG_HDR_SIZE; 1354 max_payload += RFC2374_FRAG_HDR_SIZE;
1321 } 1355 }
1322 1356
1357 if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
1358 netif_stop_queue(dev->netdev);
1359
1323 spin_unlock_irqrestore(&dev->lock, flags); 1360 spin_unlock_irqrestore(&dev->lock, flags);
1324 1361
1325 ptask->max_payload = max_payload; 1362 ptask->max_payload = max_payload;
1326 INIT_LIST_HEAD(&ptask->pt_link); 1363 ptask->enqueued = 0;
1327 1364
1328 fwnet_send_packet(ptask); 1365 fwnet_send_packet(ptask);
1329 1366
1330 return NETDEV_TX_OK; 1367 return NETDEV_TX_OK;
1331 1368
1332 fail_unlock:
1333 spin_unlock_irqrestore(&dev->lock, flags);
1334 fail: 1369 fail:
1370 spin_unlock_irqrestore(&dev->lock, flags);
1371
1335 if (ptask) 1372 if (ptask)
1336 kmem_cache_free(fwnet_packet_task_cache, ptask); 1373 kmem_cache_free(fwnet_packet_task_cache, ptask);
1337 1374
@@ -1377,7 +1414,7 @@ static void fwnet_init_dev(struct net_device *net)
1377 net->addr_len = FWNET_ALEN; 1414 net->addr_len = FWNET_ALEN;
1378 net->hard_header_len = FWNET_HLEN; 1415 net->hard_header_len = FWNET_HLEN;
1379 net->type = ARPHRD_IEEE1394; 1416 net->type = ARPHRD_IEEE1394;
1380 net->tx_queue_len = 10; 1417 net->tx_queue_len = FWNET_TX_QUEUE_LEN;
1381} 1418}
1382 1419
1383/* caller must hold fwnet_device_mutex */ 1420/* caller must hold fwnet_device_mutex */
@@ -1457,14 +1494,9 @@ static int fwnet_probe(struct device *_dev)
1457 dev->broadcast_rcv_context = NULL; 1494 dev->broadcast_rcv_context = NULL;
1458 dev->broadcast_xmt_max_payload = 0; 1495 dev->broadcast_xmt_max_payload = 0;
1459 dev->broadcast_xmt_datagramlabel = 0; 1496 dev->broadcast_xmt_datagramlabel = 0;
1460
1461 dev->local_fifo = FWNET_NO_FIFO_ADDR; 1497 dev->local_fifo = FWNET_NO_FIFO_ADDR;
1462 1498 dev->queued_datagrams = 0;
1463 INIT_LIST_HEAD(&dev->packet_list);
1464 INIT_LIST_HEAD(&dev->broadcasted_list);
1465 INIT_LIST_HEAD(&dev->sent_list);
1466 INIT_LIST_HEAD(&dev->peer_list); 1499 INIT_LIST_HEAD(&dev->peer_list);
1467
1468 dev->card = card; 1500 dev->card = card;
1469 dev->netdev = net; 1501 dev->netdev = net;
1470 1502
@@ -1522,7 +1554,7 @@ static int fwnet_remove(struct device *_dev)
1522 struct fwnet_peer *peer = dev_get_drvdata(_dev); 1554 struct fwnet_peer *peer = dev_get_drvdata(_dev);
1523 struct fwnet_device *dev = peer->dev; 1555 struct fwnet_device *dev = peer->dev;
1524 struct net_device *net; 1556 struct net_device *net;
1525 struct fwnet_packet_task *ptask, *pt_next; 1557 int i;
1526 1558
1527 mutex_lock(&fwnet_device_mutex); 1559 mutex_lock(&fwnet_device_mutex);
1528 1560
@@ -1540,21 +1572,9 @@ static int fwnet_remove(struct device *_dev)
1540 dev->card); 1572 dev->card);
1541 fw_iso_context_destroy(dev->broadcast_rcv_context); 1573 fw_iso_context_destroy(dev->broadcast_rcv_context);
1542 } 1574 }
1543 list_for_each_entry_safe(ptask, pt_next, 1575 for (i = 0; dev->queued_datagrams && i < 5; i++)
1544 &dev->packet_list, pt_link) { 1576 ssleep(1);
1545 dev_kfree_skb_any(ptask->skb); 1577 WARN_ON(dev->queued_datagrams);
1546 kmem_cache_free(fwnet_packet_task_cache, ptask);
1547 }
1548 list_for_each_entry_safe(ptask, pt_next,
1549 &dev->broadcasted_list, pt_link) {
1550 dev_kfree_skb_any(ptask->skb);
1551 kmem_cache_free(fwnet_packet_task_cache, ptask);
1552 }
1553 list_for_each_entry_safe(ptask, pt_next,
1554 &dev->sent_list, pt_link) {
1555 dev_kfree_skb_any(ptask->skb);
1556 kmem_cache_free(fwnet_packet_task_cache, ptask);
1557 }
1558 list_del(&dev->dev_link); 1578 list_del(&dev->dev_link);
1559 1579
1560 free_netdev(net); 1580 free_netdev(net);
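
The firewire-net rewrite replaces three bookkeeping lists with a single queued_datagrams counter plus stop/wake hysteresis: the TX queue is stopped once 20 datagrams (comfortably below the 64 available transaction labels) are in flight, and woken only when completions drain the count back to 10, so the queue does not bounce on every completion while the AT DMA stays busy. Condensed to its core, under dev->lock:

    /* Sketch of the hysteresis above; the thresholds are the FWNET_*
     * limits from this patch, and dev->lock protects the counter. */
    static void tx_enqueue(struct fwnet_device *dev)
    {
            if (++dev->queued_datagrams == FWNET_MAX_QUEUED_DATAGRAMS)
                    netif_stop_queue(dev->netdev);  /* high-water: stop */
    }

    static void tx_complete(struct fwnet_device *dev)
    {
            if (--dev->queued_datagrams == FWNET_MIN_QUEUED_DATAGRAMS)
                    netif_wake_queue(dev->netdev);  /* low-water: wake */
    }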
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index e23c06893d19..599f6c9e0fbf 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -56,6 +56,18 @@ static struct cs5535_gpio_chip {
56 * registers, see include/linux/cs5535.h. 56 * registers, see include/linux/cs5535.h.
57 */ 57 */
58 58
59static void errata_outl(u32 val, unsigned long addr)
60{
61 /*
62 * According to the CS5536 errata (#36), after suspend
63 * a write to the high bank GPIO register will clear all
64 * non-selected bits; the recommended workaround is a
65 * read-modify-write operation.
66 */
67 val |= inl(addr);
68 outl(val, addr);
69}
70
59static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset, 71static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
60 unsigned int reg) 72 unsigned int reg)
61{ 73{
@@ -64,7 +76,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
64 outl(1 << offset, chip->base + reg); 76 outl(1 << offset, chip->base + reg);
65 else 77 else
66 /* high bank register */ 78 /* high bank register */
67 outl(1 << (offset - 16), chip->base + 0x80 + reg); 79 errata_outl(1 << (offset - 16), chip->base + 0x80 + reg);
68} 80}
69 81
70void cs5535_gpio_set(unsigned offset, unsigned int reg) 82void cs5535_gpio_set(unsigned offset, unsigned int reg)
@@ -86,7 +98,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
86 outl(1 << (offset + 16), chip->base + reg); 98 outl(1 << (offset + 16), chip->base + reg);
87 else 99 else
88 /* high bank register */ 100 /* high bank register */
89 outl(1 << offset, chip->base + 0x80 + reg); 101 errata_outl(1 << offset, chip->base + 0x80 + reg);
90} 102}
91 103
92void cs5535_gpio_clear(unsigned offset, unsigned int reg) 104void cs5535_gpio_clear(unsigned offset, unsigned int reg)
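
errata_outl() trades one extra port read per high-bank write for correctness after suspend. Note the idiom is only valid because re-writing a bit that reads back as 1 is harmless for these set/clear GPIO registers; on a write-1-to-clear status register the same pattern would ack pending bits by accident. The generic shape, with hypothetical register semantics:

    #include <asm/io.h>

    /* Read-modify-write so a hardware bug cannot clear unselected bits.
     * Safe only where writing back a read-back 1 is a no-op. */
    static inline void rmw_set_outl(u32 set_bits, unsigned long addr)
    {
            u32 val = inl(addr);            /* fetch current contents */

            outl(val | set_bits, addr);     /* preserve unrelated bits */
    }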
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73a..0cb2ba50af53 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
466} 466}
467EXPORT_SYMBOL(drm_agp_bind_pages); 467EXPORT_SYMBOL(drm_agp_bind_pages);
468 468
469void drm_agp_chipset_flush(struct drm_device *dev)
470{
471 agp_flush_chipset(dev->agp->bridge);
472}
473EXPORT_SYMBOL(drm_agp_chipset_flush);
474
475#endif /* __OS_HAS_AGP */ 469#endif /* __OS_HAS_AGP */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4c200931a6bc..c6b2e27a446a 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
241 } 241 }
242 242
243 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 243 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
244 if (!drm_helper_encoder_in_use(encoder)) { 244 if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) {
245 drm_encoder_disable(encoder); 245 drm_encoder_disable(encoder);
 246 /* disconnect encoder from any connector */ 246 /* disconnect encoder from any connector */
247 encoder->crtc = NULL; 247 encoder->crtc = NULL;
@@ -482,6 +482,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
482 int count = 0, ro, fail = 0; 482 int count = 0, ro, fail = 0;
483 struct drm_crtc_helper_funcs *crtc_funcs; 483 struct drm_crtc_helper_funcs *crtc_funcs;
484 int ret = 0; 484 int ret = 0;
485 int i;
485 486
486 DRM_DEBUG_KMS("\n"); 487 DRM_DEBUG_KMS("\n");
487 488
@@ -677,6 +678,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
677 if (ret != 0) 678 if (ret != 0)
678 goto fail; 679 goto fail;
679 } 680 }
681 DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
682 for (i = 0; i < set->num_connectors; i++) {
683 DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
684 drm_get_connector_name(set->connectors[i]));
685 set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
686 }
680 687
681 kfree(save_connectors); 688 kfree(save_connectors);
682 kfree(save_encoders); 689 kfree(save_encoders);
@@ -852,7 +859,7 @@ static void output_poll_execute(struct work_struct *work)
852 struct delayed_work *delayed_work = to_delayed_work(work); 859 struct delayed_work *delayed_work = to_delayed_work(work);
853 struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work); 860 struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
854 struct drm_connector *connector; 861 struct drm_connector *connector;
855 enum drm_connector_status old_status, status; 862 enum drm_connector_status old_status;
856 bool repoll = false, changed = false; 863 bool repoll = false, changed = false;
857 864
858 if (!drm_kms_helper_poll) 865 if (!drm_kms_helper_poll)
@@ -877,8 +884,9 @@ static void output_poll_execute(struct work_struct *work)
877 !(connector->polled & DRM_CONNECTOR_POLL_HPD)) 884 !(connector->polled & DRM_CONNECTOR_POLL_HPD))
878 continue; 885 continue;
879 886
880 status = connector->funcs->detect(connector, false); 887 connector->status = connector->funcs->detect(connector, false);
881 if (old_status != status) 888 DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
889 if (old_status != connector->status)
882 changed = true; 890 changed = true;
883 } 891 }
884 892
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 55160d7c38b6..8304c42195fc 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1047,10 +1047,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1047 struct timeval now; 1047 struct timeval now;
1048 unsigned long flags; 1048 unsigned long flags;
1049 unsigned int seq; 1049 unsigned int seq;
1050 int ret;
1050 1051
1051 e = kzalloc(sizeof *e, GFP_KERNEL); 1052 e = kzalloc(sizeof *e, GFP_KERNEL);
1052 if (e == NULL) 1053 if (e == NULL) {
1053 return -ENOMEM; 1054 ret = -ENOMEM;
1055 goto err_put;
1056 }
1054 1057
1055 e->pipe = pipe; 1058 e->pipe = pipe;
1056 e->base.pid = current->pid; 1059 e->base.pid = current->pid;
@@ -1064,9 +1067,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1064 spin_lock_irqsave(&dev->event_lock, flags); 1067 spin_lock_irqsave(&dev->event_lock, flags);
1065 1068
1066 if (file_priv->event_space < sizeof e->event) { 1069 if (file_priv->event_space < sizeof e->event) {
1067 spin_unlock_irqrestore(&dev->event_lock, flags); 1070 ret = -EBUSY;
1068 kfree(e); 1071 goto err_unlock;
1069 return -ENOMEM;
1070 } 1072 }
1071 1073
1072 file_priv->event_space -= sizeof e->event; 1074 file_priv->event_space -= sizeof e->event;
@@ -1103,6 +1105,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
1103 spin_unlock_irqrestore(&dev->event_lock, flags); 1105 spin_unlock_irqrestore(&dev->event_lock, flags);
1104 1106
1105 return 0; 1107 return 0;
1108
1109err_unlock:
1110 spin_unlock_irqrestore(&dev->event_lock, flags);
1111 kfree(e);
1112err_put:
1113 drm_vblank_put(dev, e->pipe);
1114 return ret;
1106} 1115}
1107 1116
1108/** 1117/**
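
The drm_queue_vblank_event() rework converts two open-coded unlock-free-return sequences into the kernel's layered-goto idiom: error labels are ordered inversely to acquisition, so each failure site jumps to the first label that undoes everything taken so far (err_put additionally balances a drm_vblank_get() reference taken outside the hunk shown). Schematically, with placeholder acquire/release steps rather than DRM functions:

    struct ctx;
    int acquire_a(struct ctx *c); void release_a(struct ctx *c);
    int acquire_b(struct ctx *c); void release_b(struct ctx *c);
    int acquire_c(struct ctx *c);

    static int do_setup(struct ctx *c)
    {
            int ret;

            ret = acquire_a(c);
            if (ret)
                    return ret;     /* nothing to unwind yet */

            ret = acquire_b(c);
            if (ret)
                    goto err_a;     /* undo step 1 only */

            ret = acquire_c(c);
            if (ret)
                    goto err_b;     /* undo steps 2, then 1 */

            return 0;

    err_b:                          /* labels in inverse acquisition order */
            release_b(c);
    err_a:
            release_a(c);
            return ret;
    }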
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed90..c59515ba7e69 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
392 mm->scanned_blocks = 0; 392 mm->scanned_blocks = 0;
393 mm->scan_hit_start = 0; 393 mm->scan_hit_start = 0;
394 mm->scan_hit_size = 0; 394 mm->scan_hit_size = 0;
395 mm->scan_check_range = 0;
395} 396}
396EXPORT_SYMBOL(drm_mm_init_scan); 397EXPORT_SYMBOL(drm_mm_init_scan);
397 398
398/** 399/**
 400 * Initialize lru scanning.
401 *
402 * This simply sets up the scanning routines with the parameters for the desired
403 * hole. This version is for range-restricted scans.
404 *
405 * Warning: As long as the scan list is non-empty, no other operations than
406 * adding/removing nodes to/from the scan list are allowed.
407 */
408void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
409 unsigned alignment,
410 unsigned long start,
411 unsigned long end)
412{
413 mm->scan_alignment = alignment;
414 mm->scan_size = size;
415 mm->scanned_blocks = 0;
416 mm->scan_hit_start = 0;
417 mm->scan_hit_size = 0;
418 mm->scan_start = start;
419 mm->scan_end = end;
420 mm->scan_check_range = 1;
421}
422EXPORT_SYMBOL(drm_mm_init_scan_with_range);
423
424/**
399 * Add a node to the scan list that might be freed to make space for the desired 425 * Add a node to the scan list that might be freed to make space for the desired
400 * hole. 426 * hole.
401 * 427 *
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
406 struct drm_mm *mm = node->mm; 432 struct drm_mm *mm = node->mm;
407 struct list_head *prev_free, *next_free; 433 struct list_head *prev_free, *next_free;
408 struct drm_mm_node *prev_node, *next_node; 434 struct drm_mm_node *prev_node, *next_node;
435 unsigned long adj_start;
436 unsigned long adj_end;
409 437
410 mm->scanned_blocks++; 438 mm->scanned_blocks++;
411 439
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
452 node->free_stack.prev = prev_free; 480 node->free_stack.prev = prev_free;
453 node->free_stack.next = next_free; 481 node->free_stack.next = next_free;
454 482
455 if (check_free_hole(node->start, node->start + node->size, 483 if (mm->scan_check_range) {
484 adj_start = node->start < mm->scan_start ?
485 mm->scan_start : node->start;
486 adj_end = node->start + node->size > mm->scan_end ?
487 mm->scan_end : node->start + node->size;
488 } else {
489 adj_start = node->start;
490 adj_end = node->start + node->size;
491 }
492
 493 if (check_free_hole(adj_start, adj_end,
456 mm->scan_size, mm->scan_alignment)) { 494 mm->scan_size, mm->scan_alignment)) {
457 mm->scan_hit_start = node->start; 495 mm->scan_hit_start = node->start;
458 mm->scan_hit_size = node->size; 496 mm->scan_hit_size = node->size;
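
Range-restricted scanning reuses the existing scan machinery; the only addition is clamping each node's [start, end) interval to [scan_start, scan_end) before check_free_hole(), so a node straddling a boundary contributes only its overlapping part. The clamp is a plain interval intersection:

    /* Intersect [start, end) with [lo, hi); adj_start >= adj_end after
     * the clamp means the node lies wholly outside the scan range. */
    static inline void clamp_to_range(unsigned long start, unsigned long end,
                                      unsigned long lo, unsigned long hi,
                                      unsigned long *adj_start,
                                      unsigned long *adj_end)
    {
            *adj_start = start < lo ? lo : start;
            *adj_end = end > hi ? hi : end;
    }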
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d5cc7b..0ae6a7c5020f 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
9 i915_gem.o \ 9 i915_gem.o \
10 i915_gem_debug.o \ 10 i915_gem_debug.o \
11 i915_gem_evict.o \ 11 i915_gem_evict.o \
12 i915_gem_execbuffer.o \
13 i915_gem_gtt.o \
12 i915_gem_tiling.o \ 14 i915_gem_tiling.o \
13 i915_trace_points.o \ 15 i915_trace_points.o \
14 intel_display.o \ 16 intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c7..92f75782c332 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "drm.h" 33#include "drm.h"
34#include "intel_drv.h" 34#include "intel_drv.h"
35#include "intel_ringbuffer.h"
35#include "i915_drm.h" 36#include "i915_drm.h"
36#include "i915_drv.h" 37#include "i915_drv.h"
37 38
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
72 B(is_broadwater); 73 B(is_broadwater);
73 B(is_crestline); 74 B(is_crestline);
74 B(has_fbc); 75 B(has_fbc);
75 B(has_rc6);
76 B(has_pipe_cxsr); 76 B(has_pipe_cxsr);
77 B(has_hotplug); 77 B(has_hotplug);
78 B(cursor_needs_physical); 78 B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
86 return 0; 86 return 0;
87} 87}
88 88
89static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv) 89static const char *get_pin_flag(struct drm_i915_gem_object *obj)
90{ 90{
91 if (obj_priv->user_pin_count > 0) 91 if (obj->user_pin_count > 0)
92 return "P"; 92 return "P";
93 else if (obj_priv->pin_count > 0) 93 else if (obj->pin_count > 0)
94 return "p"; 94 return "p";
95 else 95 else
96 return " "; 96 return " ";
97} 97}
98 98
99static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv) 99static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
100{ 100{
101 switch (obj_priv->tiling_mode) { 101 switch (obj->tiling_mode) {
102 default: 102 default:
103 case I915_TILING_NONE: return " "; 103 case I915_TILING_NONE: return " ";
104 case I915_TILING_X: return "X"; 104 case I915_TILING_X: return "X";
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
109static void 109static void
110describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) 110describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
111{ 111{
112 seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s", 112 seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
113 &obj->base, 113 &obj->base,
114 get_pin_flag(obj), 114 get_pin_flag(obj),
115 get_tiling_flag(obj), 115 get_tiling_flag(obj),
@@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
117 obj->base.read_domains, 117 obj->base.read_domains,
118 obj->base.write_domain, 118 obj->base.write_domain,
119 obj->last_rendering_seqno, 119 obj->last_rendering_seqno,
120 obj->last_fenced_seqno,
120 obj->dirty ? " dirty" : "", 121 obj->dirty ? " dirty" : "",
121 obj->madv == I915_MADV_DONTNEED ? " purgeable" : ""); 122 obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
122 if (obj->base.name) 123 if (obj->base.name)
@@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
124 if (obj->fence_reg != I915_FENCE_REG_NONE) 125 if (obj->fence_reg != I915_FENCE_REG_NONE)
125 seq_printf(m, " (fence: %d)", obj->fence_reg); 126 seq_printf(m, " (fence: %d)", obj->fence_reg);
126 if (obj->gtt_space != NULL) 127 if (obj->gtt_space != NULL)
127 seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset); 128 seq_printf(m, " (gtt offset: %08x, size: %08x)",
129 obj->gtt_offset, (unsigned int)obj->gtt_space->size);
130 if (obj->pin_mappable || obj->fault_mappable) {
131 char s[3], *t = s;
132 if (obj->pin_mappable)
133 *t++ = 'p';
134 if (obj->fault_mappable)
135 *t++ = 'f';
136 *t = '\0';
137 seq_printf(m, " (%s mappable)", s);
138 }
128 if (obj->ring != NULL) 139 if (obj->ring != NULL)
129 seq_printf(m, " (%s)", obj->ring->name); 140 seq_printf(m, " (%s)", obj->ring->name);
130} 141}
@@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
136 struct list_head *head; 147 struct list_head *head;
137 struct drm_device *dev = node->minor->dev; 148 struct drm_device *dev = node->minor->dev;
138 drm_i915_private_t *dev_priv = dev->dev_private; 149 drm_i915_private_t *dev_priv = dev->dev_private;
139 struct drm_i915_gem_object *obj_priv; 150 struct drm_i915_gem_object *obj;
140 size_t total_obj_size, total_gtt_size; 151 size_t total_obj_size, total_gtt_size;
141 int count, ret; 152 int count, ret;
142 153
@@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
171 } 182 }
172 183
173 total_obj_size = total_gtt_size = count = 0; 184 total_obj_size = total_gtt_size = count = 0;
174 list_for_each_entry(obj_priv, head, mm_list) { 185 list_for_each_entry(obj, head, mm_list) {
175 seq_printf(m, " "); 186 seq_printf(m, " ");
176 describe_obj(m, obj_priv); 187 describe_obj(m, obj);
177 seq_printf(m, "\n"); 188 seq_printf(m, "\n");
178 total_obj_size += obj_priv->base.size; 189 total_obj_size += obj->base.size;
179 total_gtt_size += obj_priv->gtt_space->size; 190 total_gtt_size += obj->gtt_space->size;
180 count++; 191 count++;
181 } 192 }
182 mutex_unlock(&dev->struct_mutex); 193 mutex_unlock(&dev->struct_mutex);
@@ -186,24 +197,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
186 return 0; 197 return 0;
187} 198}
188 199
200#define count_objects(list, member) do { \
201 list_for_each_entry(obj, list, member) { \
202 size += obj->gtt_space->size; \
203 ++count; \
204 if (obj->map_and_fenceable) { \
205 mappable_size += obj->gtt_space->size; \
206 ++mappable_count; \
207 } \
208 } \
209} while(0)
210
189static int i915_gem_object_info(struct seq_file *m, void* data) 211static int i915_gem_object_info(struct seq_file *m, void* data)
190{ 212{
191 struct drm_info_node *node = (struct drm_info_node *) m->private; 213 struct drm_info_node *node = (struct drm_info_node *) m->private;
192 struct drm_device *dev = node->minor->dev; 214 struct drm_device *dev = node->minor->dev;
193 struct drm_i915_private *dev_priv = dev->dev_private; 215 struct drm_i915_private *dev_priv = dev->dev_private;
216 u32 count, mappable_count;
217 size_t size, mappable_size;
218 struct drm_i915_gem_object *obj;
194 int ret; 219 int ret;
195 220
196 ret = mutex_lock_interruptible(&dev->struct_mutex); 221 ret = mutex_lock_interruptible(&dev->struct_mutex);
197 if (ret) 222 if (ret)
198 return ret; 223 return ret;
199 224
200 seq_printf(m, "%u objects\n", dev_priv->mm.object_count); 225 seq_printf(m, "%u objects, %zu bytes\n",
201 seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory); 226 dev_priv->mm.object_count,
202 seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count); 227 dev_priv->mm.object_memory);
203 seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory); 228
204 seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count); 229 size = count = mappable_size = mappable_count = 0;
205 seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory); 230 count_objects(&dev_priv->mm.gtt_list, gtt_list);
206 seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total); 231 seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
232 count, mappable_count, size, mappable_size);
233
234 size = count = mappable_size = mappable_count = 0;
235 count_objects(&dev_priv->mm.active_list, mm_list);
236 count_objects(&dev_priv->mm.flushing_list, mm_list);
237 seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
238 count, mappable_count, size, mappable_size);
239
240 size = count = mappable_size = mappable_count = 0;
241 count_objects(&dev_priv->mm.pinned_list, mm_list);
242 seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
243 count, mappable_count, size, mappable_size);
244
245 size = count = mappable_size = mappable_count = 0;
246 count_objects(&dev_priv->mm.inactive_list, mm_list);
247 seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
248 count, mappable_count, size, mappable_size);
249
250 size = count = mappable_size = mappable_count = 0;
251 count_objects(&dev_priv->mm.deferred_free_list, mm_list);
252 seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
253 count, mappable_count, size, mappable_size);
254
255 size = count = mappable_size = mappable_count = 0;
256 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
257 if (obj->fault_mappable) {
258 size += obj->gtt_space->size;
259 ++count;
260 }
261 if (obj->pin_mappable) {
262 mappable_size += obj->gtt_space->size;
263 ++mappable_count;
264 }
265 }
266 seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
267 mappable_count, mappable_size);
268 seq_printf(m, "%u fault mappable objects, %zu bytes\n",
269 count, size);
270
271 seq_printf(m, "%zu [%zu] gtt total\n",
272 dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
207 273
208 mutex_unlock(&dev->struct_mutex); 274 mutex_unlock(&dev->struct_mutex);
209 275
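
The count_objects() helper introduced above is a statement-like macro wrapped in do { ... } while (0) so that a trailing semicolon turns it into exactly one statement, keeping it safe under a braceless if/else; it deliberately reads its caller's obj, size, count, mappable_size and mappable_count variables, which is why it is a macro defined next to its users rather than a function. Why the wrapper matters, in miniature (hypothetical macros):

    #define BAD_COUNT(x)    seq++; total += (x)
    #define GOOD_COUNT(x)   do { seq++; total += (x); } while (0)

    /*
     * if (cond)
     *         BAD_COUNT(n);        // only "seq++" is guarded; the
     * else                         // else no longer parses at all
     *         ...
     *
     * if (cond)
     *         GOOD_COUNT(n);       // one statement; else binds fine
     */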
@@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
243 seq_printf(m, "%d prepares\n", work->pending); 309 seq_printf(m, "%d prepares\n", work->pending);
244 310
245 if (work->old_fb_obj) { 311 if (work->old_fb_obj) {
246 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj); 312 struct drm_i915_gem_object *obj = work->old_fb_obj;
247 if(obj_priv) 313 if (obj)
248 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); 314 seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
249 } 315 }
250 if (work->pending_flip_obj) { 316 if (work->pending_flip_obj) {
251 struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj); 317 struct drm_i915_gem_object *obj = work->pending_flip_obj;
252 if(obj_priv) 318 if (obj)
253 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset ); 319 seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
254 } 320 }
255 } 321 }
256 spin_unlock_irqrestore(&dev->event_lock, flags); 322 spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
265 struct drm_device *dev = node->minor->dev; 331 struct drm_device *dev = node->minor->dev;
266 drm_i915_private_t *dev_priv = dev->dev_private; 332 drm_i915_private_t *dev_priv = dev->dev_private;
267 struct drm_i915_gem_request *gem_request; 333 struct drm_i915_gem_request *gem_request;
268 int ret; 334 int ret, count;
269 335
270 ret = mutex_lock_interruptible(&dev->struct_mutex); 336 ret = mutex_lock_interruptible(&dev->struct_mutex);
271 if (ret) 337 if (ret)
272 return ret; 338 return ret;
273 339
274 seq_printf(m, "Request:\n"); 340 count = 0;
275 list_for_each_entry(gem_request, &dev_priv->render_ring.request_list, 341 if (!list_empty(&dev_priv->ring[RCS].request_list)) {
276 list) { 342 seq_printf(m, "Render requests:\n");
277 seq_printf(m, " %d @ %d\n", 343 list_for_each_entry(gem_request,
278 gem_request->seqno, 344 &dev_priv->ring[RCS].request_list,
279 (int) (jiffies - gem_request->emitted_jiffies)); 345 list) {
346 seq_printf(m, " %d @ %d\n",
347 gem_request->seqno,
348 (int) (jiffies - gem_request->emitted_jiffies));
349 }
350 count++;
351 }
352 if (!list_empty(&dev_priv->ring[VCS].request_list)) {
353 seq_printf(m, "BSD requests:\n");
354 list_for_each_entry(gem_request,
355 &dev_priv->ring[VCS].request_list,
356 list) {
357 seq_printf(m, " %d @ %d\n",
358 gem_request->seqno,
359 (int) (jiffies - gem_request->emitted_jiffies));
360 }
361 count++;
362 }
363 if (!list_empty(&dev_priv->ring[BCS].request_list)) {
364 seq_printf(m, "BLT requests:\n");
365 list_for_each_entry(gem_request,
366 &dev_priv->ring[BCS].request_list,
367 list) {
368 seq_printf(m, " %d @ %d\n",
369 gem_request->seqno,
370 (int) (jiffies - gem_request->emitted_jiffies));
371 }
372 count++;
280 } 373 }
281 mutex_unlock(&dev->struct_mutex); 374 mutex_unlock(&dev->struct_mutex);
282 375
376 if (count == 0)
377 seq_printf(m, "No requests\n");
378
283 return 0; 379 return 0;
284} 380}
285 381
382static void i915_ring_seqno_info(struct seq_file *m,
383 struct intel_ring_buffer *ring)
384{
385 if (ring->get_seqno) {
386 seq_printf(m, "Current sequence (%s): %d\n",
387 ring->name, ring->get_seqno(ring));
388 seq_printf(m, "Waiter sequence (%s): %d\n",
389 ring->name, ring->waiting_seqno);
390 seq_printf(m, "IRQ sequence (%s): %d\n",
391 ring->name, ring->irq_seqno);
392 }
393}
394
286static int i915_gem_seqno_info(struct seq_file *m, void *data) 395static int i915_gem_seqno_info(struct seq_file *m, void *data)
287{ 396{
288 struct drm_info_node *node = (struct drm_info_node *) m->private; 397 struct drm_info_node *node = (struct drm_info_node *) m->private;
289 struct drm_device *dev = node->minor->dev; 398 struct drm_device *dev = node->minor->dev;
290 drm_i915_private_t *dev_priv = dev->dev_private; 399 drm_i915_private_t *dev_priv = dev->dev_private;
291 int ret; 400 int ret, i;
292 401
293 ret = mutex_lock_interruptible(&dev->struct_mutex); 402 ret = mutex_lock_interruptible(&dev->struct_mutex);
294 if (ret) 403 if (ret)
295 return ret; 404 return ret;
296 405
297 if (dev_priv->render_ring.status_page.page_addr != NULL) { 406 for (i = 0; i < I915_NUM_RINGS; i++)
298 seq_printf(m, "Current sequence: %d\n", 407 i915_ring_seqno_info(m, &dev_priv->ring[i]);
299 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
300 } else {
301 seq_printf(m, "Current sequence: hws uninitialized\n");
302 }
303 seq_printf(m, "Waiter sequence: %d\n",
304 dev_priv->mm.waiting_gem_seqno);
305 seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
306 408
307 mutex_unlock(&dev->struct_mutex); 409 mutex_unlock(&dev->struct_mutex);
308 410
@@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
315 struct drm_info_node *node = (struct drm_info_node *) m->private; 417 struct drm_info_node *node = (struct drm_info_node *) m->private;
316 struct drm_device *dev = node->minor->dev; 418 struct drm_device *dev = node->minor->dev;
317 drm_i915_private_t *dev_priv = dev->dev_private; 419 drm_i915_private_t *dev_priv = dev->dev_private;
318 int ret; 420 int ret, i;
319 421
320 ret = mutex_lock_interruptible(&dev->struct_mutex); 422 ret = mutex_lock_interruptible(&dev->struct_mutex);
321 if (ret) 423 if (ret)
@@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
354 } 456 }
355 seq_printf(m, "Interrupts received: %d\n", 457 seq_printf(m, "Interrupts received: %d\n",
356 atomic_read(&dev_priv->irq_received)); 458 atomic_read(&dev_priv->irq_received));
357 if (dev_priv->render_ring.status_page.page_addr != NULL) { 459 for (i = 0; i < I915_NUM_RINGS; i++)
358 seq_printf(m, "Current sequence: %d\n", 460 i915_ring_seqno_info(m, &dev_priv->ring[i]);
359 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
360 } else {
361 seq_printf(m, "Current sequence: hws uninitialized\n");
362 }
363 seq_printf(m, "Waiter sequence: %d\n",
364 dev_priv->mm.waiting_gem_seqno);
365 seq_printf(m, "IRQ sequence: %d\n",
366 dev_priv->mm.irq_gem_seqno);
367 mutex_unlock(&dev->struct_mutex); 461 mutex_unlock(&dev->struct_mutex);
368 462
369 return 0; 463 return 0;
@@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
383 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start); 477 seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
384 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs); 478 seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
385 for (i = 0; i < dev_priv->num_fence_regs; i++) { 479 for (i = 0; i < dev_priv->num_fence_regs; i++) {
386 struct drm_gem_object *obj = dev_priv->fence_regs[i].obj; 480 struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
387 481
388 if (obj == NULL) { 482 seq_printf(m, "Fenced object[%2d] = ", i);
389 seq_printf(m, "Fenced object[%2d] = unused\n", i); 483 if (obj == NULL)
390 } else { 484 seq_printf(m, "unused");
391 struct drm_i915_gem_object *obj_priv; 485 else
392 486 describe_obj(m, obj);
393 obj_priv = to_intel_bo(obj); 487 seq_printf(m, "\n");
394 seq_printf(m, "Fenced object[%2d] = %p: %s "
395 "%08x %08zx %08x %s %08x %08x %d",
396 i, obj, get_pin_flag(obj_priv),
397 obj_priv->gtt_offset,
398 obj->size, obj_priv->stride,
399 get_tiling_flag(obj_priv),
400 obj->read_domains, obj->write_domain,
401 obj_priv->last_rendering_seqno);
402 if (obj->name)
403 seq_printf(m, " (name: %d)", obj->name);
404 seq_printf(m, "\n");
405 }
406 } 488 }
407 mutex_unlock(&dev->struct_mutex);
408 489
490 mutex_unlock(&dev->struct_mutex);
409 return 0; 491 return 0;
410} 492}
411 493
@@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
414 struct drm_info_node *node = (struct drm_info_node *) m->private; 496 struct drm_info_node *node = (struct drm_info_node *) m->private;
415 struct drm_device *dev = node->minor->dev; 497 struct drm_device *dev = node->minor->dev;
416 drm_i915_private_t *dev_priv = dev->dev_private; 498 drm_i915_private_t *dev_priv = dev->dev_private;
417 int i; 499 struct intel_ring_buffer *ring;
418 volatile u32 *hws; 500 volatile u32 *hws;
501 int i;
419 502
420 hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr; 503 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
504 hws = (volatile u32 *)ring->status_page.page_addr;
421 if (hws == NULL) 505 if (hws == NULL)
422 return 0; 506 return 0;
423 507
@@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
431 515
432static void i915_dump_object(struct seq_file *m, 516static void i915_dump_object(struct seq_file *m,
433 struct io_mapping *mapping, 517 struct io_mapping *mapping,
434 struct drm_i915_gem_object *obj_priv) 518 struct drm_i915_gem_object *obj)
435{ 519{
436 int page, page_count, i; 520 int page, page_count, i;
437 521
438 page_count = obj_priv->base.size / PAGE_SIZE; 522 page_count = obj->base.size / PAGE_SIZE;
439 for (page = 0; page < page_count; page++) { 523 for (page = 0; page < page_count; page++) {
440 u32 *mem = io_mapping_map_wc(mapping, 524 u32 *mem = io_mapping_map_wc(mapping,
441 obj_priv->gtt_offset + page * PAGE_SIZE); 525 obj->gtt_offset + page * PAGE_SIZE);
442 for (i = 0; i < PAGE_SIZE; i += 4) 526 for (i = 0; i < PAGE_SIZE; i += 4)
443 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]); 527 seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
444 io_mapping_unmap(mem); 528 io_mapping_unmap(mem);
@@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
450 struct drm_info_node *node = (struct drm_info_node *) m->private; 534 struct drm_info_node *node = (struct drm_info_node *) m->private;
451 struct drm_device *dev = node->minor->dev; 535 struct drm_device *dev = node->minor->dev;
452 drm_i915_private_t *dev_priv = dev->dev_private; 536 drm_i915_private_t *dev_priv = dev->dev_private;
453 struct drm_gem_object *obj; 537 struct drm_i915_gem_object *obj;
454 struct drm_i915_gem_object *obj_priv;
455 int ret; 538 int ret;
456 539
457 ret = mutex_lock_interruptible(&dev->struct_mutex); 540 ret = mutex_lock_interruptible(&dev->struct_mutex);
458 if (ret) 541 if (ret)
459 return ret; 542 return ret;
460 543
461 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 544 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
462 obj = &obj_priv->base; 545 if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
463 if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) { 546 seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
464 seq_printf(m, "--- gtt_offset = 0x%08x\n", 547 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
465 obj_priv->gtt_offset);
466 i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
467 } 548 }
468 } 549 }
469 550
470 mutex_unlock(&dev->struct_mutex); 551 mutex_unlock(&dev->struct_mutex);
471
472 return 0; 552 return 0;
473} 553}
474 554
@@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
477 struct drm_info_node *node = (struct drm_info_node *) m->private; 557 struct drm_info_node *node = (struct drm_info_node *) m->private;
478 struct drm_device *dev = node->minor->dev; 558 struct drm_device *dev = node->minor->dev;
479 drm_i915_private_t *dev_priv = dev->dev_private; 559 drm_i915_private_t *dev_priv = dev->dev_private;
560 struct intel_ring_buffer *ring;
480 int ret; 561 int ret;
481 562
482 ret = mutex_lock_interruptible(&dev->struct_mutex); 563 ret = mutex_lock_interruptible(&dev->struct_mutex);
483 if (ret) 564 if (ret)
484 return ret; 565 return ret;
485 566
486 if (!dev_priv->render_ring.gem_object) { 567 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
568 if (!ring->obj) {
487 seq_printf(m, "No ringbuffer setup\n"); 569 seq_printf(m, "No ringbuffer setup\n");
488 } else { 570 } else {
489 u8 *virt = dev_priv->render_ring.virtual_start; 571 u8 *virt = ring->virtual_start;
490 uint32_t off; 572 uint32_t off;
491 573
492 for (off = 0; off < dev_priv->render_ring.size; off += 4) { 574 for (off = 0; off < ring->size; off += 4) {
493 uint32_t *ptr = (uint32_t *)(virt + off); 575 uint32_t *ptr = (uint32_t *)(virt + off);
494 seq_printf(m, "%08x : %08x\n", off, *ptr); 576 seq_printf(m, "%08x : %08x\n", off, *ptr);
495 } 577 }
@@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
504 struct drm_info_node *node = (struct drm_info_node *) m->private; 586 struct drm_info_node *node = (struct drm_info_node *) m->private;
505 struct drm_device *dev = node->minor->dev; 587 struct drm_device *dev = node->minor->dev;
506 drm_i915_private_t *dev_priv = dev->dev_private; 588 drm_i915_private_t *dev_priv = dev->dev_private;
507 unsigned int head, tail; 589 struct intel_ring_buffer *ring;
508 590
509 head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 591 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
510 tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 592 if (ring->size == 0)
593 return 0;
511 594
512 seq_printf(m, "RingHead : %08x\n", head); 595 seq_printf(m, "Ring %s:\n", ring->name);
513 seq_printf(m, "RingTail : %08x\n", tail); 596 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
514 seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size); 597 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
515 seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD)); 598 seq_printf(m, " Size : %08x\n", ring->size);
599 seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
600 seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
601 if (IS_GEN6(dev)) {
602 seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
603 seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
604 }
605 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
606 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
516 607
517 return 0; 608 return 0;
518} 609}
519 610
611static const char *ring_str(int ring)
612{
613 switch (ring) {
614 case RING_RENDER: return " render";
615 case RING_BSD: return " bsd";
616 case RING_BLT: return " blt";
617 default: return "";
618 }
619}
620
520static const char *pin_flag(int pinned) 621static const char *pin_flag(int pinned)
521{ 622{
522 if (pinned > 0) 623 if (pinned > 0)
@@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable)
547 return purgeable ? " purgeable" : ""; 648 return purgeable ? " purgeable" : "";
548} 649}
549 650
651static void print_error_buffers(struct seq_file *m,
652 const char *name,
653 struct drm_i915_error_buffer *err,
654 int count)
655{
656 seq_printf(m, "%s [%d]:\n", name, count);
657
658 while (count--) {
659 seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
660 err->gtt_offset,
661 err->size,
662 err->read_domains,
663 err->write_domain,
664 err->seqno,
665 pin_flag(err->pinned),
666 tiling_flag(err->tiling),
667 dirty_flag(err->dirty),
668 purgeable_flag(err->purgeable),
669 ring_str(err->ring));
670
671 if (err->name)
672 seq_printf(m, " (name: %d)", err->name);
673 if (err->fence_reg != I915_FENCE_REG_NONE)
674 seq_printf(m, " (fence: %d)", err->fence_reg);
675
676 seq_printf(m, "\n");
677 err++;
678 }
679}
680
550static int i915_error_state(struct seq_file *m, void *unused) 681static int i915_error_state(struct seq_file *m, void *unused)
551{ 682{
552 struct drm_info_node *node = (struct drm_info_node *) m->private; 683 struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused)
568 error->time.tv_usec); 699 error->time.tv_usec);
569 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device); 700 seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
570 seq_printf(m, "EIR: 0x%08x\n", error->eir); 701 seq_printf(m, "EIR: 0x%08x\n", error->eir);
571 seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er); 702 seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
572 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 703 if (INTEL_INFO(dev)->gen >= 6) {
704 seq_printf(m, "ERROR: 0x%08x\n", error->error);
705 seq_printf(m, "Blitter command stream:\n");
706 seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
707 seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
708 seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
709 seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
710 seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
711 seq_printf(m, "Video (BSD) command stream:\n");
712 seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
713 seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
714 seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
715 seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
716 seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
717 }
718 seq_printf(m, "Render command stream:\n");
719 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
573 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir); 720 seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
574 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr); 721 seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
575 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone); 722 seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
576 seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
577 if (INTEL_INFO(dev)->gen >= 4) { 723 if (INTEL_INFO(dev)->gen >= 4) {
578 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
579 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1); 724 seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
725 seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
580 } 726 }
581 seq_printf(m, "seqno: 0x%08x\n", error->seqno); 727 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
582 728 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
583 if (error->active_bo_count) { 729
584 seq_printf(m, "Buffers [%d]:\n", error->active_bo_count); 730 for (i = 0; i < 16; i++)
585 731 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
586 for (i = 0; i < error->active_bo_count; i++) { 732
587 seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s", 733 if (error->active_bo)
588 error->active_bo[i].gtt_offset, 734 print_error_buffers(m, "Active",
589 error->active_bo[i].size, 735 error->active_bo,
590 error->active_bo[i].read_domains, 736 error->active_bo_count);
591 error->active_bo[i].write_domain, 737
592 error->active_bo[i].seqno, 738 if (error->pinned_bo)
593 pin_flag(error->active_bo[i].pinned), 739 print_error_buffers(m, "Pinned",
594 tiling_flag(error->active_bo[i].tiling), 740 error->pinned_bo,
595 dirty_flag(error->active_bo[i].dirty), 741 error->pinned_bo_count);
596 purgeable_flag(error->active_bo[i].purgeable));
597
598 if (error->active_bo[i].name)
599 seq_printf(m, " (name: %d)", error->active_bo[i].name);
600 if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
601 seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
602
603 seq_printf(m, "\n");
604 }
605 }
606 742
607 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) { 743 for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
608 if (error->batchbuffer[i]) { 744 if (error->batchbuffer[i]) {
@@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
635 if (error->overlay) 771 if (error->overlay)
636 intel_overlay_print_error_state(m, error->overlay); 772 intel_overlay_print_error_state(m, error->overlay);
637 773
774 if (error->display)
775 intel_display_print_error_state(m, dev, error->display);
776
638out: 777out:
639 spin_unlock_irqrestore(&dev_priv->error_lock, flags); 778 spin_unlock_irqrestore(&dev_priv->error_lock, flags);
640 779
@@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 rgvswctl = I915_READ16(MEMSWCTL);
-	u16 rgvstat = I915_READ16(MEMSTAT_ILK);
 
-	seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
-	seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
-	seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
-		   MEMSTAT_VID_SHIFT);
-	seq_printf(m, "Current P-state: %d\n",
-		   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	if (IS_GEN5(dev)) {
+		u16 rgvswctl = I915_READ16(MEMSWCTL);
+		u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+		seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+		seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+		seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+			   MEMSTAT_VID_SHIFT);
+		seq_printf(m, "Current P-state: %d\n",
+			   (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+	} else if (IS_GEN6(dev)) {
+		u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+		u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+		u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+		int max_freq;
+
+		/* RPSTAT1 is in the GT power well */
+		__gen6_force_wake_get(dev_priv);
+
+		seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+		seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+		seq_printf(m, "Render p-state ratio: %d\n",
+			   (gt_perf_status & 0xff00) >> 8);
+		seq_printf(m, "Render p-state VID: %d\n",
+			   gt_perf_status & 0xff);
+		seq_printf(m, "Render p-state limit: %d\n",
+			   rp_state_limits & 0xff);
+
+		max_freq = (rp_state_cap & 0xff0000) >> 16;
+		seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = (rp_state_cap & 0xff00) >> 8;
+		seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		max_freq = rp_state_cap & 0xff;
+		seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+			   max_freq * 100);
+
+		__gen6_force_wake_put(dev_priv);
+	} else {
+		seq_printf(m, "no P-state info available\n");
+	}
 
 	return 0;
 }
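
The new GEN6 branch reads three 8-bit ratios out of RP_STATE_CAP and scales each by the 100 MHz reference clock. A minimal, compilable sketch of just that decoding; the register value is a made-up sample, and the masks simply mirror the hunk above:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rp_state_cap = 0x0a0e12;	/* hypothetical sample readout */

	/* Bits 23:16 = RPN, 15:8 = RP1, 7:0 = RP0; each field is a
	 * multiple of the 100 MHz reference clock. */
	printf("Lowest (RPN) frequency: %uMHz\n",
	       ((rp_state_cap & 0xff0000) >> 16) * 100);
	printf("Nominal (RP1) frequency: %uMHz\n",
	       ((rp_state_cap & 0xff00) >> 8) * 100);
	printf("Max non-overclocked (RP0) frequency: %uMHz\n",
	       (rp_state_cap & 0xff) * 100);
	return 0;
}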
@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
794 drm_i915_private_t *dev_priv = dev->dev_private; 969 drm_i915_private_t *dev_priv = dev->dev_private;
795 bool sr_enabled = false; 970 bool sr_enabled = false;
796 971
797 if (IS_GEN5(dev)) 972 if (HAS_PCH_SPLIT(dev))
798 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN; 973 sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
799 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev)) 974 else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
800 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN; 975 sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
886 fb->base.height, 1061 fb->base.height,
887 fb->base.depth, 1062 fb->base.depth,
888 fb->base.bits_per_pixel); 1063 fb->base.bits_per_pixel);
889 describe_obj(m, to_intel_bo(fb->obj)); 1064 describe_obj(m, fb->obj);
890 seq_printf(m, "\n"); 1065 seq_printf(m, "\n");
891 1066
892 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) { 1067 list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
898 fb->base.height, 1073 fb->base.height,
899 fb->base.depth, 1074 fb->base.depth,
900 fb->base.bits_per_pixel); 1075 fb->base.bits_per_pixel);
901 describe_obj(m, to_intel_bo(fb->obj)); 1076 describe_obj(m, fb->obj);
902 seq_printf(m, "\n"); 1077 seq_printf(m, "\n");
903 } 1078 }
904 1079
@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
943 loff_t *ppos) 1118 loff_t *ppos)
944{ 1119{
945 struct drm_device *dev = filp->private_data; 1120 struct drm_device *dev = filp->private_data;
946 drm_i915_private_t *dev_priv = dev->dev_private;
947 char buf[20]; 1121 char buf[20];
948 int val = 1; 1122 int val = 1;
949 1123
@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
959 } 1133 }
960 1134
961 DRM_INFO("Manually setting wedged to %d\n", val); 1135 DRM_INFO("Manually setting wedged to %d\n", val);
962 1136 i915_handle_error(dev, val);
963 atomic_set(&dev_priv->mm.wedged, val);
964 if (val) {
965 wake_up_all(&dev_priv->irq_queue);
966 queue_work(dev_priv->wq, &dev_priv->error_work);
967 }
968 1137
969 return cnt; 1138 return cnt;
970} 1139}
@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
1028 {"i915_gem_seqno", i915_gem_seqno_info, 0}, 1197 {"i915_gem_seqno", i915_gem_seqno_info, 0},
1029 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0}, 1198 {"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
1030 {"i915_gem_interrupt", i915_interrupt_info, 0}, 1199 {"i915_gem_interrupt", i915_interrupt_info, 0},
1031 {"i915_gem_hws", i915_hws_info, 0}, 1200 {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
1032 {"i915_ringbuffer_data", i915_ringbuffer_data, 0}, 1201 {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
1033 {"i915_ringbuffer_info", i915_ringbuffer_info, 0}, 1202 {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
1203 {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
1204 {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
1205 {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
1206 {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
1207 {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
1208 {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
1034 {"i915_batchbuffers", i915_batchbuffer_info, 0}, 1209 {"i915_batchbuffers", i915_batchbuffer_info, 0},
1035 {"i915_error_state", i915_error_state, 0}, 1210 {"i915_error_state", i915_error_state, 0},
1036 {"i915_rstdby_delays", i915_rstdby_delays, 0}, 1211 {"i915_rstdby_delays", i915_rstdby_delays, 0},
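
The debugfs table now reuses one callback per file type and stashes the ring id (RCS/VCS/BCS) in the previously unused data slot of each entry. A self-contained model of that dispatch pattern, using simplified stand-in types rather than the real drm structures:

#include <stdio.h>

enum ring { RCS, VCS, BCS };

struct info_entry {
	const char *name;
	int (*show)(const struct info_entry *ent);
	void *data;	/* per-entry parameter, as in drm_info_list */
};

static int show_hws(const struct info_entry *ent)
{
	int ring = (int)(long)ent->data;
	printf("%s: status page for ring %d\n", ent->name, ring);
	return 0;
}

static const struct info_entry table[] = {
	{ "i915_gem_hws",     show_hws, (void *)(long)RCS },
	{ "i915_gem_hws_blt", show_hws, (void *)(long)BCS },
	{ "i915_gem_hws_bsd", show_hws, (void *)(long)VCS },
};

int main(void)
{
	/* One handler serves all three rings, selected by ->data. */
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		table[i].show(&table[i]);
	return 0;
}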
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..18746e6cb129 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -49,6 +49,8 @@
49static int i915_init_phys_hws(struct drm_device *dev) 49static int i915_init_phys_hws(struct drm_device *dev)
50{ 50{
51 drm_i915_private_t *dev_priv = dev->dev_private; 51 drm_i915_private_t *dev_priv = dev->dev_private;
52 struct intel_ring_buffer *ring = LP_RING(dev_priv);
53
52 /* Program Hardware Status Page */ 54 /* Program Hardware Status Page */
53 dev_priv->status_page_dmah = 55 dev_priv->status_page_dmah =
54 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE); 56 drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
57 DRM_ERROR("Can not allocate hardware status page\n"); 59 DRM_ERROR("Can not allocate hardware status page\n");
58 return -ENOMEM; 60 return -ENOMEM;
59 } 61 }
60 dev_priv->render_ring.status_page.page_addr 62 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
61 = dev_priv->status_page_dmah->vaddr;
62 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 63 dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
63 64
64 memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE); 65 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
65 66
66 if (INTEL_INFO(dev)->gen >= 4) 67 if (INTEL_INFO(dev)->gen >= 4)
67 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) & 68 dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
79static void i915_free_hws(struct drm_device *dev) 80static void i915_free_hws(struct drm_device *dev)
80{ 81{
81 drm_i915_private_t *dev_priv = dev->dev_private; 82 drm_i915_private_t *dev_priv = dev->dev_private;
83 struct intel_ring_buffer *ring = LP_RING(dev_priv);
84
82 if (dev_priv->status_page_dmah) { 85 if (dev_priv->status_page_dmah) {
83 drm_pci_free(dev, dev_priv->status_page_dmah); 86 drm_pci_free(dev, dev_priv->status_page_dmah);
84 dev_priv->status_page_dmah = NULL; 87 dev_priv->status_page_dmah = NULL;
85 } 88 }
86 89
87 if (dev_priv->render_ring.status_page.gfx_addr) { 90 if (ring->status_page.gfx_addr) {
88 dev_priv->render_ring.status_page.gfx_addr = 0; 91 ring->status_page.gfx_addr = 0;
89 drm_core_ioremapfree(&dev_priv->hws_map, dev); 92 drm_core_ioremapfree(&dev_priv->hws_map, dev);
90 } 93 }
91 94
@@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
97{ 100{
98 drm_i915_private_t *dev_priv = dev->dev_private; 101 drm_i915_private_t *dev_priv = dev->dev_private;
99 struct drm_i915_master_private *master_priv; 102 struct drm_i915_master_private *master_priv;
100 struct intel_ring_buffer *ring = &dev_priv->render_ring; 103 struct intel_ring_buffer *ring = LP_RING(dev_priv);
101 104
102 /* 105 /*
103 * We should never lose context on the ring with modesetting 106 * We should never lose context on the ring with modesetting
@@ -106,8 +109,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
106 if (drm_core_check_feature(dev, DRIVER_MODESET)) 109 if (drm_core_check_feature(dev, DRIVER_MODESET))
107 return; 110 return;
108 111
109 ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 112 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
110 ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 113 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
111 ring->space = ring->head - (ring->tail + 8); 114 ring->space = ring->head - (ring->tail + 8);
112 if (ring->space < 0) 115 if (ring->space < 0)
113 ring->space += ring->size; 116 ring->space += ring->size;
@@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
123static int i915_dma_cleanup(struct drm_device * dev) 126static int i915_dma_cleanup(struct drm_device * dev)
124{ 127{
125 drm_i915_private_t *dev_priv = dev->dev_private; 128 drm_i915_private_t *dev_priv = dev->dev_private;
129 int i;
130
126 /* Make sure interrupts are disabled here because the uninstall ioctl 131 /* Make sure interrupts are disabled here because the uninstall ioctl
127 * may not have been called from userspace and after dev_private 132 * may not have been called from userspace and after dev_private
128 * is freed, it's too late. 133 * is freed, it's too late.
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
131 drm_irq_uninstall(dev); 136 drm_irq_uninstall(dev);
132 137
133 mutex_lock(&dev->struct_mutex); 138 mutex_lock(&dev->struct_mutex);
134 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 139 for (i = 0; i < I915_NUM_RINGS; i++)
135 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 140 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
136 intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
137 mutex_unlock(&dev->struct_mutex); 141 mutex_unlock(&dev->struct_mutex);
138 142
139 /* Clear the HWS virtual address at teardown */ 143 /* Clear the HWS virtual address at teardown */
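
The cleanup path above swaps three hand-written intel_cleanup_ring_buffer() calls for a loop over the new ring array. A toy model of the refactor, with names simplified for illustration:

#include <stdio.h>

#define I915_NUM_RINGS 3

struct ring {
	const char *name;
};

static void cleanup_ring(struct ring *ring)
{
	printf("cleanup %s ring\n", ring->name);
}

int main(void)
{
	struct ring rings[I915_NUM_RINGS] = { {"render"}, {"bsd"}, {"blt"} };

	/* One loop replaces three explicit calls, so a new engine only
	 * needs a slot in the array, not another call site. */
	for (int i = 0; i < I915_NUM_RINGS; i++)
		cleanup_ring(&rings[i]);
	return 0;
}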
@@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
147{ 151{
148 drm_i915_private_t *dev_priv = dev->dev_private; 152 drm_i915_private_t *dev_priv = dev->dev_private;
149 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 153 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
154 struct intel_ring_buffer *ring = LP_RING(dev_priv);
150 155
151 master_priv->sarea = drm_getsarea(dev); 156 master_priv->sarea = drm_getsarea(dev);
152 if (master_priv->sarea) { 157 if (master_priv->sarea) {
@@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
157 } 162 }
158 163
159 if (init->ring_size != 0) { 164 if (init->ring_size != 0) {
160 if (dev_priv->render_ring.gem_object != NULL) { 165 if (ring->obj != NULL) {
161 i915_dma_cleanup(dev); 166 i915_dma_cleanup(dev);
162 DRM_ERROR("Client tried to initialize ringbuffer in " 167 DRM_ERROR("Client tried to initialize ringbuffer in "
163 "GEM mode\n"); 168 "GEM mode\n");
164 return -EINVAL; 169 return -EINVAL;
165 } 170 }
166 171
167 dev_priv->render_ring.size = init->ring_size; 172 ring->size = init->ring_size;
168 173
169 dev_priv->render_ring.map.offset = init->ring_start; 174 ring->map.offset = init->ring_start;
170 dev_priv->render_ring.map.size = init->ring_size; 175 ring->map.size = init->ring_size;
171 dev_priv->render_ring.map.type = 0; 176 ring->map.type = 0;
172 dev_priv->render_ring.map.flags = 0; 177 ring->map.flags = 0;
173 dev_priv->render_ring.map.mtrr = 0; 178 ring->map.mtrr = 0;
174 179
175 drm_core_ioremap_wc(&dev_priv->render_ring.map, dev); 180 drm_core_ioremap_wc(&ring->map, dev);
176 181
177 if (dev_priv->render_ring.map.handle == NULL) { 182 if (ring->map.handle == NULL) {
178 i915_dma_cleanup(dev); 183 i915_dma_cleanup(dev);
179 DRM_ERROR("can not ioremap virtual address for" 184 DRM_ERROR("can not ioremap virtual address for"
180 " ring buffer\n"); 185 " ring buffer\n");
@@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
182 } 187 }
183 } 188 }
184 189
185 dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle; 190 ring->virtual_start = ring->map.handle;
186 191
187 dev_priv->cpp = init->cpp; 192 dev_priv->cpp = init->cpp;
188 dev_priv->back_offset = init->back_offset; 193 dev_priv->back_offset = init->back_offset;
@@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
201static int i915_dma_resume(struct drm_device * dev) 206static int i915_dma_resume(struct drm_device * dev)
202{ 207{
203 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 208 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
209 struct intel_ring_buffer *ring = LP_RING(dev_priv);
204 210
205 struct intel_ring_buffer *ring;
206 DRM_DEBUG_DRIVER("%s\n", __func__); 211 DRM_DEBUG_DRIVER("%s\n", __func__);
207 212
208 ring = &dev_priv->render_ring;
209
210 if (ring->map.handle == NULL) { 213 if (ring->map.handle == NULL) {
211 DRM_ERROR("can not ioremap virtual address for" 214 DRM_ERROR("can not ioremap virtual address for"
212 " ring buffer\n"); 215 " ring buffer\n");
@@ -221,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
221 DRM_DEBUG_DRIVER("hw status page @ %p\n", 224 DRM_DEBUG_DRIVER("hw status page @ %p\n",
222 ring->status_page.page_addr); 225 ring->status_page.page_addr);
223 if (ring->status_page.gfx_addr != 0) 226 if (ring->status_page.gfx_addr != 0)
224 intel_ring_setup_status_page(dev, ring); 227 intel_ring_setup_status_page(ring);
225 else 228 else
226 I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 229 I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
227 230
@@ -263,7 +266,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
263 * instruction detected will be given a size of zero, which is a 266 * instruction detected will be given a size of zero, which is a
264 * signal to abort the rest of the buffer. 267 * signal to abort the rest of the buffer.
265 */ 268 */
266static int do_validate_cmd(int cmd) 269static int validate_cmd(int cmd)
267{ 270{
268 switch (((cmd >> 29) & 0x7)) { 271 switch (((cmd >> 29) & 0x7)) {
269 case 0x0: 272 case 0x0:
@@ -321,40 +324,27 @@ static int do_validate_cmd(int cmd)
321 return 0; 324 return 0;
322} 325}
323 326
324static int validate_cmd(int cmd)
325{
326 int ret = do_validate_cmd(cmd);
327
328/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
329
330 return ret;
331}
332
333static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords) 327static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
334{ 328{
335 drm_i915_private_t *dev_priv = dev->dev_private; 329 drm_i915_private_t *dev_priv = dev->dev_private;
336 int i; 330 int i, ret;
337 331
338 if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8) 332 if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
339 return -EINVAL; 333 return -EINVAL;
340 334
341 BEGIN_LP_RING((dwords+1)&~1);
342
343 for (i = 0; i < dwords;) { 335 for (i = 0; i < dwords;) {
344 int cmd, sz; 336 int sz = validate_cmd(buffer[i]);
345 337 if (sz == 0 || i + sz > dwords)
346 cmd = buffer[i];
347
348 if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
349 return -EINVAL; 338 return -EINVAL;
350 339 i += sz;
351 OUT_RING(cmd);
352
353 while (++i, --sz) {
354 OUT_RING(buffer[i]);
355 }
356 } 340 }
357 341
342 ret = BEGIN_LP_RING((dwords+1)&~1);
343 if (ret)
344 return ret;
345
346 for (i = 0; i < dwords; i++)
347 OUT_RING(buffer[i]);
358 if (dwords & 1) 348 if (dwords & 1)
359 OUT_RING(0); 349 OUT_RING(0);
360 350
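
The rewritten i915_emit_cmds() validates the whole user buffer before reserving ring space, so a malformed command can no longer abort midway between BEGIN_LP_RING and ADVANCE_LP_RING, and the reservation is rounded up to an even dword count. A compilable sketch of the two-pass shape; validate() is a stand-in for validate_cmd(), not the driver's logic:

#include <stdio.h>

/* Stand-in validator: pretend every nonzero command is 1 dword long. */
static int validate(int cmd) { return cmd ? 1 : 0; }

int main(void)
{
	int buffer[] = { 0x1, 0x2, 0x3 };
	int dwords = 3, i = 0;

	/* Pass 1: validate everything before touching the ring. */
	while (i < dwords) {
		int sz = validate(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -1;	/* reject with nothing emitted */
		i += sz;
	}

	/* Pass 2: reserve an even number of dwords, padding odd counts,
	 * exactly as (dwords+1)&~1 does in the hunk above. */
	printf("reserve %d dwords for %d commands\n", (dwords + 1) & ~1, dwords);
	return 0;
}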
@@ -365,34 +355,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
365 355
366int 356int
367i915_emit_box(struct drm_device *dev, 357i915_emit_box(struct drm_device *dev,
368 struct drm_clip_rect *boxes, 358 struct drm_clip_rect *box,
369 int i, int DR1, int DR4) 359 int DR1, int DR4)
370{ 360{
371 struct drm_clip_rect box = boxes[i]; 361 struct drm_i915_private *dev_priv = dev->dev_private;
362 int ret;
372 363
373 if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { 364 if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
365 box->y2 <= 0 || box->x2 <= 0) {
374 DRM_ERROR("Bad box %d,%d..%d,%d\n", 366 DRM_ERROR("Bad box %d,%d..%d,%d\n",
375 box.x1, box.y1, box.x2, box.y2); 367 box->x1, box->y1, box->x2, box->y2);
376 return -EINVAL; 368 return -EINVAL;
377 } 369 }
378 370
379 if (INTEL_INFO(dev)->gen >= 4) { 371 if (INTEL_INFO(dev)->gen >= 4) {
380 BEGIN_LP_RING(4); 372 ret = BEGIN_LP_RING(4);
373 if (ret)
374 return ret;
375
381 OUT_RING(GFX_OP_DRAWRECT_INFO_I965); 376 OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
382 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 377 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
383 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 378 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
384 OUT_RING(DR4); 379 OUT_RING(DR4);
385 ADVANCE_LP_RING();
386 } else { 380 } else {
387 BEGIN_LP_RING(6); 381 ret = BEGIN_LP_RING(6);
382 if (ret)
383 return ret;
384
388 OUT_RING(GFX_OP_DRAWRECT_INFO); 385 OUT_RING(GFX_OP_DRAWRECT_INFO);
389 OUT_RING(DR1); 386 OUT_RING(DR1);
390 OUT_RING((box.x1 & 0xffff) | (box.y1 << 16)); 387 OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
391 OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16)); 388 OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
392 OUT_RING(DR4); 389 OUT_RING(DR4);
393 OUT_RING(0); 390 OUT_RING(0);
394 ADVANCE_LP_RING();
395 } 391 }
392 ADVANCE_LP_RING();
396 393
397 return 0; 394 return 0;
398} 395}
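
BEGIN_LP_RING() now returns an error instead of assuming space is available, so each branch checks the reservation and the two paths share a single trailing ADVANCE_LP_RING(). A toy, compilable model of the fail-fast reservation; the ring bookkeeping and opcode below are fabricated:

#include <stdio.h>

static int space = 4;	/* free dwords left in a pretend ring */

static int begin(int n)
{
	if (n > space)
		return -1;	/* the real macro reports -EBUSY etc. */
	space -= n;
	return 0;
}

static void out_ring(unsigned int v)
{
	printf("emit %08x\n", v);
}

static int emit_rect(int x1, int y1, int x2, int y2)
{
	int ret = begin(4);
	if (ret)
		return ret;	/* fail before any partial emission */
	out_ring(0x1);	/* fabricated opcode slot */
	out_ring((x1 & 0xffff) | (y1 << 16));
	out_ring(((x2 - 1) & 0xffff) | ((y2 - 1) << 16));
	out_ring(0);
	return 0;
}

int main(void)
{
	printf("first:  %d\n", emit_rect(0, 0, 640, 480));
	printf("second: %d\n", emit_rect(0, 0, 640, 480));	/* ring full */
	return 0;
}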
@@ -412,12 +409,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
412 if (master_priv->sarea_priv) 409 if (master_priv->sarea_priv)
413 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 410 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
414 411
415 BEGIN_LP_RING(4); 412 if (BEGIN_LP_RING(4) == 0) {
416 OUT_RING(MI_STORE_DWORD_INDEX); 413 OUT_RING(MI_STORE_DWORD_INDEX);
417 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 414 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
418 OUT_RING(dev_priv->counter); 415 OUT_RING(dev_priv->counter);
419 OUT_RING(0); 416 OUT_RING(0);
420 ADVANCE_LP_RING(); 417 ADVANCE_LP_RING();
418 }
421} 419}
422 420
423static int i915_dispatch_cmdbuffer(struct drm_device * dev, 421static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -439,7 +437,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
439 437
440 for (i = 0; i < count; i++) { 438 for (i = 0; i < count; i++) {
441 if (i < nbox) { 439 if (i < nbox) {
442 ret = i915_emit_box(dev, cliprects, i, 440 ret = i915_emit_box(dev, &cliprects[i],
443 cmd->DR1, cmd->DR4); 441 cmd->DR1, cmd->DR4);
444 if (ret) 442 if (ret)
445 return ret; 443 return ret;
@@ -458,8 +456,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
458 drm_i915_batchbuffer_t * batch, 456 drm_i915_batchbuffer_t * batch,
459 struct drm_clip_rect *cliprects) 457 struct drm_clip_rect *cliprects)
460{ 458{
459 struct drm_i915_private *dev_priv = dev->dev_private;
461 int nbox = batch->num_cliprects; 460 int nbox = batch->num_cliprects;
462 int i = 0, count; 461 int i, count, ret;
463 462
464 if ((batch->start | batch->used) & 0x7) { 463 if ((batch->start | batch->used) & 0x7) {
465 DRM_ERROR("alignment"); 464 DRM_ERROR("alignment");
@@ -469,17 +468,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
469 i915_kernel_lost_context(dev); 468 i915_kernel_lost_context(dev);
470 469
471 count = nbox ? nbox : 1; 470 count = nbox ? nbox : 1;
472
473 for (i = 0; i < count; i++) { 471 for (i = 0; i < count; i++) {
474 if (i < nbox) { 472 if (i < nbox) {
475 int ret = i915_emit_box(dev, cliprects, i, 473 ret = i915_emit_box(dev, &cliprects[i],
476 batch->DR1, batch->DR4); 474 batch->DR1, batch->DR4);
477 if (ret) 475 if (ret)
478 return ret; 476 return ret;
479 } 477 }
480 478
481 if (!IS_I830(dev) && !IS_845G(dev)) { 479 if (!IS_I830(dev) && !IS_845G(dev)) {
482 BEGIN_LP_RING(2); 480 ret = BEGIN_LP_RING(2);
481 if (ret)
482 return ret;
483
483 if (INTEL_INFO(dev)->gen >= 4) { 484 if (INTEL_INFO(dev)->gen >= 4) {
484 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); 485 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
485 OUT_RING(batch->start); 486 OUT_RING(batch->start);
@@ -487,26 +488,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
487 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6)); 488 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
488 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 489 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
489 } 490 }
490 ADVANCE_LP_RING();
491 } else { 491 } else {
492 BEGIN_LP_RING(4); 492 ret = BEGIN_LP_RING(4);
493 if (ret)
494 return ret;
495
493 OUT_RING(MI_BATCH_BUFFER); 496 OUT_RING(MI_BATCH_BUFFER);
494 OUT_RING(batch->start | MI_BATCH_NON_SECURE); 497 OUT_RING(batch->start | MI_BATCH_NON_SECURE);
495 OUT_RING(batch->start + batch->used - 4); 498 OUT_RING(batch->start + batch->used - 4);
496 OUT_RING(0); 499 OUT_RING(0);
497 ADVANCE_LP_RING();
498 } 500 }
501 ADVANCE_LP_RING();
499 } 502 }
500 503
501 504
502 if (IS_G4X(dev) || IS_GEN5(dev)) { 505 if (IS_G4X(dev) || IS_GEN5(dev)) {
503 BEGIN_LP_RING(2); 506 if (BEGIN_LP_RING(2) == 0) {
504 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP); 507 OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
505 OUT_RING(MI_NOOP); 508 OUT_RING(MI_NOOP);
506 ADVANCE_LP_RING(); 509 ADVANCE_LP_RING();
510 }
507 } 511 }
508 i915_emit_breadcrumb(dev);
509 512
513 i915_emit_breadcrumb(dev);
510 return 0; 514 return 0;
511} 515}
512 516
@@ -515,6 +519,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
515 drm_i915_private_t *dev_priv = dev->dev_private; 519 drm_i915_private_t *dev_priv = dev->dev_private;
516 struct drm_i915_master_private *master_priv = 520 struct drm_i915_master_private *master_priv =
517 dev->primary->master->driver_priv; 521 dev->primary->master->driver_priv;
522 int ret;
518 523
519 if (!master_priv->sarea_priv) 524 if (!master_priv->sarea_priv)
520 return -EINVAL; 525 return -EINVAL;
@@ -526,12 +531,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
526 531
527 i915_kernel_lost_context(dev); 532 i915_kernel_lost_context(dev);
528 533
529 BEGIN_LP_RING(2); 534 ret = BEGIN_LP_RING(10);
535 if (ret)
536 return ret;
537
530 OUT_RING(MI_FLUSH | MI_READ_FLUSH); 538 OUT_RING(MI_FLUSH | MI_READ_FLUSH);
531 OUT_RING(0); 539 OUT_RING(0);
532 ADVANCE_LP_RING();
533 540
534 BEGIN_LP_RING(6);
535 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP); 541 OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
536 OUT_RING(0); 542 OUT_RING(0);
537 if (dev_priv->current_page == 0) { 543 if (dev_priv->current_page == 0) {
@@ -542,33 +548,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
542 dev_priv->current_page = 0; 548 dev_priv->current_page = 0;
543 } 549 }
544 OUT_RING(0); 550 OUT_RING(0);
545 ADVANCE_LP_RING();
546 551
547 BEGIN_LP_RING(2);
548 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP); 552 OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
549 OUT_RING(0); 553 OUT_RING(0);
554
550 ADVANCE_LP_RING(); 555 ADVANCE_LP_RING();
551 556
552 master_priv->sarea_priv->last_enqueue = dev_priv->counter++; 557 master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
553 558
554 BEGIN_LP_RING(4); 559 if (BEGIN_LP_RING(4) == 0) {
555 OUT_RING(MI_STORE_DWORD_INDEX); 560 OUT_RING(MI_STORE_DWORD_INDEX);
556 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 561 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
557 OUT_RING(dev_priv->counter); 562 OUT_RING(dev_priv->counter);
558 OUT_RING(0); 563 OUT_RING(0);
559 ADVANCE_LP_RING(); 564 ADVANCE_LP_RING();
565 }
560 566
561 master_priv->sarea_priv->pf_current_page = dev_priv->current_page; 567 master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
562 return 0; 568 return 0;
563} 569}
564 570
565static int i915_quiescent(struct drm_device * dev) 571static int i915_quiescent(struct drm_device *dev)
566{ 572{
567 drm_i915_private_t *dev_priv = dev->dev_private; 573 struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
568 574
569 i915_kernel_lost_context(dev); 575 i915_kernel_lost_context(dev);
570 return intel_wait_ring_buffer(dev, &dev_priv->render_ring, 576 return intel_wait_ring_buffer(ring, ring->size - 8);
571 dev_priv->render_ring.size - 8);
572} 577}
573 578
574static int i915_flush_ioctl(struct drm_device *dev, void *data, 579static int i915_flush_ioctl(struct drm_device *dev, void *data,
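
The flip path above folds what were three separate BEGIN/ADVANCE sequences (2 + 6 + 2 dwords) into one 10-dword reservation, and keeps the two-buffer bookkeeping where current_page picks the next scanout base. A standalone sketch of just the page toggle, with invented offsets:

#include <stdio.h>

int main(void)
{
	unsigned front_offset = 0x000000, back_offset = 0x400000;
	int current_page = 0;

	for (int flip = 0; flip < 4; flip++) {
		/* Mirrors the hunk: page 0 flips to the back buffer,
		 * page 1 flips back to the front buffer. */
		unsigned next = current_page == 0 ? back_offset : front_offset;
		printf("flip %d: display base <- %#08x\n", flip, next);
		current_page ^= 1;
	}
	return 0;
}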
@@ -767,6 +772,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
767 case I915_PARAM_HAS_BLT: 772 case I915_PARAM_HAS_BLT:
768 value = HAS_BLT(dev); 773 value = HAS_BLT(dev);
769 break; 774 break;
775 case I915_PARAM_HAS_RELAXED_FENCING:
776 value = 1;
777 break;
778 case I915_PARAM_HAS_COHERENT_RINGS:
779 value = 1;
780 break;
781 case I915_PARAM_HAS_EXEC_CONSTANTS:
782 value = INTEL_INFO(dev)->gen >= 4;
783 break;
770 default: 784 default:
771 DRM_DEBUG_DRIVER("Unknown parameter %d\n", 785 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
772 param->param); 786 param->param);
@@ -822,7 +836,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
822{ 836{
823 drm_i915_private_t *dev_priv = dev->dev_private; 837 drm_i915_private_t *dev_priv = dev->dev_private;
824 drm_i915_hws_addr_t *hws = data; 838 drm_i915_hws_addr_t *hws = data;
825 struct intel_ring_buffer *ring = &dev_priv->render_ring; 839 struct intel_ring_buffer *ring = LP_RING(dev_priv);
826 840
827 if (!I915_NEED_GFX_HWS(dev)) 841 if (!I915_NEED_GFX_HWS(dev))
828 return -EINVAL; 842 return -EINVAL;
@@ -1001,73 +1015,47 @@ intel_teardown_mchbar(struct drm_device *dev)
 #define PTE_VALID			(1 << 0)
 
 /**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
  * @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
  *
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
  */
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
-				      unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
 {
-	unsigned long *gtt;
-	unsigned long entry, phys;
-	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
-	int gtt_offset, gtt_size;
-
-	if (INTEL_INFO(dev)->gen >= 4) {
-		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
-			gtt_offset = 2*1024*1024;
-			gtt_size = 2*1024*1024;
-		} else {
-			gtt_offset = 512*1024;
-			gtt_size = 512*1024;
-		}
-	} else {
-		gtt_bar = 3;
-		gtt_offset = 0;
-		gtt_size = pci_resource_len(dev->pdev, gtt_bar);
-	}
-
-	gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
-			 gtt_size);
-	if (!gtt) {
-		DRM_ERROR("ioremap of GTT failed\n");
-		return 0;
-	}
-
-	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
-	/* Mask out these reserved bits on this hardware. */
-	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
-		entry &= ~PTE_ADDRESS_MASK_HIGH;
-
-	/* If it's not a mapping type we know, then bail. */
-	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
-	    (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
-		iounmap(gtt);
-		return 0;
-	}
-
-	if (!(entry & PTE_VALID)) {
-		DRM_ERROR("bad GTT entry in stolen space\n");
-		iounmap(gtt);
-		return 0;
-	}
-
-	iounmap(gtt);
-
-	phys =(entry & PTE_ADDRESS_MASK) |
-	      ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
-	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
-	return phys;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct pci_dev *pdev = dev_priv->bridge_dev;
+	u32 base;
+
+#if 0
+	/* On the machines I have tested the Graphics Base of Stolen Memory
+	 * is unreliable, so compute the base by subtracting the stolen memory
+	 * from the Top of Low Usable DRAM which is where the BIOS places
+	 * the graphics stolen memory.
+	 */
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		/* top 32bits are reserved = 0 */
+		pci_read_config_dword(pdev, 0xA4, &base);
+	} else {
+		/* XXX presume 8xx is the same as i915 */
+		pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+	}
+#else
+	if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+		u16 val;
+		pci_read_config_word(pdev, 0xb0, &val);
+		base = val >> 4 << 20;
+	} else {
+		u8 val;
+		pci_read_config_byte(pdev, 0x9c, &val);
+		base = val >> 3 << 27;
+	}
+	base -= dev_priv->mm.gtt->stolen_size;
+#endif
+
+	return base + offset;
 }
 
 static void i915_warn_stolen(struct drm_device *dev)
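
i915_stolen_to_phys() trades PTE walking for a config-space read of the stolen-memory base: gen4+ and G33 keep the base in MiB units at bits 15:4 of a 16-bit register, older parts keep it in 128 MiB units at bits 7:3 of a byte register, and the stolen offset is then simply added on. The shifts are easy to sanity-check in isolation; the register values below are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t bsm = 0x7ff0;	/* gen4+/G33: base in MiB at bits 15:4 */
	uint8_t  old = 0x20;	/* pre-gen4: 128 MiB units at bits 7:3 */

	printf("gen4+ stolen base:    0x%08x\n", (uint32_t)(bsm >> 4) << 20);
	printf("pre-gen4 stolen base: 0x%08x\n", (uint32_t)(old >> 3) << 27);
	return 0;
}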
@@ -1083,54 +1071,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1083 unsigned long cfb_base; 1071 unsigned long cfb_base;
1084 unsigned long ll_base = 0; 1072 unsigned long ll_base = 0;
1085 1073
1086 /* Leave 1M for line length buffer & misc. */ 1074 compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
1087 compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0); 1075 if (compressed_fb)
1088 if (!compressed_fb) { 1076 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
1089 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1077 if (!compressed_fb)
1090 i915_warn_stolen(dev); 1078 goto err;
1091 return;
1092 }
1093 1079
1094 compressed_fb = drm_mm_get_block(compressed_fb, size, 4096); 1080 cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
1095 if (!compressed_fb) { 1081 if (!cfb_base)
1096 i915_warn_stolen(dev); 1082 goto err_fb;
1097 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1098 return;
1099 }
1100 1083
1101 cfb_base = i915_gtt_to_phys(dev, compressed_fb->start); 1084 if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
1102 if (!cfb_base) { 1085 compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
1103 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n"); 1086 4096, 4096, 0);
1104 drm_mm_put_block(compressed_fb); 1087 if (compressed_llb)
1105 } 1088 compressed_llb = drm_mm_get_block(compressed_llb,
1089 4096, 4096);
1090 if (!compressed_llb)
1091 goto err_fb;
1106 1092
1107 if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) { 1093 ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
1108 compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096, 1094 if (!ll_base)
1109 4096, 0); 1095 goto err_llb;
1110 if (!compressed_llb) {
1111 i915_warn_stolen(dev);
1112 return;
1113 }
1114
1115 compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
1116 if (!compressed_llb) {
1117 i915_warn_stolen(dev);
1118 return;
1119 }
1120
1121 ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
1122 if (!ll_base) {
1123 DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
1124 drm_mm_put_block(compressed_fb);
1125 drm_mm_put_block(compressed_llb);
1126 }
1127 } 1096 }
1128 1097
1129 dev_priv->cfb_size = size; 1098 dev_priv->cfb_size = size;
1130 1099
1131 intel_disable_fbc(dev); 1100 intel_disable_fbc(dev);
1132 dev_priv->compressed_fb = compressed_fb; 1101 dev_priv->compressed_fb = compressed_fb;
1133 if (IS_IRONLAKE_M(dev)) 1102 if (HAS_PCH_SPLIT(dev))
1134 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); 1103 I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
1135 else if (IS_GM45(dev)) { 1104 else if (IS_GM45(dev)) {
1136 I915_WRITE(DPFC_CB_BASE, compressed_fb->start); 1105 I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1140,8 +1109,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
1140 dev_priv->compressed_llb = compressed_llb; 1109 dev_priv->compressed_llb = compressed_llb;
1141 } 1110 }
1142 1111
1143 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base, 1112 DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
1144 ll_base, size >> 20); 1113 cfb_base, ll_base, size >> 20);
1114 return;
1115
1116err_llb:
1117 drm_mm_put_block(compressed_llb);
1118err_fb:
1119 drm_mm_put_block(compressed_fb);
1120err:
1121 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
1122 i915_warn_stolen(dev);
1145} 1123}
1146 1124
1147static void i915_cleanup_compression(struct drm_device *dev) 1125static void i915_cleanup_compression(struct drm_device *dev)
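
The rewrite above also replaces the repeated warn-and-return blocks with a single goto-unwind tail (err_llb -> err_fb -> err), the usual kernel shape for multi-step setup where each label releases what the earlier steps acquired. A minimal userspace model of that shape, assuming two ordered allocations:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *fb = malloc(16), *llb = NULL;

	if (!fb)
		goto err;
	llb = malloc(16);
	if (!llb)
		goto err_fb;	/* undo only what already succeeded */

	printf("both allocations succeeded\n");
	free(llb);
	free(fb);
	return 0;

err_fb:
	free(fb);
err:
	printf("setup failed, state unwound\n");
	return 1;
}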
@@ -1192,17 +1170,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
1192 return can_switch; 1170 return can_switch;
1193} 1171}
1194 1172
1195static int i915_load_modeset_init(struct drm_device *dev, 1173static int i915_load_modeset_init(struct drm_device *dev)
1196 unsigned long prealloc_size,
1197 unsigned long agp_size)
1198{ 1174{
1199 struct drm_i915_private *dev_priv = dev->dev_private; 1175 struct drm_i915_private *dev_priv = dev->dev_private;
1176 unsigned long prealloc_size, gtt_size, mappable_size;
1200 int ret = 0; 1177 int ret = 0;
1201 1178
1202 /* Basic memrange allocator for stolen space (aka mm.vram) */ 1179 prealloc_size = dev_priv->mm.gtt->stolen_size;
1203 drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size); 1180 gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
1181 mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1182
1183 /* Basic memrange allocator for stolen space */
1184 drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
1204 1185
1205 /* Let GEM Manage from end of prealloc space to end of aperture. 1186 /* Let GEM Manage all of the aperture.
1206 * 1187 *
1207 * However, leave one page at the end still bound to the scratch page. 1188 * However, leave one page at the end still bound to the scratch page.
1208 * There are a number of places where the hardware apparently 1189 * There are a number of places where the hardware apparently
@@ -1211,7 +1192,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
1211 * at the last page of the aperture. One page should be enough to 1192 * at the last page of the aperture. One page should be enough to
1212 * keep any prefetching inside of the aperture. 1193 * keep any prefetching inside of the aperture.
1213 */ 1194 */
1214 i915_gem_do_init(dev, prealloc_size, agp_size - 4096); 1195 i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
1215 1196
1216 mutex_lock(&dev->struct_mutex); 1197 mutex_lock(&dev->struct_mutex);
1217 ret = i915_gem_init_ringbuffer(dev); 1198 ret = i915_gem_init_ringbuffer(dev);
@@ -1223,16 +1204,17 @@ static int i915_load_modeset_init(struct drm_device *dev,
1223 if (I915_HAS_FBC(dev) && i915_powersave) { 1204 if (I915_HAS_FBC(dev) && i915_powersave) {
1224 int cfb_size; 1205 int cfb_size;
1225 1206
1226 /* Try to get an 8M buffer... */ 1207 /* Leave 1M for line length buffer & misc. */
1227 if (prealloc_size > (9*1024*1024)) 1208
1228 cfb_size = 8*1024*1024; 1209 /* Try to get a 32M buffer... */
1210 if (prealloc_size > (36*1024*1024))
1211 cfb_size = 32*1024*1024;
1229 else /* fall back to 7/8 of the stolen space */ 1212 else /* fall back to 7/8 of the stolen space */
1230 cfb_size = prealloc_size * 7 / 8; 1213 cfb_size = prealloc_size * 7 / 8;
1231 i915_setup_compression(dev, cfb_size); 1214 i915_setup_compression(dev, cfb_size);
1232 } 1215 }
1233 1216
1234 /* Allow hardware batchbuffers unless told otherwise. 1217 /* Allow hardware batchbuffers unless told otherwise. */
1235 */
1236 dev_priv->allow_batchbuffer = 1; 1218 dev_priv->allow_batchbuffer = 1;
1237 1219
1238 ret = intel_parse_bios(dev); 1220 ret = intel_parse_bios(dev);
@@ -1422,152 +1404,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
1422 } 1404 }
1423} 1405}
1424 1406
1425struct v_table { 1407static const struct cparams {
1426 u8 vid; 1408 u16 i;
1427 unsigned long vd; /* in .1 mil */ 1409 u16 t;
1428 unsigned long vm; /* in .1 mil */ 1410 u16 m;
1429 u8 pvid; 1411 u16 c;
1430}; 1412} cparams[] = {
1431
1432static struct v_table v_table[] = {
1433 { 0, 16125, 15000, 0x7f, },
1434 { 1, 16000, 14875, 0x7e, },
1435 { 2, 15875, 14750, 0x7d, },
1436 { 3, 15750, 14625, 0x7c, },
1437 { 4, 15625, 14500, 0x7b, },
1438 { 5, 15500, 14375, 0x7a, },
1439 { 6, 15375, 14250, 0x79, },
1440 { 7, 15250, 14125, 0x78, },
1441 { 8, 15125, 14000, 0x77, },
1442 { 9, 15000, 13875, 0x76, },
1443 { 10, 14875, 13750, 0x75, },
1444 { 11, 14750, 13625, 0x74, },
1445 { 12, 14625, 13500, 0x73, },
1446 { 13, 14500, 13375, 0x72, },
1447 { 14, 14375, 13250, 0x71, },
1448 { 15, 14250, 13125, 0x70, },
1449 { 16, 14125, 13000, 0x6f, },
1450 { 17, 14000, 12875, 0x6e, },
1451 { 18, 13875, 12750, 0x6d, },
1452 { 19, 13750, 12625, 0x6c, },
1453 { 20, 13625, 12500, 0x6b, },
1454 { 21, 13500, 12375, 0x6a, },
1455 { 22, 13375, 12250, 0x69, },
1456 { 23, 13250, 12125, 0x68, },
1457 { 24, 13125, 12000, 0x67, },
1458 { 25, 13000, 11875, 0x66, },
1459 { 26, 12875, 11750, 0x65, },
1460 { 27, 12750, 11625, 0x64, },
1461 { 28, 12625, 11500, 0x63, },
1462 { 29, 12500, 11375, 0x62, },
1463 { 30, 12375, 11250, 0x61, },
1464 { 31, 12250, 11125, 0x60, },
1465 { 32, 12125, 11000, 0x5f, },
1466 { 33, 12000, 10875, 0x5e, },
1467 { 34, 11875, 10750, 0x5d, },
1468 { 35, 11750, 10625, 0x5c, },
1469 { 36, 11625, 10500, 0x5b, },
1470 { 37, 11500, 10375, 0x5a, },
1471 { 38, 11375, 10250, 0x59, },
1472 { 39, 11250, 10125, 0x58, },
1473 { 40, 11125, 10000, 0x57, },
1474 { 41, 11000, 9875, 0x56, },
1475 { 42, 10875, 9750, 0x55, },
1476 { 43, 10750, 9625, 0x54, },
1477 { 44, 10625, 9500, 0x53, },
1478 { 45, 10500, 9375, 0x52, },
1479 { 46, 10375, 9250, 0x51, },
1480 { 47, 10250, 9125, 0x50, },
1481 { 48, 10125, 9000, 0x4f, },
1482 { 49, 10000, 8875, 0x4e, },
1483 { 50, 9875, 8750, 0x4d, },
1484 { 51, 9750, 8625, 0x4c, },
1485 { 52, 9625, 8500, 0x4b, },
1486 { 53, 9500, 8375, 0x4a, },
1487 { 54, 9375, 8250, 0x49, },
1488 { 55, 9250, 8125, 0x48, },
1489 { 56, 9125, 8000, 0x47, },
1490 { 57, 9000, 7875, 0x46, },
1491 { 58, 8875, 7750, 0x45, },
1492 { 59, 8750, 7625, 0x44, },
1493 { 60, 8625, 7500, 0x43, },
1494 { 61, 8500, 7375, 0x42, },
1495 { 62, 8375, 7250, 0x41, },
1496 { 63, 8250, 7125, 0x40, },
1497 { 64, 8125, 7000, 0x3f, },
1498 { 65, 8000, 6875, 0x3e, },
1499 { 66, 7875, 6750, 0x3d, },
1500 { 67, 7750, 6625, 0x3c, },
1501 { 68, 7625, 6500, 0x3b, },
1502 { 69, 7500, 6375, 0x3a, },
1503 { 70, 7375, 6250, 0x39, },
1504 { 71, 7250, 6125, 0x38, },
1505 { 72, 7125, 6000, 0x37, },
1506 { 73, 7000, 5875, 0x36, },
1507 { 74, 6875, 5750, 0x35, },
1508 { 75, 6750, 5625, 0x34, },
1509 { 76, 6625, 5500, 0x33, },
1510 { 77, 6500, 5375, 0x32, },
1511 { 78, 6375, 5250, 0x31, },
1512 { 79, 6250, 5125, 0x30, },
1513 { 80, 6125, 5000, 0x2f, },
1514 { 81, 6000, 4875, 0x2e, },
1515 { 82, 5875, 4750, 0x2d, },
1516 { 83, 5750, 4625, 0x2c, },
1517 { 84, 5625, 4500, 0x2b, },
1518 { 85, 5500, 4375, 0x2a, },
1519 { 86, 5375, 4250, 0x29, },
1520 { 87, 5250, 4125, 0x28, },
1521 { 88, 5125, 4000, 0x27, },
1522 { 89, 5000, 3875, 0x26, },
1523 { 90, 4875, 3750, 0x25, },
1524 { 91, 4750, 3625, 0x24, },
1525 { 92, 4625, 3500, 0x23, },
1526 { 93, 4500, 3375, 0x22, },
1527 { 94, 4375, 3250, 0x21, },
1528 { 95, 4250, 3125, 0x20, },
1529 { 96, 4125, 3000, 0x1f, },
1530 { 97, 4125, 3000, 0x1e, },
1531 { 98, 4125, 3000, 0x1d, },
1532 { 99, 4125, 3000, 0x1c, },
1533 { 100, 4125, 3000, 0x1b, },
1534 { 101, 4125, 3000, 0x1a, },
1535 { 102, 4125, 3000, 0x19, },
1536 { 103, 4125, 3000, 0x18, },
1537 { 104, 4125, 3000, 0x17, },
1538 { 105, 4125, 3000, 0x16, },
1539 { 106, 4125, 3000, 0x15, },
1540 { 107, 4125, 3000, 0x14, },
1541 { 108, 4125, 3000, 0x13, },
1542 { 109, 4125, 3000, 0x12, },
1543 { 110, 4125, 3000, 0x11, },
1544 { 111, 4125, 3000, 0x10, },
1545 { 112, 4125, 3000, 0x0f, },
1546 { 113, 4125, 3000, 0x0e, },
1547 { 114, 4125, 3000, 0x0d, },
1548 { 115, 4125, 3000, 0x0c, },
1549 { 116, 4125, 3000, 0x0b, },
1550 { 117, 4125, 3000, 0x0a, },
1551 { 118, 4125, 3000, 0x09, },
1552 { 119, 4125, 3000, 0x08, },
1553 { 120, 1125, 0, 0x07, },
1554 { 121, 1000, 0, 0x06, },
1555 { 122, 875, 0, 0x05, },
1556 { 123, 750, 0, 0x04, },
1557 { 124, 625, 0, 0x03, },
1558 { 125, 500, 0, 0x02, },
1559 { 126, 375, 0, 0x01, },
1560 { 127, 0, 0, 0x00, },
1561};
1562
1563struct cparams {
1564 int i;
1565 int t;
1566 int m;
1567 int c;
1568};
1569
1570static struct cparams cparams[] = {
1571 { 1, 1333, 301, 28664 }, 1413 { 1, 1333, 301, 28664 },
1572 { 1, 1066, 294, 24460 }, 1414 { 1, 1066, 294, 24460 },
1573 { 1, 800, 294, 25192 }, 1415 { 1, 800, 294, 25192 },
@@ -1633,21 +1475,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
1633 return ((m * x) / 127) - b; 1475 return ((m * x) / 127) - b;
1634} 1476}
1635 1477
1636static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) 1478static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
1637{ 1479{
1638 unsigned long val = 0; 1480 static const struct v_table {
1639 int i; 1481 u16 vd; /* in .1 mil */
1640 1482 u16 vm; /* in .1 mil */
1641 for (i = 0; i < ARRAY_SIZE(v_table); i++) { 1483 } v_table[] = {
1642 if (v_table[i].pvid == pxvid) { 1484 { 0, 0, },
1643 if (IS_MOBILE(dev_priv->dev)) 1485 { 375, 0, },
1644 val = v_table[i].vm; 1486 { 500, 0, },
1645 else 1487 { 625, 0, },
1646 val = v_table[i].vd; 1488 { 750, 0, },
1647 } 1489 { 875, 0, },
1648 } 1490 { 1000, 0, },
1649 1491 { 1125, 0, },
1650 return val; 1492 { 4125, 3000, },
1493 { 4125, 3000, },
1494 { 4125, 3000, },
1495 { 4125, 3000, },
1496 { 4125, 3000, },
1497 { 4125, 3000, },
1498 { 4125, 3000, },
1499 { 4125, 3000, },
1500 { 4125, 3000, },
1501 { 4125, 3000, },
1502 { 4125, 3000, },
1503 { 4125, 3000, },
1504 { 4125, 3000, },
1505 { 4125, 3000, },
1506 { 4125, 3000, },
1507 { 4125, 3000, },
1508 { 4125, 3000, },
1509 { 4125, 3000, },
1510 { 4125, 3000, },
1511 { 4125, 3000, },
1512 { 4125, 3000, },
1513 { 4125, 3000, },
1514 { 4125, 3000, },
1515 { 4125, 3000, },
1516 { 4250, 3125, },
1517 { 4375, 3250, },
1518 { 4500, 3375, },
1519 { 4625, 3500, },
1520 { 4750, 3625, },
1521 { 4875, 3750, },
1522 { 5000, 3875, },
1523 { 5125, 4000, },
1524 { 5250, 4125, },
1525 { 5375, 4250, },
1526 { 5500, 4375, },
1527 { 5625, 4500, },
1528 { 5750, 4625, },
1529 { 5875, 4750, },
1530 { 6000, 4875, },
1531 { 6125, 5000, },
1532 { 6250, 5125, },
1533 { 6375, 5250, },
1534 { 6500, 5375, },
1535 { 6625, 5500, },
1536 { 6750, 5625, },
1537 { 6875, 5750, },
1538 { 7000, 5875, },
1539 { 7125, 6000, },
1540 { 7250, 6125, },
1541 { 7375, 6250, },
1542 { 7500, 6375, },
1543 { 7625, 6500, },
1544 { 7750, 6625, },
1545 { 7875, 6750, },
1546 { 8000, 6875, },
1547 { 8125, 7000, },
1548 { 8250, 7125, },
1549 { 8375, 7250, },
1550 { 8500, 7375, },
1551 { 8625, 7500, },
1552 { 8750, 7625, },
1553 { 8875, 7750, },
1554 { 9000, 7875, },
1555 { 9125, 8000, },
1556 { 9250, 8125, },
1557 { 9375, 8250, },
1558 { 9500, 8375, },
1559 { 9625, 8500, },
1560 { 9750, 8625, },
1561 { 9875, 8750, },
1562 { 10000, 8875, },
1563 { 10125, 9000, },
1564 { 10250, 9125, },
1565 { 10375, 9250, },
1566 { 10500, 9375, },
1567 { 10625, 9500, },
1568 { 10750, 9625, },
1569 { 10875, 9750, },
1570 { 11000, 9875, },
1571 { 11125, 10000, },
1572 { 11250, 10125, },
1573 { 11375, 10250, },
1574 { 11500, 10375, },
1575 { 11625, 10500, },
1576 { 11750, 10625, },
1577 { 11875, 10750, },
1578 { 12000, 10875, },
1579 { 12125, 11000, },
1580 { 12250, 11125, },
1581 { 12375, 11250, },
1582 { 12500, 11375, },
1583 { 12625, 11500, },
1584 { 12750, 11625, },
1585 { 12875, 11750, },
1586 { 13000, 11875, },
1587 { 13125, 12000, },
1588 { 13250, 12125, },
1589 { 13375, 12250, },
1590 { 13500, 12375, },
1591 { 13625, 12500, },
1592 { 13750, 12625, },
1593 { 13875, 12750, },
1594 { 14000, 12875, },
1595 { 14125, 13000, },
1596 { 14250, 13125, },
1597 { 14375, 13250, },
1598 { 14500, 13375, },
1599 { 14625, 13500, },
1600 { 14750, 13625, },
1601 { 14875, 13750, },
1602 { 15000, 13875, },
1603 { 15125, 14000, },
1604 { 15250, 14125, },
1605 { 15375, 14250, },
1606 { 15500, 14375, },
1607 { 15625, 14500, },
1608 { 15750, 14625, },
1609 { 15875, 14750, },
1610 { 16000, 14875, },
1611 { 16125, 15000, },
1612 };
1613 if (dev_priv->info->is_mobile)
1614 return v_table[pxvid].vm;
1615 else
1616 return v_table[pxvid].vd;
1651} 1617}
1652 1618
1653void i915_update_gfx_val(struct drm_i915_private *dev_priv) 1619void i915_update_gfx_val(struct drm_i915_private *dev_priv)
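
The voltage table becomes const, moves inside the function, drops the redundant vid/pvid columns, and is reordered so the PXVID value indexes it directly instead of being scanned for. A reduced, compilable model of the lookup; only the first rows are kept, and the ".1 mil" unit follows the source comment:

#include <stdio.h>
#include <stdint.h>

struct vid {
	uint16_t vd;	/* desktop voltage, in .1 mil */
	uint16_t vm;	/* mobile voltage, in .1 mil */
};

/* Entry i corresponds to PXVID i, so no search loop is needed. */
static const struct vid v_table[] = {
	{ 0, 0 }, { 375, 0 }, { 500, 0 }, { 625, 0 },	/* truncated sample */
};

static uint16_t pvid_to_extvid(uint8_t pxvid, int is_mobile)
{
	return is_mobile ? v_table[pxvid].vm : v_table[pxvid].vd;
}

int main(void)
{
	printf("desktop pxvid 2 -> %u\n", pvid_to_extvid(2, 0));
	printf("mobile  pxvid 2 -> %u\n", pvid_to_extvid(2, 1));
	return 0;
}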
@@ -1881,9 +1847,9 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
1881int i915_driver_load(struct drm_device *dev, unsigned long flags) 1847int i915_driver_load(struct drm_device *dev, unsigned long flags)
1882{ 1848{
1883 struct drm_i915_private *dev_priv; 1849 struct drm_i915_private *dev_priv;
1884 resource_size_t base, size;
1885 int ret = 0, mmio_bar; 1850 int ret = 0, mmio_bar;
1886 uint32_t agp_size, prealloc_size; 1851 uint32_t agp_size;
1852
1887 /* i915 has 4 more counters */ 1853 /* i915 has 4 more counters */
1888 dev->counters += 4; 1854 dev->counters += 4;
1889 dev->types[6] = _DRM_STAT_IRQ; 1855 dev->types[6] = _DRM_STAT_IRQ;
@@ -1899,11 +1865,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1899 dev_priv->dev = dev; 1865 dev_priv->dev = dev;
1900 dev_priv->info = (struct intel_device_info *) flags; 1866 dev_priv->info = (struct intel_device_info *) flags;
1901 1867
1902 /* Add register map (needed for suspend/resume) */
1903 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1904 base = pci_resource_start(dev->pdev, mmio_bar);
1905 size = pci_resource_len(dev->pdev, mmio_bar);
1906
1907 if (i915_get_bridge_dev(dev)) { 1868 if (i915_get_bridge_dev(dev)) {
1908 ret = -EIO; 1869 ret = -EIO;
1909 goto free_priv; 1870 goto free_priv;
@@ -1913,16 +1874,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1913 if (IS_GEN2(dev)) 1874 if (IS_GEN2(dev))
1914 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1875 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1915 1876
1916 dev_priv->regs = ioremap(base, size); 1877 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1878 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1917 if (!dev_priv->regs) { 1879 if (!dev_priv->regs) {
1918 DRM_ERROR("failed to map registers\n"); 1880 DRM_ERROR("failed to map registers\n");
1919 ret = -EIO; 1881 ret = -EIO;
1920 goto put_bridge; 1882 goto put_bridge;
1921 } 1883 }
1922 1884
1885 dev_priv->mm.gtt = intel_gtt_get();
1886 if (!dev_priv->mm.gtt) {
1887 DRM_ERROR("Failed to initialize GTT\n");
1888 ret = -ENODEV;
1889 goto out_iomapfree;
1890 }
1891
1892 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1893
1923 dev_priv->mm.gtt_mapping = 1894 dev_priv->mm.gtt_mapping =
1924 io_mapping_create_wc(dev->agp->base, 1895 io_mapping_create_wc(dev->agp->base, agp_size);
1925 dev->agp->agp_info.aper_size * 1024*1024);
1926 if (dev_priv->mm.gtt_mapping == NULL) { 1896 if (dev_priv->mm.gtt_mapping == NULL) {
1927 ret = -EIO; 1897 ret = -EIO;
1928 goto out_rmmap; 1898 goto out_rmmap;
@@ -1934,24 +1904,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1934 * MTRR if present. Even if a UC MTRR isn't present. 1904 * MTRR if present. Even if a UC MTRR isn't present.
1935 */ 1905 */
1936 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base, 1906 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1937 dev->agp->agp_info.aper_size * 1907 agp_size,
1938 1024 * 1024,
1939 MTRR_TYPE_WRCOMB, 1); 1908 MTRR_TYPE_WRCOMB, 1);
1940 if (dev_priv->mm.gtt_mtrr < 0) { 1909 if (dev_priv->mm.gtt_mtrr < 0) {
1941 DRM_INFO("MTRR allocation failed. Graphics " 1910 DRM_INFO("MTRR allocation failed. Graphics "
1942 "performance may suffer.\n"); 1911 "performance may suffer.\n");
1943 } 1912 }
1944 1913
1945 dev_priv->mm.gtt = intel_gtt_get();
1946 if (!dev_priv->mm.gtt) {
1947 DRM_ERROR("Failed to initialize GTT\n");
1948 ret = -ENODEV;
1949 goto out_iomapfree;
1950 }
1951
1952 prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
1953 agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
1954
1955 /* The i915 workqueue is primarily used for batched retirement of 1914 /* The i915 workqueue is primarily used for batched retirement of
1956 * requests (and thus managing bo) once the task has been completed 1915 * requests (and thus managing bo) once the task has been completed
1957 * by the GPU. i915_gem_retire_requests() is called directly when we 1916 * by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1959,7 +1918,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1959 * bo. 1918 * bo.
1960 * 1919 *
1961 * It is also used for periodic low-priority events, such as 1920 * It is also used for periodic low-priority events, such as
1962 * idle-timers and hangcheck. 1921 * idle-timers and recording error state.
1963 * 1922 *
1964 * All tasks on the workqueue are expected to acquire the dev mutex 1923 * All tasks on the workqueue are expected to acquire the dev mutex
1965 * so there is no point in running more than one instance of the 1924 * so there is no point in running more than one instance of the
@@ -1977,20 +1936,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1977 /* enable GEM by default */ 1936 /* enable GEM by default */
1978 dev_priv->has_gem = 1; 1937 dev_priv->has_gem = 1;
1979 1938
1980 if (prealloc_size > agp_size * 3 / 4) {
1981 DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
1982 "memory stolen.\n",
1983 prealloc_size / 1024, agp_size / 1024);
1984 DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
1985 "updating the BIOS to fix).\n");
1986 dev_priv->has_gem = 0;
1987 }
1988
1989 if (dev_priv->has_gem == 0 && 1939 if (dev_priv->has_gem == 0 &&
1990 drm_core_check_feature(dev, DRIVER_MODESET)) { 1940 drm_core_check_feature(dev, DRIVER_MODESET)) {
1991 DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n"); 1941 DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
1992 ret = -ENODEV; 1942 ret = -ENODEV;
1993 goto out_iomapfree; 1943 goto out_workqueue_free;
1994 } 1944 }
1995 1945
1996 dev->driver->get_vblank_counter = i915_get_vblank_counter; 1946 dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -2013,8 +1963,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2013 /* Init HWS */ 1963 /* Init HWS */
2014 if (!I915_NEED_GFX_HWS(dev)) { 1964 if (!I915_NEED_GFX_HWS(dev)) {
2015 ret = i915_init_phys_hws(dev); 1965 ret = i915_init_phys_hws(dev);
2016 if (ret != 0) 1966 if (ret)
2017 goto out_workqueue_free; 1967 goto out_gem_unload;
2018 } 1968 }
2019 1969
2020 if (IS_PINEVIEW(dev)) 1970 if (IS_PINEVIEW(dev))
@@ -2036,16 +1986,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2036 if (!IS_I945G(dev) && !IS_I945GM(dev)) 1986 if (!IS_I945G(dev) && !IS_I945GM(dev))
2037 pci_enable_msi(dev->pdev); 1987 pci_enable_msi(dev->pdev);
2038 1988
2039 spin_lock_init(&dev_priv->user_irq_lock); 1989 spin_lock_init(&dev_priv->irq_lock);
2040 spin_lock_init(&dev_priv->error_lock); 1990 spin_lock_init(&dev_priv->error_lock);
2041 dev_priv->trace_irq_seqno = 0; 1991 dev_priv->trace_irq_seqno = 0;
2042 1992
2043 ret = drm_vblank_init(dev, I915_NUM_PIPE); 1993 ret = drm_vblank_init(dev, I915_NUM_PIPE);
2044 1994 if (ret)
2045 if (ret) { 1995 goto out_gem_unload;
2046 (void) i915_driver_unload(dev);
2047 return ret;
2048 }
2049 1996
2050 /* Start out suspended */ 1997 /* Start out suspended */
2051 dev_priv->mm.suspended = 1; 1998 dev_priv->mm.suspended = 1;
@@ -2053,10 +2000,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2053 intel_detect_pch(dev); 2000 intel_detect_pch(dev);
2054 2001
2055 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 2002 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
2056 ret = i915_load_modeset_init(dev, prealloc_size, agp_size); 2003 ret = i915_load_modeset_init(dev);
2057 if (ret < 0) { 2004 if (ret < 0) {
2058 DRM_ERROR("failed to init modeset\n"); 2005 DRM_ERROR("failed to init modeset\n");
2059 goto out_workqueue_free; 2006 goto out_gem_unload;
2060 } 2007 }
2061 } 2008 }
2062 2009
@@ -2074,12 +2021,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
2074 2021
2075 return 0; 2022 return 0;
2076 2023
2024out_gem_unload:
2025 if (dev->pdev->msi_enabled)
2026 pci_disable_msi(dev->pdev);
2027
2028 intel_teardown_gmbus(dev);
2029 intel_teardown_mchbar(dev);
2077out_workqueue_free: 2030out_workqueue_free:
2078 destroy_workqueue(dev_priv->wq); 2031 destroy_workqueue(dev_priv->wq);
2079out_iomapfree: 2032out_iomapfree:
2080 io_mapping_free(dev_priv->mm.gtt_mapping); 2033 io_mapping_free(dev_priv->mm.gtt_mapping);
2081out_rmmap: 2034out_rmmap:
2082 iounmap(dev_priv->regs); 2035 pci_iounmap(dev->pdev, dev_priv->regs);
2083put_bridge: 2036put_bridge:
2084 pci_dev_put(dev_priv->bridge_dev); 2037 pci_dev_put(dev_priv->bridge_dev);
2085free_priv: 2038free_priv:
@@ -2096,6 +2049,9 @@ int i915_driver_unload(struct drm_device *dev)
2096 i915_mch_dev = NULL; 2049 i915_mch_dev = NULL;
2097 spin_unlock(&mchdev_lock); 2050 spin_unlock(&mchdev_lock);
2098 2051
2052 if (dev_priv->mm.inactive_shrinker.shrink)
2053 unregister_shrinker(&dev_priv->mm.inactive_shrinker);
2054
2099 mutex_lock(&dev->struct_mutex); 2055 mutex_lock(&dev->struct_mutex);
2100 ret = i915_gpu_idle(dev); 2056 ret = i915_gpu_idle(dev);
2101 if (ret) 2057 if (ret)
@@ -2153,7 +2109,7 @@ int i915_driver_unload(struct drm_device *dev)
2153 mutex_unlock(&dev->struct_mutex); 2109 mutex_unlock(&dev->struct_mutex);
2154 if (I915_HAS_FBC(dev) && i915_powersave) 2110 if (I915_HAS_FBC(dev) && i915_powersave)
2155 i915_cleanup_compression(dev); 2111 i915_cleanup_compression(dev);
2156 drm_mm_takedown(&dev_priv->mm.vram); 2112 drm_mm_takedown(&dev_priv->mm.stolen);
2157 2113
2158 intel_cleanup_overlay(dev); 2114 intel_cleanup_overlay(dev);
2159 2115
@@ -2162,7 +2118,7 @@ int i915_driver_unload(struct drm_device *dev)
2162 } 2118 }
2163 2119
2164 if (dev_priv->regs != NULL) 2120 if (dev_priv->regs != NULL)
2165 iounmap(dev_priv->regs); 2121 pci_iounmap(dev->pdev, dev_priv->regs);
2166 2122
2167 intel_teardown_gmbus(dev); 2123 intel_teardown_gmbus(dev);
2168 intel_teardown_mchbar(dev); 2124 intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e6..9eee6cf7901e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
111 111
112static const struct intel_device_info intel_i965gm_info = { 112static const struct intel_device_info intel_i965gm_info = {
113 .gen = 4, .is_crestline = 1, 113 .gen = 4, .is_crestline = 1,
114 .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1, 114 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
115 .has_overlay = 1, 115 .has_overlay = 1,
116 .supports_tv = 1, 116 .supports_tv = 1,
117}; 117};
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
130 130
131static const struct intel_device_info intel_gm45_info = { 131static const struct intel_device_info intel_gm45_info = {
132 .gen = 4, .is_g4x = 1, 132 .gen = 4, .is_g4x = 1,
133 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, 133 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
134 .has_pipe_cxsr = 1, .has_hotplug = 1, 134 .has_pipe_cxsr = 1, .has_hotplug = 1,
135 .supports_tv = 1, 135 .supports_tv = 1,
136 .has_bsd_ring = 1, 136 .has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
150 150
151static const struct intel_device_info intel_ironlake_m_info = { 151static const struct intel_device_info intel_ironlake_m_info = {
152 .gen = 5, .is_mobile = 1, 152 .gen = 5, .is_mobile = 1,
153 .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1, 153 .need_gfx_hws = 1, .has_hotplug = 1,
154 .has_fbc = 0, /* disabled due to buggy hardware */ 154 .has_fbc = 0, /* disabled due to buggy hardware */
155 .has_bsd_ring = 1, 155 .has_bsd_ring = 1,
156}; 156};
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
165static const struct intel_device_info intel_sandybridge_m_info = { 165static const struct intel_device_info intel_sandybridge_m_info = {
166 .gen = 6, .is_mobile = 1, 166 .gen = 6, .is_mobile = 1,
167 .need_gfx_hws = 1, .has_hotplug = 1, 167 .need_gfx_hws = 1, .has_hotplug = 1,
168 .has_fbc = 1,
168 .has_bsd_ring = 1, 169 .has_bsd_ring = 1,
169 .has_blt_ring = 1, 170 .has_blt_ring = 1,
170}; 171};
@@ -244,6 +245,28 @@ void intel_detect_pch (struct drm_device *dev)
244 } 245 }
245} 246}
246 247
248void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
249{
250 int count;
251
252 count = 0;
253 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
254 udelay(10);
255
256 I915_WRITE_NOTRACE(FORCEWAKE, 1);
257 POSTING_READ(FORCEWAKE);
258
259 count = 0;
260 while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
261 udelay(10);
262}
263
264void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
265{
266 I915_WRITE_NOTRACE(FORCEWAKE, 0);
267 POSTING_READ(FORCEWAKE);
268}
269
247static int i915_drm_freeze(struct drm_device *dev) 270static int i915_drm_freeze(struct drm_device *dev)
248{ 271{
249 struct drm_i915_private *dev_priv = dev->dev_private; 272 struct drm_i915_private *dev_priv = dev->dev_private;
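
The __gen6_force_wake_get()/__gen6_force_wake_put() pair added above implements the Sandybridge forcewake handshake: wait for any previous ACK to clear, request the wake, then poll until the hardware acknowledges it. A minimal sketch of the intended pairing around a GT register read, mirroring the i915_safe_read() helper added to i915_drv.h later in this diff (the function name here is illustrative):

	/* Sketch only: bracket a gen6 ring-register read with the forcewake
	 * pair so the GT core cannot power down mid-read. */
	static u32 example_gt_register_read(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val;

		__gen6_force_wake_get(dev_priv);	/* wake the GT core */
		val = I915_READ(reg);			/* value is now stable */
		__gen6_force_wake_put(dev_priv);	/* allow power-down again */

		return val;
	}
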
@@ -304,6 +327,12 @@ static int i915_drm_thaw(struct drm_device *dev)
304 struct drm_i915_private *dev_priv = dev->dev_private; 327 struct drm_i915_private *dev_priv = dev->dev_private;
305 int error = 0; 328 int error = 0;
306 329
330 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
331 mutex_lock(&dev->struct_mutex);
332 i915_gem_restore_gtt_mappings(dev);
333 mutex_unlock(&dev->struct_mutex);
334 }
335
307 i915_restore_state(dev); 336 i915_restore_state(dev);
308 intel_opregion_setup(dev); 337 intel_opregion_setup(dev);
309 338
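
Restoring the GTT mappings here, before i915_restore_state() runs, ensures the register restore never touches objects through stale aperture entries. Judging by the mm.gtt_list bookkeeping added to i915_drv.h below, the restore presumably just rebinds every object still resident in the GTT; a hedged sketch:

	/* Hedged sketch of what i915_gem_restore_gtt_mappings() amounts to:
	 * every bound object sits on mm.gtt_list, so resume rewrites each
	 * object's PTEs via the new i915_gem_gtt_bind_object() helper. */
	static void example_restore_gtt_mappings(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj;

		list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list)
			if (i915_gem_gtt_bind_object(obj))
				DRM_ERROR("failed to rebind object\n");
	}
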
@@ -405,6 +434,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
405 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500); 434 return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
406} 435}
407 436
437static int gen6_do_reset(struct drm_device *dev, u8 flags)
438{
439 struct drm_i915_private *dev_priv = dev->dev_private;
440
441 I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
442 return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
443}
444
408/** 445/**
409 * i965_reset - reset chip after a hang 446 * i965_reset - reset chip after a hang
410 * @dev: drm device to reset 447 * @dev: drm device to reset
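
gen6_do_reset() above leans on the driver's wait_for() polling macro: set the full-soft-reset domain bit, then poll until the hardware clears it, failing after 500 ms. Open-coded, the pattern is approximately as follows (a sketch of the macro's semantics, not its exact implementation):

	/* Approximate expansion of wait_for((I915_READ(GEN6_GDRST) &
	 * GEN6_GRDOM_FULL) == 0, 500): poll roughly once per millisecond
	 * and give up with -ETIMEDOUT after 500 ms. */
	static int example_wait_for_gen6_reset(struct drm_i915_private *dev_priv)
	{
		unsigned long timeout = jiffies + msecs_to_jiffies(500);

		while (I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) {
			if (time_after(jiffies, timeout))
				return -ETIMEDOUT;
			msleep(1);
		}
		return 0;
	}
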
@@ -431,7 +468,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
431 bool need_display = true; 468 bool need_display = true;
432 int ret; 469 int ret;
433 470
434 mutex_lock(&dev->struct_mutex); 471 if (!mutex_trylock(&dev->struct_mutex))
472 return -EBUSY;
435 473
436 i915_gem_reset(dev); 474 i915_gem_reset(dev);
437 475
@@ -439,6 +477,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
439 if (get_seconds() - dev_priv->last_gpu_reset < 5) { 477 if (get_seconds() - dev_priv->last_gpu_reset < 5) {
440 DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); 478 DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
441 } else switch (INTEL_INFO(dev)->gen) { 479 } else switch (INTEL_INFO(dev)->gen) {
480 case 6:
481 ret = gen6_do_reset(dev, flags);
482 break;
442 case 5: 483 case 5:
443 ret = ironlake_do_reset(dev, flags); 484 ret = ironlake_do_reset(dev, flags);
444 break; 485 break;
@@ -472,9 +513,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
472 */ 513 */
473 if (drm_core_check_feature(dev, DRIVER_MODESET) || 514 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
474 !dev_priv->mm.suspended) { 515 !dev_priv->mm.suspended) {
475 struct intel_ring_buffer *ring = &dev_priv->render_ring;
476 dev_priv->mm.suspended = 0; 516 dev_priv->mm.suspended = 0;
477 ring->init(dev, ring); 517
518 dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
519 if (HAS_BSD(dev))
520 dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
521 if (HAS_BLT(dev))
522 dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
523
478 mutex_unlock(&dev->struct_mutex); 524 mutex_unlock(&dev->struct_mutex);
479 drm_irq_uninstall(dev); 525 drm_irq_uninstall(dev);
480 drm_irq_install(dev); 526 drm_irq_install(dev);
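
With the per-engine ring_buffer fields collapsed into a single ring[] array (see the i915_drv.h hunks below), post-reset re-init addresses rings by index, and ring->init() now takes only the ring itself. A hypothetical loop form of the three explicit calls above, assuming rings absent on a given chipset are left with a NULL init hook:

	/* Hypothetical equivalent of the explicit RCS/VCS/BCS init calls;
	 * the NULL check stands in for the HAS_BSD()/HAS_BLT() guards. */
	static void example_reinit_rings(struct drm_i915_private *dev_priv)
	{
		int i;

		for (i = 0; i < I915_NUM_RINGS; i++) {
			struct intel_ring_buffer *ring = &dev_priv->ring[i];

			if (ring->init)
				ring->init(ring);
		}
	}
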
@@ -606,6 +652,8 @@ static struct drm_driver driver = {
606 .device_is_agp = i915_driver_device_is_agp, 652 .device_is_agp = i915_driver_device_is_agp,
607 .enable_vblank = i915_enable_vblank, 653 .enable_vblank = i915_enable_vblank,
608 .disable_vblank = i915_disable_vblank, 654 .disable_vblank = i915_disable_vblank,
655 .get_vblank_timestamp = i915_get_vblank_timestamp,
656 .get_scanout_position = i915_get_crtc_scanoutpos,
609 .irq_preinstall = i915_driver_irq_preinstall, 657 .irq_preinstall = i915_driver_irq_preinstall,
610 .irq_postinstall = i915_driver_irq_postinstall, 658 .irq_postinstall = i915_driver_irq_postinstall,
611 .irq_uninstall = i915_driver_irq_uninstall, 659 .irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +709,6 @@ static int __init i915_init(void)
661 709
662 driver.num_ioctls = i915_max_ioctl; 710 driver.num_ioctls = i915_max_ioctl;
663 711
664 i915_gem_shrinker_init();
665
666 /* 712 /*
667 * If CONFIG_DRM_I915_KMS is set, default to KMS unless 713 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 668 * explicitly disabled with the module parameter. 714 * explicitly disabled with the module parameter.
@@ -684,17 +730,11 @@ static int __init i915_init(void)
684 driver.driver_features &= ~DRIVER_MODESET; 730 driver.driver_features &= ~DRIVER_MODESET;
685#endif 731#endif
686 732
687 if (!(driver.driver_features & DRIVER_MODESET)) {
688 driver.suspend = i915_suspend;
689 driver.resume = i915_resume;
690 }
691
692 return drm_init(&driver); 733 return drm_init(&driver);
693} 734}
694 735
695static void __exit i915_exit(void) 736static void __exit i915_exit(void)
696{ 737{
697 i915_gem_shrinker_exit();
698 drm_exit(&driver); 738 drm_exit(&driver);
699} 739}
700 740
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da3099..aac1bf332f75 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
89 int id; 89 int id;
90 struct page **page_list; 90 struct page **page_list;
91 drm_dma_handle_t *handle; 91 drm_dma_handle_t *handle;
92 struct drm_gem_object *cur_obj; 92 struct drm_i915_gem_object *cur_obj;
93}; 93};
94 94
95struct mem_block { 95struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
124#define I915_FENCE_REG_NONE -1 124#define I915_FENCE_REG_NONE -1
125 125
126struct drm_i915_fence_reg { 126struct drm_i915_fence_reg {
127 struct drm_gem_object *obj;
128 struct list_head lru_list; 127 struct list_head lru_list;
129 bool gpu; 128 struct drm_i915_gem_object *obj;
129 uint32_t setup_seqno;
130}; 130};
131 131
132struct sdvo_device_mapping { 132struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
139 u8 ddc_pin; 139 u8 ddc_pin;
140}; 140};
141 141
142struct intel_display_error_state;
143
142struct drm_i915_error_state { 144struct drm_i915_error_state {
143 u32 eir; 145 u32 eir;
144 u32 pgtbl_er; 146 u32 pgtbl_er;
@@ -148,11 +150,23 @@ struct drm_i915_error_state {
148 u32 ipehr; 150 u32 ipehr;
149 u32 instdone; 151 u32 instdone;
150 u32 acthd; 152 u32 acthd;
153 u32 error; /* gen6+ */
154 u32 bcs_acthd; /* gen6+ blt engine */
155 u32 bcs_ipehr;
156 u32 bcs_ipeir;
157 u32 bcs_instdone;
158 u32 bcs_seqno;
159 u32 vcs_acthd; /* gen6+ bsd engine */
160 u32 vcs_ipehr;
161 u32 vcs_ipeir;
162 u32 vcs_instdone;
163 u32 vcs_seqno;
151 u32 instpm; 164 u32 instpm;
152 u32 instps; 165 u32 instps;
153 u32 instdone1; 166 u32 instdone1;
154 u32 seqno; 167 u32 seqno;
155 u64 bbaddr; 168 u64 bbaddr;
169 u64 fence[16];
156 struct timeval time; 170 struct timeval time;
157 struct drm_i915_error_object { 171 struct drm_i915_error_object {
158 int page_count; 172 int page_count;
@@ -171,9 +185,11 @@ struct drm_i915_error_state {
171 u32 tiling:2; 185 u32 tiling:2;
172 u32 dirty:1; 186 u32 dirty:1;
173 u32 purgeable:1; 187 u32 purgeable:1;
174 } *active_bo; 188 u32 ring:4;
175 u32 active_bo_count; 189 } *active_bo, *pinned_bo;
190 u32 active_bo_count, pinned_bo_count;
176 struct intel_overlay_error_state *overlay; 191 struct intel_overlay_error_state *overlay;
192 struct intel_display_error_state *display;
177}; 193};
178 194
179struct drm_i915_display_funcs { 195struct drm_i915_display_funcs {
@@ -207,7 +223,6 @@ struct intel_device_info {
207 u8 is_broadwater : 1; 223 u8 is_broadwater : 1;
208 u8 is_crestline : 1; 224 u8 is_crestline : 1;
209 u8 has_fbc : 1; 225 u8 has_fbc : 1;
210 u8 has_rc6 : 1;
211 u8 has_pipe_cxsr : 1; 226 u8 has_pipe_cxsr : 1;
212 u8 has_hotplug : 1; 227 u8 has_hotplug : 1;
213 u8 cursor_needs_physical : 1; 228 u8 cursor_needs_physical : 1;
@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
243 const struct intel_device_info *info; 258 const struct intel_device_info *info;
244 259
245 int has_gem; 260 int has_gem;
261 int relative_constants_mode;
246 262
247 void __iomem *regs; 263 void __iomem *regs;
248 264
@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
253 } *gmbus; 269 } *gmbus;
254 270
255 struct pci_dev *bridge_dev; 271 struct pci_dev *bridge_dev;
256 struct intel_ring_buffer render_ring; 272 struct intel_ring_buffer ring[I915_NUM_RINGS];
257 struct intel_ring_buffer bsd_ring;
258 struct intel_ring_buffer blt_ring;
259 uint32_t next_seqno; 273 uint32_t next_seqno;
260 274
261 drm_dma_handle_t *status_page_dmah; 275 drm_dma_handle_t *status_page_dmah;
262 void *seqno_page;
263 dma_addr_t dma_status_page; 276 dma_addr_t dma_status_page;
264 uint32_t counter; 277 uint32_t counter;
265 unsigned int seqno_gfx_addr;
266 drm_local_map_t hws_map; 278 drm_local_map_t hws_map;
267 struct drm_gem_object *seqno_obj; 279 struct drm_i915_gem_object *pwrctx;
268 struct drm_gem_object *pwrctx; 280 struct drm_i915_gem_object *renderctx;
269 struct drm_gem_object *renderctx;
270 281
271 struct resource mch_res; 282 struct resource mch_res;
272 283
@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
275 int front_offset; 286 int front_offset;
276 int current_page; 287 int current_page;
277 int page_flipping; 288 int page_flipping;
278#define I915_DEBUG_READ (1<<0)
279#define I915_DEBUG_WRITE (1<<1)
280 unsigned long debug_flags;
281 289
282 wait_queue_head_t irq_queue;
283 atomic_t irq_received; 290 atomic_t irq_received;
284 /** Protects user_irq_refcount and irq_mask_reg */
285 spinlock_t user_irq_lock;
286 u32 trace_irq_seqno; 291 u32 trace_irq_seqno;
292
293 /* protects the irq masks */
294 spinlock_t irq_lock;
287 /** Cached value of IMR to avoid reads in updating the bitfield */ 295 /** Cached value of IMR to avoid reads in updating the bitfield */
288 u32 irq_mask_reg;
289 u32 pipestat[2]; 296 u32 pipestat[2];
290 /** splitted irq regs for graphics and display engine on Ironlake, 297 u32 irq_mask;
291 irq_mask_reg is still used for display irq. */ 298 u32 gt_irq_mask;
292 u32 gt_irq_mask_reg; 299 u32 pch_irq_mask;
293 u32 gt_irq_enable_reg;
294 u32 de_irq_enable_reg;
295 u32 pch_irq_mask_reg;
296 u32 pch_irq_enable_reg;
297 300
298 u32 hotplug_supported_mask; 301 u32 hotplug_supported_mask;
299 struct work_struct hotplug_work; 302 struct work_struct hotplug_work;
@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
306 int num_pipe; 309 int num_pipe;
307 310
308 /* For hangcheck timer */ 311 /* For hangcheck timer */
309#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */ 312#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
310 struct timer_list hangcheck_timer; 313 struct timer_list hangcheck_timer;
311 int hangcheck_count; 314 int hangcheck_count;
312 uint32_t last_acthd; 315 uint32_t last_acthd;
@@ -530,23 +533,21 @@ typedef struct drm_i915_private {
530 533
531 struct { 534 struct {
532 /** Bridge to intel-gtt-ko */ 535 /** Bridge to intel-gtt-ko */
533 struct intel_gtt *gtt; 536 const struct intel_gtt *gtt;
534 /** Memory allocator for GTT stolen memory */ 537 /** Memory allocator for GTT stolen memory */
535 struct drm_mm vram; 538 struct drm_mm stolen;
536 /** Memory allocator for GTT */ 539 /** Memory allocator for GTT */
537 struct drm_mm gtt_space; 540 struct drm_mm gtt_space;
541 /** List of all objects in gtt_space. Used to restore gtt
542 * mappings on resume */
543 struct list_head gtt_list;
544 /** End of mappable part of GTT */
545 unsigned long gtt_mappable_end;
538 546
539 struct io_mapping *gtt_mapping; 547 struct io_mapping *gtt_mapping;
540 int gtt_mtrr; 548 int gtt_mtrr;
541 549
542 /** 550 struct shrinker inactive_shrinker;
543 * Membership on list of all loaded devices, used to evict
544 * inactive buffers under memory pressure.
545 *
546 * Modifications should only be done whilst holding the
547 * shrink_list_lock spinlock.
548 */
549 struct list_head shrink_list;
550 551
551 /** 552 /**
552 * List of objects currently involved in rendering. 553 * List of objects currently involved in rendering.
@@ -609,16 +610,6 @@ typedef struct drm_i915_private {
609 struct delayed_work retire_work; 610 struct delayed_work retire_work;
610 611
611 /** 612 /**
612 * Waiting sequence number, if any
613 */
614 uint32_t waiting_gem_seqno;
615
616 /**
617 * Last seq seen at irq time
618 */
619 uint32_t irq_gem_seqno;
620
621 /**
622 * Flag if the X Server, and thus DRM, is not currently in 613 * Flag if the X Server, and thus DRM, is not currently in
623 * control of the device. 614 * control of the device.
624 * 615 *
@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
645 /* storage for physical objects */ 636 /* storage for physical objects */
646 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; 637 struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
647 638
648 uint32_t flush_rings;
649
650 /* accounting, useful for userland debugging */ 639 /* accounting, useful for userland debugging */
651 size_t object_memory;
652 size_t pin_memory;
653 size_t gtt_memory;
654 size_t gtt_total; 640 size_t gtt_total;
641 size_t mappable_gtt_total;
642 size_t object_memory;
655 u32 object_count; 643 u32 object_count;
656 u32 pin_count;
657 u32 gtt_count;
658 } mm; 644 } mm;
659 struct sdvo_device_mapping sdvo_mappings[2]; 645 struct sdvo_device_mapping sdvo_mappings[2];
660 /* indicate whether the LVDS_BORDER should be enabled or not */ 646 /* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
688 u8 fmax; 674 u8 fmax;
689 u8 fstart; 675 u8 fstart;
690 676
691 u64 last_count1; 677 u64 last_count1;
692 unsigned long last_time1; 678 unsigned long last_time1;
693 u64 last_count2; 679 u64 last_count2;
694 struct timespec last_time2; 680 struct timespec last_time2;
695 unsigned long gfx_power; 681 unsigned long gfx_power;
696 int c_m; 682 int c_m;
697 int r_t; 683 int r_t;
698 u8 corr; 684 u8 corr;
699 spinlock_t *mchdev_lock; 685 spinlock_t *mchdev_lock;
700 686
701 enum no_fbc_reason no_fbc_reason; 687 enum no_fbc_reason no_fbc_reason;
@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
709 struct intel_fbdev *fbdev; 695 struct intel_fbdev *fbdev;
710} drm_i915_private_t; 696} drm_i915_private_t;
711 697
712/** driver private structure attached to each drm_gem_object */
713struct drm_i915_gem_object { 698struct drm_i915_gem_object {
714 struct drm_gem_object base; 699 struct drm_gem_object base;
715 700
716 /** Current space allocated to this object in the GTT, if any. */ 701 /** Current space allocated to this object in the GTT, if any. */
717 struct drm_mm_node *gtt_space; 702 struct drm_mm_node *gtt_space;
703 struct list_head gtt_list;
718 704
719 /** This object's place on the active/flushing/inactive lists */ 705 /** This object's place on the active/flushing/inactive lists */
720 struct list_head ring_list; 706 struct list_head ring_list;
721 struct list_head mm_list; 707 struct list_head mm_list;
722 /** This object's place on GPU write list */ 708 /** This object's place on GPU write list */
723 struct list_head gpu_write_list; 709 struct list_head gpu_write_list;
724 /** This object's place on eviction list */ 710 /** This object's place in the batchbuffer or on the eviction list */
725 struct list_head evict_list; 711 struct list_head exec_list;
726 712
727 /** 713 /**
728 * This is set if the object is on the active or flushing lists 714 * This is set if the object is on the active or flushing lists
@@ -738,6 +724,12 @@ struct drm_i915_gem_object {
738 unsigned int dirty : 1; 724 unsigned int dirty : 1;
739 725
740 /** 726 /**
727 * This is set if the object has been written to since the last
728 * GPU flush.
729 */
730 unsigned int pending_gpu_write : 1;
731
732 /**
741 * Fence register bits (if any) for this object. Will be set 733 * Fence register bits (if any) for this object. Will be set
742 * as needed when mapped into the GTT. 734 * as needed when mapped into the GTT.
743 * Protected by dev->struct_mutex. 735 * Protected by dev->struct_mutex.
@@ -747,29 +739,15 @@ struct drm_i915_gem_object {
747 signed int fence_reg : 5; 739 signed int fence_reg : 5;
748 740
749 /** 741 /**
750 * Used for checking the object doesn't appear more than once
751 * in an execbuffer object list.
752 */
753 unsigned int in_execbuffer : 1;
754
755 /**
756 * Advice: are the backing pages purgeable? 742 * Advice: are the backing pages purgeable?
757 */ 743 */
758 unsigned int madv : 2; 744 unsigned int madv : 2;
759 745
760 /** 746 /**
761 * Refcount for the pages array. With the current locking scheme, there
762 * are at most two concurrent users: Binding a bo to the gtt and
763 * pwrite/pread using physical addresses. So two bits for a maximum
764 * of two users are enough.
765 */
766 unsigned int pages_refcount : 2;
767#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
768
769 /**
770 * Current tiling mode for the object. 747 * Current tiling mode for the object.
771 */ 748 */
772 unsigned int tiling_mode : 2; 749 unsigned int tiling_mode : 2;
750 unsigned int tiling_changed : 1;
773 751
774 /** How many users have pinned this object in GTT space. The following 752 /** How many users have pinned this object in GTT space. The following
775 * users can each hold at most one reference: pwrite/pread, pin_ioctl 753 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +761,54 @@ struct drm_i915_gem_object {
783 unsigned int pin_count : 4; 761 unsigned int pin_count : 4;
784#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf 762#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
785 763
786 /** AGP memory structure for our GTT binding. */ 764 /**
787 DRM_AGP_MEM *agp_mem; 765 * Is the object at the current location in the gtt mappable and
766 * fenceable? Used to avoid costly recalculations.
767 */
768 unsigned int map_and_fenceable : 1;
769
770 /**
771 * Whether the current gtt mapping needs to be mappable (and isn't just
772 * mappable by accident). Track pin and fault separate for a more
773 * accurate mappable working set.
774 */
775 unsigned int fault_mappable : 1;
776 unsigned int pin_mappable : 1;
777
778 /*
 779 * Is the GPU currently using a fence to access this buffer?
780 */
781 unsigned int pending_fenced_gpu_access:1;
782 unsigned int fenced_gpu_access:1;
788 783
789 struct page **pages; 784 struct page **pages;
790 785
791 /** 786 /**
792 * Current offset of the object in GTT space. 787 * DMAR support
793 *
794 * This is the same as gtt_space->start
795 */ 788 */
796 uint32_t gtt_offset; 789 struct scatterlist *sg_list;
790 int num_sg;
797 791
 798 /* Which ring is referring to this object */ 792 /**
799 struct intel_ring_buffer *ring; 793 * Used for performing relocations during execbuffer insertion.
794 */
795 struct hlist_node exec_node;
796 unsigned long exec_handle;
800 797
801 /** 798 /**
802 * Fake offset for use by mmap(2) 799 * Current offset of the object in GTT space.
800 *
801 * This is the same as gtt_space->start
803 */ 802 */
804 uint64_t mmap_offset; 803 uint32_t gtt_offset;
805 804
806 /** Breadcrumb of last rendering to the buffer. */ 805 /** Breadcrumb of last rendering to the buffer. */
807 uint32_t last_rendering_seqno; 806 uint32_t last_rendering_seqno;
807 struct intel_ring_buffer *ring;
808
809 /** Breadcrumb of last fenced GPU access to the buffer. */
810 uint32_t last_fenced_seqno;
811 struct intel_ring_buffer *last_fenced_ring;
808 812
809 /** Current tiling stride for the object, if it's tiled. */ 813 /** Current tiling stride for the object, if it's tiled. */
810 uint32_t stride; 814 uint32_t stride;
@@ -880,6 +884,68 @@ enum intel_chip_family {
880 CHIP_I965 = 0x08, 884 CHIP_I965 = 0x08,
881}; 885};
882 886
887#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
888
889#define IS_I830(dev) ((dev)->pci_device == 0x3577)
890#define IS_845G(dev) ((dev)->pci_device == 0x2562)
891#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
892#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
893#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
894#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
895#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
896#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
897#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
898#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
899#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
900#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
901#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
902#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
903#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
904#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
905#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
906#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
907#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
908
909#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
910#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
911#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
912#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
913#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
914
915#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
916#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
917#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
918
919#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
920#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
921
922/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
923 * rows, which changed the alignment requirements and fence programming.
924 */
925#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
926 IS_I915GM(dev)))
927#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
928#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
929#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
930#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
931#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
932#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
933/* dsparb controlled by hw only */
934#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
935
936#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
937#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
938#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
939
940#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
941#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
942
943#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
944#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
945#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
946
947#include "i915_trace.h"
948
883extern struct drm_ioctl_desc i915_ioctls[]; 949extern struct drm_ioctl_desc i915_ioctls[];
884extern int i915_max_ioctl; 950extern int i915_max_ioctl;
885extern unsigned int i915_fbpercrtc; 951extern unsigned int i915_fbpercrtc;
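
Hoisting the IS_*/HAS_* predicates ahead of the i915_trace.h include lets both the tracepoints and the rest of the header test features through INTEL_INFO(dev) directly. A small illustrative caller:

	/* Illustrative use of the relocated feature macros. */
	static bool example_can_use_blt(struct drm_device *dev)
	{
		/* The BLT ring is a gen6 feature, advertised per-device. */
		return IS_GEN6(dev) && HAS_BLT(dev);
	}
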
@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
907extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 973extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
908 unsigned long arg); 974 unsigned long arg);
909extern int i915_emit_box(struct drm_device *dev, 975extern int i915_emit_box(struct drm_device *dev,
910 struct drm_clip_rect *boxes, 976 struct drm_clip_rect *box,
911 int i, int DR1, int DR4); 977 int DR1, int DR4);
912extern int i915_reset(struct drm_device *dev, u8 flags); 978extern int i915_reset(struct drm_device *dev, u8 flags);
913extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); 979extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
914extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv); 980extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
918 984
919/* i915_irq.c */ 985/* i915_irq.c */
920void i915_hangcheck_elapsed(unsigned long data); 986void i915_hangcheck_elapsed(unsigned long data);
987void i915_handle_error(struct drm_device *dev, bool wedged);
921extern int i915_irq_emit(struct drm_device *dev, void *data, 988extern int i915_irq_emit(struct drm_device *dev, void *data,
922 struct drm_file *file_priv); 989 struct drm_file *file_priv);
923extern int i915_irq_wait(struct drm_device *dev, void *data, 990extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -953,6 +1020,13 @@ void
953i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask); 1020i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
954 1021
955void intel_enable_asle (struct drm_device *dev); 1022void intel_enable_asle (struct drm_device *dev);
1023int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
1024 int *max_error,
1025 struct timeval *vblank_time,
1026 unsigned flags);
1027
1028int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
1029 int *vpos, int *hpos);
956 1030
957#ifdef CONFIG_DEBUG_FS 1031#ifdef CONFIG_DEBUG_FS
958extern void i915_destroy_error_state(struct drm_device *dev); 1032extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
1017 struct drm_file *file_priv); 1091 struct drm_file *file_priv);
1018void i915_gem_load(struct drm_device *dev); 1092void i915_gem_load(struct drm_device *dev);
1019int i915_gem_init_object(struct drm_gem_object *obj); 1093int i915_gem_init_object(struct drm_gem_object *obj);
1020struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, 1094void i915_gem_flush_ring(struct drm_device *dev,
1021 size_t size); 1095 struct intel_ring_buffer *ring,
1096 uint32_t invalidate_domains,
1097 uint32_t flush_domains);
1098struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
1099 size_t size);
1022void i915_gem_free_object(struct drm_gem_object *obj); 1100void i915_gem_free_object(struct drm_gem_object *obj);
1023int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 1101int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
1024void i915_gem_object_unpin(struct drm_gem_object *obj); 1102 uint32_t alignment,
1025int i915_gem_object_unbind(struct drm_gem_object *obj); 1103 bool map_and_fenceable);
1026void i915_gem_release_mmap(struct drm_gem_object *obj); 1104void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
1105int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
1106void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
1027void i915_gem_lastclose(struct drm_device *dev); 1107void i915_gem_lastclose(struct drm_device *dev);
1028 1108
1109int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
1110int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
1111 bool interruptible);
1112void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1113 struct intel_ring_buffer *ring,
1114 u32 seqno);
1115
1029/** 1116/**
1030 * Returns true if seq1 is later than seq2. 1117 * Returns true if seq1 is later than seq2.
1031 */ 1118 */
@@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
1035 return (int32_t)(seq1 - seq2) >= 0; 1122 return (int32_t)(seq1 - seq2) >= 0;
1036} 1123}
1037 1124
1038int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, 1125static inline u32
1039 bool interruptible); 1126i915_gem_next_request_seqno(struct drm_device *dev,
1040int i915_gem_object_put_fence_reg(struct drm_gem_object *obj, 1127 struct intel_ring_buffer *ring)
1041 bool interruptible); 1128{
1129 drm_i915_private_t *dev_priv = dev->dev_private;
1130 return ring->outstanding_lazy_request = dev_priv->next_seqno;
1131}
1132
1133int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
1134 struct intel_ring_buffer *pipelined,
1135 bool interruptible);
1136int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
1137
1042void i915_gem_retire_requests(struct drm_device *dev); 1138void i915_gem_retire_requests(struct drm_device *dev);
1043void i915_gem_reset(struct drm_device *dev); 1139void i915_gem_reset(struct drm_device *dev);
1044void i915_gem_clflush_object(struct drm_gem_object *obj); 1140void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
1045int i915_gem_object_set_domain(struct drm_gem_object *obj, 1141int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
1046 uint32_t read_domains, 1142 uint32_t read_domains,
1047 uint32_t write_domain); 1143 uint32_t write_domain);
1048int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj, 1144int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
1049 bool interruptible); 1145 bool interruptible);
1050int i915_gem_init_ringbuffer(struct drm_device *dev); 1146int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
1051void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 1147void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
1052int i915_gem_do_init(struct drm_device *dev, unsigned long start, 1148void i915_gem_do_init(struct drm_device *dev,
1053 unsigned long end); 1149 unsigned long start,
1054int i915_gpu_idle(struct drm_device *dev); 1150 unsigned long mappable_end,
1055int i915_gem_idle(struct drm_device *dev); 1151 unsigned long end);
1056uint32_t i915_add_request(struct drm_device *dev, 1152int __must_check i915_gpu_idle(struct drm_device *dev);
1057 struct drm_file *file_priv, 1153int __must_check i915_gem_idle(struct drm_device *dev);
1058 struct drm_i915_gem_request *request, 1154int __must_check i915_add_request(struct drm_device *dev,
1059 struct intel_ring_buffer *ring); 1155 struct drm_file *file_priv,
1060int i915_do_wait_request(struct drm_device *dev, 1156 struct drm_i915_gem_request *request,
1061 uint32_t seqno, 1157 struct intel_ring_buffer *ring);
1062 bool interruptible, 1158int __must_check i915_do_wait_request(struct drm_device *dev,
1063 struct intel_ring_buffer *ring); 1159 uint32_t seqno,
1160 bool interruptible,
1161 struct intel_ring_buffer *ring);
1064int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 1162int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
1065int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 1163int __must_check
1066 int write); 1164i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
1067int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj, 1165 bool write);
1068 bool pipelined); 1166int __must_check
1167i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
1168 struct intel_ring_buffer *pipelined);
1069int i915_gem_attach_phys_object(struct drm_device *dev, 1169int i915_gem_attach_phys_object(struct drm_device *dev,
1070 struct drm_gem_object *obj, 1170 struct drm_i915_gem_object *obj,
1071 int id, 1171 int id,
1072 int align); 1172 int align);
1073void i915_gem_detach_phys_object(struct drm_device *dev, 1173void i915_gem_detach_phys_object(struct drm_device *dev,
1074 struct drm_gem_object *obj); 1174 struct drm_i915_gem_object *obj);
1075void i915_gem_free_all_phys_object(struct drm_device *dev); 1175void i915_gem_free_all_phys_object(struct drm_device *dev);
1076void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv); 1176void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1077 1177
1078void i915_gem_shrinker_init(void); 1178/* i915_gem_gtt.c */
1079void i915_gem_shrinker_exit(void); 1179void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1180int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
1181void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
1080 1182
1081/* i915_gem_evict.c */ 1183/* i915_gem_evict.c */
1082int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment); 1184int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
1083int i915_gem_evict_everything(struct drm_device *dev); 1185 unsigned alignment, bool mappable);
1084int i915_gem_evict_inactive(struct drm_device *dev); 1186int __must_check i915_gem_evict_everything(struct drm_device *dev,
1187 bool purgeable_only);
1188int __must_check i915_gem_evict_inactive(struct drm_device *dev,
1189 bool purgeable_only);
1085 1190
1086/* i915_gem_tiling.c */ 1191/* i915_gem_tiling.c */
1087void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 1192void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
1088void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj); 1193void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
1089void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj); 1194void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
1090bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
1091 int tiling_mode);
1092bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
1093 int tiling_mode);
1094 1195
1095/* i915_gem_debug.c */ 1196/* i915_gem_debug.c */
1096void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1197void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1097 const char *where, uint32_t mark); 1198 const char *where, uint32_t mark);
1098#if WATCH_LISTS 1199#if WATCH_LISTS
1099int i915_verify_lists(struct drm_device *dev); 1200int i915_verify_lists(struct drm_device *dev);
1100#else 1201#else
1101#define i915_verify_lists(dev) 0 1202#define i915_verify_lists(dev) 0
1102#endif 1203#endif
1103void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 1204void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
1104void i915_gem_dump_object(struct drm_gem_object *obj, int len, 1205 int handle);
1206void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
1105 const char *where, uint32_t mark); 1207 const char *where, uint32_t mark);
1106 1208
1107/* i915_debugfs.c */ 1209/* i915_debugfs.c */
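
The i915_seqno_passed() test kept in the hunk above compares sequence numbers as (int32_t)(seq1 - seq2) >= 0, which stays correct across 32-bit wrap-around so long as the two values are less than 2^31 apart. A self-contained worked example of the arithmetic (userspace, for illustration only):

	#include <assert.h>
	#include <stdint.h>

	/* Same comparison as i915_seqno_passed(): seq1 is "at or after"
	 * seq2 iff their unsigned difference, reinterpreted as signed,
	 * is non-negative. */
	static int seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(100, 90));		/* ordinary ordering */
		assert(!seqno_passed(90, 100));
		/* Across the wrap: 0xfffffffe + 4 wraps to 0x00000002, and
		 * 0x2 - 0xfffffffe == 4 (mod 2^32), so 2 is correctly later. */
		assert(seqno_passed(0x00000002u, 0xfffffffeu));
		assert(!seqno_passed(0xfffffffeu, 0x00000002u));
		return 0;
	}
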
@@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
1163extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval); 1265extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
1164extern bool intel_fbc_enabled(struct drm_device *dev); 1266extern bool intel_fbc_enabled(struct drm_device *dev);
1165extern bool ironlake_set_drps(struct drm_device *dev, u8 val); 1267extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
1268extern void gen6_set_rps(struct drm_device *dev, u8 val);
1166extern void intel_detect_pch (struct drm_device *dev); 1269extern void intel_detect_pch (struct drm_device *dev);
1167extern int intel_trans_dp_port_sel (struct drm_crtc *crtc); 1270extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1168 1271
@@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
1170#ifdef CONFIG_DEBUG_FS 1273#ifdef CONFIG_DEBUG_FS
1171extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev); 1274extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
1172extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error); 1275extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
1276
1277extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
1278extern void intel_display_print_error_state(struct seq_file *m,
1279 struct drm_device *dev,
1280 struct intel_display_error_state *error);
1173#endif 1281#endif
1174 1282
1283#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
1284
1285#define BEGIN_LP_RING(n) \
1286 intel_ring_begin(LP_RING(dev_priv), (n))
1287
1288#define OUT_RING(x) \
1289 intel_ring_emit(LP_RING(dev_priv), x)
1290
1291#define ADVANCE_LP_RING() \
1292 intel_ring_advance(LP_RING(dev_priv))
1293
1175/** 1294/**
1176 * Lock test for when it's just for synchronization of ring access. 1295 * Lock test for when it's just for synchronization of ring access.
1177 * 1296 *
1178 * In that case, we don't need to do it when GEM is initialized as nobody else 1297 * In that case, we don't need to do it when GEM is initialized as nobody else
1179 * has access to the ring. 1298 * has access to the ring.
1180 */ 1299 */
1181#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 1300#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
1182 if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \ 1301 if (LP_RING(dev->dev_private)->obj == NULL) \
1183 == NULL) \ 1302 LOCK_TEST_WITH_RETURN(dev, file); \
1184 LOCK_TEST_WITH_RETURN(dev, file_priv); \
1185} while (0) 1303} while (0)
1186 1304
1187static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg) 1305
1306#define __i915_read(x, y) \
1307static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
1308 u##x val = read##y(dev_priv->regs + reg); \
1309 trace_i915_reg_rw('R', reg, val, sizeof(val)); \
1310 return val; \
1311}
1312__i915_read(8, b)
1313__i915_read(16, w)
1314__i915_read(32, l)
1315__i915_read(64, q)
1316#undef __i915_read
1317
1318#define __i915_write(x, y) \
1319static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
1320 trace_i915_reg_rw('W', reg, val, sizeof(val)); \
1321 write##y(val, dev_priv->regs + reg); \
1322}
1323__i915_write(8, b)
1324__i915_write(16, w)
1325__i915_write(32, l)
1326__i915_write(64, q)
1327#undef __i915_write
1328
1329#define I915_READ8(reg) i915_read8(dev_priv, (reg))
1330#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
1331
1332#define I915_READ16(reg) i915_read16(dev_priv, (reg))
1333#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
1334#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
1335#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
1336
1337#define I915_READ(reg) i915_read32(dev_priv, (reg))
1338#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
1339#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
1340#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
1341
1342#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
1343#define I915_READ64(reg) i915_read64(dev_priv, (reg))
1344
1345#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
1346#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
1347
1348
 1349/* On SNB platforms, the forcewake bit must be set before reading ring
 1350 * registers, to prevent the GT core from powering down and stale values
 1351 * being returned.
1352 */
1353void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
 1354void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
1355static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
1188{ 1356{
1189 u32 val; 1357 u32 val;
1190 1358
1191 val = readl(dev_priv->regs + reg); 1359 if (dev_priv->info->gen >= 6) {
1192 if (dev_priv->debug_flags & I915_DEBUG_READ) 1360 __gen6_force_wake_get(dev_priv);
1193 printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg); 1361 val = I915_READ(reg);
1362 __gen6_force_wake_put(dev_priv);
1363 } else
1364 val = I915_READ(reg);
1365
1194 return val; 1366 return val;
1195} 1367}
1196 1368
1197static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg, 1369static inline void
1198 u32 val) 1370i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
1199{ 1371{
 1200 writel(val, dev_priv->regs + reg); 1372 /* Trace the write before performing the real MMIO write */
1201 if (dev_priv->debug_flags & I915_DEBUG_WRITE) 1373 trace_i915_reg_rw('W', reg, val, len);
1202 printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg); 1374 switch (len) {
1375 case 8:
1376 writeq(val, dev_priv->regs + reg);
1377 break;
1378 case 4:
1379 writel(val, dev_priv->regs + reg);
1380 break;
1381 case 2:
1382 writew(val, dev_priv->regs + reg);
1383 break;
1384 case 1:
1385 writeb(val, dev_priv->regs + reg);
1386 break;
1387 }
1203} 1388}
1204 1389
1205#define I915_READ(reg) i915_read(dev_priv, (reg))
1206#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
1207#define I915_READ16(reg) readw(dev_priv->regs + (reg))
1208#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
1209#define I915_READ8(reg) readb(dev_priv->regs + (reg))
1210#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
1211#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
1212#define I915_READ64(reg) readq(dev_priv->regs + (reg))
1213#define POSTING_READ(reg) (void)I915_READ(reg)
1214#define POSTING_READ16(reg) (void)I915_READ16(reg)
1215
1216#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
1217 I915_DEBUG_WRITE)
1218#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
1219 I915_DEBUG_WRITE))
1220
1221#define I915_VERBOSE 0
1222
1223#define BEGIN_LP_RING(n) do { \
1224 drm_i915_private_t *dev_priv__ = dev->dev_private; \
1225 if (I915_VERBOSE) \
1226 DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
1227 intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
1228} while (0)
1229
1230
1231#define OUT_RING(x) do { \
1232 drm_i915_private_t *dev_priv__ = dev->dev_private; \
1233 if (I915_VERBOSE) \
1234 DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
1235 intel_ring_emit(dev, &dev_priv__->render_ring, x); \
1236} while (0)
1237
1238#define ADVANCE_LP_RING() do { \
1239 drm_i915_private_t *dev_priv__ = dev->dev_private; \
1240 if (I915_VERBOSE) \
1241 DRM_DEBUG("ADVANCE_LP_RING %x\n", \
1242 dev_priv__->render_ring.tail); \
1243 intel_ring_advance(dev, &dev_priv__->render_ring); \
1244} while(0)
1245
1246/** 1390/**
1247 * Reads a dword out of the status page, which is written to from the command 1391 * Reads a dword out of the status page, which is written to from the command
1248 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 1392 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
1259 * The area from dword 0x20 to 0x3ff is available for driver usage. 1403 * The area from dword 0x20 to 0x3ff is available for driver usage.
1260 */ 1404 */
1261#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\ 1405#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
1262 (dev_priv->render_ring.status_page.page_addr))[reg]) 1406 (LP_RING(dev_priv)->status_page.page_addr))[reg])
1263#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX) 1407#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
1264#define I915_GEM_HWS_INDEX 0x20 1408#define I915_GEM_HWS_INDEX 0x20
1265#define I915_BREADCRUMB_INDEX 0x21 1409#define I915_BREADCRUMB_INDEX 0x21
1266 1410
1267#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
1268
1269#define IS_I830(dev) ((dev)->pci_device == 0x3577)
1270#define IS_845G(dev) ((dev)->pci_device == 0x2562)
1271#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
1272#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
1273#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
1274#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
1275#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
1276#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
1277#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
1278#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
1279#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
1280#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
1281#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
1282#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
1283#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
1284#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
1285#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
1286#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
1287#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
1288
1289#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
1290#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
1291#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
1292#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
1293#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
1294
1295#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
1296#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
1297#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
1298
1299#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
1300#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
1301
1302/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
1303 * rows, which changed the alignment requirements and fence programming.
1304 */
1305#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
1306 IS_I915GM(dev)))
1307#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
1308#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
1309#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
1310#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
1311#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
1312#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
1313/* dsparb controlled by hw only */
1314#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
1315
1316#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
1317#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
1318#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
1319#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
1320
1321#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
1322#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
1323
1324#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
1325#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1326#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1327
1328#define PRIMARY_RINGBUFFER_SIZE (128*1024)
1329
1330#endif 1411#endif
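
The __i915_read/__i915_write generators in this header stamp out one typed accessor per register width by token-pasting, replacing the old single-width i915_read()/i915_write() pair and folding the tracepoint in uniformly. Expanded by hand, __i915_read(32, l) and __i915_write(32, l) produce roughly:

	/* Hand expansion of the 32-bit case; the real macros emit the
	 * 8/16/32/64-bit variants (readb/w/l/q, writeb/w/l/q) identically. */
	static inline u32 i915_read32_expanded(struct drm_i915_private *dev_priv, u32 reg)
	{
		u32 val = readl(dev_priv->regs + reg);
		trace_i915_reg_rw('R', reg, val, sizeof(val));
		return val;
	}

	static inline void i915_write32_expanded(struct drm_i915_private *dev_priv,
						 u32 reg, u32 val)
	{
		trace_i915_reg_rw('W', reg, val, sizeof(val));
		writel(val, dev_priv->regs + reg);
	}
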
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17b1cba3b5f1..c79c0b62ef60 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,39 +34,31 @@
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/swap.h> 35#include <linux/swap.h>
36#include <linux/pci.h> 36#include <linux/pci.h>
37#include <linux/intel-gtt.h>
38 37
39static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
40 39static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
41static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, 40static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
42 bool pipelined); 41static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
43static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 42 bool write);
44static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 43static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
45static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
46 int write);
47static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
48 uint64_t offset, 44 uint64_t offset,
49 uint64_t size); 45 uint64_t size);
50static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 46static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
51static int i915_gem_object_wait_rendering(struct drm_gem_object *obj, 47static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
52 bool interruptible); 48 unsigned alignment,
53static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49 bool map_and_fenceable);
54 unsigned alignment); 50static void i915_gem_clear_fence_reg(struct drm_device *dev,
55static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 51 struct drm_i915_fence_reg *reg);
56static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 52static int i915_gem_phys_pwrite(struct drm_device *dev,
53 struct drm_i915_gem_object *obj,
57 struct drm_i915_gem_pwrite *args, 54 struct drm_i915_gem_pwrite *args,
58 struct drm_file *file_priv); 55 struct drm_file *file);
59static void i915_gem_free_object_tail(struct drm_gem_object *obj); 56static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
60 57
61static int 58static int i915_gem_inactive_shrink(struct shrinker *shrinker,
62i915_gem_object_get_pages(struct drm_gem_object *obj, 59 int nr_to_scan,
63 gfp_t gfpmask); 60 gfp_t gfp_mask);
64 61
65static void
66i915_gem_object_put_pages(struct drm_gem_object *obj);
67
68static LIST_HEAD(shrink_list);
69static DEFINE_SPINLOCK(shrink_list_lock);
70 62
71/* some bookkeeping */ 63/* some bookkeeping */
72static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv, 64static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
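
The global shrink_list/shrink_list_lock pair is gone: each device now embeds a struct shrinker (mm.inactive_shrinker in the header hunks above) with i915_gem_inactive_shrink() as its callback, matching this kernel's three-argument shrink API. A hedged sketch of the registration this implies; the actual wiring lives in i915_gem_load() and in the i915_driver_unload() hunk near the top of this diff:

	/* Hedged sketch: hook up the per-device shrinker. DEFAULT_SEEKS is
	 * the stock page-recreation cost estimate. */
	static void example_register_shrinker(struct drm_i915_private *dev_priv)
	{
		dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
		dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
		register_shrinker(&dev_priv->mm.inactive_shrinker);
	}
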
@@ -83,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
83 dev_priv->mm.object_memory -= size; 75 dev_priv->mm.object_memory -= size;
84} 76}
85 77
86static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
87 size_t size)
88{
89 dev_priv->mm.gtt_count++;
90 dev_priv->mm.gtt_memory += size;
91}
92
93static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
94 size_t size)
95{
96 dev_priv->mm.gtt_count--;
97 dev_priv->mm.gtt_memory -= size;
98}
99
100static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
101 size_t size)
102{
103 dev_priv->mm.pin_count++;
104 dev_priv->mm.pin_memory += size;
105}
106
107static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
108 size_t size)
109{
110 dev_priv->mm.pin_count--;
111 dev_priv->mm.pin_memory -= size;
112}
113
114int 78int
115i915_gem_check_is_wedged(struct drm_device *dev) 79i915_gem_check_is_wedged(struct drm_device *dev)
116{ 80{
@@ -141,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev)
141 return -EIO; 105 return -EIO;
142} 106}
143 107
144static int i915_mutex_lock_interruptible(struct drm_device *dev) 108int i915_mutex_lock_interruptible(struct drm_device *dev)
145{ 109{
146 struct drm_i915_private *dev_priv = dev->dev_private; 110 struct drm_i915_private *dev_priv = dev->dev_private;
147 int ret; 111 int ret;
@@ -164,75 +128,76 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
164} 128}
165 129
166static inline bool 130static inline bool
167i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv) 131i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
168{ 132{
169 return obj_priv->gtt_space && 133 return obj->gtt_space && !obj->active && obj->pin_count == 0;
170 !obj_priv->active &&
171 obj_priv->pin_count == 0;
172} 134}
173 135
174int i915_gem_do_init(struct drm_device *dev, 136void i915_gem_do_init(struct drm_device *dev,
175 unsigned long start, 137 unsigned long start,
176 unsigned long end) 138 unsigned long mappable_end,
139 unsigned long end)
177{ 140{
178 drm_i915_private_t *dev_priv = dev->dev_private; 141 drm_i915_private_t *dev_priv = dev->dev_private;
179 142
180 if (start >= end ||
181 (start & (PAGE_SIZE - 1)) != 0 ||
182 (end & (PAGE_SIZE - 1)) != 0) {
183 return -EINVAL;
184 }
185
186 drm_mm_init(&dev_priv->mm.gtt_space, start, 143 drm_mm_init(&dev_priv->mm.gtt_space, start,
187 end - start); 144 end - start);
188 145
189 dev_priv->mm.gtt_total = end - start; 146 dev_priv->mm.gtt_total = end - start;
190 147 dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
191 return 0; 148 dev_priv->mm.gtt_mappable_end = mappable_end;
192} 149}
193 150
194int 151int
195i915_gem_init_ioctl(struct drm_device *dev, void *data, 152i915_gem_init_ioctl(struct drm_device *dev, void *data,
196 struct drm_file *file_priv) 153 struct drm_file *file)
197{ 154{
198 struct drm_i915_gem_init *args = data; 155 struct drm_i915_gem_init *args = data;
199 int ret; 156
157 if (args->gtt_start >= args->gtt_end ||
158 (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
159 return -EINVAL;
200 160
201 mutex_lock(&dev->struct_mutex); 161 mutex_lock(&dev->struct_mutex);
202 ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end); 162 i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
203 mutex_unlock(&dev->struct_mutex); 163 mutex_unlock(&dev->struct_mutex);
204 164
205 return ret; 165 return 0;
206} 166}
207 167
208int 168int
209i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, 169i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
210 struct drm_file *file_priv) 170 struct drm_file *file)
211{ 171{
212 struct drm_i915_private *dev_priv = dev->dev_private; 172 struct drm_i915_private *dev_priv = dev->dev_private;
213 struct drm_i915_gem_get_aperture *args = data; 173 struct drm_i915_gem_get_aperture *args = data;
174 struct drm_i915_gem_object *obj;
175 size_t pinned;
214 176
215 if (!(dev->driver->driver_features & DRIVER_GEM)) 177 if (!(dev->driver->driver_features & DRIVER_GEM))
216 return -ENODEV; 178 return -ENODEV;
217 179
180 pinned = 0;
218 mutex_lock(&dev->struct_mutex); 181 mutex_lock(&dev->struct_mutex);
219 args->aper_size = dev_priv->mm.gtt_total; 182 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
220 args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory; 183 pinned += obj->gtt_space->size;
221 mutex_unlock(&dev->struct_mutex); 184 mutex_unlock(&dev->struct_mutex);
222 185
186 args->aper_size = dev_priv->mm.gtt_total;
 186 args->aper_available_size = args->aper_size - pinned;
188
223 return 0; 189 return 0;
224} 190}
225 191
226
227/** 192/**
228 * Creates a new mm object and returns a handle to it. 193 * Creates a new mm object and returns a handle to it.
229 */ 194 */
230int 195int
231i915_gem_create_ioctl(struct drm_device *dev, void *data, 196i915_gem_create_ioctl(struct drm_device *dev, void *data,
232 struct drm_file *file_priv) 197 struct drm_file *file)
233{ 198{
234 struct drm_i915_gem_create *args = data; 199 struct drm_i915_gem_create *args = data;
235 struct drm_gem_object *obj; 200 struct drm_i915_gem_object *obj;
236 int ret; 201 int ret;
237 u32 handle; 202 u32 handle;
238 203
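
The new ioctl-level sanity check folds two page-alignment tests into one expression: OR-ing gtt_start and gtt_end preserves every low bit set in either value, so masking with PAGE_SIZE - 1 is non-zero iff at least one of them is misaligned. A self-contained check of the idiom (userspace, 4 KiB pages assumed):

	#include <assert.h>

	#define EX_PAGE_SIZE 4096ul

	int main(void)
	{
		unsigned long start = 0x10000, end = 0x20000;	/* both aligned */
		assert(((end | start) & (EX_PAGE_SIZE - 1)) == 0);

		start = 0x10040;				/* 64 bytes off */
		/* 0x10040 | 0x20000 = 0x30040; masking with 0xfff leaves
		 * 0x040, so the combined test rejects the pair. */
		assert(((end | start) & (EX_PAGE_SIZE - 1)) != 0);
		return 0;
	}
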
@@ -243,45 +208,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
243 if (obj == NULL) 208 if (obj == NULL)
244 return -ENOMEM; 209 return -ENOMEM;
245 210
246 ret = drm_gem_handle_create(file_priv, obj, &handle); 211 ret = drm_gem_handle_create(file, &obj->base, &handle);
247 if (ret) { 212 if (ret) {
248 drm_gem_object_release(obj); 213 drm_gem_object_release(&obj->base);
249 i915_gem_info_remove_obj(dev->dev_private, obj->size); 214 i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
250 kfree(obj); 215 kfree(obj);
251 return ret; 216 return ret;
252 } 217 }
253 218
254 /* drop reference from allocate - handle holds it now */ 219 /* drop reference from allocate - handle holds it now */
255 drm_gem_object_unreference(obj); 220 drm_gem_object_unreference(&obj->base);
256 trace_i915_gem_object_create(obj); 221 trace_i915_gem_object_create(obj);
257 222
258 args->handle = handle; 223 args->handle = handle;
259 return 0; 224 return 0;
260} 225}
261 226
262static inline int 227static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
263fast_shmem_read(struct page **pages,
264 loff_t page_base, int page_offset,
265 char __user *data,
266 int length)
267{ 228{
268 char *vaddr; 229 drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
269 int ret;
270
271 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
272 ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
273 kunmap_atomic(vaddr);
274
275 return ret;
276}
277
278static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
279{
280 drm_i915_private_t *dev_priv = obj->dev->dev_private;
281 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
282 230
283 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && 231 return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
284 obj_priv->tiling_mode != I915_TILING_NONE; 232 obj->tiling_mode != I915_TILING_NONE;
285} 233}
286 234
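The swizzle test above matters because I915_BIT_6_SWIZZLE_9_10_17 folds physical address bit 17 into bit 6, and bit 17 is not visible inside a 4KiB page, so CPU copies must compensate per page. A hedged sketch of the compensation slow_shmem_bit17_copy() performs (flip bit 6, i.e. XOR the offset with 64, when the backing page's physical bit 17 is set):

    #include <stdint.h>
    #include <stdbool.h>

    /* Bits 9 and 10 are visible in the in-page offset and userspace
     * already accounts for them; bit 17 depends on which physical page
     * backs the offset, so the kernel copy flips bit 6 for it. */
    static inline uint32_t bit17_swizzled_offset(uint32_t page_offset,
                                                 bool phys_bit17_set)
    {
            return phys_bit17_set ? (page_offset ^ 64) : page_offset;
    }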
287static inline void 235static inline void
@@ -357,38 +305,51 @@ slow_shmem_bit17_copy(struct page *gpu_page,
357 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow(). 305 * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
358 */ 306 */
359static int 307static int
360i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj, 308i915_gem_shmem_pread_fast(struct drm_device *dev,
309 struct drm_i915_gem_object *obj,
361 struct drm_i915_gem_pread *args, 310 struct drm_i915_gem_pread *args,
362 struct drm_file *file_priv) 311 struct drm_file *file)
363{ 312{
364 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 313 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
365 ssize_t remain; 314 ssize_t remain;
366 loff_t offset, page_base; 315 loff_t offset;
367 char __user *user_data; 316 char __user *user_data;
368 int page_offset, page_length; 317 int page_offset, page_length;
369 318
370 user_data = (char __user *) (uintptr_t) args->data_ptr; 319 user_data = (char __user *) (uintptr_t) args->data_ptr;
371 remain = args->size; 320 remain = args->size;
372 321
373 obj_priv = to_intel_bo(obj);
374 offset = args->offset; 322 offset = args->offset;
375 323
376 while (remain > 0) { 324 while (remain > 0) {
325 struct page *page;
326 char *vaddr;
327 int ret;
328
377 /* Operation in this page 329 /* Operation in this page
378 * 330 *
379 * page_base = page offset within aperture
380 * page_offset = offset within page 331 * page_offset = offset within page
381 * page_length = bytes to copy for this page 332 * page_length = bytes to copy for this page
382 */ 333 */
383 page_base = (offset & ~(PAGE_SIZE-1));
384 page_offset = offset & (PAGE_SIZE-1); 334 page_offset = offset & (PAGE_SIZE-1);
385 page_length = remain; 335 page_length = remain;
386 if ((page_offset + remain) > PAGE_SIZE) 336 if ((page_offset + remain) > PAGE_SIZE)
387 page_length = PAGE_SIZE - page_offset; 337 page_length = PAGE_SIZE - page_offset;
388 338
389 if (fast_shmem_read(obj_priv->pages, 339 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
390 page_base, page_offset, 340 GFP_HIGHUSER | __GFP_RECLAIMABLE);
391 user_data, page_length)) 341 if (IS_ERR(page))
342 return PTR_ERR(page);
343
344 vaddr = kmap_atomic(page);
345 ret = __copy_to_user_inatomic(user_data,
346 vaddr + page_offset,
347 page_length);
348 kunmap_atomic(vaddr);
349
350 mark_page_accessed(page);
351 page_cache_release(page);
352 if (ret)
392 return -EFAULT; 353 return -EFAULT;
393 354
394 remain -= page_length; 355 remain -= page_length;
@@ -399,30 +360,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
399 return 0; 360 return 0;
400} 361}
401 362
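The rewritten loop clamps each copy to the end of the current 4KiB page before looking that page up with read_cache_page_gfp(). The offset arithmetic in isolation, as a runnable userspace model (PAGE_SIZE assumed to be 4096; values are arbitrary examples):

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    int main(void)
    {
            unsigned long offset = 5000, remain = 9000;

            while (remain > 0) {
                    unsigned page_offset = offset & (PAGE_SIZE - 1);
                    unsigned page_length = remain;

                    /* Never copy past the end of the current page. */
                    if (page_offset + remain > PAGE_SIZE)
                            page_length = PAGE_SIZE - page_offset;

                    printf("page %lu: copy %u bytes at +%u\n",
                           offset / PAGE_SIZE, page_length, page_offset);

                    remain -= page_length;
                    offset += page_length;
            }
            return 0;
    }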
402static int
403i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
404{
405 int ret;
406
407 ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
408
409 /* If we've insufficient memory to map in the pages, attempt
410 * to make some space by throwing out some old buffers.
411 */
412 if (ret == -ENOMEM) {
413 struct drm_device *dev = obj->dev;
414
415 ret = i915_gem_evict_something(dev, obj->size,
416 i915_gem_get_gtt_alignment(obj));
417 if (ret)
418 return ret;
419
420 ret = i915_gem_object_get_pages(obj, 0);
421 }
422
423 return ret;
424}
425
426/** 363/**
427 * This is the fallback shmem pread path, which allocates temporary storage 364 * This is the fallback shmem pread path, which allocates temporary storage
 428 * in kernel space to copy_to_user into, outside of the struct_mutex, so we 365 * in kernel space to copy_to_user into, outside of the struct_mutex, so we
@@ -430,18 +367,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
430 * and not take page faults. 367 * and not take page faults.
431 */ 368 */
432static int 369static int
433i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj, 370i915_gem_shmem_pread_slow(struct drm_device *dev,
371 struct drm_i915_gem_object *obj,
434 struct drm_i915_gem_pread *args, 372 struct drm_i915_gem_pread *args,
435 struct drm_file *file_priv) 373 struct drm_file *file)
436{ 374{
437 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 375 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
438 struct mm_struct *mm = current->mm; 376 struct mm_struct *mm = current->mm;
439 struct page **user_pages; 377 struct page **user_pages;
440 ssize_t remain; 378 ssize_t remain;
441 loff_t offset, pinned_pages, i; 379 loff_t offset, pinned_pages, i;
442 loff_t first_data_page, last_data_page, num_pages; 380 loff_t first_data_page, last_data_page, num_pages;
443 int shmem_page_index, shmem_page_offset; 381 int shmem_page_offset;
444 int data_page_index, data_page_offset; 382 int data_page_index, data_page_offset;
445 int page_length; 383 int page_length;
446 int ret; 384 int ret;
447 uint64_t data_ptr = args->data_ptr; 385 uint64_t data_ptr = args->data_ptr;
@@ -480,19 +418,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
480 418
481 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 419 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
482 420
483 obj_priv = to_intel_bo(obj);
484 offset = args->offset; 421 offset = args->offset;
485 422
486 while (remain > 0) { 423 while (remain > 0) {
424 struct page *page;
425
487 /* Operation in this page 426 /* Operation in this page
488 * 427 *
489 * shmem_page_index = page number within shmem file
490 * shmem_page_offset = offset within page in shmem file 428 * shmem_page_offset = offset within page in shmem file
491 * data_page_index = page number in get_user_pages return 429 * data_page_index = page number in get_user_pages return
 492 * data_page_offset = offset within data_page_index page. 430 * data_page_offset = offset within data_page_index page.
493 * page_length = bytes to copy for this page 431 * page_length = bytes to copy for this page
494 */ 432 */
495 shmem_page_index = offset / PAGE_SIZE;
496 shmem_page_offset = offset & ~PAGE_MASK; 433 shmem_page_offset = offset & ~PAGE_MASK;
497 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 434 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
498 data_page_offset = data_ptr & ~PAGE_MASK; 435 data_page_offset = data_ptr & ~PAGE_MASK;
@@ -503,8 +440,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
503 if ((data_page_offset + page_length) > PAGE_SIZE) 440 if ((data_page_offset + page_length) > PAGE_SIZE)
504 page_length = PAGE_SIZE - data_page_offset; 441 page_length = PAGE_SIZE - data_page_offset;
505 442
443 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
444 GFP_HIGHUSER | __GFP_RECLAIMABLE);
445 if (IS_ERR(page))
446 return PTR_ERR(page);
447
506 if (do_bit17_swizzling) { 448 if (do_bit17_swizzling) {
507 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 449 slow_shmem_bit17_copy(page,
508 shmem_page_offset, 450 shmem_page_offset,
509 user_pages[data_page_index], 451 user_pages[data_page_index],
510 data_page_offset, 452 data_page_offset,
@@ -513,11 +455,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
513 } else { 455 } else {
514 slow_shmem_copy(user_pages[data_page_index], 456 slow_shmem_copy(user_pages[data_page_index],
515 data_page_offset, 457 data_page_offset,
516 obj_priv->pages[shmem_page_index], 458 page,
517 shmem_page_offset, 459 shmem_page_offset,
518 page_length); 460 page_length);
519 } 461 }
520 462
463 mark_page_accessed(page);
464 page_cache_release(page);
465
521 remain -= page_length; 466 remain -= page_length;
522 data_ptr += page_length; 467 data_ptr += page_length;
523 offset += page_length; 468 offset += page_length;
@@ -526,6 +471,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
526out: 471out:
527 for (i = 0; i < pinned_pages; i++) { 472 for (i = 0; i < pinned_pages; i++) {
528 SetPageDirty(user_pages[i]); 473 SetPageDirty(user_pages[i]);
474 mark_page_accessed(user_pages[i]);
529 page_cache_release(user_pages[i]); 475 page_cache_release(user_pages[i]);
530 } 476 }
531 drm_free_large(user_pages); 477 drm_free_large(user_pages);
@@ -540,11 +486,10 @@ out:
540 */ 486 */
541int 487int
542i915_gem_pread_ioctl(struct drm_device *dev, void *data, 488i915_gem_pread_ioctl(struct drm_device *dev, void *data,
543 struct drm_file *file_priv) 489 struct drm_file *file)
544{ 490{
545 struct drm_i915_gem_pread *args = data; 491 struct drm_i915_gem_pread *args = data;
546 struct drm_gem_object *obj; 492 struct drm_i915_gem_object *obj;
547 struct drm_i915_gem_object *obj_priv;
548 int ret = 0; 493 int ret = 0;
549 494
550 if (args->size == 0) 495 if (args->size == 0)
@@ -564,39 +509,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
564 if (ret) 509 if (ret)
565 return ret; 510 return ret;
566 511
567 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 512 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
568 if (obj == NULL) { 513 if (obj == NULL) {
569 ret = -ENOENT; 514 ret = -ENOENT;
570 goto unlock; 515 goto unlock;
571 } 516 }
572 obj_priv = to_intel_bo(obj);
573 517
574 /* Bounds check source. */ 518 /* Bounds check source. */
575 if (args->offset > obj->size || args->size > obj->size - args->offset) { 519 if (args->offset > obj->base.size ||
520 args->size > obj->base.size - args->offset) {
576 ret = -EINVAL; 521 ret = -EINVAL;
577 goto out; 522 goto out;
578 } 523 }
579 524
580 ret = i915_gem_object_get_pages_or_evict(obj);
581 if (ret)
582 goto out;
583
584 ret = i915_gem_object_set_cpu_read_domain_range(obj, 525 ret = i915_gem_object_set_cpu_read_domain_range(obj,
585 args->offset, 526 args->offset,
586 args->size); 527 args->size);
587 if (ret) 528 if (ret)
588 goto out_put; 529 goto out;
589 530
590 ret = -EFAULT; 531 ret = -EFAULT;
591 if (!i915_gem_object_needs_bit17_swizzle(obj)) 532 if (!i915_gem_object_needs_bit17_swizzle(obj))
592 ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv); 533 ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
593 if (ret == -EFAULT) 534 if (ret == -EFAULT)
594 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv); 535 ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
595 536
596out_put:
597 i915_gem_object_put_pages(obj);
598out: 537out:
599 drm_gem_object_unreference(obj); 538 drm_gem_object_unreference(&obj->base);
600unlock: 539unlock:
601 mutex_unlock(&dev->struct_mutex); 540 mutex_unlock(&dev->struct_mutex);
602 return ret; 541 return ret;
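The bounds check in the ioctl is deliberately written to avoid integer overflow: args->size is compared against obj->base.size - args->offset only after offset itself has been validated, so offset + size can never wrap. A standalone restatement of the idiom:

    #include <stdbool.h>
    #include <stdint.h>

    static bool range_ok(uint64_t offset, uint64_t size, uint64_t obj_size)
    {
            /* Checking offset + size > obj_size directly could wrap;
             * this form cannot. */
            return offset <= obj_size && size <= obj_size - offset;
    }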
@@ -646,32 +585,16 @@ slow_kernel_write(struct io_mapping *mapping,
646 io_mapping_unmap(dst_vaddr); 585 io_mapping_unmap(dst_vaddr);
647} 586}
648 587
649static inline int
650fast_shmem_write(struct page **pages,
651 loff_t page_base, int page_offset,
652 char __user *data,
653 int length)
654{
655 char *vaddr;
656 int ret;
657
658 vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
659 ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
660 kunmap_atomic(vaddr);
661
662 return ret;
663}
664
665/** 588/**
666 * This is the fast pwrite path, where we copy the data directly from the 589 * This is the fast pwrite path, where we copy the data directly from the
667 * user into the GTT, uncached. 590 * user into the GTT, uncached.
668 */ 591 */
669static int 592static int
670i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, 593i915_gem_gtt_pwrite_fast(struct drm_device *dev,
594 struct drm_i915_gem_object *obj,
671 struct drm_i915_gem_pwrite *args, 595 struct drm_i915_gem_pwrite *args,
672 struct drm_file *file_priv) 596 struct drm_file *file)
673{ 597{
674 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
675 drm_i915_private_t *dev_priv = dev->dev_private; 598 drm_i915_private_t *dev_priv = dev->dev_private;
676 ssize_t remain; 599 ssize_t remain;
677 loff_t offset, page_base; 600 loff_t offset, page_base;
@@ -681,8 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
681 user_data = (char __user *) (uintptr_t) args->data_ptr; 604 user_data = (char __user *) (uintptr_t) args->data_ptr;
682 remain = args->size; 605 remain = args->size;
683 606
684 obj_priv = to_intel_bo(obj); 607 offset = obj->gtt_offset + args->offset;
685 offset = obj_priv->gtt_offset + args->offset;
686 608
687 while (remain > 0) { 609 while (remain > 0) {
688 /* Operation in this page 610 /* Operation in this page
@@ -722,11 +644,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
722 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit). 644 * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
723 */ 645 */
724static int 646static int
725i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, 647i915_gem_gtt_pwrite_slow(struct drm_device *dev,
648 struct drm_i915_gem_object *obj,
726 struct drm_i915_gem_pwrite *args, 649 struct drm_i915_gem_pwrite *args,
727 struct drm_file *file_priv) 650 struct drm_file *file)
728{ 651{
729 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
730 drm_i915_private_t *dev_priv = dev->dev_private; 652 drm_i915_private_t *dev_priv = dev->dev_private;
731 ssize_t remain; 653 ssize_t remain;
732 loff_t gtt_page_base, offset; 654 loff_t gtt_page_base, offset;
@@ -763,12 +685,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
763 goto out_unpin_pages; 685 goto out_unpin_pages;
764 } 686 }
765 687
766 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 688 ret = i915_gem_object_set_to_gtt_domain(obj, true);
689 if (ret)
690 goto out_unpin_pages;
691
692 ret = i915_gem_object_put_fence(obj);
767 if (ret) 693 if (ret)
768 goto out_unpin_pages; 694 goto out_unpin_pages;
769 695
770 obj_priv = to_intel_bo(obj); 696 offset = obj->gtt_offset + args->offset;
771 offset = obj_priv->gtt_offset + args->offset;
772 697
773 while (remain > 0) { 698 while (remain > 0) {
774 /* Operation in this page 699 /* Operation in this page
@@ -814,39 +739,58 @@ out_unpin_pages:
814 * copy_from_user into the kmapped pages backing the object. 739 * copy_from_user into the kmapped pages backing the object.
815 */ 740 */
816static int 741static int
817i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj, 742i915_gem_shmem_pwrite_fast(struct drm_device *dev,
743 struct drm_i915_gem_object *obj,
818 struct drm_i915_gem_pwrite *args, 744 struct drm_i915_gem_pwrite *args,
819 struct drm_file *file_priv) 745 struct drm_file *file)
820{ 746{
821 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 747 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
822 ssize_t remain; 748 ssize_t remain;
823 loff_t offset, page_base; 749 loff_t offset;
824 char __user *user_data; 750 char __user *user_data;
825 int page_offset, page_length; 751 int page_offset, page_length;
826 752
827 user_data = (char __user *) (uintptr_t) args->data_ptr; 753 user_data = (char __user *) (uintptr_t) args->data_ptr;
828 remain = args->size; 754 remain = args->size;
829 755
830 obj_priv = to_intel_bo(obj);
831 offset = args->offset; 756 offset = args->offset;
832 obj_priv->dirty = 1; 757 obj->dirty = 1;
833 758
834 while (remain > 0) { 759 while (remain > 0) {
760 struct page *page;
761 char *vaddr;
762 int ret;
763
835 /* Operation in this page 764 /* Operation in this page
836 * 765 *
837 * page_base = page offset within aperture
838 * page_offset = offset within page 766 * page_offset = offset within page
839 * page_length = bytes to copy for this page 767 * page_length = bytes to copy for this page
840 */ 768 */
841 page_base = (offset & ~(PAGE_SIZE-1));
842 page_offset = offset & (PAGE_SIZE-1); 769 page_offset = offset & (PAGE_SIZE-1);
843 page_length = remain; 770 page_length = remain;
844 if ((page_offset + remain) > PAGE_SIZE) 771 if ((page_offset + remain) > PAGE_SIZE)
845 page_length = PAGE_SIZE - page_offset; 772 page_length = PAGE_SIZE - page_offset;
846 773
847 if (fast_shmem_write(obj_priv->pages, 774 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
848 page_base, page_offset, 775 GFP_HIGHUSER | __GFP_RECLAIMABLE);
849 user_data, page_length)) 776 if (IS_ERR(page))
777 return PTR_ERR(page);
778
779 vaddr = kmap_atomic(page, KM_USER0);
780 ret = __copy_from_user_inatomic(vaddr + page_offset,
781 user_data,
782 page_length);
783 kunmap_atomic(vaddr, KM_USER0);
784
785 set_page_dirty(page);
786 mark_page_accessed(page);
787 page_cache_release(page);
788
789 /* If we get a fault while copying data, then (presumably) our
790 * source page isn't available. Return the error and we'll
791 * retry in the slow path.
792 */
793 if (ret)
850 return -EFAULT; 794 return -EFAULT;
851 795
852 remain -= page_length; 796 remain -= page_length;
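The fast path may not fault while it holds an atomic kmap under struct_mutex, so a failed __copy_from_user_inatomic() surfaces as -EFAULT and the ioctl retries via the slow path, which pins the user pages up front. The control-flow shape, as a self-contained model (function names hypothetical):

    #include <errno.h>
    #include <stdio.h>

    static int write_fast(int simulate_fault)
    {
            /* Atomic-context copy: may not sleep, so a non-resident
             * user page makes it bail with -EFAULT instead of faulting
             * the page in. */
            return simulate_fault ? -EFAULT : 0;
    }

    static int write_slow(void)
    {
            /* May sleep: pins the user pages first, then copies. */
            return 0;
    }

    static int do_write(int simulate_fault)
    {
            int ret = write_fast(simulate_fault);

            if (ret == -EFAULT)
                    ret = write_slow();
            return ret;
    }

    int main(void)
    {
            printf("%d\n", do_write(1));    /* 0: slow path recovered */
            return 0;
    }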
@@ -865,17 +809,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
865 * struct_mutex is held. 809 * struct_mutex is held.
866 */ 810 */
867static int 811static int
868i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj, 812i915_gem_shmem_pwrite_slow(struct drm_device *dev,
813 struct drm_i915_gem_object *obj,
869 struct drm_i915_gem_pwrite *args, 814 struct drm_i915_gem_pwrite *args,
870 struct drm_file *file_priv) 815 struct drm_file *file)
871{ 816{
872 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 817 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
873 struct mm_struct *mm = current->mm; 818 struct mm_struct *mm = current->mm;
874 struct page **user_pages; 819 struct page **user_pages;
875 ssize_t remain; 820 ssize_t remain;
876 loff_t offset, pinned_pages, i; 821 loff_t offset, pinned_pages, i;
877 loff_t first_data_page, last_data_page, num_pages; 822 loff_t first_data_page, last_data_page, num_pages;
878 int shmem_page_index, shmem_page_offset; 823 int shmem_page_offset;
879 int data_page_index, data_page_offset; 824 int data_page_index, data_page_offset;
880 int page_length; 825 int page_length;
881 int ret; 826 int ret;
@@ -913,20 +858,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
913 858
914 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj); 859 do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
915 860
916 obj_priv = to_intel_bo(obj);
917 offset = args->offset; 861 offset = args->offset;
918 obj_priv->dirty = 1; 862 obj->dirty = 1;
919 863
920 while (remain > 0) { 864 while (remain > 0) {
865 struct page *page;
866
921 /* Operation in this page 867 /* Operation in this page
922 * 868 *
923 * shmem_page_index = page number within shmem file
924 * shmem_page_offset = offset within page in shmem file 869 * shmem_page_offset = offset within page in shmem file
925 * data_page_index = page number in get_user_pages return 870 * data_page_index = page number in get_user_pages return
 926 * data_page_offset = offset within data_page_index page. 871 * data_page_offset = offset within data_page_index page.
927 * page_length = bytes to copy for this page 872 * page_length = bytes to copy for this page
928 */ 873 */
929 shmem_page_index = offset / PAGE_SIZE;
930 shmem_page_offset = offset & ~PAGE_MASK; 874 shmem_page_offset = offset & ~PAGE_MASK;
931 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 875 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
932 data_page_offset = data_ptr & ~PAGE_MASK; 876 data_page_offset = data_ptr & ~PAGE_MASK;
@@ -937,21 +881,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
937 if ((data_page_offset + page_length) > PAGE_SIZE) 881 if ((data_page_offset + page_length) > PAGE_SIZE)
938 page_length = PAGE_SIZE - data_page_offset; 882 page_length = PAGE_SIZE - data_page_offset;
939 883
884 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
885 GFP_HIGHUSER | __GFP_RECLAIMABLE);
886 if (IS_ERR(page)) {
887 ret = PTR_ERR(page);
888 goto out;
889 }
890
940 if (do_bit17_swizzling) { 891 if (do_bit17_swizzling) {
941 slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index], 892 slow_shmem_bit17_copy(page,
942 shmem_page_offset, 893 shmem_page_offset,
943 user_pages[data_page_index], 894 user_pages[data_page_index],
944 data_page_offset, 895 data_page_offset,
945 page_length, 896 page_length,
946 0); 897 0);
947 } else { 898 } else {
948 slow_shmem_copy(obj_priv->pages[shmem_page_index], 899 slow_shmem_copy(page,
949 shmem_page_offset, 900 shmem_page_offset,
950 user_pages[data_page_index], 901 user_pages[data_page_index],
951 data_page_offset, 902 data_page_offset,
952 page_length); 903 page_length);
953 } 904 }
954 905
906 set_page_dirty(page);
907 mark_page_accessed(page);
908 page_cache_release(page);
909
955 remain -= page_length; 910 remain -= page_length;
956 data_ptr += page_length; 911 data_ptr += page_length;
957 offset += page_length; 912 offset += page_length;
@@ -975,8 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
975 struct drm_file *file) 930 struct drm_file *file)
976{ 931{
977 struct drm_i915_gem_pwrite *args = data; 932 struct drm_i915_gem_pwrite *args = data;
978 struct drm_gem_object *obj; 933 struct drm_i915_gem_object *obj;
979 struct drm_i915_gem_object *obj_priv;
980 int ret; 934 int ret;
981 935
982 if (args->size == 0) 936 if (args->size == 0)
@@ -996,15 +950,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
996 if (ret) 950 if (ret)
997 return ret; 951 return ret;
998 952
999 obj = drm_gem_object_lookup(dev, file, args->handle); 953 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1000 if (obj == NULL) { 954 if (obj == NULL) {
1001 ret = -ENOENT; 955 ret = -ENOENT;
1002 goto unlock; 956 goto unlock;
1003 } 957 }
1004 obj_priv = to_intel_bo(obj);
1005 958
1006 /* Bounds check destination. */ 959 /* Bounds check destination. */
1007 if (args->offset > obj->size || args->size > obj->size - args->offset) { 960 if (args->offset > obj->base.size ||
961 args->size > obj->base.size - args->offset) {
1008 ret = -EINVAL; 962 ret = -EINVAL;
1009 goto out; 963 goto out;
1010 } 964 }
@@ -1015,16 +969,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1015 * pread/pwrite currently are reading and writing from the CPU 969 * pread/pwrite currently are reading and writing from the CPU
1016 * perspective, requiring manual detiling by the client. 970 * perspective, requiring manual detiling by the client.
1017 */ 971 */
1018 if (obj_priv->phys_obj) 972 if (obj->phys_obj)
1019 ret = i915_gem_phys_pwrite(dev, obj, args, file); 973 ret = i915_gem_phys_pwrite(dev, obj, args, file);
1020 else if (obj_priv->tiling_mode == I915_TILING_NONE && 974 else if (obj->gtt_space &&
1021 obj_priv->gtt_space && 975 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
1022 obj->write_domain != I915_GEM_DOMAIN_CPU) { 976 ret = i915_gem_object_pin(obj, 0, true);
1023 ret = i915_gem_object_pin(obj, 0);
1024 if (ret) 977 if (ret)
1025 goto out; 978 goto out;
1026 979
1027 ret = i915_gem_object_set_to_gtt_domain(obj, 1); 980 ret = i915_gem_object_set_to_gtt_domain(obj, true);
981 if (ret)
982 goto out_unpin;
983
984 ret = i915_gem_object_put_fence(obj);
1028 if (ret) 985 if (ret)
1029 goto out_unpin; 986 goto out_unpin;
1030 987
@@ -1035,26 +992,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1035out_unpin: 992out_unpin:
1036 i915_gem_object_unpin(obj); 993 i915_gem_object_unpin(obj);
1037 } else { 994 } else {
1038 ret = i915_gem_object_get_pages_or_evict(obj);
1039 if (ret)
1040 goto out;
1041
1042 ret = i915_gem_object_set_to_cpu_domain(obj, 1); 995 ret = i915_gem_object_set_to_cpu_domain(obj, 1);
1043 if (ret) 996 if (ret)
1044 goto out_put; 997 goto out;
1045 998
1046 ret = -EFAULT; 999 ret = -EFAULT;
1047 if (!i915_gem_object_needs_bit17_swizzle(obj)) 1000 if (!i915_gem_object_needs_bit17_swizzle(obj))
1048 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file); 1001 ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
1049 if (ret == -EFAULT) 1002 if (ret == -EFAULT)
1050 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file); 1003 ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
1051
1052out_put:
1053 i915_gem_object_put_pages(obj);
1054 } 1004 }
1055 1005
1056out: 1006out:
1057 drm_gem_object_unreference(obj); 1007 drm_gem_object_unreference(&obj->base);
1058unlock: 1008unlock:
1059 mutex_unlock(&dev->struct_mutex); 1009 mutex_unlock(&dev->struct_mutex);
1060 return ret; 1010 return ret;
@@ -1066,12 +1016,10 @@ unlock:
1066 */ 1016 */
1067int 1017int
1068i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 1018i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1069 struct drm_file *file_priv) 1019 struct drm_file *file)
1070{ 1020{
1071 struct drm_i915_private *dev_priv = dev->dev_private;
1072 struct drm_i915_gem_set_domain *args = data; 1021 struct drm_i915_gem_set_domain *args = data;
1073 struct drm_gem_object *obj; 1022 struct drm_i915_gem_object *obj;
1074 struct drm_i915_gem_object *obj_priv;
1075 uint32_t read_domains = args->read_domains; 1023 uint32_t read_domains = args->read_domains;
1076 uint32_t write_domain = args->write_domain; 1024 uint32_t write_domain = args->write_domain;
1077 int ret; 1025 int ret;
@@ -1096,28 +1044,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1096 if (ret) 1044 if (ret)
1097 return ret; 1045 return ret;
1098 1046
1099 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1047 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1100 if (obj == NULL) { 1048 if (obj == NULL) {
1101 ret = -ENOENT; 1049 ret = -ENOENT;
1102 goto unlock; 1050 goto unlock;
1103 } 1051 }
1104 obj_priv = to_intel_bo(obj);
1105
1106 intel_mark_busy(dev, obj);
1107 1052
1108 if (read_domains & I915_GEM_DOMAIN_GTT) { 1053 if (read_domains & I915_GEM_DOMAIN_GTT) {
1109 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 1054 ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1110 1055
1111 /* Update the LRU on the fence for the CPU access that's
1112 * about to occur.
1113 */
1114 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1115 struct drm_i915_fence_reg *reg =
1116 &dev_priv->fence_regs[obj_priv->fence_reg];
1117 list_move_tail(&reg->lru_list,
1118 &dev_priv->mm.fence_list);
1119 }
1120
1121 /* Silently promote "you're not bound, there was nothing to do" 1056 /* Silently promote "you're not bound, there was nothing to do"
1122 * to success, since the client was just asking us to 1057 * to success, since the client was just asking us to
1123 * make sure everything was done. 1058 * make sure everything was done.
@@ -1128,11 +1063,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1128 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 1063 ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1129 } 1064 }
1130 1065
1131 /* Maintain LRU order of "inactive" objects */ 1066 drm_gem_object_unreference(&obj->base);
1132 if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
1133 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1134
1135 drm_gem_object_unreference(obj);
1136unlock: 1067unlock:
1137 mutex_unlock(&dev->struct_mutex); 1068 mutex_unlock(&dev->struct_mutex);
1138 return ret; 1069 return ret;
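Clients drive this path with the SET_DOMAIN ioctl before touching a mapping, so the kernel can flush and invalidate caches on their behalf. A minimal userspace sketch of requesting GTT-coherent access (error handling omitted; the i915_drm.h include path varies with the libdrm installation):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Ask the kernel to make @handle coherent for GTT reads (and
     * writes, if @writing), flushing caches as needed. */
    static int set_domain_gtt(int drm_fd, uint32_t handle, int writing)
    {
            struct drm_i915_gem_set_domain arg = {
                    .handle = handle,
                    .read_domains = I915_GEM_DOMAIN_GTT,
                    .write_domain = writing ? I915_GEM_DOMAIN_GTT : 0,
            };

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
    }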
@@ -1143,10 +1074,10 @@ unlock:
1143 */ 1074 */
1144int 1075int
1145i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 1076i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1146 struct drm_file *file_priv) 1077 struct drm_file *file)
1147{ 1078{
1148 struct drm_i915_gem_sw_finish *args = data; 1079 struct drm_i915_gem_sw_finish *args = data;
1149 struct drm_gem_object *obj; 1080 struct drm_i915_gem_object *obj;
1150 int ret = 0; 1081 int ret = 0;
1151 1082
1152 if (!(dev->driver->driver_features & DRIVER_GEM)) 1083 if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1156,17 +1087,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1156 if (ret) 1087 if (ret)
1157 return ret; 1088 return ret;
1158 1089
1159 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1090 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1160 if (obj == NULL) { 1091 if (obj == NULL) {
1161 ret = -ENOENT; 1092 ret = -ENOENT;
1162 goto unlock; 1093 goto unlock;
1163 } 1094 }
1164 1095
1165 /* Pinned buffers may be scanout, so flush the cache */ 1096 /* Pinned buffers may be scanout, so flush the cache */
1166 if (to_intel_bo(obj)->pin_count) 1097 if (obj->pin_count)
1167 i915_gem_object_flush_cpu_write_domain(obj); 1098 i915_gem_object_flush_cpu_write_domain(obj);
1168 1099
1169 drm_gem_object_unreference(obj); 1100 drm_gem_object_unreference(&obj->base);
1170unlock: 1101unlock:
1171 mutex_unlock(&dev->struct_mutex); 1102 mutex_unlock(&dev->struct_mutex);
1172 return ret; 1103 return ret;
@@ -1181,8 +1112,9 @@ unlock:
1181 */ 1112 */
1182int 1113int
1183i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 1114i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1184 struct drm_file *file_priv) 1115 struct drm_file *file)
1185{ 1116{
1117 struct drm_i915_private *dev_priv = dev->dev_private;
1186 struct drm_i915_gem_mmap *args = data; 1118 struct drm_i915_gem_mmap *args = data;
1187 struct drm_gem_object *obj; 1119 struct drm_gem_object *obj;
1188 loff_t offset; 1120 loff_t offset;
@@ -1191,10 +1123,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1191 if (!(dev->driver->driver_features & DRIVER_GEM)) 1123 if (!(dev->driver->driver_features & DRIVER_GEM))
1192 return -ENODEV; 1124 return -ENODEV;
1193 1125
1194 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1126 obj = drm_gem_object_lookup(dev, file, args->handle);
1195 if (obj == NULL) 1127 if (obj == NULL)
1196 return -ENOENT; 1128 return -ENOENT;
1197 1129
1130 if (obj->size > dev_priv->mm.gtt_mappable_end) {
1131 drm_gem_object_unreference_unlocked(obj);
1132 return -E2BIG;
1133 }
1134
1198 offset = args->offset; 1135 offset = args->offset;
1199 1136
1200 down_write(&current->mm->mmap_sem); 1137 down_write(&current->mm->mmap_sem);
@@ -1229,10 +1166,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1229 */ 1166 */
1230int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1167int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1231{ 1168{
1232 struct drm_gem_object *obj = vma->vm_private_data; 1169 struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
1233 struct drm_device *dev = obj->dev; 1170 struct drm_device *dev = obj->base.dev;
1234 drm_i915_private_t *dev_priv = dev->dev_private; 1171 drm_i915_private_t *dev_priv = dev->dev_private;
1235 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1236 pgoff_t page_offset; 1172 pgoff_t page_offset;
1237 unsigned long pfn; 1173 unsigned long pfn;
1238 int ret = 0; 1174 int ret = 0;
@@ -1244,27 +1180,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1244 1180
1245 /* Now bind it into the GTT if needed */ 1181 /* Now bind it into the GTT if needed */
1246 mutex_lock(&dev->struct_mutex); 1182 mutex_lock(&dev->struct_mutex);
1247 if (!obj_priv->gtt_space) {
1248 ret = i915_gem_object_bind_to_gtt(obj, 0);
1249 if (ret)
1250 goto unlock;
1251 1183
1252 ret = i915_gem_object_set_to_gtt_domain(obj, write); 1184 if (!obj->map_and_fenceable) {
1185 ret = i915_gem_object_unbind(obj);
1253 if (ret) 1186 if (ret)
1254 goto unlock; 1187 goto unlock;
1255 } 1188 }
1256 1189 if (!obj->gtt_space) {
1257 /* Need a new fence register? */ 1190 ret = i915_gem_object_bind_to_gtt(obj, 0, true);
1258 if (obj_priv->tiling_mode != I915_TILING_NONE) {
1259 ret = i915_gem_object_get_fence_reg(obj, true);
1260 if (ret) 1191 if (ret)
1261 goto unlock; 1192 goto unlock;
1262 } 1193 }
1263 1194
1264 if (i915_gem_object_is_inactive(obj_priv)) 1195 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1265 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); 1196 if (ret)
1197 goto unlock;
1198
1199 if (obj->tiling_mode == I915_TILING_NONE)
1200 ret = i915_gem_object_put_fence(obj);
1201 else
1202 ret = i915_gem_object_get_fence(obj, NULL, true);
1203 if (ret)
1204 goto unlock;
1205
1206 if (i915_gem_object_is_inactive(obj))
1207 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1208
1209 obj->fault_mappable = true;
1266 1210
1267 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 1211 pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
1268 page_offset; 1212 page_offset;
1269 1213
1270 /* Finally, remap it using the new GTT offset */ 1214 /* Finally, remap it using the new GTT offset */
@@ -1273,11 +1217,12 @@ unlock:
1273 mutex_unlock(&dev->struct_mutex); 1217 mutex_unlock(&dev->struct_mutex);
1274 1218
1275 switch (ret) { 1219 switch (ret) {
1220 case -EAGAIN:
1221 set_need_resched();
1276 case 0: 1222 case 0:
1277 case -ERESTARTSYS: 1223 case -ERESTARTSYS:
1278 return VM_FAULT_NOPAGE; 1224 return VM_FAULT_NOPAGE;
1279 case -ENOMEM: 1225 case -ENOMEM:
1280 case -EAGAIN:
1281 return VM_FAULT_OOM; 1226 return VM_FAULT_OOM;
1282 default: 1227 default:
1283 return VM_FAULT_SIGBUS; 1228 return VM_FAULT_SIGBUS;
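The reordered switch changes the meaning of -EAGAIN: it now yields the CPU and retries the fault rather than reporting OOM. A userspace model of the mapping (constant values illustrative, not the kernel's; the fallthrough from -EAGAIN is deliberate):

    #include <errno.h>

    enum vmf_ret { VMF_NOPAGE, VMF_OOM, VMF_SIGBUS };

    static enum vmf_ret map_fault_ret(int ret)
    {
            switch (ret) {
            case -EAGAIN:           /* GPU busy: yield, then... */
                    /* set_need_resched() in the kernel; fall through */
            case 0:
            case -ERESTARTSYS:
                    return VMF_NOPAGE;      /* retry the faulting access */
            case -ENOMEM:
                    return VMF_OOM;
            default:
                    return VMF_SIGBUS;
            }
    }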
@@ -1296,37 +1241,39 @@ unlock:
1296 * This routine allocates and attaches a fake offset for @obj. 1241 * This routine allocates and attaches a fake offset for @obj.
1297 */ 1242 */
1298static int 1243static int
1299i915_gem_create_mmap_offset(struct drm_gem_object *obj) 1244i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
1300{ 1245{
1301 struct drm_device *dev = obj->dev; 1246 struct drm_device *dev = obj->base.dev;
1302 struct drm_gem_mm *mm = dev->mm_private; 1247 struct drm_gem_mm *mm = dev->mm_private;
1303 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1304 struct drm_map_list *list; 1248 struct drm_map_list *list;
1305 struct drm_local_map *map; 1249 struct drm_local_map *map;
1306 int ret = 0; 1250 int ret = 0;
1307 1251
1308 /* Set the object up for mmap'ing */ 1252 /* Set the object up for mmap'ing */
1309 list = &obj->map_list; 1253 list = &obj->base.map_list;
1310 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL); 1254 list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
1311 if (!list->map) 1255 if (!list->map)
1312 return -ENOMEM; 1256 return -ENOMEM;
1313 1257
1314 map = list->map; 1258 map = list->map;
1315 map->type = _DRM_GEM; 1259 map->type = _DRM_GEM;
1316 map->size = obj->size; 1260 map->size = obj->base.size;
1317 map->handle = obj; 1261 map->handle = obj;
1318 1262
1319 /* Get a DRM GEM mmap offset allocated... */ 1263 /* Get a DRM GEM mmap offset allocated... */
1320 list->file_offset_node = drm_mm_search_free(&mm->offset_manager, 1264 list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
1321 obj->size / PAGE_SIZE, 0, 0); 1265 obj->base.size / PAGE_SIZE,
1266 0, 0);
1322 if (!list->file_offset_node) { 1267 if (!list->file_offset_node) {
1323 DRM_ERROR("failed to allocate offset for bo %d\n", obj->name); 1268 DRM_ERROR("failed to allocate offset for bo %d\n",
1269 obj->base.name);
1324 ret = -ENOSPC; 1270 ret = -ENOSPC;
1325 goto out_free_list; 1271 goto out_free_list;
1326 } 1272 }
1327 1273
1328 list->file_offset_node = drm_mm_get_block(list->file_offset_node, 1274 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
1329 obj->size / PAGE_SIZE, 0); 1275 obj->base.size / PAGE_SIZE,
1276 0);
1330 if (!list->file_offset_node) { 1277 if (!list->file_offset_node) {
1331 ret = -ENOMEM; 1278 ret = -ENOMEM;
1332 goto out_free_list; 1279 goto out_free_list;
@@ -1339,16 +1286,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
1339 goto out_free_mm; 1286 goto out_free_mm;
1340 } 1287 }
1341 1288
1342 /* By now we should be all set, any drm_mmap request on the offset
1343 * below will get to our mmap & fault handler */
1344 obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
1345
1346 return 0; 1289 return 0;
1347 1290
1348out_free_mm: 1291out_free_mm:
1349 drm_mm_put_block(list->file_offset_node); 1292 drm_mm_put_block(list->file_offset_node);
1350out_free_list: 1293out_free_list:
1351 kfree(list->map); 1294 kfree(list->map);
1295 list->map = NULL;
1352 1296
1353 return ret; 1297 return ret;
1354} 1298}
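From userspace, the fake offset handed back by the ioctl is simply passed to mmap() on the DRM fd, where drm_gem_mmap() routes faults to i915_gem_fault(). A hedged usage sketch (buffer setup and error handling omitted; the i915_drm.h include path varies with the libdrm installation):

    #include <stdint.h>
    #include <stddef.h>
    #include <sys/mman.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Fetch the fake offset for @handle, then mmap it through the DRM
     * fd; faults on the mapping land in i915_gem_fault(). */
    static void *map_gtt(int drm_fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

            if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
                    return MAP_FAILED;

            return mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, drm_fd, (off_t)arg.offset);
    }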
@@ -1368,38 +1312,51 @@ out_free_list:
1368 * fixup by i915_gem_fault(). 1312 * fixup by i915_gem_fault().
1369 */ 1313 */
1370void 1314void
1371i915_gem_release_mmap(struct drm_gem_object *obj) 1315i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1372{ 1316{
1373 struct drm_device *dev = obj->dev; 1317 if (!obj->fault_mappable)
1374 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1318 return;
1319
1320 unmap_mapping_range(obj->base.dev->dev_mapping,
1321 (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1322 obj->base.size, 1);
1375 1323
1376 if (dev->dev_mapping) 1324 obj->fault_mappable = false;
1377 unmap_mapping_range(dev->dev_mapping,
1378 obj_priv->mmap_offset, obj->size, 1);
1379} 1325}
1380 1326
1381static void 1327static void
1382i915_gem_free_mmap_offset(struct drm_gem_object *obj) 1328i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
1383{ 1329{
1384 struct drm_device *dev = obj->dev; 1330 struct drm_device *dev = obj->base.dev;
1385 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1386 struct drm_gem_mm *mm = dev->mm_private; 1331 struct drm_gem_mm *mm = dev->mm_private;
1387 struct drm_map_list *list; 1332 struct drm_map_list *list = &obj->base.map_list;
1388 1333
1389 list = &obj->map_list;
1390 drm_ht_remove_item(&mm->offset_hash, &list->hash); 1334 drm_ht_remove_item(&mm->offset_hash, &list->hash);
1335 drm_mm_put_block(list->file_offset_node);
1336 kfree(list->map);
1337 list->map = NULL;
1338}
1391 1339
1392 if (list->file_offset_node) { 1340static uint32_t
1393 drm_mm_put_block(list->file_offset_node); 1341i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
1394 list->file_offset_node = NULL; 1342{
1395 } 1343 struct drm_device *dev = obj->base.dev;
1344 uint32_t size;
1396 1345
1397 if (list->map) { 1346 if (INTEL_INFO(dev)->gen >= 4 ||
1398 kfree(list->map); 1347 obj->tiling_mode == I915_TILING_NONE)
1399 list->map = NULL; 1348 return obj->base.size;
1400 }
1401 1349
1402 obj_priv->mmap_offset = 0; 1350 /* Previous chips need a power-of-two fence region when tiling */
1351 if (INTEL_INFO(dev)->gen == 3)
1352 size = 1024*1024;
1353 else
1354 size = 512*1024;
1355
1356 while (size < obj->base.size)
1357 size <<= 1;
1358
1359 return size;
1403} 1360}
1404 1361
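The new i915_gem_get_gtt_size() encodes the hardware fence constraint: on gen2/3 a fence region must be a power of two, starting at 512KiB (1MiB on gen3) and doubling until the object fits, while gen4+ and untiled objects just use the object size. The tiled gen2/3 branch in isolation:

    #include <stdint.h>

    /* Tiled objects on gen2/3 only; gen4+ and untiled objects use
     * obj->base.size directly. */
    static uint32_t fence_size(uint32_t obj_size, int gen)
    {
            uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

            while (size < obj_size)
                    size <<= 1;     /* power-of-two fence regions */
            return size;
    }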
1405/** 1362/**
@@ -1407,42 +1364,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
1407 * @obj: object to check 1364 * @obj: object to check
1408 * 1365 *
1409 * Return the required GTT alignment for an object, taking into account 1366 * Return the required GTT alignment for an object, taking into account
1410 * potential fence register mapping if needed. 1367 * potential fence register mapping.
1411 */ 1368 */
1412static uint32_t 1369static uint32_t
1413i915_gem_get_gtt_alignment(struct drm_gem_object *obj) 1370i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1414{ 1371{
1415 struct drm_device *dev = obj->dev; 1372 struct drm_device *dev = obj->base.dev;
1416 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1417 int start, i;
1418 1373
1419 /* 1374 /*
1420 * Minimum alignment is 4k (GTT page size), but might be greater 1375 * Minimum alignment is 4k (GTT page size), but might be greater
1421 * if a fence register is needed for the object. 1376 * if a fence register is needed for the object.
1422 */ 1377 */
1423 if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE) 1378 if (INTEL_INFO(dev)->gen >= 4 ||
1379 obj->tiling_mode == I915_TILING_NONE)
1424 return 4096; 1380 return 4096;
1425 1381
1426 /* 1382 /*
1427 * Previous chips need to be aligned to the size of the smallest 1383 * Previous chips need to be aligned to the size of the smallest
1428 * fence register that can contain the object. 1384 * fence register that can contain the object.
1429 */ 1385 */
1430 if (INTEL_INFO(dev)->gen == 3) 1386 return i915_gem_get_gtt_size(obj);
1431 start = 1024*1024; 1387}
1432 else
1433 start = 512*1024;
1434 1388
1435 for (i = start; i < obj->size; i <<= 1) 1389/**
1436 ; 1390 * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
1391 * unfenced object
1392 * @obj: object to check
1393 *
1394 * Return the required GTT alignment for an object, only taking into account
1395 * unfenced tiled surface requirements.
1396 */
1397static uint32_t
1398i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1399{
1400 struct drm_device *dev = obj->base.dev;
1401 int tile_height;
1402
1403 /*
1404 * Minimum alignment is 4k (GTT page size) for sane hw.
1405 */
1406 if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
1407 obj->tiling_mode == I915_TILING_NONE)
1408 return 4096;
1409
1410 /*
1411 * Older chips need unfenced tiled buffers to be aligned to the left
1412 * edge of an even tile row (where tile rows are counted as if the bo is
1413 * placed in a fenced gtt region).
1414 */
1415 if (IS_GEN2(dev) ||
1416 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
1417 tile_height = 32;
1418 else
1419 tile_height = 8;
1437 1420
1438 return i; 1421 return tile_height * obj->stride * 2;
1439} 1422}
1440 1423
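For unfenced access, older chips only require that a tiled buffer start on an even tile row, so the alignment works out to two tile rows' worth of bytes. The same computation as a standalone helper (tile height 32 for gen2 or 128-byte-wide Y tiling, 8 otherwise, per the code above):

    #include <stdint.h>
    #include <stdbool.h>

    static uint32_t unfenced_alignment(uint32_t stride,
                                       bool gen2_or_128b_ytile)
    {
            int tile_height = gen2_or_128b_ytile ? 32 : 8;

            /* Two tile rows: the object must start on an even row. */
            return tile_height * stride * 2;
    }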
1441/** 1424/**
1442 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing 1425 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
1443 * @dev: DRM device 1426 * @dev: DRM device
1444 * @data: GTT mapping ioctl data 1427 * @data: GTT mapping ioctl data
1445 * @file_priv: GEM object info 1428 * @file: GEM object info
1446 * 1429 *
1447 * Simply returns the fake offset to userspace so it can mmap it. 1430 * Simply returns the fake offset to userspace so it can mmap it.
1448 * The mmap call will end up in drm_gem_mmap(), which will set things 1431 * The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1455,11 +1438,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
1455 */ 1438 */
1456int 1439int
1457i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data, 1440i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1458 struct drm_file *file_priv) 1441 struct drm_file *file)
1459{ 1442{
1443 struct drm_i915_private *dev_priv = dev->dev_private;
1460 struct drm_i915_gem_mmap_gtt *args = data; 1444 struct drm_i915_gem_mmap_gtt *args = data;
1461 struct drm_gem_object *obj; 1445 struct drm_i915_gem_object *obj;
1462 struct drm_i915_gem_object *obj_priv;
1463 int ret; 1446 int ret;
1464 1447
1465 if (!(dev->driver->driver_features & DRIVER_GEM)) 1448 if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1469,130 +1452,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1469 if (ret) 1452 if (ret)
1470 return ret; 1453 return ret;
1471 1454
1472 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 1455 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
1473 if (obj == NULL) { 1456 if (obj == NULL) {
1474 ret = -ENOENT; 1457 ret = -ENOENT;
1475 goto unlock; 1458 goto unlock;
1476 } 1459 }
1477 obj_priv = to_intel_bo(obj);
1478 1460
1479 if (obj_priv->madv != I915_MADV_WILLNEED) { 1461 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1462 ret = -E2BIG;
1463 goto unlock;
1464 }
1465
1466 if (obj->madv != I915_MADV_WILLNEED) {
1480 DRM_ERROR("Attempting to mmap a purgeable buffer\n"); 1467 DRM_ERROR("Attempting to mmap a purgeable buffer\n");
1481 ret = -EINVAL; 1468 ret = -EINVAL;
1482 goto out; 1469 goto out;
1483 } 1470 }
1484 1471
1485 if (!obj_priv->mmap_offset) { 1472 if (!obj->base.map_list.map) {
1486 ret = i915_gem_create_mmap_offset(obj); 1473 ret = i915_gem_create_mmap_offset(obj);
1487 if (ret) 1474 if (ret)
1488 goto out; 1475 goto out;
1489 } 1476 }
1490 1477
1491 args->offset = obj_priv->mmap_offset; 1478 args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
1492
1493 /*
1494 * Pull it into the GTT so that we have a page list (makes the
1495 * initial fault faster and any subsequent flushing possible).
1496 */
1497 if (!obj_priv->agp_mem) {
1498 ret = i915_gem_object_bind_to_gtt(obj, 0);
1499 if (ret)
1500 goto out;
1501 }
1502 1479
1503out: 1480out:
1504 drm_gem_object_unreference(obj); 1481 drm_gem_object_unreference(&obj->base);
1505unlock: 1482unlock:
1506 mutex_unlock(&dev->struct_mutex); 1483 mutex_unlock(&dev->struct_mutex);
1507 return ret; 1484 return ret;
1508} 1485}
1509 1486
1487static int
1488i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
1489 gfp_t gfpmask)
1490{
1491 int page_count, i;
1492 struct address_space *mapping;
1493 struct inode *inode;
1494 struct page *page;
1495
1496 /* Get the list of pages out of our struct file. They'll be pinned
1497 * at this point until we release them.
1498 */
1499 page_count = obj->base.size / PAGE_SIZE;
1500 BUG_ON(obj->pages != NULL);
1501 obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
1502 if (obj->pages == NULL)
1503 return -ENOMEM;
1504
1505 inode = obj->base.filp->f_path.dentry->d_inode;
1506 mapping = inode->i_mapping;
1507 for (i = 0; i < page_count; i++) {
1508 page = read_cache_page_gfp(mapping, i,
1509 GFP_HIGHUSER |
1510 __GFP_COLD |
1511 __GFP_RECLAIMABLE |
1512 gfpmask);
1513 if (IS_ERR(page))
1514 goto err_pages;
1515
1516 obj->pages[i] = page;
1517 }
1518
1519 if (obj->tiling_mode != I915_TILING_NONE)
1520 i915_gem_object_do_bit_17_swizzle(obj);
1521
1522 return 0;
1523
1524err_pages:
1525 while (i--)
1526 page_cache_release(obj->pages[i]);
1527
1528 drm_free_large(obj->pages);
1529 obj->pages = NULL;
1530 return PTR_ERR(page);
1531}
1532
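get_pages_gtt fills the page array front to back and, on failure, unwinds exactly the pages already acquired before freeing the array; the `while (i--)` idiom releases pages [0, i) in reverse. The same unwind pattern modeled with malloc/free:

    #include <stdlib.h>

    static int fill_pages(void **pages, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    pages[i] = malloc(64);  /* stands in for page lookup */
                    if (pages[i] == NULL)
                            goto err_pages;
            }
            return 0;

    err_pages:
            while (i--)                     /* release [0, i) in reverse */
                    free(pages[i]);
            return -1;
    }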
1510static void 1533static void
1511i915_gem_object_put_pages(struct drm_gem_object *obj) 1534i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
1512{ 1535{
1513 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 1536 int page_count = obj->base.size / PAGE_SIZE;
1514 int page_count = obj->size / PAGE_SIZE;
1515 int i; 1537 int i;
1516 1538
1517 BUG_ON(obj_priv->pages_refcount == 0); 1539 BUG_ON(obj->madv == __I915_MADV_PURGED);
1518 BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
1519 1540
1520 if (--obj_priv->pages_refcount != 0) 1541 if (obj->tiling_mode != I915_TILING_NONE)
1521 return;
1522
1523 if (obj_priv->tiling_mode != I915_TILING_NONE)
1524 i915_gem_object_save_bit_17_swizzle(obj); 1542 i915_gem_object_save_bit_17_swizzle(obj);
1525 1543
1526 if (obj_priv->madv == I915_MADV_DONTNEED) 1544 if (obj->madv == I915_MADV_DONTNEED)
1527 obj_priv->dirty = 0; 1545 obj->dirty = 0;
1528 1546
1529 for (i = 0; i < page_count; i++) { 1547 for (i = 0; i < page_count; i++) {
1530 if (obj_priv->dirty) 1548 if (obj->dirty)
1531 set_page_dirty(obj_priv->pages[i]); 1549 set_page_dirty(obj->pages[i]);
1532 1550
1533 if (obj_priv->madv == I915_MADV_WILLNEED) 1551 if (obj->madv == I915_MADV_WILLNEED)
1534 mark_page_accessed(obj_priv->pages[i]); 1552 mark_page_accessed(obj->pages[i]);
1535 1553
1536 page_cache_release(obj_priv->pages[i]); 1554 page_cache_release(obj->pages[i]);
1537 } 1555 }
1538 obj_priv->dirty = 0; 1556 obj->dirty = 0;
1539
1540 drm_free_large(obj_priv->pages);
1541 obj_priv->pages = NULL;
1542}
1543
1544static uint32_t
1545i915_gem_next_request_seqno(struct drm_device *dev,
1546 struct intel_ring_buffer *ring)
1547{
1548 drm_i915_private_t *dev_priv = dev->dev_private;
1549 1557
1550 ring->outstanding_lazy_request = true; 1558 drm_free_large(obj->pages);
1551 return dev_priv->next_seqno; 1559 obj->pages = NULL;
1552} 1560}
1553 1561
1554static void 1562void
1555i915_gem_object_move_to_active(struct drm_gem_object *obj, 1563i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
1556 struct intel_ring_buffer *ring) 1564 struct intel_ring_buffer *ring,
1565 u32 seqno)
1557{ 1566{
1558 struct drm_device *dev = obj->dev; 1567 struct drm_device *dev = obj->base.dev;
1559 struct drm_i915_private *dev_priv = dev->dev_private; 1568 struct drm_i915_private *dev_priv = dev->dev_private;
1560 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1561 uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
1562 1569
1563 BUG_ON(ring == NULL); 1570 BUG_ON(ring == NULL);
1564 obj_priv->ring = ring; 1571 obj->ring = ring;
1565 1572
1566 /* Add a reference if we're newly entering the active list. */ 1573 /* Add a reference if we're newly entering the active list. */
1567 if (!obj_priv->active) { 1574 if (!obj->active) {
1568 drm_gem_object_reference(obj); 1575 drm_gem_object_reference(&obj->base);
1569 obj_priv->active = 1; 1576 obj->active = 1;
1570 } 1577 }
1571 1578
1572 /* Move from whatever list we were on to the tail of execution. */ 1579 /* Move from whatever list we were on to the tail of execution. */
1573 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list); 1580 list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
1574 list_move_tail(&obj_priv->ring_list, &ring->active_list); 1581 list_move_tail(&obj->ring_list, &ring->active_list);
1575 obj_priv->last_rendering_seqno = seqno; 1582
1583 obj->last_rendering_seqno = seqno;
1584 if (obj->fenced_gpu_access) {
1585 struct drm_i915_fence_reg *reg;
1586
1587 BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
1588
1589 obj->last_fenced_seqno = seqno;
1590 obj->last_fenced_ring = ring;
1591
1592 reg = &dev_priv->fence_regs[obj->fence_reg];
1593 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
1594 }
1595}
1596
1597static void
1598i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
1599{
1600 list_del_init(&obj->ring_list);
1601 obj->last_rendering_seqno = 0;
1576} 1602}
1577 1603
1578static void 1604static void
1579i915_gem_object_move_to_flushing(struct drm_gem_object *obj) 1605i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
1580{ 1606{
1581 struct drm_device *dev = obj->dev; 1607 struct drm_device *dev = obj->base.dev;
1582 drm_i915_private_t *dev_priv = dev->dev_private; 1608 drm_i915_private_t *dev_priv = dev->dev_private;
1583 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1584 1609
1585 BUG_ON(!obj_priv->active); 1610 BUG_ON(!obj->active);
1586 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list); 1611 list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
1587 list_del_init(&obj_priv->ring_list); 1612
1588 obj_priv->last_rendering_seqno = 0; 1613 i915_gem_object_move_off_active(obj);
1614}
1615
1616static void
1617i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
1618{
1619 struct drm_device *dev = obj->base.dev;
1620 struct drm_i915_private *dev_priv = dev->dev_private;
1621
1622 if (obj->pin_count != 0)
1623 list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
1624 else
1625 list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
1626
1627 BUG_ON(!list_empty(&obj->gpu_write_list));
1628 BUG_ON(!obj->active);
1629 obj->ring = NULL;
1630
1631 i915_gem_object_move_off_active(obj);
1632 obj->fenced_gpu_access = false;
1633
1634 obj->active = 0;
1635 obj->pending_gpu_write = false;
1636 drm_gem_object_unreference(&obj->base);
1637
1638 WARN_ON(i915_verify_lists(dev));
1589} 1639}
1590 1640
1591/* Immediately discard the backing storage */ 1641/* Immediately discard the backing storage */
1592static void 1642static void
1593i915_gem_object_truncate(struct drm_gem_object *obj) 1643i915_gem_object_truncate(struct drm_i915_gem_object *obj)
1594{ 1644{
1595 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1596 struct inode *inode; 1645 struct inode *inode;
1597 1646
1598 /* Our goal here is to return as much of the memory as 1647 /* Our goal here is to return as much of the memory as
@@ -1601,42 +1650,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
1601 * backing pages, *now*. Here we mirror the actions taken 1650 * backing pages, *now*. Here we mirror the actions taken
1602 * by shmem_delete_inode() to release the backing store. 1651 * by shmem_delete_inode() to release the backing store.
1603 */ 1652 */
1604 inode = obj->filp->f_path.dentry->d_inode; 1653 inode = obj->base.filp->f_path.dentry->d_inode;
1605 truncate_inode_pages(inode->i_mapping, 0); 1654 truncate_inode_pages(inode->i_mapping, 0);
1606 if (inode->i_op->truncate_range) 1655 if (inode->i_op->truncate_range)
1607 inode->i_op->truncate_range(inode, 0, (loff_t)-1); 1656 inode->i_op->truncate_range(inode, 0, (loff_t)-1);
1608 1657
1609 obj_priv->madv = __I915_MADV_PURGED; 1658 obj->madv = __I915_MADV_PURGED;
1610} 1659}
1611 1660
1612static inline int 1661static inline int
1613i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv) 1662i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
1614{
1615 return obj_priv->madv == I915_MADV_DONTNEED;
1616}
1617
1618static void
1619i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1620{ 1663{
1621 struct drm_device *dev = obj->dev; 1664 return obj->madv == I915_MADV_DONTNEED;
1622 drm_i915_private_t *dev_priv = dev->dev_private;
1623 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1624
1625 if (obj_priv->pin_count != 0)
1626 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
1627 else
1628 list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
1629 list_del_init(&obj_priv->ring_list);
1630
1631 BUG_ON(!list_empty(&obj_priv->gpu_write_list));
1632
1633 obj_priv->last_rendering_seqno = 0;
1634 obj_priv->ring = NULL;
1635 if (obj_priv->active) {
1636 obj_priv->active = 0;
1637 drm_gem_object_unreference(obj);
1638 }
1639 WARN_ON(i915_verify_lists(dev));
1640} 1665}
1641 1666
1642static void 1667static void
@@ -1644,37 +1669,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
1644 uint32_t flush_domains, 1669 uint32_t flush_domains,
1645 struct intel_ring_buffer *ring) 1670 struct intel_ring_buffer *ring)
1646{ 1671{
1647 drm_i915_private_t *dev_priv = dev->dev_private; 1672 struct drm_i915_gem_object *obj, *next;
1648 struct drm_i915_gem_object *obj_priv, *next;
1649 1673
1650 list_for_each_entry_safe(obj_priv, next, 1674 list_for_each_entry_safe(obj, next,
1651 &ring->gpu_write_list, 1675 &ring->gpu_write_list,
1652 gpu_write_list) { 1676 gpu_write_list) {
1653 struct drm_gem_object *obj = &obj_priv->base; 1677 if (obj->base.write_domain & flush_domains) {
1654 1678 uint32_t old_write_domain = obj->base.write_domain;
1655 if (obj->write_domain & flush_domains) {
1656 uint32_t old_write_domain = obj->write_domain;
1657 1679
1658 obj->write_domain = 0; 1680 obj->base.write_domain = 0;
1659 list_del_init(&obj_priv->gpu_write_list); 1681 list_del_init(&obj->gpu_write_list);
1660 i915_gem_object_move_to_active(obj, ring); 1682 i915_gem_object_move_to_active(obj, ring,
1661 1683 i915_gem_next_request_seqno(dev, ring));
1662 /* update the fence lru list */
1663 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
1664 struct drm_i915_fence_reg *reg =
1665 &dev_priv->fence_regs[obj_priv->fence_reg];
1666 list_move_tail(&reg->lru_list,
1667 &dev_priv->mm.fence_list);
1668 }
1669 1684
1670 trace_i915_gem_object_change_domain(obj, 1685 trace_i915_gem_object_change_domain(obj,
1671 obj->read_domains, 1686 obj->base.read_domains,
1672 old_write_domain); 1687 old_write_domain);
1673 } 1688 }
1674 } 1689 }
1675} 1690}
1676 1691
1677uint32_t 1692int
1678i915_add_request(struct drm_device *dev, 1693i915_add_request(struct drm_device *dev,
1679 struct drm_file *file, 1694 struct drm_file *file,
1680 struct drm_i915_gem_request *request, 1695 struct drm_i915_gem_request *request,
@@ -1684,17 +1699,17 @@ i915_add_request(struct drm_device *dev,
1684 struct drm_i915_file_private *file_priv = NULL; 1699 struct drm_i915_file_private *file_priv = NULL;
1685 uint32_t seqno; 1700 uint32_t seqno;
1686 int was_empty; 1701 int was_empty;
1702 int ret;
1703
1704 BUG_ON(request == NULL);
1687 1705
1688 if (file != NULL) 1706 if (file != NULL)
1689 file_priv = file->driver_priv; 1707 file_priv = file->driver_priv;
1690 1708
1691 if (request == NULL) { 1709 ret = ring->add_request(ring, &seqno);
1692 request = kzalloc(sizeof(*request), GFP_KERNEL); 1710 if (ret)
1693 if (request == NULL) 1711 return ret;
1694 return 0;
1695 }
1696 1712
1697 seqno = ring->add_request(dev, ring, 0);
1698 ring->outstanding_lazy_request = false; 1713 ring->outstanding_lazy_request = false;
1699 1714
1700 request->seqno = seqno; 1715 request->seqno = seqno;
@@ -1718,26 +1733,7 @@ i915_add_request(struct drm_device *dev,
1718 queue_delayed_work(dev_priv->wq, 1733 queue_delayed_work(dev_priv->wq,
1719 &dev_priv->mm.retire_work, HZ); 1734 &dev_priv->mm.retire_work, HZ);
1720 } 1735 }
1721 return seqno; 1736 return 0;
1722}
1723
1724/**
1725 * Command execution barrier
1726 *
1727 * Ensures that all commands in the ring are finished
1728 * before signalling the CPU
1729 */
1730static void
1731i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
1732{
1733 uint32_t flush_domains = 0;
1734
1735 /* The sampler always gets flushed on i965 (sigh) */
1736 if (INTEL_INFO(dev)->gen >= 4)
1737 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
1738
1739 ring->flush(dev, ring,
1740 I915_GEM_DOMAIN_COMMAND, flush_domains);
1741} 1737}
1742 1738
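i915_add_request() now takes a caller-allocated request and returns an int error from ring->add_request() instead of a raw (possibly zero) seqno. A self-contained sketch of the resulting caller pattern, which the i915_do_wait_request() hunk further down adopts; the names and errno values below are simplified stand-ins:

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

struct request { uint32_t seqno; };

/* Stand-in for i915_add_request(): fills in the caller-provided
 * request and returns 0, or a negative errno on failure.
 */
static int add_request(struct request *rq)
{
	static uint32_t next_seqno = 1;

	if (rq == NULL)
		return -22;           /* -EINVAL in the real driver */
	rq->seqno = next_seqno++;
	return 0;
}

int main(void)
{
	/* Caller allocates, checks the error, and frees on failure --
	 * mirroring the kzalloc()/i915_add_request()/kfree() dance in
	 * the wait-request hunk below.
	 */
	struct request *rq = calloc(1, sizeof(*rq));
	int ret;

	if (rq == NULL)
		return 12;            /* ENOMEM */

	ret = add_request(rq);
	if (ret) {
		free(rq);
		return -ret;
	}
	printf("queued seqno %u\n", rq->seqno);
	free(rq);
	return 0;
}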
1743static inline void 1739static inline void
@@ -1770,62 +1766,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
1770 } 1766 }
1771 1767
1772 while (!list_empty(&ring->active_list)) { 1768 while (!list_empty(&ring->active_list)) {
1773 struct drm_i915_gem_object *obj_priv; 1769 struct drm_i915_gem_object *obj;
1774 1770
1775 obj_priv = list_first_entry(&ring->active_list, 1771 obj = list_first_entry(&ring->active_list,
1776 struct drm_i915_gem_object, 1772 struct drm_i915_gem_object,
1777 ring_list); 1773 ring_list);
1778 1774
1779 obj_priv->base.write_domain = 0; 1775 obj->base.write_domain = 0;
1780 list_del_init(&obj_priv->gpu_write_list); 1776 list_del_init(&obj->gpu_write_list);
1781 i915_gem_object_move_to_inactive(&obj_priv->base); 1777 i915_gem_object_move_to_inactive(obj);
1778 }
1779}
1780
1781static void i915_gem_reset_fences(struct drm_device *dev)
1782{
1783 struct drm_i915_private *dev_priv = dev->dev_private;
1784 int i;
1785
1786 for (i = 0; i < 16; i++) {
1787 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1788 struct drm_i915_gem_object *obj = reg->obj;
1789
1790 if (!obj)
1791 continue;
1792
1793 if (obj->tiling_mode)
1794 i915_gem_release_mmap(obj);
1795
1796 reg->obj->fence_reg = I915_FENCE_REG_NONE;
1797 reg->obj->fenced_gpu_access = false;
1798 reg->obj->last_fenced_seqno = 0;
1799 reg->obj->last_fenced_ring = NULL;
1800 i915_gem_clear_fence_reg(dev, reg);
1782 } 1801 }
1783} 1802}
1784 1803
1785void i915_gem_reset(struct drm_device *dev) 1804void i915_gem_reset(struct drm_device *dev)
1786{ 1805{
1787 struct drm_i915_private *dev_priv = dev->dev_private; 1806 struct drm_i915_private *dev_priv = dev->dev_private;
1788 struct drm_i915_gem_object *obj_priv; 1807 struct drm_i915_gem_object *obj;
1789 int i; 1808 int i;
1790 1809
1791 i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring); 1810 for (i = 0; i < I915_NUM_RINGS; i++)
1792 i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring); 1811 i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
1793 i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
1794 1812
1795 /* Remove anything from the flushing lists. The GPU cache is likely 1813 /* Remove anything from the flushing lists. The GPU cache is likely
1796 * to be lost on reset along with the data, so simply move the 1814 * to be lost on reset along with the data, so simply move the
1797 * lost bo to the inactive list. 1815 * lost bo to the inactive list.
1798 */ 1816 */
1799 while (!list_empty(&dev_priv->mm.flushing_list)) { 1817 while (!list_empty(&dev_priv->mm.flushing_list)) {
1800 obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 1818 obj = list_first_entry(&dev_priv->mm.flushing_list,
1801 struct drm_i915_gem_object, 1819 struct drm_i915_gem_object,
1802 mm_list); 1820 mm_list);
1803 1821
1804 obj_priv->base.write_domain = 0; 1822 obj->base.write_domain = 0;
1805 list_del_init(&obj_priv->gpu_write_list); 1823 list_del_init(&obj->gpu_write_list);
1806 i915_gem_object_move_to_inactive(&obj_priv->base); 1824 i915_gem_object_move_to_inactive(obj);
1807 } 1825 }
1808 1826
1809 /* Move everything out of the GPU domains to ensure we do any 1827 /* Move everything out of the GPU domains to ensure we do any
1810 * necessary invalidation upon reuse. 1828 * necessary invalidation upon reuse.
1811 */ 1829 */
1812 list_for_each_entry(obj_priv, 1830 list_for_each_entry(obj,
1813 &dev_priv->mm.inactive_list, 1831 &dev_priv->mm.inactive_list,
1814 mm_list) 1832 mm_list)
1815 { 1833 {
1816 obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS; 1834 obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
1817 } 1835 }
1818 1836
1819 /* The fence registers are invalidated so clear them out */ 1837 /* The fence registers are invalidated so clear them out */
1820 for (i = 0; i < 16; i++) { 1838 i915_gem_reset_fences(dev);
1821 struct drm_i915_fence_reg *reg;
1822
1823 reg = &dev_priv->fence_regs[i];
1824 if (!reg->obj)
1825 continue;
1826
1827 i915_gem_clear_fence_reg(reg->obj);
1828 }
1829} 1839}
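From here on the diff repeatedly collapses the three named rings (render_ring, bsd_ring, blt_ring) into a ring[I915_NUM_RINGS] array walked in a loop. A minimal sketch of the indexing scheme, assuming the RCS/VCS/BCS enumerators map to the array slots as the retire_work hunk below suggests:

#include <stdio.h>

enum ring_id { RCS, VCS, BCS, I915_NUM_RINGS };  /* render, bsd, blt */

struct ring { const char *name; };

int main(void)
{
	/* One array instead of three named members... */
	struct ring rings[I915_NUM_RINGS] = {
		[RCS] = { "render" }, [VCS] = { "bsd" }, [BCS] = { "blt" },
	};
	int i;

	/* ...so per-ring operations (reset, retire, idle) collapse
	 * from three call sites into one loop.
	 */
	for (i = 0; i < I915_NUM_RINGS; i++)
		printf("reset %s ring\n", rings[i].name);
	return 0;
}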
1830 1840
1831/** 1841/**
@@ -1837,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1837{ 1847{
1838 drm_i915_private_t *dev_priv = dev->dev_private; 1848 drm_i915_private_t *dev_priv = dev->dev_private;
1839 uint32_t seqno; 1849 uint32_t seqno;
1850 int i;
1840 1851
1841 if (!ring->status_page.page_addr || 1852 if (!ring->status_page.page_addr ||
1842 list_empty(&ring->request_list)) 1853 list_empty(&ring->request_list))
@@ -1844,7 +1855,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1844 1855
1845 WARN_ON(i915_verify_lists(dev)); 1856 WARN_ON(i915_verify_lists(dev));
1846 1857
1847 seqno = ring->get_seqno(dev, ring); 1858 seqno = ring->get_seqno(ring);
1859
1860 for (i = 0; i < I915_NUM_RINGS; i++)
1861 if (seqno >= ring->sync_seqno[i])
1862 ring->sync_seqno[i] = 0;
1863
1848 while (!list_empty(&ring->request_list)) { 1864 while (!list_empty(&ring->request_list)) {
1849 struct drm_i915_gem_request *request; 1865 struct drm_i915_gem_request *request;
1850 1866
@@ -1866,18 +1882,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1866 * by the ringbuffer to the flushing/inactive lists as appropriate. 1882 * by the ringbuffer to the flushing/inactive lists as appropriate.
1867 */ 1883 */
1868 while (!list_empty(&ring->active_list)) { 1884 while (!list_empty(&ring->active_list)) {
1869 struct drm_gem_object *obj; 1885 struct drm_i915_gem_object *obj;
1870 struct drm_i915_gem_object *obj_priv;
1871 1886
1872 obj_priv = list_first_entry(&ring->active_list, 1887 obj = list_first_entry(&ring->active_list,
1873 struct drm_i915_gem_object, 1888 struct drm_i915_gem_object,
1874 ring_list); 1889 ring_list);
1875 1890
1876 if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno)) 1891 if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
1877 break; 1892 break;
1878 1893
1879 obj = &obj_priv->base; 1894 if (obj->base.write_domain != 0)
1880 if (obj->write_domain != 0)
1881 i915_gem_object_move_to_flushing(obj); 1895 i915_gem_object_move_to_flushing(obj);
1882 else 1896 else
1883 i915_gem_object_move_to_inactive(obj); 1897 i915_gem_object_move_to_inactive(obj);
@@ -1885,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
1885 1899
1886 if (unlikely (dev_priv->trace_irq_seqno && 1900 if (unlikely (dev_priv->trace_irq_seqno &&
1887 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) { 1901 i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
1888 ring->user_irq_put(dev, ring); 1902 ring->irq_put(ring);
1889 dev_priv->trace_irq_seqno = 0; 1903 dev_priv->trace_irq_seqno = 0;
1890 } 1904 }
1891 1905
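The retirement loop above relies on i915_seqno_passed() being safe across wraparound of the 32-bit seqno counter. A standalone sketch of the usual signed-difference implementation; the exact body lives elsewhere in the driver, so treat this as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "has seq1 reached seq2?": the subtraction is done
 * in 32 bits and the sign of the result decides, so a counter that
 * wraps past 0xffffffff still orders correctly as long as the two
 * values are less than 2^31 apart.
 */
static int i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	printf("%d\n", i915_seqno_passed(5, 3));             /* 1 */
	printf("%d\n", i915_seqno_passed(3, 5));             /* 0 */
	/* Across the wrap: 2 is "after" 0xfffffffe. */
	printf("%d\n", i915_seqno_passed(2, 0xfffffffeu));   /* 1 */
	return 0;
}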
@@ -1896,24 +1910,24 @@ void
1896i915_gem_retire_requests(struct drm_device *dev) 1910i915_gem_retire_requests(struct drm_device *dev)
1897{ 1911{
1898 drm_i915_private_t *dev_priv = dev->dev_private; 1912 drm_i915_private_t *dev_priv = dev->dev_private;
1913 int i;
1899 1914
1900 if (!list_empty(&dev_priv->mm.deferred_free_list)) { 1915 if (!list_empty(&dev_priv->mm.deferred_free_list)) {
1901 struct drm_i915_gem_object *obj_priv, *tmp; 1916 struct drm_i915_gem_object *obj, *next;
1902 1917
1903 /* We must be careful that during unbind() we do not 1918 /* We must be careful that during unbind() we do not
1904 * accidentally infinitely recurse into retire requests. 1919 * accidentally infinitely recurse into retire requests.
1905 * Currently: 1920 * Currently:
1906 * retire -> free -> unbind -> wait -> retire_ring 1921 * retire -> free -> unbind -> wait -> retire_ring
1907 */ 1922 */
1908 list_for_each_entry_safe(obj_priv, tmp, 1923 list_for_each_entry_safe(obj, next,
1909 &dev_priv->mm.deferred_free_list, 1924 &dev_priv->mm.deferred_free_list,
1910 mm_list) 1925 mm_list)
1911 i915_gem_free_object_tail(&obj_priv->base); 1926 i915_gem_free_object_tail(obj);
1912 } 1927 }
1913 1928
1914 i915_gem_retire_requests_ring(dev, &dev_priv->render_ring); 1929 for (i = 0; i < I915_NUM_RINGS; i++)
1915 i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring); 1930 i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
1916 i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
1917} 1931}
1918 1932
1919static void 1933static void
@@ -1935,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
1935 i915_gem_retire_requests(dev); 1949 i915_gem_retire_requests(dev);
1936 1950
1937 if (!dev_priv->mm.suspended && 1951 if (!dev_priv->mm.suspended &&
1938 (!list_empty(&dev_priv->render_ring.request_list) || 1952 (!list_empty(&dev_priv->ring[RCS].request_list) ||
1939 !list_empty(&dev_priv->bsd_ring.request_list) || 1953 !list_empty(&dev_priv->ring[VCS].request_list) ||
1940 !list_empty(&dev_priv->blt_ring.request_list))) 1954 !list_empty(&dev_priv->ring[BCS].request_list)))
1941 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ); 1955 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
1942 mutex_unlock(&dev->struct_mutex); 1956 mutex_unlock(&dev->struct_mutex);
1943} 1957}
@@ -1955,14 +1969,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1955 if (atomic_read(&dev_priv->mm.wedged)) 1969 if (atomic_read(&dev_priv->mm.wedged))
1956 return -EAGAIN; 1970 return -EAGAIN;
1957 1971
1958 if (ring->outstanding_lazy_request) { 1972 if (seqno == ring->outstanding_lazy_request) {
1959 seqno = i915_add_request(dev, NULL, NULL, ring); 1973 struct drm_i915_gem_request *request;
1960 if (seqno == 0) 1974
1975 request = kzalloc(sizeof(*request), GFP_KERNEL);
1976 if (request == NULL)
1961 return -ENOMEM; 1977 return -ENOMEM;
1978
1979 ret = i915_add_request(dev, NULL, request, ring);
1980 if (ret) {
1981 kfree(request);
1982 return ret;
1983 }
1984
1985 seqno = request->seqno;
1962 } 1986 }
1963 BUG_ON(seqno == dev_priv->next_seqno);
1964 1987
1965 if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) { 1988 if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
1966 if (HAS_PCH_SPLIT(dev)) 1989 if (HAS_PCH_SPLIT(dev))
1967 ier = I915_READ(DEIER) | I915_READ(GTIER); 1990 ier = I915_READ(DEIER) | I915_READ(GTIER);
1968 else 1991 else
@@ -1976,21 +1999,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1976 1999
1977 trace_i915_gem_request_wait_begin(dev, seqno); 2000 trace_i915_gem_request_wait_begin(dev, seqno);
1978 2001
1979 ring->waiting_gem_seqno = seqno; 2002 ring->waiting_seqno = seqno;
1980 ring->user_irq_get(dev, ring); 2003 if (ring->irq_get(ring)) {
1981 if (interruptible) 2004 if (interruptible)
1982 ret = wait_event_interruptible(ring->irq_queue, 2005 ret = wait_event_interruptible(ring->irq_queue,
1983 i915_seqno_passed( 2006 i915_seqno_passed(ring->get_seqno(ring), seqno)
1984 ring->get_seqno(dev, ring), seqno) 2007 || atomic_read(&dev_priv->mm.wedged));
1985 || atomic_read(&dev_priv->mm.wedged)); 2008 else
1986 else 2009 wait_event(ring->irq_queue,
1987 wait_event(ring->irq_queue, 2010 i915_seqno_passed(ring->get_seqno(ring), seqno)
1988 i915_seqno_passed( 2011 || atomic_read(&dev_priv->mm.wedged));
1989 ring->get_seqno(dev, ring), seqno)
1990 || atomic_read(&dev_priv->mm.wedged));
1991 2012
1992 ring->user_irq_put(dev, ring); 2013 ring->irq_put(ring);
1993 ring->waiting_gem_seqno = 0; 2014 } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
2015 seqno) ||
2016 atomic_read(&dev_priv->mm.wedged), 3000))
2017 ret = -EBUSY;
2018 ring->waiting_seqno = 0;
1994 2019
1995 trace_i915_gem_request_wait_end(dev, seqno); 2020 trace_i915_gem_request_wait_end(dev, seqno);
1996 } 2021 }
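The rewritten wait no longer assumes user interrupts are available: irq_get() may fail, and the code then falls back to a bounded poll via wait_for(..., 3000), returning -EBUSY on timeout. A simplified userspace model of that control flow, with the kernel wait queue replaced by plain loops:

#include <stdio.h>

static unsigned int hw_seqno;            /* stand-in for get_seqno() */

static int seqno_passed(unsigned int target)
{
	hw_seqno++;                      /* pretend the GPU advances */
	return hw_seqno >= target;
}

/* Nonzero on success (models ring->irq_get()); 0 forces the poll. */
static int irq_get(void) { return 0; }
static void irq_put(void) { }

static int wait_seqno(unsigned int target)
{
	if (irq_get()) {
		/* IRQ path: the real driver sleeps on ring->irq_queue
		 * until the seqno passes or the GPU is wedged.
		 */
		while (!seqno_passed(target))
			;
		irq_put();
		return 0;
	}

	/* No IRQ: bounded poll instead of sleeping forever. */
	for (int tries = 0; tries < 3000; tries++)
		if (seqno_passed(target))
			return 0;
	return -16;                      /* -EBUSY */
}

int main(void)
{
	printf("wait_seqno -> %d\n", wait_seqno(10));
	return 0;
}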
@@ -1999,7 +2024,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
1999 2024
2000 if (ret && ret != -ERESTARTSYS) 2025 if (ret && ret != -ERESTARTSYS)
2001 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n", 2026 DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
2002 __func__, ret, seqno, ring->get_seqno(dev, ring), 2027 __func__, ret, seqno, ring->get_seqno(ring),
2003 dev_priv->next_seqno); 2028 dev_priv->next_seqno);
2004 2029
2005 /* Directly dispatch request retiring. While we have the work queue 2030 /* Directly dispatch request retiring. While we have the work queue
@@ -2024,70 +2049,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
2024 return i915_do_wait_request(dev, seqno, 1, ring); 2049 return i915_do_wait_request(dev, seqno, 1, ring);
2025} 2050}
2026 2051
2027static void
2028i915_gem_flush_ring(struct drm_device *dev,
2029 struct drm_file *file_priv,
2030 struct intel_ring_buffer *ring,
2031 uint32_t invalidate_domains,
2032 uint32_t flush_domains)
2033{
2034 ring->flush(dev, ring, invalidate_domains, flush_domains);
2035 i915_gem_process_flushing_list(dev, flush_domains, ring);
2036}
2037
2038static void
2039i915_gem_flush(struct drm_device *dev,
2040 struct drm_file *file_priv,
2041 uint32_t invalidate_domains,
2042 uint32_t flush_domains,
2043 uint32_t flush_rings)
2044{
2045 drm_i915_private_t *dev_priv = dev->dev_private;
2046
2047 if (flush_domains & I915_GEM_DOMAIN_CPU)
2048 drm_agp_chipset_flush(dev);
2049
2050 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
2051 if (flush_rings & RING_RENDER)
2052 i915_gem_flush_ring(dev, file_priv,
2053 &dev_priv->render_ring,
2054 invalidate_domains, flush_domains);
2055 if (flush_rings & RING_BSD)
2056 i915_gem_flush_ring(dev, file_priv,
2057 &dev_priv->bsd_ring,
2058 invalidate_domains, flush_domains);
2059 if (flush_rings & RING_BLT)
2060 i915_gem_flush_ring(dev, file_priv,
2061 &dev_priv->blt_ring,
2062 invalidate_domains, flush_domains);
2063 }
2064}
2065
2066/** 2052/**
2067 * Ensures that all rendering to the object has completed and the object is 2053 * Ensures that all rendering to the object has completed and the object is
2068 * safe to unbind from the GTT or access from the CPU. 2054 * safe to unbind from the GTT or access from the CPU.
2069 */ 2055 */
2070static int 2056int
2071i915_gem_object_wait_rendering(struct drm_gem_object *obj, 2057i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
2072 bool interruptible) 2058 bool interruptible)
2073{ 2059{
2074 struct drm_device *dev = obj->dev; 2060 struct drm_device *dev = obj->base.dev;
2075 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2076 int ret; 2061 int ret;
2077 2062
2078 /* This function only exists to support waiting for existing rendering, 2063 /* This function only exists to support waiting for existing rendering,
2079 * not for emitting required flushes. 2064 * not for emitting required flushes.
2080 */ 2065 */
2081 BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); 2066 BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
2082 2067
2083 /* If there is rendering queued on the buffer being evicted, wait for 2068 /* If there is rendering queued on the buffer being evicted, wait for
2084 * it. 2069 * it.
2085 */ 2070 */
2086 if (obj_priv->active) { 2071 if (obj->active) {
2087 ret = i915_do_wait_request(dev, 2072 ret = i915_do_wait_request(dev,
2088 obj_priv->last_rendering_seqno, 2073 obj->last_rendering_seqno,
2089 interruptible, 2074 interruptible,
2090 obj_priv->ring); 2075 obj->ring);
2091 if (ret) 2076 if (ret)
2092 return ret; 2077 return ret;
2093 } 2078 }
@@ -2099,17 +2084,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
2099 * Unbinds an object from the GTT aperture. 2084 * Unbinds an object from the GTT aperture.
2100 */ 2085 */
2101int 2086int
2102i915_gem_object_unbind(struct drm_gem_object *obj) 2087i915_gem_object_unbind(struct drm_i915_gem_object *obj)
2103{ 2088{
2104 struct drm_device *dev = obj->dev;
2105 struct drm_i915_private *dev_priv = dev->dev_private;
2106 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2107 int ret = 0; 2089 int ret = 0;
2108 2090
2109 if (obj_priv->gtt_space == NULL) 2091 if (obj->gtt_space == NULL)
2110 return 0; 2092 return 0;
2111 2093
2112 if (obj_priv->pin_count != 0) { 2094 if (obj->pin_count != 0) {
2113 DRM_ERROR("Attempting to unbind pinned buffer\n"); 2095 DRM_ERROR("Attempting to unbind pinned buffer\n");
2114 return -EINVAL; 2096 return -EINVAL;
2115 } 2097 }
@@ -2132,27 +2114,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2132 */ 2114 */
2133 if (ret) { 2115 if (ret) {
2134 i915_gem_clflush_object(obj); 2116 i915_gem_clflush_object(obj);
2135 obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU; 2117 obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
2136 } 2118 }
2137 2119
2138 /* release the fence reg _after_ flushing */ 2120 /* release the fence reg _after_ flushing */
2139 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) 2121 ret = i915_gem_object_put_fence(obj);
2140 i915_gem_clear_fence_reg(obj); 2122 if (ret == -ERESTARTSYS)
2141 2123 return ret;
2142 drm_unbind_agp(obj_priv->agp_mem);
2143 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
2144 2124
2145 i915_gem_object_put_pages(obj); 2125 i915_gem_gtt_unbind_object(obj);
2146 BUG_ON(obj_priv->pages_refcount); 2126 i915_gem_object_put_pages_gtt(obj);
2147 2127
2148 i915_gem_info_remove_gtt(dev_priv, obj->size); 2128 list_del_init(&obj->gtt_list);
2149 list_del_init(&obj_priv->mm_list); 2129 list_del_init(&obj->mm_list);
2130 /* Avoid an unnecessary call to unbind on rebind. */
2131 obj->map_and_fenceable = true;
2150 2132
2151 drm_mm_put_block(obj_priv->gtt_space); 2133 drm_mm_put_block(obj->gtt_space);
2152 obj_priv->gtt_space = NULL; 2134 obj->gtt_space = NULL;
2153 obj_priv->gtt_offset = 0; 2135 obj->gtt_offset = 0;
2154 2136
2155 if (i915_gem_object_is_purgeable(obj_priv)) 2137 if (i915_gem_object_is_purgeable(obj))
2156 i915_gem_object_truncate(obj); 2138 i915_gem_object_truncate(obj);
2157 2139
2158 trace_i915_gem_object_unbind(obj); 2140 trace_i915_gem_object_unbind(obj);
@@ -2160,14 +2142,25 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
2160 return ret; 2142 return ret;
2161} 2143}
2162 2144
2145void
2146i915_gem_flush_ring(struct drm_device *dev,
2147 struct intel_ring_buffer *ring,
2148 uint32_t invalidate_domains,
2149 uint32_t flush_domains)
2150{
2151 ring->flush(ring, invalidate_domains, flush_domains);
2152 i915_gem_process_flushing_list(dev, flush_domains, ring);
2153}
2154
2163static int i915_ring_idle(struct drm_device *dev, 2155static int i915_ring_idle(struct drm_device *dev,
2164 struct intel_ring_buffer *ring) 2156 struct intel_ring_buffer *ring)
2165{ 2157{
2166 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list)) 2158 if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
2167 return 0; 2159 return 0;
2168 2160
2169 i915_gem_flush_ring(dev, NULL, ring, 2161 if (!list_empty(&ring->gpu_write_list))
2170 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS); 2162 i915_gem_flush_ring(dev, ring,
2163 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
2171 return i915_wait_request(dev, 2164 return i915_wait_request(dev,
2172 i915_gem_next_request_seqno(dev, ring), 2165 i915_gem_next_request_seqno(dev, ring),
2173 ring); 2166 ring);
@@ -2178,7 +2171,7 @@ i915_gpu_idle(struct drm_device *dev)
2178{ 2171{
2179 drm_i915_private_t *dev_priv = dev->dev_private; 2172 drm_i915_private_t *dev_priv = dev->dev_private;
2180 bool lists_empty; 2173 bool lists_empty;
2181 int ret; 2174 int ret, i;
2182 2175
2183 lists_empty = (list_empty(&dev_priv->mm.flushing_list) && 2176 lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
2184 list_empty(&dev_priv->mm.active_list)); 2177 list_empty(&dev_priv->mm.active_list));
@@ -2186,258 +2179,296 @@ i915_gpu_idle(struct drm_device *dev)
2186 return 0; 2179 return 0;
2187 2180
2188 /* Flush everything onto the inactive list. */ 2181 /* Flush everything onto the inactive list. */
2189 ret = i915_ring_idle(dev, &dev_priv->render_ring); 2182 for (i = 0; i < I915_NUM_RINGS; i++) {
2190 if (ret) 2183 ret = i915_ring_idle(dev, &dev_priv->ring[i]);
2191 return ret; 2184 if (ret)
2192 2185 return ret;
2193 ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
2194 if (ret)
2195 return ret;
2196
2197 ret = i915_ring_idle(dev, &dev_priv->blt_ring);
2198 if (ret)
2199 return ret;
2200
2201 return 0;
2202}
2203
2204static int
2205i915_gem_object_get_pages(struct drm_gem_object *obj,
2206 gfp_t gfpmask)
2207{
2208 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2209 int page_count, i;
2210 struct address_space *mapping;
2211 struct inode *inode;
2212 struct page *page;
2213
2214 BUG_ON(obj_priv->pages_refcount
2215 == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
2216
2217 if (obj_priv->pages_refcount++ != 0)
2218 return 0;
2219
2220 /* Get the list of pages out of our struct file. They'll be pinned
2221 * at this point until we release them.
2222 */
2223 page_count = obj->size / PAGE_SIZE;
2224 BUG_ON(obj_priv->pages != NULL);
2225 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2226 if (obj_priv->pages == NULL) {
2227 obj_priv->pages_refcount--;
2228 return -ENOMEM;
2229 }
2230
2231 inode = obj->filp->f_path.dentry->d_inode;
2232 mapping = inode->i_mapping;
2233 for (i = 0; i < page_count; i++) {
2234 page = read_cache_page_gfp(mapping, i,
2235 GFP_HIGHUSER |
2236 __GFP_COLD |
2237 __GFP_RECLAIMABLE |
2238 gfpmask);
2239 if (IS_ERR(page))
2240 goto err_pages;
2241
2242 obj_priv->pages[i] = page;
2243 } 2186 }
2244 2187
2245 if (obj_priv->tiling_mode != I915_TILING_NONE)
2246 i915_gem_object_do_bit_17_swizzle(obj);
2247
2248 return 0; 2188 return 0;
2249
2250err_pages:
2251 while (i--)
2252 page_cache_release(obj_priv->pages[i]);
2253
2254 drm_free_large(obj_priv->pages);
2255 obj_priv->pages = NULL;
2256 obj_priv->pages_refcount--;
2257 return PTR_ERR(page);
2258} 2189}
2259 2190
2260static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg) 2191static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
2192 struct intel_ring_buffer *pipelined)
2261{ 2193{
2262 struct drm_gem_object *obj = reg->obj; 2194 struct drm_device *dev = obj->base.dev;
2263 struct drm_device *dev = obj->dev;
2264 drm_i915_private_t *dev_priv = dev->dev_private; 2195 drm_i915_private_t *dev_priv = dev->dev_private;
2265 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2196 u32 size = obj->gtt_space->size;
2266 int regnum = obj_priv->fence_reg; 2197 int regnum = obj->fence_reg;
2267 uint64_t val; 2198 uint64_t val;
2268 2199
2269 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & 2200 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2270 0xfffff000) << 32; 2201 0xfffff000) << 32;
2271 val |= obj_priv->gtt_offset & 0xfffff000; 2202 val |= obj->gtt_offset & 0xfffff000;
2272 val |= (uint64_t)((obj_priv->stride / 128) - 1) << 2203 val |= (uint64_t)((obj->stride / 128) - 1) <<
2273 SANDYBRIDGE_FENCE_PITCH_SHIFT; 2204 SANDYBRIDGE_FENCE_PITCH_SHIFT;
2274 2205
2275 if (obj_priv->tiling_mode == I915_TILING_Y) 2206 if (obj->tiling_mode == I915_TILING_Y)
2276 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2207 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2277 val |= I965_FENCE_REG_VALID; 2208 val |= I965_FENCE_REG_VALID;
2278 2209
2279 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val); 2210 if (pipelined) {
2211 int ret = intel_ring_begin(pipelined, 6);
2212 if (ret)
2213 return ret;
2214
2215 intel_ring_emit(pipelined, MI_NOOP);
2216 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2217 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
2218 intel_ring_emit(pipelined, (u32)val);
2219 intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
2220 intel_ring_emit(pipelined, (u32)(val >> 32));
2221 intel_ring_advance(pipelined);
2222 } else
2223 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
2224
2225 return 0;
2280} 2226}
2281 2227
2282static void i965_write_fence_reg(struct drm_i915_fence_reg *reg) 2228static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
2229 struct intel_ring_buffer *pipelined)
2283{ 2230{
2284 struct drm_gem_object *obj = reg->obj; 2231 struct drm_device *dev = obj->base.dev;
2285 struct drm_device *dev = obj->dev;
2286 drm_i915_private_t *dev_priv = dev->dev_private; 2232 drm_i915_private_t *dev_priv = dev->dev_private;
2287 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2233 u32 size = obj->gtt_space->size;
2288 int regnum = obj_priv->fence_reg; 2234 int regnum = obj->fence_reg;
2289 uint64_t val; 2235 uint64_t val;
2290 2236
2291 val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) & 2237 val = (uint64_t)((obj->gtt_offset + size - 4096) &
2292 0xfffff000) << 32; 2238 0xfffff000) << 32;
2293 val |= obj_priv->gtt_offset & 0xfffff000; 2239 val |= obj->gtt_offset & 0xfffff000;
2294 val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT; 2240 val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
2295 if (obj_priv->tiling_mode == I915_TILING_Y) 2241 if (obj->tiling_mode == I915_TILING_Y)
2296 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 2242 val |= 1 << I965_FENCE_TILING_Y_SHIFT;
2297 val |= I965_FENCE_REG_VALID; 2243 val |= I965_FENCE_REG_VALID;
2298 2244
2299 I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val); 2245 if (pipelined) {
2246 int ret = intel_ring_begin(pipelined, 6);
2247 if (ret)
2248 return ret;
2249
2250 intel_ring_emit(pipelined, MI_NOOP);
2251 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
2252 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
2253 intel_ring_emit(pipelined, (u32)val);
2254 intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
2255 intel_ring_emit(pipelined, (u32)(val >> 32));
2256 intel_ring_advance(pipelined);
2257 } else
2258 I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
2259
2260 return 0;
2300} 2261}
2301 2262
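For reference, the i965 path above packs the fenced range, pitch and tiling into a single 64-bit register value. A standalone sketch of the packing arithmetic as written in the hunk; the shift and valid-bit constants are illustrative stand-ins, not values copied from i915_reg.h:

#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real constants live in i915_reg.h. */
#define FENCE_PITCH_SHIFT     2
#define FENCE_TILING_Y_SHIFT  1
#define FENCE_REG_VALID       (1u << 0)

/* Pack an i965-style fence: the high half holds the last page of the
 * fenced range, the low half its first page, plus pitch-in-tiles - 1,
 * a Y-tiling flag, and a valid bit -- mirroring the arithmetic in
 * i965_write_fence_reg() above.
 */
static uint64_t pack_fence(uint32_t gtt_offset, uint32_t size,
			   uint32_t stride, int tiling_y)
{
	uint64_t val;

	val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
	val |= gtt_offset & 0xfffff000;
	val |= (uint64_t)((stride / 128) - 1) << FENCE_PITCH_SHIFT;
	if (tiling_y)
		val |= 1u << FENCE_TILING_Y_SHIFT;
	val |= FENCE_REG_VALID;
	return val;
}

int main(void)
{
	/* 1MiB X-tiled object at GTT offset 16MiB with a 512B stride. */
	printf("0x%016llx\n",
	       (unsigned long long)pack_fence(16u << 20, 1u << 20, 512, 0));
	return 0;
}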
2302static void i915_write_fence_reg(struct drm_i915_fence_reg *reg) 2263static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
2264 struct intel_ring_buffer *pipelined)
2303{ 2265{
2304 struct drm_gem_object *obj = reg->obj; 2266 struct drm_device *dev = obj->base.dev;
2305 struct drm_device *dev = obj->dev;
2306 drm_i915_private_t *dev_priv = dev->dev_private; 2267 drm_i915_private_t *dev_priv = dev->dev_private;
2307 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2268 u32 size = obj->gtt_space->size;
2308 int regnum = obj_priv->fence_reg; 2269 u32 fence_reg, val, pitch_val;
2309 int tile_width; 2270 int tile_width;
2310 uint32_t fence_reg, val;
2311 uint32_t pitch_val;
2312 2271
2313 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 2272 if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
2314 (obj_priv->gtt_offset & (obj->size - 1))) { 2273 (size & -size) != size ||
2315 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n", 2274 (obj->gtt_offset & (size - 1)),
2316 __func__, obj_priv->gtt_offset, obj->size); 2275 "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
2317 return; 2276 obj->gtt_offset, obj->map_and_fenceable, size))
2318 } 2277 return -EINVAL;
2319 2278
2320 if (obj_priv->tiling_mode == I915_TILING_Y && 2279 if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
2321 HAS_128_BYTE_Y_TILING(dev))
2322 tile_width = 128; 2280 tile_width = 128;
2323 else 2281 else
2324 tile_width = 512; 2282 tile_width = 512;
2325 2283
2326 /* Note: pitch better be a power of two tile widths */ 2284 /* Note: pitch better be a power of two tile widths */
2327 pitch_val = obj_priv->stride / tile_width; 2285 pitch_val = obj->stride / tile_width;
2328 pitch_val = ffs(pitch_val) - 1; 2286 pitch_val = ffs(pitch_val) - 1;
2329 2287
2330 if (obj_priv->tiling_mode == I915_TILING_Y && 2288 val = obj->gtt_offset;
2331 HAS_128_BYTE_Y_TILING(dev)) 2289 if (obj->tiling_mode == I915_TILING_Y)
2332 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2333 else
2334 WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
2335
2336 val = obj_priv->gtt_offset;
2337 if (obj_priv->tiling_mode == I915_TILING_Y)
2338 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2290 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2339 val |= I915_FENCE_SIZE_BITS(obj->size); 2291 val |= I915_FENCE_SIZE_BITS(size);
2340 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2292 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2341 val |= I830_FENCE_REG_VALID; 2293 val |= I830_FENCE_REG_VALID;
2342 2294
2343 if (regnum < 8) 2295 fence_reg = obj->fence_reg;
2344 fence_reg = FENCE_REG_830_0 + (regnum * 4); 2296 if (fence_reg < 8)
2297 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2345 else 2298 else
2346 fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4); 2299 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2347 I915_WRITE(fence_reg, val); 2300
2301 if (pipelined) {
2302 int ret = intel_ring_begin(pipelined, 4);
2303 if (ret)
2304 return ret;
2305
2306 intel_ring_emit(pipelined, MI_NOOP);
2307 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2308 intel_ring_emit(pipelined, fence_reg);
2309 intel_ring_emit(pipelined, val);
2310 intel_ring_advance(pipelined);
2311 } else
2312 I915_WRITE(fence_reg, val);
2313
2314 return 0;
2348} 2315}
2349 2316
2350static void i830_write_fence_reg(struct drm_i915_fence_reg *reg) 2317static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
2318 struct intel_ring_buffer *pipelined)
2351{ 2319{
2352 struct drm_gem_object *obj = reg->obj; 2320 struct drm_device *dev = obj->base.dev;
2353 struct drm_device *dev = obj->dev;
2354 drm_i915_private_t *dev_priv = dev->dev_private; 2321 drm_i915_private_t *dev_priv = dev->dev_private;
2355 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2322 u32 size = obj->gtt_space->size;
2356 int regnum = obj_priv->fence_reg; 2323 int regnum = obj->fence_reg;
2357 uint32_t val; 2324 uint32_t val;
2358 uint32_t pitch_val; 2325 uint32_t pitch_val;
2359 uint32_t fence_size_bits;
2360 2326
2361 if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) || 2327 if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
2362 (obj_priv->gtt_offset & (obj->size - 1))) { 2328 (size & -size) != size ||
2363 WARN(1, "%s: object 0x%08x not 512K or size aligned\n", 2329 (obj->gtt_offset & (size - 1)),
2364 __func__, obj_priv->gtt_offset); 2330 "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
2365 return; 2331 obj->gtt_offset, size))
2366 } 2332 return -EINVAL;
2367 2333
2368 pitch_val = obj_priv->stride / 128; 2334 pitch_val = obj->stride / 128;
2369 pitch_val = ffs(pitch_val) - 1; 2335 pitch_val = ffs(pitch_val) - 1;
2370 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2371 2336
2372 val = obj_priv->gtt_offset; 2337 val = obj->gtt_offset;
2373 if (obj_priv->tiling_mode == I915_TILING_Y) 2338 if (obj->tiling_mode == I915_TILING_Y)
2374 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2339 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
2375 fence_size_bits = I830_FENCE_SIZE_BITS(obj->size); 2340 val |= I830_FENCE_SIZE_BITS(size);
2376 WARN_ON(fence_size_bits & ~0x00000f00);
2377 val |= fence_size_bits;
2378 val |= pitch_val << I830_FENCE_PITCH_SHIFT; 2341 val |= pitch_val << I830_FENCE_PITCH_SHIFT;
2379 val |= I830_FENCE_REG_VALID; 2342 val |= I830_FENCE_REG_VALID;
2380 2343
2381 I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val); 2344 if (pipelined) {
2345 int ret = intel_ring_begin(pipelined, 4);
2346 if (ret)
2347 return ret;
2348
2349 intel_ring_emit(pipelined, MI_NOOP);
2350 intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
2351 intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
2352 intel_ring_emit(pipelined, val);
2353 intel_ring_advance(pipelined);
2354 } else
2355 I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
2356
2357 return 0;
2382} 2358}
2383 2359
2384static int i915_find_fence_reg(struct drm_device *dev, 2360static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
2385 bool interruptible) 2361{
2362 return i915_seqno_passed(ring->get_seqno(ring), seqno);
2363}
2364
2365static int
2366i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
2367 struct intel_ring_buffer *pipelined,
2368 bool interruptible)
2369{
2370 int ret;
2371
2372 if (obj->fenced_gpu_access) {
2373 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2374 i915_gem_flush_ring(obj->base.dev,
2375 obj->last_fenced_ring,
2376 0, obj->base.write_domain);
2377
2378 obj->fenced_gpu_access = false;
2379 }
2380
2381 if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
2382 if (!ring_passed_seqno(obj->last_fenced_ring,
2383 obj->last_fenced_seqno)) {
2384 ret = i915_do_wait_request(obj->base.dev,
2385 obj->last_fenced_seqno,
2386 interruptible,
2387 obj->last_fenced_ring);
2388 if (ret)
2389 return ret;
2390 }
2391
2392 obj->last_fenced_seqno = 0;
2393 obj->last_fenced_ring = NULL;
2394 }
2395
2396 return 0;
2397}
2398
2399int
2400i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
2401{
2402 int ret;
2403
2404 if (obj->tiling_mode)
2405 i915_gem_release_mmap(obj);
2406
2407 ret = i915_gem_object_flush_fence(obj, NULL, true);
2408 if (ret)
2409 return ret;
2410
2411 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2412 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2413 i915_gem_clear_fence_reg(obj->base.dev,
2414 &dev_priv->fence_regs[obj->fence_reg]);
2415
2416 obj->fence_reg = I915_FENCE_REG_NONE;
2417 }
2418
2419 return 0;
2420}
2421
2422static struct drm_i915_fence_reg *
2423i915_find_fence_reg(struct drm_device *dev,
2424 struct intel_ring_buffer *pipelined)
2386{ 2425{
2387 struct drm_i915_fence_reg *reg = NULL;
2388 struct drm_i915_gem_object *obj_priv = NULL;
2389 struct drm_i915_private *dev_priv = dev->dev_private; 2426 struct drm_i915_private *dev_priv = dev->dev_private;
2390 struct drm_gem_object *obj = NULL; 2427 struct drm_i915_fence_reg *reg, *first, *avail;
2391 int i, avail, ret; 2428 int i;
2392 2429
2393 /* First try to find a free reg */ 2430 /* First try to find a free reg */
2394 avail = 0; 2431 avail = NULL;
2395 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) { 2432 for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
2396 reg = &dev_priv->fence_regs[i]; 2433 reg = &dev_priv->fence_regs[i];
2397 if (!reg->obj) 2434 if (!reg->obj)
2398 return i; 2435 return reg;
2399 2436
2400 obj_priv = to_intel_bo(reg->obj); 2437 if (!reg->obj->pin_count)
2401 if (!obj_priv->pin_count) 2438 avail = reg;
2402 avail++;
2403 } 2439 }
2404 2440
2405 if (avail == 0) 2441 if (avail == NULL)
2406 return -ENOSPC; 2442 return NULL;
2407 2443
2408 /* None available, try to steal one or wait for a user to finish */ 2444 /* None available, try to steal one or wait for a user to finish */
2409 i = I915_FENCE_REG_NONE; 2445 avail = first = NULL;
2410 list_for_each_entry(reg, &dev_priv->mm.fence_list, 2446 list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
2411 lru_list) { 2447 if (reg->obj->pin_count)
2412 obj = reg->obj;
2413 obj_priv = to_intel_bo(obj);
2414
2415 if (obj_priv->pin_count)
2416 continue; 2448 continue;
2417 2449
2418 /* found one! */ 2450 if (first == NULL)
2419 i = obj_priv->fence_reg; 2451 first = reg;
2420 break; 2452
2453 if (!pipelined ||
2454 !reg->obj->last_fenced_ring ||
2455 reg->obj->last_fenced_ring == pipelined) {
2456 avail = reg;
2457 break;
2458 }
2421 } 2459 }
2422 2460
2423 BUG_ON(i == I915_FENCE_REG_NONE); 2461 if (avail == NULL)
2462 avail = first;
2424 2463
2425 /* We only have a reference on obj from the active list. put_fence_reg 2464 return avail;
2426 * might drop that one, causing a use-after-free in it. So hold a
2427 * private reference to obj like the other callers of put_fence_reg
2428 * (set_tiling ioctl) do. */
2429 drm_gem_object_reference(obj);
2430 ret = i915_gem_object_put_fence_reg(obj, interruptible);
2431 drm_gem_object_unreference(obj);
2432 if (ret != 0)
2433 return ret;
2434
2435 return i;
2436} 2465}
2437 2466
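i915_find_fence_reg() now returns a register pointer instead of an index and, when it has to steal, prefers a victim whose last fenced access was on the requested ring so the update can be pipelined. A compact model of that policy over a plain array; the struct layout is a stand-in for the real fence bookkeeping, and the LRU list is flattened into array order:

#include <stddef.h>
#include <stdio.h>

struct fence_reg {
	int in_use;        /* reg->obj != NULL in the driver */
	int pin_count;     /* pinned objects cannot be stolen */
	int last_ring;     /* obj->last_fenced_ring, 0 = none */
};

/* Free register first; otherwise the first unpinned register, but
 * prefer one already tied to 'pipelined' (or to no ring at all).
 */
static struct fence_reg *
find_fence_reg(struct fence_reg *regs, int n, int pipelined)
{
	struct fence_reg *first = NULL;
	int i;

	for (i = 0; i < n; i++)
		if (!regs[i].in_use)
			return &regs[i];

	for (i = 0; i < n; i++) {
		if (regs[i].pin_count)
			continue;
		if (first == NULL)
			first = &regs[i];
		if (!pipelined || !regs[i].last_ring ||
		    regs[i].last_ring == pipelined)
			return &regs[i];
	}
	return first;      /* may be NULL: everything pinned */
}

int main(void)
{
	struct fence_reg regs[3] = {
		{ 1, 1, 1 }, { 1, 0, 2 }, { 1, 0, 3 },
	};
	struct fence_reg *reg = find_fence_reg(regs, 3, 3);

	printf("stole reg %d\n", (int)(reg - regs));   /* 2 */
	return 0;
}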
2438/** 2467/**
2439 * i915_gem_object_get_fence_reg - set up a fence reg for an object 2468 * i915_gem_object_get_fence - set up a fence reg for an object
2440 * @obj: object to map through a fence reg 2469 * @obj: object to map through a fence reg
2470 * @pipelined: ring on which to queue the change, or NULL for CPU access
2471 * @interruptible: may we be interrupted while waiting for the register to retire?
2441 * 2472 *
2442 * When mapping objects through the GTT, userspace wants to be able to write 2473 * When mapping objects through the GTT, userspace wants to be able to write
2443 * to them without having to worry about swizzling if the object is tiled. 2474 * to them without having to worry about swizzling if the object is tiled.
@@ -2449,72 +2480,138 @@ static int i915_find_fence_reg(struct drm_device *dev,
2449 * and tiling format. 2480 * and tiling format.
2450 */ 2481 */
2451int 2482int
2452i915_gem_object_get_fence_reg(struct drm_gem_object *obj, 2483i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
2453 bool interruptible) 2484 struct intel_ring_buffer *pipelined,
2485 bool interruptible)
2454{ 2486{
2455 struct drm_device *dev = obj->dev; 2487 struct drm_device *dev = obj->base.dev;
2456 struct drm_i915_private *dev_priv = dev->dev_private; 2488 struct drm_i915_private *dev_priv = dev->dev_private;
2457 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2489 struct drm_i915_fence_reg *reg;
2458 struct drm_i915_fence_reg *reg = NULL;
2459 int ret; 2490 int ret;
2460 2491
2461 /* Just update our place in the LRU if our fence is getting used. */ 2492 /* XXX disable pipelining. There are bugs. Shocking. */
2462 if (obj_priv->fence_reg != I915_FENCE_REG_NONE) { 2493 pipelined = NULL;
2463 reg = &dev_priv->fence_regs[obj_priv->fence_reg]; 2494
2495 /* Just update our place in the LRU if our fence is getting reused. */
2496 if (obj->fence_reg != I915_FENCE_REG_NONE) {
2497 reg = &dev_priv->fence_regs[obj->fence_reg];
2464 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2498 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2499
2500 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2501 pipelined = NULL;
2502
2503 if (!pipelined) {
2504 if (reg->setup_seqno) {
2505 if (!ring_passed_seqno(obj->last_fenced_ring,
2506 reg->setup_seqno)) {
2507 ret = i915_do_wait_request(obj->base.dev,
2508 reg->setup_seqno,
2509 interruptible,
2510 obj->last_fenced_ring);
2511 if (ret)
2512 return ret;
2513 }
2514
2515 reg->setup_seqno = 0;
2516 }
2517 } else if (obj->last_fenced_ring &&
2518 obj->last_fenced_ring != pipelined) {
2519 ret = i915_gem_object_flush_fence(obj,
2520 pipelined,
2521 interruptible);
2522 if (ret)
2523 return ret;
2524 } else if (obj->tiling_changed) {
2525 if (obj->fenced_gpu_access) {
2526 if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
2527 i915_gem_flush_ring(obj->base.dev, obj->ring,
2528 0, obj->base.write_domain);
2529
2530 obj->fenced_gpu_access = false;
2531 }
2532 }
2533
2534 if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2535 pipelined = NULL;
2536 BUG_ON(!pipelined && reg->setup_seqno);
2537
2538 if (obj->tiling_changed) {
2539 if (pipelined) {
2540 reg->setup_seqno =
2541 i915_gem_next_request_seqno(dev, pipelined);
2542 obj->last_fenced_seqno = reg->setup_seqno;
2543 obj->last_fenced_ring = pipelined;
2544 }
2545 goto update;
2546 }
2547
2465 return 0; 2548 return 0;
2466 } 2549 }
2467 2550
2468 switch (obj_priv->tiling_mode) { 2551 reg = i915_find_fence_reg(dev, pipelined);
2469 case I915_TILING_NONE: 2552 if (reg == NULL)
2470 WARN(1, "allocating a fence for non-tiled object?\n"); 2553 return -ENOSPC;
2471 break;
2472 case I915_TILING_X:
2473 if (!obj_priv->stride)
2474 return -EINVAL;
2475 WARN((obj_priv->stride & (512 - 1)),
2476 "object 0x%08x is X tiled but has non-512B pitch\n",
2477 obj_priv->gtt_offset);
2478 break;
2479 case I915_TILING_Y:
2480 if (!obj_priv->stride)
2481 return -EINVAL;
2482 WARN((obj_priv->stride & (128 - 1)),
2483 "object 0x%08x is Y tiled but has non-128B pitch\n",
2484 obj_priv->gtt_offset);
2485 break;
2486 }
2487 2554
2488 ret = i915_find_fence_reg(dev, interruptible); 2555 ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
2489 if (ret < 0) 2556 if (ret)
2490 return ret; 2557 return ret;
2491 2558
2492 obj_priv->fence_reg = ret; 2559 if (reg->obj) {
2493 reg = &dev_priv->fence_regs[obj_priv->fence_reg]; 2560 struct drm_i915_gem_object *old = reg->obj;
2494 list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list); 2561
2562 drm_gem_object_reference(&old->base);
2563
2564 if (old->tiling_mode)
2565 i915_gem_release_mmap(old);
2566
2567 ret = i915_gem_object_flush_fence(old,
2568 pipelined,
2569 interruptible);
2570 if (ret) {
2571 drm_gem_object_unreference(&old->base);
2572 return ret;
2573 }
2574
2575 if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
2576 pipelined = NULL;
2577
2578 old->fence_reg = I915_FENCE_REG_NONE;
2579 old->last_fenced_ring = pipelined;
2580 old->last_fenced_seqno =
2581 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2582
2583 drm_gem_object_unreference(&old->base);
2584 } else if (obj->last_fenced_seqno == 0)
2585 pipelined = NULL;
2495 2586
2496 reg->obj = obj; 2587 reg->obj = obj;
2588 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2589 obj->fence_reg = reg - dev_priv->fence_regs;
2590 obj->last_fenced_ring = pipelined;
2591
2592 reg->setup_seqno =
2593 pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
2594 obj->last_fenced_seqno = reg->setup_seqno;
2497 2595
2596update:
2597 obj->tiling_changed = false;
2498 switch (INTEL_INFO(dev)->gen) { 2598 switch (INTEL_INFO(dev)->gen) {
2499 case 6: 2599 case 6:
2500 sandybridge_write_fence_reg(reg); 2600 ret = sandybridge_write_fence_reg(obj, pipelined);
2501 break; 2601 break;
2502 case 5: 2602 case 5:
2503 case 4: 2603 case 4:
2504 i965_write_fence_reg(reg); 2604 ret = i965_write_fence_reg(obj, pipelined);
2505 break; 2605 break;
2506 case 3: 2606 case 3:
2507 i915_write_fence_reg(reg); 2607 ret = i915_write_fence_reg(obj, pipelined);
2508 break; 2608 break;
2509 case 2: 2609 case 2:
2510 i830_write_fence_reg(reg); 2610 ret = i830_write_fence_reg(obj, pipelined);
2511 break; 2611 break;
2512 } 2612 }
2513 2613
2514 trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg, 2614 return ret;
2515 obj_priv->tiling_mode);
2516
2517 return 0;
2518} 2615}
2519 2616
2520/** 2617/**
@@ -2522,154 +2619,127 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
2522 * @obj: object to clear 2619 * @obj: object to clear
2523 * 2620 *
2524 * Zeroes out the fence register itself and clears out the associated 2621 * Zeroes out the fence register itself and clears out the associated
2525 * data structures in dev_priv and obj_priv. 2622 * data structures in dev_priv and obj.
2526 */ 2623 */
2527static void 2624static void
2528i915_gem_clear_fence_reg(struct drm_gem_object *obj) 2625i915_gem_clear_fence_reg(struct drm_device *dev,
2626 struct drm_i915_fence_reg *reg)
2529{ 2627{
2530 struct drm_device *dev = obj->dev;
2531 drm_i915_private_t *dev_priv = dev->dev_private; 2628 drm_i915_private_t *dev_priv = dev->dev_private;
2532 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 2629 uint32_t fence_reg = reg - dev_priv->fence_regs;
2533 struct drm_i915_fence_reg *reg =
2534 &dev_priv->fence_regs[obj_priv->fence_reg];
2535 uint32_t fence_reg;
2536 2630
2537 switch (INTEL_INFO(dev)->gen) { 2631 switch (INTEL_INFO(dev)->gen) {
2538 case 6: 2632 case 6:
2539 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + 2633 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
2540 (obj_priv->fence_reg * 8), 0);
2541 break; 2634 break;
2542 case 5: 2635 case 5:
2543 case 4: 2636 case 4:
2544 I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0); 2637 I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
2545 break; 2638 break;
2546 case 3: 2639 case 3:
2547 if (obj_priv->fence_reg >= 8) 2640 if (fence_reg >= 8)
2548 fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4; 2641 fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
2549 else 2642 else
2550 case 2: 2643 case 2:
2551 fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4; 2644 fence_reg = FENCE_REG_830_0 + fence_reg * 4;
2552 2645
2553 I915_WRITE(fence_reg, 0); 2646 I915_WRITE(fence_reg, 0);
2554 break; 2647 break;
2555 } 2648 }
2556 2649
2557 reg->obj = NULL;
2558 obj_priv->fence_reg = I915_FENCE_REG_NONE;
2559 list_del_init(&reg->lru_list); 2650 list_del_init(&reg->lru_list);
2560} 2651 reg->obj = NULL;
2561 2652 reg->setup_seqno = 0;
2562/**
2563 * i915_gem_object_put_fence_reg - waits on outstanding fenced access
2564 * to the buffer to finish, and then resets the fence register.
2565 * @obj: tiled object holding a fence register.
2566 * @bool: whether the wait upon the fence is interruptible
2567 *
2568 * Zeroes out the fence register itself and clears out the associated
2569 * data structures in dev_priv and obj_priv.
2570 */
2571int
2572i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
2573 bool interruptible)
2574{
2575 struct drm_device *dev = obj->dev;
2576 struct drm_i915_private *dev_priv = dev->dev_private;
2577 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2578 struct drm_i915_fence_reg *reg;
2579
2580 if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
2581 return 0;
2582
2583 /* If we've changed tiling, GTT-mappings of the object
2584 * need to re-fault to ensure that the correct fence register
2585 * setup is in place.
2586 */
2587 i915_gem_release_mmap(obj);
2588
2589 /* On the i915, GPU access to tiled buffers is via a fence,
2590 * therefore we must wait for any outstanding access to complete
2591 * before clearing the fence.
2592 */
2593 reg = &dev_priv->fence_regs[obj_priv->fence_reg];
2594 if (reg->gpu) {
2595 int ret;
2596
2597 ret = i915_gem_object_flush_gpu_write_domain(obj, true);
2598 if (ret)
2599 return ret;
2600
2601 ret = i915_gem_object_wait_rendering(obj, interruptible);
2602 if (ret)
2603 return ret;
2604
2605 reg->gpu = false;
2606 }
2607
2608 i915_gem_object_flush_gtt_write_domain(obj);
2609 i915_gem_clear_fence_reg(obj);
2610
2611 return 0;
2612} 2653}
2613 2654
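Clearing a fence now takes (dev, reg) and derives the index from the register's position in dev_priv->fence_regs. The per-generation register banks reduce to a small dispatch; the sketch below spells out the fallthrough that the original expresses with its unusual else-into-case construct, and the MMIO offsets are made up for illustration:

#include <stdint.h>
#include <stdio.h>

/* Stand-in MMIO offsets; the real ones come from i915_reg.h. */
#define REG_830_BASE   0x2000u   /* gen2/3, 32-bit regs, first 8 */
#define REG_945_BASE   0x3000u   /* gen3, regs 8..15             */
#define REG_965_BASE   0x4000u   /* gen4/5, 64-bit regs          */
#define REG_SNB_BASE   0x5000u   /* gen6, 64-bit regs            */

/* Return the MMIO offset to zero for fence 'n' on a given
 * generation, mirroring the switch in i915_gem_clear_fence_reg().
 */
static uint32_t fence_offset(int gen, int n)
{
	switch (gen) {
	case 6:
		return REG_SNB_BASE + n * 8;
	case 5:
	case 4:
		return REG_965_BASE + n * 8;
	case 3:
		if (n >= 8)
			return REG_945_BASE + (n - 8) * 4;
		/* fall through to the gen2 bank for regs 0..7 */
	case 2:
	default:
		return REG_830_BASE + n * 4;
	}
}

int main(void)
{
	printf("gen3 fence 9 -> 0x%x\n", fence_offset(3, 9));
	printf("gen3 fence 2 -> 0x%x\n", fence_offset(3, 2));
	return 0;
}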
2614/** 2655/**
2615 * Finds free space in the GTT aperture and binds the object there. 2656 * Finds free space in the GTT aperture and binds the object there.
2616 */ 2657 */
2617static int 2658static int
2618i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) 2659i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
2660 unsigned alignment,
2661 bool map_and_fenceable)
2619{ 2662{
2620 struct drm_device *dev = obj->dev; 2663 struct drm_device *dev = obj->base.dev;
2621 drm_i915_private_t *dev_priv = dev->dev_private; 2664 drm_i915_private_t *dev_priv = dev->dev_private;
2622 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2623 struct drm_mm_node *free_space; 2665 struct drm_mm_node *free_space;
2624 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN; 2666 gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
2667 u32 size, fence_size, fence_alignment, unfenced_alignment;
2668 bool mappable, fenceable;
2625 int ret; 2669 int ret;
2626 2670
2627 if (obj_priv->madv != I915_MADV_WILLNEED) { 2671 if (obj->madv != I915_MADV_WILLNEED) {
2628 DRM_ERROR("Attempting to bind a purgeable object\n"); 2672 DRM_ERROR("Attempting to bind a purgeable object\n");
2629 return -EINVAL; 2673 return -EINVAL;
2630 } 2674 }
2631 2675
2676 fence_size = i915_gem_get_gtt_size(obj);
2677 fence_alignment = i915_gem_get_gtt_alignment(obj);
2678 unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
2679
2632 if (alignment == 0) 2680 if (alignment == 0)
2633 alignment = i915_gem_get_gtt_alignment(obj); 2681 alignment = map_and_fenceable ? fence_alignment :
2634 if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) { 2682 unfenced_alignment;
2683 if (map_and_fenceable && alignment & (fence_alignment - 1)) {
2635 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 2684 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
2636 return -EINVAL; 2685 return -EINVAL;
2637 } 2686 }
2638 2687
2688 size = map_and_fenceable ? fence_size : obj->base.size;
2689
2639 /* If the object is bigger than the entire aperture, reject it early 2690 /* If the object is bigger than the entire aperture, reject it early
2640 * before evicting everything in a vain attempt to find space. 2691 * before evicting everything in a vain attempt to find space.
2641 */ 2692 */
2642 if (obj->size > dev_priv->mm.gtt_total) { 2693 if (obj->base.size >
2694 (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
2643 DRM_ERROR("Attempting to bind an object larger than the aperture\n"); 2695 DRM_ERROR("Attempting to bind an object larger than the aperture\n");
2644 return -E2BIG; 2696 return -E2BIG;
2645 } 2697 }
2646 2698
2647 search_free: 2699 search_free:
2648 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 2700 if (map_and_fenceable)
2649 obj->size, alignment, 0); 2701 free_space =
2650 if (free_space != NULL) 2702 drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
2651 obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, 2703 size, alignment, 0,
2652 alignment); 2704 dev_priv->mm.gtt_mappable_end,
2653 if (obj_priv->gtt_space == NULL) { 2705 0);
2706 else
2707 free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
2708 size, alignment, 0);
2709
2710 if (free_space != NULL) {
2711 if (map_and_fenceable)
2712 obj->gtt_space =
2713 drm_mm_get_block_range_generic(free_space,
2714 size, alignment, 0,
2715 dev_priv->mm.gtt_mappable_end,
2716 0);
2717 else
2718 obj->gtt_space =
2719 drm_mm_get_block(free_space, size, alignment);
2720 }
2721 if (obj->gtt_space == NULL) {
2654 /* If the gtt is empty and we're still having trouble 2722 /* If the gtt is empty and we're still having trouble
2655 * fitting our object in, we're out of memory. 2723 * fitting our object in, we're out of memory.
2656 */ 2724 */
2657 ret = i915_gem_evict_something(dev, obj->size, alignment); 2725 ret = i915_gem_evict_something(dev, size, alignment,
2726 map_and_fenceable);
2658 if (ret) 2727 if (ret)
2659 return ret; 2728 return ret;
2660 2729
2661 goto search_free; 2730 goto search_free;
2662 } 2731 }
2663 2732
2664 ret = i915_gem_object_get_pages(obj, gfpmask); 2733 ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
2665 if (ret) { 2734 if (ret) {
2666 drm_mm_put_block(obj_priv->gtt_space); 2735 drm_mm_put_block(obj->gtt_space);
2667 obj_priv->gtt_space = NULL; 2736 obj->gtt_space = NULL;
2668 2737
2669 if (ret == -ENOMEM) { 2738 if (ret == -ENOMEM) {
2670 /* first try to clear up some space from the GTT */ 2739 /* first try to clear up some space from the GTT */
2671 ret = i915_gem_evict_something(dev, obj->size, 2740 ret = i915_gem_evict_something(dev, size,
2672 alignment); 2741 alignment,
2742 map_and_fenceable);
2673 if (ret) { 2743 if (ret) {
2674 /* now try to shrink everyone else */ 2744 /* now try to shrink everyone else */
2675 if (gfpmask) { 2745 if (gfpmask) {
@@ -2686,126 +2756,113 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2686 return ret; 2756 return ret;
2687 } 2757 }
2688 2758
2689 /* Create an AGP memory structure pointing at our pages, and bind it 2759 ret = i915_gem_gtt_bind_object(obj);
2690 * into the GTT. 2760 if (ret) {
2691 */ 2761 i915_gem_object_put_pages_gtt(obj);
2692 obj_priv->agp_mem = drm_agp_bind_pages(dev, 2762 drm_mm_put_block(obj->gtt_space);
2693 obj_priv->pages, 2763 obj->gtt_space = NULL;
2694 obj->size >> PAGE_SHIFT, 2764
2695 obj_priv->gtt_space->start, 2765 ret = i915_gem_evict_something(dev, size,
2696 obj_priv->agp_type); 2766 alignment, map_and_fenceable);
2697 if (obj_priv->agp_mem == NULL) {
2698 i915_gem_object_put_pages(obj);
2699 drm_mm_put_block(obj_priv->gtt_space);
2700 obj_priv->gtt_space = NULL;
2701
2702 ret = i915_gem_evict_something(dev, obj->size, alignment);
2703 if (ret) 2767 if (ret)
2704 return ret; 2768 return ret;
2705 2769
2706 goto search_free; 2770 goto search_free;
2707 } 2771 }
2708 2772
2709 /* keep track of bounds object by adding it to the inactive list */ 2773 list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
2710 list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list); 2774 list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
2711 i915_gem_info_add_gtt(dev_priv, obj->size);
2712 2775
2713 /* Assert that the object is not currently in any GPU domain. As it 2776 /* Assert that the object is not currently in any GPU domain. As it
2714 * wasn't in the GTT, there shouldn't be any way it could have been in 2777 * wasn't in the GTT, there shouldn't be any way it could have been in
2715 * a GPU cache 2778 * a GPU cache
2716 */ 2779 */
2717 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2780 BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2718 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2781 BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2719 2782
2720 obj_priv->gtt_offset = obj_priv->gtt_space->start; 2783 obj->gtt_offset = obj->gtt_space->start;
2721 trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
2722 2784
2785 fenceable =
2786 obj->gtt_space->size == fence_size &&
2787 (obj->gtt_space->start & (fence_alignment - 1)) == 0;
2788
2789 mappable =
2790 obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
2791
2792 obj->map_and_fenceable = mappable && fenceable;
2793
2794 trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
2723 return 0; 2795 return 0;
2724} 2796}
2725 2797
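The bind path now records two properties separately: whether the binding can be reached through the CPU-visible aperture (mappable) and whether its placement satisfies fence size and alignment (fenceable). A standalone sketch of the predicates computed at the end of the hunk, with field names shortened:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct binding {
	uint32_t start;            /* gtt_space->start */
	uint32_t node_size;        /* gtt_space->size  */
	uint32_t obj_size;         /* obj->base.size   */
};

static bool binding_map_and_fenceable(const struct binding *b,
				      uint32_t fence_size,
				      uint32_t fence_alignment,
				      uint32_t mappable_end)
{
	/* Fenceable: the node is exactly fence-sized and fence-aligned,
	 * so a fence register can cover it without slack.
	 */
	bool fenceable = b->node_size == fence_size &&
			 (b->start & (fence_alignment - 1)) == 0;

	/* Mappable: the whole object sits below the CPU-visible end
	 * of the GTT aperture.
	 */
	bool mappable = b->start + b->obj_size <= mappable_end;

	return mappable && fenceable;
}

int main(void)
{
	struct binding b = { .start = 0x100000, .node_size = 0x100000,
			     .obj_size = 0x100000 };

	printf("%d\n", binding_map_and_fenceable(&b, 0x100000, 0x100000,
						 0x10000000));   /* 1 */
	return 0;
}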
2726void 2798void
2727i915_gem_clflush_object(struct drm_gem_object *obj) 2799i915_gem_clflush_object(struct drm_i915_gem_object *obj)
2728{ 2800{
2729 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
2730
2731 /* If we don't have a page list set up, then we're not pinned 2801 /* If we don't have a page list set up, then we're not pinned
2732 * to GPU, and we can ignore the cache flush because it'll happen 2802 * to GPU, and we can ignore the cache flush because it'll happen
2733 * again at bind time. 2803 * again at bind time.
2734 */ 2804 */
2735 if (obj_priv->pages == NULL) 2805 if (obj->pages == NULL)
2736 return; 2806 return;
2737 2807
2738 trace_i915_gem_object_clflush(obj); 2808 trace_i915_gem_object_clflush(obj);
2739 2809
2740 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2810 drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
2741} 2811}
2742 2812
2743/** Flushes any GPU write domain for the object if it's dirty. */ 2813/** Flushes any GPU write domain for the object if it's dirty. */
2744static int 2814static void
2745i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj, 2815i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
2746 bool pipelined)
2747{ 2816{
2748 struct drm_device *dev = obj->dev; 2817 struct drm_device *dev = obj->base.dev;
2749 uint32_t old_write_domain;
2750 2818
2751 if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 2819 if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
2752 return 0; 2820 return;
2753 2821
2754 /* Queue the GPU write cache flushing we need. */ 2822 /* Queue the GPU write cache flushing we need. */
2755 old_write_domain = obj->write_domain; 2823 i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
2756 i915_gem_flush_ring(dev, NULL, 2824 BUG_ON(obj->base.write_domain);
2757 to_intel_bo(obj)->ring,
2758 0, obj->write_domain);
2759 BUG_ON(obj->write_domain);
2760
2761 trace_i915_gem_object_change_domain(obj,
2762 obj->read_domains,
2763 old_write_domain);
2764
2765 if (pipelined)
2766 return 0;
2767
2768 return i915_gem_object_wait_rendering(obj, true);
2769} 2825}
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 {
 	uint32_t old_write_domain;
 
-	if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
 		return;
 
 	/* No actual flushing is required for the GTT write domain.  Writes
 	 * to it immediately go to main memory as far as we know, so there's
 	 * no chipset flush.  It also doesn't land in render cache.
 	 */
-	old_write_domain = obj->write_domain;
-	obj->write_domain = 0;
+	i915_gem_release_mmap(obj);
+
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
 
 	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
+					    obj->base.read_domains,
 					    old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
 
-	if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
 		return;
 
 	i915_gem_clflush_object(obj);
-	drm_agp_chipset_flush(dev);
-	old_write_domain = obj->write_domain;
-	obj->write_domain = 0;
+	intel_gtt_chipset_flush();
+	old_write_domain = obj->base.write_domain;
+	obj->base.write_domain = 0;
 
 	trace_i915_gem_object_change_domain(obj,
-					    obj->read_domains,
+					    obj->base.read_domains,
 					    old_write_domain);
 }
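The two flush paths above differ only in what the hardware requires: GTT writes are already in memory, while CPU writes still sit in the CPU cache and need a clflush plus a chipset flush. A toy dispatcher, with strings standing in for the real calls, summarizing the rule:

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU 0x1u
#define DOMAIN_GTT 0x40u

/* Writes through the GTT go straight to main memory, so only the
 * bookkeeping changes; CPU writes need an explicit cache flush. */
static const char *flush_action(uint32_t write_domain)
{
	switch (write_domain) {
	case DOMAIN_GTT: return "no cache flush, just clear write_domain";
	case DOMAIN_CPU: return "clflush pages, then chipset flush";
	default:         return "handled by the GPU flush path";
	}
}

int main(void)
{
	printf("GTT: %s\n", flush_action(DOMAIN_GTT));
	printf("CPU: %s\n", flush_action(DOMAIN_CPU));
	return 0;
}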
2811 2868
@@ -2816,40 +2873,36 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  * flushes to occur.
  */
 int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
-	if (ret != 0)
-		return ret;
-
-	i915_gem_object_flush_cpu_write_domain(obj);
-
-	if (write) {
+	i915_gem_object_flush_gpu_write_domain(obj);
+	if (obj->pending_gpu_write || write) {
 		ret = i915_gem_object_wait_rendering(obj, true);
 		if (ret)
 			return ret;
 	}
 
-	old_write_domain = obj->write_domain;
-	old_read_domains = obj->read_domains;
+	i915_gem_object_flush_cpu_write_domain(obj);
+
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 	if (write) {
-		obj->read_domains = I915_GEM_DOMAIN_GTT;
-		obj->write_domain = I915_GEM_DOMAIN_GTT;
-		obj_priv->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+		obj->dirty = 1;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2864,23 +2917,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-				     bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+				     struct intel_ring_buffer *pipelined)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
-	if (obj_priv->gtt_space == NULL)
+	if (obj->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
-	if (ret)
-		return ret;
+	i915_gem_object_flush_gpu_write_domain(obj);
 
 	/* Currently, we are always called from an non-interruptible context. */
-	if (!pipelined) {
+	if (pipelined != obj->ring) {
 		ret = i915_gem_object_wait_rendering(obj, false);
 		if (ret)
 			return ret;
@@ -2888,12 +2938,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	old_read_domains = obj->read_domains;
-	obj->read_domains |= I915_GEM_DOMAIN_GTT;
+	old_read_domains = obj->base.read_domains;
+	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->write_domain);
+					    obj->base.write_domain);
 
 	return 0;
 }
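Both set_to_*_domain hunks above end with the same bookkeeping pattern: a read adds the target domain to the read set, while a write makes it exclusive and dirties the object. A minimal sketch of that rule for the GTT case, with simplified stand-in types:

#include <stdint.h>
#include <stdio.h>

#define DOMAIN_CPU 0x1u
#define DOMAIN_GTT 0x40u

struct bo {
	uint32_t read_domains, write_domain;
	int dirty;
};

/* Models the domain update at the end of set_to_gtt_domain: a read
 * only adds GTT to the read set; a write collapses the read set to
 * GTT alone, takes the write domain, and dirties the object. */
static void set_to_gtt(struct bo *bo, int write)
{
	bo->read_domains |= DOMAIN_GTT;
	if (write) {
		bo->read_domains = DOMAIN_GTT;
		bo->write_domain = DOMAIN_GTT;
		bo->dirty = 1;
	}
}

int main(void)
{
	struct bo bo = { .read_domains = DOMAIN_CPU };	/* currently CPU-readable */
	set_to_gtt(&bo, 1);
	printf("read=%#x write=%#x dirty=%d\n",
	       bo.read_domains, bo.write_domain, bo.dirty);
	return 0;
}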
@@ -2906,10 +2956,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
 		return 0;
 
 	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-		i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+		i915_gem_flush_ring(obj->base.dev, obj->ring,
 				    0, obj->base.write_domain);
 
-	return i915_gem_object_wait_rendering(&obj->base, interruptible);
+	return i915_gem_object_wait_rendering(obj, interruptible);
 }
 
 /**
@@ -2919,13 +2969,14 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
  * flushes to occur.
  */
 static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
-	if (ret != 0)
+	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
 		return ret;
 
 	i915_gem_object_flush_gtt_write_domain(obj);
@@ -2935,33 +2986,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
-	old_write_domain = obj->write_domain;
-	old_read_domains = obj->read_domains;
+	old_write_domain = obj->base.write_domain;
+	old_read_domains = obj->base.read_domains;
 
 	/* Flush the CPU cache if it's still invalid. */
-	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
 		i915_gem_clflush_object(obj);
 
-		obj->read_domains |= I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
 	/* If we're writing through the CPU, then the GPU read domains will
 	 * need to be invalidated at next use.
 	 */
 	if (write) {
-		obj->read_domains = I915_GEM_DOMAIN_CPU;
-		obj->write_domain = I915_GEM_DOMAIN_CPU;
+		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	}
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2971,184 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	return 0;
 }
 
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invaliding though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Mapped to GTT
- *	4. Read by GPU
- *	5. Unmapped from GTT
- *	6. Freed
- *
- *	Let's take these a step at a time
- *
- *	1. Allocated
- *		Pages allocated from the kernel may still have
- *		cache contents, so we set them to (CPU, CPU) always.
- *	2. Written by CPU (using pwrite)
- *		The pwrite function calls set_domain (CPU, CPU) and
- *		this function does nothing (as nothing changes)
- *	3. Mapped by GTT
- *		This function asserts that the object is not
- *		currently in any GPU-based read or write domains
- *	4. Read by GPU
- *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
- *		As write_domain is zero, this function adds in the
- *		current read domains (CPU+COMMAND, 0).
- *		flush_domains is set to CPU.
- *		invalidate_domains is set to COMMAND
- *		clflush is run to get data out of the CPU caches
- *		then i915_dev_set_domain calls i915_gem_flush to
- *		emit an MI_FLUSH and drm_agp_chipset_flush
- *	5. Unmapped from GTT
- *		i915_gem_object_unbind calls set_domain (CPU, CPU)
- *		flush_domains and invalidate_domains end up both zero
- *		so no flushing/invalidating happens
- *	6. Freed
- *		yay, done
- *
- * Case 2: The shared render buffer
- *
- *	1. Allocated
- *	2. Mapped to GTT
- *	3. Read/written by GPU
- *	4. set_domain to (CPU,CPU)
- *	5. Read/written by CPU
- *	6. Read/written by GPU
- *
- *	1. Allocated
- *		Same as last example, (CPU, CPU)
- *	2. Mapped to GTT
- *		Nothing changes (assertions find that it is not in the GPU)
- *	3. Read/written by GPU
- *		execbuffer calls set_domain (RENDER, RENDER)
- *		flush_domains gets CPU
- *		invalidate_domains gets GPU
- *		clflush (obj)
- *		MI_FLUSH and drm_agp_chipset_flush
- *	4. set_domain (CPU, CPU)
- *		flush_domains gets GPU
- *		invalidate_domains gets CPU
- *		wait_rendering (obj) to make sure all drawing is complete.
- *		This will include an MI_FLUSH to get the data from GPU
- *		to memory
- *		clflush (obj) to invalidate the CPU cache
- *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- *	5. Read/written by CPU
- *		cache lines are loaded and dirtied
- *	6. Read written by GPU
- *		Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- *	1. Allocated
- *	2. Written by CPU
- *	3. Read by GPU
- *	4. Updated (written) by CPU again
- *	5. Read by GPU
- *
- *	1. Allocated
- *		(CPU, CPU)
- *	2. Written by CPU
- *		(CPU, CPU)
- *	3. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- *	4. Updated (written) by CPU again
- *		(CPU, CPU)
- *		flush_domains = 0 (no previous write domain)
- *		invalidate_domains = 0 (no new read domains)
- *	5. Read by GPU
- *		(CPU+RENDER, 0)
- *		flush_domains = CPU
- *		invalidate_domains = RENDER
- *		clflush (obj)
- *		MI_FLUSH
- *		drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
-				  struct intel_ring_buffer *ring)
-{
-	struct drm_device *dev = obj->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	uint32_t invalidate_domains = 0;
-	uint32_t flush_domains = 0;
-	uint32_t old_read_domains;
-
-	intel_mark_busy(dev, obj);
-
-	/*
-	 * If the object isn't moving to a new write domain,
-	 * let the object stay in multiple read domains
-	 */
-	if (obj->pending_write_domain == 0)
-		obj->pending_read_domains |= obj->read_domains;
-	else
-		obj_priv->dirty = 1;
-
-	/*
-	 * Flush the current write domain if
-	 * the new read domains don't match. Invalidate
-	 * any read domains which differ from the old
-	 * write domain
-	 */
-	if (obj->write_domain &&
-	    (obj->write_domain != obj->pending_read_domains ||
-	     obj_priv->ring != ring)) {
-		flush_domains |= obj->write_domain;
-		invalidate_domains |=
-			obj->pending_read_domains & ~obj->write_domain;
-	}
-	/*
-	 * Invalidate any read caches which may have
-	 * stale data. That is, any new read domains.
-	 */
-	invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
-		i915_gem_clflush_object(obj);
-
-	old_read_domains = obj->read_domains;
-
-	/* The actual obj->write_domain will be updated with
-	 * pending_write_domain after we emit the accumulated flush for all
-	 * of our domain changes in execbuffers (which clears objects'
-	 * write_domains).  So if we have a current write domain that we
-	 * aren't changing, set pending_write_domain to that.
-	 */
-	if (flush_domains == 0 && obj->pending_write_domain == 0)
-		obj->pending_write_domain = obj->write_domain;
-	obj->read_domains = obj->pending_read_domains;
-
-	dev->invalidate_domains |= invalidate_domains;
-	dev->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		dev_priv->mm.flush_rings |= obj_priv->ring->id;
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		dev_priv->mm.flush_rings |= ring->id;
-
-	trace_i915_gem_object_change_domain(obj,
-					    old_read_domains,
-					    obj->write_domain);
-}
-
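The long comment and i915_gem_object_set_to_gpu_domain() deleted above describe the flush/invalidate computation that execbuffer used to run per object. Ignoring the ring-switch test, the mask arithmetic reduces to a few lines; this user-space sketch replays Case 3 step 5 from the comment (CPU write flushed, RENDER read invalidated):

#include <stdint.h>
#include <stdio.h>

/* Distills the mask arithmetic of the deleted set_to_gpu_domain: flush
 * the old write domain when the new read set differs from it, and
 * invalidate any read domain the object is newly entering. */
static void compute_domains(uint32_t read, uint32_t write,
			    uint32_t pending_read,
			    uint32_t *flush, uint32_t *invalidate)
{
	*flush = 0;
	*invalidate = 0;
	if (write && write != pending_read) {
		*flush |= write;
		*invalidate |= pending_read & ~write;
	}
	*invalidate |= pending_read & ~read;
}

int main(void)
{
	/* (CPU, CPU) object about to be read by the render engine. */
	uint32_t flush, invalidate;
	compute_domains(0x1, 0x1, 0x1 | 0x2, &flush, &invalidate);
	printf("flush=%#x invalidate=%#x\n", flush, invalidate);
	return 0;
}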
 /**
  * Moves the object from a partially CPU read to a full one.
  *
@@ -3156,30 +3023,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
 static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-	if (!obj_priv->page_cpu_valid)
+	if (!obj->page_cpu_valid)
 		return;
 
 	/* If we're partially in the CPU read domain, finish moving it in.
 	 */
-	if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+	if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
 		int i;
 
-		for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
-			if (obj_priv->page_cpu_valid[i])
+		for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+			if (obj->page_cpu_valid[i])
 				continue;
-			drm_clflush_pages(obj_priv->pages + i, 1);
+			drm_clflush_pages(obj->pages + i, 1);
 		}
 	}
 
 	/* Free the page_cpu_valid mappings which are now stale, whether
 	 * or not we've got I915_GEM_DOMAIN_CPU.
 	 */
-	kfree(obj_priv->page_cpu_valid);
-	obj_priv->page_cpu_valid = NULL;
+	kfree(obj->page_cpu_valid);
+	obj->page_cpu_valid = NULL;
 }
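page_cpu_valid, used above and in the range variant below, is a one-byte-per-page map of which pages have already been clflushed. A self-contained sketch of the walk that flushes only the still-invalid pages (the counter stands in for drm_clflush_pages):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

/* Toy version of the page_cpu_valid walk: touch only the pages whose
 * cachelines were never flushed, then the caller drops the bitmap. */
static unsigned flush_invalid_pages(const uint8_t *valid, size_t size)
{
	unsigned flushed = 0;
	for (size_t i = 0; i <= (size - 1) / PAGE_SIZE; i++)
		if (!valid[i])
			flushed++;	/* stands in for drm_clflush_pages(pages + i, 1) */
	return flushed;
}

int main(void)
{
	uint8_t valid[4] = { 1, 0, 1, 0 };	/* pages 1 and 3 still dirty */
	printf("pages flushed: %u\n", flush_invalid_pages(valid, 4 * PAGE_SIZE));
	return 0;
}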
 
 /**
@@ -3195,354 +3060,62 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
  * flushes to occur.
  */
 static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 					  uint64_t offset, uint64_t size)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_read_domains;
 	int i, ret;
 
-	if (offset == 0 && size == obj->size)
+	if (offset == 0 && size == obj->base.size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
-	if (ret != 0)
+	i915_gem_object_flush_gpu_write_domain(obj);
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
 		return ret;
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're already fully in the CPU read domain, we're done. */
-	if (obj_priv->page_cpu_valid == NULL &&
-	    (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+	if (obj->page_cpu_valid == NULL &&
+	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
 		return 0;
 
 	/* Otherwise, create/clear the per-page CPU read domain flag if we're
 	 * newly adding I915_GEM_DOMAIN_CPU
 	 */
-	if (obj_priv->page_cpu_valid == NULL) {
-		obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
-						   GFP_KERNEL);
-		if (obj_priv->page_cpu_valid == NULL)
+	if (obj->page_cpu_valid == NULL) {
+		obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+					      GFP_KERNEL);
+		if (obj->page_cpu_valid == NULL)
 			return -ENOMEM;
-	} else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
-		memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+	} else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+		memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
 
 	/* Flush the cache on any pages that are still invalid from the CPU's
 	 * perspective.
 	 */
 	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
 	     i++) {
-		if (obj_priv->page_cpu_valid[i])
+		if (obj->page_cpu_valid[i])
 			continue;
 
-		drm_clflush_pages(obj_priv->pages + i, 1);
+		drm_clflush_pages(obj->pages + i, 1);
 
-		obj_priv->page_cpu_valid[i] = 1;
+		obj->page_cpu_valid[i] = 1;
 	}
 
 	/* It should now be out of any other write domains, and we can update
 	 * the domain values for our changes.
 	 */
-	BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+	BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-	old_read_domains = obj->read_domains;
-	obj->read_domains |= I915_GEM_DOMAIN_CPU;
+	old_read_domains = obj->base.read_domains;
+	obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 
 	trace_i915_gem_object_change_domain(obj,
 					    old_read_domains,
-					    obj->write_domain);
-
-	return 0;
-}
+					    obj->base.write_domain);
3257/**
3258 * Pin an object to the GTT and evaluate the relocations landing in it.
3259 */
3260static int
3261i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
3262 struct drm_file *file_priv,
3263 struct drm_i915_gem_exec_object2 *entry)
3264{
3265 struct drm_device *dev = obj->base.dev;
3266 drm_i915_private_t *dev_priv = dev->dev_private;
3267 struct drm_i915_gem_relocation_entry __user *user_relocs;
3268 struct drm_gem_object *target_obj = NULL;
3269 uint32_t target_handle = 0;
3270 int i, ret = 0;
3271
3272 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
3273 for (i = 0; i < entry->relocation_count; i++) {
3274 struct drm_i915_gem_relocation_entry reloc;
3275 uint32_t target_offset;
3276
3277 if (__copy_from_user_inatomic(&reloc,
3278 user_relocs+i,
3279 sizeof(reloc))) {
3280 ret = -EFAULT;
3281 break;
3282 }
3283
3284 if (reloc.target_handle != target_handle) {
3285 drm_gem_object_unreference(target_obj);
3286
3287 target_obj = drm_gem_object_lookup(dev, file_priv,
3288 reloc.target_handle);
3289 if (target_obj == NULL) {
3290 ret = -ENOENT;
3291 break;
3292 }
3293
3294 target_handle = reloc.target_handle;
3295 }
3296 target_offset = to_intel_bo(target_obj)->gtt_offset;
3297
3298#if WATCH_RELOC
3299 DRM_INFO("%s: obj %p offset %08x target %d "
3300 "read %08x write %08x gtt %08x "
3301 "presumed %08x delta %08x\n",
3302 __func__,
3303 obj,
3304 (int) reloc.offset,
3305 (int) reloc.target_handle,
3306 (int) reloc.read_domains,
3307 (int) reloc.write_domain,
3308 (int) target_offset,
3309 (int) reloc.presumed_offset,
3310 reloc.delta);
3311#endif
3312
3313 /* The target buffer should have appeared before us in the
3314 * exec_object list, so it should have a GTT space bound by now.
3315 */
3316 if (target_offset == 0) {
3317 DRM_ERROR("No GTT space found for object %d\n",
3318 reloc.target_handle);
3319 ret = -EINVAL;
3320 break;
3321 }
3322
3323 /* Validate that the target is in a valid r/w GPU domain */
3324 if (reloc.write_domain & (reloc.write_domain - 1)) {
3325 DRM_ERROR("reloc with multiple write domains: "
3326 "obj %p target %d offset %d "
3327 "read %08x write %08x",
3328 obj, reloc.target_handle,
3329 (int) reloc.offset,
3330 reloc.read_domains,
3331 reloc.write_domain);
3332 ret = -EINVAL;
3333 break;
3334 }
3335 if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
3336 reloc.read_domains & I915_GEM_DOMAIN_CPU) {
3337 DRM_ERROR("reloc with read/write CPU domains: "
3338 "obj %p target %d offset %d "
3339 "read %08x write %08x",
3340 obj, reloc.target_handle,
3341 (int) reloc.offset,
3342 reloc.read_domains,
3343 reloc.write_domain);
3344 ret = -EINVAL;
3345 break;
3346 }
3347 if (reloc.write_domain && target_obj->pending_write_domain &&
3348 reloc.write_domain != target_obj->pending_write_domain) {
3349 DRM_ERROR("Write domain conflict: "
3350 "obj %p target %d offset %d "
3351 "new %08x old %08x\n",
3352 obj, reloc.target_handle,
3353 (int) reloc.offset,
3354 reloc.write_domain,
3355 target_obj->pending_write_domain);
3356 ret = -EINVAL;
3357 break;
3358 }
3359
3360 target_obj->pending_read_domains |= reloc.read_domains;
3361 target_obj->pending_write_domain |= reloc.write_domain;
3362
3363 /* If the relocation already has the right value in it, no
3364 * more work needs to be done.
3365 */
3366 if (target_offset == reloc.presumed_offset)
3367 continue;
3368
3369 /* Check that the relocation address is valid... */
3370 if (reloc.offset > obj->base.size - 4) {
3371 DRM_ERROR("Relocation beyond object bounds: "
3372 "obj %p target %d offset %d size %d.\n",
3373 obj, reloc.target_handle,
3374 (int) reloc.offset, (int) obj->base.size);
3375 ret = -EINVAL;
3376 break;
3377 }
3378 if (reloc.offset & 3) {
3379 DRM_ERROR("Relocation not 4-byte aligned: "
3380 "obj %p target %d offset %d.\n",
3381 obj, reloc.target_handle,
3382 (int) reloc.offset);
3383 ret = -EINVAL;
3384 break;
3385 }
3386
3387 /* and points to somewhere within the target object. */
3388 if (reloc.delta >= target_obj->size) {
3389 DRM_ERROR("Relocation beyond target object bounds: "
3390 "obj %p target %d delta %d size %d.\n",
3391 obj, reloc.target_handle,
3392 (int) reloc.delta, (int) target_obj->size);
3393 ret = -EINVAL;
3394 break;
3395 }
3396
3397 reloc.delta += target_offset;
3398 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
3399 uint32_t page_offset = reloc.offset & ~PAGE_MASK;
3400 char *vaddr;
3401
3402 vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
3403 *(uint32_t *)(vaddr + page_offset) = reloc.delta;
3404 kunmap_atomic(vaddr);
3405 } else {
3406 uint32_t __iomem *reloc_entry;
3407 void __iomem *reloc_page;
3408
3409 ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
3410 if (ret)
3411 break;
3412
3413 /* Map the page containing the relocation we're going to perform. */
3414 reloc.offset += obj->gtt_offset;
3415 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
3416 reloc.offset & PAGE_MASK);
3417 reloc_entry = (uint32_t __iomem *)
3418 (reloc_page + (reloc.offset & ~PAGE_MASK));
3419 iowrite32(reloc.delta, reloc_entry);
3420 io_mapping_unmap_atomic(reloc_page);
3421 }
3422
3423 /* and update the user's relocation entry */
3424 reloc.presumed_offset = target_offset;
3425 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
3426 &reloc.presumed_offset,
3427 sizeof(reloc.presumed_offset))) {
3428 ret = -EFAULT;
3429 break;
3430 }
3431 }
3432
3433 drm_gem_object_unreference(target_obj);
3434 return ret;
3435}
3436
3437static int
3438i915_gem_execbuffer_pin(struct drm_device *dev,
3439 struct drm_file *file,
3440 struct drm_gem_object **object_list,
3441 struct drm_i915_gem_exec_object2 *exec_list,
3442 int count)
3443{
3444 struct drm_i915_private *dev_priv = dev->dev_private;
3445 int ret, i, retry;
3446
3447 /* attempt to pin all of the buffers into the GTT */
3448 for (retry = 0; retry < 2; retry++) {
3449 ret = 0;
3450 for (i = 0; i < count; i++) {
3451 struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
3452 struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
3453 bool need_fence =
3454 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
3455 obj->tiling_mode != I915_TILING_NONE;
3456
3457 /* Check fence reg constraints and rebind if necessary */
3458 if (need_fence &&
3459 !i915_gem_object_fence_offset_ok(&obj->base,
3460 obj->tiling_mode)) {
3461 ret = i915_gem_object_unbind(&obj->base);
3462 if (ret)
3463 break;
3464 }
3465
3466 ret = i915_gem_object_pin(&obj->base, entry->alignment);
3467 if (ret)
3468 break;
3469
3470 /*
3471 * Pre-965 chips need a fence register set up in order
3472 * to properly handle blits to/from tiled surfaces.
3473 */
3474 if (need_fence) {
3475 ret = i915_gem_object_get_fence_reg(&obj->base, true);
3476 if (ret) {
3477 i915_gem_object_unpin(&obj->base);
3478 break;
3479 }
3480
3481 dev_priv->fence_regs[obj->fence_reg].gpu = true;
3482 }
3483
3484 entry->offset = obj->gtt_offset;
3485 }
3486
3487 while (i--)
3488 i915_gem_object_unpin(object_list[i]);
3489
3490 if (ret == 0)
3491 break;
3492
3493 if (ret != -ENOSPC || retry)
3494 return ret;
3495
3496 ret = i915_gem_evict_everything(dev);
3497 if (ret)
3498 return ret;
3499 }
3500
3501 return 0;
3502}
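i915_gem_execbuffer_pin() above is a two-pass loop: pin everything, and if the GTT is too fragmented (-ENOSPC) evict it wholesale and try once more. A skeleton of that retry shape, with a fake pin_all() standing in for the per-object work:

#include <stdio.h>

#define ENOSPC_ERR (-28)

/* Pretend the first attempt finds the GTT fragmented. */
static int pin_all(int attempt)
{
	return attempt == 0 ? ENOSPC_ERR : 0;
}

/* Two passes at most: an -ENOSPC on the first pass triggers a full
 * eviction; any other error, or a second failure, is returned. */
static int pin_with_retry(void)
{
	for (int retry = 0; retry < 2; retry++) {
		int ret = pin_all(retry);
		if (ret == 0)
			return 0;
		if (ret != ENOSPC_ERR || retry)
			return ret;
		/* stands in for i915_gem_evict_everything(dev) */
	}
	return 0;
}

int main(void)
{
	printf("pin_with_retry -> %d\n", pin_with_retry());
	return 0;
}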
3503
3504static int
3505i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
3506 struct drm_file *file,
3507 struct intel_ring_buffer *ring,
3508 struct drm_gem_object **objects,
3509 int count)
3510{
3511 struct drm_i915_private *dev_priv = dev->dev_private;
3512 int ret, i;
3513
3514 /* Zero the global flush/invalidate flags. These
3515 * will be modified as new domains are computed
3516 * for each object
3517 */
3518 dev->invalidate_domains = 0;
3519 dev->flush_domains = 0;
3520 dev_priv->mm.flush_rings = 0;
3521 for (i = 0; i < count; i++)
3522 i915_gem_object_set_to_gpu_domain(objects[i], ring);
3523
3524 if (dev->invalidate_domains | dev->flush_domains) {
3525#if WATCH_EXEC
3526 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
3527 __func__,
3528 dev->invalidate_domains,
3529 dev->flush_domains);
3530#endif
3531 i915_gem_flush(dev, file,
3532 dev->invalidate_domains,
3533 dev->flush_domains,
3534 dev_priv->mm.flush_rings);
3535 }
3536
3537 for (i = 0; i < count; i++) {
3538 struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
3539 /* XXX replace with semaphores */
3540 if (obj->ring && ring != obj->ring) {
3541 ret = i915_gem_object_wait_rendering(&obj->base, true);
3542 if (ret)
3543 return ret;
3544 }
3545 }
-
-	return 0;
-}
+
+	return 0;
+}
@@ -3582,586 +3155,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 		return 0;
 
 	ret = 0;
-	if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
 		/* And wait for the seqno passing without holding any locks and
 		 * causing extra latency for others. This is safe as the irq
 		 * generation is designed to be run atomically and so is
 		 * lockless.
 		 */
-		ring->user_irq_get(dev, ring);
-		ret = wait_event_interruptible(ring->irq_queue,
-			      i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
-			      || atomic_read(&dev_priv->mm.wedged));
-		ring->user_irq_put(dev, ring);
+		if (ring->irq_get(ring)) {
+			ret = wait_event_interruptible(ring->irq_queue,
+				      i915_seqno_passed(ring->get_seqno(ring), seqno)
+				      || atomic_read(&dev_priv->mm.wedged));
+			ring->irq_put(ring);
3596
3597 if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
3598 ret = -EIO;
3599 }
3600
3601 if (ret == 0)
3602 queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
3603
3604 return ret;
3605}
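The throttle above hinges on i915_seqno_passed(); the comparison is typically implemented as a signed 32-bit subtraction so the ordering test keeps working across sequence-number wraparound. A stand-alone sketch of that idiom:

#include <stdint.h>
#include <stdio.h>

/* "Has the hardware reached target yet?" expressed so that a wrapped
 * counter still compares correctly, as long as the two values are
 * within 2^31 of each other. */
static int seqno_passed(uint32_t current, uint32_t target)
{
	return (int32_t)(current - target) >= 0;
}

int main(void)
{
	printf("%d\n", seqno_passed(10, 5));		/* 1: passed  */
	printf("%d\n", seqno_passed(5, 10));		/* 0: pending */
	printf("%d\n", seqno_passed(3, 0xfffffff0u));	/* 1: wrapped */
	return 0;
}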
3606
3607static int
3608i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
3609 uint64_t exec_offset)
3610{
3611 uint32_t exec_start, exec_len;
3612
3613 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3614 exec_len = (uint32_t) exec->batch_len;
3615
3616 if ((exec_start | exec_len) & 0x7)
3617 return -EINVAL;
3618
3619 if (!exec_start)
3620 return -EINVAL;
3621
3622 return 0;
3623}
3624
3625static int
3626validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
3627 int count)
3628{
3629 int i;
3630
3631 for (i = 0; i < count; i++) {
3632 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
3633 size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
3634
3635 if (!access_ok(VERIFY_READ, ptr, length))
3636 return -EFAULT;
3637
3638 /* we may also need to update the presumed offsets */
3639 if (!access_ok(VERIFY_WRITE, ptr, length))
3640 return -EFAULT;
3641
3642 if (fault_in_pages_readable(ptr, length))
3643 return -EFAULT;
3644 }
3645
3646 return 0;
3647}
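validate_exec_list() above sizes each user copy as relocation_count times the entry size before probing the pointers. One thing a sketch can make explicit is doing that multiplication without overflow; the 32-byte stand-in entry below is an assumption standing in for the uapi struct, not a copy of it:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct reloc_entry { uint64_t dummy[4]; };	/* stand-in for the reloc entry */

/* Reject counts whose byte length would not fit in size_t before
 * handing the result to any access check or copy routine. */
static int reloc_list_length(uint32_t count, size_t *length)
{
	if (count > SIZE_MAX / sizeof(struct reloc_entry))
		return -1;	/* would overflow; treat as -EINVAL */
	*length = (size_t)count * sizeof(struct reloc_entry);
	return 0;
}

int main(void)
{
	size_t len;
	if (reloc_list_length(100, &len) == 0)
		printf("100 relocs -> %zu bytes to validate\n", len);
	return 0;
}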
3648
3649static int
3650i915_gem_do_execbuffer(struct drm_device *dev, void *data,
3651 struct drm_file *file,
3652 struct drm_i915_gem_execbuffer2 *args,
3653 struct drm_i915_gem_exec_object2 *exec_list)
3654{
3655 drm_i915_private_t *dev_priv = dev->dev_private;
3656 struct drm_gem_object **object_list = NULL;
3657 struct drm_gem_object *batch_obj;
3658 struct drm_i915_gem_object *obj_priv;
3659 struct drm_clip_rect *cliprects = NULL;
3660 struct drm_i915_gem_request *request = NULL;
3661 int ret, i, flips;
3662 uint64_t exec_offset;
3663
3664 struct intel_ring_buffer *ring = NULL;
3665
3666 ret = i915_gem_check_is_wedged(dev);
3667 if (ret)
3668 return ret;
3669
3670 ret = validate_exec_list(exec_list, args->buffer_count);
3671 if (ret)
3672 return ret;
3673
3674#if WATCH_EXEC
3675 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3676 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3677#endif
3678 switch (args->flags & I915_EXEC_RING_MASK) {
3679 case I915_EXEC_DEFAULT:
3680 case I915_EXEC_RENDER:
3681 ring = &dev_priv->render_ring;
3682 break;
3683 case I915_EXEC_BSD:
3684 if (!HAS_BSD(dev)) {
3685 DRM_ERROR("execbuf with invalid ring (BSD)\n");
3686 return -EINVAL;
3687 }
3688 ring = &dev_priv->bsd_ring;
3689 break;
3690 case I915_EXEC_BLT:
3691 if (!HAS_BLT(dev)) {
3692 DRM_ERROR("execbuf with invalid ring (BLT)\n");
3693 return -EINVAL;
3694 }
3695 ring = &dev_priv->blt_ring;
3696 break;
3697 default:
3698 DRM_ERROR("execbuf with unknown ring: %d\n",
3699 (int)(args->flags & I915_EXEC_RING_MASK));
3700 return -EINVAL;
3701 }
3702
3703 if (args->buffer_count < 1) {
3704 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3705 return -EINVAL;
3706 }
3707 object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
3708 if (object_list == NULL) {
3709 DRM_ERROR("Failed to allocate object list for %d buffers\n",
3710 args->buffer_count);
3711 ret = -ENOMEM;
3712 goto pre_mutex_err;
3713 }
3714
3715 if (args->num_cliprects != 0) {
3716 cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
3717 GFP_KERNEL);
3718 if (cliprects == NULL) {
3719 ret = -ENOMEM;
3720 goto pre_mutex_err;
3721 }
3722
3723 ret = copy_from_user(cliprects,
3724 (struct drm_clip_rect __user *)
3725 (uintptr_t) args->cliprects_ptr,
3726 sizeof(*cliprects) * args->num_cliprects);
3727 if (ret != 0) {
3728 DRM_ERROR("copy %d cliprects failed: %d\n",
3729 args->num_cliprects, ret);
3730 ret = -EFAULT;
3731 goto pre_mutex_err;
3732 }
3733 }
3734
3735 request = kzalloc(sizeof(*request), GFP_KERNEL);
3736 if (request == NULL) {
3737 ret = -ENOMEM;
3738 goto pre_mutex_err;
3739 }
3740
3741 ret = i915_mutex_lock_interruptible(dev);
3742 if (ret)
3743 goto pre_mutex_err;
3744
3745 if (dev_priv->mm.suspended) {
3746 mutex_unlock(&dev->struct_mutex);
3747 ret = -EBUSY;
3748 goto pre_mutex_err;
3749 }
3750
3751 /* Look up object handles */
3752 for (i = 0; i < args->buffer_count; i++) {
3753 object_list[i] = drm_gem_object_lookup(dev, file,
3754 exec_list[i].handle);
3755 if (object_list[i] == NULL) {
3756 DRM_ERROR("Invalid object handle %d at index %d\n",
3757 exec_list[i].handle, i);
3758 /* prevent error path from reading uninitialized data */
3759 args->buffer_count = i + 1;
3760 ret = -ENOENT;
3761 goto err;
3762 }
3763
3764 obj_priv = to_intel_bo(object_list[i]);
3765 if (obj_priv->in_execbuffer) {
3766 DRM_ERROR("Object %p appears more than once in object list\n",
3767 object_list[i]);
3768 /* prevent error path from reading uninitialized data */
3769 args->buffer_count = i + 1;
3770 ret = -EINVAL;
3771 goto err;
3772 }
3773 obj_priv->in_execbuffer = true;
3774 }
3775
3776 /* Move the objects en-masse into the GTT, evicting if necessary. */
3777 ret = i915_gem_execbuffer_pin(dev, file,
3778 object_list, exec_list,
3779 args->buffer_count);
3780 if (ret)
3781 goto err;
3782
3783 /* The objects are in their final locations, apply the relocations. */
3784 for (i = 0; i < args->buffer_count; i++) {
3785 struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
3786 obj->base.pending_read_domains = 0;
3787 obj->base.pending_write_domain = 0;
3788 ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
3789 if (ret)
3790 goto err;
3791 }
3792
3793 /* Set the pending read domains for the batch buffer to COMMAND */
3794 batch_obj = object_list[args->buffer_count-1];
3795 if (batch_obj->pending_write_domain) {
3796 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3797 ret = -EINVAL;
3798 goto err;
3799 }
3800 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3801
3802 /* Sanity check the batch buffer */
3803 exec_offset = to_intel_bo(batch_obj)->gtt_offset;
3804 ret = i915_gem_check_execbuffer(args, exec_offset);
3805 if (ret != 0) {
3806 DRM_ERROR("execbuf with invalid offset/length\n");
3807 goto err;
3808 }
3809
3810 ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
3811 object_list, args->buffer_count);
3812 if (ret)
3813 goto err;
3814
3815 for (i = 0; i < args->buffer_count; i++) {
3816 struct drm_gem_object *obj = object_list[i];
3817 uint32_t old_write_domain = obj->write_domain;
3818 obj->write_domain = obj->pending_write_domain;
3819 trace_i915_gem_object_change_domain(obj,
3820 obj->read_domains,
3821 old_write_domain);
3822 }
3823
3824#if WATCH_COHERENCY
3825 for (i = 0; i < args->buffer_count; i++) {
3826 i915_gem_object_check_coherency(object_list[i],
3827 exec_list[i].handle);
3828 }
3829#endif
3830
3831#if WATCH_EXEC
3832 i915_gem_dump_object(batch_obj,
3833 args->batch_len,
3834 __func__,
3835 ~0);
3836#endif
3837
3838 /* Check for any pending flips. As we only maintain a flip queue depth
3839 * of 1, we can simply insert a WAIT for the next display flip prior
3840 * to executing the batch and avoid stalling the CPU.
3841 */
3842 flips = 0;
3843 for (i = 0; i < args->buffer_count; i++) {
3844 if (object_list[i]->write_domain)
3845 flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
3846 }
3847 if (flips) {
3848 int plane, flip_mask;
3849
3850 for (plane = 0; flips >> plane; plane++) {
3851 if (((flips >> plane) & 1) == 0)
3852 continue;
3853
3854 if (plane)
3855 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
3856 else
3857 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
3858
3859 intel_ring_begin(dev, ring, 2);
3860 intel_ring_emit(dev, ring,
3861 MI_WAIT_FOR_EVENT | flip_mask);
3862 intel_ring_emit(dev, ring, MI_NOOP);
3863 intel_ring_advance(dev, ring);
3864 }
3865 }
3866
3867 /* Exec the batchbuffer */
3868 ret = ring->dispatch_gem_execbuffer(dev, ring, args,
3869 cliprects, exec_offset);
3870 if (ret) {
3871 DRM_ERROR("dispatch failed %d\n", ret);
3872 goto err;
3873 }
3874
3875 /*
3876 * Ensure that the commands in the batch buffer are
3877 * finished before the interrupt fires
3878 */
3879 i915_retire_commands(dev, ring);
3880
3881 for (i = 0; i < args->buffer_count; i++) {
3882 struct drm_gem_object *obj = object_list[i];
3883
3884 i915_gem_object_move_to_active(obj, ring);
3885 if (obj->write_domain)
3886 list_move_tail(&to_intel_bo(obj)->gpu_write_list,
3887 &ring->gpu_write_list);
3888 }
3889
3890 i915_add_request(dev, file, request, ring);
3891 request = NULL;
3892
3893err:
3894 for (i = 0; i < args->buffer_count; i++) {
3895 if (object_list[i]) {
3896 obj_priv = to_intel_bo(object_list[i]);
3897 obj_priv->in_execbuffer = false;
3898 }
3899 drm_gem_object_unreference(object_list[i]);
3900 }
3901
3902 mutex_unlock(&dev->struct_mutex);
3903
3904pre_mutex_err:
3905 drm_free_large(object_list);
3906 kfree(cliprects);
3907 kfree(request);
3908
3909 return ret;
3910}
3911
3912/*
3913 * Legacy execbuffer just creates an exec2 list from the original exec object
3914 * list array and passes it to the real function.
3915 */
3916int
3917i915_gem_execbuffer(struct drm_device *dev, void *data,
3918 struct drm_file *file_priv)
3919{
3920 struct drm_i915_gem_execbuffer *args = data;
3921 struct drm_i915_gem_execbuffer2 exec2;
3922 struct drm_i915_gem_exec_object *exec_list = NULL;
3923 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
3924 int ret, i;
3925
3926#if WATCH_EXEC
3927 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
3928 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
3929#endif
3930
3931 if (args->buffer_count < 1) {
3932 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
3933 return -EINVAL;
3934 }
3935
3936 /* Copy in the exec list from userland */
3937 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
3938 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
3939 if (exec_list == NULL || exec2_list == NULL) {
3940 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
3941 args->buffer_count);
3942 drm_free_large(exec_list);
3943 drm_free_large(exec2_list);
3944 return -ENOMEM;
3945 }
3946 ret = copy_from_user(exec_list,
3947 (struct drm_i915_relocation_entry __user *)
3948 (uintptr_t) args->buffers_ptr,
3949 sizeof(*exec_list) * args->buffer_count);
3950 if (ret != 0) {
3951 DRM_ERROR("copy %d exec entries failed %d\n",
3952 args->buffer_count, ret);
3953 drm_free_large(exec_list);
3954 drm_free_large(exec2_list);
3955 return -EFAULT;
3956 }
3957
3958 for (i = 0; i < args->buffer_count; i++) {
3959 exec2_list[i].handle = exec_list[i].handle;
3960 exec2_list[i].relocation_count = exec_list[i].relocation_count;
3961 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
3962 exec2_list[i].alignment = exec_list[i].alignment;
3963 exec2_list[i].offset = exec_list[i].offset;
3964 if (INTEL_INFO(dev)->gen < 4)
3965 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
3966 else
3967 exec2_list[i].flags = 0;
3968 }
-
-	exec2.buffers_ptr = args->buffers_ptr;
-	exec2.buffer_count = args->buffer_count;
+
+			if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+				ret = -EIO;
3972 exec2.batch_start_offset = args->batch_start_offset;
3973 exec2.batch_len = args->batch_len;
3974 exec2.DR1 = args->DR1;
3975 exec2.DR4 = args->DR4;
3976 exec2.num_cliprects = args->num_cliprects;
3977 exec2.cliprects_ptr = args->cliprects_ptr;
3978 exec2.flags = I915_EXEC_RENDER;
3979
3980 ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
3981 if (!ret) {
3982 /* Copy the new buffer offsets back to the user's exec list. */
3983 for (i = 0; i < args->buffer_count; i++)
3984 exec_list[i].offset = exec2_list[i].offset;
3985 /* ... and back out to userspace */
3986 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
3987 (uintptr_t) args->buffers_ptr,
3988 exec_list,
3989 sizeof(*exec_list) * args->buffer_count);
3990 if (ret) {
3991 ret = -EFAULT;
3992 DRM_ERROR("failed to copy %d exec entries "
3993 "back to user (%d)\n",
3994 args->buffer_count, ret);
-		}
-	}
-
-	drm_free_large(exec_list);
-	drm_free_large(exec2_list);
-	return ret;
-}
+		}
+	}
+
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
+
4002
4003int
4004i915_gem_execbuffer2(struct drm_device *dev, void *data,
4005 struct drm_file *file_priv)
4006{
4007 struct drm_i915_gem_execbuffer2 *args = data;
4008 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
4009 int ret;
4010
4011#if WATCH_EXEC
4012 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
4013 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
4014#endif
4015
4016 if (args->buffer_count < 1) {
4017 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
4018 return -EINVAL;
4019 }
4020
4021 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
4022 if (exec2_list == NULL) {
4023 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
4024 args->buffer_count);
4025 return -ENOMEM;
4026 }
4027 ret = copy_from_user(exec2_list,
4028 (struct drm_i915_relocation_entry __user *)
4029 (uintptr_t) args->buffers_ptr,
4030 sizeof(*exec2_list) * args->buffer_count);
4031 if (ret != 0) {
4032 DRM_ERROR("copy %d exec entries failed %d\n",
4033 args->buffer_count, ret);
4034 drm_free_large(exec2_list);
4035 return -EFAULT;
4036 }
4037
4038 ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
4039 if (!ret) {
4040 /* Copy the new buffer offsets back to the user's exec list. */
4041 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
4042 (uintptr_t) args->buffers_ptr,
4043 exec2_list,
4044 sizeof(*exec2_list) * args->buffer_count);
4045 if (ret) {
4046 ret = -EFAULT;
4047 DRM_ERROR("failed to copy %d exec entries "
4048 "back to user (%d)\n",
4049 args->buffer_count, ret);
4050 }
4051 }
4052 3177
-	drm_free_large(exec2_list);
-	return ret;
-}
+	return ret;
+}
 
 int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+		    uint32_t alignment,
+		    bool map_and_fenceable)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
-	BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+	BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
 	WARN_ON(i915_verify_lists(dev));
 
-	if (obj_priv->gtt_space != NULL) {
-		if (alignment == 0)
-			alignment = i915_gem_get_gtt_alignment(obj);
-		if (obj_priv->gtt_offset & (alignment - 1)) {
-			WARN(obj_priv->pin_count,
-			     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
-			     obj_priv->gtt_offset, alignment);
+	if (obj->gtt_space != NULL) {
+		if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+		    (map_and_fenceable && !obj->map_and_fenceable)) {
+			WARN(obj->pin_count,
+			     "bo is already pinned with incorrect alignment:"
+			     " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " obj->map_and_fenceable=%d\n",
+			     obj->gtt_offset, alignment,
+			     map_and_fenceable,
+			     obj->map_and_fenceable);
 			ret = i915_gem_object_unbind(obj);
 			if (ret)
 				return ret;
 		}
 	}
 
-	if (obj_priv->gtt_space == NULL) {
-		ret = i915_gem_object_bind_to_gtt(obj, alignment);
+	if (obj->gtt_space == NULL) {
+		ret = i915_gem_object_bind_to_gtt(obj, alignment,
+						  map_and_fenceable);
 		if (ret)
 			return ret;
 	}
 
-	obj_priv->pin_count++;
-
-	/* If the object is not active and not pending a flush,
-	 * remove it from the inactive list
-	 */
-	if (obj_priv->pin_count == 1) {
-		i915_gem_info_add_pin(dev_priv, obj->size);
-		if (!obj_priv->active)
-			list_move_tail(&obj_priv->mm_list,
+	if (obj->pin_count++ == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
+	obj->pin_mappable |= map_and_fenceable;
 
 	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
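The rewritten pin/unpin above folds the list moves into the count transitions themselves: only the 0 -> 1 pin and the 1 -> 0 unpin touch the LRU lists (and only for inactive objects, a check this sketch drops). A toy model of the counting:

#include <stdio.h>

struct bo { int pin_count; const char *list; };

/* Only the first pin moves the object onto the pinned list. */
static void pin(struct bo *bo)
{
	if (bo->pin_count++ == 0)
		bo->list = "pinned";
}

/* Only the last unpin moves it back to the inactive list. */
static void unpin(struct bo *bo)
{
	if (--bo->pin_count == 0)
		bo->list = "inactive";
}

int main(void)
{
	struct bo bo = { 0, "inactive" };
	pin(&bo); pin(&bo);	/* the second pin leaves the list alone */
	unpin(&bo);
	printf("count=%d list=%s\n", bo.pin_count, bo.list);
	unpin(&bo);
	printf("count=%d list=%s\n", bo.pin_count, bo.list);
	return 0;
}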
 
 void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
+	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	WARN_ON(i915_verify_lists(dev));
-	obj_priv->pin_count--;
-	BUG_ON(obj_priv->pin_count < 0);
-	BUG_ON(obj_priv->gtt_space == NULL);
+	BUG_ON(obj->pin_count == 0);
+	BUG_ON(obj->gtt_space == NULL);
 
-	/* If the object is no longer pinned, and is
-	 * neither active nor being flushed, then stick it on
-	 * the inactive list
-	 */
-	if (obj_priv->pin_count == 0) {
-		if (!obj_priv->active)
-			list_move_tail(&obj_priv->mm_list,
+	if (--obj->pin_count == 0) {
+		if (!obj->active)
+			list_move_tail(&obj->mm_list,
 				       &dev_priv->mm.inactive_list);
-		i915_gem_info_remove_pin(dev_priv, obj->size);
+		obj->pin_mappable = false;
 	}
 	WARN_ON(i915_verify_lists(dev));
 }
 
 int
 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-		   struct drm_file *file_priv)
+		   struct drm_file *file)
 {
 	struct drm_i915_gem_pin *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->madv != I915_MADV_WILLNEED) {
+	if (obj->madv != I915_MADV_WILLNEED) {
 		DRM_ERROR("Attempting to pin a purgeable buffer\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+	if (obj->pin_filp != NULL && obj->pin_filp != file) {
 		DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
 
-	obj_priv->user_pin_count++;
-	obj_priv->pin_filp = file_priv;
-	if (obj_priv->user_pin_count == 1) {
-		ret = i915_gem_object_pin(obj, args->alignment);
+	obj->user_pin_count++;
+	obj->pin_filp = file;
+	if (obj->user_pin_count == 1) {
+		ret = i915_gem_object_pin(obj, args->alignment, true);
 		if (ret)
 			goto out;
 	}
@@ -4170,9 +3286,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	 * as the X server doesn't manage domains yet
 	 */
 	i915_gem_object_flush_cpu_write_domain(obj);
-	args->offset = obj_priv->gtt_offset;
+	args->offset = obj->gtt_offset;
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4180,38 +3296,36 @@ unlock:
 
 int
 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-		     struct drm_file *file_priv)
+		     struct drm_file *file)
 {
 	struct drm_i915_gem_pin *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
-	if (obj_priv->pin_filp != file_priv) {
+	if (obj->pin_filp != file) {
 		DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
 			  args->handle);
 		ret = -EINVAL;
 		goto out;
 	}
-	obj_priv->user_pin_count--;
-	if (obj_priv->user_pin_count == 0) {
-		obj_priv->pin_filp = NULL;
+	obj->user_pin_count--;
+	if (obj->user_pin_count == 0) {
+		obj->pin_filp = NULL;
 		i915_gem_object_unpin(obj);
 	}
 
 out:
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4219,52 +3333,64 @@ unlock:
 
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+		    struct drm_file *file)
 {
 	struct drm_i915_gem_busy *args = data;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	int ret;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
 
-	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (obj == NULL) {
 		ret = -ENOENT;
 		goto unlock;
 	}
-	obj_priv = to_intel_bo(obj);
 
 	/* Count all active objects as busy, even if they are currently not used
 	 * by the gpu. Users of this interface expect objects to eventually
 	 * become non-busy without any further actions, therefore emit any
 	 * necessary flushes here.
 	 */
-	args->busy = obj_priv->active;
+	args->busy = obj->active;
 	if (args->busy) {
 		/* Unconditionally flush objects, even when the gpu still uses this
 		 * object. Userspace calling this function indicates that it wants to
 		 * use this buffer rather sooner than later, so issuing the required
 		 * flush earlier is beneficial.
 		 */
-		if (obj->write_domain & I915_GEM_GPU_DOMAINS)
-			i915_gem_flush_ring(dev, file_priv,
-					    obj_priv->ring,
-					    0, obj->write_domain);
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+			i915_gem_flush_ring(dev, obj->ring,
+					    0, obj->base.write_domain);
+		} else if (obj->ring->outstanding_lazy_request ==
+			   obj->last_rendering_seqno) {
+			struct drm_i915_gem_request *request;
+
+			/* This ring is not being cleared by active usage,
+			 * so emit a request to do so.
+			 */
+			request = kzalloc(sizeof(*request), GFP_KERNEL);
+			if (request)
+				ret = i915_add_request(dev,
+						       NULL, request,
+						       obj->ring);
+			else
+				ret = -ENOMEM;
+		}
 
 		/* Update the active list for the hardware's current position.
 		 * Otherwise this only updates on a delayed timer or when irqs
 		 * are actually unmasked, and our working set ends up being
 		 * larger than required.
 		 */
-		i915_gem_retire_requests_ring(dev, obj_priv->ring);
+		i915_gem_retire_requests_ring(dev, obj->ring);
 
-		args->busy = obj_priv->active;
+		args->busy = obj->active;
 	}
 
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -4282,8 +3408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4282 struct drm_file *file_priv) 3408 struct drm_file *file_priv)
4283{ 3409{
4284 struct drm_i915_gem_madvise *args = data; 3410 struct drm_i915_gem_madvise *args = data;
4285 struct drm_gem_object *obj; 3411 struct drm_i915_gem_object *obj;
4286 struct drm_i915_gem_object *obj_priv;
4287 int ret; 3412 int ret;
4288 3413
4289 switch (args->madv) { 3414 switch (args->madv) {
@@ -4298,37 +3423,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4298 if (ret) 3423 if (ret)
4299 return ret; 3424 return ret;
4300 3425
4301 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3426 obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
4302 if (obj == NULL) { 3427 if (obj == NULL) {
4303 ret = -ENOENT; 3428 ret = -ENOENT;
4304 goto unlock; 3429 goto unlock;
4305 } 3430 }
4306 obj_priv = to_intel_bo(obj);
4307 3431
4308 if (obj_priv->pin_count) { 3432 if (obj->pin_count) {
4309 ret = -EINVAL; 3433 ret = -EINVAL;
4310 goto out; 3434 goto out;
4311 } 3435 }
4312 3436
4313 if (obj_priv->madv != __I915_MADV_PURGED) 3437 if (obj->madv != __I915_MADV_PURGED)
4314 obj_priv->madv = args->madv; 3438 obj->madv = args->madv;
4315 3439
4316 /* if the object is no longer bound, discard its backing storage */ 3440 /* if the object is no longer bound, discard its backing storage */
4317 if (i915_gem_object_is_purgeable(obj_priv) && 3441 if (i915_gem_object_is_purgeable(obj) &&
4318 obj_priv->gtt_space == NULL) 3442 obj->gtt_space == NULL)
4319 i915_gem_object_truncate(obj); 3443 i915_gem_object_truncate(obj);
4320 3444
4321 args->retained = obj_priv->madv != __I915_MADV_PURGED; 3445 args->retained = obj->madv != __I915_MADV_PURGED;
4322 3446
4323out: 3447out:
4324 drm_gem_object_unreference(obj); 3448 drm_gem_object_unreference(&obj->base);
4325unlock: 3449unlock:
4326 mutex_unlock(&dev->struct_mutex); 3450 mutex_unlock(&dev->struct_mutex);
4327 return ret; 3451 return ret;
4328} 3452}
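
A companion sketch for the madvise ioctl above, under the same assumptions
(i915_drm.h from libdrm; gem_madvise is a hypothetical helper): userspace
marks an idle buffer purgeable and later checks whether its contents survived.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Sketch: returns 1 if the buffer's pages were retained, 0 if the
 * kernel already discarded them, -1 on error.
 */
static int gem_madvise(int fd, uint32_t handle, uint32_t madv)
{
	struct drm_i915_gem_madvise arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.madv = madv;	/* I915_MADV_DONTNEED or I915_MADV_WILLNEED */

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg))
		return -1;

	return arg.retained;
}
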
4329 3453
4330struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev, 3454struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
4331 size_t size) 3455 size_t size)
4332{ 3456{
4333 struct drm_i915_private *dev_priv = dev->dev_private; 3457 struct drm_i915_private *dev_priv = dev->dev_private;
4334 struct drm_i915_gem_object *obj; 3458 struct drm_i915_gem_object *obj;
@@ -4351,11 +3475,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
4351 obj->base.driver_private = NULL; 3475 obj->base.driver_private = NULL;
4352 obj->fence_reg = I915_FENCE_REG_NONE; 3476 obj->fence_reg = I915_FENCE_REG_NONE;
4353 INIT_LIST_HEAD(&obj->mm_list); 3477 INIT_LIST_HEAD(&obj->mm_list);
3478 INIT_LIST_HEAD(&obj->gtt_list);
4354 INIT_LIST_HEAD(&obj->ring_list); 3479 INIT_LIST_HEAD(&obj->ring_list);
3480 INIT_LIST_HEAD(&obj->exec_list);
4355 INIT_LIST_HEAD(&obj->gpu_write_list); 3481 INIT_LIST_HEAD(&obj->gpu_write_list);
4356 obj->madv = I915_MADV_WILLNEED; 3482 obj->madv = I915_MADV_WILLNEED;
3483 /* Avoid an unnecessary call to unbind on the first bind. */
3484 obj->map_and_fenceable = true;
4357 3485
4358 return &obj->base; 3486 return obj;
4359} 3487}
4360 3488
4361int i915_gem_init_object(struct drm_gem_object *obj) 3489int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4365,42 +3493,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
4365 return 0; 3493 return 0;
4366} 3494}
4367 3495
4368static void i915_gem_free_object_tail(struct drm_gem_object *obj) 3496static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
4369{ 3497{
4370 struct drm_device *dev = obj->dev; 3498 struct drm_device *dev = obj->base.dev;
4371 drm_i915_private_t *dev_priv = dev->dev_private; 3499 drm_i915_private_t *dev_priv = dev->dev_private;
4372 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
4373 int ret; 3500 int ret;
4374 3501
4375 ret = i915_gem_object_unbind(obj); 3502 ret = i915_gem_object_unbind(obj);
4376 if (ret == -ERESTARTSYS) { 3503 if (ret == -ERESTARTSYS) {
4377 list_move(&obj_priv->mm_list, 3504 list_move(&obj->mm_list,
4378 &dev_priv->mm.deferred_free_list); 3505 &dev_priv->mm.deferred_free_list);
4379 return; 3506 return;
4380 } 3507 }
4381 3508
4382 if (obj_priv->mmap_offset) 3509 if (obj->base.map_list.map)
4383 i915_gem_free_mmap_offset(obj); 3510 i915_gem_free_mmap_offset(obj);
4384 3511
4385 drm_gem_object_release(obj); 3512 drm_gem_object_release(&obj->base);
4386 i915_gem_info_remove_obj(dev_priv, obj->size); 3513 i915_gem_info_remove_obj(dev_priv, obj->base.size);
4387 3514
4388 kfree(obj_priv->page_cpu_valid); 3515 kfree(obj->page_cpu_valid);
4389 kfree(obj_priv->bit_17); 3516 kfree(obj->bit_17);
4390 kfree(obj_priv); 3517 kfree(obj);
4391} 3518}
4392 3519
4393void i915_gem_free_object(struct drm_gem_object *obj) 3520void i915_gem_free_object(struct drm_gem_object *gem_obj)
4394{ 3521{
4395 struct drm_device *dev = obj->dev; 3522 struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4396 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 3523 struct drm_device *dev = obj->base.dev;
4397 3524
4398 trace_i915_gem_object_destroy(obj); 3525 trace_i915_gem_object_destroy(obj);
4399 3526
4400 while (obj_priv->pin_count > 0) 3527 while (obj->pin_count > 0)
4401 i915_gem_object_unpin(obj); 3528 i915_gem_object_unpin(obj);
4402 3529
4403 if (obj_priv->phys_obj) 3530 if (obj->phys_obj)
4404 i915_gem_detach_phys_object(dev, obj); 3531 i915_gem_detach_phys_object(dev, obj);
4405 3532
4406 i915_gem_free_object_tail(obj); 3533 i915_gem_free_object_tail(obj);
@@ -4427,13 +3554,15 @@ i915_gem_idle(struct drm_device *dev)
4427 3554
4428 /* Under UMS, be paranoid and evict. */ 3555 /* Under UMS, be paranoid and evict. */
4429 if (!drm_core_check_feature(dev, DRIVER_MODESET)) { 3556 if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
4430 ret = i915_gem_evict_inactive(dev); 3557 ret = i915_gem_evict_inactive(dev, false);
4431 if (ret) { 3558 if (ret) {
4432 mutex_unlock(&dev->struct_mutex); 3559 mutex_unlock(&dev->struct_mutex);
4433 return ret; 3560 return ret;
4434 } 3561 }
4435 } 3562 }
4436 3563
3564 i915_gem_reset_fences(dev);
3565
4437 /* Hack! Don't let anybody do execbuf while we don't control the chip. 3566 /* Hack! Don't let anybody do execbuf while we don't control the chip.
4438 * We need to replace this with a semaphore, or something. 3567 * We need to replace this with a semaphore, or something.
4439 * And not confound mm.suspended! 3568 * And not confound mm.suspended!
@@ -4452,82 +3581,15 @@ i915_gem_idle(struct drm_device *dev)
4452 return 0; 3581 return 0;
4453} 3582}
4454 3583
4455/*
4456 * 965+ support PIPE_CONTROL commands, which provide finer grained control
4457 * over cache flushing.
4458 */
4459static int
4460i915_gem_init_pipe_control(struct drm_device *dev)
4461{
4462 drm_i915_private_t *dev_priv = dev->dev_private;
4463 struct drm_gem_object *obj;
4464 struct drm_i915_gem_object *obj_priv;
4465 int ret;
4466
4467 obj = i915_gem_alloc_object(dev, 4096);
4468 if (obj == NULL) {
4469 DRM_ERROR("Failed to allocate seqno page\n");
4470 ret = -ENOMEM;
4471 goto err;
4472 }
4473 obj_priv = to_intel_bo(obj);
4474 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
4475
4476 ret = i915_gem_object_pin(obj, 4096);
4477 if (ret)
4478 goto err_unref;
4479
4480 dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
4481 dev_priv->seqno_page = kmap(obj_priv->pages[0]);
4482 if (dev_priv->seqno_page == NULL)
4483 goto err_unpin;
4484
4485 dev_priv->seqno_obj = obj;
4486 memset(dev_priv->seqno_page, 0, PAGE_SIZE);
4487
4488 return 0;
4489
4490err_unpin:
4491 i915_gem_object_unpin(obj);
4492err_unref:
4493 drm_gem_object_unreference(obj);
4494err:
4495 return ret;
4496}
4497
4498
4499static void
4500i915_gem_cleanup_pipe_control(struct drm_device *dev)
4501{
4502 drm_i915_private_t *dev_priv = dev->dev_private;
4503 struct drm_gem_object *obj;
4504 struct drm_i915_gem_object *obj_priv;
4505
4506 obj = dev_priv->seqno_obj;
4507 obj_priv = to_intel_bo(obj);
4508 kunmap(obj_priv->pages[0]);
4509 i915_gem_object_unpin(obj);
4510 drm_gem_object_unreference(obj);
4511 dev_priv->seqno_obj = NULL;
4512
4513 dev_priv->seqno_page = NULL;
4514}
4515
4516int 3584int
4517i915_gem_init_ringbuffer(struct drm_device *dev) 3585i915_gem_init_ringbuffer(struct drm_device *dev)
4518{ 3586{
4519 drm_i915_private_t *dev_priv = dev->dev_private; 3587 drm_i915_private_t *dev_priv = dev->dev_private;
4520 int ret; 3588 int ret;
4521 3589
4522 if (HAS_PIPE_CONTROL(dev)) {
4523 ret = i915_gem_init_pipe_control(dev);
4524 if (ret)
4525 return ret;
4526 }
4527
4528 ret = intel_init_render_ring_buffer(dev); 3590 ret = intel_init_render_ring_buffer(dev);
4529 if (ret) 3591 if (ret)
4530 goto cleanup_pipe_control; 3592 return ret;
4531 3593
4532 if (HAS_BSD(dev)) { 3594 if (HAS_BSD(dev)) {
4533 ret = intel_init_bsd_ring_buffer(dev); 3595 ret = intel_init_bsd_ring_buffer(dev);
@@ -4546,12 +3608,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
4546 return 0; 3608 return 0;
4547 3609
4548cleanup_bsd_ring: 3610cleanup_bsd_ring:
4549 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 3611 intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
4550cleanup_render_ring: 3612cleanup_render_ring:
4551 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 3613 intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
4552cleanup_pipe_control:
4553 if (HAS_PIPE_CONTROL(dev))
4554 i915_gem_cleanup_pipe_control(dev);
4555 return ret; 3614 return ret;
4556} 3615}
4557 3616
@@ -4559,12 +3618,10 @@ void
4559i915_gem_cleanup_ringbuffer(struct drm_device *dev) 3618i915_gem_cleanup_ringbuffer(struct drm_device *dev)
4560{ 3619{
4561 drm_i915_private_t *dev_priv = dev->dev_private; 3620 drm_i915_private_t *dev_priv = dev->dev_private;
3621 int i;
4562 3622
4563 intel_cleanup_ring_buffer(dev, &dev_priv->render_ring); 3623 for (i = 0; i < I915_NUM_RINGS; i++)
4564 intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring); 3624 intel_cleanup_ring_buffer(&dev_priv->ring[i]);
4565 intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
4566 if (HAS_PIPE_CONTROL(dev))
4567 i915_gem_cleanup_pipe_control(dev);
4568} 3625}
4569 3626
4570int 3627int
@@ -4572,7 +3629,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4572 struct drm_file *file_priv) 3629 struct drm_file *file_priv)
4573{ 3630{
4574 drm_i915_private_t *dev_priv = dev->dev_private; 3631 drm_i915_private_t *dev_priv = dev->dev_private;
4575 int ret; 3632 int ret, i;
4576 3633
4577 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3634 if (drm_core_check_feature(dev, DRIVER_MODESET))
4578 return 0; 3635 return 0;
@@ -4592,14 +3649,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
4592 } 3649 }
4593 3650
4594 BUG_ON(!list_empty(&dev_priv->mm.active_list)); 3651 BUG_ON(!list_empty(&dev_priv->mm.active_list));
4595 BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
4596 BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
4597 BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
4598 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 3652 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
4599 BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 3653 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
4600 BUG_ON(!list_empty(&dev_priv->render_ring.request_list)); 3654 for (i = 0; i < I915_NUM_RINGS; i++) {
4601 BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list)); 3655 BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
4602 BUG_ON(!list_empty(&dev_priv->blt_ring.request_list)); 3656 BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
3657 }
4603 mutex_unlock(&dev->struct_mutex); 3658 mutex_unlock(&dev->struct_mutex);
4604 3659
4605 ret = drm_irq_install(dev); 3660 ret = drm_irq_install(dev);
@@ -4661,17 +3716,14 @@ i915_gem_load(struct drm_device *dev)
4661 INIT_LIST_HEAD(&dev_priv->mm.pinned_list); 3716 INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
4662 INIT_LIST_HEAD(&dev_priv->mm.fence_list); 3717 INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4663 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list); 3718 INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
4664 init_ring_lists(&dev_priv->render_ring); 3719 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
4665 init_ring_lists(&dev_priv->bsd_ring); 3720 for (i = 0; i < I915_NUM_RINGS; i++)
4666 init_ring_lists(&dev_priv->blt_ring); 3721 init_ring_lists(&dev_priv->ring[i]);
4667 for (i = 0; i < 16; i++) 3722 for (i = 0; i < 16; i++)
4668 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3723 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
4669 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3724 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
4670 i915_gem_retire_work_handler); 3725 i915_gem_retire_work_handler);
4671 init_completion(&dev_priv->error_completion); 3726 init_completion(&dev_priv->error_completion);
4672 spin_lock(&shrink_list_lock);
4673 list_add(&dev_priv->mm.shrink_list, &shrink_list);
4674 spin_unlock(&shrink_list_lock);
4675 3727
4676 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */ 3728 /* On GEN3 we really need to make sure the ARB C3 LP bit is set */
4677 if (IS_GEN3(dev)) { 3729 if (IS_GEN3(dev)) {
@@ -4683,6 +3735,8 @@ i915_gem_load(struct drm_device *dev)
4683 } 3735 }
4684 } 3736 }
4685 3737
3738 dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
3739
4686 /* Old X drivers will take 0-2 for front, back, depth buffers */ 3740 /* Old X drivers will take 0-2 for front, back, depth buffers */
4687 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 3741 if (!drm_core_check_feature(dev, DRIVER_MODESET))
4688 dev_priv->fence_reg_start = 3; 3742 dev_priv->fence_reg_start = 3;
@@ -4714,6 +3768,10 @@ i915_gem_load(struct drm_device *dev)
4714 } 3768 }
4715 i915_gem_detect_bit_6_swizzle(dev); 3769 i915_gem_detect_bit_6_swizzle(dev);
4716 init_waitqueue_head(&dev_priv->pending_flip_queue); 3770 init_waitqueue_head(&dev_priv->pending_flip_queue);
3771
3772 dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
3773 dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
3774 register_shrinker(&dev_priv->mm.inactive_shrinker);
4717} 3775}
4718 3776
4719/* 3777/*
@@ -4783,47 +3841,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
4783} 3841}
4784 3842
4785void i915_gem_detach_phys_object(struct drm_device *dev, 3843void i915_gem_detach_phys_object(struct drm_device *dev,
4786 struct drm_gem_object *obj) 3844 struct drm_i915_gem_object *obj)
4787{ 3845{
4788 struct drm_i915_gem_object *obj_priv; 3846 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
3847 char *vaddr;
4789 int i; 3848 int i;
4790 int ret;
4791 int page_count; 3849 int page_count;
4792 3850
4793 obj_priv = to_intel_bo(obj); 3851 if (!obj->phys_obj)
4794 if (!obj_priv->phys_obj)
4795 return; 3852 return;
3853 vaddr = obj->phys_obj->handle->vaddr;
4796 3854
4797 ret = i915_gem_object_get_pages(obj, 0); 3855 page_count = obj->base.size / PAGE_SIZE;
4798 if (ret)
4799 goto out;
4800
4801 page_count = obj->size / PAGE_SIZE;
4802
4803 for (i = 0; i < page_count; i++) { 3856 for (i = 0; i < page_count; i++) {
4804 char *dst = kmap_atomic(obj_priv->pages[i]); 3857 struct page *page = read_cache_page_gfp(mapping, i,
4805 char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 3858 GFP_HIGHUSER | __GFP_RECLAIMABLE);
4806 3859 if (!IS_ERR(page)) {
4807 memcpy(dst, src, PAGE_SIZE); 3860 char *dst = kmap_atomic(page);
4808 kunmap_atomic(dst); 3861 memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
3862 kunmap_atomic(dst);
3863
3864 drm_clflush_pages(&page, 1);
3865
3866 set_page_dirty(page);
3867 mark_page_accessed(page);
3868 page_cache_release(page);
3869 }
4809 } 3870 }
4810 drm_clflush_pages(obj_priv->pages, page_count); 3871 intel_gtt_chipset_flush();
4811 drm_agp_chipset_flush(dev);
4812 3872
4813 i915_gem_object_put_pages(obj); 3873 obj->phys_obj->cur_obj = NULL;
4814out: 3874 obj->phys_obj = NULL;
4815 obj_priv->phys_obj->cur_obj = NULL;
4816 obj_priv->phys_obj = NULL;
4817} 3875}
4818 3876
4819int 3877int
4820i915_gem_attach_phys_object(struct drm_device *dev, 3878i915_gem_attach_phys_object(struct drm_device *dev,
4821 struct drm_gem_object *obj, 3879 struct drm_i915_gem_object *obj,
4822 int id, 3880 int id,
4823 int align) 3881 int align)
4824{ 3882{
3883 struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
4825 drm_i915_private_t *dev_priv = dev->dev_private; 3884 drm_i915_private_t *dev_priv = dev->dev_private;
4826 struct drm_i915_gem_object *obj_priv;
4827 int ret = 0; 3885 int ret = 0;
4828 int page_count; 3886 int page_count;
4829 int i; 3887 int i;
@@ -4831,10 +3889,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4831 if (id > I915_MAX_PHYS_OBJECT) 3889 if (id > I915_MAX_PHYS_OBJECT)
4832 return -EINVAL; 3890 return -EINVAL;
4833 3891
4834 obj_priv = to_intel_bo(obj); 3892 if (obj->phys_obj) {
4835 3893 if (obj->phys_obj->id == id)
4836 if (obj_priv->phys_obj) {
4837 if (obj_priv->phys_obj->id == id)
4838 return 0; 3894 return 0;
4839 i915_gem_detach_phys_object(dev, obj); 3895 i915_gem_detach_phys_object(dev, obj);
4840 } 3896 }
@@ -4842,51 +3898,50 @@ i915_gem_attach_phys_object(struct drm_device *dev,
4842 /* create a new object */ 3898 /* create a new object */
4843 if (!dev_priv->mm.phys_objs[id - 1]) { 3899 if (!dev_priv->mm.phys_objs[id - 1]) {
4844 ret = i915_gem_init_phys_object(dev, id, 3900 ret = i915_gem_init_phys_object(dev, id,
4845 obj->size, align); 3901 obj->base.size, align);
4846 if (ret) { 3902 if (ret) {
4847 DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size); 3903 DRM_ERROR("failed to init phys object %d size: %zu\n",
4848 goto out; 3904 id, obj->base.size);
3905 return ret;
4849 } 3906 }
4850 } 3907 }
4851 3908
4852 /* bind to the object */ 3909 /* bind to the object */
4853 obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1]; 3910 obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
4854 obj_priv->phys_obj->cur_obj = obj; 3911 obj->phys_obj->cur_obj = obj;
4855
4856 ret = i915_gem_object_get_pages(obj, 0);
4857 if (ret) {
4858 DRM_ERROR("failed to get page list\n");
4859 goto out;
4860 }
4861 3912
4862 page_count = obj->size / PAGE_SIZE; 3913 page_count = obj->base.size / PAGE_SIZE;
4863 3914
4864 for (i = 0; i < page_count; i++) { 3915 for (i = 0; i < page_count; i++) {
4865 char *src = kmap_atomic(obj_priv->pages[i]); 3916 struct page *page;
4866 char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE); 3917 char *dst, *src;
4867 3918
3919 page = read_cache_page_gfp(mapping, i,
3920 GFP_HIGHUSER | __GFP_RECLAIMABLE);
3921 if (IS_ERR(page))
3922 return PTR_ERR(page);
3923
3924 src = kmap_atomic(page);
3925 dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
4868 memcpy(dst, src, PAGE_SIZE); 3926 memcpy(dst, src, PAGE_SIZE);
4869 kunmap_atomic(src); 3927 kunmap_atomic(src);
4870 }
4871 3928
4872 i915_gem_object_put_pages(obj); 3929 mark_page_accessed(page);
3930 page_cache_release(page);
3931 }
4873 3932
4874 return 0; 3933 return 0;
4875out:
4876 return ret;
4877} 3934}
4878 3935
4879static int 3936static int
4880i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 3937i915_gem_phys_pwrite(struct drm_device *dev,
3938 struct drm_i915_gem_object *obj,
4881 struct drm_i915_gem_pwrite *args, 3939 struct drm_i915_gem_pwrite *args,
4882 struct drm_file *file_priv) 3940 struct drm_file *file_priv)
4883{ 3941{
4884 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 3942 void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
4885 void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
4886 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr; 3943 char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
4887 3944
4888 DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
4889
4890 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { 3945 if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
4891 unsigned long unwritten; 3946 unsigned long unwritten;
4892 3947
@@ -4901,7 +3956,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4901 return -EFAULT; 3956 return -EFAULT;
4902 } 3957 }
4903 3958
4904 drm_agp_chipset_flush(dev); 3959 intel_gtt_chipset_flush();
4905 return 0; 3960 return 0;
4906} 3961}
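
The phys-object path above is picked transparently inside the pwrite ioctl;
from userspace the call looks the same whichever backing store is used. A
hedged sketch (i915_drm.h assumed; gem_pwrite is a hypothetical helper):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

/* Sketch: copy `len` bytes from `data` into a GEM object at `offset`.
 * Whether the kernel takes the shmem, GTT or phys-object path is
 * invisible to the caller.
 */
static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
		      const void *data, uint64_t len)
{
	struct drm_i915_gem_pwrite arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.offset = offset;
	arg.size = len;
	arg.data_ptr = (uintptr_t)data;

	return ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
}
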
4907 3962
@@ -4939,144 +3994,68 @@ i915_gpu_is_active(struct drm_device *dev)
4939} 3994}
4940 3995
4941static int 3996static int
4942i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask) 3997i915_gem_inactive_shrink(struct shrinker *shrinker,
4943{ 3998 int nr_to_scan,
4944 drm_i915_private_t *dev_priv, *next_dev; 3999 gfp_t gfp_mask)
4945 struct drm_i915_gem_object *obj_priv, *next_obj; 4000{
4946 int cnt = 0; 4001 struct drm_i915_private *dev_priv =
4947 int would_deadlock = 1; 4002 container_of(shrinker,
4003 struct drm_i915_private,
4004 mm.inactive_shrinker);
4005 struct drm_device *dev = dev_priv->dev;
4006 struct drm_i915_gem_object *obj, *next;
4007 int cnt;
4008
4009 if (!mutex_trylock(&dev->struct_mutex))
4010 return 0;
4948 4011
4949 /* "fast-path" to count number of available objects */ 4012 /* "fast-path" to count number of available objects */
4950 if (nr_to_scan == 0) { 4013 if (nr_to_scan == 0) {
4951 spin_lock(&shrink_list_lock); 4014 cnt = 0;
4952 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { 4015 list_for_each_entry(obj,
4953 struct drm_device *dev = dev_priv->dev; 4016 &dev_priv->mm.inactive_list,
4954 4017 mm_list)
4955 if (mutex_trylock(&dev->struct_mutex)) { 4018 cnt++;
4956 list_for_each_entry(obj_priv, 4019 mutex_unlock(&dev->struct_mutex);
4957 &dev_priv->mm.inactive_list, 4020 return cnt / 100 * sysctl_vfs_cache_pressure;
4958 mm_list)
4959 cnt++;
4960 mutex_unlock(&dev->struct_mutex);
4961 }
4962 }
4963 spin_unlock(&shrink_list_lock);
4964
4965 return (cnt / 100) * sysctl_vfs_cache_pressure;
4966 } 4021 }
4967 4022
4968 spin_lock(&shrink_list_lock);
4969
4970rescan: 4023rescan:
4971 /* first scan for clean buffers */ 4024 /* first scan for clean buffers */
4972 list_for_each_entry_safe(dev_priv, next_dev, 4025 i915_gem_retire_requests(dev);
4973 &shrink_list, mm.shrink_list) {
4974 struct drm_device *dev = dev_priv->dev;
4975
4976 if (! mutex_trylock(&dev->struct_mutex))
4977 continue;
4978
4979 spin_unlock(&shrink_list_lock);
4980 i915_gem_retire_requests(dev);
4981 4026
4982 list_for_each_entry_safe(obj_priv, next_obj, 4027 list_for_each_entry_safe(obj, next,
4983 &dev_priv->mm.inactive_list, 4028 &dev_priv->mm.inactive_list,
4984 mm_list) { 4029 mm_list) {
4985 if (i915_gem_object_is_purgeable(obj_priv)) { 4030 if (i915_gem_object_is_purgeable(obj)) {
4986 i915_gem_object_unbind(&obj_priv->base); 4031 if (i915_gem_object_unbind(obj) == 0 &&
4987 if (--nr_to_scan <= 0) 4032 --nr_to_scan == 0)
4988 break; 4033 break;
4989 }
4990 } 4034 }
4991
4992 spin_lock(&shrink_list_lock);
4993 mutex_unlock(&dev->struct_mutex);
4994
4995 would_deadlock = 0;
4996
4997 if (nr_to_scan <= 0)
4998 break;
4999 } 4035 }
5000 4036
5001 /* second pass, evict/count anything still on the inactive list */ 4037 /* second pass, evict/count anything still on the inactive list */
5002 list_for_each_entry_safe(dev_priv, next_dev, 4038 cnt = 0;
5003 &shrink_list, mm.shrink_list) { 4039 list_for_each_entry_safe(obj, next,
5004 struct drm_device *dev = dev_priv->dev; 4040 &dev_priv->mm.inactive_list,
5005 4041 mm_list) {
5006 if (! mutex_trylock(&dev->struct_mutex)) 4042 if (nr_to_scan &&
5007 continue; 4043 i915_gem_object_unbind(obj) == 0)
5008 4044 nr_to_scan--;
5009 spin_unlock(&shrink_list_lock); 4045 else
5010 4046 cnt++;
5011 list_for_each_entry_safe(obj_priv, next_obj,
5012 &dev_priv->mm.inactive_list,
5013 mm_list) {
5014 if (nr_to_scan > 0) {
5015 i915_gem_object_unbind(&obj_priv->base);
5016 nr_to_scan--;
5017 } else
5018 cnt++;
5019 }
5020
5021 spin_lock(&shrink_list_lock);
5022 mutex_unlock(&dev->struct_mutex);
5023
5024 would_deadlock = 0;
5025 } 4047 }
5026 4048
5027 if (nr_to_scan) { 4049 if (nr_to_scan && i915_gpu_is_active(dev)) {
5028 int active = 0;
5029
5030 /* 4050 /*
5031 * We are desperate for pages, so as a last resort, wait 4051 * We are desperate for pages, so as a last resort, wait
5032 * for the GPU to finish and discard whatever we can. 4052 * for the GPU to finish and discard whatever we can.
5033 * This has a dramatic impact to reduce the number of 4053 * This has a dramatic impact to reduce the number of
5034 * OOM-killer events whilst running the GPU aggressively. 4054 * OOM-killer events whilst running the GPU aggressively.
5035 */ 4055 */
5036 list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) { 4056 if (i915_gpu_idle(dev) == 0)
5037 struct drm_device *dev = dev_priv->dev;
5038
5039 if (!mutex_trylock(&dev->struct_mutex))
5040 continue;
5041
5042 spin_unlock(&shrink_list_lock);
5043
5044 if (i915_gpu_is_active(dev)) {
5045 i915_gpu_idle(dev);
5046 active++;
5047 }
5048
5049 spin_lock(&shrink_list_lock);
5050 mutex_unlock(&dev->struct_mutex);
5051 }
5052
5053 if (active)
5054 goto rescan; 4057 goto rescan;
5055 } 4058 }
5056 4059 mutex_unlock(&dev->struct_mutex);
5057 spin_unlock(&shrink_list_lock); 4060 return cnt / 100 * sysctl_vfs_cache_pressure;
5058
5059 if (would_deadlock)
5060 return -1;
5061 else if (cnt > 0)
5062 return (cnt / 100) * sysctl_vfs_cache_pressure;
5063 else
5064 return 0;
5065}
5066
5067static struct shrinker shrinker = {
5068 .shrink = i915_gem_shrink,
5069 .seeks = DEFAULT_SEEKS,
5070};
5071
5072__init void
5073i915_gem_shrinker_init(void)
5074{
5075 register_shrinker(&shrinker);
5076}
5077
5078__exit void
5079i915_gem_shrinker_exit(void)
5080{
5081 unregister_shrinker(&shrinker);
5082} 4061}
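
The rewrite above replaces the driver-global shrink_list with one shrinker
per device, embedded in dev_priv->mm. A minimal sketch of the mm shrinker
contract as it stood in this kernel generation (toy_* names are hypothetical):

#include <linux/mm.h>

/* Sketch: nr_to_scan == 0 is a query ("how many objects could you
 * free?"); otherwise the callback should try to free that many and
 * report how many remain.  Returning 0 when a lock cannot be taken
 * tells the VM to retry later, which is what the trylock above does.
 */
static int toy_shrink(struct shrinker *shrinker, int nr_to_scan,
		      gfp_t gfp_mask)
{
	/* ...count or reclaim cached objects here... */
	return 0;
}

static struct shrinker toy_shrinker = {
	.shrink = toy_shrink,
	.seeks = DEFAULT_SEEKS,
};

/* register_shrinker(&toy_shrinker) at init;
 * unregister_shrinker(&toy_shrinker) at teardown.
 */
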
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b840a8d..29d014c48ca2 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
152} 152}
153 153
154void 154void
155i915_gem_dump_object(struct drm_gem_object *obj, int len, 155i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
156 const char *where, uint32_t mark) 156 const char *where, uint32_t mark)
157{ 157{
158 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
159 int page; 158 int page;
160 159
161 DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 160 DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
162 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { 161 for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
163 int page_len, chunk, chunk_len; 162 int page_len, chunk, chunk_len;
164 163
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
170 chunk_len = page_len - chunk; 169 chunk_len = page_len - chunk;
171 if (chunk_len > 128) 170 if (chunk_len > 128)
172 chunk_len = 128; 171 chunk_len = 128;
173 i915_gem_dump_page(obj_priv->pages[page], 172 i915_gem_dump_page(obj->pages[page],
174 chunk, chunk + chunk_len, 173 chunk, chunk + chunk_len,
175 obj_priv->gtt_offset + 174 obj->gtt_offset +
176 page * PAGE_SIZE, 175 page * PAGE_SIZE,
177 mark); 176 mark);
178 } 177 }
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
182 181
183#if WATCH_COHERENCY 182#if WATCH_COHERENCY
184void 183void
185i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 184i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
186{ 185{
187 struct drm_device *dev = obj->dev; 186 struct drm_device *dev = obj->base.dev;
188 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
189 int page; 187 int page;
190 uint32_t *gtt_mapping; 188 uint32_t *gtt_mapping;
191 uint32_t *backing_map = NULL; 189 uint32_t *backing_map = NULL;
192 int bad_count = 0; 190 int bad_count = 0;
193 191
194 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n", 192 DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
195 __func__, obj, obj_priv->gtt_offset, handle, 193 __func__, obj, obj->gtt_offset, handle,
196 obj->size / 1024); 194 obj->base.size / 1024);
197 195
198 gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, 196 gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
199 obj->size);
200 if (gtt_mapping == NULL) { 197 if (gtt_mapping == NULL) {
201 DRM_ERROR("failed to map GTT space\n"); 198 DRM_ERROR("failed to map GTT space\n");
202 return; 199 return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
205 for (page = 0; page < obj->size / PAGE_SIZE; page++) { 202 for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
206 int i; 203 int i;
207 204
208 backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0); 205 backing_map = kmap_atomic(obj->pages[page], KM_USER0);
209 206
210 if (backing_map == NULL) { 207 if (backing_map == NULL) {
211 DRM_ERROR("failed to map backing page\n"); 208 DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
220 if (cpuval != gttval) { 217 if (cpuval != gttval) {
221 DRM_INFO("incoherent CPU vs GPU at 0x%08x: " 218 DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
222 "0x%08x vs 0x%08x\n", 219 "0x%08x vs 0x%08x\n",
223 (int)(obj_priv->gtt_offset + 220 (int)(obj->gtt_offset +
224 page * PAGE_SIZE + i * 4), 221 page * PAGE_SIZE + i * 4),
225 cpuval, gttval); 222 cpuval, gttval);
226 if (bad_count++ >= 8) { 223 if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc6..78b8cf90c922 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
32#include "i915_drm.h" 32#include "i915_drm.h"
33 33
34static bool 34static bool
35mark_free(struct drm_i915_gem_object *obj_priv, 35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
36 struct list_head *unwind)
37{ 36{
38 list_add(&obj_priv->evict_list, unwind); 37 list_add(&obj->exec_list, unwind);
39 drm_gem_object_reference(&obj_priv->base); 38 drm_gem_object_reference(&obj->base);
40 return drm_mm_scan_add_block(obj_priv->gtt_space); 39 return drm_mm_scan_add_block(obj->gtt_space);
41} 40}
42 41
43int 42int
44i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment) 43i915_gem_evict_something(struct drm_device *dev, int min_size,
44 unsigned alignment, bool mappable)
45{ 45{
46 drm_i915_private_t *dev_priv = dev->dev_private; 46 drm_i915_private_t *dev_priv = dev->dev_private;
47 struct list_head eviction_list, unwind_list; 47 struct list_head eviction_list, unwind_list;
48 struct drm_i915_gem_object *obj_priv; 48 struct drm_i915_gem_object *obj;
49 int ret = 0; 49 int ret = 0;
50 50
51 i915_gem_retire_requests(dev); 51 i915_gem_retire_requests(dev);
52 52
53 /* Re-check for free space after retiring requests */ 53 /* Re-check for free space after retiring requests */
54 if (drm_mm_search_free(&dev_priv->mm.gtt_space, 54 if (mappable) {
55 min_size, alignment, 0)) 55 if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
56 return 0; 56 min_size, alignment, 0,
57 dev_priv->mm.gtt_mappable_end,
58 0))
59 return 0;
60 } else {
61 if (drm_mm_search_free(&dev_priv->mm.gtt_space,
62 min_size, alignment, 0))
63 return 0;
64 }
57 65
58 /* 66 /*
59 * The goal is to evict objects and amalgamate space in LRU order. 67 * The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
79 */ 87 */
80 88
81 INIT_LIST_HEAD(&unwind_list); 89 INIT_LIST_HEAD(&unwind_list);
82 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment); 90 if (mappable)
91 drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
92 alignment, 0,
93 dev_priv->mm.gtt_mappable_end);
94 else
95 drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
83 96
84 /* First see if there is a large enough contiguous idle region... */ 97 /* First see if there is a large enough contiguous idle region... */
85 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { 98 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
86 if (mark_free(obj_priv, &unwind_list)) 99 if (mark_free(obj, &unwind_list))
87 goto found; 100 goto found;
88 } 101 }
89 102
90 /* Now merge in the soon-to-be-expired objects... */ 103 /* Now merge in the soon-to-be-expired objects... */
91 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 104 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
92 /* Does the object require an outstanding flush? */ 105 /* Does the object require an outstanding flush? */
93 if (obj_priv->base.write_domain || obj_priv->pin_count) 106 if (obj->base.write_domain || obj->pin_count)
94 continue; 107 continue;
95 108
96 if (mark_free(obj_priv, &unwind_list)) 109 if (mark_free(obj, &unwind_list))
97 goto found; 110 goto found;
98 } 111 }
99 112
100 /* Finally add anything with a pending flush (in order of retirement) */ 113 /* Finally add anything with a pending flush (in order of retirement) */
101 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { 114 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
102 if (obj_priv->pin_count) 115 if (obj->pin_count)
103 continue; 116 continue;
104 117
105 if (mark_free(obj_priv, &unwind_list)) 118 if (mark_free(obj, &unwind_list))
106 goto found; 119 goto found;
107 } 120 }
108 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 121 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
109 if (! obj_priv->base.write_domain || obj_priv->pin_count) 122 if (! obj->base.write_domain || obj->pin_count)
110 continue; 123 continue;
111 124
112 if (mark_free(obj_priv, &unwind_list)) 125 if (mark_free(obj, &unwind_list))
113 goto found; 126 goto found;
114 } 127 }
115 128
116 /* Nothing found, clean up and bail out! */ 129 /* Nothing found, clean up and bail out! */
117 list_for_each_entry(obj_priv, &unwind_list, evict_list) { 130 list_for_each_entry(obj, &unwind_list, exec_list) {
118 ret = drm_mm_scan_remove_block(obj_priv->gtt_space); 131 ret = drm_mm_scan_remove_block(obj->gtt_space);
119 BUG_ON(ret); 132 BUG_ON(ret);
120 drm_gem_object_unreference(&obj_priv->base); 133 drm_gem_object_unreference(&obj->base);
121 } 134 }
122 135
123 /* We expect the caller to unpin, evict all and try again, or give up. 136 /* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +144,33 @@ found:
131 * temporary list. */ 144 * temporary list. */
132 INIT_LIST_HEAD(&eviction_list); 145 INIT_LIST_HEAD(&eviction_list);
133 while (!list_empty(&unwind_list)) { 146 while (!list_empty(&unwind_list)) {
134 obj_priv = list_first_entry(&unwind_list, 147 obj = list_first_entry(&unwind_list,
135 struct drm_i915_gem_object, 148 struct drm_i915_gem_object,
136 evict_list); 149 exec_list);
137 if (drm_mm_scan_remove_block(obj_priv->gtt_space)) { 150 if (drm_mm_scan_remove_block(obj->gtt_space)) {
138 list_move(&obj_priv->evict_list, &eviction_list); 151 list_move(&obj->exec_list, &eviction_list);
139 continue; 152 continue;
140 } 153 }
141 list_del(&obj_priv->evict_list); 154 list_del_init(&obj->exec_list);
142 drm_gem_object_unreference(&obj_priv->base); 155 drm_gem_object_unreference(&obj->base);
143 } 156 }
144 157
145 /* Unbinding will emit any required flushes */ 158 /* Unbinding will emit any required flushes */
146 while (!list_empty(&eviction_list)) { 159 while (!list_empty(&eviction_list)) {
147 obj_priv = list_first_entry(&eviction_list, 160 obj = list_first_entry(&eviction_list,
148 struct drm_i915_gem_object, 161 struct drm_i915_gem_object,
149 evict_list); 162 exec_list);
150 if (ret == 0) 163 if (ret == 0)
151 ret = i915_gem_object_unbind(&obj_priv->base); 164 ret = i915_gem_object_unbind(obj);
152 list_del(&obj_priv->evict_list); 165 list_del_init(&obj->exec_list);
153 drm_gem_object_unreference(&obj_priv->base); 166 drm_gem_object_unreference(&obj->base);
154 } 167 }
155 168
156 return ret; 169 return ret;
157} 170}
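
For reference, a condensed sketch of the drm_mm scan protocol the function
above relies on (toy_bo, toy_unbind and the lists are hypothetical; the
contract is real): every node fed to drm_mm_scan_add_block() must be handed
back to drm_mm_scan_remove_block() in reverse order of addition, and a
nonzero return from the latter marks the node as part of the hole to evict.

#include <linux/list.h>
#include <linux/errno.h>
#include "drmP.h"

struct toy_bo {
	struct list_head lru_link;	/* position on the LRU */
	struct list_head scan_link;	/* temporary scan bookkeeping */
	struct drm_mm_node *node;
};

static void toy_unbind(struct toy_bo *bo)
{
	/* hypothetical: release bo->node back to the manager */
}

static int toy_evict_something(struct drm_mm *mm, struct list_head *lru,
			       unsigned long min_size, unsigned alignment)
{
	struct toy_bo *bo, *next;
	LIST_HEAD(unwind);
	bool found = false;

	drm_mm_init_scan(mm, min_size, alignment);

	list_for_each_entry(bo, lru, lru_link) {
		/* list_add() prepends, so walking `unwind` from its head
		 * later visits the nodes in reverse order of addition. */
		list_add(&bo->scan_link, &unwind);
		if (drm_mm_scan_add_block(bo->node)) {
			found = true;	/* a big-enough hole can be formed */
			break;
		}
	}

	list_for_each_entry_safe(bo, next, &unwind, scan_link) {
		bool in_hole = drm_mm_scan_remove_block(bo->node) != 0;

		list_del(&bo->scan_link);
		if (found && in_hole)
			toy_unbind(bo);
	}

	return found ? 0 : -ENOSPC;
}
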
158 171
159int 172int
160i915_gem_evict_everything(struct drm_device *dev) 173i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
161{ 174{
162 drm_i915_private_t *dev_priv = dev->dev_private; 175 drm_i915_private_t *dev_priv = dev->dev_private;
163 int ret; 176 int ret;
@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)
176 189
177 BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 190 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
178 191
179 ret = i915_gem_evict_inactive(dev); 192 return i915_gem_evict_inactive(dev, purgeable_only);
180 if (ret)
181 return ret;
182
183 lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
184 list_empty(&dev_priv->mm.flushing_list) &&
185 list_empty(&dev_priv->mm.active_list));
186 BUG_ON(!lists_empty);
187
188 return 0;
189} 193}
190 194
191/** Unbinds all inactive objects. */ 195/** Unbinds all inactive objects. */
192int 196int
193i915_gem_evict_inactive(struct drm_device *dev) 197i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
194{ 198{
195 drm_i915_private_t *dev_priv = dev->dev_private; 199 drm_i915_private_t *dev_priv = dev->dev_private;
196 200 struct drm_i915_gem_object *obj, *next;
197 while (!list_empty(&dev_priv->mm.inactive_list)) { 201
198 struct drm_gem_object *obj; 202 list_for_each_entry_safe(obj, next,
199 int ret; 203 &dev_priv->mm.inactive_list, mm_list) {
200 204 if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
201 obj = &list_first_entry(&dev_priv->mm.inactive_list, 205 int ret = i915_gem_object_unbind(obj);
202 struct drm_i915_gem_object, 206 if (ret)
203 mm_list)->base; 207 return ret;
204
205 ret = i915_gem_object_unbind(obj);
206 if (ret != 0) {
207 DRM_ERROR("Error unbinding object: %d\n", ret);
208 return ret;
209 } 208 }
210 } 209 }
211 210
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 000000000000..61129e6759eb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1343 @@
1/*
2 * Copyright © 2008,2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
26 *
27 */
28
29#include "drmP.h"
30#include "drm.h"
31#include "i915_drm.h"
32#include "i915_drv.h"
33#include "i915_trace.h"
34#include "intel_drv.h"
35
36struct change_domains {
37 uint32_t invalidate_domains;
38 uint32_t flush_domains;
39 uint32_t flush_rings;
40};
41
42/*
43 * Set the next domain for the specified object. This
44 * may not actually perform the necessary flushing/invalidating though,
45 * as that may want to be batched with other set_domain operations
46 *
47 * This is (we hope) the only really tricky part of gem. The goal
48 * is fairly simple -- track which caches hold bits of the object
49 * and make sure they remain coherent. A few concrete examples may
50 * help to explain how it works. For shorthand, we use the notation
51 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
52 * a pair of read and write domain masks.
53 *
54 * Case 1: the batch buffer
55 *
56 * 1. Allocated
57 * 2. Written by CPU
58 * 3. Mapped to GTT
59 * 4. Read by GPU
60 * 5. Unmapped from GTT
61 * 6. Freed
62 *
63 * Let's take these a step at a time
64 *
65 * 1. Allocated
66 * Pages allocated from the kernel may still have
67 * cache contents, so we set them to (CPU, CPU) always.
68 * 2. Written by CPU (using pwrite)
69 * The pwrite function calls set_domain (CPU, CPU) and
70 * this function does nothing (as nothing changes)
71 * 3. Mapped by GTT
72 * This function asserts that the object is not
73 * currently in any GPU-based read or write domains
74 * 4. Read by GPU
75 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
76 * As write_domain is zero, this function adds in the
77 * current read domains (CPU+COMMAND, 0).
78 * flush_domains is set to CPU.
79 * invalidate_domains is set to COMMAND
80 * clflush is run to get data out of the CPU caches
81 * then i915_dev_set_domain calls i915_gem_flush to
82 * emit an MI_FLUSH and drm_agp_chipset_flush
83 * 5. Unmapped from GTT
84 * i915_gem_object_unbind calls set_domain (CPU, CPU)
85 * flush_domains and invalidate_domains end up both zero
86 * so no flushing/invalidating happens
87 * 6. Freed
88 * yay, done
89 *
90 * Case 2: The shared render buffer
91 *
92 * 1. Allocated
93 * 2. Mapped to GTT
94 * 3. Read/written by GPU
95 * 4. set_domain to (CPU,CPU)
96 * 5. Read/written by CPU
97 * 6. Read/written by GPU
98 *
99 * 1. Allocated
100 * Same as last example, (CPU, CPU)
101 * 2. Mapped to GTT
102 * Nothing changes (assertions find that it is not in the GPU)
103 * 3. Read/written by GPU
104 * execbuffer calls set_domain (RENDER, RENDER)
105 * flush_domains gets CPU
106 * invalidate_domains gets GPU
107 * clflush (obj)
108 * MI_FLUSH and drm_agp_chipset_flush
109 * 4. set_domain (CPU, CPU)
110 * flush_domains gets GPU
111 * invalidate_domains gets CPU
112 * wait_rendering (obj) to make sure all drawing is complete.
113 * This will include an MI_FLUSH to get the data from GPU
114 * to memory
115 * clflush (obj) to invalidate the CPU cache
116 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
117 * 5. Read/written by CPU
118 * cache lines are loaded and dirtied
119 * 6. Read/written by GPU
120 * Same as last GPU access
121 *
122 * Case 3: The constant buffer
123 *
124 * 1. Allocated
125 * 2. Written by CPU
126 * 3. Read by GPU
127 * 4. Updated (written) by CPU again
128 * 5. Read by GPU
129 *
130 * 1. Allocated
131 * (CPU, CPU)
132 * 2. Written by CPU
133 * (CPU, CPU)
134 * 3. Read by GPU
135 * (CPU+RENDER, 0)
136 * flush_domains = CPU
137 * invalidate_domains = RENDER
138 * clflush (obj)
139 * MI_FLUSH
140 * drm_agp_chipset_flush
141 * 4. Updated (written) by CPU again
142 * (CPU, CPU)
143 * flush_domains = 0 (no previous write domain)
144 * invalidate_domains = 0 (no new read domains)
145 * 5. Read by GPU
146 * (CPU+RENDER, 0)
147 * flush_domains = CPU
148 * invalidate_domains = RENDER
149 * clflush (obj)
150 * MI_FLUSH
151 * drm_agp_chipset_flush
152 */
153static void
154i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
155 struct intel_ring_buffer *ring,
156 struct change_domains *cd)
157{
158 uint32_t invalidate_domains = 0, flush_domains = 0;
159
160 /*
161 * If the object isn't moving to a new write domain,
162 * let the object stay in multiple read domains
163 */
164 if (obj->base.pending_write_domain == 0)
165 obj->base.pending_read_domains |= obj->base.read_domains;
166
167 /*
168 * Flush the current write domain if
169 * the new read domains don't match. Invalidate
170 * any read domains which differ from the old
171 * write domain
172 */
173 if (obj->base.write_domain &&
174 (((obj->base.write_domain != obj->base.pending_read_domains ||
175 obj->ring != ring)) ||
176 (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
177 flush_domains |= obj->base.write_domain;
178 invalidate_domains |=
179 obj->base.pending_read_domains & ~obj->base.write_domain;
180 }
181 /*
182 * Invalidate any read caches which may have
183 * stale data. That is, any new read domains.
184 */
185 invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
186 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
187 i915_gem_clflush_object(obj);
188
189 /* blow away mappings if mapped through GTT */
190 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
191 i915_gem_release_mmap(obj);
192
193 /* The actual obj->write_domain will be updated with
194 * pending_write_domain after we emit the accumulated flush for all
195 * of our domain changes in execbuffers (which clears objects'
196 * write_domains). So if we have a current write domain that we
197 * aren't changing, set pending_write_domain to that.
198 */
199 if (flush_domains == 0 && obj->base.pending_write_domain == 0)
200 obj->base.pending_write_domain = obj->base.write_domain;
201
202 cd->invalidate_domains |= invalidate_domains;
203 cd->flush_domains |= flush_domains;
204 if (flush_domains & I915_GEM_GPU_DOMAINS)
205 cd->flush_rings |= obj->ring->id;
206 if (invalidate_domains & I915_GEM_GPU_DOMAINS)
207 cd->flush_rings |= ring->id;
208}
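
A simplified, self-contained restatement of the rule implemented above may
help when reading the cases in the comment (toy_* names and the reduced
domain masks are illustrative; the ring and fence checks are omitted):

#include <stdint.h>
#include <stdio.h>

#define TOY_DOMAIN_CPU    (1u << 0)
#define TOY_DOMAIN_RENDER (1u << 1)

/* Sketch: flush the old write domain when the new read set differs
 * from it, and invalidate any newly entered read domains.
 */
static void toy_change_domains(uint32_t read_domains, uint32_t write_domain,
			       uint32_t pending_read,
			       uint32_t *flush, uint32_t *invalidate)
{
	*flush = 0;
	*invalidate = 0;

	if (write_domain && write_domain != pending_read) {
		*flush |= write_domain;
		*invalidate |= pending_read & ~write_domain;
	}
	*invalidate |= pending_read & ~read_domains;
}

int main(void)
{
	uint32_t flush, invalidate;

	/* Case 3, step 3: a (CPU, CPU) object is read by the render ring. */
	toy_change_domains(TOY_DOMAIN_CPU, TOY_DOMAIN_CPU,
			   TOY_DOMAIN_RENDER, &flush, &invalidate);
	printf("flush=%#x invalidate=%#x\n", flush, invalidate);
	/* Prints flush=0x1 (CPU) invalidate=0x2 (RENDER), matching
	 * "flush_domains = CPU, invalidate_domains = RENDER" above. */
	return 0;
}
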
209
210struct eb_objects {
211 int and;
212 struct hlist_head buckets[0];
213};
214
215static struct eb_objects *
216eb_create(int size)
217{
218 struct eb_objects *eb;
219 int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
220 while (count > size)
221 count >>= 1;
222 eb = kzalloc(count*sizeof(struct hlist_head) +
223 sizeof(struct eb_objects),
224 GFP_KERNEL);
225 if (eb == NULL)
226 return eb;
227
228 eb->and = count - 1;
229 return eb;
230}
231
232static void
233eb_reset(struct eb_objects *eb)
234{
235 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
236}
237
238static void
239eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
240{
241 hlist_add_head(&obj->exec_node,
242 &eb->buckets[obj->exec_handle & eb->and]);
243}
244
245static struct drm_i915_gem_object *
246eb_get_object(struct eb_objects *eb, unsigned long handle)
247{
248 struct hlist_head *head;
249 struct hlist_node *node;
250 struct drm_i915_gem_object *obj;
251
252 head = &eb->buckets[handle & eb->and];
253 hlist_for_each(node, head) {
254 obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
255 if (obj->exec_handle == handle)
256 return obj;
257 }
258
259 return NULL;
260}
261
262static void
263eb_destroy(struct eb_objects *eb)
264{
265 kfree(eb);
266}
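
The bucket count chosen by eb_create() is always a power of two, which is
what makes the `handle & eb->and` masking in eb_add_object()/eb_get_object()
a valid cheap modulo. A standalone sketch of the sizing rule (bucket_count
is a hypothetical name):

#include <stdio.h>

/* Sketch: halve a power-of-two starting count until it no longer
 * exceeds the object count, mirroring eb_create() above.
 */
static unsigned bucket_count(unsigned page_size, unsigned head_size,
			     unsigned nobjects)
{
	unsigned count = page_size / head_size / 2;

	while (count > nobjects)
		count >>= 1;
	return count;		/* always a power of two */
}

int main(void)
{
	/* 4096-byte pages, 8-byte hlist_head, 40 objects: 256 -> 32,
	 * so eb->and would be 31. */
	printf("%u\n", bucket_count(4096, 8, 40));
	return 0;
}
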
267
268static int
269i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
270 struct eb_objects *eb,
271 struct drm_i915_gem_exec_object2 *entry,
272 struct drm_i915_gem_relocation_entry *reloc)
273{
274 struct drm_device *dev = obj->base.dev;
275 struct drm_gem_object *target_obj;
276 uint32_t target_offset;
277 int ret = -EINVAL;
278
279 /* we already hold a reference to all valid objects */
280 target_obj = &eb_get_object(eb, reloc->target_handle)->base;
281 if (unlikely(target_obj == NULL))
282 return -ENOENT;
283
284 target_offset = to_intel_bo(target_obj)->gtt_offset;
285
286#if WATCH_RELOC
287 DRM_INFO("%s: obj %p offset %08x target %d "
288 "read %08x write %08x gtt %08x "
289 "presumed %08x delta %08x\n",
290 __func__,
291 obj,
292 (int) reloc->offset,
293 (int) reloc->target_handle,
294 (int) reloc->read_domains,
295 (int) reloc->write_domain,
296 (int) target_offset,
297 (int) reloc->presumed_offset,
298 reloc->delta);
299#endif
300
301 /* The target buffer should have appeared before us in the
302 * exec_object list, so it should have a GTT space bound by now.
303 */
304 if (unlikely(target_offset == 0)) {
305 DRM_ERROR("No GTT space found for object %d\n",
306 reloc->target_handle);
307 return ret;
308 }
309
310 /* Validate that the target is in a valid r/w GPU domain */
311 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
312 DRM_ERROR("reloc with multiple write domains: "
313 "obj %p target %d offset %d "
314 "read %08x write %08x",
315 obj, reloc->target_handle,
316 (int) reloc->offset,
317 reloc->read_domains,
318 reloc->write_domain);
319 return ret;
320 }
321 if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
322 DRM_ERROR("reloc with read/write CPU domains: "
323 "obj %p target %d offset %d "
324 "read %08x write %08x",
325 obj, reloc->target_handle,
326 (int) reloc->offset,
327 reloc->read_domains,
328 reloc->write_domain);
329 return ret;
330 }
331 if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
332 reloc->write_domain != target_obj->pending_write_domain)) {
333 DRM_ERROR("Write domain conflict: "
334 "obj %p target %d offset %d "
335 "new %08x old %08x\n",
336 obj, reloc->target_handle,
337 (int) reloc->offset,
338 reloc->write_domain,
339 target_obj->pending_write_domain);
340 return ret;
341 }
342
343 target_obj->pending_read_domains |= reloc->read_domains;
344 target_obj->pending_write_domain |= reloc->write_domain;
345
346 /* If the relocation already has the right value in it, no
347 * more work needs to be done.
348 */
349 if (target_offset == reloc->presumed_offset)
350 return 0;
351
352 /* Check that the relocation address is valid... */
353 if (unlikely(reloc->offset > obj->base.size - 4)) {
354 DRM_ERROR("Relocation beyond object bounds: "
355 "obj %p target %d offset %d size %d.\n",
356 obj, reloc->target_handle,
357 (int) reloc->offset,
358 (int) obj->base.size);
359 return ret;
360 }
361 if (unlikely(reloc->offset & 3)) {
362 DRM_ERROR("Relocation not 4-byte aligned: "
363 "obj %p target %d offset %d.\n",
364 obj, reloc->target_handle,
365 (int) reloc->offset);
366 return ret;
367 }
368
369 /* and points to somewhere within the target object. */
370 if (unlikely(reloc->delta >= target_obj->size)) {
371 DRM_ERROR("Relocation beyond target object bounds: "
372 "obj %p target %d delta %d size %d.\n",
373 obj, reloc->target_handle,
374 (int) reloc->delta,
375 (int) target_obj->size);
376 return ret;
377 }
378
379 reloc->delta += target_offset;
380 if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
381 uint32_t page_offset = reloc->offset & ~PAGE_MASK;
382 char *vaddr;
383
384 vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
385 *(uint32_t *)(vaddr + page_offset) = reloc->delta;
386 kunmap_atomic(vaddr);
387 } else {
388 struct drm_i915_private *dev_priv = dev->dev_private;
389 uint32_t __iomem *reloc_entry;
390 void __iomem *reloc_page;
391
392 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
393 if (ret)
394 return ret;
395
396 /* Map the page containing the relocation we're going to perform. */
397 reloc->offset += obj->gtt_offset;
398 reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
399 reloc->offset & PAGE_MASK);
400 reloc_entry = (uint32_t __iomem *)
401 (reloc_page + (reloc->offset & ~PAGE_MASK));
402 iowrite32(reloc->delta, reloc_entry);
403 io_mapping_unmap_atomic(reloc_page);
404 }
405
406 /* and update the user's relocation entry */
407 reloc->presumed_offset = target_offset;
408
409 return 0;
410}
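
To connect the checks above with what userspace submits, a hedged sketch of
filling one relocation entry (fill_reloc is a hypothetical helper; the struct
and domain flags come from i915_drm.h):

#include <stdint.h>
#include <string.h>
#include "i915_drm.h"

/* Sketch: ask the kernel to write the GTT address of `target_handle`
 * (plus `delta`) into this object at byte `offset`.  If
 * `presumed_offset` still matches the target's address, the code
 * above skips the rewrite entirely.
 */
static void fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
		       uint32_t target_handle, uint64_t offset,
		       uint32_t delta, uint64_t presumed_offset)
{
	memset(reloc, 0, sizeof(*reloc));
	reloc->target_handle = target_handle;
	reloc->offset = offset;		/* must be 4-byte aligned, in bounds */
	reloc->delta = delta;		/* must fall inside the target */
	reloc->presumed_offset = presumed_offset;
	reloc->read_domains = I915_GEM_DOMAIN_RENDER;
	reloc->write_domain = 0;	/* read-only use of the target */
}
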
411
412static int
413i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
414 struct eb_objects *eb,
415 struct drm_i915_gem_exec_object2 *entry)
416{
417 struct drm_i915_gem_relocation_entry __user *user_relocs;
418 int i, ret;
419
420 user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
421 for (i = 0; i < entry->relocation_count; i++) {
422 struct drm_i915_gem_relocation_entry reloc;
423
424 if (__copy_from_user_inatomic(&reloc,
425 user_relocs+i,
426 sizeof(reloc)))
427 return -EFAULT;
428
429 ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
430 if (ret)
431 return ret;
432
433 if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
434 &reloc.presumed_offset,
435 sizeof(reloc.presumed_offset)))
436 return -EFAULT;
437 }
438
439 return 0;
440}
441
442static int
443i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
444 struct eb_objects *eb,
445 struct drm_i915_gem_exec_object2 *entry,
446 struct drm_i915_gem_relocation_entry *relocs)
447{
448 int i, ret;
449
450 for (i = 0; i < entry->relocation_count; i++) {
451 ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
452 if (ret)
453 return ret;
454 }
455
456 return 0;
457}
458
459static int
460i915_gem_execbuffer_relocate(struct drm_device *dev,
461 struct eb_objects *eb,
462 struct list_head *objects,
463 struct drm_i915_gem_exec_object2 *exec)
464{
465 struct drm_i915_gem_object *obj;
466 int ret;
467
468 list_for_each_entry(obj, objects, exec_list) {
469 obj->base.pending_read_domains = 0;
470 obj->base.pending_write_domain = 0;
471 ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
472 if (ret)
473 return ret;
474 }
475
476 return 0;
477}
478
479static int
480i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
481 struct drm_file *file,
482 struct list_head *objects,
483 struct drm_i915_gem_exec_object2 *exec)
484{
485 struct drm_i915_gem_object *obj;
486 struct drm_i915_gem_exec_object2 *entry;
487 int ret, retry;
488 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
489
490 /* Attempt to pin all of the buffers into the GTT.
491 * This is done in 3 phases:
492 *
493 * 1a. Unbind all objects that do not match the GTT constraints for
494 * the execbuffer (fenceable, mappable, alignment etc).
495 * 1b. Increment pin count for already bound objects.
496 * 2. Bind new objects.
497 * 3. Decrement pin count.
498 *
499 * This avoids unnecessary unbinding of later objects in order to make
500 * room for the earlier objects *unless* we need to defragment.
501 */
502 retry = 0;
503 do {
504 ret = 0;
505
506 /* Unbind any ill-fitting objects or pin. */
507 entry = exec;
508 list_for_each_entry(obj, objects, exec_list) {
509 bool need_fence, need_mappable;
510
511 if (!obj->gtt_space) {
512 entry++;
513 continue;
514 }
515
516 need_fence =
517 has_fenced_gpu_access &&
518 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
519 obj->tiling_mode != I915_TILING_NONE;
520 need_mappable =
521 entry->relocation_count ? true : need_fence;
522
523 if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
524 (need_mappable && !obj->map_and_fenceable))
525 ret = i915_gem_object_unbind(obj);
526 else
527 ret = i915_gem_object_pin(obj,
528 entry->alignment,
529 need_mappable);
530 if (ret)
531 goto err;
532
533 entry++;
534 }
535
536 /* Bind fresh objects */
537 entry = exec;
538 list_for_each_entry(obj, objects, exec_list) {
539 bool need_fence;
540
541 need_fence =
542 has_fenced_gpu_access &&
543 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
544 obj->tiling_mode != I915_TILING_NONE;
545
546 if (!obj->gtt_space) {
547 bool need_mappable =
548 entry->relocation_count ? true : need_fence;
549
550 ret = i915_gem_object_pin(obj,
551 entry->alignment,
552 need_mappable);
553 if (ret)
554 break;
555 }
556
557 if (has_fenced_gpu_access) {
558 if (need_fence) {
559 ret = i915_gem_object_get_fence(obj, ring, 1);
560 if (ret)
561 break;
562 } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
563 obj->tiling_mode == I915_TILING_NONE) {
564 /* XXX pipelined! */
565 ret = i915_gem_object_put_fence(obj);
566 if (ret)
567 break;
568 }
569 obj->pending_fenced_gpu_access = need_fence;
570 }
571
572 entry->offset = obj->gtt_offset;
573 entry++;
574 }
575
576 /* Decrement pin count for bound objects */
577 list_for_each_entry(obj, objects, exec_list) {
578 if (obj->gtt_space)
579 i915_gem_object_unpin(obj);
580 }
581
582 if (ret != -ENOSPC || retry > 1)
583 return ret;
584
585 /* First attempt, just clear anything that is purgeable.
586 * Second attempt, clear the entire GTT.
587 */
588 ret = i915_gem_evict_everything(ring->dev, retry == 0);
589 if (ret)
590 return ret;
591
592 retry++;
593 } while (1);
594
595err:
596 obj = list_entry(obj->exec_list.prev,
597 struct drm_i915_gem_object,
598 exec_list);
599 while (objects != &obj->exec_list) {
600 if (obj->gtt_space)
601 i915_gem_object_unpin(obj);
602
603 obj = list_entry(obj->exec_list.prev,
604 struct drm_i915_gem_object,
605 exec_list);
606 }
607
608 return ret;
609}
610
611static int
612i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
613 struct drm_file *file,
614 struct intel_ring_buffer *ring,
615 struct list_head *objects,
616 struct eb_objects *eb,
617 struct drm_i915_gem_exec_object2 *exec,
618 int count)
619{
620 struct drm_i915_gem_relocation_entry *reloc;
621 struct drm_i915_gem_object *obj;
622 int i, total, ret;
623
624 /* We may process another execbuffer during the unlock... */
625 while (!list_empty(objects)) {
626 obj = list_first_entry(objects,
627 struct drm_i915_gem_object,
628 exec_list);
629 list_del_init(&obj->exec_list);
630 drm_gem_object_unreference(&obj->base);
631 }
632
633 mutex_unlock(&dev->struct_mutex);
634
635 total = 0;
636 for (i = 0; i < count; i++)
637 total += exec[i].relocation_count;
638
639 reloc = drm_malloc_ab(total, sizeof(*reloc));
640 if (reloc == NULL) {
641 mutex_lock(&dev->struct_mutex);
642 return -ENOMEM;
643 }
644
645 total = 0;
646 for (i = 0; i < count; i++) {
647 struct drm_i915_gem_relocation_entry __user *user_relocs;
648
649 user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
650
651 if (copy_from_user(reloc+total, user_relocs,
652 exec[i].relocation_count * sizeof(*reloc))) {
653 ret = -EFAULT;
654 mutex_lock(&dev->struct_mutex);
655 goto err;
656 }
657
658 total += exec[i].relocation_count;
659 }
660
661 ret = i915_mutex_lock_interruptible(dev);
662 if (ret) {
663 mutex_lock(&dev->struct_mutex);
664 goto err;
665 }
666
667 /* reacquire the objects */
668 INIT_LIST_HEAD(objects);
669 eb_reset(eb);
670 for (i = 0; i < count; i++) {
671 struct drm_i915_gem_object *obj;
672
673 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
674 exec[i].handle));
675 if (obj == NULL) {
676 DRM_ERROR("Invalid object handle %d at index %d\n",
677 exec[i].handle, i);
678 ret = -ENOENT;
679 goto err;
680 }
681
682 list_add_tail(&obj->exec_list, objects);
683 obj->exec_handle = exec[i].handle;
684 eb_add_object(eb, obj);
685 }
686
687 ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
688 if (ret)
689 goto err;
690
691 total = 0;
692 list_for_each_entry(obj, objects, exec_list) {
693 obj->base.pending_read_domains = 0;
694 obj->base.pending_write_domain = 0;
695 ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
696 exec,
697 reloc + total);
698 if (ret)
699 goto err;
700
701 total += exec->relocation_count;
702 exec++;
703 }
704
 705	/* Leave the user relocations as they are; this is the painfully slow path,
706 * and we want to avoid the complication of dropping the lock whilst
707 * having buffers reserved in the aperture and so causing spurious
708 * ENOSPC for random operations.
709 */
710
711err:
712 drm_free_large(reloc);
713 return ret;
714}
715
716static void
717i915_gem_execbuffer_flush(struct drm_device *dev,
718 uint32_t invalidate_domains,
719 uint32_t flush_domains,
720 uint32_t flush_rings)
721{
722 drm_i915_private_t *dev_priv = dev->dev_private;
723 int i;
724
725 if (flush_domains & I915_GEM_DOMAIN_CPU)
726 intel_gtt_chipset_flush();
727
728 if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
729 for (i = 0; i < I915_NUM_RINGS; i++)
730 if (flush_rings & (1 << i))
731 i915_gem_flush_ring(dev, &dev_priv->ring[i],
732 invalidate_domains,
733 flush_domains);
734 }
735}
736
737static int
738i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
739 struct intel_ring_buffer *to)
740{
741 struct intel_ring_buffer *from = obj->ring;
742 u32 seqno;
743 int ret, idx;
744
745 if (from == NULL || to == from)
746 return 0;
747
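	/* Before gen6 there are no inter-ring semaphores, so the only way
	 * to order rendering between rings is to stall on the CPU.
	 */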
748 if (INTEL_INFO(obj->base.dev)->gen < 6)
749 return i915_gem_object_wait_rendering(obj, true);
750
751 idx = intel_ring_sync_index(from, to);
752
753 seqno = obj->last_rendering_seqno;
754 if (seqno <= from->sync_seqno[idx])
755 return 0;
756
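	/* The seqno may belong to a request that has not yet been emitted;
	 * add the request now so the target ring has a real breadcrumb to
	 * wait upon.
	 */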
757 if (seqno == from->outstanding_lazy_request) {
758 struct drm_i915_gem_request *request;
759
760 request = kzalloc(sizeof(*request), GFP_KERNEL);
761 if (request == NULL)
762 return -ENOMEM;
763
764 ret = i915_add_request(obj->base.dev, NULL, request, from);
765 if (ret) {
766 kfree(request);
767 return ret;
768 }
769
770 seqno = request->seqno;
771 }
772
773 from->sync_seqno[idx] = seqno;
774 return intel_ring_sync(to, from, seqno - 1);
775}
776
777static int
778i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
779 struct list_head *objects)
780{
781 struct drm_i915_gem_object *obj;
782 struct change_domains cd;
783 int ret;
784
785 cd.invalidate_domains = 0;
786 cd.flush_domains = 0;
787 cd.flush_rings = 0;
788 list_for_each_entry(obj, objects, exec_list)
789 i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
790
791 if (cd.invalidate_domains | cd.flush_domains) {
792#if WATCH_EXEC
793 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
794 __func__,
795 cd.invalidate_domains,
796 cd.flush_domains);
797#endif
798 i915_gem_execbuffer_flush(ring->dev,
799 cd.invalidate_domains,
800 cd.flush_domains,
801 cd.flush_rings);
802 }
803
804 list_for_each_entry(obj, objects, exec_list) {
805 ret = i915_gem_execbuffer_sync_rings(obj, ring);
806 if (ret)
807 return ret;
808 }
809
810 return 0;
811}
812
813static bool
814i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
815{
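	/* Both the batch start offset and its length must be qword
	 * (8 byte) aligned.
	 */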
816 return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
817}
818
819static int
820validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
821 int count)
822{
823 int i;
824
825 for (i = 0; i < count; i++) {
826 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
827 int length; /* limited by fault_in_pages_readable() */
828
829 /* First check for malicious input causing overflow */
830 if (exec[i].relocation_count >
831 INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
832 return -EINVAL;
833
834 length = exec[i].relocation_count *
835 sizeof(struct drm_i915_gem_relocation_entry);
836 if (!access_ok(VERIFY_READ, ptr, length))
837 return -EFAULT;
838
839 /* we may also need to update the presumed offsets */
840 if (!access_ok(VERIFY_WRITE, ptr, length))
841 return -EFAULT;
842
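		/* Prefault the relocation list now; taking a fault later,
		 * whilst struct_mutex is held, would force the slow path.
		 */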
843 if (fault_in_pages_readable(ptr, length))
844 return -EFAULT;
845 }
846
847 return 0;
848}
849
850static int
851i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
852 struct list_head *objects)
853{
854 struct drm_i915_gem_object *obj;
855 int flips;
856
857 /* Check for any pending flips. As we only maintain a flip queue depth
858 * of 1, we can simply insert a WAIT for the next display flip prior
859 * to executing the batch and avoid stalling the CPU.
860 */
861 flips = 0;
862 list_for_each_entry(obj, objects, exec_list) {
863 if (obj->base.write_domain)
864 flips |= atomic_read(&obj->pending_flip);
865 }
866 if (flips) {
867 int plane, flip_mask, ret;
868
869 for (plane = 0; flips >> plane; plane++) {
870 if (((flips >> plane) & 1) == 0)
871 continue;
872
873 if (plane)
874 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
875 else
876 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
877
878 ret = intel_ring_begin(ring, 2);
879 if (ret)
880 return ret;
881
882 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
883 intel_ring_emit(ring, MI_NOOP);
884 intel_ring_advance(ring);
885 }
886 }
887
888 return 0;
889}
890
891static void
892i915_gem_execbuffer_move_to_active(struct list_head *objects,
893 struct intel_ring_buffer *ring,
894 u32 seqno)
895{
896 struct drm_i915_gem_object *obj;
897
898 list_for_each_entry(obj, objects, exec_list) {
899 obj->base.read_domains = obj->base.pending_read_domains;
900 obj->base.write_domain = obj->base.pending_write_domain;
901 obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
902
903 i915_gem_object_move_to_active(obj, ring, seqno);
904 if (obj->base.write_domain) {
905 obj->dirty = 1;
906 obj->pending_gpu_write = true;
907 list_move_tail(&obj->gpu_write_list,
908 &ring->gpu_write_list);
909 intel_mark_busy(ring->dev, obj);
910 }
911
912 trace_i915_gem_object_change_domain(obj,
913 obj->base.read_domains,
914 obj->base.write_domain);
915 }
916}
917
918static void
919i915_gem_execbuffer_retire_commands(struct drm_device *dev,
920 struct drm_file *file,
921 struct intel_ring_buffer *ring)
922{
923 struct drm_i915_gem_request *request;
924 u32 flush_domains;
925
926 /*
927 * Ensure that the commands in the batch buffer are
928 * finished before the interrupt fires.
929 *
930 * The sampler always gets flushed on i965 (sigh).
931 */
932 flush_domains = 0;
933 if (INTEL_INFO(dev)->gen >= 4)
934 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
935
936 ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
937
938 /* Add a breadcrumb for the completion of the batch buffer */
939 request = kzalloc(sizeof(*request), GFP_KERNEL);
940 if (request == NULL || i915_add_request(dev, file, request, ring)) {
941 i915_gem_next_request_seqno(dev, ring);
942 kfree(request);
943 }
944}
945
946static int
947i915_gem_do_execbuffer(struct drm_device *dev, void *data,
948 struct drm_file *file,
949 struct drm_i915_gem_execbuffer2 *args,
950 struct drm_i915_gem_exec_object2 *exec)
951{
952 drm_i915_private_t *dev_priv = dev->dev_private;
953 struct list_head objects;
954 struct eb_objects *eb;
955 struct drm_i915_gem_object *batch_obj;
956 struct drm_clip_rect *cliprects = NULL;
957 struct intel_ring_buffer *ring;
958 u32 exec_start, exec_len;
959 u32 seqno;
960 int ret, mode, i;
961
962 if (!i915_gem_check_execbuffer(args)) {
963 DRM_ERROR("execbuf with invalid offset/length\n");
964 return -EINVAL;
965 }
966
967 ret = validate_exec_list(exec, args->buffer_count);
968 if (ret)
969 return ret;
970
971#if WATCH_EXEC
972 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
973 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
974#endif
975 switch (args->flags & I915_EXEC_RING_MASK) {
976 case I915_EXEC_DEFAULT:
977 case I915_EXEC_RENDER:
978 ring = &dev_priv->ring[RCS];
979 break;
980 case I915_EXEC_BSD:
981 if (!HAS_BSD(dev)) {
982 DRM_ERROR("execbuf with invalid ring (BSD)\n");
983 return -EINVAL;
984 }
985 ring = &dev_priv->ring[VCS];
986 break;
987 case I915_EXEC_BLT:
988 if (!HAS_BLT(dev)) {
989 DRM_ERROR("execbuf with invalid ring (BLT)\n");
990 return -EINVAL;
991 }
992 ring = &dev_priv->ring[BCS];
993 break;
994 default:
995 DRM_ERROR("execbuf with unknown ring: %d\n",
996 (int)(args->flags & I915_EXEC_RING_MASK));
997 return -EINVAL;
998 }
999
1000 mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1001 switch (mode) {
1002 case I915_EXEC_CONSTANTS_REL_GENERAL:
1003 case I915_EXEC_CONSTANTS_ABSOLUTE:
1004 case I915_EXEC_CONSTANTS_REL_SURFACE:
1005 if (ring == &dev_priv->ring[RCS] &&
1006 mode != dev_priv->relative_constants_mode) {
1007 if (INTEL_INFO(dev)->gen < 4)
1008 return -EINVAL;
1009
1010 if (INTEL_INFO(dev)->gen > 5 &&
1011 mode == I915_EXEC_CONSTANTS_REL_SURFACE)
1012 return -EINVAL;
1013
1014 ret = intel_ring_begin(ring, 4);
1015 if (ret)
1016 return ret;
1017
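			/* INSTPM is a masked register: the high 16 bits of
			 * the write select which of the low bits to update.
			 */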
1018 intel_ring_emit(ring, MI_NOOP);
1019 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1020 intel_ring_emit(ring, INSTPM);
1021 intel_ring_emit(ring,
1022 I915_EXEC_CONSTANTS_MASK << 16 | mode);
1023 intel_ring_advance(ring);
1024
1025 dev_priv->relative_constants_mode = mode;
1026 }
1027 break;
1028 default:
1029 DRM_ERROR("execbuf with unknown constants: %d\n", mode);
1030 return -EINVAL;
1031 }
1032
1033 if (args->buffer_count < 1) {
1034 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1035 return -EINVAL;
1036 }
1037
1038 if (args->num_cliprects != 0) {
1039 if (ring != &dev_priv->ring[RCS]) {
1040 DRM_ERROR("clip rectangles are only valid with the render ring\n");
1041 return -EINVAL;
1042 }
1043
1044 cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
1045 GFP_KERNEL);
1046 if (cliprects == NULL) {
1047 ret = -ENOMEM;
1048 goto pre_mutex_err;
1049 }
1050
1051 if (copy_from_user(cliprects,
1052 (struct drm_clip_rect __user *)(uintptr_t)
1053 args->cliprects_ptr,
1054 sizeof(*cliprects)*args->num_cliprects)) {
1055 ret = -EFAULT;
1056 goto pre_mutex_err;
1057 }
1058 }
1059
1060 ret = i915_mutex_lock_interruptible(dev);
1061 if (ret)
1062 goto pre_mutex_err;
1063
1064 if (dev_priv->mm.suspended) {
1065 mutex_unlock(&dev->struct_mutex);
1066 ret = -EBUSY;
1067 goto pre_mutex_err;
1068 }
1069
1070 eb = eb_create(args->buffer_count);
1071 if (eb == NULL) {
1072 mutex_unlock(&dev->struct_mutex);
1073 ret = -ENOMEM;
1074 goto pre_mutex_err;
1075 }
1076
1077 /* Look up object handles */
1078 INIT_LIST_HEAD(&objects);
1079 for (i = 0; i < args->buffer_count; i++) {
1080 struct drm_i915_gem_object *obj;
1081
1082 obj = to_intel_bo(drm_gem_object_lookup(dev, file,
1083 exec[i].handle));
1084 if (obj == NULL) {
1085 DRM_ERROR("Invalid object handle %d at index %d\n",
1086 exec[i].handle, i);
1087 /* prevent error path from reading uninitialized data */
1088 ret = -ENOENT;
1089 goto err;
1090 }
1091
1092 if (!list_empty(&obj->exec_list)) {
1093 DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
1094 obj, exec[i].handle, i);
1095 ret = -EINVAL;
1096 goto err;
1097 }
1098
1099 list_add_tail(&obj->exec_list, &objects);
1100 obj->exec_handle = exec[i].handle;
1101 eb_add_object(eb, obj);
1102 }
1103
1104 /* Move the objects en-masse into the GTT, evicting if necessary. */
1105 ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
1106 if (ret)
1107 goto err;
1108
1109 /* The objects are in their final locations, apply the relocations. */
1110 ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
1111 if (ret) {
1112 if (ret == -EFAULT) {
1113 ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
1114 &objects, eb,
1115 exec,
1116 args->buffer_count);
1117 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1118 }
1119 if (ret)
1120 goto err;
1121 }
1122
1123 /* Set the pending read domains for the batch buffer to COMMAND */
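	/* By definition the batch buffer is the last object in the list. */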
1124 batch_obj = list_entry(objects.prev,
1125 struct drm_i915_gem_object,
1126 exec_list);
1127 if (batch_obj->base.pending_write_domain) {
1128 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
1129 ret = -EINVAL;
1130 goto err;
1131 }
1132 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1133
1134 ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
1135 if (ret)
1136 goto err;
1137
1138 ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
1139 if (ret)
1140 goto err;
1141
1142 seqno = i915_gem_next_request_seqno(dev, ring);
1143 for (i = 0; i < I915_NUM_RINGS-1; i++) {
1144 if (seqno < ring->sync_seqno[i]) {
 1145			/* The GPU cannot handle its semaphore value wrapping,
1146 * so every billion or so execbuffers, we need to stall
1147 * the GPU in order to reset the counters.
1148 */
1149 ret = i915_gpu_idle(dev);
1150 if (ret)
1151 goto err;
1152
1153 BUG_ON(ring->sync_seqno[i]);
1154 }
1155 }
1156
1157 exec_start = batch_obj->gtt_offset + args->batch_start_offset;
1158 exec_len = args->batch_len;
1159 if (cliprects) {
1160 for (i = 0; i < args->num_cliprects; i++) {
1161 ret = i915_emit_box(dev, &cliprects[i],
1162 args->DR1, args->DR4);
1163 if (ret)
1164 goto err;
1165
1166 ret = ring->dispatch_execbuffer(ring,
1167 exec_start, exec_len);
1168 if (ret)
1169 goto err;
1170 }
1171 } else {
1172 ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
1173 if (ret)
1174 goto err;
1175 }
1176
1177 i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
1178 i915_gem_execbuffer_retire_commands(dev, file, ring);
1179
1180err:
1181 eb_destroy(eb);
1182 while (!list_empty(&objects)) {
1183 struct drm_i915_gem_object *obj;
1184
1185 obj = list_first_entry(&objects,
1186 struct drm_i915_gem_object,
1187 exec_list);
1188 list_del_init(&obj->exec_list);
1189 drm_gem_object_unreference(&obj->base);
1190 }
1191
1192 mutex_unlock(&dev->struct_mutex);
1193
1194pre_mutex_err:
1195 kfree(cliprects);
1196 return ret;
1197}
1198
1199/*
1200 * Legacy execbuffer just creates an exec2 list from the original exec object
1201 * list array and passes it to the real function.
1202 */
1203int
1204i915_gem_execbuffer(struct drm_device *dev, void *data,
1205 struct drm_file *file)
1206{
1207 struct drm_i915_gem_execbuffer *args = data;
1208 struct drm_i915_gem_execbuffer2 exec2;
1209 struct drm_i915_gem_exec_object *exec_list = NULL;
1210 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1211 int ret, i;
1212
1213#if WATCH_EXEC
1214 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1215 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1216#endif
1217
1218 if (args->buffer_count < 1) {
1219 DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
1220 return -EINVAL;
1221 }
1222
1223 /* Copy in the exec list from userland */
1224 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1225 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1226 if (exec_list == NULL || exec2_list == NULL) {
1227 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1228 args->buffer_count);
1229 drm_free_large(exec_list);
1230 drm_free_large(exec2_list);
1231 return -ENOMEM;
1232 }
1233 ret = copy_from_user(exec_list,
1234 (struct drm_i915_relocation_entry __user *)
1235 (uintptr_t) args->buffers_ptr,
1236 sizeof(*exec_list) * args->buffer_count);
1237 if (ret != 0) {
1238 DRM_ERROR("copy %d exec entries failed %d\n",
1239 args->buffer_count, ret);
1240 drm_free_large(exec_list);
1241 drm_free_large(exec2_list);
1242 return -EFAULT;
1243 }
1244
1245 for (i = 0; i < args->buffer_count; i++) {
1246 exec2_list[i].handle = exec_list[i].handle;
1247 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1248 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1249 exec2_list[i].alignment = exec_list[i].alignment;
1250 exec2_list[i].offset = exec_list[i].offset;
1251 if (INTEL_INFO(dev)->gen < 4)
1252 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1253 else
1254 exec2_list[i].flags = 0;
1255 }
1256
1257 exec2.buffers_ptr = args->buffers_ptr;
1258 exec2.buffer_count = args->buffer_count;
1259 exec2.batch_start_offset = args->batch_start_offset;
1260 exec2.batch_len = args->batch_len;
1261 exec2.DR1 = args->DR1;
1262 exec2.DR4 = args->DR4;
1263 exec2.num_cliprects = args->num_cliprects;
1264 exec2.cliprects_ptr = args->cliprects_ptr;
1265 exec2.flags = I915_EXEC_RENDER;
1266
1267 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1268 if (!ret) {
1269 /* Copy the new buffer offsets back to the user's exec list. */
1270 for (i = 0; i < args->buffer_count; i++)
1271 exec_list[i].offset = exec2_list[i].offset;
1272 /* ... and back out to userspace */
1273 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1274 (uintptr_t) args->buffers_ptr,
1275 exec_list,
1276 sizeof(*exec_list) * args->buffer_count);
1277 if (ret) {
1278 ret = -EFAULT;
1279 DRM_ERROR("failed to copy %d exec entries "
1280 "back to user (%d)\n",
1281 args->buffer_count, ret);
1282 }
1283 }
1284
1285 drm_free_large(exec_list);
1286 drm_free_large(exec2_list);
1287 return ret;
1288}
1289
1290int
1291i915_gem_execbuffer2(struct drm_device *dev, void *data,
1292 struct drm_file *file)
1293{
1294 struct drm_i915_gem_execbuffer2 *args = data;
1295 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1296 int ret;
1297
1298#if WATCH_EXEC
1299 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1300 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1301#endif
1302
1303 if (args->buffer_count < 1) {
1304 DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
1305 return -EINVAL;
1306 }
1307
1308 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1309 if (exec2_list == NULL) {
1310 DRM_ERROR("Failed to allocate exec list for %d buffers\n",
1311 args->buffer_count);
1312 return -ENOMEM;
1313 }
1314 ret = copy_from_user(exec2_list,
1315 (struct drm_i915_relocation_entry __user *)
1316 (uintptr_t) args->buffers_ptr,
1317 sizeof(*exec2_list) * args->buffer_count);
1318 if (ret != 0) {
1319 DRM_ERROR("copy %d exec entries failed %d\n",
1320 args->buffer_count, ret);
1321 drm_free_large(exec2_list);
1322 return -EFAULT;
1323 }
1324
1325 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1326 if (!ret) {
1327 /* Copy the new buffer offsets back to the user's exec list. */
1328 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1329 (uintptr_t) args->buffers_ptr,
1330 exec2_list,
1331 sizeof(*exec2_list) * args->buffer_count);
1332 if (ret) {
1333 ret = -EFAULT;
1334 DRM_ERROR("failed to copy %d exec entries "
1335 "back to user (%d)\n",
1336 args->buffer_count, ret);
1337 }
1338 }
1339
1340 drm_free_large(exec2_list);
1341 return ret;
1342}
1343
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 000000000000..86673e77d7cb
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,99 @@
1/*
2 * Copyright © 2010 Daniel Vetter
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 */
24
25#include "drmP.h"
26#include "drm.h"
27#include "i915_drm.h"
28#include "i915_drv.h"
29#include "i915_trace.h"
30#include "intel_drv.h"
31
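/* Rewrite the GTT entries for every bound object; used when the chipset
 * has lost the GTT contents, e.g. across suspend/resume.  Each object is
 * clflushed first so that no stale cachelines are fetched through the
 * aperture, and on DMAR systems the pages are reinserted via their
 * previously built sg lists.
 */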
32void i915_gem_restore_gtt_mappings(struct drm_device *dev)
33{
34 struct drm_i915_private *dev_priv = dev->dev_private;
35 struct drm_i915_gem_object *obj;
36
37 list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
38 i915_gem_clflush_object(obj);
39
40 if (dev_priv->mm.gtt->needs_dmar) {
41 BUG_ON(!obj->sg_list);
42
43 intel_gtt_insert_sg_entries(obj->sg_list,
44 obj->num_sg,
45 obj->gtt_space->start
46 >> PAGE_SHIFT,
47 obj->agp_type);
48 } else
49 intel_gtt_insert_pages(obj->gtt_space->start
50 >> PAGE_SHIFT,
51 obj->base.size >> PAGE_SHIFT,
52 obj->pages,
53 obj->agp_type);
54 }
55
56 intel_gtt_chipset_flush();
57}
58
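/* Insert the object's backing pages into the global GTT.  With DMAR the
 * pages must first be mapped through the IOMMU, which builds the sg list
 * that the GTT entries are then written from.
 */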
59int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
60{
61 struct drm_device *dev = obj->base.dev;
62 struct drm_i915_private *dev_priv = dev->dev_private;
63 int ret;
64
65 if (dev_priv->mm.gtt->needs_dmar) {
66 ret = intel_gtt_map_memory(obj->pages,
67 obj->base.size >> PAGE_SHIFT,
68 &obj->sg_list,
69 &obj->num_sg);
70 if (ret != 0)
71 return ret;
72
73 intel_gtt_insert_sg_entries(obj->sg_list,
74 obj->num_sg,
75 obj->gtt_space->start >> PAGE_SHIFT,
76 obj->agp_type);
77 } else
78 intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
79 obj->base.size >> PAGE_SHIFT,
80 obj->pages,
81 obj->agp_type);
82
83 return 0;
84}
85
86void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
87{
88 struct drm_device *dev = obj->base.dev;
89 struct drm_i915_private *dev_priv = dev->dev_private;
90
91 if (dev_priv->mm.gtt->needs_dmar) {
92 intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
93 obj->sg_list = NULL;
94 obj->num_sg = 0;
95 }
96
97 intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
98 obj->base.size >> PAGE_SHIFT);
99}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de70be1..22a32b9932c5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
181} 181}
182 182
 183/* Check pitch constraints for all chips & tiling formats */ 183/* Check pitch constraints for all chips & tiling formats */
184bool 184static bool
185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
186{ 186{
187 int tile_width; 187 int tile_width;
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
232 return true; 232 return true;
233} 233}
234 234
235bool 235/* Is the current GTT allocation valid for the change in tiling? */
236i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode) 236static bool
237i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
237{ 238{
238 struct drm_device *dev = obj->dev; 239 u32 size;
239 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
240
241 if (obj_priv->gtt_space == NULL)
242 return true;
243 240
244 if (tiling_mode == I915_TILING_NONE) 241 if (tiling_mode == I915_TILING_NONE)
245 return true; 242 return true;
246 243
247 if (INTEL_INFO(dev)->gen >= 4) 244 if (INTEL_INFO(obj->base.dev)->gen >= 4)
248 return true; 245 return true;
249 246
250 if (obj_priv->gtt_offset & (obj->size - 1)) 247 if (INTEL_INFO(obj->base.dev)->gen == 3) {
251 return false; 248 if (obj->gtt_offset & ~I915_FENCE_START_MASK)
252
253 if (IS_GEN3(dev)) {
254 if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
255 return false; 249 return false;
256 } else { 250 } else {
257 if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK) 251 if (obj->gtt_offset & ~I830_FENCE_START_MASK)
258 return false; 252 return false;
259 } 253 }
260 254
255 /*
256 * Previous chips need to be aligned to the size of the smallest
257 * fence register that can contain the object.
258 */
259 if (INTEL_INFO(obj->base.dev)->gen == 3)
260 size = 1024*1024;
261 else
262 size = 512*1024;
263
264 while (size < obj->base.size)
265 size <<= 1;
266
267 if (obj->gtt_space->size != size)
268 return false;
269
270 if (obj->gtt_offset & (size - 1))
271 return false;
272
261 return true; 273 return true;
262} 274}
263 275
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
267 */ 279 */
268int 280int
269i915_gem_set_tiling(struct drm_device *dev, void *data, 281i915_gem_set_tiling(struct drm_device *dev, void *data,
270 struct drm_file *file_priv) 282 struct drm_file *file)
271{ 283{
272 struct drm_i915_gem_set_tiling *args = data; 284 struct drm_i915_gem_set_tiling *args = data;
273 drm_i915_private_t *dev_priv = dev->dev_private; 285 drm_i915_private_t *dev_priv = dev->dev_private;
274 struct drm_gem_object *obj; 286 struct drm_i915_gem_object *obj;
275 struct drm_i915_gem_object *obj_priv;
276 int ret; 287 int ret;
277 288
278 ret = i915_gem_check_is_wedged(dev); 289 ret = i915_gem_check_is_wedged(dev);
279 if (ret) 290 if (ret)
280 return ret; 291 return ret;
281 292
282 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 293 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
283 if (obj == NULL) 294 if (obj == NULL)
284 return -ENOENT; 295 return -ENOENT;
285 obj_priv = to_intel_bo(obj);
286 296
287 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) { 297 if (!i915_tiling_ok(dev,
288 drm_gem_object_unreference_unlocked(obj); 298 args->stride, obj->base.size, args->tiling_mode)) {
299 drm_gem_object_unreference_unlocked(&obj->base);
289 return -EINVAL; 300 return -EINVAL;
290 } 301 }
291 302
292 if (obj_priv->pin_count) { 303 if (obj->pin_count) {
293 drm_gem_object_unreference_unlocked(obj); 304 drm_gem_object_unreference_unlocked(&obj->base);
294 return -EBUSY; 305 return -EBUSY;
295 } 306 }
296 307
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
324 } 335 }
325 336
326 mutex_lock(&dev->struct_mutex); 337 mutex_lock(&dev->struct_mutex);
327 if (args->tiling_mode != obj_priv->tiling_mode || 338 if (args->tiling_mode != obj->tiling_mode ||
328 args->stride != obj_priv->stride) { 339 args->stride != obj->stride) {
329 /* We need to rebind the object if its current allocation 340 /* We need to rebind the object if its current allocation
330 * no longer meets the alignment restrictions for its new 341 * no longer meets the alignment restrictions for its new
331 * tiling mode. Otherwise we can just leave it alone, but 342 * tiling mode. Otherwise we can just leave it alone, but
332 * need to ensure that any fence register is cleared. 343 * need to ensure that any fence register is cleared.
333 */ 344 */
334 if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode)) 345 i915_gem_release_mmap(obj);
335 ret = i915_gem_object_unbind(obj);
336 else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
337 ret = i915_gem_object_put_fence_reg(obj, true);
338 else
339 i915_gem_release_mmap(obj);
340 346
341 if (ret != 0) { 347 obj->map_and_fenceable =
342 args->tiling_mode = obj_priv->tiling_mode; 348 obj->gtt_space == NULL ||
343 args->stride = obj_priv->stride; 349 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
344 goto err; 350 i915_gem_object_fence_ok(obj, args->tiling_mode));
345 }
346 351
347 obj_priv->tiling_mode = args->tiling_mode; 352 obj->tiling_changed = true;
348 obj_priv->stride = args->stride; 353 obj->tiling_mode = args->tiling_mode;
354 obj->stride = args->stride;
349 } 355 }
350err: 356 drm_gem_object_unreference(&obj->base);
351 drm_gem_object_unreference(obj);
352 mutex_unlock(&dev->struct_mutex); 357 mutex_unlock(&dev->struct_mutex);
353 358
354 return ret; 359 return 0;
355} 360}
356 361
357/** 362/**
@@ -359,22 +364,20 @@ err:
359 */ 364 */
360int 365int
361i915_gem_get_tiling(struct drm_device *dev, void *data, 366i915_gem_get_tiling(struct drm_device *dev, void *data,
362 struct drm_file *file_priv) 367 struct drm_file *file)
363{ 368{
364 struct drm_i915_gem_get_tiling *args = data; 369 struct drm_i915_gem_get_tiling *args = data;
365 drm_i915_private_t *dev_priv = dev->dev_private; 370 drm_i915_private_t *dev_priv = dev->dev_private;
366 struct drm_gem_object *obj; 371 struct drm_i915_gem_object *obj;
367 struct drm_i915_gem_object *obj_priv;
368 372
369 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 373 obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
370 if (obj == NULL) 374 if (obj == NULL)
371 return -ENOENT; 375 return -ENOENT;
372 obj_priv = to_intel_bo(obj);
373 376
374 mutex_lock(&dev->struct_mutex); 377 mutex_lock(&dev->struct_mutex);
375 378
376 args->tiling_mode = obj_priv->tiling_mode; 379 args->tiling_mode = obj->tiling_mode;
377 switch (obj_priv->tiling_mode) { 380 switch (obj->tiling_mode) {
378 case I915_TILING_X: 381 case I915_TILING_X:
379 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 382 args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
380 break; 383 break;
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
394 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 397 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
395 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10; 398 args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
396 399
397 drm_gem_object_unreference(obj); 400 drm_gem_object_unreference(&obj->base);
398 mutex_unlock(&dev->struct_mutex); 401 mutex_unlock(&dev->struct_mutex);
399 402
400 return 0; 403 return 0;
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
424} 427}
425 428
426void 429void
427i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj) 430i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
428{ 431{
429 struct drm_device *dev = obj->dev; 432 struct drm_device *dev = obj->base.dev;
430 drm_i915_private_t *dev_priv = dev->dev_private; 433 drm_i915_private_t *dev_priv = dev->dev_private;
431 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 434 int page_count = obj->base.size >> PAGE_SHIFT;
432 int page_count = obj->size >> PAGE_SHIFT;
433 int i; 435 int i;
434 436
435 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) 437 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
436 return; 438 return;
437 439
438 if (obj_priv->bit_17 == NULL) 440 if (obj->bit_17 == NULL)
439 return; 441 return;
440 442
441 for (i = 0; i < page_count; i++) { 443 for (i = 0; i < page_count; i++) {
442 char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17; 444 char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
443 if ((new_bit_17 & 0x1) != 445 if ((new_bit_17 & 0x1) !=
444 (test_bit(i, obj_priv->bit_17) != 0)) { 446 (test_bit(i, obj->bit_17) != 0)) {
445 i915_gem_swizzle_page(obj_priv->pages[i]); 447 i915_gem_swizzle_page(obj->pages[i]);
446 set_page_dirty(obj_priv->pages[i]); 448 set_page_dirty(obj->pages[i]);
447 } 449 }
448 } 450 }
449} 451}
450 452
451void 453void
452i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj) 454i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
453{ 455{
454 struct drm_device *dev = obj->dev; 456 struct drm_device *dev = obj->base.dev;
455 drm_i915_private_t *dev_priv = dev->dev_private; 457 drm_i915_private_t *dev_priv = dev->dev_private;
456 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj); 458 int page_count = obj->base.size >> PAGE_SHIFT;
457 int page_count = obj->size >> PAGE_SHIFT;
458 int i; 459 int i;
459 460
460 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) 461 if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
461 return; 462 return;
462 463
463 if (obj_priv->bit_17 == NULL) { 464 if (obj->bit_17 == NULL) {
464 obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * 465 obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
465 sizeof(long), GFP_KERNEL); 466 sizeof(long), GFP_KERNEL);
466 if (obj_priv->bit_17 == NULL) { 467 if (obj->bit_17 == NULL) {
467 DRM_ERROR("Failed to allocate memory for bit 17 " 468 DRM_ERROR("Failed to allocate memory for bit 17 "
468 "record\n"); 469 "record\n");
469 return; 470 return;
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
471 } 472 }
472 473
473 for (i = 0; i < page_count; i++) { 474 for (i = 0; i < page_count; i++) {
474 if (page_to_phys(obj_priv->pages[i]) & (1 << 17)) 475 if (page_to_phys(obj->pages[i]) & (1 << 17))
475 __set_bit(i, obj_priv->bit_17); 476 __set_bit(i, obj->bit_17);
476 else 477 else
477 __clear_bit(i, obj_priv->bit_17); 478 __clear_bit(i, obj->bit_17);
478 } 479 }
479} 480}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7b..0dadc025b77b 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -67,20 +67,20 @@
67void 67void
68ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 68ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
69{ 69{
70 if ((dev_priv->gt_irq_mask_reg & mask) != 0) { 70 if ((dev_priv->gt_irq_mask & mask) != 0) {
71 dev_priv->gt_irq_mask_reg &= ~mask; 71 dev_priv->gt_irq_mask &= ~mask;
72 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 72 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
73 (void) I915_READ(GTIMR); 73 POSTING_READ(GTIMR);
74 } 74 }
75} 75}
76 76
77void 77void
78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask) 78ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
79{ 79{
80 if ((dev_priv->gt_irq_mask_reg & mask) != mask) { 80 if ((dev_priv->gt_irq_mask & mask) != mask) {
81 dev_priv->gt_irq_mask_reg |= mask; 81 dev_priv->gt_irq_mask |= mask;
82 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 82 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
83 (void) I915_READ(GTIMR); 83 POSTING_READ(GTIMR);
84 } 84 }
85} 85}
86 86
@@ -88,40 +88,40 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
88static void 88static void
89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 89ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
90{ 90{
91 if ((dev_priv->irq_mask_reg & mask) != 0) { 91 if ((dev_priv->irq_mask & mask) != 0) {
92 dev_priv->irq_mask_reg &= ~mask; 92 dev_priv->irq_mask &= ~mask;
93 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 93 I915_WRITE(DEIMR, dev_priv->irq_mask);
94 (void) I915_READ(DEIMR); 94 POSTING_READ(DEIMR);
95 } 95 }
96} 96}
97 97
98static inline void 98static inline void
99ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) 99ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
100{ 100{
101 if ((dev_priv->irq_mask_reg & mask) != mask) { 101 if ((dev_priv->irq_mask & mask) != mask) {
102 dev_priv->irq_mask_reg |= mask; 102 dev_priv->irq_mask |= mask;
103 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 103 I915_WRITE(DEIMR, dev_priv->irq_mask);
104 (void) I915_READ(DEIMR); 104 POSTING_READ(DEIMR);
105 } 105 }
106} 106}
107 107
108void 108void
109i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 109i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
110{ 110{
111 if ((dev_priv->irq_mask_reg & mask) != 0) { 111 if ((dev_priv->irq_mask & mask) != 0) {
112 dev_priv->irq_mask_reg &= ~mask; 112 dev_priv->irq_mask &= ~mask;
113 I915_WRITE(IMR, dev_priv->irq_mask_reg); 113 I915_WRITE(IMR, dev_priv->irq_mask);
114 (void) I915_READ(IMR); 114 POSTING_READ(IMR);
115 } 115 }
116} 116}
117 117
118void 118void
119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 119i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
120{ 120{
121 if ((dev_priv->irq_mask_reg & mask) != mask) { 121 if ((dev_priv->irq_mask & mask) != mask) {
122 dev_priv->irq_mask_reg |= mask; 122 dev_priv->irq_mask |= mask;
123 I915_WRITE(IMR, dev_priv->irq_mask_reg); 123 I915_WRITE(IMR, dev_priv->irq_mask);
124 (void) I915_READ(IMR); 124 POSTING_READ(IMR);
125 } 125 }
126} 126}
127 127
@@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
144 dev_priv->pipestat[pipe] |= mask; 144 dev_priv->pipestat[pipe] |= mask;
145 /* Enable the interrupt, clear any pending status */ 145 /* Enable the interrupt, clear any pending status */
146 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); 146 I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
147 (void) I915_READ(reg); 147 POSTING_READ(reg);
148 } 148 }
149} 149}
150 150
@@ -156,16 +156,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
156 156
157 dev_priv->pipestat[pipe] &= ~mask; 157 dev_priv->pipestat[pipe] &= ~mask;
158 I915_WRITE(reg, dev_priv->pipestat[pipe]); 158 I915_WRITE(reg, dev_priv->pipestat[pipe]);
159 (void) I915_READ(reg); 159 POSTING_READ(reg);
160 } 160 }
161} 161}
162 162
163/** 163/**
164 * intel_enable_asle - enable ASLE interrupt for OpRegion 164 * intel_enable_asle - enable ASLE interrupt for OpRegion
165 */ 165 */
166void intel_enable_asle (struct drm_device *dev) 166void intel_enable_asle(struct drm_device *dev)
167{ 167{
168 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 168 drm_i915_private_t *dev_priv = dev->dev_private;
169 unsigned long irqflags;
170
171 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
169 172
170 if (HAS_PCH_SPLIT(dev)) 173 if (HAS_PCH_SPLIT(dev))
171 ironlake_enable_display_irq(dev_priv, DE_GSE); 174 ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
176 i915_enable_pipestat(dev_priv, 0, 179 i915_enable_pipestat(dev_priv, 0,
177 PIPE_LEGACY_BLC_EVENT_ENABLE); 180 PIPE_LEGACY_BLC_EVENT_ENABLE);
178 } 181 }
182
183 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
179} 184}
180 185
181/** 186/**
@@ -243,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
243 return I915_READ(reg); 248 return I915_READ(reg);
244} 249}
245 250
251int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
252 int *vpos, int *hpos)
253{
254 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
255 u32 vbl = 0, position = 0;
256 int vbl_start, vbl_end, htotal, vtotal;
257 bool in_vbl = true;
258 int ret = 0;
259
260 if (!i915_pipe_enabled(dev, pipe)) {
261 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
262 "pipe %d\n", pipe);
263 return 0;
264 }
265
266 /* Get vtotal. */
267 vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
268
269 if (INTEL_INFO(dev)->gen >= 4) {
270 /* No obvious pixelcount register. Only query vertical
271 * scanout position from Display scan line register.
272 */
273 position = I915_READ(PIPEDSL(pipe));
274
275 /* Decode into vertical scanout position. Don't have
276 * horizontal scanout position.
277 */
278 *vpos = position & 0x1fff;
279 *hpos = 0;
280 } else {
281 /* Have access to pixelcount since start of frame.
282 * We can split this into vertical and horizontal
283 * scanout position.
284 */
285 position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
286
287 htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
288 *vpos = position / htotal;
289 *hpos = position - (*vpos * htotal);
290 }
291
292 /* Query vblank area. */
293 vbl = I915_READ(VBLANK(pipe));
294
295 /* Test position against vblank region. */
296 vbl_start = vbl & 0x1fff;
297 vbl_end = (vbl >> 16) & 0x1fff;
298
299 if ((*vpos < vbl_start) || (*vpos > vbl_end))
300 in_vbl = false;
301
302 /* Inside "upper part" of vblank area? Apply corrective offset: */
303 if (in_vbl && (*vpos >= vbl_start))
304 *vpos = *vpos - vtotal;
305
306 /* Readouts valid? */
307 if (vbl > 0)
308 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
309
310 /* In vblank? */
311 if (in_vbl)
312 ret |= DRM_SCANOUTPOS_INVBL;
313
314 return ret;
315}
316
317int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
318 int *max_error,
319 struct timeval *vblank_time,
320 unsigned flags)
321{
322 struct drm_crtc *drmcrtc;
323
324 if (crtc < 0 || crtc >= dev->num_crtcs) {
325 DRM_ERROR("Invalid crtc %d\n", crtc);
326 return -EINVAL;
327 }
328
329 /* Get drm_crtc to timestamp: */
330 drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
331
332 /* Helper routine in DRM core does all the work: */
333 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
334 vblank_time, flags, drmcrtc);
335}
336
246/* 337/*
247 * Handle hotplug events outside the interrupt handler proper. 338 * Handle hotplug events outside the interrupt handler proper.
248 */ 339 */
@@ -297,8 +388,8 @@ static void notify_ring(struct drm_device *dev,
297 struct intel_ring_buffer *ring) 388 struct intel_ring_buffer *ring)
298{ 389{
299 struct drm_i915_private *dev_priv = dev->dev_private; 390 struct drm_i915_private *dev_priv = dev->dev_private;
300 u32 seqno = ring->get_seqno(dev, ring); 391 u32 seqno = ring->get_seqno(ring);
301 ring->irq_gem_seqno = seqno; 392 ring->irq_seqno = seqno;
302 trace_i915_gem_request_complete(dev, seqno); 393 trace_i915_gem_request_complete(dev, seqno);
303 wake_up_all(&ring->irq_queue); 394 wake_up_all(&ring->irq_queue);
304 dev_priv->hangcheck_count = 0; 395 dev_priv->hangcheck_count = 0;
@@ -306,11 +397,49 @@ static void notify_ring(struct drm_device *dev,
306 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 397 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
307} 398}
308 399
400static void gen6_pm_irq_handler(struct drm_device *dev)
401{
402 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
403 u8 new_delay = dev_priv->cur_delay;
404 u32 pm_iir;
405
406 pm_iir = I915_READ(GEN6_PMIIR);
407 if (!pm_iir)
408 return;
409
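	/* Dynamic render p-state: step the GPU frequency delay up or down
	 * by one in response to the up/down threshold interrupts, clamped
	 * to the [min_delay, max_delay] range.
	 */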
410 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
411 if (dev_priv->cur_delay != dev_priv->max_delay)
412 new_delay = dev_priv->cur_delay + 1;
413 if (new_delay > dev_priv->max_delay)
414 new_delay = dev_priv->max_delay;
415 } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
416 if (dev_priv->cur_delay != dev_priv->min_delay)
417 new_delay = dev_priv->cur_delay - 1;
418 if (new_delay < dev_priv->min_delay) {
419 new_delay = dev_priv->min_delay;
420 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
421 I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
422 ((new_delay << 16) & 0x3f0000));
423 } else {
424 /* Make sure we continue to get down interrupts
425 * until we hit the minimum frequency */
426 I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
427 I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
428 }
429
430 }
431
432 gen6_set_rps(dev, new_delay);
433 dev_priv->cur_delay = new_delay;
434
435 I915_WRITE(GEN6_PMIIR, pm_iir);
436}
437
309static irqreturn_t ironlake_irq_handler(struct drm_device *dev) 438static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
310{ 439{
311 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 440 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
312 int ret = IRQ_NONE; 441 int ret = IRQ_NONE;
313 u32 de_iir, gt_iir, de_ier, pch_iir; 442 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
314 u32 hotplug_mask; 443 u32 hotplug_mask;
315 struct drm_i915_master_private *master_priv; 444 struct drm_i915_master_private *master_priv;
316 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; 445 u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +450,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
321 /* disable master interrupt before clearing iir */ 450 /* disable master interrupt before clearing iir */
322 de_ier = I915_READ(DEIER); 451 de_ier = I915_READ(DEIER);
323 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); 452 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
324 (void)I915_READ(DEIER); 453 POSTING_READ(DEIER);
325 454
326 de_iir = I915_READ(DEIIR); 455 de_iir = I915_READ(DEIIR);
327 gt_iir = I915_READ(GTIIR); 456 gt_iir = I915_READ(GTIIR);
328 pch_iir = I915_READ(SDEIIR); 457 pch_iir = I915_READ(SDEIIR);
458 pm_iir = I915_READ(GEN6_PMIIR);
329 459
330 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) 460 if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
461 (!IS_GEN6(dev) || pm_iir == 0))
331 goto done; 462 goto done;
332 463
333 if (HAS_PCH_CPT(dev)) 464 if (HAS_PCH_CPT(dev))
@@ -344,12 +475,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
344 READ_BREADCRUMB(dev_priv); 475 READ_BREADCRUMB(dev_priv);
345 } 476 }
346 477
347 if (gt_iir & GT_PIPE_NOTIFY) 478 if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
348 notify_ring(dev, &dev_priv->render_ring); 479 notify_ring(dev, &dev_priv->ring[RCS]);
349 if (gt_iir & bsd_usr_interrupt) 480 if (gt_iir & bsd_usr_interrupt)
350 notify_ring(dev, &dev_priv->bsd_ring); 481 notify_ring(dev, &dev_priv->ring[VCS]);
351 if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT) 482 if (gt_iir & GT_BLT_USER_INTERRUPT)
352 notify_ring(dev, &dev_priv->blt_ring); 483 notify_ring(dev, &dev_priv->ring[BCS]);
353 484
354 if (de_iir & DE_GSE) 485 if (de_iir & DE_GSE)
355 intel_opregion_gse_intr(dev); 486 intel_opregion_gse_intr(dev);
@@ -379,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
379 i915_handle_rps_change(dev); 510 i915_handle_rps_change(dev);
380 } 511 }
381 512
513 if (IS_GEN6(dev))
514 gen6_pm_irq_handler(dev);
515
382 /* should clear PCH hotplug event before clear CPU irq */ 516 /* should clear PCH hotplug event before clear CPU irq */
383 I915_WRITE(SDEIIR, pch_iir); 517 I915_WRITE(SDEIIR, pch_iir);
384 I915_WRITE(GTIIR, gt_iir); 518 I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +520,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
386 520
387done: 521done:
388 I915_WRITE(DEIER, de_ier); 522 I915_WRITE(DEIER, de_ier);
389 (void)I915_READ(DEIER); 523 POSTING_READ(DEIER);
390 524
391 return ret; 525 return ret;
392} 526}
@@ -423,28 +557,23 @@ static void i915_error_work_func(struct work_struct *work)
423#ifdef CONFIG_DEBUG_FS 557#ifdef CONFIG_DEBUG_FS
424static struct drm_i915_error_object * 558static struct drm_i915_error_object *
425i915_error_object_create(struct drm_device *dev, 559i915_error_object_create(struct drm_device *dev,
426 struct drm_gem_object *src) 560 struct drm_i915_gem_object *src)
427{ 561{
428 drm_i915_private_t *dev_priv = dev->dev_private; 562 drm_i915_private_t *dev_priv = dev->dev_private;
429 struct drm_i915_error_object *dst; 563 struct drm_i915_error_object *dst;
430 struct drm_i915_gem_object *src_priv;
431 int page, page_count; 564 int page, page_count;
432 u32 reloc_offset; 565 u32 reloc_offset;
433 566
434 if (src == NULL) 567 if (src == NULL || src->pages == NULL)
435 return NULL;
436
437 src_priv = to_intel_bo(src);
438 if (src_priv->pages == NULL)
439 return NULL; 568 return NULL;
440 569
441 page_count = src->size / PAGE_SIZE; 570 page_count = src->base.size / PAGE_SIZE;
442 571
443 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC); 572 dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
444 if (dst == NULL) 573 if (dst == NULL)
445 return NULL; 574 return NULL;
446 575
447 reloc_offset = src_priv->gtt_offset; 576 reloc_offset = src->gtt_offset;
448 for (page = 0; page < page_count; page++) { 577 for (page = 0; page < page_count; page++) {
449 unsigned long flags; 578 unsigned long flags;
450 void __iomem *s; 579 void __iomem *s;
@@ -466,7 +595,7 @@ i915_error_object_create(struct drm_device *dev,
466 reloc_offset += PAGE_SIZE; 595 reloc_offset += PAGE_SIZE;
467 } 596 }
468 dst->page_count = page_count; 597 dst->page_count = page_count;
469 dst->gtt_offset = src_priv->gtt_offset; 598 dst->gtt_offset = src->gtt_offset;
470 599
471 return dst; 600 return dst;
472 601
@@ -520,36 +649,96 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
520} 649}
521 650
522static u32 651static u32
523i915_ringbuffer_last_batch(struct drm_device *dev) 652i915_ringbuffer_last_batch(struct drm_device *dev,
653 struct intel_ring_buffer *ring)
524{ 654{
525 struct drm_i915_private *dev_priv = dev->dev_private; 655 struct drm_i915_private *dev_priv = dev->dev_private;
526 u32 head, bbaddr; 656 u32 head, bbaddr;
527 u32 *ring; 657 u32 *val;
528 658
529 /* Locate the current position in the ringbuffer and walk back 659 /* Locate the current position in the ringbuffer and walk back
530 * to find the most recently dispatched batch buffer. 660 * to find the most recently dispatched batch buffer.
531 */ 661 */
532 bbaddr = 0; 662 head = I915_READ_HEAD(ring) & HEAD_ADDR;
533 head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
534 ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
535 663
536 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { 664 val = (u32 *)(ring->virtual_start + head);
537 bbaddr = i915_get_bbaddr(dev, ring); 665 while (--val >= (u32 *)ring->virtual_start) {
666 bbaddr = i915_get_bbaddr(dev, val);
538 if (bbaddr) 667 if (bbaddr)
539 break; 668 return bbaddr;
540 } 669 }
541 670
542 if (bbaddr == 0) { 671 val = (u32 *)(ring->virtual_start + ring->size);
543 ring = (u32 *)(dev_priv->render_ring.virtual_start 672 while (--val >= (u32 *)ring->virtual_start) {
544 + dev_priv->render_ring.size); 673 bbaddr = i915_get_bbaddr(dev, val);
545 while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) { 674 if (bbaddr)
546 bbaddr = i915_get_bbaddr(dev, ring); 675 return bbaddr;
547 if (bbaddr)
548 break;
549 }
550 } 676 }
551 677
552 return bbaddr; 678 return 0;
679}
680
681static u32 capture_bo_list(struct drm_i915_error_buffer *err,
682 int count,
683 struct list_head *head)
684{
685 struct drm_i915_gem_object *obj;
686 int i = 0;
687
688 list_for_each_entry(obj, head, mm_list) {
689 err->size = obj->base.size;
690 err->name = obj->base.name;
691 err->seqno = obj->last_rendering_seqno;
692 err->gtt_offset = obj->gtt_offset;
693 err->read_domains = obj->base.read_domains;
694 err->write_domain = obj->base.write_domain;
695 err->fence_reg = obj->fence_reg;
696 err->pinned = 0;
697 if (obj->pin_count > 0)
698 err->pinned = 1;
699 if (obj->user_pin_count > 0)
700 err->pinned = -1;
701 err->tiling = obj->tiling_mode;
702 err->dirty = obj->dirty;
703 err->purgeable = obj->madv != I915_MADV_WILLNEED;
704 err->ring = obj->ring ? obj->ring->id : 0;
705
706 if (++i == count)
707 break;
708
709 err++;
710 }
711
712 return i;
713}
714
715static void i915_gem_record_fences(struct drm_device *dev,
716 struct drm_i915_error_state *error)
717{
718 struct drm_i915_private *dev_priv = dev->dev_private;
719 int i;
720
721 /* Fences */
722 switch (INTEL_INFO(dev)->gen) {
723 case 6:
724 for (i = 0; i < 16; i++)
725 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
726 break;
727 case 5:
728 case 4:
729 for (i = 0; i < 16; i++)
730 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
731 break;
732 case 3:
733 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
734 for (i = 0; i < 8; i++)
735 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
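		/* fall through - gen3 also uses the eight 830-style
		 * fence registers below */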
736 case 2:
737 for (i = 0; i < 8; i++)
738 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
739 break;
740
741 }
553} 742}
554 743
555/** 744/**
@@ -564,9 +753,9 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
564static void i915_capture_error_state(struct drm_device *dev) 753static void i915_capture_error_state(struct drm_device *dev)
565{ 754{
566 struct drm_i915_private *dev_priv = dev->dev_private; 755 struct drm_i915_private *dev_priv = dev->dev_private;
567 struct drm_i915_gem_object *obj_priv; 756 struct drm_i915_gem_object *obj;
568 struct drm_i915_error_state *error; 757 struct drm_i915_error_state *error;
569 struct drm_gem_object *batchbuffer[2]; 758 struct drm_i915_gem_object *batchbuffer[2];
570 unsigned long flags; 759 unsigned long flags;
571 u32 bbaddr; 760 u32 bbaddr;
572 int count; 761 int count;
@@ -585,20 +774,33 @@ static void i915_capture_error_state(struct drm_device *dev)
585 774
586 DRM_DEBUG_DRIVER("generating error event\n"); 775 DRM_DEBUG_DRIVER("generating error event\n");
587 776
588 error->seqno = 777 error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
589 dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
590 error->eir = I915_READ(EIR); 778 error->eir = I915_READ(EIR);
591 error->pgtbl_er = I915_READ(PGTBL_ER); 779 error->pgtbl_er = I915_READ(PGTBL_ER);
592 error->pipeastat = I915_READ(PIPEASTAT); 780 error->pipeastat = I915_READ(PIPEASTAT);
593 error->pipebstat = I915_READ(PIPEBSTAT); 781 error->pipebstat = I915_READ(PIPEBSTAT);
594 error->instpm = I915_READ(INSTPM); 782 error->instpm = I915_READ(INSTPM);
595 if (INTEL_INFO(dev)->gen < 4) { 783 error->error = 0;
596 error->ipeir = I915_READ(IPEIR); 784 if (INTEL_INFO(dev)->gen >= 6) {
597 error->ipehr = I915_READ(IPEHR); 785 error->error = I915_READ(ERROR_GEN6);
598 error->instdone = I915_READ(INSTDONE); 786
599 error->acthd = I915_READ(ACTHD); 787 error->bcs_acthd = I915_READ(BCS_ACTHD);
600 error->bbaddr = 0; 788 error->bcs_ipehr = I915_READ(BCS_IPEHR);
601 } else { 789 error->bcs_ipeir = I915_READ(BCS_IPEIR);
790 error->bcs_instdone = I915_READ(BCS_INSTDONE);
791 error->bcs_seqno = 0;
792 if (dev_priv->ring[BCS].get_seqno)
793 error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
794
795 error->vcs_acthd = I915_READ(VCS_ACTHD);
796 error->vcs_ipehr = I915_READ(VCS_IPEHR);
797 error->vcs_ipeir = I915_READ(VCS_IPEIR);
798 error->vcs_instdone = I915_READ(VCS_INSTDONE);
799 error->vcs_seqno = 0;
800 if (dev_priv->ring[VCS].get_seqno)
801 error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
802 }
803 if (INTEL_INFO(dev)->gen >= 4) {
602 error->ipeir = I915_READ(IPEIR_I965); 804 error->ipeir = I915_READ(IPEIR_I965);
603 error->ipehr = I915_READ(IPEHR_I965); 805 error->ipehr = I915_READ(IPEHR_I965);
604 error->instdone = I915_READ(INSTDONE_I965); 806 error->instdone = I915_READ(INSTDONE_I965);
@@ -606,42 +808,45 @@ static void i915_capture_error_state(struct drm_device *dev)
606 error->instdone1 = I915_READ(INSTDONE1); 808 error->instdone1 = I915_READ(INSTDONE1);
607 error->acthd = I915_READ(ACTHD_I965); 809 error->acthd = I915_READ(ACTHD_I965);
608 error->bbaddr = I915_READ64(BB_ADDR); 810 error->bbaddr = I915_READ64(BB_ADDR);
811 } else {
812 error->ipeir = I915_READ(IPEIR);
813 error->ipehr = I915_READ(IPEHR);
814 error->instdone = I915_READ(INSTDONE);
815 error->acthd = I915_READ(ACTHD);
816 error->bbaddr = 0;
609 } 817 }
818 i915_gem_record_fences(dev, error);
610 819
611 bbaddr = i915_ringbuffer_last_batch(dev); 820 bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
612 821
613 /* Grab the current batchbuffer, most likely to have crashed. */ 822 /* Grab the current batchbuffer, most likely to have crashed. */
614 batchbuffer[0] = NULL; 823 batchbuffer[0] = NULL;
615 batchbuffer[1] = NULL; 824 batchbuffer[1] = NULL;
616 count = 0; 825 count = 0;
617 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) { 826 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
618 struct drm_gem_object *obj = &obj_priv->base;
619
620 if (batchbuffer[0] == NULL && 827 if (batchbuffer[0] == NULL &&
621 bbaddr >= obj_priv->gtt_offset && 828 bbaddr >= obj->gtt_offset &&
622 bbaddr < obj_priv->gtt_offset + obj->size) 829 bbaddr < obj->gtt_offset + obj->base.size)
623 batchbuffer[0] = obj; 830 batchbuffer[0] = obj;
624 831
625 if (batchbuffer[1] == NULL && 832 if (batchbuffer[1] == NULL &&
626 error->acthd >= obj_priv->gtt_offset && 833 error->acthd >= obj->gtt_offset &&
627 error->acthd < obj_priv->gtt_offset + obj->size) 834 error->acthd < obj->gtt_offset + obj->base.size)
628 batchbuffer[1] = obj; 835 batchbuffer[1] = obj;
629 836
630 count++; 837 count++;
631 } 838 }
632 /* Scan the other lists for completeness for those bizarre errors. */ 839 /* Scan the other lists for completeness for those bizarre errors. */
633 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 840 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
634 list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) { 841 list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
635 struct drm_gem_object *obj = &obj_priv->base;
636
637 if (batchbuffer[0] == NULL && 842 if (batchbuffer[0] == NULL &&
638 bbaddr >= obj_priv->gtt_offset && 843 bbaddr >= obj->gtt_offset &&
639 bbaddr < obj_priv->gtt_offset + obj->size) 844 bbaddr < obj->gtt_offset + obj->base.size)
640 batchbuffer[0] = obj; 845 batchbuffer[0] = obj;
641 846
642 if (batchbuffer[1] == NULL && 847 if (batchbuffer[1] == NULL &&
643 error->acthd >= obj_priv->gtt_offset && 848 error->acthd >= obj->gtt_offset &&
644 error->acthd < obj_priv->gtt_offset + obj->size) 849 error->acthd < obj->gtt_offset + obj->base.size)
645 batchbuffer[1] = obj; 850 batchbuffer[1] = obj;
646 851
647 if (batchbuffer[0] && batchbuffer[1]) 852 if (batchbuffer[0] && batchbuffer[1])
@@ -649,17 +854,15 @@ static void i915_capture_error_state(struct drm_device *dev)
649 } 854 }
650 } 855 }
651 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) { 856 if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
652 list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) { 857 list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
653 struct drm_gem_object *obj = &obj_priv->base;
654
655 if (batchbuffer[0] == NULL && 858 if (batchbuffer[0] == NULL &&
656 bbaddr >= obj_priv->gtt_offset && 859 bbaddr >= obj->gtt_offset &&
657 bbaddr < obj_priv->gtt_offset + obj->size) 860 bbaddr < obj->gtt_offset + obj->base.size)
658 batchbuffer[0] = obj; 861 batchbuffer[0] = obj;
659 862
660 if (batchbuffer[1] == NULL && 863 if (batchbuffer[1] == NULL &&
661 error->acthd >= obj_priv->gtt_offset && 864 error->acthd >= obj->gtt_offset &&
662 error->acthd < obj_priv->gtt_offset + obj->size) 865 error->acthd < obj->gtt_offset + obj->base.size)
663 batchbuffer[1] = obj; 866 batchbuffer[1] = obj;
664 867
665 if (batchbuffer[0] && batchbuffer[1]) 868 if (batchbuffer[0] && batchbuffer[1])
@@ -678,46 +881,41 @@ static void i915_capture_error_state(struct drm_device *dev)
678 881
679 /* Record the ringbuffer */ 882 /* Record the ringbuffer */
680 error->ringbuffer = i915_error_object_create(dev, 883 error->ringbuffer = i915_error_object_create(dev,
681 dev_priv->render_ring.gem_object); 884 dev_priv->ring[RCS].obj);
682 885
683 /* Record buffers on the active list. */ 886 /* Record buffers on the active and pinned lists. */
684 error->active_bo = NULL; 887 error->active_bo = NULL;
685 error->active_bo_count = 0; 888 error->pinned_bo = NULL;
686 889
687 if (count) 890 error->active_bo_count = count;
891 list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
892 count++;
893 error->pinned_bo_count = count - error->active_bo_count;
894
895 if (count) {
688 error->active_bo = kmalloc(sizeof(*error->active_bo)*count, 896 error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
689 GFP_ATOMIC); 897 GFP_ATOMIC);
690 898 if (error->active_bo)
691 if (error->active_bo) { 899 error->pinned_bo =
692 int i = 0; 900 error->active_bo + error->active_bo_count;
693 list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
694 struct drm_gem_object *obj = &obj_priv->base;
695
696 error->active_bo[i].size = obj->size;
697 error->active_bo[i].name = obj->name;
698 error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
699 error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
700 error->active_bo[i].read_domains = obj->read_domains;
701 error->active_bo[i].write_domain = obj->write_domain;
702 error->active_bo[i].fence_reg = obj_priv->fence_reg;
703 error->active_bo[i].pinned = 0;
704 if (obj_priv->pin_count > 0)
705 error->active_bo[i].pinned = 1;
706 if (obj_priv->user_pin_count > 0)
707 error->active_bo[i].pinned = -1;
708 error->active_bo[i].tiling = obj_priv->tiling_mode;
709 error->active_bo[i].dirty = obj_priv->dirty;
710 error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
711
712 if (++i == count)
713 break;
714 }
715 error->active_bo_count = i;
716 } 901 }
717 902
903 if (error->active_bo)
904 error->active_bo_count =
905 capture_bo_list(error->active_bo,
906 error->active_bo_count,
907 &dev_priv->mm.active_list);
908
909 if (error->pinned_bo)
910 error->pinned_bo_count =
911 capture_bo_list(error->pinned_bo,
912 error->pinned_bo_count,
913 &dev_priv->mm.pinned_list);
914
718 do_gettimeofday(&error->time); 915 do_gettimeofday(&error->time);
719 916
720 error->overlay = intel_overlay_capture_error_state(dev); 917 error->overlay = intel_overlay_capture_error_state(dev);
918 error->display = intel_display_capture_error_state(dev);
721 919
722 spin_lock_irqsave(&dev_priv->error_lock, flags); 920 spin_lock_irqsave(&dev_priv->error_lock, flags);
723 if (dev_priv->first_error == NULL) { 921 if (dev_priv->first_error == NULL) {
@@ -775,7 +973,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
775 printk(KERN_ERR " ACTHD: 0x%08x\n", 973 printk(KERN_ERR " ACTHD: 0x%08x\n",
776 I915_READ(ACTHD_I965)); 974 I915_READ(ACTHD_I965));
777 I915_WRITE(IPEIR_I965, ipeir); 975 I915_WRITE(IPEIR_I965, ipeir);
778 (void)I915_READ(IPEIR_I965); 976 POSTING_READ(IPEIR_I965);
779 } 977 }
780 if (eir & GM45_ERROR_PAGE_TABLE) { 978 if (eir & GM45_ERROR_PAGE_TABLE) {
781 u32 pgtbl_err = I915_READ(PGTBL_ER); 979 u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
783 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 981 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
784 pgtbl_err); 982 pgtbl_err);
785 I915_WRITE(PGTBL_ER, pgtbl_err); 983 I915_WRITE(PGTBL_ER, pgtbl_err);
786 (void)I915_READ(PGTBL_ER); 984 POSTING_READ(PGTBL_ER);
787 } 985 }
788 } 986 }
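
A recurring change in this patch replaces open-coded (void)I915_READ(reg) posting reads with the POSTING_READ() macro. For reference - assuming the i915_drv.h definition of this era - the macro is the same idiom given a name: a read whose result is discarded, which forces the preceding write to be flushed to the hardware before execution continues.

	#define POSTING_READ(reg)	(void)I915_READ(reg)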
789 987
@@ -794,7 +992,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
794 printk(KERN_ERR " PGTBL_ER: 0x%08x\n", 992 printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
795 pgtbl_err); 993 pgtbl_err);
796 I915_WRITE(PGTBL_ER, pgtbl_err); 994 I915_WRITE(PGTBL_ER, pgtbl_err);
797 (void)I915_READ(PGTBL_ER); 995 POSTING_READ(PGTBL_ER);
798 } 996 }
799 } 997 }
800 998
@@ -825,7 +1023,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
825 printk(KERN_ERR " ACTHD: 0x%08x\n", 1023 printk(KERN_ERR " ACTHD: 0x%08x\n",
826 I915_READ(ACTHD)); 1024 I915_READ(ACTHD));
827 I915_WRITE(IPEIR, ipeir); 1025 I915_WRITE(IPEIR, ipeir);
828 (void)I915_READ(IPEIR); 1026 POSTING_READ(IPEIR);
829 } else { 1027 } else {
830 u32 ipeir = I915_READ(IPEIR_I965); 1028 u32 ipeir = I915_READ(IPEIR_I965);
831 1029
@@ -842,12 +1040,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
842 printk(KERN_ERR " ACTHD: 0x%08x\n", 1040 printk(KERN_ERR " ACTHD: 0x%08x\n",
843 I915_READ(ACTHD_I965)); 1041 I915_READ(ACTHD_I965));
844 I915_WRITE(IPEIR_I965, ipeir); 1042 I915_WRITE(IPEIR_I965, ipeir);
845 (void)I915_READ(IPEIR_I965); 1043 POSTING_READ(IPEIR_I965);
846 } 1044 }
847 } 1045 }
848 1046
849 I915_WRITE(EIR, eir); 1047 I915_WRITE(EIR, eir);
850 (void)I915_READ(EIR); 1048 POSTING_READ(EIR);
851 eir = I915_READ(EIR); 1049 eir = I915_READ(EIR);
852 if (eir) { 1050 if (eir) {
853 /* 1051 /*
@@ -870,7 +1068,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
870 * so userspace knows something bad happened (should trigger collection 1068 * so userspace knows something bad happened (should trigger collection
871 * of a ring dump etc.). 1069 * of a ring dump etc.).
872 */ 1070 */
873static void i915_handle_error(struct drm_device *dev, bool wedged) 1071void i915_handle_error(struct drm_device *dev, bool wedged)
874{ 1072{
875 struct drm_i915_private *dev_priv = dev->dev_private; 1073 struct drm_i915_private *dev_priv = dev->dev_private;
876 1074
@@ -884,11 +1082,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
884 /* 1082 /*
885 * Wakeup waiting processes so they don't hang 1083 * Wakeup waiting processes so they don't hang
886 */ 1084 */
887 wake_up_all(&dev_priv->render_ring.irq_queue); 1085 wake_up_all(&dev_priv->ring[RCS].irq_queue);
888 if (HAS_BSD(dev)) 1086 if (HAS_BSD(dev))
889 wake_up_all(&dev_priv->bsd_ring.irq_queue); 1087 wake_up_all(&dev_priv->ring[VCS].irq_queue);
890 if (HAS_BLT(dev)) 1088 if (HAS_BLT(dev))
891 wake_up_all(&dev_priv->blt_ring.irq_queue); 1089 wake_up_all(&dev_priv->ring[BCS].irq_queue);
892 } 1090 }
893 1091
894 queue_work(dev_priv->wq, &dev_priv->error_work); 1092 queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1097,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
899 drm_i915_private_t *dev_priv = dev->dev_private; 1097 drm_i915_private_t *dev_priv = dev->dev_private;
900 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; 1098 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
901 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1099 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
902 struct drm_i915_gem_object *obj_priv; 1100 struct drm_i915_gem_object *obj;
903 struct intel_unpin_work *work; 1101 struct intel_unpin_work *work;
904 unsigned long flags; 1102 unsigned long flags;
905 bool stall_detected; 1103 bool stall_detected;
@@ -918,13 +1116,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
918 } 1116 }
919 1117
920 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ 1118 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
921 obj_priv = to_intel_bo(work->pending_flip_obj); 1119 obj = work->pending_flip_obj;
922 if (INTEL_INFO(dev)->gen >= 4) { 1120 if (INTEL_INFO(dev)->gen >= 4) {
923 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF; 1121 int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
924 stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset; 1122 stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
925 } else { 1123 } else {
926 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR; 1124 int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
927 stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset + 1125 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
928 crtc->y * crtc->fb->pitch + 1126 crtc->y * crtc->fb->pitch +
929 crtc->x * crtc->fb->bits_per_pixel/8); 1127 crtc->x * crtc->fb->bits_per_pixel/8);
930 } 1128 }
@@ -970,7 +1168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
970 * It doesn't set the bit in iir again, but it still produces 1168 * It doesn't set the bit in iir again, but it still produces
971 * interrupts (for non-MSI). 1169 * interrupts (for non-MSI).
972 */ 1170 */
973 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1171 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
974 pipea_stats = I915_READ(PIPEASTAT); 1172 pipea_stats = I915_READ(PIPEASTAT);
975 pipeb_stats = I915_READ(PIPEBSTAT); 1173 pipeb_stats = I915_READ(PIPEBSTAT);
976 1174
@@ -993,7 +1191,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
993 I915_WRITE(PIPEBSTAT, pipeb_stats); 1191 I915_WRITE(PIPEBSTAT, pipeb_stats);
994 irq_received = 1; 1192 irq_received = 1;
995 } 1193 }
996 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1194 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
997 1195
998 if (!irq_received) 1196 if (!irq_received)
999 break; 1197 break;
@@ -1026,9 +1224,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
1026 } 1224 }
1027 1225
1028 if (iir & I915_USER_INTERRUPT) 1226 if (iir & I915_USER_INTERRUPT)
1029 notify_ring(dev, &dev_priv->render_ring); 1227 notify_ring(dev, &dev_priv->ring[RCS]);
1030 if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT)) 1228 if (iir & I915_BSD_USER_INTERRUPT)
1031 notify_ring(dev, &dev_priv->bsd_ring); 1229 notify_ring(dev, &dev_priv->ring[VCS]);
1032 1230
1033 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { 1231 if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
1034 intel_prepare_page_flip(dev, 0); 1232 intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1299,13 @@ static int i915_emit_irq(struct drm_device * dev)
1101 if (master_priv->sarea_priv) 1299 if (master_priv->sarea_priv)
1102 master_priv->sarea_priv->last_enqueue = dev_priv->counter; 1300 master_priv->sarea_priv->last_enqueue = dev_priv->counter;
1103 1301
1104 BEGIN_LP_RING(4); 1302 if (BEGIN_LP_RING(4) == 0) {
1105 OUT_RING(MI_STORE_DWORD_INDEX); 1303 OUT_RING(MI_STORE_DWORD_INDEX);
1106 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 1304 OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
1107 OUT_RING(dev_priv->counter); 1305 OUT_RING(dev_priv->counter);
1108 OUT_RING(MI_USER_INTERRUPT); 1306 OUT_RING(MI_USER_INTERRUPT);
1109 ADVANCE_LP_RING(); 1307 ADVANCE_LP_RING();
1308 }
1110 1309
1111 return dev_priv->counter; 1310 return dev_priv->counter;
1112} 1311}
@@ -1114,12 +1313,11 @@ static int i915_emit_irq(struct drm_device * dev)
1114void i915_trace_irq_get(struct drm_device *dev, u32 seqno) 1313void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
1115{ 1314{
1116 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1315 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1117 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 1316 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1118 1317
1119 if (dev_priv->trace_irq_seqno == 0) 1318 if (dev_priv->trace_irq_seqno == 0 &&
1120 render_ring->user_irq_get(dev, render_ring); 1319 ring->irq_get(ring))
1121 1320 dev_priv->trace_irq_seqno = seqno;
1122 dev_priv->trace_irq_seqno = seqno;
1123} 1321}
1124 1322
1125static int i915_wait_irq(struct drm_device * dev, int irq_nr) 1323static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1325,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1127 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1325 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1128 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv; 1326 struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
1129 int ret = 0; 1327 int ret = 0;
1130 struct intel_ring_buffer *render_ring = &dev_priv->render_ring; 1328 struct intel_ring_buffer *ring = LP_RING(dev_priv);
1131 1329
1132 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr, 1330 DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
1133 READ_BREADCRUMB(dev_priv)); 1331 READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1339,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
1141 if (master_priv->sarea_priv) 1339 if (master_priv->sarea_priv)
1142 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 1340 master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
1143 1341
1144 render_ring->user_irq_get(dev, render_ring); 1342 ret = -ENODEV;
1145 DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ, 1343 if (ring->irq_get(ring)) {
1146 READ_BREADCRUMB(dev_priv) >= irq_nr); 1344 DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
1147 render_ring->user_irq_put(dev, render_ring); 1345 READ_BREADCRUMB(dev_priv) >= irq_nr);
1346 ring->irq_put(ring);
1347 }
1148 1348
1149 if (ret == -EBUSY) { 1349 if (ret == -EBUSY) {
1150 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 1350 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1363,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
1163 drm_i915_irq_emit_t *emit = data; 1363 drm_i915_irq_emit_t *emit = data;
1164 int result; 1364 int result;
1165 1365
1166 if (!dev_priv || !dev_priv->render_ring.virtual_start) { 1366 if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
1167 DRM_ERROR("called with no initialization\n"); 1367 DRM_ERROR("called with no initialization\n");
1168 return -EINVAL; 1368 return -EINVAL;
1169 } 1369 }
@@ -1209,9 +1409,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1209 if (!i915_pipe_enabled(dev, pipe)) 1409 if (!i915_pipe_enabled(dev, pipe))
1210 return -EINVAL; 1410 return -EINVAL;
1211 1411
1212 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1412 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1213 if (HAS_PCH_SPLIT(dev)) 1413 if (HAS_PCH_SPLIT(dev))
1214 ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 1414 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
1215 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1415 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1216 else if (INTEL_INFO(dev)->gen >= 4) 1416 else if (INTEL_INFO(dev)->gen >= 4)
1217 i915_enable_pipestat(dev_priv, pipe, 1417 i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1419,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
1219 else 1419 else
1220 i915_enable_pipestat(dev_priv, pipe, 1420 i915_enable_pipestat(dev_priv, pipe,
1221 PIPE_VBLANK_INTERRUPT_ENABLE); 1421 PIPE_VBLANK_INTERRUPT_ENABLE);
1222 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1422 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1223 return 0; 1423 return 0;
1224} 1424}
1225 1425
@@ -1231,15 +1431,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
1231 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 1431 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
1232 unsigned long irqflags; 1432 unsigned long irqflags;
1233 1433
1234 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1434 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1235 if (HAS_PCH_SPLIT(dev)) 1435 if (HAS_PCH_SPLIT(dev))
1236 ironlake_disable_display_irq(dev_priv, (pipe == 0) ? 1436 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
1237 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK); 1437 DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
1238 else 1438 else
1239 i915_disable_pipestat(dev_priv, pipe, 1439 i915_disable_pipestat(dev_priv, pipe,
1240 PIPE_VBLANK_INTERRUPT_ENABLE | 1440 PIPE_VBLANK_INTERRUPT_ENABLE |
1241 PIPE_START_VBLANK_INTERRUPT_ENABLE); 1441 PIPE_START_VBLANK_INTERRUPT_ENABLE);
1242 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1442 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1243} 1443}
1244 1444
1245void i915_enable_interrupt (struct drm_device *dev) 1445void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1506,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
1306 return -EINVAL; 1506 return -EINVAL;
1307} 1507}
1308 1508
1309static struct drm_i915_gem_request * 1509static u32
1310i915_get_tail_request(struct drm_device *dev) 1510ring_last_seqno(struct intel_ring_buffer *ring)
1311{ 1511{
1312 drm_i915_private_t *dev_priv = dev->dev_private; 1512 return list_entry(ring->request_list.prev,
1313 return list_entry(dev_priv->render_ring.request_list.prev, 1513 struct drm_i915_gem_request, list)->seqno;
1314 struct drm_i915_gem_request, list); 1514}
1515
1516static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
1517{
1518 if (list_empty(&ring->request_list) ||
1519 i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
1520 /* Issue a wake-up to catch stuck h/w. */
1521 if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
1522 DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
1523 ring->name,
1524 ring->waiting_seqno,
1525 ring->get_seqno(ring));
1526 wake_up_all(&ring->irq_queue);
1527 *err = true;
1528 }
1529 return true;
1530 }
1531 return false;
1532}
1533
1534static bool kick_ring(struct intel_ring_buffer *ring)
1535{
1536 struct drm_device *dev = ring->dev;
1537 struct drm_i915_private *dev_priv = dev->dev_private;
1538 u32 tmp = I915_READ_CTL(ring);
1539 if (tmp & RING_WAIT) {
1540 DRM_ERROR("Kicking stuck wait on %s\n",
1541 ring->name);
1542 I915_WRITE_CTL(ring, tmp);
1543 return true;
1544 }
1545 if (IS_GEN6(dev) &&
1546 (tmp & RING_WAIT_SEMAPHORE)) {
1547 DRM_ERROR("Kicking stuck semaphore on %s\n",
1548 ring->name);
1549 I915_WRITE_CTL(ring, tmp);
1550 return true;
1551 }
1552 return false;
1315} 1553}
1316 1554
1317/** 1555/**
@@ -1325,6 +1563,17 @@ void i915_hangcheck_elapsed(unsigned long data)
1325 struct drm_device *dev = (struct drm_device *)data; 1563 struct drm_device *dev = (struct drm_device *)data;
1326 drm_i915_private_t *dev_priv = dev->dev_private; 1564 drm_i915_private_t *dev_priv = dev->dev_private;
1327 uint32_t acthd, instdone, instdone1; 1565 uint32_t acthd, instdone, instdone1;
1566 bool err = false;
1567
1568 /* If all work is done then ACTHD clearly hasn't advanced. */
1569 if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
1570 i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
1571 i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
1572 dev_priv->hangcheck_count = 0;
1573 if (err)
1574 goto repeat;
1575 return;
1576 }
1328 1577
1329 if (INTEL_INFO(dev)->gen < 4) { 1578 if (INTEL_INFO(dev)->gen < 4) {
1330 acthd = I915_READ(ACTHD); 1579 acthd = I915_READ(ACTHD);
@@ -1336,38 +1585,6 @@ void i915_hangcheck_elapsed(unsigned long data)
1336 instdone1 = I915_READ(INSTDONE1); 1585 instdone1 = I915_READ(INSTDONE1);
1337 } 1586 }
1338 1587
1339 /* If all work is done then ACTHD clearly hasn't advanced. */
1340 if (list_empty(&dev_priv->render_ring.request_list) ||
1341 i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
1342 i915_get_tail_request(dev)->seqno)) {
1343 bool missed_wakeup = false;
1344
1345 dev_priv->hangcheck_count = 0;
1346
1347 /* Issue a wake-up to catch stuck h/w. */
1348 if (dev_priv->render_ring.waiting_gem_seqno &&
1349 waitqueue_active(&dev_priv->render_ring.irq_queue)) {
1350 wake_up_all(&dev_priv->render_ring.irq_queue);
1351 missed_wakeup = true;
1352 }
1353
1354 if (dev_priv->bsd_ring.waiting_gem_seqno &&
1355 waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
1356 wake_up_all(&dev_priv->bsd_ring.irq_queue);
1357 missed_wakeup = true;
1358 }
1359
1360 if (dev_priv->blt_ring.waiting_gem_seqno &&
1361 waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
1362 wake_up_all(&dev_priv->blt_ring.irq_queue);
1363 missed_wakeup = true;
1364 }
1365
1366 if (missed_wakeup)
1367 DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
1368 return;
1369 }
1370
1371 if (dev_priv->last_acthd == acthd && 1588 if (dev_priv->last_acthd == acthd &&
1372 dev_priv->last_instdone == instdone && 1589 dev_priv->last_instdone == instdone &&
1373 dev_priv->last_instdone1 == instdone1) { 1590 dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1597,17 @@ void i915_hangcheck_elapsed(unsigned long data)
1380 * and break the hang. This should work on 1597 * and break the hang. This should work on
1381 * all but the second generation chipsets. 1598 * all but the second generation chipsets.
1382 */ 1599 */
1383 u32 tmp = I915_READ(PRB0_CTL); 1600
1384 if (tmp & RING_WAIT) { 1601 if (kick_ring(&dev_priv->ring[RCS]))
1385 I915_WRITE(PRB0_CTL, tmp); 1602 goto repeat;
1386 POSTING_READ(PRB0_CTL); 1603
1387 goto out; 1604 if (HAS_BSD(dev) &&
1388 } 1605 kick_ring(&dev_priv->ring[VCS]))
1606 goto repeat;
1607
1608 if (HAS_BLT(dev) &&
1609 kick_ring(&dev_priv->ring[BCS]))
1610 goto repeat;
1389 } 1611 }
1390 1612
1391 i915_handle_error(dev, true); 1613 i915_handle_error(dev, true);
@@ -1399,7 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
1399 dev_priv->last_instdone1 = instdone1; 1621 dev_priv->last_instdone1 = instdone1;
1400 } 1622 }
1401 1623
1402out: 1624repeat:
1403	/* Reset timer in case the chip hangs without another request being added */	1625	/* Reset timer in case the chip hangs without another request being added */
1404 mod_timer(&dev_priv->hangcheck_timer, 1626 mod_timer(&dev_priv->hangcheck_timer,
1405 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)); 1627 jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1639,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
1417 1639
1418 I915_WRITE(DEIMR, 0xffffffff); 1640 I915_WRITE(DEIMR, 0xffffffff);
1419 I915_WRITE(DEIER, 0x0); 1641 I915_WRITE(DEIER, 0x0);
1420 (void) I915_READ(DEIER); 1642 POSTING_READ(DEIER);
1421 1643
1422 /* and GT */ 1644 /* and GT */
1423 I915_WRITE(GTIMR, 0xffffffff); 1645 I915_WRITE(GTIMR, 0xffffffff);
1424 I915_WRITE(GTIER, 0x0); 1646 I915_WRITE(GTIER, 0x0);
1425 (void) I915_READ(GTIER); 1647 POSTING_READ(GTIER);
1426 1648
1427 /* south display irq */ 1649 /* south display irq */
1428 I915_WRITE(SDEIMR, 0xffffffff); 1650 I915_WRITE(SDEIMR, 0xffffffff);
1429 I915_WRITE(SDEIER, 0x0); 1651 I915_WRITE(SDEIER, 0x0);
1430 (void) I915_READ(SDEIER); 1652 POSTING_READ(SDEIER);
1431} 1653}
1432 1654
1433static int ironlake_irq_postinstall(struct drm_device *dev) 1655static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1658,39 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1436	/* enable the kinds of interrupts that are always enabled */	1658	/* enable the kinds of interrupts that are always enabled */
1437 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | 1659 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
1438 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; 1660 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
1439 u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; 1661 u32 render_irqs;
1440 u32 hotplug_mask; 1662 u32 hotplug_mask;
1441 1663
1442 dev_priv->irq_mask_reg = ~display_mask; 1664 dev_priv->irq_mask = ~display_mask;
1443 dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
1444 1665
1445	/* these should always be able to generate an irq */	1666	/* these should always be able to generate an irq */
1446 I915_WRITE(DEIIR, I915_READ(DEIIR)); 1667 I915_WRITE(DEIIR, I915_READ(DEIIR));
1447 I915_WRITE(DEIMR, dev_priv->irq_mask_reg); 1668 I915_WRITE(DEIMR, dev_priv->irq_mask);
1448 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg); 1669 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
1449 (void) I915_READ(DEIER); 1670 POSTING_READ(DEIER);
1450 1671
1451 if (IS_GEN6(dev)) { 1672 dev_priv->gt_irq_mask = ~0;
1452 render_mask =
1453 GT_PIPE_NOTIFY |
1454 GT_GEN6_BSD_USER_INTERRUPT |
1455 GT_BLT_USER_INTERRUPT;
1456 }
1457
1458 dev_priv->gt_irq_mask_reg = ~render_mask;
1459 dev_priv->gt_irq_enable_reg = render_mask;
1460 1673
1461 I915_WRITE(GTIIR, I915_READ(GTIIR)); 1674 I915_WRITE(GTIIR, I915_READ(GTIIR));
1462 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg); 1675 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
1463 if (IS_GEN6(dev)) { 1676 if (IS_GEN6(dev)) {
1464 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT); 1677 I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
1465 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT); 1678 I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
1466 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT); 1679 I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
1467 } 1680 }
1468 1681
1469 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); 1682 if (IS_GEN6(dev))
1470 (void) I915_READ(GTIER); 1683 render_irqs =
1684 GT_USER_INTERRUPT |
1685 GT_GEN6_BSD_USER_INTERRUPT |
1686 GT_BLT_USER_INTERRUPT;
1687 else
1688 render_irqs =
1689 GT_USER_INTERRUPT |
1690 GT_PIPE_NOTIFY |
1691 GT_BSD_USER_INTERRUPT;
1692 I915_WRITE(GTIER, render_irqs);
1693 POSTING_READ(GTIER);
1471 1694
1472 if (HAS_PCH_CPT(dev)) { 1695 if (HAS_PCH_CPT(dev)) {
1473 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | 1696 hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
@@ -1477,13 +1700,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1477 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1700 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1478 } 1701 }
1479 1702
1480 dev_priv->pch_irq_mask_reg = ~hotplug_mask; 1703 dev_priv->pch_irq_mask = ~hotplug_mask;
1481 dev_priv->pch_irq_enable_reg = hotplug_mask;
1482 1704
1483 I915_WRITE(SDEIIR, I915_READ(SDEIIR)); 1705 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
1484 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg); 1706 I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
1485 I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg); 1707 I915_WRITE(SDEIER, hotplug_mask);
1486 (void) I915_READ(SDEIER); 1708 POSTING_READ(SDEIER);
1487 1709
1488 if (IS_IRONLAKE_M(dev)) { 1710 if (IS_IRONLAKE_M(dev)) {
1489 /* Clear & enable PCU event interrupts */ 1711 /* Clear & enable PCU event interrupts */
@@ -1519,7 +1741,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
1519 I915_WRITE(PIPEBSTAT, 0); 1741 I915_WRITE(PIPEBSTAT, 0);
1520 I915_WRITE(IMR, 0xffffffff); 1742 I915_WRITE(IMR, 0xffffffff);
1521 I915_WRITE(IER, 0x0); 1743 I915_WRITE(IER, 0x0);
1522 (void) I915_READ(IER); 1744 POSTING_READ(IER);
1523} 1745}
1524 1746
1525/* 1747/*
@@ -1532,11 +1754,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1532 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 1754 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
1533 u32 error_mask; 1755 u32 error_mask;
1534 1756
1535 DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue); 1757 DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
1536 if (HAS_BSD(dev)) 1758 if (HAS_BSD(dev))
1537 DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue); 1759 DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
1538 if (HAS_BLT(dev)) 1760 if (HAS_BLT(dev))
1539 DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue); 1761 DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
1540 1762
1541 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 1763 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
1542 1764
@@ -1544,7 +1766,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1544 return ironlake_irq_postinstall(dev); 1766 return ironlake_irq_postinstall(dev);
1545 1767
1546 /* Unmask the interrupts that we always want on. */ 1768 /* Unmask the interrupts that we always want on. */
1547 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 1769 dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
1548 1770
1549 dev_priv->pipestat[0] = 0; 1771 dev_priv->pipestat[0] = 0;
1550 dev_priv->pipestat[1] = 0; 1772 dev_priv->pipestat[1] = 0;
@@ -1553,7 +1775,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1553 /* Enable in IER... */ 1775 /* Enable in IER... */
1554 enable_mask |= I915_DISPLAY_PORT_INTERRUPT; 1776 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
1555 /* and unmask in IMR */ 1777 /* and unmask in IMR */
1556 dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT; 1778 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
1557 } 1779 }
1558 1780
1559 /* 1781 /*
@@ -1571,9 +1793,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
1571 } 1793 }
1572 I915_WRITE(EMR, error_mask); 1794 I915_WRITE(EMR, error_mask);
1573 1795
1574 I915_WRITE(IMR, dev_priv->irq_mask_reg); 1796 I915_WRITE(IMR, dev_priv->irq_mask);
1575 I915_WRITE(IER, enable_mask); 1797 I915_WRITE(IER, enable_mask);
1576 (void) I915_READ(IER); 1798 POSTING_READ(IER);
1577 1799
1578 if (I915_HAS_HOTPLUG(dev)) { 1800 if (I915_HAS_HOTPLUG(dev)) {
1579 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); 1801 u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a3112..d60860ec8cf4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
78#define GRDOM_RENDER (1<<2) 78#define GRDOM_RENDER (1<<2)
79#define GRDOM_MEDIA (3<<2) 79#define GRDOM_MEDIA (3<<2)
80 80
81#define GEN6_GDRST 0x941c
82#define GEN6_GRDOM_FULL (1 << 0)
83#define GEN6_GRDOM_RENDER (1 << 1)
84#define GEN6_GRDOM_MEDIA (1 << 2)
85#define GEN6_GRDOM_BLT (1 << 3)
86
81/* VGA stuff */ 87/* VGA stuff */
82 88
83#define VGA_ST01_MDA 0x3ba 89#define VGA_ST01_MDA 0x3ba
@@ -158,12 +164,23 @@
158#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 164#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
159#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 165#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
160#define MI_STORE_DWORD_INDEX_SHIFT 2 166#define MI_STORE_DWORD_INDEX_SHIFT 2
161#define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)	167/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
	168 * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
	169 *   simply ignores the register load under certain conditions.
	170 * - One can actually load arbitrarily many arbitrary registers: simply issue x
	171 *   address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
	172 */
173#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
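
A minimal sketch of honouring both rules above when loading a single register - the helper name load_register_imm is hypothetical, but intel_ring_begin()/intel_ring_emit()/intel_ring_advance() are the ring accessors from intel_ringbuffer.h:

	static int load_register_imm(struct intel_ring_buffer *ring, u32 reg, u32 val)
	{
		int ret = intel_ring_begin(ring, 4);	/* MI_NOOP + LRI header + one pair */
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);		/* hw may otherwise ignore the load */
		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit(ring, reg);		/* one address/value pair, so x = 1 */
		intel_ring_emit(ring, val);
		intel_ring_advance(ring);

		return 0;
	}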
162#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ 174#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
163#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 175#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
164#define MI_BATCH_NON_SECURE (1) 176#define MI_BATCH_NON_SECURE (1)
165#define MI_BATCH_NON_SECURE_I965 (1<<8) 177#define MI_BATCH_NON_SECURE_I965 (1<<8)
166#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 178#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
179#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
180#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
181#define MI_SEMAPHORE_UPDATE (1<<21)
182#define MI_SEMAPHORE_COMPARE (1<<20)
183#define MI_SEMAPHORE_REGISTER (1<<18)
167/* 184/*
168 * 3D instructions used by the kernel 185 * 3D instructions used by the kernel
169 */ 186 */
@@ -256,10 +273,6 @@
256 * Instruction and interrupt control regs 273 * Instruction and interrupt control regs
257 */ 274 */
258#define PGTBL_ER 0x02024 275#define PGTBL_ER 0x02024
259#define PRB0_TAIL 0x02030
260#define PRB0_HEAD 0x02034
261#define PRB0_START 0x02038
262#define PRB0_CTL 0x0203c
263#define RENDER_RING_BASE 0x02000 276#define RENDER_RING_BASE 0x02000
264#define BSD_RING_BASE 0x04000 277#define BSD_RING_BASE 0x04000
265#define GEN6_BSD_RING_BASE 0x12000 278#define GEN6_BSD_RING_BASE 0x12000
@@ -268,9 +281,13 @@
268#define RING_HEAD(base) ((base)+0x34) 281#define RING_HEAD(base) ((base)+0x34)
269#define RING_START(base) ((base)+0x38) 282#define RING_START(base) ((base)+0x38)
270#define RING_CTL(base) ((base)+0x3c) 283#define RING_CTL(base) ((base)+0x3c)
284#define RING_SYNC_0(base) ((base)+0x40)
285#define RING_SYNC_1(base) ((base)+0x44)
286#define RING_MAX_IDLE(base) ((base)+0x54)
271#define RING_HWS_PGA(base) ((base)+0x80) 287#define RING_HWS_PGA(base) ((base)+0x80)
272#define RING_HWS_PGA_GEN6(base) ((base)+0x2080) 288#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
273#define RING_ACTHD(base) ((base)+0x74) 289#define RING_ACTHD(base) ((base)+0x74)
290#define RING_NOPID(base) ((base)+0x94)
274#define TAIL_ADDR 0x001FFFF8 291#define TAIL_ADDR 0x001FFFF8
275#define HEAD_WRAP_COUNT 0xFFE00000 292#define HEAD_WRAP_COUNT 0xFFE00000
276#define HEAD_WRAP_ONE 0x00200000 293#define HEAD_WRAP_ONE 0x00200000
@@ -285,10 +302,17 @@
285#define RING_INVALID 0x00000000 302#define RING_INVALID 0x00000000
286#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */ 303#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
287#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */ 304#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
305#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
306#if 0
307#define PRB0_TAIL 0x02030
308#define PRB0_HEAD 0x02034
309#define PRB0_START 0x02038
310#define PRB0_CTL 0x0203c
288#define PRB1_TAIL 0x02040 /* 915+ only */ 311#define PRB1_TAIL 0x02040 /* 915+ only */
289#define PRB1_HEAD 0x02044 /* 915+ only */ 312#define PRB1_HEAD 0x02044 /* 915+ only */
290#define PRB1_START 0x02048 /* 915+ only */ 313#define PRB1_START 0x02048 /* 915+ only */
291#define PRB1_CTL 0x0204c /* 915+ only */ 314#define PRB1_CTL 0x0204c /* 915+ only */
315#endif
292#define IPEIR_I965 0x02064 316#define IPEIR_I965 0x02064
293#define IPEHR_I965 0x02068 317#define IPEHR_I965 0x02068
294#define INSTDONE_I965 0x0206c 318#define INSTDONE_I965 0x0206c
@@ -305,11 +329,42 @@
305#define INSTDONE 0x02090 329#define INSTDONE 0x02090
306#define NOPID 0x02094 330#define NOPID 0x02094
307#define HWSTAM 0x02098 331#define HWSTAM 0x02098
332#define VCS_INSTDONE 0x1206C
333#define VCS_IPEIR 0x12064
334#define VCS_IPEHR 0x12068
335#define VCS_ACTHD 0x12074
336#define BCS_INSTDONE 0x2206C
337#define BCS_IPEIR 0x22064
338#define BCS_IPEHR 0x22068
339#define BCS_ACTHD 0x22074
340
341#define ERROR_GEN6 0x040a0
342
343/* GM45+ chicken bits -- debug workaround bits that may be required
344 * for various sorts of correct behavior. The top 16 bits of each are
345 * the enables for writing to the corresponding low bit.
346 */
347#define _3D_CHICKEN 0x02084
348#define _3D_CHICKEN2 0x0208c
349/* Disables pipelining of read flushes past the SF-WIZ interface.
350 * Required on all Ironlake steppings according to the B-Spec, but the
351 * particular danger of not doing so is not specified.
352 */
353# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
354#define _3D_CHICKEN3 0x02090
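
Because the top 16 bits are write enables, setting or clearing one of these bits means writing the enable alongside the value; a sketch for _3D_CHICKEN2_WM_READ_PIPELINED, using the same masked-write idiom as the clock-gating setup code:

	/* the high word selects which low bit the write actually touches */
	I915_WRITE(_3D_CHICKEN2,
		   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
		   _3D_CHICKEN2_WM_READ_PIPELINED);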
308 355
309#define MI_MODE 0x0209c 356#define MI_MODE 0x0209c
310# define VS_TIMER_DISPATCH (1 << 6) 357# define VS_TIMER_DISPATCH (1 << 6)
311# define MI_FLUSH_ENABLE (1 << 11) 358# define MI_FLUSH_ENABLE (1 << 11)
312 359
360#define GFX_MODE 0x02520
361#define GFX_RUN_LIST_ENABLE (1<<15)
362#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
363#define GFX_SURFACE_FAULT_ENABLE (1<<12)
364#define GFX_REPLAY_MODE (1<<11)
365#define GFX_PSMI_GRANULARITY (1<<10)
366#define GFX_PPGTT_ENABLE (1<<9)
367
313#define SCPD0 0x0209c /* 915+ only */ 368#define SCPD0 0x0209c /* 915+ only */
314#define IER 0x020a0 369#define IER 0x020a0
315#define IIR 0x020a4 370#define IIR 0x020a4
@@ -461,7 +516,7 @@
461#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3) 516#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
462 517
463#define GEN6_BSD_IMR 0x120a8 518#define GEN6_BSD_IMR 0x120a8
464#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12) 519#define GEN6_BSD_USER_INTERRUPT (1 << 12)
465 520
466#define GEN6_BSD_RNCID 0x12198 521#define GEN6_BSD_RNCID 0x12198
467 522
@@ -541,6 +596,18 @@
541 596
542#define ILK_DISPLAY_CHICKEN1 0x42000 597#define ILK_DISPLAY_CHICKEN1 0x42000
543#define ILK_FBCQ_DIS (1<<22) 598#define ILK_FBCQ_DIS (1<<22)
599#define ILK_PABSTRETCH_DIS (1<<21)
600
601
602/*
603 * Framebuffer compression for Sandybridge
604 *
605 * The following two registers are of type GTTMMADR
606 */
607#define SNB_DPFC_CTL_SA 0x100100
608#define SNB_CPU_FENCE_ENABLE (1<<29)
609#define DPFC_CPU_FENCE_OFFSET 0x100104
610
544 611
545/* 612/*
546 * GPIO regs 613 * GPIO regs
@@ -900,6 +967,8 @@
900 */ 967 */
901#define MCHBAR_MIRROR_BASE 0x10000 968#define MCHBAR_MIRROR_BASE 0x10000
902 969
970#define MCHBAR_MIRROR_BASE_SNB 0x140000
971
903/** 915-945 and GM965 MCH register controlling DRAM channel access */ 972/** 915-945 and GM965 MCH register controlling DRAM channel access */
904#define DCC 0x10200 973#define DCC 0x10200
905#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 974#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1119,6 +1188,10 @@
1119#define DDRMPLL1 0X12c20 1188#define DDRMPLL1 0X12c20
1120#define PEG_BAND_GAP_DATA 0x14d68 1189#define PEG_BAND_GAP_DATA 0x14d68
1121 1190
1191#define GEN6_GT_PERF_STATUS 0x145948
1192#define GEN6_RP_STATE_LIMITS 0x145994
1193#define GEN6_RP_STATE_CAP 0x145998
1194
1122/* 1195/*
1123 * Logical Context regs 1196 * Logical Context regs
1124 */ 1197 */
@@ -1168,7 +1241,6 @@
1168#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B) 1241#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
1169#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B) 1242#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
1170#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B) 1243#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
1171#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
1172#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B) 1244#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
1173 1245
1174/* VGA port control */ 1246/* VGA port control */
@@ -2182,8 +2254,10 @@
2182#define PIPE_6BPC (2 << 5) 2254#define PIPE_6BPC (2 << 5)
2183#define PIPE_12BPC (3 << 5) 2255#define PIPE_12BPC (3 << 5)
2184 2256
2257#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
2185#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF) 2258#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
2186#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL) 2259#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
2260#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
2187 2261
2188#define DSPARB 0x70030 2262#define DSPARB 0x70030
2189#define DSPARB_CSTART_MASK (0x7f << 7) 2263#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2291,6 +2365,40 @@
2291 2365
2292#define ILK_FIFO_LINE_SIZE 64 2366#define ILK_FIFO_LINE_SIZE 64
2293 2367
2368/* define the WM info on Sandybridge */
2369#define SNB_DISPLAY_FIFO 128
2370#define SNB_DISPLAY_MAXWM 0x7f /* bit 16:22 */
2371#define SNB_DISPLAY_DFTWM 8
2372#define SNB_CURSOR_FIFO 32
2373#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
2374#define SNB_CURSOR_DFTWM 8
2375
2376#define SNB_DISPLAY_SR_FIFO 512
2377#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
2378#define SNB_DISPLAY_DFT_SRWM 0x3f
2379#define SNB_CURSOR_SR_FIFO 64
2380#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
2381#define SNB_CURSOR_DFT_SRWM 8
2382
2383#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
2384
2385#define SNB_FIFO_LINE_SIZE 64
2386
2387
2388/* the address from which we read all of the watermark latency values */
2389#define SSKPD 0x5d10
2390#define SSKPD_WM_MASK 0x3f
2391#define SSKPD_WM0_SHIFT 0
2392#define SSKPD_WM1_SHIFT 8
2393#define SSKPD_WM2_SHIFT 16
2394#define SSKPD_WM3_SHIFT 24
2395
2396#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
2397#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
2398#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
2399#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
2400#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
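
A hypothetical call site - each helper extracts one 6-bit latency field from the packed SSKPD dword (the macros expand to I915_READ(), so a dev_priv must be in scope):

	u32 wm0_latency = SNB_READ_WM0_LATENCY();	/* latency for the WM0 watermark level */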
2401
2294/* 2402/*
2295 * The two pipe frame counter registers are not synchronized, so 2403 * The two pipe frame counter registers are not synchronized, so
2296 * reading a stable value is somewhat tricky. The following code 2404 * reading a stable value is somewhat tricky. The following code
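
The diff context ends before "the following code", but the idiom the comment refers to is the usual high/low/high retry loop; a sketch using the PIPE_FRAME_* masks defined elsewhere in this header, with high_frame/low_frame standing for the per-pipe PIPEnFRAMEHIGH/PIPEnFRAMEPIXEL offsets:

	u32 high1, high2, low;

	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	return (high1 << 8) | (low >> PIPE_FRAME_LOW_SHIFT);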
@@ -2351,6 +2459,10 @@
2351#define CURBBASE 0x700c4 2459#define CURBBASE 0x700c4
2352#define CURBPOS 0x700c8 2460#define CURBPOS 0x700c8
2353 2461
2462#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
2463#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
2464#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
2465
2354/* Display A control */ 2466/* Display A control */
2355#define DSPACNTR 0x70180 2467#define DSPACNTR 0x70180
2356#define DISPLAY_PLANE_ENABLE (1<<31) 2468#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2586,10 +2698,14 @@
2586#define GTIER 0x4401c 2698#define GTIER 0x4401c
2587 2699
2588#define ILK_DISPLAY_CHICKEN2 0x42004 2700#define ILK_DISPLAY_CHICKEN2 0x42004
2701/* Required on all Ironlake and Sandybridge according to the B-Spec. */
2702#define ILK_ELPIN_409_SELECT (1 << 25)
2589#define ILK_DPARB_GATE (1<<22) 2703#define ILK_DPARB_GATE (1<<22)
2590#define ILK_VSDPFD_FULL (1<<21) 2704#define ILK_VSDPFD_FULL (1<<21)
2591#define ILK_DSPCLK_GATE 0x42020 2705#define ILK_DSPCLK_GATE 0x42020
2592#define ILK_DPARB_CLK_GATE (1<<5) 2706#define ILK_DPARB_CLK_GATE (1<<5)
2707#define ILK_DPFD_CLK_GATE (1<<7)
2708
2593/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */	2709/* According to the spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
2594#define ILK_CLK_FBC (1<<7) 2710#define ILK_CLK_FBC (1<<7)
2595#define ILK_DPFC_DIS1 (1<<8) 2711#define ILK_DPFC_DIS1 (1<<8)
@@ -2669,6 +2785,7 @@
2669#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B) 2785#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
2670 2786
2671#define PCH_FPA0 0xc6040 2787#define PCH_FPA0 0xc6040
2788#define FP_CB_TUNE (0x3<<22)
2672#define PCH_FPA1 0xc6044 2789#define PCH_FPA1 0xc6044
2673#define PCH_FPB0 0xc6048 2790#define PCH_FPB0 0xc6048
2674#define PCH_FPB1 0xc604c 2791#define PCH_FPB1 0xc604c
@@ -3033,6 +3150,7 @@
3033#define TRANS_DP_10BPC (1<<9) 3150#define TRANS_DP_10BPC (1<<9)
3034#define TRANS_DP_6BPC (2<<9) 3151#define TRANS_DP_6BPC (2<<9)
3035#define TRANS_DP_12BPC (3<<9) 3152#define TRANS_DP_12BPC (3<<9)
3153#define TRANS_DP_BPC_MASK (3<<9)
3036#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4) 3154#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
3037#define TRANS_DP_VSYNC_ACTIVE_LOW 0 3155#define TRANS_DP_VSYNC_ACTIVE_LOW 0
3038#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3) 3156#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
@@ -3052,4 +3170,66 @@
3052#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22) 3170#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
3053#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22) 3171#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
3054 3172
3173#define FORCEWAKE 0xA18C
3174#define FORCEWAKE_ACK 0x130090
3175
3176#define GEN6_RPNSWREQ 0xA008
3177#define GEN6_TURBO_DISABLE (1<<31)
3178#define GEN6_FREQUENCY(x) ((x)<<25)
3179#define GEN6_OFFSET(x) ((x)<<19)
3180#define GEN6_AGGRESSIVE_TURBO (0<<15)
3181#define GEN6_RC_VIDEO_FREQ 0xA00C
3182#define GEN6_RC_CONTROL 0xA090
3183#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
3184#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
3185#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
3186#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
3187#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
3188#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
3189#define GEN6_RC_CTL_HW_ENABLE (1<<31)
3190#define GEN6_RP_DOWN_TIMEOUT 0xA010
3191#define GEN6_RP_INTERRUPT_LIMITS 0xA014
3192#define GEN6_RPSTAT1 0xA01C
3193#define GEN6_RP_CONTROL 0xA024
3194#define GEN6_RP_MEDIA_TURBO (1<<11)
3195#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
3196#define GEN6_RP_MEDIA_IS_GFX (1<<8)
3197#define GEN6_RP_ENABLE (1<<7)
3198#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
3199#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
3200#define GEN6_RP_UP_THRESHOLD 0xA02C
3201#define GEN6_RP_DOWN_THRESHOLD 0xA030
3202#define GEN6_RP_UP_EI 0xA068
3203#define GEN6_RP_DOWN_EI 0xA06C
3204#define GEN6_RP_IDLE_HYSTERSIS 0xA070
3205#define GEN6_RC_STATE 0xA094
3206#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
3207#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
3208#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
3209#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
3210#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
3211#define GEN6_RC_SLEEP 0xA0B0
3212#define GEN6_RC1e_THRESHOLD 0xA0B4
3213#define GEN6_RC6_THRESHOLD 0xA0B8
3214#define GEN6_RC6p_THRESHOLD 0xA0BC
3215#define GEN6_RC6pp_THRESHOLD 0xA0C0
3216#define GEN6_PMINTRMSK 0xA168
3217
3218#define GEN6_PMISR 0x44020
3219#define GEN6_PMIMR 0x44024
3220#define GEN6_PMIIR 0x44028
3221#define GEN6_PMIER 0x4402C
3222#define GEN6_PM_MBOX_EVENT (1<<25)
3223#define GEN6_PM_THERMAL_EVENT (1<<24)
3224#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
3225#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
3226#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
3227#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
3228#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
3229
3230#define GEN6_PCODE_MAILBOX 0x138124
3231#define GEN6_PCODE_READY (1<<31)
3232#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
3233#define GEN6_PCODE_DATA 0x138128
3234
3055#endif /* _I915_REG_H_ */ 3235#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 454c064f8ef7..410772466fa7 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,10 +235,21 @@ static void i915_restore_vga(struct drm_device *dev)
235static void i915_save_modeset_reg(struct drm_device *dev) 235static void i915_save_modeset_reg(struct drm_device *dev)
236{ 236{
237 struct drm_i915_private *dev_priv = dev->dev_private; 237 struct drm_i915_private *dev_priv = dev->dev_private;
238 int i;
238 239
239 if (drm_core_check_feature(dev, DRIVER_MODESET)) 240 if (drm_core_check_feature(dev, DRIVER_MODESET))
240 return; 241 return;
241 242
243 /* Cursor state */
244 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
245 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
246 dev_priv->saveCURABASE = I915_READ(CURABASE);
247 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
248 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
249 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
250 if (IS_GEN2(dev))
251 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
252
242 if (HAS_PCH_SPLIT(dev)) { 253 if (HAS_PCH_SPLIT(dev)) {
243 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL); 254 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
244 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL); 255 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -357,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
357 } 368 }
358 i915_save_palette(dev, PIPE_B); 369 i915_save_palette(dev, PIPE_B);
359 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 370 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
371
372 /* Fences */
373 switch (INTEL_INFO(dev)->gen) {
374 case 6:
375 for (i = 0; i < 16; i++)
376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
377 break;
378 case 5:
379 case 4:
380 for (i = 0; i < 16; i++)
381 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
382 break;
383 case 3:
384 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
385 for (i = 0; i < 8; i++)
386 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
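			/* fall through - gen3 also saves the eight gen2 fence registers below */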
387 case 2:
388 for (i = 0; i < 8; i++)
389 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
390 break;
391 }
392
360 return; 393 return;
361} 394}
362 395
@@ -365,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
365 struct drm_i915_private *dev_priv = dev->dev_private; 398 struct drm_i915_private *dev_priv = dev->dev_private;
366 int dpll_a_reg, fpa0_reg, fpa1_reg; 399 int dpll_a_reg, fpa0_reg, fpa1_reg;
367 int dpll_b_reg, fpb0_reg, fpb1_reg; 400 int dpll_b_reg, fpb0_reg, fpb1_reg;
401 int i;
368 402
369 if (drm_core_check_feature(dev, DRIVER_MODESET)) 403 if (drm_core_check_feature(dev, DRIVER_MODESET))
370 return; 404 return;
371 405
406 /* Fences */
407 switch (INTEL_INFO(dev)->gen) {
408 case 6:
409 for (i = 0; i < 16; i++)
410 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
411 break;
412 case 5:
413 case 4:
414 for (i = 0; i < 16; i++)
415 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
416 break;
417 case 3:
418 case 2:
419 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
420 for (i = 0; i < 8; i++)
421 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
422 for (i = 0; i < 8; i++)
423 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
424 break;
425 }
426
427
372 if (HAS_PCH_SPLIT(dev)) { 428 if (HAS_PCH_SPLIT(dev)) {
373 dpll_a_reg = PCH_DPLL_A; 429 dpll_a_reg = PCH_DPLL_A;
374 dpll_b_reg = PCH_DPLL_B; 430 dpll_b_reg = PCH_DPLL_B;
@@ -529,6 +585,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
529 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 585 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
530 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); 586 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
531 587
588 /* Cursor state */
589 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
590 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
591 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
592 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
593 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
594 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
595 if (IS_GEN2(dev))
596 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
597
532 return; 598 return;
533} 599}
534 600
@@ -543,16 +609,6 @@ void i915_save_display(struct drm_device *dev)
543 /* Don't save them in KMS mode */ 609 /* Don't save them in KMS mode */
544 i915_save_modeset_reg(dev); 610 i915_save_modeset_reg(dev);
545 611
546 /* Cursor state */
547 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
548 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
549 dev_priv->saveCURABASE = I915_READ(CURABASE);
550 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
551 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
552 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
553 if (IS_GEN2(dev))
554 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
555
556 /* CRT state */ 612 /* CRT state */
557 if (HAS_PCH_SPLIT(dev)) { 613 if (HAS_PCH_SPLIT(dev)) {
558 dev_priv->saveADPA = I915_READ(PCH_ADPA); 614 dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +713,6 @@ void i915_restore_display(struct drm_device *dev)
657 /* Don't restore them in KMS mode */ 713 /* Don't restore them in KMS mode */
658 i915_restore_modeset_reg(dev); 714 i915_restore_modeset_reg(dev);
659 715
660 /* Cursor state */
661 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
662 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
663 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
664 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
665 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
666 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
667 if (IS_GEN2(dev))
668 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
669
670 /* CRT state */ 716 /* CRT state */
671 if (HAS_PCH_SPLIT(dev)) 717 if (HAS_PCH_SPLIT(dev))
672 I915_WRITE(PCH_ADPA, dev_priv->saveADPA); 718 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
771 dev_priv->saveIMR = I915_READ(IMR); 817 dev_priv->saveIMR = I915_READ(IMR);
772 } 818 }
773 819
774 if (HAS_PCH_SPLIT(dev)) 820 if (IS_IRONLAKE_M(dev))
775 ironlake_disable_drps(dev); 821 ironlake_disable_drps(dev);
822 if (IS_GEN6(dev))
823 gen6_disable_rps(dev);
824
825 /* XXX disabling the clock gating breaks suspend on gm45
826 intel_disable_clock_gating(dev);
827 */
776 828
777 /* Cache mode state */ 829 /* Cache mode state */
778 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 830 dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
788 for (i = 0; i < 3; i++) 840 for (i = 0; i < 3; i++)
789 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 841 dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
790 842
791 /* Fences */
792 switch (INTEL_INFO(dev)->gen) {
793 case 6:
794 for (i = 0; i < 16; i++)
795 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
796 break;
797 case 5:
798 case 4:
799 for (i = 0; i < 16; i++)
800 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
801 break;
802 case 3:
803 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
804 for (i = 0; i < 8; i++)
805 dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
806 case 2:
807 for (i = 0; i < 8; i++)
808 dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
809 break;
810
811 }
812
813 return 0; 843 return 0;
814} 844}
815 845
@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
823 /* Hardware status page */ 853 /* Hardware status page */
824 I915_WRITE(HWS_PGA, dev_priv->saveHWS); 854 I915_WRITE(HWS_PGA, dev_priv->saveHWS);
825 855
826 /* Fences */
827 switch (INTEL_INFO(dev)->gen) {
828 case 6:
829 for (i = 0; i < 16; i++)
830 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
831 break;
832 case 5:
833 case 4:
834 for (i = 0; i < 16; i++)
835 I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
836 break;
837 case 3:
838 case 2:
839 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
840 for (i = 0; i < 8; i++)
841 I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
842 for (i = 0; i < 8; i++)
843 I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
844 break;
845 }
846
847 i915_restore_display(dev); 856 i915_restore_display(dev);
848 857
849 /* Interrupt state */ 858 /* Interrupt state */
@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
860 } 869 }
861 870
862 /* Clock gating state */ 871 /* Clock gating state */
863 intel_init_clock_gating(dev); 872 intel_enable_clock_gating(dev);
864 873
865 if (HAS_PCH_SPLIT(dev)) { 874 if (IS_IRONLAKE_M(dev)) {
866 ironlake_enable_drps(dev); 875 ironlake_enable_drps(dev);
867 intel_init_emon(dev); 876 intel_init_emon(dev);
868 } 877 }
869 878
879 if (IS_GEN6(dev))
880 gen6_enable_rps(dev_priv);
881
870 /* Cache mode state */ 882 /* Cache mode state */
871 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 883 I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
872 884
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc14..7f0fc3ed61aa 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
6#include <linux/tracepoint.h> 6#include <linux/tracepoint.h>
7 7
8#include <drm/drmP.h> 8#include <drm/drmP.h>
9#include "i915_drv.h"
9 10
10#undef TRACE_SYSTEM 11#undef TRACE_SYSTEM
11#define TRACE_SYSTEM i915 12#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
16 17
17TRACE_EVENT(i915_gem_object_create, 18TRACE_EVENT(i915_gem_object_create,
18 19
19 TP_PROTO(struct drm_gem_object *obj), 20 TP_PROTO(struct drm_i915_gem_object *obj),
20 21
21 TP_ARGS(obj), 22 TP_ARGS(obj),
22 23
23 TP_STRUCT__entry( 24 TP_STRUCT__entry(
24 __field(struct drm_gem_object *, obj) 25 __field(struct drm_i915_gem_object *, obj)
25 __field(u32, size) 26 __field(u32, size)
26 ), 27 ),
27 28
28 TP_fast_assign( 29 TP_fast_assign(
29 __entry->obj = obj; 30 __entry->obj = obj;
30 __entry->size = obj->size; 31 __entry->size = obj->base.size;
31 ), 32 ),
32 33
33 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) 34 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,
35 36
36TRACE_EVENT(i915_gem_object_bind, 37TRACE_EVENT(i915_gem_object_bind,
37 38
38 TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset), 39 TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
39 40
40 TP_ARGS(obj, gtt_offset), 41 TP_ARGS(obj, gtt_offset, mappable),
41 42
42 TP_STRUCT__entry( 43 TP_STRUCT__entry(
43 __field(struct drm_gem_object *, obj) 44 __field(struct drm_i915_gem_object *, obj)
44 __field(u32, gtt_offset) 45 __field(u32, gtt_offset)
46 __field(bool, mappable)
45 ), 47 ),
46 48
47 TP_fast_assign( 49 TP_fast_assign(
48 __entry->obj = obj; 50 __entry->obj = obj;
49 __entry->gtt_offset = gtt_offset; 51 __entry->gtt_offset = gtt_offset;
52 __entry->mappable = mappable;
50 ), 53 ),
51 54
52 TP_printk("obj=%p, gtt_offset=%08x", 55 TP_printk("obj=%p, gtt_offset=%08x%s",
53 __entry->obj, __entry->gtt_offset) 56 __entry->obj, __entry->gtt_offset,
57 __entry->mappable ? ", mappable" : "")
54); 58);
55 59
56TRACE_EVENT(i915_gem_object_change_domain, 60TRACE_EVENT(i915_gem_object_change_domain,
57 61
58 TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain), 62 TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
59 63
60 TP_ARGS(obj, old_read_domains, old_write_domain), 64 TP_ARGS(obj, old_read_domains, old_write_domain),
61 65
62 TP_STRUCT__entry( 66 TP_STRUCT__entry(
63 __field(struct drm_gem_object *, obj) 67 __field(struct drm_i915_gem_object *, obj)
64 __field(u32, read_domains) 68 __field(u32, read_domains)
65 __field(u32, write_domain) 69 __field(u32, write_domain)
66 ), 70 ),
67 71
68 TP_fast_assign( 72 TP_fast_assign(
69 __entry->obj = obj; 73 __entry->obj = obj;
70 __entry->read_domains = obj->read_domains | (old_read_domains << 16); 74 __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
71 __entry->write_domain = obj->write_domain | (old_write_domain << 16); 75 __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
72 ), 76 ),
73 77
74 TP_printk("obj=%p, read=%04x, write=%04x", 78 TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
76 __entry->read_domains, __entry->write_domain) 80 __entry->read_domains, __entry->write_domain)
77); 81);
78 82
79TRACE_EVENT(i915_gem_object_get_fence,
80
81 TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
82
83 TP_ARGS(obj, fence, tiling_mode),
84
85 TP_STRUCT__entry(
86 __field(struct drm_gem_object *, obj)
87 __field(int, fence)
88 __field(int, tiling_mode)
89 ),
90
91 TP_fast_assign(
92 __entry->obj = obj;
93 __entry->fence = fence;
94 __entry->tiling_mode = tiling_mode;
95 ),
96
97 TP_printk("obj=%p, fence=%d, tiling=%d",
98 __entry->obj, __entry->fence, __entry->tiling_mode)
99);
100
101DECLARE_EVENT_CLASS(i915_gem_object, 83DECLARE_EVENT_CLASS(i915_gem_object,
102 84
103 TP_PROTO(struct drm_gem_object *obj), 85 TP_PROTO(struct drm_i915_gem_object *obj),
104 86
105 TP_ARGS(obj), 87 TP_ARGS(obj),
106 88
107 TP_STRUCT__entry( 89 TP_STRUCT__entry(
108 __field(struct drm_gem_object *, obj) 90 __field(struct drm_i915_gem_object *, obj)
109 ), 91 ),
110 92
111 TP_fast_assign( 93 TP_fast_assign(
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
117 99
118DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush, 100DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
119 101
120 TP_PROTO(struct drm_gem_object *obj), 102 TP_PROTO(struct drm_i915_gem_object *obj),
121 103
122 TP_ARGS(obj) 104 TP_ARGS(obj)
123); 105);
124 106
125DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind, 107DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
126 108
127 TP_PROTO(struct drm_gem_object *obj), 109 TP_PROTO(struct drm_i915_gem_object *obj),
128 110
129 TP_ARGS(obj) 111 TP_ARGS(obj)
130); 112);
131 113
132DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy, 114DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
133 115
134 TP_PROTO(struct drm_gem_object *obj), 116 TP_PROTO(struct drm_i915_gem_object *obj),
135 117
136 TP_ARGS(obj) 118 TP_ARGS(obj)
137); 119);
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
263); 245);
264 246
265TRACE_EVENT(i915_flip_request, 247TRACE_EVENT(i915_flip_request,
266 TP_PROTO(int plane, struct drm_gem_object *obj), 248 TP_PROTO(int plane, struct drm_i915_gem_object *obj),
267 249
268 TP_ARGS(plane, obj), 250 TP_ARGS(plane, obj),
269 251
270 TP_STRUCT__entry( 252 TP_STRUCT__entry(
271 __field(int, plane) 253 __field(int, plane)
272 __field(struct drm_gem_object *, obj) 254 __field(struct drm_i915_gem_object *, obj)
273 ), 255 ),
274 256
275 TP_fast_assign( 257 TP_fast_assign(
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
281); 263);
282 264
283TRACE_EVENT(i915_flip_complete, 265TRACE_EVENT(i915_flip_complete,
284 TP_PROTO(int plane, struct drm_gem_object *obj), 266 TP_PROTO(int plane, struct drm_i915_gem_object *obj),
285 267
286 TP_ARGS(plane, obj), 268 TP_ARGS(plane, obj),
287 269
288 TP_STRUCT__entry( 270 TP_STRUCT__entry(
289 __field(int, plane) 271 __field(int, plane)
290 __field(struct drm_gem_object *, obj) 272 __field(struct drm_i915_gem_object *, obj)
291 ), 273 ),
292 274
293 TP_fast_assign( 275 TP_fast_assign(
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
298 TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj) 280 TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
299); 281);
300 282
283TRACE_EVENT(i915_reg_rw,
284 TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
285
286 TP_ARGS(cmd, reg, val, len),
287
288 TP_STRUCT__entry(
289 __field(int, cmd)
290 __field(uint32_t, reg)
291 __field(uint64_t, val)
292 __field(int, len)
293 ),
294
295 TP_fast_assign(
296 __entry->cmd = cmd;
297 __entry->reg = reg;
298 __entry->val = (uint64_t)val;
299 __entry->len = len;
300 ),
301
302 TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
303 __entry->cmd, __entry->reg, __entry->val, __entry->len)
304);
305
301#endif /* _I915_TRACE_H_ */ 306#endif /* _I915_TRACE_H_ */
302 307
303/* This part must be outside protection */ 308/* This part must be outside protection */
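
The new i915_reg_rw event added above records a traced register access with direction, offset, value, and width. A userspace sketch of the call pattern one would expect from the register I/O wrappers; the stub trace function mirrors the TP_PROTO signature, but the 'R'/'W' cmd encoding and the wrapper name are assumptions, not confirmed by this diff:

#include <stdint.h>
#include <stdio.h>

/* Stub standing in for the kernel tracepoint; signature mirrors
 * TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len) above. */
static void trace_i915_reg_rw(int cmd, uint32_t reg, uint64_t val, int len)
{
	printf("cmd=%c, reg=0x%x, val=0x%llx, len=%d\n",
	       cmd, reg, (unsigned long long)val, len);
}

/* Hypothetical read wrapper: emit the event alongside the MMIO access.
 * 'R' for reads is an assumed encoding; the diff only shows %c. */
static uint32_t example_read32(uint32_t reg)
{
	uint32_t val = 0xdeadbeef;	/* stand-in for readl(mmio + reg) */
	trace_i915_reg_rw('R', reg, val, sizeof(val));
	return val;
}

int main(void)
{
	example_read32(0x2030);		/* arbitrary offset for the demo */
	return 0;
}
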
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12c..2cb8e0b9f1ee 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
190 kfree(output.pointer); 190 kfree(output.pointer);
191} 191}
192 192
193static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
194{
195 return 0;
196}
197
198static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
199 enum vga_switcheroo_state state)
200{
201 return 0;
202}
203
204static int intel_dsm_init(void)
205{
206 return 0;
207}
208
209static int intel_dsm_get_client_id(struct pci_dev *pdev)
210{
211 if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
212 return VGA_SWITCHEROO_IGD;
213 else
214 return VGA_SWITCHEROO_DIS;
215}
216
217static struct vga_switcheroo_handler intel_dsm_handler = {
218 .switchto = intel_dsm_switchto,
219 .power_state = intel_dsm_power_state,
220 .init = intel_dsm_init,
221 .get_client_id = intel_dsm_get_client_id,
222};
223
224static bool intel_dsm_pci_probe(struct pci_dev *pdev) 193static bool intel_dsm_pci_probe(struct pci_dev *pdev)
225{ 194{
226 acpi_handle dhandle, intel_handle; 195 acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
276{ 245{
277 if (!intel_dsm_detect()) 246 if (!intel_dsm_detect())
278 return; 247 return;
279
280 vga_switcheroo_register_handler(&intel_dsm_handler);
281} 248}
282 249
283void intel_unregister_dsm_handler(void) 250void intel_unregister_dsm_handler(void)
284{ 251{
285 vga_switcheroo_unregister_handler();
286} 252}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bee24b1a58e8..880659680d0a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
642 .find_pll = intel_find_pll_ironlake_dp, 642 .find_pll = intel_find_pll_ironlake_dp,
643}; 643};
644 644
645static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) 645static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
646 int refclk)
646{ 647{
647 struct drm_device *dev = crtc->dev; 648 struct drm_device *dev = crtc->dev;
648 struct drm_i915_private *dev_priv = dev->dev_private; 649 struct drm_i915_private *dev_priv = dev->dev_private;
649 const intel_limit_t *limit; 650 const intel_limit_t *limit;
650 int refclk = 120;
651 651
652 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 652 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
653 if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
654 refclk = 100;
655
656 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == 653 if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
657 LVDS_CLKB_POWER_UP) { 654 LVDS_CLKB_POWER_UP) {
658 /* LVDS dual channel */ 655 /* LVDS dual channel */
659 if (refclk == 100) 656 if (refclk == 100000)
660 limit = &intel_limits_ironlake_dual_lvds_100m; 657 limit = &intel_limits_ironlake_dual_lvds_100m;
661 else 658 else
662 limit = &intel_limits_ironlake_dual_lvds; 659 limit = &intel_limits_ironlake_dual_lvds;
663 } else { 660 } else {
664 if (refclk == 100) 661 if (refclk == 100000)
665 limit = &intel_limits_ironlake_single_lvds_100m; 662 limit = &intel_limits_ironlake_single_lvds_100m;
666 else 663 else
667 limit = &intel_limits_ironlake_single_lvds; 664 limit = &intel_limits_ironlake_single_lvds;
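
Note the unit change in the hunk above: refclk is now passed in by the caller in kHz, so the spread-spectrum comparison moves from 100 to 100000. A trivial sketch of the limit-table selection with that unit convention; the 100 MHz SSC value is illustrative:

#include <stdio.h>

int main(void)
{
	/* refclk is expressed in kHz throughout, so the SSC case
	 * compares against 100000 rather than 100 (MHz). */
	int refclk_khz = 100000;	/* 100 MHz SSC reference, assumed */

	if (refclk_khz == 100000)
		puts("using the 100 MHz LVDS limit tables");
	else
		puts("using the 120 MHz LVDS limit tables");
	return 0;
}
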
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
702 return limit; 699 return limit;
703} 700}
704 701
705static const intel_limit_t *intel_limit(struct drm_crtc *crtc) 702static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
706{ 703{
707 struct drm_device *dev = crtc->dev; 704 struct drm_device *dev = crtc->dev;
708 const intel_limit_t *limit; 705 const intel_limit_t *limit;
709 706
710 if (HAS_PCH_SPLIT(dev)) 707 if (HAS_PCH_SPLIT(dev))
711 limit = intel_ironlake_limit(crtc); 708 limit = intel_ironlake_limit(crtc, refclk);
712 else if (IS_G4X(dev)) { 709 else if (IS_G4X(dev)) {
713 limit = intel_g4x_limit(crtc); 710 limit = intel_g4x_limit(crtc);
714 } else if (IS_PINEVIEW(dev)) { 711 } else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
773 * the given connectors. 770 * the given connectors.
774 */ 771 */
775 772
776static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) 773static bool intel_PLL_is_valid(struct drm_device *dev,
774 const intel_limit_t *limit,
775 const intel_clock_t *clock)
777{ 776{
778 const intel_limit_t *limit = intel_limit (crtc);
779 struct drm_device *dev = crtc->dev;
780
781 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) 777 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
782 INTELPllInvalid ("p1 out of range\n"); 778 INTELPllInvalid ("p1 out of range\n");
783 if (clock->p < limit->p.min || limit->p.max < clock->p) 779 if (clock->p < limit->p.min || limit->p.max < clock->p)
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
849 int this_err; 845 int this_err;
850 846
851 intel_clock(dev, refclk, &clock); 847 intel_clock(dev, refclk, &clock);
852 848 if (!intel_PLL_is_valid(dev, limit,
853 if (!intel_PLL_is_valid(crtc, &clock)) 849 &clock))
854 continue; 850 continue;
855 851
856 this_err = abs(clock.dot - target); 852 this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
912 int this_err; 908 int this_err;
913 909
914 intel_clock(dev, refclk, &clock); 910 intel_clock(dev, refclk, &clock);
915 if (!intel_PLL_is_valid(crtc, &clock)) 911 if (!intel_PLL_is_valid(dev, limit,
912 &clock))
916 continue; 913 continue;
917 this_err = abs(clock.dot - target) ; 914
915 this_err = abs(clock.dot - target);
918 if (this_err < err_most) { 916 if (this_err < err_most) {
919 *best_clock = clock; 917 *best_clock = clock;
920 err_most = this_err; 918 err_most = this_err;
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1066 struct drm_i915_private *dev_priv = dev->dev_private; 1064 struct drm_i915_private *dev_priv = dev->dev_private;
1067 struct drm_framebuffer *fb = crtc->fb; 1065 struct drm_framebuffer *fb = crtc->fb;
1068 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1066 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1069 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1067 struct drm_i915_gem_object *obj = intel_fb->obj;
1070 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1068 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1071 int plane, i; 1069 int plane, i;
1072 u32 fbc_ctl, fbc_ctl2; 1070 u32 fbc_ctl, fbc_ctl2;
1073 1071
1074 if (fb->pitch == dev_priv->cfb_pitch && 1072 if (fb->pitch == dev_priv->cfb_pitch &&
1075 obj_priv->fence_reg == dev_priv->cfb_fence && 1073 obj->fence_reg == dev_priv->cfb_fence &&
1076 intel_crtc->plane == dev_priv->cfb_plane && 1074 intel_crtc->plane == dev_priv->cfb_plane &&
1077 I915_READ(FBC_CONTROL) & FBC_CTL_EN) 1075 I915_READ(FBC_CONTROL) & FBC_CTL_EN)
1078 return; 1076 return;
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1086 1084
1087 /* FBC_CTL wants 64B units */ 1085 /* FBC_CTL wants 64B units */
1088 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1086 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1089 dev_priv->cfb_fence = obj_priv->fence_reg; 1087 dev_priv->cfb_fence = obj->fence_reg;
1090 dev_priv->cfb_plane = intel_crtc->plane; 1088 dev_priv->cfb_plane = intel_crtc->plane;
1091 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; 1089 plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
1092 1090
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1096 1094
1097 /* Set it up... */ 1095 /* Set it up... */
1098 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane; 1096 fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
1099 if (obj_priv->tiling_mode != I915_TILING_NONE) 1097 if (obj->tiling_mode != I915_TILING_NONE)
1100 fbc_ctl2 |= FBC_CTL_CPU_FENCE; 1098 fbc_ctl2 |= FBC_CTL_CPU_FENCE;
1101 I915_WRITE(FBC_CONTROL2, fbc_ctl2); 1099 I915_WRITE(FBC_CONTROL2, fbc_ctl2);
1102 I915_WRITE(FBC_FENCE_OFF, crtc->y); 1100 I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1107 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ 1105 fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
1108 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; 1106 fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
1109 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; 1107 fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
1110 if (obj_priv->tiling_mode != I915_TILING_NONE) 1108 if (obj->tiling_mode != I915_TILING_NONE)
1111 fbc_ctl |= dev_priv->cfb_fence; 1109 fbc_ctl |= dev_priv->cfb_fence;
1112 I915_WRITE(FBC_CONTROL, fbc_ctl); 1110 I915_WRITE(FBC_CONTROL, fbc_ctl);
1113 1111
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1150 struct drm_i915_private *dev_priv = dev->dev_private; 1148 struct drm_i915_private *dev_priv = dev->dev_private;
1151 struct drm_framebuffer *fb = crtc->fb; 1149 struct drm_framebuffer *fb = crtc->fb;
1152 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1150 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1153 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1151 struct drm_i915_gem_object *obj = intel_fb->obj;
1154 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1152 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1155 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 1153 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1156 unsigned long stall_watermark = 200; 1154 unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1159 dpfc_ctl = I915_READ(DPFC_CONTROL); 1157 dpfc_ctl = I915_READ(DPFC_CONTROL);
1160 if (dpfc_ctl & DPFC_CTL_EN) { 1158 if (dpfc_ctl & DPFC_CTL_EN) {
1161 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && 1159 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1162 dev_priv->cfb_fence == obj_priv->fence_reg && 1160 dev_priv->cfb_fence == obj->fence_reg &&
1163 dev_priv->cfb_plane == intel_crtc->plane && 1161 dev_priv->cfb_plane == intel_crtc->plane &&
1164 dev_priv->cfb_y == crtc->y) 1162 dev_priv->cfb_y == crtc->y)
1165 return; 1163 return;
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1170 } 1168 }
1171 1169
1172 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1170 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1173 dev_priv->cfb_fence = obj_priv->fence_reg; 1171 dev_priv->cfb_fence = obj->fence_reg;
1174 dev_priv->cfb_plane = intel_crtc->plane; 1172 dev_priv->cfb_plane = intel_crtc->plane;
1175 dev_priv->cfb_y = crtc->y; 1173 dev_priv->cfb_y = crtc->y;
1176 1174
1177 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; 1175 dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
1178 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1176 if (obj->tiling_mode != I915_TILING_NONE) {
1179 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence; 1177 dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
1180 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); 1178 I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
1181 } else { 1179 } else {
@@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1221 struct drm_i915_private *dev_priv = dev->dev_private; 1219 struct drm_i915_private *dev_priv = dev->dev_private;
1222 struct drm_framebuffer *fb = crtc->fb; 1220 struct drm_framebuffer *fb = crtc->fb;
1223 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); 1221 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
1224 struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj); 1222 struct drm_i915_gem_object *obj = intel_fb->obj;
1225 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1223 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1226 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; 1224 int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
1227 unsigned long stall_watermark = 200; 1225 unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1230 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); 1228 dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
1231 if (dpfc_ctl & DPFC_CTL_EN) { 1229 if (dpfc_ctl & DPFC_CTL_EN) {
1232 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 && 1230 if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
1233 dev_priv->cfb_fence == obj_priv->fence_reg && 1231 dev_priv->cfb_fence == obj->fence_reg &&
1234 dev_priv->cfb_plane == intel_crtc->plane && 1232 dev_priv->cfb_plane == intel_crtc->plane &&
1235 dev_priv->cfb_offset == obj_priv->gtt_offset && 1233 dev_priv->cfb_offset == obj->gtt_offset &&
1236 dev_priv->cfb_y == crtc->y) 1234 dev_priv->cfb_y == crtc->y)
1237 return; 1235 return;
1238 1236
@@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1242 } 1240 }
1243 1241
1244 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1; 1242 dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
1245 dev_priv->cfb_fence = obj_priv->fence_reg; 1243 dev_priv->cfb_fence = obj->fence_reg;
1246 dev_priv->cfb_plane = intel_crtc->plane; 1244 dev_priv->cfb_plane = intel_crtc->plane;
1247 dev_priv->cfb_offset = obj_priv->gtt_offset; 1245 dev_priv->cfb_offset = obj->gtt_offset;
1248 dev_priv->cfb_y = crtc->y; 1246 dev_priv->cfb_y = crtc->y;
1249 1247
1250 dpfc_ctl &= DPFC_RESERVED; 1248 dpfc_ctl &= DPFC_RESERVED;
1251 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); 1249 dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
1252 if (obj_priv->tiling_mode != I915_TILING_NONE) { 1250 if (obj->tiling_mode != I915_TILING_NONE) {
1253 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence); 1251 dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
1254 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); 1252 I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
1255 } else { 1253 } else {
@@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
1260 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | 1258 (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
1261 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); 1259 (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
1262 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); 1260 I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
1263 I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID); 1261 I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
1264 /* enable it... */ 1262 /* enable it... */
1265 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); 1263 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
1266 1264
1265 if (IS_GEN6(dev)) {
1266 I915_WRITE(SNB_DPFC_CTL_SA,
1267 SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
1268 I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
1269 }
1270
1267 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); 1271 DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
1268} 1272}
1269 1273
@@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev)
1345 struct intel_crtc *intel_crtc; 1349 struct intel_crtc *intel_crtc;
1346 struct drm_framebuffer *fb; 1350 struct drm_framebuffer *fb;
1347 struct intel_framebuffer *intel_fb; 1351 struct intel_framebuffer *intel_fb;
1348 struct drm_i915_gem_object *obj_priv; 1352 struct drm_i915_gem_object *obj;
1349 1353
1350 DRM_DEBUG_KMS("\n"); 1354 DRM_DEBUG_KMS("\n");
1351 1355
@@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev)
1384 intel_crtc = to_intel_crtc(crtc); 1388 intel_crtc = to_intel_crtc(crtc);
1385 fb = crtc->fb; 1389 fb = crtc->fb;
1386 intel_fb = to_intel_framebuffer(fb); 1390 intel_fb = to_intel_framebuffer(fb);
1387 obj_priv = to_intel_bo(intel_fb->obj); 1391 obj = intel_fb->obj;
1388 1392
1389 if (intel_fb->obj->size > dev_priv->cfb_size) { 1393 if (intel_fb->obj->base.size > dev_priv->cfb_size) {
1390 DRM_DEBUG_KMS("framebuffer too large, disabling " 1394 DRM_DEBUG_KMS("framebuffer too large, disabling "
1391 "compression\n"); 1395 "compression\n");
1392 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; 1396 dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev)
1410 dev_priv->no_fbc_reason = FBC_BAD_PLANE; 1414 dev_priv->no_fbc_reason = FBC_BAD_PLANE;
1411 goto out_disable; 1415 goto out_disable;
1412 } 1416 }
1413 if (obj_priv->tiling_mode != I915_TILING_X) { 1417 if (obj->tiling_mode != I915_TILING_X) {
1414 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n"); 1418 DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
1415 dev_priv->no_fbc_reason = FBC_NOT_TILED; 1419 dev_priv->no_fbc_reason = FBC_NOT_TILED;
1416 goto out_disable; 1420 goto out_disable;
@@ -1433,14 +1437,13 @@ out_disable:
1433 1437
1434int 1438int
1435intel_pin_and_fence_fb_obj(struct drm_device *dev, 1439intel_pin_and_fence_fb_obj(struct drm_device *dev,
1436 struct drm_gem_object *obj, 1440 struct drm_i915_gem_object *obj,
1437 bool pipelined) 1441 struct intel_ring_buffer *pipelined)
1438{ 1442{
1439 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1440 u32 alignment; 1443 u32 alignment;
1441 int ret; 1444 int ret;
1442 1445
1443 switch (obj_priv->tiling_mode) { 1446 switch (obj->tiling_mode) {
1444 case I915_TILING_NONE: 1447 case I915_TILING_NONE:
1445 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) 1448 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1446 alignment = 128 * 1024; 1449 alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1461 BUG(); 1464 BUG();
1462 } 1465 }
1463 1466
1464 ret = i915_gem_object_pin(obj, alignment); 1467 ret = i915_gem_object_pin(obj, alignment, true);
1465 if (ret) 1468 if (ret)
1466 return ret; 1469 return ret;
1467 1470
@@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
1474 * framebuffer compression. For simplicity, we always install 1477 * framebuffer compression. For simplicity, we always install
1475 * a fence as the cost is not that onerous. 1478 * a fence as the cost is not that onerous.
1476 */ 1479 */
1477 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 1480 if (obj->tiling_mode != I915_TILING_NONE) {
1478 obj_priv->tiling_mode != I915_TILING_NONE) { 1481 ret = i915_gem_object_get_fence(obj, pipelined, false);
1479 ret = i915_gem_object_get_fence_reg(obj, false);
1480 if (ret) 1482 if (ret)
1481 goto err_unpin; 1483 goto err_unpin;
1482 } 1484 }
@@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1497 struct drm_i915_private *dev_priv = dev->dev_private; 1499 struct drm_i915_private *dev_priv = dev->dev_private;
1498 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1500 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1499 struct intel_framebuffer *intel_fb; 1501 struct intel_framebuffer *intel_fb;
1500 struct drm_i915_gem_object *obj_priv; 1502 struct drm_i915_gem_object *obj;
1501 struct drm_gem_object *obj;
1502 int plane = intel_crtc->plane; 1503 int plane = intel_crtc->plane;
1503 unsigned long Start, Offset; 1504 unsigned long Start, Offset;
1504 u32 dspcntr; 1505 u32 dspcntr;
@@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1515 1516
1516 intel_fb = to_intel_framebuffer(fb); 1517 intel_fb = to_intel_framebuffer(fb);
1517 obj = intel_fb->obj; 1518 obj = intel_fb->obj;
1518 obj_priv = to_intel_bo(obj);
1519 1519
1520 reg = DSPCNTR(plane); 1520 reg = DSPCNTR(plane);
1521 dspcntr = I915_READ(reg); 1521 dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1540 return -EINVAL; 1540 return -EINVAL;
1541 } 1541 }
1542 if (INTEL_INFO(dev)->gen >= 4) { 1542 if (INTEL_INFO(dev)->gen >= 4) {
1543 if (obj_priv->tiling_mode != I915_TILING_NONE) 1543 if (obj->tiling_mode != I915_TILING_NONE)
1544 dspcntr |= DISPPLANE_TILED; 1544 dspcntr |= DISPPLANE_TILED;
1545 else 1545 else
1546 dspcntr &= ~DISPPLANE_TILED; 1546 dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1552 1552
1553 I915_WRITE(reg, dspcntr); 1553 I915_WRITE(reg, dspcntr);
1554 1554
1555 Start = obj_priv->gtt_offset; 1555 Start = obj->gtt_offset;
1556 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); 1556 Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
1557 1557
1558 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", 1558 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1598 mutex_lock(&dev->struct_mutex); 1598 mutex_lock(&dev->struct_mutex);
1599 ret = intel_pin_and_fence_fb_obj(dev, 1599 ret = intel_pin_and_fence_fb_obj(dev,
1600 to_intel_framebuffer(crtc->fb)->obj, 1600 to_intel_framebuffer(crtc->fb)->obj,
1601 false); 1601 NULL);
1602 if (ret != 0) { 1602 if (ret != 0) {
1603 mutex_unlock(&dev->struct_mutex); 1603 mutex_unlock(&dev->struct_mutex);
1604 return ret; 1604 return ret;
@@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1606 1606
1607 if (old_fb) { 1607 if (old_fb) {
1608 struct drm_i915_private *dev_priv = dev->dev_private; 1608 struct drm_i915_private *dev_priv = dev->dev_private;
1609 struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 1609 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1610 struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
1611 1610
1612 wait_event(dev_priv->pending_flip_queue, 1611 wait_event(dev_priv->pending_flip_queue,
1613 atomic_read(&obj_priv->pending_flip) == 0); 1612 atomic_read(&obj->pending_flip) == 0);
1614 1613
1615 /* Big Hammer, we also need to ensure that any pending 1614 /* Big Hammer, we also need to ensure that any pending
1616 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 1615 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1617 * current scanout is retired before unpinning the old 1616 * current scanout is retired before unpinning the old
1618 * framebuffer. 1617 * framebuffer.
1619 */ 1618 */
1620 ret = i915_gem_object_flush_gpu(obj_priv, false); 1619 ret = i915_gem_object_flush_gpu(obj, false);
1621 if (ret) { 1620 if (ret) {
1622 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); 1621 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1623 mutex_unlock(&dev->struct_mutex); 1622 mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1633 return ret; 1632 return ret;
1634 } 1633 }
1635 1634
1636 if (old_fb) 1635 if (old_fb) {
1636 intel_wait_for_vblank(dev, intel_crtc->pipe);
1637 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); 1637 i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
1638 }
1638 1639
1639 mutex_unlock(&dev->struct_mutex); 1640 mutex_unlock(&dev->struct_mutex);
1640 1641
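
The hunk above closes a race in the set_base path: the old framebuffer was previously unpinned while the display could still be scanning it out; waiting one vblank first guarantees the flip to the new base has latched. A stub sketch of the ordering, with assumed helper names:

#include <stdio.h>

static void wait_for_vblank(int pipe) { printf("vblank on pipe %d\n", pipe); }
static void unpin(const char *fb)     { printf("unpinned %s\n", fb); }

int main(void)
{
	/* New base already programmed; hardware latches it at the
	 * next vblank. */
	wait_for_vblank(0);
	/* Only now is the old buffer guaranteed off the scanout path. */
	unpin("old_fb");
	return 0;
}
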
@@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev,
1996static void intel_clear_scanline_wait(struct drm_device *dev) 1997static void intel_clear_scanline_wait(struct drm_device *dev)
1997{ 1998{
1998 struct drm_i915_private *dev_priv = dev->dev_private; 1999 struct drm_i915_private *dev_priv = dev->dev_private;
2000 struct intel_ring_buffer *ring;
1999 u32 tmp; 2001 u32 tmp;
2000 2002
2001 if (IS_GEN2(dev)) 2003 if (IS_GEN2(dev))
2002 /* Can't break the hang on i8xx */ 2004 /* Can't break the hang on i8xx */
2003 return; 2005 return;
2004 2006
2005 tmp = I915_READ(PRB0_CTL); 2007 ring = LP_RING(dev_priv);
2006 if (tmp & RING_WAIT) { 2008 tmp = I915_READ_CTL(ring);
2007 I915_WRITE(PRB0_CTL, tmp); 2009 if (tmp & RING_WAIT)
2008 POSTING_READ(PRB0_CTL); 2010 I915_WRITE_CTL(ring, tmp);
2009 }
2010} 2011}
2011 2012
2012static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) 2013static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2013{ 2014{
2014 struct drm_i915_gem_object *obj_priv; 2015 struct drm_i915_gem_object *obj;
2015 struct drm_i915_private *dev_priv; 2016 struct drm_i915_private *dev_priv;
2016 2017
2017 if (crtc->fb == NULL) 2018 if (crtc->fb == NULL)
2018 return; 2019 return;
2019 2020
2020 obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj); 2021 obj = to_intel_framebuffer(crtc->fb)->obj;
2021 dev_priv = crtc->dev->dev_private; 2022 dev_priv = crtc->dev->dev_private;
2022 wait_event(dev_priv->pending_flip_queue, 2023 wait_event(dev_priv->pending_flip_queue,
2023 atomic_read(&obj_priv->pending_flip) == 0); 2024 atomic_read(&obj->pending_flip) == 0);
2024} 2025}
2025 2026
2026static void ironlake_crtc_enable(struct drm_crtc *crtc) 2027static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2120,9 +2121,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2120 reg = TRANS_DP_CTL(pipe); 2121 reg = TRANS_DP_CTL(pipe);
2121 temp = I915_READ(reg); 2122 temp = I915_READ(reg);
2122 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2123 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2123 TRANS_DP_SYNC_MASK); 2124 TRANS_DP_SYNC_MASK |
2125 TRANS_DP_BPC_MASK);
2124 temp |= (TRANS_DP_OUTPUT_ENABLE | 2126 temp |= (TRANS_DP_OUTPUT_ENABLE |
2125 TRANS_DP_ENH_FRAMING); 2127 TRANS_DP_ENH_FRAMING);
2128 temp |= TRANS_DP_8BPC;
2126 2129
2127 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2130 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2128 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2131 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2712,27 +2715,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
2712 } 2715 }
2713} 2716}
2714 2717
2715#define DATA_N 0x800000
2716#define LINK_N 0x80000
2717
2718static void 2718static void
2719ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 2719ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
2720 int link_clock, struct fdi_m_n *m_n) 2720 int link_clock, struct fdi_m_n *m_n)
2721{ 2721{
2722 u64 temp;
2723
2724 m_n->tu = 64; /* default size */ 2722 m_n->tu = 64; /* default size */
2725 2723
2726 temp = (u64) DATA_N * pixel_clock; 2724 /* BUG_ON(pixel_clock > INT_MAX / 36); */
2727 temp = div_u64(temp, link_clock); 2725 m_n->gmch_m = bits_per_pixel * pixel_clock;
2728 m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); 2726 m_n->gmch_n = link_clock * nlanes * 8;
2729 m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
2730 m_n->gmch_n = DATA_N;
2731 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 2727 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
2732 2728
2733 temp = (u64) LINK_N * pixel_clock; 2729 m_n->link_m = pixel_clock;
2734 m_n->link_m = div_u64(temp, link_clock); 2730 m_n->link_n = link_clock;
2735 m_n->link_n = LINK_N;
2736 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 2731 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
2737} 2732}
2738 2733
@@ -2856,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
2856 ILK_FIFO_LINE_SIZE 2851 ILK_FIFO_LINE_SIZE
2857}; 2852};
2858 2853
2854static struct intel_watermark_params sandybridge_display_wm_info = {
2855 SNB_DISPLAY_FIFO,
2856 SNB_DISPLAY_MAXWM,
2857 SNB_DISPLAY_DFTWM,
2858 2,
2859 SNB_FIFO_LINE_SIZE
2860};
2861
2862static struct intel_watermark_params sandybridge_cursor_wm_info = {
2863 SNB_CURSOR_FIFO,
2864 SNB_CURSOR_MAXWM,
2865 SNB_CURSOR_DFTWM,
2866 2,
2867 SNB_FIFO_LINE_SIZE
2868};
2869
2870static struct intel_watermark_params sandybridge_display_srwm_info = {
2871 SNB_DISPLAY_SR_FIFO,
2872 SNB_DISPLAY_MAX_SRWM,
2873 SNB_DISPLAY_DFT_SRWM,
2874 2,
2875 SNB_FIFO_LINE_SIZE
2876};
2877
2878static struct intel_watermark_params sandybridge_cursor_srwm_info = {
2879 SNB_CURSOR_SR_FIFO,
2880 SNB_CURSOR_MAX_SRWM,
2881 SNB_CURSOR_DFT_SRWM,
2882 2,
2883 SNB_FIFO_LINE_SIZE
2884};
2885
2886
2859/** 2887/**
2860 * intel_calculate_wm - calculate watermark level 2888 * intel_calculate_wm - calculate watermark level
2861 * @clock_in_khz: pixel clock 2889 * @clock_in_khz: pixel clock
@@ -3389,6 +3417,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
3389 3417
3390static bool ironlake_compute_wm0(struct drm_device *dev, 3418static bool ironlake_compute_wm0(struct drm_device *dev,
3391 int pipe, 3419 int pipe,
3420 const struct intel_watermark_params *display,
3421 int display_latency,
3422 const struct intel_watermark_params *cursor,
3423 int cursor_latency,
3392 int *plane_wm, 3424 int *plane_wm,
3393 int *cursor_wm) 3425 int *cursor_wm)
3394{ 3426{
@@ -3406,22 +3438,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
3406 pixel_size = crtc->fb->bits_per_pixel / 8; 3438 pixel_size = crtc->fb->bits_per_pixel / 8;
3407 3439
3408 /* Use the small buffer method to calculate plane watermark */ 3440 /* Use the small buffer method to calculate plane watermark */
3409 entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000; 3441 entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
3410 entries = DIV_ROUND_UP(entries, 3442 entries = DIV_ROUND_UP(entries, display->cacheline_size);
3411 ironlake_display_wm_info.cacheline_size); 3443 *plane_wm = entries + display->guard_size;
3412 *plane_wm = entries + ironlake_display_wm_info.guard_size; 3444 if (*plane_wm > (int)display->max_wm)
3413 if (*plane_wm > (int)ironlake_display_wm_info.max_wm) 3445 *plane_wm = display->max_wm;
3414 *plane_wm = ironlake_display_wm_info.max_wm;
3415 3446
3416 /* Use the large buffer method to calculate cursor watermark */ 3447 /* Use the large buffer method to calculate cursor watermark */
3417 line_time_us = ((htotal * 1000) / clock); 3448 line_time_us = ((htotal * 1000) / clock);
3418 line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000; 3449 line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
3419 entries = line_count * 64 * pixel_size; 3450 entries = line_count * 64 * pixel_size;
3420 entries = DIV_ROUND_UP(entries, 3451 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
3421 ironlake_cursor_wm_info.cacheline_size); 3452 *cursor_wm = entries + cursor->guard_size;
3422 *cursor_wm = entries + ironlake_cursor_wm_info.guard_size; 3453 if (*cursor_wm > (int)cursor->max_wm)
3423 if (*cursor_wm > ironlake_cursor_wm_info.max_wm) 3454 *cursor_wm = (int)cursor->max_wm;
3424 *cursor_wm = ironlake_cursor_wm_info.max_wm;
3425 3455
3426 return true; 3456 return true;
3427} 3457}
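
The parameterized ironlake_compute_wm0 above computes the plane watermark by the small-buffer method: bytes fetched per microsecond times the latency window, rounded up to cache lines, plus a guard. A worked sketch with assumed inputs (148.5 MHz pixel clock, 32bpp, 64-byte cache lines), taking the latency parameter to be in 0.1 µs units as the ×100-to-nanoseconds scaling in the diff suggests:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Illustrative inputs only. */
	int clock = 148500;	/* pixel clock, kHz */
	int pixel_size = 4;	/* bytes per pixel */
	int latency = 7;	/* assumed 0.1 us units -> 700 ns */
	int cacheline = 64;	/* bytes, per the *_wm_info tables */
	int guard = 2;

	/* Small-buffer method: bytes fetched during the latency window. */
	int bytes_per_us = clock * pixel_size / 1000;	   /* 594 */
	int entries = bytes_per_us * latency * 100 / 1000; /* 415 bytes */
	int plane_wm = DIV_ROUND_UP(entries, cacheline) + guard; /* 7+2=9 */

	printf("plane watermark = %d FIFO entries\n", plane_wm);
	return 0;
}
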
@@ -3436,7 +3466,12 @@ static void ironlake_update_wm(struct drm_device *dev,
3436 int tmp; 3466 int tmp;
3437 3467
3438 enabled = 0; 3468 enabled = 0;
3439 if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) { 3469 if (ironlake_compute_wm0(dev, 0,
3470 &ironlake_display_wm_info,
3471 ILK_LP0_PLANE_LATENCY,
3472 &ironlake_cursor_wm_info,
3473 ILK_LP0_CURSOR_LATENCY,
3474 &plane_wm, &cursor_wm)) {
3440 I915_WRITE(WM0_PIPEA_ILK, 3475 I915_WRITE(WM0_PIPEA_ILK,
3441 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 3476 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3442 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 3477 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3445,7 +3480,12 @@ static void ironlake_update_wm(struct drm_device *dev,
3445 enabled++; 3480 enabled++;
3446 } 3481 }
3447 3482
3448 if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) { 3483 if (ironlake_compute_wm0(dev, 1,
3484 &ironlake_display_wm_info,
3485 ILK_LP0_PLANE_LATENCY,
3486 &ironlake_cursor_wm_info,
3487 ILK_LP0_CURSOR_LATENCY,
3488 &plane_wm, &cursor_wm)) {
3449 I915_WRITE(WM0_PIPEB_ILK, 3489 I915_WRITE(WM0_PIPEB_ILK,
3450 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 3490 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3451 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 3491 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3459,7 +3499,7 @@ static void ironlake_update_wm(struct drm_device *dev,
3459 * display plane is used. 3499 * display plane is used.
3460 */ 3500 */
3461 tmp = 0; 3501 tmp = 0;
3462 if (enabled == 1 && /* XXX disabled due to buggy implementation? */ 0) { 3502 if (enabled == 1) {
3463 unsigned long line_time_us; 3503 unsigned long line_time_us;
3464 int small, large, plane_fbc; 3504 int small, large, plane_fbc;
3465 int sr_clock, entries; 3505 int sr_clock, entries;
@@ -3511,6 +3551,197 @@ static void ironlake_update_wm(struct drm_device *dev,
3511 /* XXX setup WM2 and WM3 */ 3551 /* XXX setup WM2 and WM3 */
3512} 3552}
3513 3553
3554/*
3555 * Check the wm result.
3556 *
3557 * If any calculated watermark value is larger than the maximum value that
3558 * can be programmed into the associated watermark register, that watermark
3559 * must be disabled.
3560 *
3561 * Also return false if all of those watermark values are 0, which
3562 * sandybridge_compute_srwm sets to indicate the latency is zero.
3563 */
3564static bool sandybridge_check_srwm(struct drm_device *dev, int level,
3565 int fbc_wm, int display_wm, int cursor_wm)
3566{
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568
3569 DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
3570 " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
3571
3572 if (fbc_wm > SNB_FBC_MAX_SRWM) {
3573 DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
3574 fbc_wm, SNB_FBC_MAX_SRWM, level);
3575
3576 /* fbc has it's own way to disable FBC WM */
3577 I915_WRITE(DISP_ARB_CTL,
3578 I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
3579 return false;
3580 }
3581
3582 if (display_wm > SNB_DISPLAY_MAX_SRWM) {
3583 DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
3584 display_wm, SNB_DISPLAY_MAX_SRWM, level);
3585 return false;
3586 }
3587
3588 if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
3589 DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
3590 cursor_wm, SNB_CURSOR_MAX_SRWM, level);
3591 return false;
3592 }
3593
3594 if (!(fbc_wm || display_wm || cursor_wm)) {
3595 DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
3596 return false;
3597 }
3598
3599 return true;
3600}
3601
3602/*
3603 * Compute watermark values of WM[1-3],
3604 */
3605static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
3606 int hdisplay, int htotal, int pixel_size,
3607 int clock, int latency_ns, int *fbc_wm,
3608 int *display_wm, int *cursor_wm)
3609{
3610
3611 unsigned long line_time_us;
3612 int small, large;
3613 int entries;
3614 int line_count, line_size;
3615
3616 if (!latency_ns) {
3617 *fbc_wm = *display_wm = *cursor_wm = 0;
3618 return false;
3619 }
3620
3621 line_time_us = (htotal * 1000) / clock;
3622 line_count = (latency_ns / line_time_us + 1000) / 1000;
3623 line_size = hdisplay * pixel_size;
3624
3625 /* Use the minimum of the small and large buffer method for primary */
3626 small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
3627 large = line_count * line_size;
3628
3629 entries = DIV_ROUND_UP(min(small, large),
3630 sandybridge_display_srwm_info.cacheline_size);
3631 *display_wm = entries + sandybridge_display_srwm_info.guard_size;
3632
3633 /*
3634 * Spec said:
3635 * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
3636 */
3637 *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
3638
3639 /* calculate the self-refresh watermark for display cursor */
3640 entries = line_count * pixel_size * 64;
3641 entries = DIV_ROUND_UP(entries,
3642 sandybridge_cursor_srwm_info.cacheline_size);
3643 *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
3644
3645 return sandybridge_check_srwm(dev, level,
3646 *fbc_wm, *display_wm, *cursor_wm);
3647}
3648
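
sandybridge_compute_srwm above derives the self-refresh display watermark from the smaller of the small-buffer and large-buffer estimates, then applies the quoted spec formula for the FBC watermark. A worked sketch with assumed 1080p-class mode parameters:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* Illustrative numbers; all assumed. */
	int hdisplay = 1920, htotal = 2200, pixel_size = 4;
	int clock = 148500;	/* kHz */
	int latency_ns = 12000;	/* e.g. a WM1-class latency, assumed */
	int cacheline = 64, guard = 2;

	long line_time_us = (htotal * 1000L) / clock;	/* ~14 us */
	long line_count = (latency_ns / line_time_us + 1000) / 1000;
	long line_size = (long)hdisplay * pixel_size;	/* 7680 bytes */

	/* Minimum of small- and large-buffer methods for the primary. */
	long small = ((long)clock * pixel_size / 1000) * latency_ns / 1000;
	long large = line_count * line_size;
	long entries = DIV_ROUND_UP(MIN(small, large), cacheline);
	long display_wm = entries + guard;

	/* Spec: FBC WM = ((Final Primary WM * 64) / bytes per line) + 2 */
	long fbc_wm = DIV_ROUND_UP(display_wm * 64, line_size) + 2;

	printf("display_wm=%ld fbc_wm=%ld\n", display_wm, fbc_wm);
	return 0;
}
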
3649static void sandybridge_update_wm(struct drm_device *dev,
3650 int planea_clock, int planeb_clock,
3651 int hdisplay, int htotal,
3652 int pixel_size)
3653{
3654 struct drm_i915_private *dev_priv = dev->dev_private;
3655 int latency = SNB_READ_WM0_LATENCY();
3656 int fbc_wm, plane_wm, cursor_wm, enabled;
3657 int clock;
3658
3659 enabled = 0;
3660 if (ironlake_compute_wm0(dev, 0,
3661 &sandybridge_display_wm_info, latency,
3662 &sandybridge_cursor_wm_info, latency,
3663 &plane_wm, &cursor_wm)) {
3664 I915_WRITE(WM0_PIPEA_ILK,
3665 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3666 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
3667 " plane %d, " "cursor: %d\n",
3668 plane_wm, cursor_wm);
3669 enabled++;
3670 }
3671
3672 if (ironlake_compute_wm0(dev, 1,
3673 &sandybridge_display_wm_info, latency,
3674 &sandybridge_cursor_wm_info, latency,
3675 &plane_wm, &cursor_wm)) {
3676 I915_WRITE(WM0_PIPEB_ILK,
3677 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
3678 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
3679 " plane %d, cursor: %d\n",
3680 plane_wm, cursor_wm);
3681 enabled++;
3682 }
3683
3684 /*
3685 * Calculate and update the self-refresh watermark only when one
3686 * display plane is used.
3687 *
3688 * SNB supports 3 levels of watermarks.
3689 *
3690 * WM1/WM2/WM3 watermarks have to be enabled in ascending order,
3691 * and disabled in descending order.
3692 *
3693 */
3694 I915_WRITE(WM3_LP_ILK, 0);
3695 I915_WRITE(WM2_LP_ILK, 0);
3696 I915_WRITE(WM1_LP_ILK, 0);
3697
3698 if (enabled != 1)
3699 return;
3700
3701 clock = planea_clock ? planea_clock : planeb_clock;
3702
3703 /* WM1 */
3704 if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
3705 clock, SNB_READ_WM1_LATENCY() * 500,
3706 &fbc_wm, &plane_wm, &cursor_wm))
3707 return;
3708
3709 I915_WRITE(WM1_LP_ILK,
3710 WM1_LP_SR_EN |
3711 (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3712 (fbc_wm << WM1_LP_FBC_SHIFT) |
3713 (plane_wm << WM1_LP_SR_SHIFT) |
3714 cursor_wm);
3715
3716 /* WM2 */
3717 if (!sandybridge_compute_srwm(dev, 2,
3718 hdisplay, htotal, pixel_size,
3719 clock, SNB_READ_WM2_LATENCY() * 500,
3720 &fbc_wm, &plane_wm, &cursor_wm))
3721 return;
3722
3723 I915_WRITE(WM2_LP_ILK,
3724 WM2_LP_EN |
3725 (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3726 (fbc_wm << WM1_LP_FBC_SHIFT) |
3727 (plane_wm << WM1_LP_SR_SHIFT) |
3728 cursor_wm);
3729
3730 /* WM3 */
3731 if (!sandybridge_compute_srwm(dev, 3,
3732 hdisplay, htotal, pixel_size,
3733 clock, SNB_READ_WM3_LATENCY() * 500,
3734 &fbc_wm, &plane_wm, &cursor_wm))
3735 return;
3736
3737 I915_WRITE(WM3_LP_ILK,
3738 WM3_LP_EN |
3739 (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
3740 (fbc_wm << WM1_LP_FBC_SHIFT) |
3741 (plane_wm << WM1_LP_SR_SHIFT) |
3742 cursor_wm);
3743}
3744
3514/** 3745/**
3515 * intel_update_watermarks - update FIFO watermark values based on current modes 3746 * intel_update_watermarks - update FIFO watermark values based on current modes
3516 * 3747 *
@@ -3666,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3666 * refclk, or FALSE. The returned values represent the clock equation: 3897 * refclk, or FALSE. The returned values represent the clock equation:
3667 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. 3898 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
3668 */ 3899 */
3669 limit = intel_limit(crtc); 3900 limit = intel_limit(crtc, refclk);
3670 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 3901 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
3671 if (!ok) { 3902 if (!ok) {
3672 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 3903 DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3716,6 +3947,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3716 3947
3717 /* FDI link */ 3948 /* FDI link */
3718 if (HAS_PCH_SPLIT(dev)) { 3949 if (HAS_PCH_SPLIT(dev)) {
3950 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3719 int lane = 0, link_bw, bpp; 3951 int lane = 0, link_bw, bpp;
3720 /* CPU eDP doesn't require FDI link, so just set DP M/N 3952 /* CPU eDP doesn't require FDI link, so just set DP M/N
3721 according to current link config */ 3953 according to current link config */
@@ -3799,6 +4031,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3799 4031
3800 intel_crtc->fdi_lanes = lane; 4032 intel_crtc->fdi_lanes = lane;
3801 4033
4034 if (pixel_multiplier > 1)
4035 link_bw *= pixel_multiplier;
3802 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 4036 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
3803 } 4037 }
3804 4038
@@ -3860,6 +4094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3860 reduced_clock.m2; 4094 reduced_clock.m2;
3861 } 4095 }
3862 4096
4097 /* Enable autotuning of the PLL clock (if permissible) */
4098 if (HAS_PCH_SPLIT(dev)) {
4099 int factor = 21;
4100
4101 if (is_lvds) {
4102 if ((dev_priv->lvds_use_ssc &&
4103 dev_priv->lvds_ssc_freq == 100) ||
4104 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4105 factor = 25;
4106 } else if (is_sdvo && is_tv)
4107 factor = 20;
4108
4109 if (clock.m1 < factor * clock.n)
4110 fp |= FP_CB_TUNE;
4111 }
4112
3863 dpll = 0; 4113 dpll = 0;
3864 if (!HAS_PCH_SPLIT(dev)) 4114 if (!HAS_PCH_SPLIT(dev))
3865 dpll = DPLL_VGA_MODE_DIS; 4115 dpll = DPLL_VGA_MODE_DIS;
@@ -4074,7 +4324,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4074 } 4324 }
4075 4325
4076 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 4326 if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
4077 I915_WRITE(fp_reg, fp);
4078 I915_WRITE(dpll_reg, dpll); 4327 I915_WRITE(dpll_reg, dpll);
4079 4328
4080 /* Wait for the clocks to stabilize. */ 4329 /* Wait for the clocks to stabilize. */
@@ -4092,13 +4341,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
4092 } 4341 }
4093 I915_WRITE(DPLL_MD(pipe), temp); 4342 I915_WRITE(DPLL_MD(pipe), temp);
4094 } else { 4343 } else {
4095 /* write it again -- the BIOS does, after all */ 4344 /* The pixel multiplier can only be updated once the
4345 * DPLL is enabled and the clocks are stable.
4346 *
4347 * So write it again.
4348 */
4096 I915_WRITE(dpll_reg, dpll); 4349 I915_WRITE(dpll_reg, dpll);
4097 } 4350 }
4098
4099 /* Wait for the clocks to stabilize. */
4100 POSTING_READ(dpll_reg);
4101 udelay(150);
4102 } 4351 }
4103 4352
4104 intel_crtc->lowfreq_avail = false; 4353 intel_crtc->lowfreq_avail = false;
@@ -4334,15 +4583,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
4334} 4583}
4335 4584
4336static int intel_crtc_cursor_set(struct drm_crtc *crtc, 4585static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4337 struct drm_file *file_priv, 4586 struct drm_file *file,
4338 uint32_t handle, 4587 uint32_t handle,
4339 uint32_t width, uint32_t height) 4588 uint32_t width, uint32_t height)
4340{ 4589{
4341 struct drm_device *dev = crtc->dev; 4590 struct drm_device *dev = crtc->dev;
4342 struct drm_i915_private *dev_priv = dev->dev_private; 4591 struct drm_i915_private *dev_priv = dev->dev_private;
4343 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4592 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4344 struct drm_gem_object *bo; 4593 struct drm_i915_gem_object *obj;
4345 struct drm_i915_gem_object *obj_priv;
4346 uint32_t addr; 4594 uint32_t addr;
4347 int ret; 4595 int ret;
4348 4596
@@ -4352,7 +4600,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4352 if (!handle) { 4600 if (!handle) {
4353 DRM_DEBUG_KMS("cursor off\n"); 4601 DRM_DEBUG_KMS("cursor off\n");
4354 addr = 0; 4602 addr = 0;
4355 bo = NULL; 4603 obj = NULL;
4356 mutex_lock(&dev->struct_mutex); 4604 mutex_lock(&dev->struct_mutex);
4357 goto finish; 4605 goto finish;
4358 } 4606 }
@@ -4363,13 +4611,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4363 return -EINVAL; 4611 return -EINVAL;
4364 } 4612 }
4365 4613
4366 bo = drm_gem_object_lookup(dev, file_priv, handle); 4614 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
4367 if (!bo) 4615 if (!obj)
4368 return -ENOENT; 4616 return -ENOENT;
4369 4617
4370 obj_priv = to_intel_bo(bo); 4618 if (obj->base.size < width * height * 4) {
4371
4372 if (bo->size < width * height * 4) {
4373 DRM_ERROR("buffer is too small\n"); 4619 DRM_ERROR("buffer is too small\n");
4374 ret = -ENOMEM; 4620 ret = -ENOMEM;
4375 goto fail; 4621 goto fail;
@@ -4378,29 +4624,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4378 /* we only need to pin inside GTT if cursor is non-phy */ 4624 /* we only need to pin inside GTT if cursor is non-phy */
4379 mutex_lock(&dev->struct_mutex); 4625 mutex_lock(&dev->struct_mutex);
4380 if (!dev_priv->info->cursor_needs_physical) { 4626 if (!dev_priv->info->cursor_needs_physical) {
4381 ret = i915_gem_object_pin(bo, PAGE_SIZE); 4627 if (obj->tiling_mode) {
4628 DRM_ERROR("cursor cannot be tiled\n");
4629 ret = -EINVAL;
4630 goto fail_locked;
4631 }
4632
4633 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
4382 if (ret) { 4634 if (ret) {
4383 DRM_ERROR("failed to pin cursor bo\n"); 4635 DRM_ERROR("failed to pin cursor bo\n");
4384 goto fail_locked; 4636 goto fail_locked;
4385 } 4637 }
4386 4638
4387 ret = i915_gem_object_set_to_gtt_domain(bo, 0); 4639 ret = i915_gem_object_set_to_gtt_domain(obj, 0);
4388 if (ret) { 4640 if (ret) {
4389 DRM_ERROR("failed to move cursor bo into the GTT\n"); 4641 DRM_ERROR("failed to move cursor bo into the GTT\n");
4390 goto fail_unpin; 4642 goto fail_unpin;
4391 } 4643 }
4392 4644
4393 addr = obj_priv->gtt_offset; 4645 ret = i915_gem_object_put_fence(obj);
4646 if (ret) {
4647 DRM_ERROR("failed to move cursor bo into the GTT\n");
4648 goto fail_unpin;
4649 }
4650
4651 addr = obj->gtt_offset;
4394 } else { 4652 } else {
4395 int align = IS_I830(dev) ? 16 * 1024 : 256; 4653 int align = IS_I830(dev) ? 16 * 1024 : 256;
4396 ret = i915_gem_attach_phys_object(dev, bo, 4654 ret = i915_gem_attach_phys_object(dev, obj,
4397 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, 4655 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
4398 align); 4656 align);
4399 if (ret) { 4657 if (ret) {
4400 DRM_ERROR("failed to attach phys object\n"); 4658 DRM_ERROR("failed to attach phys object\n");
4401 goto fail_locked; 4659 goto fail_locked;
4402 } 4660 }
4403 addr = obj_priv->phys_obj->handle->busaddr; 4661 addr = obj->phys_obj->handle->busaddr;
4404 } 4662 }
4405 4663
4406 if (IS_GEN2(dev)) 4664 if (IS_GEN2(dev))
@@ -4409,17 +4667,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4409 finish: 4667 finish:
4410 if (intel_crtc->cursor_bo) { 4668 if (intel_crtc->cursor_bo) {
4411 if (dev_priv->info->cursor_needs_physical) { 4669 if (dev_priv->info->cursor_needs_physical) {
4412 if (intel_crtc->cursor_bo != bo) 4670 if (intel_crtc->cursor_bo != obj)
4413 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 4671 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
4414 } else 4672 } else
4415 i915_gem_object_unpin(intel_crtc->cursor_bo); 4673 i915_gem_object_unpin(intel_crtc->cursor_bo);
4416 drm_gem_object_unreference(intel_crtc->cursor_bo); 4674 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
4417 } 4675 }
4418 4676
4419 mutex_unlock(&dev->struct_mutex); 4677 mutex_unlock(&dev->struct_mutex);
4420 4678
4421 intel_crtc->cursor_addr = addr; 4679 intel_crtc->cursor_addr = addr;
4422 intel_crtc->cursor_bo = bo; 4680 intel_crtc->cursor_bo = obj;
4423 intel_crtc->cursor_width = width; 4681 intel_crtc->cursor_width = width;
4424 intel_crtc->cursor_height = height; 4682 intel_crtc->cursor_height = height;
4425 4683
@@ -4427,11 +4685,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
4427 4685
4428 return 0; 4686 return 0;
4429fail_unpin: 4687fail_unpin:
4430 i915_gem_object_unpin(bo); 4688 i915_gem_object_unpin(obj);
4431fail_locked: 4689fail_locked:
4432 mutex_unlock(&dev->struct_mutex); 4690 mutex_unlock(&dev->struct_mutex);
4433fail: 4691fail:
4434 drm_gem_object_unreference_unlocked(bo); 4692 drm_gem_object_unreference_unlocked(&obj->base);
4435 return ret; 4693 return ret;
4436} 4694}
4437 4695
@@ -4742,8 +5000,14 @@ static void intel_gpu_idle_timer(unsigned long arg)
4742 struct drm_device *dev = (struct drm_device *)arg; 5000 struct drm_device *dev = (struct drm_device *)arg;
4743 drm_i915_private_t *dev_priv = dev->dev_private; 5001 drm_i915_private_t *dev_priv = dev->dev_private;
4744 5002
4745 dev_priv->busy = false; 5003 if (!list_empty(&dev_priv->mm.active_list)) {
5004 /* Still processing requests, so just re-arm the timer. */
5005 mod_timer(&dev_priv->idle_timer, jiffies +
5006 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5007 return;
5008 }
4746 5009
5010 dev_priv->busy = false;
4747 queue_work(dev_priv->wq, &dev_priv->idle_work); 5011 queue_work(dev_priv->wq, &dev_priv->idle_work);
4748} 5012}
4749 5013
@@ -4754,9 +5018,17 @@ static void intel_crtc_idle_timer(unsigned long arg)
4754 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; 5018 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
4755 struct drm_crtc *crtc = &intel_crtc->base; 5019 struct drm_crtc *crtc = &intel_crtc->base;
4756 drm_i915_private_t *dev_priv = crtc->dev->dev_private; 5020 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
5021 struct intel_framebuffer *intel_fb;
4757 5022
4758 intel_crtc->busy = false; 5023 intel_fb = to_intel_framebuffer(crtc->fb);
5024 if (intel_fb && intel_fb->obj->active) {
5025 /* The framebuffer is still being accessed by the GPU. */
5026 mod_timer(&intel_crtc->idle_timer, jiffies +
5027 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5028 return;
5029 }
4759 5030
5031 intel_crtc->busy = false;
4760 queue_work(dev_priv->wq, &dev_priv->idle_work); 5032 queue_work(dev_priv->wq, &dev_priv->idle_work);
4761} 5033}
4762 5034
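
Both idle-timer hunks above switch from unconditionally declaring the GPU (or CRTC) idle to re-arming the timer while work is still outstanding, so downclocking only happens once activity has genuinely stopped. A minimal userspace sketch of that re-arm pattern; the busy check and timer API are stand-ins:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's state and timer API. */
static int outstanding_work = 3;
static bool busy = true;

static void mod_timer(void (*fn)(void)) { fn(); /* immediate, for demo */ }

static void idle_timer(void)
{
	if (outstanding_work > 0) {
		/* Still active: do NOT drop to idle; re-arm instead. */
		outstanding_work--;
		mod_timer(idle_timer);
		return;
	}
	busy = false;	/* genuinely idle: let the clocks drop */
	(void)busy;
	puts("idle: queueing downclock work");
}

int main(void) { idle_timer(); return 0; }
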
@@ -4891,7 +5163,7 @@ static void intel_idle_update(struct work_struct *work)
  * buffer), we'll also mark the display as busy, so we know to increase its
  * clock frequency.
  */
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = NULL;
@@ -4972,8 +5244,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 
 	mutex_lock(&work->dev->struct_mutex);
 	i915_gem_object_unpin(work->old_fb_obj);
-	drm_gem_object_unreference(work->pending_flip_obj);
-	drm_gem_object_unreference(work->old_fb_obj);
+	drm_gem_object_unreference(&work->pending_flip_obj->base);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+
 	mutex_unlock(&work->dev->struct_mutex);
 	kfree(work);
 }
@@ -4984,15 +5257,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct drm_pending_vblank_event *e;
-	struct timeval now;
+	struct timeval tnow, tvbl;
 	unsigned long flags;
 
 	/* Ignore early vblank irqs */
 	if (intel_crtc == NULL)
 		return;
 
+	do_gettimeofday(&tnow);
+
 	spin_lock_irqsave(&dev->event_lock, flags);
 	work = intel_crtc->unpin_work;
 	if (work == NULL || !work->pending) {
@@ -5001,26 +5276,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 	}
 
 	intel_crtc->unpin_work = NULL;
-	drm_vblank_put(dev, intel_crtc->pipe);
 
 	if (work->event) {
 		e = work->event;
-		do_gettimeofday(&now);
-		e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
-		e->event.tv_sec = now.tv_sec;
-		e->event.tv_usec = now.tv_usec;
+		e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+		/* Called before vblank count and timestamps have
+		 * been updated for the vblank interval of flip
+		 * completion? Need to increment vblank count and
+		 * add one videorefresh duration to returned timestamp
+		 * to account for this. We assume this happened if we
+		 * get called over 0.9 frame durations after the last
+		 * timestamped vblank.
+		 *
+		 * This calculation can not be used with vrefresh rates
+		 * below 5Hz (10Hz to be on the safe side) without
+		 * promoting to 64 integers.
+		 */
+		if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+		    9 * crtc->framedur_ns) {
+			e->event.sequence++;
+			tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+					     crtc->framedur_ns);
+		}
+
+		e->event.tv_sec = tvbl.tv_sec;
+		e->event.tv_usec = tvbl.tv_usec;
+
 		list_add_tail(&e->base.link,
 			      &e->base.file_priv->event_list);
 		wake_up_interruptible(&e->base.file_priv->event_wait);
 	}
 
+	drm_vblank_put(dev, intel_crtc->pipe);
+
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 
-	obj_priv = to_intel_bo(work->old_fb_obj);
+	obj = work->old_fb_obj;
+
 	atomic_clear_mask(1 << intel_crtc->plane,
-			  &obj_priv->pending_flip.counter);
-	if (atomic_read(&obj_priv->pending_flip) == 0)
+			  &obj->pending_flip.counter);
+	if (atomic_read(&obj->pending_flip) == 0)
 		wake_up(&dev_priv->pending_flip_queue);
+
 	schedule_work(&work->work);
 
 	trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
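Note: the completion-path heuristic above is plain integer arithmetic: if the handler runs more than 0.9 frame durations after the last timestamped vblank, the event is credited to the following vblank. A stand-alone sketch of that check with illustrative 60 Hz numbers; the comparison 10*(tnow - tvbl) > 9*framedur is the driver's, everything else here is scaffolding:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int64_t tnow_ns = 16000000;     /* flip handler ran 16 ms after... */
        int64_t tvbl_ns = 0;            /* ...the last timestamped vblank */
        int64_t framedur_ns = 16666667; /* ~60 Hz refresh */
        uint32_t sequence = 100;        /* vblank count reported so far */

        /* 10*(tnow - tvbl) > 9*framedur avoids floating point; as the
         * patch comment notes, it would overflow 32-bit math at very
         * low refresh rates, hence the 64-bit values here. */
        if (10 * (tnow_ns - tvbl_ns) > 9 * framedur_ns) {
            sequence++;                 /* credit the next vblank */
            tvbl_ns += framedur_ns;     /* and shift the timestamp */
        }
        printf("seq=%u ts=%lld ns\n", sequence, (long long)tvbl_ns);
        return 0;
    }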
@@ -5066,8 +5364,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct drm_device *dev = crtc->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_framebuffer *intel_fb;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	struct intel_unpin_work *work;
 	unsigned long flags, offset;
@@ -5101,13 +5398,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	obj = intel_fb->obj;
 
 	mutex_lock(&dev->struct_mutex);
-	ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
 	if (ret)
 		goto cleanup_work;
 
 	/* Reference the objects for the scheduled work. */
-	drm_gem_object_reference(work->old_fb_obj);
-	drm_gem_object_reference(obj);
+	drm_gem_object_reference(&work->old_fb_obj->base);
+	drm_gem_object_reference(&obj->base);
 
 	crtc->fb = fb;
 
@@ -5115,22 +5412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (ret)
 		goto cleanup_objs;
 
-	/* Block clients from rendering to the new back buffer until
-	 * the flip occurs and the object is no longer visible.
-	 */
-	atomic_add(1 << intel_crtc->plane,
-		   &to_intel_bo(work->old_fb_obj)->pending_flip);
-
-	work->pending_flip_obj = obj;
-	obj_priv = to_intel_bo(obj);
-
 	if (IS_GEN3(dev) || IS_GEN2(dev)) {
 		u32 flip_mask;
 
 		/* Can't queue multiple flips, so wait for the previous
 		 * one to finish before executing the next.
 		 */
-		BEGIN_LP_RING(2);
+		ret = BEGIN_LP_RING(2);
+		if (ret)
+			goto cleanup_objs;
+
 		if (intel_crtc->plane)
 			flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 		else
@@ -5140,18 +5431,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		ADVANCE_LP_RING();
 	}
 
+	work->pending_flip_obj = obj;
+
 	work->enable_stall_check = true;
 
 	/* Offset into the new buffer for cases of shared fbs between CRTCs */
 	offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
 
-	BEGIN_LP_RING(4);
-	switch(INTEL_INFO(dev)->gen) {
+	ret = BEGIN_LP_RING(4);
+	if (ret)
+		goto cleanup_objs;
+
+	/* Block clients from rendering to the new back buffer until
+	 * the flip occurs and the object is no longer visible.
+	 */
+	atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+	switch (INTEL_INFO(dev)->gen) {
 	case 2:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset + offset);
+		OUT_RING(obj->gtt_offset + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -5159,7 +5460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(MI_DISPLAY_FLIP_I915 |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset + offset);
+		OUT_RING(obj->gtt_offset + offset);
 		OUT_RING(MI_NOOP);
 		break;
 
@@ -5172,7 +5473,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
 		OUT_RING(fb->pitch);
-		OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+		OUT_RING(obj->gtt_offset | obj->tiling_mode);
 
 		/* XXX Enabling the panel-fitter across page-flip is so far
 		 * untested on non-native modes, so ignore it for now.
@@ -5186,8 +5487,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	case 6:
 		OUT_RING(MI_DISPLAY_FLIP |
 			 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-		OUT_RING(fb->pitch | obj_priv->tiling_mode);
-		OUT_RING(obj_priv->gtt_offset);
+		OUT_RING(fb->pitch | obj->tiling_mode);
+		OUT_RING(obj->gtt_offset);
 
 		pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
 		pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5203,8 +5504,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	return 0;
 
 cleanup_objs:
-	drm_gem_object_unreference(work->old_fb_obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&work->old_fb_obj->base);
+	drm_gem_object_unreference(&obj->base);
 cleanup_work:
 	mutex_unlock(&dev->struct_mutex);
 
@@ -5236,6 +5537,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
 	.page_flip = intel_crtc_page_flip,
 };
 
+static void intel_sanitize_modesetting(struct drm_device *dev,
+				       int pipe, int plane)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 reg, val;
+
+	if (HAS_PCH_SPLIT(dev))
+		return;
+
+	/* Who knows what state these registers were left in by the BIOS or
+	 * grub?
+	 *
+	 * If we leave the registers in a conflicting state (e.g. with the
+	 * display plane reading from the other pipe than the one we intend
+	 * to use) then when we attempt to teardown the active mode, we will
+	 * not disable the pipes and planes in the correct order -- leaving
+	 * a plane reading from a disabled pipe and possibly leading to
+	 * undefined behaviour.
+	 */
+
+	reg = DSPCNTR(plane);
+	val = I915_READ(reg);
+
+	if ((val & DISPLAY_PLANE_ENABLE) == 0)
+		return;
+	if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+		return;
+
+	/* This display plane is active and attached to the other CPU pipe. */
+	pipe = !pipe;
+
+	/* Disable the plane and wait for it to stop reading from the pipe. */
+	I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+	intel_flush_display_plane(dev, plane);
+
+	if (IS_GEN2(dev))
+		intel_wait_for_vblank(dev, pipe);
+
+	if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+		return;
+
+	/* Switch off the pipe. */
+	reg = PIPECONF(pipe);
+	val = I915_READ(reg);
+	if (val & PIPECONF_ENABLE) {
+		I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+		intel_wait_for_pipe_off(dev, pipe);
+	}
+}
 
 static void intel_crtc_init(struct drm_device *dev, int pipe)
 {
@@ -5287,10 +5637,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 
 	setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
 		    (unsigned long)intel_crtc);
+
+	intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
+				struct drm_file *file)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
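Note: intel_sanitize_modesetting() boils down to one predicate: is this plane enabled but scanning out from the other pipe? A compilable sketch of that test; the bit positions are illustrative, not the hardware's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DISPLAY_PLANE_ENABLE    (1u << 31) /* illustrative bit layout */
    #define DISPPLANE_SEL_PIPE_MASK (1u << 24)

    /* True when DSPCNTR shows the plane enabled but bound to a pipe
     * other than the one we intend to drive -- the conflicting state
     * the helper above disables before taking over from the BIOS. */
    static bool plane_conflicts(uint32_t dspcntr, int intended_pipe)
    {
        if ((dspcntr & DISPLAY_PLANE_ENABLE) == 0)
            return false;
        return !!(dspcntr & DISPPLANE_SEL_PIPE_MASK) != intended_pipe;
    }

    int main(void)
    {
        uint32_t bios_left = DISPLAY_PLANE_ENABLE | DISPPLANE_SEL_PIPE_MASK;
        printf("conflict on pipe 0: %d\n", plane_conflicts(bios_left, 0));
        return 0;
    }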
@@ -5336,9 +5688,14 @@ static void intel_setup_outputs(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *encoder;
 	bool dpd_is_edp = false;
+	bool has_lvds = false;
 
 	if (IS_MOBILE(dev) && !IS_I830(dev))
-		intel_lvds_init(dev);
+		has_lvds = intel_lvds_init(dev);
+	if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+		/* disable the panel fitter on everything but LVDS */
+		I915_WRITE(PFIT_CONTROL, 0);
+	}
 
 	if (HAS_PCH_SPLIT(dev)) {
 		dpd_is_edp = intel_dpd_is_edp(dev);
@@ -5435,19 +5792,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
 	drm_framebuffer_cleanup(fb);
-	drm_gem_object_unreference_unlocked(intel_fb->obj);
+	drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
 
 	kfree(intel_fb);
 }
 
 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-						struct drm_file *file_priv,
+						struct drm_file *file,
 						unsigned int *handle)
 {
 	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-	struct drm_gem_object *object = intel_fb->obj;
+	struct drm_i915_gem_object *obj = intel_fb->obj;
 
-	return drm_gem_handle_create(file_priv, object, handle);
+	return drm_gem_handle_create(file, &obj->base, handle);
 }
 
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5458,12 +5815,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 int intel_framebuffer_init(struct drm_device *dev,
 			   struct intel_framebuffer *intel_fb,
 			   struct drm_mode_fb_cmd *mode_cmd,
-			   struct drm_gem_object *obj)
+			   struct drm_i915_gem_object *obj)
 {
-	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	int ret;
 
-	if (obj_priv->tiling_mode == I915_TILING_Y)
+	if (obj->tiling_mode == I915_TILING_Y)
 		return -EINVAL;
 
 	if (mode_cmd->pitch & 63)
@@ -5495,11 +5851,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_file *filp,
 			      struct drm_mode_fb_cmd *mode_cmd)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 	struct intel_framebuffer *intel_fb;
 	int ret;
 
-	obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+	obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
 	if (!obj)
 		return ERR_PTR(-ENOENT);
 
@@ -5507,10 +5863,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
 	if (!intel_fb)
 		return ERR_PTR(-ENOMEM);
 
-	ret = intel_framebuffer_init(dev, intel_fb,
-				     mode_cmd, obj);
+	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
 	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
+		drm_gem_object_unreference_unlocked(&obj->base);
 		kfree(intel_fb);
 		return ERR_PTR(ret);
 	}
@@ -5523,10 +5878,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
 	.output_poll_changed = intel_fb_output_poll_changed,
 };
 
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
 intel_alloc_context_page(struct drm_device *dev)
 {
-	struct drm_gem_object *ctx;
+	struct drm_i915_gem_object *ctx;
 	int ret;
 
 	ctx = i915_gem_alloc_object(dev, 4096);
@@ -5536,7 +5891,7 @@ intel_alloc_context_page(struct drm_device *dev)
 	}
 
 	mutex_lock(&dev->struct_mutex);
-	ret = i915_gem_object_pin(ctx, 4096);
+	ret = i915_gem_object_pin(ctx, 4096, true);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
@@ -5554,7 +5909,7 @@ intel_alloc_context_page(struct drm_device *dev)
 err_unpin:
 	i915_gem_object_unpin(ctx);
 err_unref:
-	drm_gem_object_unreference(ctx);
+	drm_gem_object_unreference(&ctx->base);
 	mutex_unlock(&dev->struct_mutex);
 	return NULL;
 }
@@ -5666,6 +6021,25 @@ void ironlake_disable_drps(struct drm_device *dev)
 
 }
 
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 swreq;
+
+	swreq = (val & 0x3ff) << 25;
+	I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
+void gen6_disable_rps(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+	I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+	I915_WRITE(GEN6_PMIER, 0);
+	I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
 static unsigned long intel_pxfreq(u32 vidfreq)
 {
 	unsigned long freq;
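Note: gen6_set_rps() packs the requested frequency into the top of GEN6_RPNSWREQ, and the shift-and-mask is easy to check in isolation. A stand-alone sketch of that encoding; the 50 MHz unit is an assumption about Sandybridge, not stated in the diff:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors swreq = (val & 0x3ff) << 25 from gen6_set_rps(). */
    static uint32_t rpnswreq(uint8_t val)
    {
        return ((uint32_t)val & 0x3ff) << 25;
    }

    int main(void)
    {
        /* val is in hardware frequency units (assumed 50 MHz steps). */
        printf("RPNSWREQ for val=10: 0x%08x\n", rpnswreq(10));
        return 0;
    }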
@@ -5752,7 +6126,96 @@ void intel_init_emon(struct drm_device *dev)
 	dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
 }
 
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+	int i;
+
+	/* Here begins a magic sequence of register writes to enable
+	 * auto-downclocking.
+	 *
+	 * Perhaps there might be some value in exposing these to
+	 * userspace...
+	 */
+	I915_WRITE(GEN6_RC_STATE, 0);
+	__gen6_force_wake_get(dev_priv);
+
+	/* disable the counters and set deterministic thresholds */
+	I915_WRITE(GEN6_RC_CONTROL, 0);
+
+	I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+	I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+	I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+	I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+	I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+	for (i = 0; i < I915_NUM_RINGS; i++)
+		I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+	I915_WRITE(GEN6_RC_SLEEP, 0);
+	I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+	I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+	I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+	I915_WRITE(GEN6_RC_CONTROL,
+		   GEN6_RC_CTL_RC6p_ENABLE |
+		   GEN6_RC_CTL_RC6_ENABLE |
+		   GEN6_RC_CTL_EI_MODE(1) |
+		   GEN6_RC_CTL_HW_ENABLE);
+
+	I915_WRITE(GEN6_RPNSWREQ,
+		   GEN6_FREQUENCY(10) |
+		   GEN6_OFFSET(0) |
+		   GEN6_AGGRESSIVE_TURBO);
+	I915_WRITE(GEN6_RC_VIDEO_FREQ,
+		   GEN6_FREQUENCY(12));
+
+	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+	I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+		   18 << 24 |
+		   6 << 16);
+	I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+	I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+	I915_WRITE(GEN6_RP_UP_EI, 100000);
+	I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+	I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+	I915_WRITE(GEN6_RP_CONTROL,
+		   GEN6_RP_MEDIA_TURBO |
+		   GEN6_RP_USE_NORMAL_FREQ |
+		   GEN6_RP_MEDIA_IS_GFX |
+		   GEN6_RP_ENABLE |
+		   GEN6_RP_UP_BUSY_MAX |
+		   GEN6_RP_DOWN_BUSY_MIN);
+
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+	I915_WRITE(GEN6_PCODE_DATA, 0);
+	I915_WRITE(GEN6_PCODE_MAILBOX,
+		   GEN6_PCODE_READY |
+		   GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+	if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+		     500))
+		DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+	/* requires MSI enabled */
+	I915_WRITE(GEN6_PMIER,
+		   GEN6_PM_MBOX_EVENT |
+		   GEN6_PM_THERMAL_EVENT |
+		   GEN6_PM_RP_DOWN_TIMEOUT |
+		   GEN6_PM_RP_UP_THRESHOLD |
+		   GEN6_PM_RP_DOWN_THRESHOLD |
+		   GEN6_PM_RP_UP_EI_EXPIRED |
+		   GEN6_PM_RP_DOWN_EI_EXPIRED);
+	I915_WRITE(GEN6_PMIMR, 0);
+	/* enable all PM interrupts */
+	I915_WRITE(GEN6_PMINTRMSK, 0);
+
+	__gen6_force_wake_put(dev_priv);
+}
+
+void intel_enable_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
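Note: two details of gen6_enable_rps() are worth calling out: the whole register sequence is bracketed by __gen6_force_wake_get()/__gen6_force_wake_put(), and the pcode mailbox is polled with wait_for(COND, ms). A user-space analogue of that bounded poll; names and timing are illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    /* Poll cond() until true or timeout_ms elapses, like the driver's
     * wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, 500). */
    static bool wait_for_cond(bool (*cond)(void), long timeout_ms)
    {
        struct timespec start, now;
        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
            if (cond())
                return true;
            clock_gettime(CLOCK_MONOTONIC, &now);
            long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                              (now.tv_nsec - start.tv_nsec) / 1000000;
            if (elapsed_ms > timeout_ms)
                return false; /* caller logs a timeout error */
        }
    }

    static bool mailbox_idle(void) { return true; } /* stand-in condition */

    int main(void)
    {
        printf("idle: %d\n", wait_for_cond(mailbox_idle, 500));
        return 0;
    }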
@@ -5800,9 +6263,9 @@ void intel_init_clock_gating(struct drm_device *dev)
 		I915_WRITE(DISP_ARB_CTL,
 			   (I915_READ(DISP_ARB_CTL) |
 			    DISP_FBC_WM_DIS));
 		I915_WRITE(WM3_LP_ILK, 0);
 		I915_WRITE(WM2_LP_ILK, 0);
 		I915_WRITE(WM1_LP_ILK, 0);
 	}
 	/*
 	 * Based on the document from hardware guys the following bits
@@ -5824,7 +6287,49 @@ void intel_init_clock_gating(struct drm_device *dev)
 			   ILK_DPFC_DIS2 |
 			   ILK_CLK_FBC);
 		}
-		return;
+
+		I915_WRITE(ILK_DISPLAY_CHICKEN2,
+			   I915_READ(ILK_DISPLAY_CHICKEN2) |
+			   ILK_ELPIN_409_SELECT);
+
+		if (IS_GEN5(dev)) {
+			I915_WRITE(_3D_CHICKEN2,
+				   _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+				   _3D_CHICKEN2_WM_READ_PIPELINED);
+		}
+
+		if (IS_GEN6(dev)) {
+			I915_WRITE(WM3_LP_ILK, 0);
+			I915_WRITE(WM2_LP_ILK, 0);
+			I915_WRITE(WM1_LP_ILK, 0);
+
+			/*
+			 * According to the spec the following bits should be
+			 * set in order to enable memory self-refresh and fbc:
+			 * The bit21 and bit22 of 0x42000
+			 * The bit21 and bit22 of 0x42004
+			 * The bit5 and bit7 of 0x42020
+			 * The bit14 of 0x70180
+			 * The bit14 of 0x71180
+			 */
+			I915_WRITE(ILK_DISPLAY_CHICKEN1,
+				   I915_READ(ILK_DISPLAY_CHICKEN1) |
+				   ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+			I915_WRITE(ILK_DISPLAY_CHICKEN2,
+				   I915_READ(ILK_DISPLAY_CHICKEN2) |
+				   ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+			I915_WRITE(ILK_DSPCLK_GATE,
+				   I915_READ(ILK_DSPCLK_GATE) |
+				   ILK_DPARB_CLK_GATE |
+				   ILK_DPFD_CLK_GATE);
+
+			I915_WRITE(DSPACNTR,
+				   I915_READ(DSPACNTR) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
+			I915_WRITE(DSPBCNTR,
+				   I915_READ(DSPBCNTR) |
+				   DISPPLANE_TRICKLE_FEED_DISABLE);
+		}
 	} else if (IS_G4X(dev)) {
 		uint32_t dspclk_gate;
 		I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5867,20 +6372,18 @@ void intel_init_clock_gating(struct drm_device *dev)
 	 * GPU can automatically power down the render unit if given a page
 	 * to save state.
 	 */
-	if (IS_IRONLAKE_M(dev)) {
+	if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
 		if (dev_priv->renderctx == NULL)
 			dev_priv->renderctx = intel_alloc_context_page(dev);
 		if (dev_priv->renderctx) {
-			struct drm_i915_gem_object *obj_priv;
-			obj_priv = to_intel_bo(dev_priv->renderctx);
-			if (obj_priv) {
-				BEGIN_LP_RING(4);
+			struct drm_i915_gem_object *obj = dev_priv->renderctx;
+			if (BEGIN_LP_RING(4) == 0) {
 				OUT_RING(MI_SET_CONTEXT);
-				OUT_RING(obj_priv->gtt_offset |
+				OUT_RING(obj->gtt_offset |
 					 MI_MM_SPACE_GTT |
 					 MI_SAVE_EXT_STATE_EN |
 					 MI_RESTORE_EXT_STATE_EN |
 					 MI_RESTORE_INHIBIT);
 				OUT_RING(MI_NOOP);
 				OUT_RING(MI_FLUSH);
 				ADVANCE_LP_RING();
@@ -5890,29 +6393,45 @@ void intel_init_clock_gating(struct drm_device *dev)
 				  "Disable RC6\n");
 	}
 
-	if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
-		struct drm_i915_gem_object *obj_priv = NULL;
-
+	if (IS_GEN4(dev) && IS_MOBILE(dev)) {
+		if (dev_priv->pwrctx == NULL)
+			dev_priv->pwrctx = intel_alloc_context_page(dev);
 		if (dev_priv->pwrctx) {
-			obj_priv = to_intel_bo(dev_priv->pwrctx);
-		} else {
-			struct drm_gem_object *pwrctx;
-
-			pwrctx = intel_alloc_context_page(dev);
-			if (pwrctx) {
-				dev_priv->pwrctx = pwrctx;
-				obj_priv = to_intel_bo(pwrctx);
-			}
-		}
-
-		if (obj_priv) {
-			I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+			struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+			I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
 			I915_WRITE(MCHBAR_RENDER_STANDBY,
 				   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
 		}
 	}
 }
 
+void intel_disable_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (dev_priv->renderctx) {
+		struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+		I915_WRITE(CCID, 0);
+		POSTING_READ(CCID);
+
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(&obj->base);
+		dev_priv->renderctx = NULL;
+	}
+
+	if (dev_priv->pwrctx) {
+		struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+		I915_WRITE(PWRCTXA, 0);
+		POSTING_READ(PWRCTXA);
+
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(&obj->base);
+		dev_priv->pwrctx = NULL;
+	}
+}
+
 /* Set up chip specific display functions */
 static void intel_init_display(struct drm_device *dev)
 {
@@ -5925,7 +6444,7 @@ static void intel_init_display(struct drm_device *dev)
 	dev_priv->display.dpms = i9xx_crtc_dpms;
 
 	if (I915_HAS_FBC(dev)) {
-		if (IS_IRONLAKE_M(dev)) {
+		if (HAS_PCH_SPLIT(dev)) {
 			dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
 			dev_priv->display.enable_fbc = ironlake_enable_fbc;
 			dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -5974,6 +6493,14 @@ static void intel_init_display(struct drm_device *dev)
 					      "Disable CxSR\n");
 				dev_priv->display.update_wm = NULL;
 			}
+		} else if (IS_GEN6(dev)) {
+			if (SNB_READ_WM0_LATENCY()) {
+				dev_priv->display.update_wm = sandybridge_update_wm;
+			} else {
+				DRM_DEBUG_KMS("Failed to read display plane latency. "
+					      "Disable CxSR\n");
+				dev_priv->display.update_wm = NULL;
+			}
 		} else
 			dev_priv->display.update_wm = NULL;
 	} else if (IS_PINEVIEW(dev)) {
@@ -6139,7 +6666,7 @@ void intel_modeset_init(struct drm_device *dev)
 
 	intel_setup_outputs(dev);
 
-	intel_init_clock_gating(dev);
+	intel_enable_clock_gating(dev);
 
 	/* Just disable it once at startup */
 	i915_disable_vga(dev);
@@ -6149,6 +6676,9 @@ void intel_modeset_init(struct drm_device *dev)
 		intel_init_emon(dev);
 	}
 
+	if (IS_GEN6(dev))
+		gen6_enable_rps(dev_priv);
+
 	INIT_WORK(&dev_priv->idle_work, intel_idle_update);
 	setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
 		    (unsigned long)dev);
@@ -6180,28 +6710,12 @@ void intel_modeset_cleanup(struct drm_device *dev)
 	if (dev_priv->display.disable_fbc)
 		dev_priv->display.disable_fbc(dev);
 
-	if (dev_priv->renderctx) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = to_intel_bo(dev_priv->renderctx);
-		I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
-		I915_READ(CCID);
-		i915_gem_object_unpin(dev_priv->renderctx);
-		drm_gem_object_unreference(dev_priv->renderctx);
-	}
-
-	if (dev_priv->pwrctx) {
-		struct drm_i915_gem_object *obj_priv;
-
-		obj_priv = to_intel_bo(dev_priv->pwrctx);
-		I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
-		I915_READ(PWRCTXA);
-		i915_gem_object_unpin(dev_priv->pwrctx);
-		drm_gem_object_unreference(dev_priv->pwrctx);
-	}
-
 	if (IS_IRONLAKE_M(dev))
 		ironlake_disable_drps(dev);
+	if (IS_GEN6(dev))
+		gen6_disable_rps(dev);
+
+	intel_disable_clock_gating(dev);
 
 	mutex_unlock(&dev->struct_mutex);
 
@@ -6253,3 +6767,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
 	return 0;
 }
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+	struct intel_cursor_error_state {
+		u32 control;
+		u32 position;
+		u32 base;
+		u32 size;
+	} cursor[2];
+
+	struct intel_pipe_error_state {
+		u32 conf;
+		u32 source;
+
+		u32 htotal;
+		u32 hblank;
+		u32 hsync;
+		u32 vtotal;
+		u32 vblank;
+		u32 vsync;
+	} pipe[2];
+
+	struct intel_plane_error_state {
+		u32 control;
+		u32 stride;
+		u32 size;
+		u32 pos;
+		u32 addr;
+		u32 surface;
+		u32 tile_offset;
+	} plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_display_error_state *error;
+	int i;
+
+	error = kmalloc(sizeof(*error), GFP_ATOMIC);
+	if (error == NULL)
+		return NULL;
+
+	for (i = 0; i < 2; i++) {
+		error->cursor[i].control = I915_READ(CURCNTR(i));
+		error->cursor[i].position = I915_READ(CURPOS(i));
+		error->cursor[i].base = I915_READ(CURBASE(i));
+
+		error->plane[i].control = I915_READ(DSPCNTR(i));
+		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+		error->plane[i].size = I915_READ(DSPSIZE(i));
+		error->plane[i].pos = I915_READ(DSPPOS(i));
+		error->plane[i].addr = I915_READ(DSPADDR(i));
+		if (INTEL_INFO(dev)->gen >= 4) {
+			error->plane[i].surface = I915_READ(DSPSURF(i));
+			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+		}
+
+		error->pipe[i].conf = I915_READ(PIPECONF(i));
+		error->pipe[i].source = I915_READ(PIPESRC(i));
+		error->pipe[i].htotal = I915_READ(HTOTAL(i));
+		error->pipe[i].hblank = I915_READ(HBLANK(i));
+		error->pipe[i].hsync = I915_READ(HSYNC(i));
+		error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+		error->pipe[i].vblank = I915_READ(VBLANK(i));
+		error->pipe[i].vsync = I915_READ(VSYNC(i));
+	}
+
+	return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+				struct drm_device *dev,
+				struct intel_display_error_state *error)
+{
+	int i;
+
+	for (i = 0; i < 2; i++) {
+		seq_printf(m, "Pipe [%d]:\n", i);
+		seq_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
+		seq_printf(m, "  SRC: %08x\n", error->pipe[i].source);
+		seq_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
+		seq_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
+		seq_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
+		seq_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
+		seq_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
+		seq_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);
+
+		seq_printf(m, "Plane [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->plane[i].control);
+		seq_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
+		seq_printf(m, "  SIZE: %08x\n", error->plane[i].size);
+		seq_printf(m, "  POS: %08x\n", error->plane[i].pos);
+		seq_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
+		if (INTEL_INFO(dev)->gen >= 4) {
+			seq_printf(m, "  SURF: %08x\n", error->plane[i].surface);
+			seq_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
+		}
+
+		seq_printf(m, "Cursor [%d]:\n", i);
+		seq_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
+		seq_printf(m, "  POS: %08x\n", error->cursor[i].position);
+		seq_printf(m, "  BASE: %08x\n", error->cursor[i].base);
+	}
+}
+#endif
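Note: the capture/print split above exists so the snapshot can run in atomic context (hence kmalloc(..., GFP_ATOMIC) and one plain register read per field) while formatting happens later from process context. A user-space analogue of that two-phase pattern:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct pipe_snapshot {
        uint32_t conf, source;
    };

    /* Phase 1: copy volatile state quickly, no formatting, no blocking. */
    static struct pipe_snapshot *capture(const volatile uint32_t *regs)
    {
        struct pipe_snapshot *s = malloc(sizeof(*s));
        if (!s)
            return NULL;
        s->conf = regs[0];
        s->source = regs[1];
        return s;
    }

    /* Phase 2: pretty-print the snapshot at leisure. */
    static void print(FILE *m, const struct pipe_snapshot *s)
    {
        fprintf(m, "  CONF: %08x\n", s->conf);
        fprintf(m, "  SRC: %08x\n", s->source);
    }

    int main(void)
    {
        volatile uint32_t regs[2] = { 0xc0000000, 0x04ff03ff };
        struct pipe_snapshot *s = capture(regs);
        if (s) {
            print(stdout, s);
            free(s);
        }
        return 0;
    }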
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c8e005553310..1dc60408d5b8 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 	uint16_t address = algo_data->address;
 	uint8_t msg[5];
 	uint8_t reply[2];
+	unsigned retry;
 	int msg_bytes;
 	int reply_bytes;
 	int ret;
@@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 		break;
 	}
 
-	for (;;) {
+	for (retry = 0; retry < 5; retry++) {
 		ret = intel_dp_aux_ch(intel_dp,
 				      msg, msg_bytes,
 				      reply, reply_bytes);
 		if (ret < 0) {
 			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
 			return ret;
 		}
+
+		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+		case AUX_NATIVE_REPLY_ACK:
+			/* I2C-over-AUX Reply field is only valid
+			 * when paired with AUX ACK.
+			 */
+			break;
+		case AUX_NATIVE_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch native nack\n");
+			return -EREMOTEIO;
+		case AUX_NATIVE_REPLY_DEFER:
+			udelay(100);
+			continue;
+		default:
+			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+				  reply[0]);
+			return -EREMOTEIO;
+		}
+
 		switch (reply[0] & AUX_I2C_REPLY_MASK) {
 		case AUX_I2C_REPLY_ACK:
 			if (mode == MODE_I2C_READ) {
@@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
 			}
 			return reply_bytes - 1;
 		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG_KMS("aux_ch nack\n");
+			DRM_DEBUG_KMS("aux_i2c nack\n");
 			return -EREMOTEIO;
 		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG_KMS("aux_ch defer\n");
+			DRM_DEBUG_KMS("aux_i2c defer\n");
 			udelay(100);
 			break;
 		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
 			return -EREMOTEIO;
 		}
 	}
+
+	DRM_ERROR("too many retries, giving up\n");
+	return -EREMOTEIO;
 }
 
 static int
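Note: the retry loop now classifies the native-AUX reply field first and only treats the I2C-over-AUX field as meaningful after a native ACK, with DEFER bounded at five attempts. A compilable sketch of that outer classification; the field encodings are illustrative and the transaction itself is stubbed:

    #include <stdint.h>
    #include <stdio.h>

    #define AUX_NATIVE_REPLY_MASK   0x30 /* illustrative field positions */
    #define AUX_NATIVE_REPLY_ACK    0x00
    #define AUX_NATIVE_REPLY_NACK   0x10
    #define AUX_NATIVE_REPLY_DEFER  0x20

    /* Returns 0 once the native field ACKs (the I2C field would then be
     * inspected), -1 on NACK, invalid reply, or too many retries. */
    static int aux_xfer(uint8_t (*xfer)(void))
    {
        for (unsigned retry = 0; retry < 5; retry++) {
            uint8_t reply = xfer();
            switch (reply & AUX_NATIVE_REPLY_MASK) {
            case AUX_NATIVE_REPLY_ACK:
                return 0;       /* go on to the I2C-over-AUX field */
            case AUX_NATIVE_REPLY_NACK:
                return -1;
            case AUX_NATIVE_REPLY_DEFER:
                continue;       /* sink busy: retry */
            default:
                return -1;      /* invalid native reply */
            }
        }
        return -1;              /* too many retries, giving up */
    }

    static uint8_t always_defer(void) { return AUX_NATIVE_REPLY_DEFER; }

    int main(void)
    {
        printf("deferring sink -> %d\n", aux_xfer(always_defer));
        return 0;
    }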
@@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		mode->clock = dev_priv->panel_fixed_mode->clock;
 	}
 
-	/* Just use VBT values for eDP */
-	if (is_edp(intel_dp)) {
-		intel_dp->lane_count = dev_priv->edp.lanes;
-		intel_dp->link_bw = dev_priv->edp.rate;
-		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
-		DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
-			      intel_dp->link_bw, intel_dp->lane_count,
-			      adjusted_mode->clock);
-		return true;
-	}
-
 	for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
 		for (clock = 0; clock <= max_clock; clock++) {
 			int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		}
 	}
 
+	if (is_edp(intel_dp)) {
+		/* okay we failed just pick the highest */
+		intel_dp->lane_count = max_lane_count;
+		intel_dp->link_bw = bws[max_clock];
+		adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+		DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+			      "count %d clock %d\n",
+			      intel_dp->link_bw, intel_dp->lane_count,
+			      adjusted_mode->clock);
+
+		return true;
+	}
+
 	return false;
 }
 
@@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-intel_dp_signal_levels(struct intel_dp *intel_dp)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
 {
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t signal_levels = 0;
-	u8 train_set = intel_dp->train_set[0];
-	u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
-	u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+	uint32_t signal_levels = 0;
 
-	if (is_edp(intel_dp)) {
-		vswing = dev_priv->edp.vswing;
-		preemphasis = dev_priv->edp.preemphasis;
-	}
-
-	switch (vswing) {
+	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
 	case DP_TRAIN_VOLTAGE_SWING_400:
 	default:
 		signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
 		signal_levels |= DP_VOLTAGE_1_2;
 		break;
 	}
-	switch (preemphasis) {
+	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
 	case DP_TRAIN_PRE_EMPHASIS_0:
 	default:
 		signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1218,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
 }
 
 static bool
-intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-{
-	struct drm_device *dev = intel_dp->base.base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
-		return false;
-
-	return true;
-}
-
-static bool
 intel_dp_set_link_train(struct intel_dp *intel_dp,
 			uint32_t dp_reg_value,
 			uint8_t dp_train_pat)
@@ -1226,9 +1229,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 	I915_WRITE(intel_dp->output_reg, dp_reg_value);
 	POSTING_READ(intel_dp->output_reg);
 
-	if (!intel_dp_aux_handshake_required(intel_dp))
-		return true;
-
 	intel_dp_aux_native_write_1(intel_dp,
 				    DP_TRAINING_PATTERN_SET,
 				    dp_train_pat);
@@ -1261,11 +1261,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	POSTING_READ(intel_dp->output_reg);
 	intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-	if (intel_dp_aux_handshake_required(intel_dp))
-		/* Write the link configuration data */
-		intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
-					  intel_dp->link_configuration,
-					  DP_LINK_CONFIGURATION_SIZE);
+	/* Write the link configuration data */
+	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+				  intel_dp->link_configuration,
+				  DP_LINK_CONFIGURATION_SIZE);
 
 	DP |= DP_PORT_EN;
 	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 		DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 	} else {
-		signal_levels = intel_dp_signal_levels(intel_dp);
+		signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
 		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 	}
 
@@ -1297,37 +1296,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 			break;
 		/* Set training pattern 1 */
 
-		udelay(500);
-		if (intel_dp_aux_handshake_required(intel_dp)) {
+		udelay(100);
+		if (!intel_dp_get_link_status(intel_dp))
 			break;
-		} else {
-			if (!intel_dp_get_link_status(intel_dp))
-				break;
 
 		if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
 			clock_recovery = true;
 			break;
 		}
 
 		/* Check to see if we've tried the max voltage */
 		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
-				break;
-			if (i == intel_dp->lane_count)
 				break;
+		if (i == intel_dp->lane_count)
+			break;
 
 		/* Check to see if we've tried the same voltage 5 times */
 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
 			++tries;
 			if (tries == 5)
 				break;
 		} else
 			tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Compute new intel_dp->train_set as requested by target */
 		intel_get_adjust_train(intel_dp);
-		}
 	}
 
 	intel_dp->DP = DP;
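Note: the clock-recovery loop's escape hatch is the five-strikes voltage counter: if the sink keeps requesting the same voltage swing, training stops instead of spinning forever. A stand-alone sketch of that counter; the mask value is illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define VSWING_MASK 0x3 /* stands in for DP_TRAIN_VOLTAGE_SWING_MASK */

    int main(void)
    {
        uint8_t requested[] = { 0, 1, 1, 1, 1, 1, 1 }; /* swing per iteration */
        uint8_t voltage = 0xff; /* impossible initial value */
        int tries = 0;

        for (unsigned i = 0; i < sizeof(requested); i++) {
            uint8_t v = requested[i] & VSWING_MASK;
            /* Same voltage requested again? Count it; five in a row
             * means the link is not converging, so give up. */
            if (v == voltage) {
                if (++tries == 5) {
                    printf("giving up at iteration %u\n", i);
                    break;
                }
            } else
                tries = 0;
            voltage = v;
        }
        return 0;
    }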
@@ -1354,7 +1349,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 		DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 	} else {
-		signal_levels = intel_dp_signal_levels(intel_dp);
+		signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
 		DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 	}
 
@@ -1368,28 +1363,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 					     DP_TRAINING_PATTERN_2))
 			break;
 
-		udelay(500);
-
-		if (!intel_dp_aux_handshake_required(intel_dp)) {
+		udelay(400);
+		if (!intel_dp_get_link_status(intel_dp))
 			break;
-		} else {
-			if (!intel_dp_get_link_status(intel_dp))
-				break;
 
 		if (intel_channel_eq_ok(intel_dp)) {
 			channel_eq = true;
 			break;
 		}
 
 		/* Try 5 times */
 		if (tries > 5)
 			break;
 
 		/* Compute new intel_dp->train_set as requested by target */
 		intel_get_adjust_train(intel_dp);
 		++tries;
-		}
 	}
+
 	if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
 		reg = DP | DP_LINK_TRAIN_OFF_CPT;
 	else
@@ -1408,6 +1399,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t DP = intel_dp->DP;
 
+	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+		return;
+
 	DRM_DEBUG_KMS("\n");
 
 	if (is_edp(intel_dp)) {
@@ -1430,6 +1424,27 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	if (is_edp(intel_dp))
 		DP |= DP_LINK_TRAIN_OFF;
+
+	if (!HAS_PCH_CPT(dev) &&
+	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+		struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+		/* Hardware workaround: leaving our transcoder select
+		 * set to transcoder B while it's off will prevent the
+		 * corresponding HDMI output on transcoder A.
+		 *
+		 * Combine this with another hardware workaround:
+		 * transcoder select bit can only be cleared while the
+		 * port is enabled.
+		 */
+		DP &= ~DP_PIPEB_SELECT;
+		I915_WRITE(intel_dp->output_reg, DP);
+
+		/* Changes to enable or select take place the vblank
+		 * after being written.
+		 */
+		intel_wait_for_vblank(dev, intel_crtc->pipe);
+	}
+
 	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
 	POSTING_READ(intel_dp->output_reg);
 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 21551fe74541..d782ad9fd6db 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
 
 struct intel_framebuffer {
 	struct drm_framebuffer base;
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj;
 };
 
 struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
 	struct intel_unpin_work *unpin_work;
 	int fdi_lanes;
 
-	struct drm_gem_object *cursor_bo;
+	struct drm_i915_gem_object *cursor_bo;
 	uint32_t cursor_addr;
 	int16_t cursor_x, cursor_y;
 	int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 struct intel_unpin_work {
 	struct work_struct work;
 	struct drm_device *dev;
-	struct drm_gem_object *old_fb_obj;
-	struct drm_gem_object *pending_flip_obj;
+	struct drm_i915_gem_object *old_fb_obj;
+	struct drm_i915_gem_object *pending_flip_obj;
 	struct drm_pending_vblank_event *event;
 	int pending;
 	bool enable_stall_check;
@@ -236,8 +236,9 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev,
+			    struct drm_i915_gem_object *obj);
+extern bool intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 				    u16 blue, int regno);
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
 extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_gem_object *obj,
-				      bool pipelined);
+				      struct drm_i915_gem_object *obj,
+				      struct intel_ring_buffer *pipelined);
 
 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct drm_mode_fb_cmd *mode_cmd,
-				  struct drm_gem_object *obj);
+				  struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ced3eef8da07..67738f32dfd4 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	struct fb_info *info;
 	struct drm_framebuffer *fb;
 	struct drm_mode_fb_cmd mode_cmd;
-	struct drm_gem_object *fbo = NULL;
-	struct drm_i915_gem_object *obj_priv;
+	struct drm_i915_gem_object *obj;
 	struct device *device = &dev->pdev->dev;
 	int size, ret;
 
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = ALIGN(size, PAGE_SIZE);
-	fbo = i915_gem_alloc_object(dev, size);
-	if (!fbo) {
+	obj = i915_gem_alloc_object(dev, size);
+	if (!obj) {
 		DRM_ERROR("failed to allocate framebuffer\n");
 		ret = -ENOMEM;
 		goto out;
 	}
-	obj_priv = to_intel_bo(fbo);
 
 	mutex_lock(&dev->struct_mutex);
 
 	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+	ret = intel_pin_and_fence_fb_obj(dev, obj, false);
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	info->par = ifbdev;
 
-	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+	ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
 	if (ret)
 		goto out_unpin;
 
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	else
 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
-	info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
 	info->fix.smem_len = size;
 
-	info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
-				       size);
+	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
 	if (!info->screen_base) {
 		ret = -ENOSPC;
 		goto out_unpin;
@@ -164,7 +161,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
 		      fb->width, fb->height,
-		      obj_priv->gtt_offset, fbo);
+		      obj->gtt_offset, obj);
 
 
 	mutex_unlock(&dev->struct_mutex);
@@ -172,9 +169,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(fbo);
+	i915_gem_object_unpin(obj);
 out_unref:
-	drm_gem_object_unreference(fbo);
+	drm_gem_object_unreference(&obj->base);
 	mutex_unlock(&dev->struct_mutex);
 out:
 	return ret;
@@ -221,7 +218,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
 
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
-		drm_gem_object_unreference_unlocked(ifb->obj);
+		drm_gem_object_unreference_unlocked(&ifb->obj->base);
 		ifb->obj = NULL;
 	}
 }
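
A note on the conversion running through this file: struct drm_i915_gem_object embeds its struct drm_gem_object as a `base` member, so the old to_intel_bo() lookups disappear from the hot paths and the core refcounting calls simply take `&obj->base`. A minimal sketch of the pattern, using abbreviated, hypothetical type names rather than the driver's real definitions:

	/* Illustration only: the driver object embeds the core GEM object,
	 * so upcasting is taking the address of the member and downcasting
	 * is a container_of(). */
	#include <stddef.h>

	struct gem_object { unsigned long refcount; size_t size; };

	struct i915_gem_object {
		struct gem_object base;		/* embedded core object */
		unsigned int gtt_offset;
	};

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	static inline struct i915_gem_object *to_intel(struct gem_object *b)
	{
		return container_of(b, struct i915_gem_object, base);
	}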
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086e7eea..58040f68ed7a 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
 
 	/* On most chips, these bits must be preserved in software. */
 	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+		reserved = I915_READ_NOTRACE(gpio->reg) &
+					     (GPIO_DATA_PULLUP_DISABLE |
+					      GPIO_CLOCK_PULLUP_DISABLE);
 
 	return reserved;
 }
@@ -96,9 +97,9 @@ static int get_clock(void *data)
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 
 static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
 	struct intel_gpio *gpio = data;
 	struct drm_i915_private *dev_priv = gpio->dev_priv;
 	u32 reserved = get_reserved(gpio);
-	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
-	I915_WRITE(gpio->reg, reserved);
-	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+	I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE_NOTRACE(gpio->reg, reserved);
+	return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 
 static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
 	clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 		GPIO_CLOCK_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | clock_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
 	POSTING_READ(gpio->reg);
 }
 
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
 	data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 		GPIO_DATA_VAL_MASK;
 
-	I915_WRITE(gpio->reg, reserved | data_bits);
+	I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
 	POSTING_READ(gpio->reg);
 }
 
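
The switch to the _NOTRACE register accessors above deserves a remark: GPIO bit-banging touches the same register for every bit on the wire, so a single EDID fetch generates thousands of MMIO events. Assuming the NOTRACE variants differ from I915_READ/I915_WRITE only in skipping the mmio tracepoint (which is how the naming reads, though the macro bodies are not part of this diff), the split looks roughly like this sketch:

	/* Hypothetical shape of the traced/untraced accessor pair; not the
	 * driver's actual macro definitions, and trace_mmio_read() is a
	 * made-up hook standing in for the real tracepoint. */
	#define EXAMPLE_READ_NOTRACE(dev_priv, reg) \
		readl((dev_priv)->regs + (reg))
	#define EXAMPLE_READ(dev_priv, reg) ({			\
		u32 __v = EXAMPLE_READ_NOTRACE(dev_priv, reg);	\
		trace_mmio_read((reg), __v);			\
		__v;						\
	})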
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4324a326f98e..aa2307080be2 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
 /**
  * Sets the power state for the panel.
  */
-static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
 {
 	struct drm_device *dev = intel_lvds->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
 		lvds_reg = LVDS;
 	}
 
-	if (on) {
-		I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
-		intel_panel_set_backlight(dev, dev_priv->backlight_level);
-	} else {
-		dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
-		intel_panel_set_backlight(dev, 0);
-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+	I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
 
-		if (intel_lvds->pfit_control) {
-			if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
-				DRM_ERROR("timed out waiting for panel to power off\n");
-			I915_WRITE(PFIT_CONTROL, 0);
-			intel_lvds->pfit_control = 0;
+	if (intel_lvds->pfit_dirty) {
+		/*
+		 * Enable automatic panel scaling so that non-native modes
+		 * fill the screen. The panel fitter should only be
+		 * adjusted whilst the pipe is disabled, according to
+		 * register description and PRM.
+		 */
+		DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+			      intel_lvds->pfit_control,
+			      intel_lvds->pfit_pgm_ratios);
+		if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+			DRM_ERROR("timed out waiting for panel to power off\n");
+		} else {
+			I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+			I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
 			intel_lvds->pfit_dirty = false;
 		}
+	}
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+	POSTING_READ(lvds_reg);
 
-		I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+	intel_panel_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+	struct drm_device *dev = intel_lvds->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 ctl_reg, lvds_reg;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		ctl_reg = PCH_PP_CONTROL;
+		lvds_reg = PCH_LVDS;
+	} else {
+		ctl_reg = PP_CONTROL;
+		lvds_reg = LVDS;
 	}
+
+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
+	intel_panel_set_backlight(dev, 0);
+
+	I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+
+	if (intel_lvds->pfit_control) {
+		if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+			DRM_ERROR("timed out waiting for panel to power off\n");
+
+		I915_WRITE(PFIT_CONTROL, 0);
+		intel_lvds->pfit_dirty = true;
+	}
+
+	I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
 	POSTING_READ(lvds_reg);
 }
 
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
 	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 
 	if (mode == DRM_MODE_DPMS_ON)
-		intel_lvds_set_power(intel_lvds, true);
+		intel_lvds_enable(intel_lvds);
 	else
-		intel_lvds_set_power(intel_lvds, false);
+		intel_lvds_disable(intel_lvds);
 
 	/* XXX: We never power down the LVDS pairs. */
 }
@@ -269,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 		u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
 
-		pfit_control |= PFIT_ENABLE;
 		/* 965+ is easy, it does everything in hw */
 		if (scaled_width > scaled_height)
-			pfit_control |= PFIT_SCALING_PILLAR;
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
 		else if (scaled_width < scaled_height)
-			pfit_control |= PFIT_SCALING_LETTER;
-		else
-			pfit_control |= PFIT_SCALING_AUTO;
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+		else if (adjusted_mode->hdisplay != mode->hdisplay)
+			pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
 	} else {
 		u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 		u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
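
The pillar/letter decision above relies on a small trick worth a worked example: comparing the panel and mode aspect ratios as W1/H1 versus W2/H2 is done by cross-multiplication, W1*H2 versus W2*H1, which avoids integer division entirely.

	/* Worked example (not driver code): a 1280x800 panel driving a
	 * 1024x768 mode.
	 *   scaled_width  = adjusted_mode->hdisplay * mode->vdisplay
	 *                 = 1280 * 768 = 983040
	 *   scaled_height = mode->hdisplay * adjusted_mode->vdisplay
	 *                 = 1024 * 800 = 819200
	 * scaled_width > scaled_height, i.e. the panel is proportionally
	 * wider than the mode, so the mode is centred with bars at the
	 * sides: PFIT_ENABLE | PFIT_SCALING_PILLAR. The new
	 * "else if (adjusted_mode->hdisplay != mode->hdisplay)" arm keeps
	 * the fitter off entirely when the mode already matches the panel. */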
@@ -323,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		 * Full scaling, even if it changes the aspect ratio.
 		 * Fortunately this is all done for us in hw.
 		 */
-		pfit_control |= PFIT_ENABLE;
-		if (INTEL_INFO(dev)->gen >= 4)
-			pfit_control |= PFIT_SCALING_AUTO;
-		else
-			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
-					 VERT_INTERP_BILINEAR |
-					 HORIZ_INTERP_BILINEAR);
+		if (mode->vdisplay != adjusted_mode->vdisplay ||
+		    mode->hdisplay != adjusted_mode->hdisplay) {
+			pfit_control |= PFIT_ENABLE;
+			if (INTEL_INFO(dev)->gen >= 4)
+				pfit_control |= PFIT_SCALING_AUTO;
+			else
+				pfit_control |= (VERT_AUTO_SCALE |
						 VERT_INTERP_BILINEAR |
+						 HORIZ_AUTO_SCALE |
+						 HORIZ_INTERP_BILINEAR);
+		}
 		break;
 
 	default:
@@ -411,43 +449,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
 	/* Always do a full power on as we do not know what state
 	 * we were left in.
 	 */
-	intel_lvds_set_power(intel_lvds, true);
+	intel_lvds_enable(intel_lvds);
 }
 
 static void intel_lvds_mode_set(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)
 {
-	struct drm_device *dev = encoder->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
 	/*
 	 * The LVDS pin pair will already have been turned on in the
 	 * intel_crtc_mode_set since it has a large impact on the DPLL
 	 * settings.
 	 */
-
-	if (HAS_PCH_SPLIT(dev))
-		return;
-
-	if (!intel_lvds->pfit_dirty)
-		return;
-
-	/*
-	 * Enable automatic panel scaling so that non-native modes fill the
-	 * screen. Should be enabled before the pipe is enabled, according to
-	 * register description and PRM.
-	 */
-	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
-		      intel_lvds->pfit_control,
-		      intel_lvds->pfit_pgm_ratios);
-	if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
-		DRM_ERROR("timed out waiting for panel to power off\n");
-
-	I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
-	I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
-	intel_lvds->pfit_dirty = false;
 }
 
 /**
@@ -837,7 +850,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
 * Create the connector, register the LVDS DDC bus, and try to figure out what
 * modes we can display on the LVDS panel (if present).
 */
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_lvds *intel_lvds;
@@ -853,37 +866,37 @@ void intel_lvds_init(struct drm_device *dev)
 
 	/* Skip init on machines we know falsely report LVDS */
 	if (dmi_check_system(intel_no_lvds))
-		return;
+		return false;
 
 	pin = GMBUS_PORT_PANEL;
 	if (!lvds_is_present_in_vbt(dev, &pin)) {
 		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
-		return;
+		return false;
 	}
 
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
-			return;
+			return false;
 		if (dev_priv->edp.support) {
 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
-			return;
+			return false;
 		}
 	}
 
 	if (!intel_lvds_ddc_probe(dev, pin)) {
 		DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
-		return;
+		return false;
 	}
 
 	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
 	if (!intel_lvds) {
-		return;
+		return false;
 	}
 
 	intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
 	if (!intel_connector) {
 		kfree(intel_lvds);
-		return;
+		return false;
 	}
 
 	if (!HAS_PCH_SPLIT(dev)) {
@@ -904,6 +917,8 @@ void intel_lvds_init(struct drm_device *dev)
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->crtc_mask = (1 << 1);
+	if (INTEL_INFO(dev)->gen >= 5)
+		intel_encoder->crtc_mask |= (1 << 0);
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1009,10 +1024,18 @@ void intel_lvds_init(struct drm_device *dev)
 out:
 	if (HAS_PCH_SPLIT(dev)) {
 		u32 pwm;
-		/* make sure PWM is enabled */
+
+		pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+		/* make sure PWM is enabled and locked to the LVDS pipe */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
-		pwm |= (PWM_ENABLE | PWM_PIPE_B);
-		I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+		if (pipe == 0 && (pwm & PWM_PIPE_B))
+			I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+		if (pipe)
+			pwm |= PWM_PIPE_B;
+		else
+			pwm &= ~PWM_PIPE_B;
+		I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
 
 		pwm = I915_READ(BLC_PWM_PCH_CTL1);
 		pwm |= PWM_PCH_ENABLE;
@@ -1026,7 +1049,7 @@ out:
 	/* keep the LVDS connector */
 	dev_priv->int_lvds_connector = connector;
 	drm_sysfs_connector_add(connector);
-	return;
+	return true;
 
 failed:
 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1034,4 +1057,5 @@ failed:
 	drm_encoder_cleanup(encoder);
 	kfree(intel_lvds);
 	kfree(intel_connector);
+	return false;
 }
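
intel_lvds_init() now reports whether an LVDS connector was actually registered. The call site is not part of this diff, but a hypothetical caller illustrating the intent of the new return value might look like:

	/* Hypothetical caller sketch; the real call site is not shown in
	 * this hunk. */
	static void example_setup_outputs(struct drm_device *dev)
	{
		if (!intel_lvds_init(dev))
			DRM_DEBUG_KMS("no internal LVDS panel registered\n");
	}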
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867aea..f295a7aaadf9 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 
 	if (asle) {
-		if (IS_MOBILE(dev)) {
-			unsigned long irqflags;
-
-			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+		if (IS_MOBILE(dev))
 			intel_enable_asle(dev);
-			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
-					       irqflags);
-		}
 
 		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
 			     ASLE_PFMB_EN;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a481f47..3fbb98b948d6 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
-	if (overlay->last_flip_req == 0)
-		return -ENOMEM;
-
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+	overlay->last_flip_req = request->seqno;
 	overlay->flip_tail = tail;
 	ret = i915_do_wait_request(dev,
 				   overlay->last_flip_req, true,
-				   &dev_priv->render_ring);
+				   LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
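
The hunk above establishes the error-handling contract used throughout this file: i915_add_request() now returns an errno and publishes the sequence number through request->seqno, instead of returning a seqno with 0 meaning failure, and the caller keeps ownership of the preallocated request on error. Condensed into a self-contained sketch (the helper name is hypothetical):

	static int example_submit(struct drm_device *dev,
				  struct intel_overlay *overlay,
				  struct drm_i915_gem_request *request)
	{
		drm_i915_private_t *dev_priv = dev->dev_private;
		int ret;

		ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
		if (ret) {
			kfree(request);	/* caller still owns it on failure */
			return ret;
		}
		overlay->last_flip_req = request->seqno;
		return 0;
	}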
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	int pipe_a_quirk = 0;
 	int ret;
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 		goto out;
 	}
 
-	BEGIN_LP_RING(4);
+	ret = BEGIN_LP_RING(4);
+	if (ret) {
+		kfree(request);
+		goto out;
+	}
+
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	OUT_RING(overlay->flip_addr | OFC_UPDATE);
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req =
-		i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+	ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
+	overlay->last_flip_req = request->seqno;
 	return 0;
 }
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
 }
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj = overlay->vid_bo;
 
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
-	obj = &overlay->vid_bo->base;
 
 	i915_gem_object_unpin(obj);
-	drm_gem_object_unreference(obj);
+	drm_gem_object_unreference(&obj->base);
 	overlay->vid_bo = NULL;
 
 	overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 			     bool interruptible)
 {
 	struct drm_device *dev = overlay->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_i915_gem_request *request;
+	int ret;
 
 	BUG_ON(!overlay->active);
 
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	BEGIN_LP_RING(6);
+	ret = BEGIN_LP_RING(6);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
 	/* wait for overlay to go idle */
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return 0;
 
 	ret = i915_do_wait_request(dev, overlay->last_flip_req,
-				   interruptible, &dev_priv->render_ring);
+				   interruptible, LP_RING(dev_priv));
 	if (ret)
 		return ret;
 
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (request == NULL)
 		return -ENOMEM;
 
-	BEGIN_LP_RING(2);
+	ret = BEGIN_LP_RING(2);
+	if (ret) {
+		kfree(request);
+		return ret;
+	}
+
 	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
 }
 
 static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-				      struct drm_gem_object *new_bo,
+				      struct drm_i915_gem_object *new_bo,
 				      struct put_image_params *params)
 {
 	int ret, tmp_width;
 	struct overlay_registers *regs;
 	bool scale_changed = false;
-	struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
 	struct drm_device *dev = overlay->dev;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
 	if (ret != 0)
 		return ret;
 
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		goto out_unpin;
 
+	ret = i915_gem_object_put_fence(new_bo);
+	if (ret)
+		goto out_unpin;
+
 	if (!overlay->active) {
 		regs = intel_overlay_map_regs(overlay);
 		if (!regs) {
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 					 params->src_w/uv_hscale);
 		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
 		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+		regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
 		regs->OSTRIDE |= params->stride_UV << 16;
 	}
 
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 		goto out_unpin;
 
 	overlay->old_vid_bo = overlay->vid_bo;
-	overlay->vid_bo = to_intel_bo(new_bo);
+	overlay->vid_bo = new_bo;
 
 	return 0;
 
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
 
 static int check_overlay_src(struct drm_device *dev,
 			     struct drm_intel_overlay_put_image *rec,
-			     struct drm_gem_object *new_bo)
+			     struct drm_i915_gem_object *new_bo)
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
 		return -EINVAL;
 
 	tmp = rec->stride_Y*rec->src_height;
-	if (rec->offset_Y + tmp > new_bo->size)
+	if (rec->offset_Y + tmp > new_bo->base.size)
 		return -EINVAL;
 	break;
 
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
 		return -EINVAL;
 
 	tmp = rec->stride_Y * rec->src_height;
-	if (rec->offset_Y + tmp > new_bo->size)
+	if (rec->offset_Y + tmp > new_bo->base.size)
 		return -EINVAL;
 
 	tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-	if (rec->offset_U + tmp > new_bo->size ||
-	    rec->offset_V + tmp > new_bo->size)
+	if (rec->offset_U + tmp > new_bo->base.size ||
+	    rec->offset_V + tmp > new_bo->base.size)
 		return -EINVAL;
 	break;
 }
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	struct intel_overlay *overlay;
 	struct drm_mode_object *drmmode_obj;
 	struct intel_crtc *crtc;
-	struct drm_gem_object *new_bo;
+	struct drm_i915_gem_object *new_bo;
 	struct put_image_params *params;
 	int ret;
 
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	}
 	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
-	new_bo = drm_gem_object_lookup(dev, file_priv,
-				       put_image_rec->bo_handle);
+	new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+						   put_image_rec->bo_handle));
 	if (!new_bo) {
 		ret = -ENOENT;
 		goto out_free;
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 	mutex_lock(&dev->mode_config.mutex);
 	mutex_lock(&dev->struct_mutex);
 
+	if (new_bo->tiling_mode) {
+		DRM_ERROR("buffer used for overlay image can not be tiled\n");
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	ret = intel_overlay_recover_from_interrupt(overlay, true);
 	if (ret != 0)
 		goto out_unlock;
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->mode_config.mutex);
-	drm_gem_object_unreference_unlocked(new_bo);
+	drm_gem_object_unreference_unlocked(&new_bo->base);
 out_free:
 	kfree(params);
 
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
-	struct drm_gem_object *reg_bo;
+	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers *regs;
 	int ret;
 
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
 	reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
 	if (!reg_bo)
 		goto out_free;
-	overlay->reg_bo = to_intel_bo(reg_bo);
+	overlay->reg_bo = reg_bo;
 
 	if (OVERLAY_NEEDS_PHYSICAL(dev)) {
 		ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
 			DRM_ERROR("failed to attach phys overlay regs\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
 		}
-		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+		overlay->flip_addr = reg_bo->gtt_offset;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
 out_unpin_bo:
 	i915_gem_object_unpin(reg_bo);
 out_free_bo:
-	drm_gem_object_unreference(reg_bo);
+	drm_gem_object_unreference(&reg_bo->base);
 out_free:
 	kfree(overlay);
 	return;
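
Throughout this file i915_gem_object_pin() has grown a third argument, passed as true at both call sites here. Reading it alongside the rest of the series, the boolean appears to request a mappable (GTT-visible, fenceable) placement; treat that interpretation as an assumption, since the pin implementation is not part of this diff.

	/* Sketch of the assumed call shape only; the semantics of the
	 * boolean are inferred from the wider series, not this hunk. */
	static int example_pin_scanout(struct drm_i915_gem_object *obj)
	{
		return i915_gem_object_pin(obj, PAGE_SIZE,
					   true /* assumed: mappable */);
	}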
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f385278..7350ec2515c6 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev)
 	return 0;
 }
 
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+	u32 val;
+
+	/* Restore the CTL value if it lost, e.g. GPU reset */
+
+	if (HAS_PCH_SPLIT(dev_priv->dev)) {
+		val = I915_READ(BLC_PWM_PCH_CTL2);
+		if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+			dev_priv->saveBLC_PWM_CTL2 = val;
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_PCH_CTL2,
+				   dev_priv->saveBLC_PWM_CTL);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	} else {
+		val = I915_READ(BLC_PWM_CTL);
+		if (dev_priv->saveBLC_PWM_CTL == 0) {
+			dev_priv->saveBLC_PWM_CTL = val;
+			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+		} else if (val == 0) {
+			I915_WRITE(BLC_PWM_CTL,
+				   dev_priv->saveBLC_PWM_CTL);
+			I915_WRITE(BLC_PWM_CTL2,
+				   dev_priv->saveBLC_PWM_CTL2);
+			val = dev_priv->saveBLC_PWM_CTL;
+		}
+	}
+
+	return val;
+}
+
 u32 intel_panel_get_max_backlight(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 max;
 
+	max = i915_read_blc_pwm_ctl(dev_priv);
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+		return 1;
+	}
+
 	if (HAS_PCH_SPLIT(dev)) {
-		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+		max >>= 16;
 	} else {
-		max = I915_READ(BLC_PWM_CTL);
 		if (IS_PINEVIEW(dev)) {
 			max >>= 17;
 		} else {
@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
 		max *= 0xff;
 	}
 
-	if (max == 0) {
-		/* XXX add code here to query mode clock or hardware clock
-		 * and program max PWM appropriately.
-		 */
-		DRM_ERROR("fixme: max PWM is zero.\n");
-		max = 1;
-	}
-
 	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
 	return max;
 }
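
The logic in i915_read_blc_pwm_ctl() is a small state machine: the first non-zero readout of the backlight PWM control register is cached in saveBLC_PWM_CTL*, and a later readout of zero is taken to mean the register lost its contents (e.g. across a GPU reset), in which case the cached value is written back and used. The same idea in isolation:

	/* Generic save-restore shape (illustrative, not driver code). */
	static u32 example_read_with_restore(u32 live, u32 *saved,
					     void (*restore)(u32))
	{
		if (*saved == 0)
			*saved = live;		/* first good snapshot */
		else if (live == 0) {
			restore(*saved);	/* register was reset */
			live = *saved;
		}
		return live;
	}

Note that the PCH branch above restores saveBLC_PWM_CTL into BLC_PWM_PCH_CTL2 while snapshotting saveBLC_PWM_CTL2; whether that cross-use of the two save slots is intentional is not decidable from this hunk alone.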
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b83306f9244b..56bc95c056dd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 }
 
 static void
-render_ring_flush(struct drm_device *dev,
-		  struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
 		  u32 invalidate_domains,
 		  u32 flush_domains)
 {
+	struct drm_device *dev = ring->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
 
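
The signature change above is the template for the rest of this file: ring methods now take only the intel_ring_buffer, and the device is recovered from ring->dev inside, removing a redundant parameter from every hook. The pattern in isolation:

	/* Pattern used by the converted hooks (illustrative): derive
	 * everything from the ring instead of threading dev through. */
	static void example_hook(struct intel_ring_buffer *ring)
	{
		struct drm_device *dev = ring->dev;
		drm_i915_private_t *dev_priv = dev->dev_private;

		(void)dev_priv;	/* register access would go here */
	}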
@@ -109,79 +109,83 @@ render_ring_flush(struct drm_device *dev,
 		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
 			cmd |= MI_EXE_FLUSH;
 
+		if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+		    (IS_G4X(dev) || IS_GEN5(dev)))
+			cmd |= MI_INVALIDATE_ISP;
+
 #if WATCH_EXEC
 		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
 #endif
-		intel_ring_begin(dev, ring, 2);
-		intel_ring_emit(dev, ring, cmd);
-		intel_ring_emit(dev, ring, MI_NOOP);
-		intel_ring_advance(dev, ring);
+		if (intel_ring_begin(ring, 2) == 0) {
+			intel_ring_emit(ring, cmd);
+			intel_ring_emit(ring, MI_NOOP);
+			intel_ring_advance(ring);
+		}
 	}
 }
 
-static void ring_write_tail(struct drm_device *dev,
-			    struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 	I915_WRITE_TAIL(ring, value);
 }
 
-u32 intel_ring_get_active_head(struct drm_device *dev,
-			       struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
 		RING_ACTHD(ring->mmio_base) : ACTHD;
 
 	return I915_READ(acthd_reg);
 }
 
-static int init_ring_common(struct drm_device *dev,
-			    struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
 {
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	struct drm_i915_gem_object *obj = ring->obj;
 	u32 head;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *obj_priv;
-	obj_priv = to_intel_bo(ring->gem_object);
 
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
-	ring->write_tail(dev, ring, 0);
+	ring->write_tail(ring, 0);
 
 	/* Initialize the ring. */
-	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	I915_WRITE_START(ring, obj->gtt_offset);
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
 	if (head != 0) {
-		DRM_ERROR("%s head not reset to zero "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  ring->name,
-			  I915_READ_CTL(ring),
-			  I915_READ_HEAD(ring),
-			  I915_READ_TAIL(ring),
-			  I915_READ_START(ring));
+		DRM_DEBUG_KMS("%s head not reset to zero "
+			      "ctl %08x head %08x tail %08x start %08x\n",
+			      ring->name,
+			      I915_READ_CTL(ring),
+			      I915_READ_HEAD(ring),
+			      I915_READ_TAIL(ring),
+			      I915_READ_START(ring));
 
 		I915_WRITE_HEAD(ring, 0);
 
-		DRM_ERROR("%s head forced to zero "
-			  "ctl %08x head %08x tail %08x start %08x\n",
-			  ring->name,
-			  I915_READ_CTL(ring),
-			  I915_READ_HEAD(ring),
-			  I915_READ_TAIL(ring),
-			  I915_READ_START(ring));
+		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+			DRM_ERROR("failed to set %s head to zero "
+				  "ctl %08x head %08x tail %08x start %08x\n",
+				  ring->name,
+				  I915_READ_CTL(ring),
+				  I915_READ_HEAD(ring),
+				  I915_READ_TAIL(ring),
+				  I915_READ_START(ring));
+		}
 	}
 
 	I915_WRITE_CTL(ring,
-			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_REPORT_64K | RING_VALID);
 
-	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
-	if (head != 0) {
+	if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+	    I915_READ_START(ring) != obj->gtt_offset ||
+	    (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
 		DRM_ERROR("%s initialization failed "
 			  "ctl %08x head %08x tail %08x start %08x\n",
 			  ring->name,
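
Two behavioural points in the hunk above: the first failed head reset is downgraded from DRM_ERROR to DRM_DEBUG_KMS (the G45 quirk the comment names, where a retry usually succeeds), and the final liveness test no longer trusts a single cached head read; it re-reads CTL, START and HEAD and requires all three to be sane. A hypothetical predicate restating that condition:

	/* Hedged restatement of the new liveness check; the helper name
	 * and factoring are hypothetical. */
	static bool example_ring_alive(struct intel_ring_buffer *ring)
	{
		drm_i915_private_t *dev_priv = ring->dev->dev_private;

		return (I915_READ_CTL(ring) & RING_VALID) != 0 &&
		       I915_READ_START(ring) == ring->obj->gtt_offset &&
		       (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
	}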
@@ -192,8 +196,8 @@ static int init_ring_common(struct drm_device *dev,
 		return -EIO;
 	}
 
-	if (!drm_core_check_feature(dev, DRIVER_MODESET))
-		i915_kernel_lost_context(dev);
+	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+		i915_kernel_lost_context(ring->dev);
 	else {
 		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
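
The large hunk below introduces two mechanisms. First, Ironlake gains a pipe_control scratch page: a pinned, kmap'ed 4 KiB object the GPU writes seqnos into via PIPE_CONTROL, so the CPU-side read (pc_render_get_seqno) is a plain load from pc->cpu_page[0]. Second, Sandy Bridge inter-ring semaphores arrive, including the mailbox-index arithmetic in update_semaphore(), which is worth a worked example:

	/* Worked example of the mailbox index computed below (not driver
	 * code). Rings are numbered cs=0, vcs=1, bcs=2 in dev_priv->ring[].
	 * For signalling ring r and mailbox slot i (0 or 1):
	 *
	 *	id = (r + 2 - i) % 3
	 *
	 * e.g. r = 1 (vcs):  i=0 -> id=0 (cs),  i=1 -> id=2 (bcs)
	 * matching the "vcs -> 1 = bcs, 0 = cs" table in the code's own
	 * comment, so each ring addresses its two peers without naming
	 * them explicitly. */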
@@ -201,335 +205,500 @@ static int init_ring_common(struct drm_device *dev,
201 if (ring->space < 0) 205 if (ring->space < 0)
202 ring->space += ring->size; 206 ring->space += ring->size;
203 } 207 }
208
204 return 0; 209 return 0;
205} 210}
206 211
207static int init_render_ring(struct drm_device *dev, 212/*
208 struct intel_ring_buffer *ring) 213 * 965+ support PIPE_CONTROL commands, which provide finer grained control
214 * over cache flushing.
215 */
216struct pipe_control {
217 struct drm_i915_gem_object *obj;
218 volatile u32 *cpu_page;
219 u32 gtt_offset;
220};
221
222static int
223init_pipe_control(struct intel_ring_buffer *ring)
209{ 224{
210 drm_i915_private_t *dev_priv = dev->dev_private; 225 struct pipe_control *pc;
211 int ret = init_ring_common(dev, ring); 226 struct drm_i915_gem_object *obj;
212 int mode; 227 int ret;
228
229 if (ring->private)
230 return 0;
231
232 pc = kmalloc(sizeof(*pc), GFP_KERNEL);
233 if (!pc)
234 return -ENOMEM;
235
236 obj = i915_gem_alloc_object(ring->dev, 4096);
237 if (obj == NULL) {
238 DRM_ERROR("Failed to allocate seqno page\n");
239 ret = -ENOMEM;
240 goto err;
241 }
242 obj->agp_type = AGP_USER_CACHED_MEMORY;
243
244 ret = i915_gem_object_pin(obj, 4096, true);
245 if (ret)
246 goto err_unref;
247
248 pc->gtt_offset = obj->gtt_offset;
249 pc->cpu_page = kmap(obj->pages[0]);
250 if (pc->cpu_page == NULL)
251 goto err_unpin;
252
253 pc->obj = obj;
254 ring->private = pc;
255 return 0;
256
257err_unpin:
258 i915_gem_object_unpin(obj);
259err_unref:
260 drm_gem_object_unreference(&obj->base);
261err:
262 kfree(pc);
263 return ret;
264}
265
266static void
267cleanup_pipe_control(struct intel_ring_buffer *ring)
268{
269 struct pipe_control *pc = ring->private;
270 struct drm_i915_gem_object *obj;
271
272 if (!ring->private)
273 return;
274
275 obj = pc->obj;
276 kunmap(obj->pages[0]);
277 i915_gem_object_unpin(obj);
278 drm_gem_object_unreference(&obj->base);
279
280 kfree(pc);
281 ring->private = NULL;
282}
283
284static int init_render_ring(struct intel_ring_buffer *ring)
285{
286 struct drm_device *dev = ring->dev;
287 struct drm_i915_private *dev_priv = dev->dev_private;
288 int ret = init_ring_common(ring);
213 289
214 if (INTEL_INFO(dev)->gen > 3) { 290 if (INTEL_INFO(dev)->gen > 3) {
215 mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH; 291 int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
216 if (IS_GEN6(dev)) 292 if (IS_GEN6(dev))
217 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE; 293 mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
218 I915_WRITE(MI_MODE, mode); 294 I915_WRITE(MI_MODE, mode);
219 } 295 }
296
297 if (INTEL_INFO(dev)->gen >= 6) {
298 } else if (IS_GEN5(dev)) {
299 ret = init_pipe_control(ring);
300 if (ret)
301 return ret;
302 }
303
220 return ret; 304 return ret;
221} 305}
222 306
223#define PIPE_CONTROL_FLUSH(addr) \ 307static void render_ring_cleanup(struct intel_ring_buffer *ring)
308{
309 if (!ring->private)
310 return;
311
312 cleanup_pipe_control(ring);
313}
314
315static void
316update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
317{
318 struct drm_device *dev = ring->dev;
319 struct drm_i915_private *dev_priv = dev->dev_private;
320 int id;
321
322 /*
323 * cs -> 1 = vcs, 0 = bcs
324 * vcs -> 1 = bcs, 0 = cs,
325 * bcs -> 1 = cs, 0 = vcs.
326 */
327 id = ring - dev_priv->ring;
328 id += 2 - i;
329 id %= 3;
330
331 intel_ring_emit(ring,
332 MI_SEMAPHORE_MBOX |
333 MI_SEMAPHORE_REGISTER |
334 MI_SEMAPHORE_UPDATE);
335 intel_ring_emit(ring, seqno);
336 intel_ring_emit(ring,
337 RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
338}
339
340static int
341gen6_add_request(struct intel_ring_buffer *ring,
342 u32 *result)
343{
344 u32 seqno;
345 int ret;
346
347 ret = intel_ring_begin(ring, 10);
348 if (ret)
349 return ret;
350
351 seqno = i915_gem_get_seqno(ring->dev);
352 update_semaphore(ring, 0, seqno);
353 update_semaphore(ring, 1, seqno);
354
355 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
356 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
357 intel_ring_emit(ring, seqno);
358 intel_ring_emit(ring, MI_USER_INTERRUPT);
359 intel_ring_advance(ring);
360
361 *result = seqno;
362 return 0;
363}
364
365int
366intel_ring_sync(struct intel_ring_buffer *ring,
367 struct intel_ring_buffer *to,
368 u32 seqno)
369{
370 int ret;
371
372 ret = intel_ring_begin(ring, 4);
373 if (ret)
374 return ret;
375
376 intel_ring_emit(ring,
377 MI_SEMAPHORE_MBOX |
378 MI_SEMAPHORE_REGISTER |
379 intel_ring_sync_index(ring, to) << 17 |
380 MI_SEMAPHORE_COMPARE);
381 intel_ring_emit(ring, seqno);
382 intel_ring_emit(ring, 0);
383 intel_ring_emit(ring, MI_NOOP);
384 intel_ring_advance(ring);
385
386 return 0;
387}
388
389#define PIPE_CONTROL_FLUSH(ring__, addr__) \
224do { \ 390do { \
225 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ 391 intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
226 PIPE_CONTROL_DEPTH_STALL | 2); \ 392 PIPE_CONTROL_DEPTH_STALL | 2); \
227 OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \ 393 intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
228 OUT_RING(0); \ 394 intel_ring_emit(ring__, 0); \
229 OUT_RING(0); \ 395 intel_ring_emit(ring__, 0); \
230} while (0) 396} while (0)
231 397
232/** 398static int
233 * Creates a new sequence number, emitting a write of it to the status page 399pc_render_add_request(struct intel_ring_buffer *ring,
234 * plus an interrupt, which will trigger i915_user_interrupt_handler. 400 u32 *result)
235 *
236 * Must be called with struct_lock held.
237 *
238 * Returned sequence numbers are nonzero on success.
239 */
240static u32
241render_ring_add_request(struct drm_device *dev,
242 struct intel_ring_buffer *ring,
243 u32 flush_domains)
244{ 401{
245 drm_i915_private_t *dev_priv = dev->dev_private; 402 struct drm_device *dev = ring->dev;
246 u32 seqno; 403 u32 seqno = i915_gem_get_seqno(dev);
404 struct pipe_control *pc = ring->private;
405 u32 scratch_addr = pc->gtt_offset + 128;
406 int ret;
247 407
248 seqno = i915_gem_get_seqno(dev); 408 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
249 409 * incoherent with writes to memory, i.e. completely fubar,
250 if (IS_GEN6(dev)) { 410 * so we need to use PIPE_NOTIFY instead.
251 BEGIN_LP_RING(6); 411 *
252 OUT_RING(GFX_OP_PIPE_CONTROL | 3); 412 * However, we also need to workaround the qword write
253 OUT_RING(PIPE_CONTROL_QW_WRITE | 413 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
254 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH | 414 * memory before requesting an interrupt.
255 PIPE_CONTROL_NOTIFY); 415 */
256 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT); 416 ret = intel_ring_begin(ring, 32);
257 OUT_RING(seqno); 417 if (ret)
258 OUT_RING(0); 418 return ret;
259 OUT_RING(0); 419
260 ADVANCE_LP_RING(); 420 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
261 } else if (HAS_PIPE_CONTROL(dev)) { 421 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
262 u32 scratch_addr = dev_priv->seqno_gfx_addr + 128; 422 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
423 intel_ring_emit(ring, seqno);
424 intel_ring_emit(ring, 0);
425 PIPE_CONTROL_FLUSH(ring, scratch_addr);
426 scratch_addr += 128; /* write to separate cachelines */
427 PIPE_CONTROL_FLUSH(ring, scratch_addr);
428 scratch_addr += 128;
429 PIPE_CONTROL_FLUSH(ring, scratch_addr);
430 scratch_addr += 128;
431 PIPE_CONTROL_FLUSH(ring, scratch_addr);
432 scratch_addr += 128;
433 PIPE_CONTROL_FLUSH(ring, scratch_addr);
434 scratch_addr += 128;
435 PIPE_CONTROL_FLUSH(ring, scratch_addr);
436 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
437 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
438 PIPE_CONTROL_NOTIFY);
439 intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
440 intel_ring_emit(ring, seqno);
441 intel_ring_emit(ring, 0);
442 intel_ring_advance(ring);
443
444 *result = seqno;
445 return 0;
446}
263 447
264 /* 448static int
265 * Workaround qword write incoherence by flushing the 449render_ring_add_request(struct intel_ring_buffer *ring,
266 * PIPE_NOTIFY buffers out to memory before requesting 450 u32 *result)
267 * an interrupt. 451{
268 */ 452 struct drm_device *dev = ring->dev;
269 BEGIN_LP_RING(32); 453 u32 seqno = i915_gem_get_seqno(dev);
270 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | 454 int ret;
271 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
272 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
273 OUT_RING(seqno);
274 OUT_RING(0);
275 PIPE_CONTROL_FLUSH(scratch_addr);
276 scratch_addr += 128; /* write to separate cachelines */
277 PIPE_CONTROL_FLUSH(scratch_addr);
278 scratch_addr += 128;
279 PIPE_CONTROL_FLUSH(scratch_addr);
280 scratch_addr += 128;
281 PIPE_CONTROL_FLUSH(scratch_addr);
282 scratch_addr += 128;
283 PIPE_CONTROL_FLUSH(scratch_addr);
284 scratch_addr += 128;
285 PIPE_CONTROL_FLUSH(scratch_addr);
286 OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
287 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
288 PIPE_CONTROL_NOTIFY);
289 OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
290 OUT_RING(seqno);
291 OUT_RING(0);
292 ADVANCE_LP_RING();
293 } else {
294 BEGIN_LP_RING(4);
295 OUT_RING(MI_STORE_DWORD_INDEX);
296 OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
297 OUT_RING(seqno);
298 455
299 OUT_RING(MI_USER_INTERRUPT); 456 ret = intel_ring_begin(ring, 4);
300 ADVANCE_LP_RING(); 457 if (ret)
301 } 458 return ret;
302 return seqno; 459
460 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
461 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
462 intel_ring_emit(ring, seqno);
463 intel_ring_emit(ring, MI_USER_INTERRUPT);
464 intel_ring_advance(ring);
465
466 *result = seqno;
467 return 0;
303} 468}
304 469
305static u32 470static u32
306render_ring_get_seqno(struct drm_device *dev, 471ring_get_seqno(struct intel_ring_buffer *ring)
307 struct intel_ring_buffer *ring)
308{ 472{
309 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 473 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
310 if (HAS_PIPE_CONTROL(dev))
311 return ((volatile u32 *)(dev_priv->seqno_page))[0];
312 else
313 return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
314} 474}
315 475
316static void 476static u32
317render_ring_get_user_irq(struct drm_device *dev, 477pc_render_get_seqno(struct intel_ring_buffer *ring)
318 struct intel_ring_buffer *ring)
319{ 478{
320 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 479 struct pipe_control *pc = ring->private;
321 unsigned long irqflags; 480 return pc->cpu_page[0];
481}
482
483static bool
484render_ring_get_irq(struct intel_ring_buffer *ring)
485{
486 struct drm_device *dev = ring->dev;
487
488 if (!dev->irq_enabled)
489 return false;
322 490
323 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 491 if (atomic_inc_return(&ring->irq_refcount) == 1) {
324 if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) { 492 drm_i915_private_t *dev_priv = dev->dev_private;
493 unsigned long irqflags;
494
495 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
325 if (HAS_PCH_SPLIT(dev)) 496 if (HAS_PCH_SPLIT(dev))
326 ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 497 ironlake_enable_graphics_irq(dev_priv,
498 GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
327 else 499 else
328 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 500 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
501 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
329 } 502 }
330 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 503
504 return true;
331} 505}
332 506
333static void 507static void
334render_ring_put_user_irq(struct drm_device *dev, 508render_ring_put_irq(struct intel_ring_buffer *ring)
335 struct intel_ring_buffer *ring)
336{ 509{
337 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 510 struct drm_device *dev = ring->dev;
338 unsigned long irqflags;
339 511
340 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 512 if (atomic_dec_and_test(&ring->irq_refcount)) {
341 BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0); 513 drm_i915_private_t *dev_priv = dev->dev_private;
342 if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) { 514 unsigned long irqflags;
515
516 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
343 if (HAS_PCH_SPLIT(dev)) 517 if (HAS_PCH_SPLIT(dev))
344 ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY); 518 ironlake_disable_graphics_irq(dev_priv,
519 GT_USER_INTERRUPT |
520 GT_PIPE_NOTIFY);
345 else 521 else
346 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 522 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
523 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
347 } 524 }
348 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
349} 525}
350 526
351void intel_ring_setup_status_page(struct drm_device *dev, 527void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
352 struct intel_ring_buffer *ring)
353{ 528{
354 drm_i915_private_t *dev_priv = dev->dev_private; 529 drm_i915_private_t *dev_priv = ring->dev->dev_private;
355 if (IS_GEN6(dev)) { 530 u32 mmio = IS_GEN6(ring->dev) ?
356 I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base), 531 RING_HWS_PGA_GEN6(ring->mmio_base) :
357 ring->status_page.gfx_addr); 532 RING_HWS_PGA(ring->mmio_base);
358 I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */ 533 I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
359 } else { 534 POSTING_READ(mmio);
360 I915_WRITE(RING_HWS_PGA(ring->mmio_base),
361 ring->status_page.gfx_addr);
362 I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
363 }
364
365} 535}
366 536
367static void 537static void
368bsd_ring_flush(struct drm_device *dev, 538bsd_ring_flush(struct intel_ring_buffer *ring,
369 struct intel_ring_buffer *ring, 539 u32 invalidate_domains,
370 u32 invalidate_domains, 540 u32 flush_domains)
371 u32 flush_domains)
372{ 541{
373 intel_ring_begin(dev, ring, 2); 542 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
374 intel_ring_emit(dev, ring, MI_FLUSH); 543 return;
375 intel_ring_emit(dev, ring, MI_NOOP);
376 intel_ring_advance(dev, ring);
377}
378 544
379static int init_bsd_ring(struct drm_device *dev, 545 if (intel_ring_begin(ring, 2) == 0) {
380 struct intel_ring_buffer *ring) 546 intel_ring_emit(ring, MI_FLUSH);
381{ 547 intel_ring_emit(ring, MI_NOOP);
382 return init_ring_common(dev, ring); 548 intel_ring_advance(ring);
549 }
383} 550}
384 551
385static u32 552static int
386ring_add_request(struct drm_device *dev, 553ring_add_request(struct intel_ring_buffer *ring,
387 struct intel_ring_buffer *ring, 554 u32 *result)
388 u32 flush_domains)
389{ 555{
390 u32 seqno; 556 u32 seqno;
557 int ret;
391 558
392 seqno = i915_gem_get_seqno(dev); 559 ret = intel_ring_begin(ring, 4);
560 if (ret)
561 return ret;
393 562
394 intel_ring_begin(dev, ring, 4); 563 seqno = i915_gem_get_seqno(ring->dev);
395 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
396 intel_ring_emit(dev, ring,
397 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
398 intel_ring_emit(dev, ring, seqno);
399 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
400 intel_ring_advance(dev, ring);
401 564
402 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); 565 intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
566 intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
567 intel_ring_emit(ring, seqno);
568 intel_ring_emit(ring, MI_USER_INTERRUPT);
569 intel_ring_advance(ring);
403 570
404 return seqno; 571 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
572 *result = seqno;
573 return 0;
405} 574}
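
add_request now returns an error code and hands the seqno back through a pointer, so callers can bail out instead of emitting into a full ring. A hypothetical call site under the new contract:

        u32 seqno;
        int ret;

        ret = ring->add_request(ring, &seqno);
        if (ret)
                return ret;     /* ring full or device wedged; nothing was emitted */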
406 575
407static void 576static bool
408bsd_ring_get_user_irq(struct drm_device *dev, 577ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
409 struct intel_ring_buffer *ring)
410{ 578{
411 /* do nothing */ 579 struct drm_device *dev = ring->dev;
580
581 if (!dev->irq_enabled)
582 return false;
583
584 if (atomic_inc_return(&ring->irq_refcount) == 1) {
585 drm_i915_private_t *dev_priv = dev->dev_private;
586 unsigned long irqflags;
587
588 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
589 ironlake_enable_graphics_irq(dev_priv, flag);
590 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
591 }
592
593 return true;
412} 594}
595
413static void 596static void
414bsd_ring_put_user_irq(struct drm_device *dev, 597ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
415 struct intel_ring_buffer *ring)
416{ 598{
417 /* do nothing */ 599 struct drm_device *dev = ring->dev;
600
601 if (atomic_dec_and_test(&ring->irq_refcount)) {
602 drm_i915_private_t *dev_priv = dev->dev_private;
603 unsigned long irqflags;
604
605 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
606 ironlake_disable_graphics_irq(dev_priv, flag);
607 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
608 }
418} 609}
419 610
420static u32 611static bool
421ring_status_page_get_seqno(struct drm_device *dev, 612bsd_ring_get_irq(struct intel_ring_buffer *ring)
422 struct intel_ring_buffer *ring)
423{ 613{
424 return intel_read_status_page(ring, I915_GEM_HWS_INDEX); 614 return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
615}
616static void
617bsd_ring_put_irq(struct intel_ring_buffer *ring)
618{
619 ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
425} 620}
426 621
427static int 622static int
428ring_dispatch_gem_execbuffer(struct drm_device *dev, 623ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
429 struct intel_ring_buffer *ring,
430 struct drm_i915_gem_execbuffer2 *exec,
431 struct drm_clip_rect *cliprects,
432 uint64_t exec_offset)
433{ 624{
434 uint32_t exec_start; 625 int ret;
435 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 626
436 intel_ring_begin(dev, ring, 2); 627 ret = intel_ring_begin(ring, 2);
437 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START | 628 if (ret)
438 (2 << 6) | MI_BATCH_NON_SECURE_I965); 629 return ret;
439 intel_ring_emit(dev, ring, exec_start); 630
440 intel_ring_advance(dev, ring); 631 intel_ring_emit(ring,
632 MI_BATCH_BUFFER_START | (2 << 6) |
633 MI_BATCH_NON_SECURE_I965);
634 intel_ring_emit(ring, offset);
635 intel_ring_advance(ring);
636
441 return 0; 637 return 0;
442} 638}
443 639
444static int 640static int
445render_ring_dispatch_gem_execbuffer(struct drm_device *dev, 641render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
446 struct intel_ring_buffer *ring, 642 u32 offset, u32 len)
447 struct drm_i915_gem_execbuffer2 *exec,
448 struct drm_clip_rect *cliprects,
449 uint64_t exec_offset)
450{ 643{
644 struct drm_device *dev = ring->dev;
451 drm_i915_private_t *dev_priv = dev->dev_private; 645 drm_i915_private_t *dev_priv = dev->dev_private;
452 int nbox = exec->num_cliprects; 646 int ret;
453 int i = 0, count;
454 uint32_t exec_start, exec_len;
455 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
456 exec_len = (uint32_t) exec->batch_len;
457 647
458 trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1); 648 trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
459 649
460 count = nbox ? nbox : 1; 650 if (IS_I830(dev) || IS_845G(dev)) {
651 ret = intel_ring_begin(ring, 4);
652 if (ret)
653 return ret;
461 654
462 for (i = 0; i < count; i++) { 655 intel_ring_emit(ring, MI_BATCH_BUFFER);
463 if (i < nbox) { 656 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
464 int ret = i915_emit_box(dev, cliprects, i, 657 intel_ring_emit(ring, offset + len - 8);
465 exec->DR1, exec->DR4); 658 intel_ring_emit(ring, 0);
466 if (ret) 659 } else {
467 return ret; 660 ret = intel_ring_begin(ring, 2);
468 } 661 if (ret)
662 return ret;
469 663
470 if (IS_I830(dev) || IS_845G(dev)) { 664 if (INTEL_INFO(dev)->gen >= 4) {
471 intel_ring_begin(dev, ring, 4); 665 intel_ring_emit(ring,
472 intel_ring_emit(dev, ring, MI_BATCH_BUFFER); 666 MI_BATCH_BUFFER_START | (2 << 6) |
473 intel_ring_emit(dev, ring, 667 MI_BATCH_NON_SECURE_I965);
474 exec_start | MI_BATCH_NON_SECURE); 668 intel_ring_emit(ring, offset);
475 intel_ring_emit(dev, ring, exec_start + exec_len - 4);
476 intel_ring_emit(dev, ring, 0);
477 } else { 669 } else {
478 intel_ring_begin(dev, ring, 2); 670 intel_ring_emit(ring,
479 if (INTEL_INFO(dev)->gen >= 4) { 671 MI_BATCH_BUFFER_START | (2 << 6));
480 intel_ring_emit(dev, ring, 672 intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
481 MI_BATCH_BUFFER_START | (2 << 6)
482 | MI_BATCH_NON_SECURE_I965);
483 intel_ring_emit(dev, ring, exec_start);
484 } else {
485 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
486 | (2 << 6));
487 intel_ring_emit(dev, ring, exec_start |
488 MI_BATCH_NON_SECURE);
489 }
490 } 673 }
491 intel_ring_advance(dev, ring);
492 } 674 }
493 675 intel_ring_advance(ring);
494 if (IS_G4X(dev) || IS_GEN5(dev)) {
495 intel_ring_begin(dev, ring, 2);
496 intel_ring_emit(dev, ring, MI_FLUSH |
497 MI_NO_WRITE_FLUSH |
498 MI_INVALIDATE_ISP );
499 intel_ring_emit(dev, ring, MI_NOOP);
500 intel_ring_advance(dev, ring);
501 }
502 /* XXX breadcrumb */
503 676
504 return 0; 677 return 0;
505} 678}
506 679
507static void cleanup_status_page(struct drm_device *dev, 680static void cleanup_status_page(struct intel_ring_buffer *ring)
508 struct intel_ring_buffer *ring)
509{ 681{
510 drm_i915_private_t *dev_priv = dev->dev_private; 682 drm_i915_private_t *dev_priv = ring->dev->dev_private;
511 struct drm_gem_object *obj; 683 struct drm_i915_gem_object *obj;
512 struct drm_i915_gem_object *obj_priv;
513 684
514 obj = ring->status_page.obj; 685 obj = ring->status_page.obj;
515 if (obj == NULL) 686 if (obj == NULL)
516 return; 687 return;
517 obj_priv = to_intel_bo(obj);
518 688
519 kunmap(obj_priv->pages[0]); 689 kunmap(obj->pages[0]);
520 i915_gem_object_unpin(obj); 690 i915_gem_object_unpin(obj);
521 drm_gem_object_unreference(obj); 691 drm_gem_object_unreference(&obj->base);
522 ring->status_page.obj = NULL; 692 ring->status_page.obj = NULL;
523 693
524 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 694 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
525} 695}
526 696
527static int init_status_page(struct drm_device *dev, 697static int init_status_page(struct intel_ring_buffer *ring)
528 struct intel_ring_buffer *ring)
529{ 698{
699 struct drm_device *dev = ring->dev;
530 drm_i915_private_t *dev_priv = dev->dev_private; 700 drm_i915_private_t *dev_priv = dev->dev_private;
531 struct drm_gem_object *obj; 701 struct drm_i915_gem_object *obj;
532 struct drm_i915_gem_object *obj_priv;
533 int ret; 702 int ret;
534 703
535 obj = i915_gem_alloc_object(dev, 4096); 704 obj = i915_gem_alloc_object(dev, 4096);
@@ -538,16 +707,15 @@ static int init_status_page(struct drm_device *dev,
538 ret = -ENOMEM; 707 ret = -ENOMEM;
539 goto err; 708 goto err;
540 } 709 }
541 obj_priv = to_intel_bo(obj); 710 obj->agp_type = AGP_USER_CACHED_MEMORY;
542 obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
543 711
544 ret = i915_gem_object_pin(obj, 4096); 712 ret = i915_gem_object_pin(obj, 4096, true);
545 if (ret != 0) { 713 if (ret != 0) {
546 goto err_unref; 714 goto err_unref;
547 } 715 }
548 716
549 ring->status_page.gfx_addr = obj_priv->gtt_offset; 717 ring->status_page.gfx_addr = obj->gtt_offset;
550 ring->status_page.page_addr = kmap(obj_priv->pages[0]); 718 ring->status_page.page_addr = kmap(obj->pages[0]);
551 if (ring->status_page.page_addr == NULL) { 719 if (ring->status_page.page_addr == NULL) {
552 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 720 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
553 goto err_unpin; 721 goto err_unpin;
@@ -555,7 +723,7 @@ static int init_status_page(struct drm_device *dev,
555 ring->status_page.obj = obj; 723 ring->status_page.obj = obj;
556 memset(ring->status_page.page_addr, 0, PAGE_SIZE); 724 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
557 725
558 intel_ring_setup_status_page(dev, ring); 726 intel_ring_setup_status_page(ring);
559 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n", 727 DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
560 ring->name, ring->status_page.gfx_addr); 728 ring->name, ring->status_page.gfx_addr);
561 729
@@ -564,7 +732,7 @@ static int init_status_page(struct drm_device *dev,
564err_unpin: 732err_unpin:
565 i915_gem_object_unpin(obj); 733 i915_gem_object_unpin(obj);
566err_unref: 734err_unref:
567 drm_gem_object_unreference(obj); 735 drm_gem_object_unreference(&obj->base);
568err: 736err:
569 return ret; 737 return ret;
570} 738}
@@ -572,9 +740,7 @@ err:
572int intel_init_ring_buffer(struct drm_device *dev, 740int intel_init_ring_buffer(struct drm_device *dev,
573 struct intel_ring_buffer *ring) 741 struct intel_ring_buffer *ring)
574{ 742{
575 struct drm_i915_private *dev_priv = dev->dev_private; 743 struct drm_i915_gem_object *obj;
576 struct drm_i915_gem_object *obj_priv;
577 struct drm_gem_object *obj;
578 int ret; 744 int ret;
579 745
580 ring->dev = dev; 746 ring->dev = dev;
@@ -583,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
583 INIT_LIST_HEAD(&ring->gpu_write_list); 749 INIT_LIST_HEAD(&ring->gpu_write_list);
584 750
585 if (I915_NEED_GFX_HWS(dev)) { 751 if (I915_NEED_GFX_HWS(dev)) {
586 ret = init_status_page(dev, ring); 752 ret = init_status_page(ring);
587 if (ret) 753 if (ret)
588 return ret; 754 return ret;
589 } 755 }
@@ -595,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
595 goto err_hws; 761 goto err_hws;
596 } 762 }
597 763
598 ring->gem_object = obj; 764 ring->obj = obj;
599 765
600 ret = i915_gem_object_pin(obj, PAGE_SIZE); 766 ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
601 if (ret) 767 if (ret)
602 goto err_unref; 768 goto err_unref;
603 769
604 obj_priv = to_intel_bo(obj);
605 ring->map.size = ring->size; 770 ring->map.size = ring->size;
606 ring->map.offset = dev->agp->base + obj_priv->gtt_offset; 771 ring->map.offset = dev->agp->base + obj->gtt_offset;
607 ring->map.type = 0; 772 ring->map.type = 0;
608 ring->map.flags = 0; 773 ring->map.flags = 0;
609 ring->map.mtrr = 0; 774 ring->map.mtrr = 0;
@@ -616,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev,
616 } 781 }
617 782
618 ring->virtual_start = ring->map.handle; 783 ring->virtual_start = ring->map.handle;
619 ret = ring->init(dev, ring); 784 ret = ring->init(ring);
620 if (ret) 785 if (ret)
621 goto err_unmap; 786 goto err_unmap;
622 787
623 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 788 return 0;
624 i915_kernel_lost_context(dev);
625 else {
626 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
627 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
628 ring->space = ring->head - (ring->tail + 8);
629 if (ring->space < 0)
630 ring->space += ring->size;
631 }
632 return ret;
633 789
634err_unmap: 790err_unmap:
635 drm_core_ioremapfree(&ring->map, dev); 791 drm_core_ioremapfree(&ring->map, dev);
636err_unpin: 792err_unpin:
637 i915_gem_object_unpin(obj); 793 i915_gem_object_unpin(obj);
638err_unref: 794err_unref:
639 drm_gem_object_unreference(obj); 795 drm_gem_object_unreference(&obj->base);
640 ring->gem_object = NULL; 796 ring->obj = NULL;
641err_hws: 797err_hws:
642 cleanup_status_page(dev, ring); 798 cleanup_status_page(ring);
643 return ret; 799 return ret;
644} 800}
645 801
646void intel_cleanup_ring_buffer(struct drm_device *dev, 802void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
647 struct intel_ring_buffer *ring)
648{ 803{
649 if (ring->gem_object == NULL) 804 struct drm_i915_private *dev_priv;
805 int ret;
806
807 if (ring->obj == NULL)
650 return; 808 return;
651 809
652 drm_core_ioremapfree(&ring->map, dev); 810 /* Disable the ring buffer. The ring must be idle at this point */
811 dev_priv = ring->dev->dev_private;
812 ret = intel_wait_ring_buffer(ring, ring->size - 8);
813 I915_WRITE_CTL(ring, 0);
653 814
654 i915_gem_object_unpin(ring->gem_object); 815 drm_core_ioremapfree(&ring->map, ring->dev);
655 drm_gem_object_unreference(ring->gem_object); 816
656 ring->gem_object = NULL; 817 i915_gem_object_unpin(ring->obj);
818 drm_gem_object_unreference(&ring->obj->base);
819 ring->obj = NULL;
657 820
658 if (ring->cleanup) 821 if (ring->cleanup)
659 ring->cleanup(ring); 822 ring->cleanup(ring);
660 823
661 cleanup_status_page(dev, ring); 824 cleanup_status_page(ring);
662} 825}
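
The cleanup path now quiesces the hardware before freeing anything: waiting until size - 8 bytes are free can only succeed once the GPU has consumed every queued command (8 bytes stay permanently reserved), and only then is the ring disabled. The ordering, restated:

        ret = intel_wait_ring_buffer(ring, ring->size - 8);    /* drain: ring idle */
        I915_WRITE_CTL(ring, 0);                                /* then switch it off */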
663 826
664static int intel_wrap_ring_buffer(struct drm_device *dev, 827static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
665 struct intel_ring_buffer *ring)
666{ 828{
667 unsigned int *virt; 829 unsigned int *virt;
668 int rem; 830 int rem;
669 rem = ring->size - ring->tail; 831 rem = ring->size - ring->tail;
670 832
671 if (ring->space < rem) { 833 if (ring->space < rem) {
672 int ret = intel_wait_ring_buffer(dev, ring, rem); 834 int ret = intel_wait_ring_buffer(ring, rem);
673 if (ret) 835 if (ret)
674 return ret; 836 return ret;
675 } 837 }
@@ -687,32 +849,29 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
687 return 0; 849 return 0;
688} 850}
689 851
690int intel_wait_ring_buffer(struct drm_device *dev, 852int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
691 struct intel_ring_buffer *ring, int n)
692{ 853{
854 struct drm_device *dev = ring->dev;
855 struct drm_i915_private *dev_priv = dev->dev_private;
693 unsigned long end; 856 unsigned long end;
694 drm_i915_private_t *dev_priv = dev->dev_private;
695 u32 head; 857 u32 head;
696 858
697 head = intel_read_status_page(ring, 4);
698 if (head) {
699 ring->head = head & HEAD_ADDR;
700 ring->space = ring->head - (ring->tail + 8);
701 if (ring->space < 0)
702 ring->space += ring->size;
703 if (ring->space >= n)
704 return 0;
705 }
706
 707 trace_i915_ring_wait_begin(dev); 859
708 end = jiffies + 3 * HZ; 860 end = jiffies + 3 * HZ;
709 do { 861 do {
710 ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; 862 /* If the reported head position has wrapped or hasn't advanced,
 863 * fall back to the slow and accurate path.
864 */
865 head = intel_read_status_page(ring, 4);
866 if (head < ring->actual_head)
867 head = I915_READ_HEAD(ring);
868 ring->actual_head = head;
869 ring->head = head & HEAD_ADDR;
711 ring->space = ring->head - (ring->tail + 8); 870 ring->space = ring->head - (ring->tail + 8);
712 if (ring->space < 0) 871 if (ring->space < 0)
713 ring->space += ring->size; 872 ring->space += ring->size;
714 if (ring->space >= n) { 873 if (ring->space >= n) {
715 trace_i915_ring_wait_end (dev); 874 trace_i915_ring_wait_end(dev);
716 return 0; 875 return 0;
717 } 876 }
718 877
@@ -723,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
723 } 882 }
724 883
725 msleep(1); 884 msleep(1);
885 if (atomic_read(&dev_priv->mm.wedged))
886 return -EAGAIN;
726 } while (!time_after(jiffies, end)); 887 } while (!time_after(jiffies, end));
 727 trace_i915_ring_wait_end(dev); 888
728 return -EBUSY; 889 return -EBUSY;
729} 890}
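
The space computation inside the loop is worth reading in isolation: eight bytes are always held in reserve so that a completely full ring cannot look identical to a completely empty one, and a negative difference means the head has wrapped around behind the tail. A standalone restatement (kernel u32 assumed):

        static int ring_space(u32 head, u32 tail, int size)
        {
                int space = head - (tail + 8);  /* 8 bytes kept in reserve */
                if (space < 0)
                        space += size;          /* head wrapped behind the tail */
                return space;
        }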
730 891
731void intel_ring_begin(struct drm_device *dev, 892int intel_ring_begin(struct intel_ring_buffer *ring,
732 struct intel_ring_buffer *ring, 893 int num_dwords)
733 int num_dwords)
734{ 894{
735 int n = 4*num_dwords; 895 int n = 4*num_dwords;
736 if (unlikely(ring->tail + n > ring->size)) 896 int ret;
737 intel_wrap_ring_buffer(dev, ring); 897
738 if (unlikely(ring->space < n)) 898 if (unlikely(ring->tail + n > ring->size)) {
739 intel_wait_ring_buffer(dev, ring, n); 899 ret = intel_wrap_ring_buffer(ring);
900 if (unlikely(ret))
901 return ret;
902 }
903
904 if (unlikely(ring->space < n)) {
905 ret = intel_wait_ring_buffer(ring, n);
906 if (unlikely(ret))
907 return ret;
908 }
740 909
741 ring->space -= n; 910 ring->space -= n;
911 return 0;
742} 912}
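
With intel_ring_begin returning an error, every emitter now follows the same begin/emit/advance shape; the two-dword flush earlier in this file is the canonical example:

        ret = intel_ring_begin(ring, 2);        /* reserve two dwords; may fail */
        if (ret)
                return ret;
        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);               /* publish the new tail to the GPU */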
743 913
744void intel_ring_advance(struct drm_device *dev, 914void intel_ring_advance(struct intel_ring_buffer *ring)
745 struct intel_ring_buffer *ring)
746{ 915{
747 ring->tail &= ring->size - 1; 916 ring->tail &= ring->size - 1;
748 ring->write_tail(dev, ring, ring->tail); 917 ring->write_tail(ring, ring->tail);
749} 918}
750 919
751static const struct intel_ring_buffer render_ring = { 920static const struct intel_ring_buffer render_ring = {
@@ -757,10 +926,11 @@ static const struct intel_ring_buffer render_ring = {
757 .write_tail = ring_write_tail, 926 .write_tail = ring_write_tail,
758 .flush = render_ring_flush, 927 .flush = render_ring_flush,
759 .add_request = render_ring_add_request, 928 .add_request = render_ring_add_request,
760 .get_seqno = render_ring_get_seqno, 929 .get_seqno = ring_get_seqno,
761 .user_irq_get = render_ring_get_user_irq, 930 .irq_get = render_ring_get_irq,
762 .user_irq_put = render_ring_put_user_irq, 931 .irq_put = render_ring_put_irq,
763 .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer, 932 .dispatch_execbuffer = render_ring_dispatch_execbuffer,
933 .cleanup = render_ring_cleanup,
764}; 934};
765 935
766/* ring buffer for bit-stream decoder */ 936/* ring buffer for bit-stream decoder */
@@ -770,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = {
770 .id = RING_BSD, 940 .id = RING_BSD,
771 .mmio_base = BSD_RING_BASE, 941 .mmio_base = BSD_RING_BASE,
772 .size = 32 * PAGE_SIZE, 942 .size = 32 * PAGE_SIZE,
773 .init = init_bsd_ring, 943 .init = init_ring_common,
774 .write_tail = ring_write_tail, 944 .write_tail = ring_write_tail,
775 .flush = bsd_ring_flush, 945 .flush = bsd_ring_flush,
776 .add_request = ring_add_request, 946 .add_request = ring_add_request,
777 .get_seqno = ring_status_page_get_seqno, 947 .get_seqno = ring_get_seqno,
778 .user_irq_get = bsd_ring_get_user_irq, 948 .irq_get = bsd_ring_get_irq,
779 .user_irq_put = bsd_ring_put_user_irq, 949 .irq_put = bsd_ring_put_irq,
780 .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer, 950 .dispatch_execbuffer = ring_dispatch_execbuffer,
781}; 951};
782 952
783 953
784static void gen6_bsd_ring_write_tail(struct drm_device *dev, 954static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
785 struct intel_ring_buffer *ring,
786 u32 value) 955 u32 value)
787{ 956{
788 drm_i915_private_t *dev_priv = dev->dev_private; 957 drm_i915_private_t *dev_priv = ring->dev->dev_private;
789 958
790 /* Every tail move must follow the sequence below */ 959 /* Every tail move must follow the sequence below */
791 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL, 960 I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -804,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
804 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE); 973 GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
805} 974}
806 975
807static void gen6_ring_flush(struct drm_device *dev, 976static void gen6_ring_flush(struct intel_ring_buffer *ring,
808 struct intel_ring_buffer *ring,
809 u32 invalidate_domains, 977 u32 invalidate_domains,
810 u32 flush_domains) 978 u32 flush_domains)
811{ 979{
812 intel_ring_begin(dev, ring, 4); 980 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
813 intel_ring_emit(dev, ring, MI_FLUSH_DW); 981 return;
814 intel_ring_emit(dev, ring, 0); 982
815 intel_ring_emit(dev, ring, 0); 983 if (intel_ring_begin(ring, 4) == 0) {
816 intel_ring_emit(dev, ring, 0); 984 intel_ring_emit(ring, MI_FLUSH_DW);
817 intel_ring_advance(dev, ring); 985 intel_ring_emit(ring, 0);
986 intel_ring_emit(ring, 0);
987 intel_ring_emit(ring, 0);
988 intel_ring_advance(ring);
989 }
818} 990}
819 991
820static int 992static int
821gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev, 993gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
822 struct intel_ring_buffer *ring, 994 u32 offset, u32 len)
823 struct drm_i915_gem_execbuffer2 *exec,
824 struct drm_clip_rect *cliprects,
825 uint64_t exec_offset)
826{ 995{
827 uint32_t exec_start; 996 int ret;
828 997
829 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 998 ret = intel_ring_begin(ring, 2);
999 if (ret)
1000 return ret;
830 1001
831 intel_ring_begin(dev, ring, 2); 1002 intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
832 intel_ring_emit(dev, ring,
833 MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
834 /* bit0-7 is the length on GEN6+ */ 1003 /* bit0-7 is the length on GEN6+ */
835 intel_ring_emit(dev, ring, exec_start); 1004 intel_ring_emit(ring, offset);
836 intel_ring_advance(dev, ring); 1005 intel_ring_advance(ring);
837 1006
838 return 0; 1007 return 0;
839} 1008}
840 1009
1010static bool
1011gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
1012{
1013 return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1014}
1015
1016static void
1017gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
1018{
1019 ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
1020}
1021
841/* ring buffer for Video Codec for Gen6+ */ 1022/* ring buffer for Video Codec for Gen6+ */
842static const struct intel_ring_buffer gen6_bsd_ring = { 1023static const struct intel_ring_buffer gen6_bsd_ring = {
843 .name = "gen6 bsd ring", 1024 .name = "gen6 bsd ring",
844 .id = RING_BSD, 1025 .id = RING_BSD,
845 .mmio_base = GEN6_BSD_RING_BASE, 1026 .mmio_base = GEN6_BSD_RING_BASE,
846 .size = 32 * PAGE_SIZE, 1027 .size = 32 * PAGE_SIZE,
847 .init = init_bsd_ring, 1028 .init = init_ring_common,
848 .write_tail = gen6_bsd_ring_write_tail, 1029 .write_tail = gen6_bsd_ring_write_tail,
849 .flush = gen6_ring_flush, 1030 .flush = gen6_ring_flush,
850 .add_request = ring_add_request, 1031 .add_request = gen6_add_request,
851 .get_seqno = ring_status_page_get_seqno, 1032 .get_seqno = ring_get_seqno,
852 .user_irq_get = bsd_ring_get_user_irq, 1033 .irq_get = gen6_bsd_ring_get_irq,
853 .user_irq_put = bsd_ring_put_user_irq, 1034 .irq_put = gen6_bsd_ring_put_irq,
854 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 1035 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
855}; 1036};
856 1037
857/* Blitter support (SandyBridge+) */ 1038/* Blitter support (SandyBridge+) */
858 1039
859static void 1040static bool
860blt_ring_get_user_irq(struct drm_device *dev, 1041blt_ring_get_irq(struct intel_ring_buffer *ring)
861 struct intel_ring_buffer *ring)
862{ 1042{
863 /* do nothing */ 1043 return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
864} 1044}
1045
865static void 1046static void
866blt_ring_put_user_irq(struct drm_device *dev, 1047blt_ring_put_irq(struct intel_ring_buffer *ring)
867 struct intel_ring_buffer *ring)
868{ 1048{
869 /* do nothing */ 1049 ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
870} 1050}
871 1051
872 1052
@@ -884,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
884 return ring->private; 1064 return ring->private;
885} 1065}
886 1066
887static int blt_ring_init(struct drm_device *dev, 1067static int blt_ring_init(struct intel_ring_buffer *ring)
888 struct intel_ring_buffer *ring)
889{ 1068{
890 if (NEED_BLT_WORKAROUND(dev)) { 1069 if (NEED_BLT_WORKAROUND(ring->dev)) {
891 struct drm_i915_gem_object *obj; 1070 struct drm_i915_gem_object *obj;
892 u32 __iomem *ptr; 1071 u32 *ptr;
893 int ret; 1072 int ret;
894 1073
895 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096)); 1074 obj = i915_gem_alloc_object(ring->dev, 4096);
896 if (obj == NULL) 1075 if (obj == NULL)
897 return -ENOMEM; 1076 return -ENOMEM;
898 1077
899 ret = i915_gem_object_pin(&obj->base, 4096); 1078 ret = i915_gem_object_pin(obj, 4096, true);
900 if (ret) { 1079 if (ret) {
901 drm_gem_object_unreference(&obj->base); 1080 drm_gem_object_unreference(&obj->base);
902 return ret; 1081 return ret;
903 } 1082 }
904 1083
905 ptr = kmap(obj->pages[0]); 1084 ptr = kmap(obj->pages[0]);
906 iowrite32(MI_BATCH_BUFFER_END, ptr); 1085 *ptr++ = MI_BATCH_BUFFER_END;
907 iowrite32(MI_NOOP, ptr+1); 1086 *ptr++ = MI_NOOP;
908 kunmap(obj->pages[0]); 1087 kunmap(obj->pages[0]);
909 1088
910 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false); 1089 ret = i915_gem_object_set_to_gtt_domain(obj, false);
911 if (ret) { 1090 if (ret) {
912 i915_gem_object_unpin(&obj->base); 1091 i915_gem_object_unpin(obj);
913 drm_gem_object_unreference(&obj->base); 1092 drm_gem_object_unreference(&obj->base);
914 return ret; 1093 return ret;
915 } 1094 }
@@ -917,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev,
917 ring->private = obj; 1096 ring->private = obj;
918 } 1097 }
919 1098
920 return init_ring_common(dev, ring); 1099 return init_ring_common(ring);
921} 1100}
922 1101
923static void blt_ring_begin(struct drm_device *dev, 1102static int blt_ring_begin(struct intel_ring_buffer *ring,
924 struct intel_ring_buffer *ring,
925 int num_dwords) 1103 int num_dwords)
926{ 1104{
927 if (ring->private) { 1105 if (ring->private) {
928 intel_ring_begin(dev, ring, num_dwords+2); 1106 int ret = intel_ring_begin(ring, num_dwords+2);
929 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START); 1107 if (ret)
930 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset); 1108 return ret;
1109
1110 intel_ring_emit(ring, MI_BATCH_BUFFER_START);
1111 intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
1112
1113 return 0;
931 } else 1114 } else
932 intel_ring_begin(dev, ring, 4); 1115 return intel_ring_begin(ring, 4);
933} 1116}
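
Taking init and begin together: when the workaround object exists, blt_ring_begin prefixes every batch with a jump into the private 4 KiB page, and that page (filled in blt_ring_init above) holds nothing but MI_BATCH_BUFFER_END and MI_NOOP, so the GPU presumably returns to the ring immediately, before the caller's real commands. What gets emitted per batch:

        intel_ring_emit(ring, MI_BATCH_BUFFER_START);
        intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
        /* the page jumped to is just MI_BATCH_BUFFER_END, MI_NOOP */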
934 1117
935static void blt_ring_flush(struct drm_device *dev, 1118static void blt_ring_flush(struct intel_ring_buffer *ring,
936 struct intel_ring_buffer *ring,
937 u32 invalidate_domains, 1119 u32 invalidate_domains,
938 u32 flush_domains) 1120 u32 flush_domains)
939{ 1121{
940 blt_ring_begin(dev, ring, 4); 1122 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
941 intel_ring_emit(dev, ring, MI_FLUSH_DW); 1123 return;
942 intel_ring_emit(dev, ring, 0);
943 intel_ring_emit(dev, ring, 0);
944 intel_ring_emit(dev, ring, 0);
945 intel_ring_advance(dev, ring);
946}
947
948static u32
949blt_ring_add_request(struct drm_device *dev,
950 struct intel_ring_buffer *ring,
951 u32 flush_domains)
952{
953 u32 seqno = i915_gem_get_seqno(dev);
954
955 blt_ring_begin(dev, ring, 4);
956 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
957 intel_ring_emit(dev, ring,
958 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
959 intel_ring_emit(dev, ring, seqno);
960 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
961 intel_ring_advance(dev, ring);
962 1124
963 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno); 1125 if (blt_ring_begin(ring, 4) == 0) {
964 return seqno; 1126 intel_ring_emit(ring, MI_FLUSH_DW);
1127 intel_ring_emit(ring, 0);
1128 intel_ring_emit(ring, 0);
1129 intel_ring_emit(ring, 0);
1130 intel_ring_advance(ring);
1131 }
965} 1132}
966 1133
967static void blt_ring_cleanup(struct intel_ring_buffer *ring) 1134static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -982,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = {
982 .init = blt_ring_init, 1149 .init = blt_ring_init,
983 .write_tail = ring_write_tail, 1150 .write_tail = ring_write_tail,
984 .flush = blt_ring_flush, 1151 .flush = blt_ring_flush,
985 .add_request = blt_ring_add_request, 1152 .add_request = gen6_add_request,
986 .get_seqno = ring_status_page_get_seqno, 1153 .get_seqno = ring_get_seqno,
987 .user_irq_get = blt_ring_get_user_irq, 1154 .irq_get = blt_ring_get_irq,
988 .user_irq_put = blt_ring_put_user_irq, 1155 .irq_put = blt_ring_put_irq,
989 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 1156 .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
990 .cleanup = blt_ring_cleanup, 1157 .cleanup = blt_ring_cleanup,
991}; 1158};
992 1159
993int intel_init_render_ring_buffer(struct drm_device *dev) 1160int intel_init_render_ring_buffer(struct drm_device *dev)
994{ 1161{
995 drm_i915_private_t *dev_priv = dev->dev_private; 1162 drm_i915_private_t *dev_priv = dev->dev_private;
996 1163 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
997 dev_priv->render_ring = render_ring; 1164
1165 *ring = render_ring;
1166 if (INTEL_INFO(dev)->gen >= 6) {
1167 ring->add_request = gen6_add_request;
1168 } else if (IS_GEN5(dev)) {
1169 ring->add_request = pc_render_add_request;
1170 ring->get_seqno = pc_render_get_seqno;
1171 }
998 1172
999 if (!I915_NEED_GFX_HWS(dev)) { 1173 if (!I915_NEED_GFX_HWS(dev)) {
1000 dev_priv->render_ring.status_page.page_addr 1174 ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
1001 = dev_priv->status_page_dmah->vaddr; 1175 memset(ring->status_page.page_addr, 0, PAGE_SIZE);
1002 memset(dev_priv->render_ring.status_page.page_addr,
1003 0, PAGE_SIZE);
1004 } 1176 }
1005 1177
1006 return intel_init_ring_buffer(dev, &dev_priv->render_ring); 1178 return intel_init_ring_buffer(dev, ring);
1007} 1179}
1008 1180
1009int intel_init_bsd_ring_buffer(struct drm_device *dev) 1181int intel_init_bsd_ring_buffer(struct drm_device *dev)
1010{ 1182{
1011 drm_i915_private_t *dev_priv = dev->dev_private; 1183 drm_i915_private_t *dev_priv = dev->dev_private;
1184 struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
1012 1185
1013 if (IS_GEN6(dev)) 1186 if (IS_GEN6(dev))
1014 dev_priv->bsd_ring = gen6_bsd_ring; 1187 *ring = gen6_bsd_ring;
1015 else 1188 else
1016 dev_priv->bsd_ring = bsd_ring; 1189 *ring = bsd_ring;
1017 1190
1018 return intel_init_ring_buffer(dev, &dev_priv->bsd_ring); 1191 return intel_init_ring_buffer(dev, ring);
1019} 1192}
1020 1193
1021int intel_init_blt_ring_buffer(struct drm_device *dev) 1194int intel_init_blt_ring_buffer(struct drm_device *dev)
1022{ 1195{
1023 drm_i915_private_t *dev_priv = dev->dev_private; 1196 drm_i915_private_t *dev_priv = dev->dev_private;
1197 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
1024 1198
1025 dev_priv->blt_ring = gen6_blt_ring; 1199 *ring = gen6_blt_ring;
1026 1200
1027 return intel_init_ring_buffer(dev, &dev_priv->blt_ring); 1201 return intel_init_ring_buffer(dev, ring);
1028} 1202}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3126c2681983..8e2e357ad6ee 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,37 @@
1#ifndef _INTEL_RINGBUFFER_H_ 1#ifndef _INTEL_RINGBUFFER_H_
2#define _INTEL_RINGBUFFER_H_ 2#define _INTEL_RINGBUFFER_H_
3 3
4enum {
5 RCS = 0x0,
6 VCS,
7 BCS,
8 I915_NUM_RINGS,
9};
10
4struct intel_hw_status_page { 11struct intel_hw_status_page {
5 void *page_addr; 12 u32 __iomem *page_addr;
6 unsigned int gfx_addr; 13 unsigned int gfx_addr;
7 struct drm_gem_object *obj; 14 struct drm_i915_gem_object *obj;
8}; 15};
9 16
10#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base)) 17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
18
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
11#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) 20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
12#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base)) 21
22#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
13#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) 23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
14#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base)) 24
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
15#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) 26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
16#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base)) 27
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
17#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) 29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
18 30
19struct drm_i915_gem_execbuffer2; 31#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
32#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
33#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
34
20struct intel_ring_buffer { 35struct intel_ring_buffer {
21 const char *name; 36 const char *name;
22 enum intel_ring_id { 37 enum intel_ring_id {
@@ -25,44 +40,36 @@ struct intel_ring_buffer {
25 RING_BLT = 0x4, 40 RING_BLT = 0x4,
26 } id; 41 } id;
27 u32 mmio_base; 42 u32 mmio_base;
28 unsigned long size;
29 void *virtual_start; 43 void *virtual_start;
30 struct drm_device *dev; 44 struct drm_device *dev;
31 struct drm_gem_object *gem_object; 45 struct drm_i915_gem_object *obj;
32 46
33 unsigned int head; 47 u32 actual_head;
34 unsigned int tail; 48 u32 head;
49 u32 tail;
35 int space; 50 int space;
51 int size;
36 struct intel_hw_status_page status_page; 52 struct intel_hw_status_page status_page;
37 53
 38 u32 irq_gem_seqno; /* last seq seen at irq time */ 54 u32 irq_seqno; /* last seq seen at irq time */
39 u32 waiting_gem_seqno; 55 u32 waiting_seqno;
40 int user_irq_refcount; 56 u32 sync_seqno[I915_NUM_RINGS-1];
41 void (*user_irq_get)(struct drm_device *dev, 57 atomic_t irq_refcount;
42 struct intel_ring_buffer *ring); 58 bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
43 void (*user_irq_put)(struct drm_device *dev, 59 void (*irq_put)(struct intel_ring_buffer *ring);
44 struct intel_ring_buffer *ring);
45 60
46 int (*init)(struct drm_device *dev, 61 int (*init)(struct intel_ring_buffer *ring);
47 struct intel_ring_buffer *ring);
48 62
49 void (*write_tail)(struct drm_device *dev, 63 void (*write_tail)(struct intel_ring_buffer *ring,
50 struct intel_ring_buffer *ring,
51 u32 value); 64 u32 value);
52 void (*flush)(struct drm_device *dev, 65 void (*flush)(struct intel_ring_buffer *ring,
53 struct intel_ring_buffer *ring, 66 u32 invalidate_domains,
54 u32 invalidate_domains, 67 u32 flush_domains);
55 u32 flush_domains); 68 int (*add_request)(struct intel_ring_buffer *ring,
56 u32 (*add_request)(struct drm_device *dev, 69 u32 *seqno);
57 struct intel_ring_buffer *ring, 70 u32 (*get_seqno)(struct intel_ring_buffer *ring);
58 u32 flush_domains); 71 int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
59 u32 (*get_seqno)(struct drm_device *dev, 72 u32 offset, u32 length);
60 struct intel_ring_buffer *ring);
61 int (*dispatch_gem_execbuffer)(struct drm_device *dev,
62 struct intel_ring_buffer *ring,
63 struct drm_i915_gem_execbuffer2 *exec,
64 struct drm_clip_rect *cliprects,
65 uint64_t exec_offset);
66 void (*cleanup)(struct intel_ring_buffer *ring); 73 void (*cleanup)(struct intel_ring_buffer *ring);
67 74
68 /** 75 /**
@@ -95,7 +102,7 @@ struct intel_ring_buffer {
95 /** 102 /**
96 * Do we have some not yet emitted requests outstanding? 103 * Do we have some not yet emitted requests outstanding?
97 */ 104 */
98 bool outstanding_lazy_request; 105 u32 outstanding_lazy_request;
99 106
100 wait_queue_head_t irq_queue; 107 wait_queue_head_t irq_queue;
101 drm_local_map_t map; 108 drm_local_map_t map;
@@ -104,44 +111,54 @@ struct intel_ring_buffer {
104}; 111};
105 112
106static inline u32 113static inline u32
114intel_ring_sync_index(struct intel_ring_buffer *ring,
115 struct intel_ring_buffer *other)
116{
117 int idx;
118
119 /*
120 * cs -> 0 = vcs, 1 = bcs
 121 * vcs -> 0 = bcs, 1 = cs
 122 * bcs -> 0 = cs, 1 = vcs
123 */
124
125 idx = (other - ring) - 1;
126 if (idx < 0)
127 idx += I915_NUM_RINGS;
128
129 return idx;
130}
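
A worked instance of the mapping in the comment, assuming the rings live in dev_priv->ring[] in RCS (0), VCS (1), BCS (2) order: for ring = VCS and other = RCS, idx = (0 - 1) - 1 = -2, which wraps to -2 + 3 = 1, matching "vcs -> 1 = cs"; for ring = RCS and other = VCS, idx = (1 - 0) - 1 = 0, matching "cs -> 0 = vcs".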
131
132static inline u32
107intel_read_status_page(struct intel_ring_buffer *ring, 133intel_read_status_page(struct intel_ring_buffer *ring,
108 int reg) 134 int reg)
109{ 135{
110 u32 *regs = ring->status_page.page_addr; 136 return ioread32(ring->status_page.page_addr + reg);
111 return regs[reg];
112} 137}
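
With page_addr now typed __iomem, status-page reads go through ioread32 instead of a plain array access. The usual consumer is the seqno slot that MI_STORE_DWORD_INDEX writes, e.g.:

        u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);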
113 138
114int intel_init_ring_buffer(struct drm_device *dev, 139void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
115 struct intel_ring_buffer *ring); 140int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
116void intel_cleanup_ring_buffer(struct drm_device *dev, 141int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
117 struct intel_ring_buffer *ring); 142
118int intel_wait_ring_buffer(struct drm_device *dev, 143static inline void intel_ring_emit(struct intel_ring_buffer *ring,
119 struct intel_ring_buffer *ring, int n); 144 u32 data)
120void intel_ring_begin(struct drm_device *dev,
121 struct intel_ring_buffer *ring, int n);
122
123static inline void intel_ring_emit(struct drm_device *dev,
124 struct intel_ring_buffer *ring,
125 unsigned int data)
126{ 145{
127 unsigned int *virt = ring->virtual_start + ring->tail; 146 iowrite32(data, ring->virtual_start + ring->tail);
128 *virt = data;
129 ring->tail += 4; 147 ring->tail += 4;
130} 148}
131 149
132void intel_ring_advance(struct drm_device *dev, 150void intel_ring_advance(struct intel_ring_buffer *ring);
133 struct intel_ring_buffer *ring);
134 151
135u32 intel_ring_get_seqno(struct drm_device *dev, 152u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
136 struct intel_ring_buffer *ring); 153int intel_ring_sync(struct intel_ring_buffer *ring,
154 struct intel_ring_buffer *to,
155 u32 seqno);
137 156
138int intel_init_render_ring_buffer(struct drm_device *dev); 157int intel_init_render_ring_buffer(struct drm_device *dev);
139int intel_init_bsd_ring_buffer(struct drm_device *dev); 158int intel_init_bsd_ring_buffer(struct drm_device *dev);
140int intel_init_blt_ring_buffer(struct drm_device *dev); 159int intel_init_blt_ring_buffer(struct drm_device *dev);
141 160
142u32 intel_ring_get_active_head(struct drm_device *dev, 161u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
143 struct intel_ring_buffer *ring); 162void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
144void intel_ring_setup_status_page(struct drm_device *dev,
145 struct intel_ring_buffer *ring);
146 163
147#endif /* _INTEL_RINGBUFFER_H_ */ 164#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd5..6c0bb18a26e8 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
107 * This is set if we treat the device as HDMI, instead of DVI. 107 * This is set if we treat the device as HDMI, instead of DVI.
108 */ 108 */
109 bool is_hdmi; 109 bool is_hdmi;
110 bool has_audio; 110 bool has_hdmi_monitor;
111 bool has_hdmi_audio;
111 112
112 /** 113 /**
113 * This is set if we detect output of sdvo device as LVDS and 114 * This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1023 if (!intel_sdvo_set_target_input(intel_sdvo)) 1024 if (!intel_sdvo_set_target_input(intel_sdvo))
1024 return; 1025 return;
1025 1026
1026 if (intel_sdvo->is_hdmi && 1027 if (intel_sdvo->has_hdmi_monitor &&
1027 !intel_sdvo_set_avi_infoframe(intel_sdvo)) 1028 !intel_sdvo_set_avi_infoframe(intel_sdvo))
1028 return; 1029 return;
1029 1030
@@ -1044,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1044 1045
1045 /* Set the SDVO control regs. */ 1046 /* Set the SDVO control regs. */
1046 if (INTEL_INFO(dev)->gen >= 4) { 1047 if (INTEL_INFO(dev)->gen >= 4) {
1047 sdvox = SDVO_BORDER_ENABLE; 1048 sdvox = 0;
1049 if (INTEL_INFO(dev)->gen < 5)
1050 sdvox |= SDVO_BORDER_ENABLE;
1048 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 1051 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
1049 sdvox |= SDVO_VSYNC_ACTIVE_HIGH; 1052 sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
1050 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 1053 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1063,7 +1066,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1063 } 1066 }
1064 if (intel_crtc->pipe == 1) 1067 if (intel_crtc->pipe == 1)
1065 sdvox |= SDVO_PIPE_B_SELECT; 1068 sdvox |= SDVO_PIPE_B_SELECT;
1066 if (intel_sdvo->has_audio) 1069 if (intel_sdvo->has_hdmi_audio)
1067 sdvox |= SDVO_AUDIO_ENABLE; 1070 sdvox |= SDVO_AUDIO_ENABLE;
1068 1071
1069 if (INTEL_INFO(dev)->gen >= 4) { 1072 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1074,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1074 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1077 sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
1075 } 1078 }
1076 1079
1077 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL) 1080 if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
1081 INTEL_INFO(dev)->gen < 5)
1078 sdvox |= SDVO_STALL_SELECT; 1082 sdvox |= SDVO_STALL_SELECT;
1079 intel_sdvo_write_sdvox(intel_sdvo, sdvox); 1083 intel_sdvo_write_sdvox(intel_sdvo, sdvox);
1080} 1084}
@@ -1295,55 +1299,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
1295 return drm_get_edid(connector, &sdvo->ddc); 1299 return drm_get_edid(connector, &sdvo->ddc);
1296} 1300}
1297 1301
1298static struct drm_connector *
1299intel_find_analog_connector(struct drm_device *dev)
1300{
1301 struct drm_connector *connector;
1302 struct intel_sdvo *encoder;
1303
1304 list_for_each_entry(encoder,
1305 &dev->mode_config.encoder_list,
1306 base.base.head) {
1307 if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
1308 list_for_each_entry(connector,
1309 &dev->mode_config.connector_list,
1310 head) {
1311 if (&encoder->base ==
1312 intel_attached_encoder(connector))
1313 return connector;
1314 }
1315 }
1316 }
1317
1318 return NULL;
1319}
1320
1321static int
1322intel_analog_is_connected(struct drm_device *dev)
1323{
1324 struct drm_connector *analog_connector;
1325
1326 analog_connector = intel_find_analog_connector(dev);
1327 if (!analog_connector)
1328 return false;
1329
1330 if (analog_connector->funcs->detect(analog_connector, false) ==
1331 connector_status_disconnected)
1332 return false;
1333
1334 return true;
1335}
1336
1337/* Mac mini hack -- use the same DDC as the analog connector */ 1302/* Mac mini hack -- use the same DDC as the analog connector */
1338static struct edid * 1303static struct edid *
1339intel_sdvo_get_analog_edid(struct drm_connector *connector) 1304intel_sdvo_get_analog_edid(struct drm_connector *connector)
1340{ 1305{
1341 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1306 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1342 1307
1343 if (!intel_analog_is_connected(connector->dev)) 1308 return drm_get_edid(connector,
1344 return NULL; 1309 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1345
1346 return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1347} 1310}
1348 1311
1349enum drm_connector_status 1312enum drm_connector_status
@@ -1388,8 +1351,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1388 /* DDC bus is shared, match EDID to connector type */ 1351 /* DDC bus is shared, match EDID to connector type */
1389 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1352 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1390 status = connector_status_connected; 1353 status = connector_status_connected;
1391 intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); 1354 if (intel_sdvo->is_hdmi) {
1392 intel_sdvo->has_audio = drm_detect_monitor_audio(edid); 1355 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1356 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1357 }
1393 } 1358 }
1394 connector->display_info.raw_edid = NULL; 1359 connector->display_info.raw_edid = NULL;
1395 kfree(edid); 1360 kfree(edid);
@@ -1398,7 +1363,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1398 if (status == connector_status_connected) { 1363 if (status == connector_status_connected) {
1399 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1364 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1400 if (intel_sdvo_connector->force_audio) 1365 if (intel_sdvo_connector->force_audio)
1401 intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; 1366 intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
1402 } 1367 }
1403 1368
1404 return status; 1369 return status;
@@ -1415,10 +1380,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1415 if (!intel_sdvo_write_cmd(intel_sdvo, 1380 if (!intel_sdvo_write_cmd(intel_sdvo,
1416 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1381 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1417 return connector_status_unknown; 1382 return connector_status_unknown;
1418 if (intel_sdvo->is_tv) { 1383
1419 /* add 30ms delay when the output type is SDVO-TV */ 1384 /* add 30ms delay when the output type might be TV */
1385 if (intel_sdvo->caps.output_flags &
1386 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
1420 mdelay(30); 1387 mdelay(30);
1421 } 1388
1422 if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) 1389 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1423 return connector_status_unknown; 1390 return connector_status_unknown;
1424 1391
@@ -1472,8 +1439,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1472 edid = intel_sdvo_get_analog_edid(connector); 1439 edid = intel_sdvo_get_analog_edid(connector);
1473 1440
1474 if (edid != NULL) { 1441 if (edid != NULL) {
1475 drm_mode_connector_update_edid_property(connector, edid); 1442 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1476 drm_add_edid_modes(connector, edid); 1443 drm_mode_connector_update_edid_property(connector, edid);
1444 drm_add_edid_modes(connector, edid);
1445 }
1477 connector->display_info.raw_edid = NULL; 1446 connector->display_info.raw_edid = NULL;
1478 kfree(edid); 1447 kfree(edid);
1479 } 1448 }
@@ -1713,12 +1682,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
1713 1682
1714 intel_sdvo_connector->force_audio = val; 1683 intel_sdvo_connector->force_audio = val;
1715 1684
1716 if (val > 0 && intel_sdvo->has_audio) 1685 if (val > 0 && intel_sdvo->has_hdmi_audio)
1717 return 0; 1686 return 0;
1718 if (val < 0 && !intel_sdvo->has_audio) 1687 if (val < 0 && !intel_sdvo->has_hdmi_audio)
1719 return 0; 1688 return 0;
1720 1689
1721 intel_sdvo->has_audio = val > 0; 1690 intel_sdvo->has_hdmi_audio = val > 0;
1722 goto done; 1691 goto done;
1723 } 1692 }
1724 1693
@@ -1942,9 +1911,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
1942 speed = mapping->i2c_speed; 1911 speed = mapping->i2c_speed;
1943 } 1912 }
1944 1913
1945 sdvo->i2c = &dev_priv->gmbus[pin].adapter; 1914 if (pin < GMBUS_NUM_PORTS) {
1946 intel_gmbus_set_speed(sdvo->i2c, speed); 1915 sdvo->i2c = &dev_priv->gmbus[pin].adapter;
1947 intel_gmbus_force_bit(sdvo->i2c, true); 1916 intel_gmbus_set_speed(sdvo->i2c, speed);
1917 intel_gmbus_force_bit(sdvo->i2c, true);
1918 } else
1919 sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
1948} 1920}
1949 1921
1950static bool 1922static bool
@@ -2070,6 +2042,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2070 intel_sdvo_set_colorimetry(intel_sdvo, 2042 intel_sdvo_set_colorimetry(intel_sdvo,
2071 SDVO_COLORIMETRY_RGB256); 2043 SDVO_COLORIMETRY_RGB256);
2072 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2044 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2045
2046 intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
2073 intel_sdvo->is_hdmi = true; 2047 intel_sdvo->is_hdmi = true;
2074 } 2048 }
2075 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2049 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2051,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2077 2051
2078 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2052 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2079 2053
2080 intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
2081
2082 return true; 2054 return true;
2083} 2055}
2084 2056
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f7681989316..93206e4eaa6f 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1245 int type; 1245 int type;
1246 1246
1247 /* Disable TV interrupts around load detect or we'll recurse */ 1247 /* Disable TV interrupts around load detect or we'll recurse */
1248 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1248 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1249 i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | 1249 i915_disable_pipestat(dev_priv, 0,
1250 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1250 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1251 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1251 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1252 1253
1253 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1254 save_tv_dac = tv_dac = I915_READ(TV_DAC);
1254 save_tv_ctl = tv_ctl = I915_READ(TV_CTL); 1255 save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1301 I915_WRITE(TV_CTL, save_tv_ctl); 1302 I915_WRITE(TV_CTL, save_tv_ctl);
1302 1303
1303 /* Restore interrupt config */ 1304 /* Restore interrupt config */
1304 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 1305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1305 i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE | 1306 i915_enable_pipestat(dev_priv, 0,
1307 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1306 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1308 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1307 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1308 1310
1309 return type; 1311 return type;
1310} 1312}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 9b0773b75e86..258fa5e7a2d9 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
112 base += 3; 112 base += 3;
113 break; 113 break;
114 case ATOM_IIO_WRITE: 114 case ATOM_IIO_WRITE:
115 (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
115 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); 116 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
116 base += 3; 117 base += 3;
117 break; 118 break;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 53bfe3afb0fa..c6a37e036f11 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1198,8 +1198,10 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
1198 mc->vram_end, mc->real_vram_size >> 20); 1198 mc->vram_end, mc->real_vram_size >> 20);
1199 } else { 1199 } else {
1200 u64 base = 0; 1200 u64 base = 0;
1201 if (rdev->flags & RADEON_IS_IGP) 1201 if (rdev->flags & RADEON_IS_IGP) {
1202 base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24; 1202 base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
1203 base <<= 24;
1204 }
1203 radeon_vram_location(rdev, &rdev->mc, base); 1205 radeon_vram_location(rdev, &rdev->mc, base);
1204 rdev->mc.gtt_base_align = 0; 1206 rdev->mc.gtt_base_align = 0;
1205 radeon_gtt_location(rdev, mc); 1207 radeon_gtt_location(rdev, mc);
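
The IGP branch fixes a 32-bit truncation: (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24 is evaluated entirely in 32-bit arithmetic, so bits 32-39 of the frame-buffer base are discarded before the value ever widens to u64. Widening first and shifting afterwards keeps the full 40-bit address. A sketch of the difference (kernel u32/u64 types assumed):

        u64 base_wrong(u32 fb_loc)
        {
                return (fb_loc & 0xFFFF) << 24; /* shifted as 32-bit: high bits lost */
        }

        u64 base_fixed(u32 fb_loc)
        {
                u64 base = fb_loc & 0xFFFF;     /* widen to 64 bits first */
                return base << 24;              /* full result preserved */
        }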
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9bebac1ec006..0f90fc3482ce 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -315,7 +315,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
315 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) { 315 if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
316 /* the initial DDX does bad things with the CB size occasionally */ 316 /* the initial DDX does bad things with the CB size occasionally */
317 /* it rounds up height too far for slice tile max but the BO is smaller */ 317 /* it rounds up height too far for slice tile max but the BO is smaller */
318 tmp = (height - 7) * pitch * bpe; 318 tmp = (height - 7) * 8 * bpe;
319 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) { 319 if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
320 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 320 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
321 return -EINVAL; 321 return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d84612ae47e0..33cda016b083 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -86,6 +86,7 @@
86#define R600_HDP_NONSURFACE_BASE 0x2c04 86#define R600_HDP_NONSURFACE_BASE 0x2c04
87 87
88#define R600_BUS_CNTL 0x5420 88#define R600_BUS_CNTL 0x5420
89# define R600_BIOS_ROM_DIS (1 << 1)
89#define R600_CONFIG_CNTL 0x5424 90#define R600_CONFIG_CNTL 0x5424
90#define R600_CONFIG_MEMSIZE 0x5428 91#define R600_CONFIG_MEMSIZE 0x5428
91#define R600_CONFIG_F0_BASE 0x542C 92#define R600_CONFIG_F0_BASE 0x542C
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d6c611eee204..45bc750e9ae2 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
98 } 98 }
99 } 99 }
100 100
101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) &&
104 (gpio->usClkMaskRegisterIndex == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14;
107 }
108
101 if (gpio->sucI2cId.ucAccess == id) { 109 if (gpio->sucI2cId.ucAccess == id) {
102 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 110 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
103 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 111 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
174 } 182 }
175 } 183 }
176 184
185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) &&
188 (gpio->usClkMaskRegisterIndex == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14;
191 }
192
177 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 193 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
178 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 194 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
179 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 195 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 654787ec43f4..8f2c7b50dcf5 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
 	}
 	return true;
 }
+
 static bool r700_read_disabled_bios(struct radeon_device *rdev)
 {
 	uint32_t viph_control;
@@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
 	bool r;
 
 	viph_control = RREG32(RADEON_VIPH_CONTROL);
-	bus_cntl = RREG32(RADEON_BUS_CNTL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
 	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
 	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
 	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
 	/* disable VIP */
 	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 	/* enable the rom */
-	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
 	/* Disable VGA mode */
 	WREG32(AVIVO_D1VGA_CONTROL,
 	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
 		cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
 	}
 	WREG32(RADEON_VIPH_CONTROL, viph_control);
-	WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(R600_BUS_CNTL, bus_cntl);
 	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
 	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
 	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
 	bool r;
 
 	viph_control = RREG32(RADEON_VIPH_CONTROL);
-	bus_cntl = RREG32(RADEON_BUS_CNTL);
+	bus_cntl = RREG32(R600_BUS_CNTL);
 	d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
 	d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
 	vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
 	/* disable VIP */
 	WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
 	/* enable the rom */
-	WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+	WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
 	/* Disable VGA mode */
 	WREG32(AVIVO_D1VGA_CONTROL,
 	       (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
 
 	/* restore regs */
 	WREG32(RADEON_VIPH_CONTROL, viph_control);
-	WREG32(RADEON_BUS_CNTL, bus_cntl);
+	WREG32(R600_BUS_CNTL, bus_cntl);
 	WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
 	WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
 	WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 111a844c1ecb..591fcae8f224 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -730,7 +730,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
 			clk = RBIOS8(offset + 3 + (i * 5) + 3);
 			data = RBIOS8(offset + 3 + (i * 5) + 4);
 			i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
-						    clk, data);
+						    (1 << clk), (1 << data));
 			rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
 			break;
 		}
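
The fix above implies the legacy BIOS table hands back bit indices while combios_setup_i2c_bus() expects register bit masks (an inference from the hunk, not stated elsewhere in this patch); shifting converts one into the other:

    #include <stdio.h>

    int main(void)
    {
        unsigned char clk = 3, data = 4;      /* indices as read via RBIOS8() */

        unsigned int clk_mask  = 1u << clk;   /* 0x08 */
        unsigned int data_mask = 1u << data;  /* 0x10 */

        printf("clk_mask=0x%02x data_mask=0x%02x\n", clk_mask, data_mask);
        return 0;
    }
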
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f3ba066ded20..5b00f92a50a2 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1178,6 +1178,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
 		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1193,6 +1195,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      1);
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
 		break;
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_DVID:
@@ -1229,6 +1233,11 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      rdev->mode_info.load_detect_property,
 					      1);
 		}
+		connector->interlace_allowed = true;
+		if (connector_type == DRM_MODE_CONNECTOR_DVII)
+			connector->doublescan_allowed = true;
+		else
+			connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
 	case DRM_MODE_CONNECTOR_HDMIB:
@@ -1259,6 +1268,11 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      0);
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = true;
+		if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+			connector->doublescan_allowed = true;
+		else
+			connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
 	case DRM_MODE_CONNECTOR_eDP:
@@ -1296,6 +1310,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      rdev->mode_info.underscan_vborder_property,
 					      0);
 		}
+		connector->interlace_allowed = true;
+		/* in theory with a DP to VGA converter... */
+		connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1311,6 +1328,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      radeon_atombios_get_tv_info(rdev));
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1329,6 +1348,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
 		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
 		break;
 	}
 
@@ -1406,6 +1427,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
 		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
 		break;
 	case DRM_MODE_CONNECTOR_DVIA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1421,6 +1444,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 					      1);
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = true;
+		connector->doublescan_allowed = true;
 		break;
 	case DRM_MODE_CONNECTOR_DVII:
 	case DRM_MODE_CONNECTOR_DVID:
@@ -1438,6 +1463,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
 					      1);
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = true;
+		if (connector_type == DRM_MODE_CONNECTOR_DVII)
+			connector->doublescan_allowed = true;
+		else
+			connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1460,6 +1490,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 					      radeon_combios_get_tv_info(rdev));
 		/* no HPD on analog connectors */
 		radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1473,6 +1505,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
 		subpixel_order = SubPixelHorizontalRGB;
+		connector->interlace_allowed = false;
+		connector->doublescan_allowed = false;
 		break;
 	}
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 3952cf3d0ee9..86660cb425ab 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -287,7 +287,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
 		mc->mc_vram_size = mc->aper_size;
 	}
 	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-	dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
 			mc->mc_vram_size >> 20, mc->vram_start,
 			mc->vram_end, mc->real_vram_size >> 20);
 }
@@ -324,7 +324,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
 		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 	}
 	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
-	dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
 }
 
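
The format-string widening above matters once VRAM or GTT ranges sit above 4 GiB: %08llX guarantees only eight hex digits, so larger addresses print wider and break column alignment in the log, while %016llX pads the full 64-bit width. A standalone illustration with an invented address:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long start = 0x100000000ULL;  /* 4 GiB */

        printf("old: 0x%08llX\n", start);   /* 0x100000000, nine digits */
        printf("new: 0x%016llX\n", start);  /* 0x0000000100000000 */
        return 0;
    }
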
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8bdf0ba2983a..7d6b8e88f746 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -70,7 +70,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
 	u32 c = 0;
 
 	rbo->placement.fpfn = 0;
-	rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+	rbo->placement.lpfn = 0;
 	rbo->placement.placement = rbo->placements;
 	rbo->placement.busy_placement = rbo->placements;
 	if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -92,7 +92,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 {
 	struct radeon_bo *bo;
 	enum ttm_bo_type type;
-	int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+	unsigned long max_size = 0;
 	int r;
 
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -105,6 +106,14 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
 	}
 	*bo_ptr = NULL;
 
+	/* maximum bo size is the minimum of visible vram and gtt size */
+	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+	if ((page_align << PAGE_SHIFT) >= max_size) {
+		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
+			__func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+		return -ENOMEM;
+	}
+
 retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
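
The new guard rejects buffer objects that could never be placed anywhere: anything at least as large as the smaller of visible VRAM and the GTT fails up front instead of thrashing the eviction path. A quick userspace check of the same arithmetic, including the MiB conversion used by the warning (all constants invented):

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long visible_vram = 256UL << 20;                 /* 256 MiB */
        unsigned long gtt_size     = 512UL << 20;                 /* 512 MiB */
        unsigned long page_align   = (300UL << 20) >> PAGE_SHIFT; /* 300 MiB request, in pages */

        unsigned long max_size = min_ul(visible_vram, gtt_size);
        if ((page_align << PAGE_SHIFT) >= max_size) {
            /* same MiB conversion as the printk in the hunk */
            printf("alloc %luM bigger than %luM limit\n",
                   page_align >> (20 - PAGE_SHIFT), max_size >> 20);
            return 1;
        }
        return 0;
    }
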
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 515345b11ac9..88cb04e7962b 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1386,6 +1386,7 @@ static const struct hid_device_id hid_blacklist[] = {
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-egalax.c b/drivers/hid/hid-egalax.c
index 54b017ad258d..5a1b52e0eb85 100644
--- a/drivers/hid/hid-egalax.c
+++ b/drivers/hid/hid-egalax.c
@@ -221,7 +221,7 @@ static int egalax_probe(struct hid_device *hdev, const struct hid_device_id *id)
 	struct egalax_data *td;
 	struct hid_report *report;
 
-	td = kmalloc(sizeof(struct egalax_data), GFP_KERNEL);
+	td = kzalloc(sizeof(struct egalax_data), GFP_KERNEL);
 	if (!td) {
 		dev_err(&hdev->dev, "cannot allocate eGalax data\n");
 		return -ENOMEM;
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index bb0b3659437b..d8d372bae3cc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -174,7 +174,7 @@ static int hidinput_setkeycode(struct input_dev *dev,
 
 	clear_bit(*old_keycode, dev->keybit);
 	set_bit(usage->code, dev->keybit);
-	dbg_hid(KERN_DEBUG "Assigned keycode %d to HID usage code %x\n",
+	dbg_hid("Assigned keycode %d to HID usage code %x\n",
 			usage->code, usage->hid);
 
 	/*
@@ -203,8 +203,8 @@ static int hidinput_setkeycode(struct input_dev *dev,
  *
  * as seen in the HID specification v1.11 6.2.2.7 Global Items.
  *
- * Only exponent 1 length units are processed. Centimeters are converted to
- * inches. Degrees are converted to radians.
+ * Only exponent 1 length units are processed. Centimeters and inches are
+ * converted to millimeters. Degrees are converted to radians.
  */
 static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
 {
@@ -225,13 +225,16 @@ static __s32 hidinput_calc_abs_res(const struct hid_field *field, __u16 code)
 	 */
 	if (code == ABS_X || code == ABS_Y || code == ABS_Z) {
 		if (field->unit == 0x11) {		/* If centimeters */
-			/* Convert to inches */
-			prev = logical_extents;
-			logical_extents *= 254;
-			if (logical_extents < prev)
+			/* Convert to millimeters */
+			unit_exponent += 1;
+		} else if (field->unit == 0x13) {	/* If inches */
+			/* Convert to millimeters */
+			prev = physical_extents;
+			physical_extents *= 254;
+			if (physical_extents < prev)
 				return 0;
-			unit_exponent += 2;
-		} else if (field->unit != 0x13) {	/* If not inches */
+			unit_exponent -= 1;
+		} else {
 			return 0;
 		}
 	} else if (code == ABS_RX || code == ABS_RY || code == ABS_RZ) {
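
The rewritten conversion reports resolution per millimeter instead of per inch. The exponent bookkeeping: 1 cm = 10 mm, so centimeters only need the decimal exponent bumped by one; 1 inch = 25.4 mm = 254 x 10^-1 mm, hence the multiply by 254 paired with an exponent decrement (plus the wrap test on the multiply). A worked example with invented extents:

    #include <stdio.h>

    int main(void)
    {
        long logical  = 4096;  /* logical_maximum - logical_minimum */
        long physical = 6;     /* physical extent, device-reported, in inches */
        int  exponent = 0;     /* HID unit exponent */

        /* inches -> millimeters: 25.4 mm/in == 254 * 10^-1 */
        physical *= 254;
        exponent -= 1;

        /* resolution = logical / (physical * 10^exponent), in units per mm */
        double res = logical / (physical * 0.1);   /* 10^-1 */
        printf("%.2f units/mm\n", res);            /* 4096 / 152.4 ~= 26.88 */
        return 0;
    }
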
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 15434c814793..25be4e1461bd 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -256,6 +256,8 @@ static const struct hid_device_id tm_devices[] = {
 		.driver_data = (unsigned long)ff_joystick },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654),   /* FGT Force Feedback Wheel */
 		.driver_data = (unsigned long)ff_joystick },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a),   /* F430 Force Feedback Wheel */
+		.driver_data = (unsigned long)ff_joystick },
 	{ }
 };
 MODULE_DEVICE_TABLE(hid, tm_devices);
diff --git a/drivers/hwmon/i5k_amb.c b/drivers/hwmon/i5k_amb.c
index 937983407e2a..c4c40be0edbf 100644
--- a/drivers/hwmon/i5k_amb.c
+++ b/drivers/hwmon/i5k_amb.c
@@ -497,12 +497,14 @@ static unsigned long chipset_ids[] = {
 	0
 };
 
+#ifdef MODULE
 static struct pci_device_id i5k_amb_ids[] __devinitdata = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR) },
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, i5k_amb_ids);
+#endif
 
 static int __devinit i5k_amb_probe(struct platform_device *pdev)
 {
diff --git a/drivers/hwmon/lis3lv02d_i2c.c b/drivers/hwmon/lis3lv02d_i2c.c
index 9f4bae07f719..8853afce85ce 100644
--- a/drivers/hwmon/lis3lv02d_i2c.c
+++ b/drivers/hwmon/lis3lv02d_i2c.c
@@ -186,7 +186,7 @@ static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
 	return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int lis3lv02d_i2c_suspend(struct device *dev)
 {
 	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
@@ -213,12 +213,9 @@ static int lis3lv02d_i2c_resume(struct device *dev)
 
 	return 0;
 }
-#else
-#define lis3lv02d_i2c_suspend NULL
-#define lis3lv02d_i2c_resume NULL
-#define lis3lv02d_i2c_shutdown NULL
-#endif
+#endif /* CONFIG_PM_SLEEP */
 
+#ifdef CONFIG_PM_RUNTIME
 static int lis3_i2c_runtime_suspend(struct device *dev)
 {
 	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
@@ -236,6 +233,7 @@ static int lis3_i2c_runtime_resume(struct device *dev)
 	lis3lv02d_poweron(lis3);
 	return 0;
 }
+#endif /* CONFIG_PM_RUNTIME */
 
 static const struct i2c_device_id lis3lv02d_id[] = {
 	{"lis3lv02d", 0 },
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index b923074b2cbe..30f06e956bfb 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -75,8 +75,7 @@ config I2C_HELPER_AUTO
 	  In doubt, say Y.
 
 config I2C_SMBUS
-	tristate
-	prompt "SMBus-specific protocols" if !I2C_HELPER_AUTO
+	tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
 	help
 	  Say Y here if you want support for SMBus extensions to the I2C
 	  specification. At the moment, the only supported extension is
diff --git a/drivers/i2c/algos/Kconfig b/drivers/i2c/algos/Kconfig
index 3998dd620a03..f1cfe7e5508b 100644
--- a/drivers/i2c/algos/Kconfig
+++ b/drivers/i2c/algos/Kconfig
@@ -3,7 +3,7 @@
 #
 
 menu "I2C Algorithms"
-	depends on !I2C_HELPER_AUTO
+	visible if !I2C_HELPER_AUTO
 
 config I2C_ALGOBIT
 	tristate "I2C bit-banging interfaces"
@@ -15,15 +15,3 @@ config I2C_ALGOPCA
 	tristate "I2C PCA 9564 interfaces"
 
 endmenu
-
-# In automatic configuration mode, we still have to define the
-# symbols to avoid unmet dependencies.
-
-if I2C_HELPER_AUTO
-config I2C_ALGOBIT
-	tristate
-config I2C_ALGOPCF
-	tristate
-config I2C_ALGOPCA
-	tristate
-endif
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index bb7e19280821..9b737ff133e2 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -278,36 +278,6 @@ void ib_ud_header_init(int payload_bytes,
 EXPORT_SYMBOL(ib_ud_header_init);
 
 /**
- * ib_lrh_header_pack - Pack LRH header struct into wire format
- * @lrh:unpacked LRH header struct
- * @buf:Buffer to pack into
- *
- * ib_lrh_header_pack() packs the LRH header structure @lrh into
- * wire format in the buffer @buf.
- */
-int ib_lrh_header_pack(struct ib_unpacked_lrh *lrh, void *buf)
-{
-	ib_pack(lrh_table, ARRAY_SIZE(lrh_table), lrh, buf);
-	return 0;
-}
-EXPORT_SYMBOL(ib_lrh_header_pack);
-
-/**
- * ib_lrh_header_unpack - Unpack LRH structure from wire format
- * @lrh:unpacked LRH header struct
- * @buf:Buffer to pack into
- *
- * ib_lrh_header_unpack() unpacks the LRH header structure from
- * wire format (in buf) into @lrh.
- */
-int ib_lrh_header_unpack(void *buf, struct ib_unpacked_lrh *lrh)
-{
-	ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), buf, lrh);
-	return 0;
-}
-EXPORT_SYMBOL(ib_lrh_header_unpack);
-
-/**
  * ib_ud_header_pack - Pack UD header struct into wire format
  * @header:UD header struct
  * @buf:Buffer to pack into
diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
index 5440da0e59b4..1b1146f87124 100644
--- a/drivers/infiniband/core/uverbs_marshall.c
+++ b/drivers/infiniband/core/uverbs_marshall.c
@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
 	dst->grh.sgid_index = src->grh.sgid_index;
 	dst->grh.hop_limit = src->grh.hop_limit;
 	dst->grh.traffic_class = src->grh.traffic_class;
+	memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
 	dst->dlid = src->dlid;
 	dst->sl = src->sl;
 	dst->src_path_bits = src->src_path_bits;
 	dst->static_rate = src->static_rate;
 	dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
 	dst->port_num = src->port_num;
+	dst->reserved = 0;
 }
 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
 
 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 			     struct ib_qp_attr *src)
 {
+	dst->qp_state = src->qp_state;
 	dst->cur_qp_state = src->cur_qp_state;
 	dst->path_mtu = src->path_mtu;
 	dst->path_mig_state = src->path_mig_state;
@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 	dst->rnr_retry = src->rnr_retry;
 	dst->alt_port_num = src->alt_port_num;
 	dst->alt_timeout = src->alt_timeout;
+	memset(dst->reserved, 0, sizeof(dst->reserved));
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
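
The added memsets close a kernel-to-user infoleak: a field-by-field copy leaves reserved members (and compiler padding) holding whatever stale bytes were already there, and those bytes then travel to user space with the rest of the structure. A userspace sketch of the failure mode and the fix (the struct is a stand-in, not the uverbs layout):

    #include <stdio.h>
    #include <string.h>

    struct ah_attr_user {          /* stand-in for ib_uverbs_ah_attr */
        unsigned char dlid;
        unsigned char reserved[3]; /* never touched by the field copy */
    };

    int main(void)
    {
        struct ah_attr_user dst;   /* deliberately not zero-initialized */

        dst.dlid = 1;
        /* without this, reserved[] would carry stale stack bytes out */
        memset(dst.reserved, 0, sizeof(dst.reserved));

        printf("reserved[0]=%d\n", dst.reserved[0]);
        return 0;
    }
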
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bf3e20cd0298..30e09caf0da9 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -219,7 +219,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	struct net_device *ndev;
 	enum ib_mtu tmp;
 
-	props->active_width = IB_WIDTH_4X;
+	props->active_width = IB_WIDTH_1X;
 	props->active_speed = 4;
 	props->port_cap_flags = IB_PORT_CM_SUP;
 	props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
@@ -242,7 +242,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	tmp = iboe_get_mtu(ndev->mtu);
 	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
 
-	props->state = netif_running(ndev) && netif_oper_up(ndev) ?
+	props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
 					IB_PORT_ACTIVE : IB_PORT_DOWN;
 	props->phys_state = state_to_phys_state(props->state);
 
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 9a7794ac34c1..2001f20a4361 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1816,6 +1816,11 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->fence_size = (wr->send_flags & IB_SEND_FENCE ?
 				    MLX4_WQE_CTRL_FENCE : 0) | size;
 
+		if (be16_to_cpu(vlan) < 0x1000) {
+			ctrl->ins_vlan = 1 << 6;
+			ctrl->vlan_tag = vlan;
+		}
+
 		/*
 		 * Make sure descriptor is fully written before
 		 * setting ownership bit (because HW can start
@@ -1831,11 +1836,6 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0) | blh;
 
-		if (be16_to_cpu(vlan) < 0x1000) {
-			ctrl->ins_vlan = 1 << 6;
-			ctrl->vlan_tag = vlan;
-		}
-
 		stamp = ind + qp->sq_spare_wqes;
 		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
 
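
Moving the VLAN writes above the barrier is an ordering fix: per the retained comment, every descriptor field must be fully written before the ownership bit hands the descriptor to the hardware. The shape of the pattern, with a GCC builtin standing in for the kernel's write barrier (structure and names are illustrative):

    struct desc {
        unsigned int vlan_tag;
        unsigned int owner;
    };

    static void post_desc(struct desc *d, unsigned int vlan)
    {
        d->vlan_tag = vlan;      /* all payload writes first ...            */
        __sync_synchronize();    /* ... then the write barrier ...          */
        d->owner = 1;            /* ... then ownership: HW may read it now  */
    }

    int main(void)
    {
        struct desc d = { 0, 0 };
        post_desc(&d, 100);
        return 0;
    }
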
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index d53b9e900234..27b6a3ce18ca 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -245,6 +245,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
 		goto err_free_tgfx;
 	}
 
+	parport_put_port(pp);
 	return tgfx;
 
  err_free_dev:
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index b8c51b9781db..3a87f3ba5f75 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -179,6 +179,22 @@ config KEYBOARD_GPIO
 	  To compile this driver as a module, choose M here: the
 	  module will be called gpio_keys.
 
+config KEYBOARD_GPIO_POLLED
+	tristate "Polled GPIO buttons"
+	depends on GENERIC_GPIO
+	select INPUT_POLLDEV
+	help
+	  This driver implements support for buttons connected
+	  to GPIO pins that are not capable of generating interrupts.
+
+	  Say Y here if your device has buttons connected
+	  directly to such GPIO pins. Your board-specific
+	  setup logic must also provide a platform device,
+	  with configuration data saying which GPIOs are used.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called gpio_keys_polled.
+
 config KEYBOARD_TCA6416
 	tristate "TCA6416 Keypad Support"
 	depends on I2C
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile
index a34452e8ebe2..622de73a445d 100644
--- a/drivers/input/keyboard/Makefile
+++ b/drivers/input/keyboard/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_KEYBOARD_BFIN) += bf54x-keys.o
 obj-$(CONFIG_KEYBOARD_DAVINCI) += davinci_keyscan.o
 obj-$(CONFIG_KEYBOARD_EP93XX) += ep93xx_keypad.o
 obj-$(CONFIG_KEYBOARD_GPIO) += gpio_keys.o
+obj-$(CONFIG_KEYBOARD_GPIO_POLLED) += gpio_keys_polled.o
 obj-$(CONFIG_KEYBOARD_TCA6416) += tca6416-keypad.o
 obj-$(CONFIG_KEYBOARD_HIL) += hil_kbd.o
 obj-$(CONFIG_KEYBOARD_HIL_OLD) += hilkbd.o
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
new file mode 100644
index 000000000000..4c17aff20657
--- /dev/null
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -0,0 +1,261 @@
+/*
+ * Driver for buttons on GPIO lines not capable of generating interrupts
+ *
+ * Copyright (C) 2007-2010 Gabor Juhos <juhosg@openwrt.org>
+ * Copyright (C) 2010 Nuno Goncalves <nunojpg@gmail.com>
+ *
+ * This file was based on: /drivers/input/misc/cobalt_btns.c
+ *	Copyright (C) 2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ *
+ * also was based on: /drivers/input/keyboard/gpio_keys.c
+ *	Copyright 2005 Phil Blundell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/input.h>
+#include <linux/input-polldev.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/gpio_keys.h>
+
+#define DRV_NAME	"gpio-keys-polled"
+
+struct gpio_keys_button_data {
+	int last_state;
+	int count;
+	int threshold;
+	int can_sleep;
+};
+
+struct gpio_keys_polled_dev {
+	struct input_polled_dev *poll_dev;
+	struct device *dev;
+	struct gpio_keys_platform_data *pdata;
+	struct gpio_keys_button_data data[0];
+};
+
+static void gpio_keys_polled_check_state(struct input_dev *input,
+					 struct gpio_keys_button *button,
+					 struct gpio_keys_button_data *bdata)
+{
+	int state;
+
+	if (bdata->can_sleep)
+		state = !!gpio_get_value_cansleep(button->gpio);
+	else
+		state = !!gpio_get_value(button->gpio);
+
+	if (state != bdata->last_state) {
+		unsigned int type = button->type ?: EV_KEY;
+
+		input_event(input, type, button->code,
+			    !!(state ^ button->active_low));
+		input_sync(input);
+		bdata->count = 0;
+		bdata->last_state = state;
+	}
+}
+
+static void gpio_keys_polled_poll(struct input_polled_dev *dev)
+{
+	struct gpio_keys_polled_dev *bdev = dev->private;
+	struct gpio_keys_platform_data *pdata = bdev->pdata;
+	struct input_dev *input = dev->input;
+	int i;
+
+	for (i = 0; i < bdev->pdata->nbuttons; i++) {
+		struct gpio_keys_button_data *bdata = &bdev->data[i];
+
+		if (bdata->count < bdata->threshold)
+			bdata->count++;
+		else
+			gpio_keys_polled_check_state(input, &pdata->buttons[i],
+						     bdata);
+	}
+}
+
+static void gpio_keys_polled_open(struct input_polled_dev *dev)
+{
+	struct gpio_keys_polled_dev *bdev = dev->private;
+	struct gpio_keys_platform_data *pdata = bdev->pdata;
+
+	if (pdata->enable)
+		pdata->enable(bdev->dev);
+}
+
+static void gpio_keys_polled_close(struct input_polled_dev *dev)
+{
+	struct gpio_keys_polled_dev *bdev = dev->private;
+	struct gpio_keys_platform_data *pdata = bdev->pdata;
+
+	if (pdata->disable)
+		pdata->disable(bdev->dev);
+}
+
+static int __devinit gpio_keys_polled_probe(struct platform_device *pdev)
+{
+	struct gpio_keys_platform_data *pdata = pdev->dev.platform_data;
+	struct device *dev = &pdev->dev;
+	struct gpio_keys_polled_dev *bdev;
+	struct input_polled_dev *poll_dev;
+	struct input_dev *input;
+	int error;
+	int i;
+
+	if (!pdata || !pdata->poll_interval)
+		return -EINVAL;
+
+	bdev = kzalloc(sizeof(struct gpio_keys_polled_dev) +
+		       pdata->nbuttons * sizeof(struct gpio_keys_button_data),
+		       GFP_KERNEL);
+	if (!bdev) {
+		dev_err(dev, "no memory for private data\n");
+		return -ENOMEM;
+	}
+
+	poll_dev = input_allocate_polled_device();
+	if (!poll_dev) {
+		dev_err(dev, "no memory for polled device\n");
+		error = -ENOMEM;
+		goto err_free_bdev;
+	}
+
+	poll_dev->private = bdev;
+	poll_dev->poll = gpio_keys_polled_poll;
+	poll_dev->poll_interval = pdata->poll_interval;
+	poll_dev->open = gpio_keys_polled_open;
+	poll_dev->close = gpio_keys_polled_close;
+
+	input = poll_dev->input;
+
+	input->evbit[0] = BIT(EV_KEY);
+	input->name = pdev->name;
+	input->phys = DRV_NAME"/input0";
+	input->dev.parent = &pdev->dev;
+
+	input->id.bustype = BUS_HOST;
+	input->id.vendor = 0x0001;
+	input->id.product = 0x0001;
+	input->id.version = 0x0100;
+
+	for (i = 0; i < pdata->nbuttons; i++) {
+		struct gpio_keys_button *button = &pdata->buttons[i];
+		struct gpio_keys_button_data *bdata = &bdev->data[i];
+		unsigned int gpio = button->gpio;
+		unsigned int type = button->type ?: EV_KEY;
+
+		if (button->wakeup) {
+			dev_err(dev, DRV_NAME " does not support wakeup\n");
+			error = -EINVAL;
+			goto err_free_gpio;
+		}
+
+		error = gpio_request(gpio,
+				     button->desc ? button->desc : DRV_NAME);
+		if (error) {
+			dev_err(dev, "unable to claim gpio %u, err=%d\n",
+				gpio, error);
+			goto err_free_gpio;
+		}
+
+		error = gpio_direction_input(gpio);
+		if (error) {
+			dev_err(dev,
+				"unable to set direction on gpio %u, err=%d\n",
+				gpio, error);
+			goto err_free_gpio;
+		}
+
+		bdata->can_sleep = gpio_cansleep(gpio);
+		bdata->last_state = -1;
+		bdata->threshold = DIV_ROUND_UP(button->debounce_interval,
+						pdata->poll_interval);
+
+		input_set_capability(input, type, button->code);
+	}
+
+	bdev->poll_dev = poll_dev;
+	bdev->dev = dev;
+	bdev->pdata = pdata;
+	platform_set_drvdata(pdev, bdev);
+
+	error = input_register_polled_device(poll_dev);
+	if (error) {
+		dev_err(dev, "unable to register polled device, err=%d\n",
+			error);
+		goto err_free_gpio;
+	}
+
+	/* report initial state of the buttons */
+	for (i = 0; i < pdata->nbuttons; i++)
+		gpio_keys_polled_check_state(input, &pdata->buttons[i],
+					     &bdev->data[i]);
+
+	return 0;
+
+err_free_gpio:
+	while (--i >= 0)
+		gpio_free(pdata->buttons[i].gpio);
+
+	input_free_polled_device(poll_dev);
+
+err_free_bdev:
+	kfree(bdev);
+
+	platform_set_drvdata(pdev, NULL);
+	return error;
+}
+
+static int __devexit gpio_keys_polled_remove(struct platform_device *pdev)
+{
+	struct gpio_keys_polled_dev *bdev = platform_get_drvdata(pdev);
+	struct gpio_keys_platform_data *pdata = bdev->pdata;
+	int i;
+
+	input_unregister_polled_device(bdev->poll_dev);
+
+	for (i = 0; i < pdata->nbuttons; i++)
+		gpio_free(pdata->buttons[i].gpio);
+
+	input_free_polled_device(bdev->poll_dev);
+
+	kfree(bdev);
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+static struct platform_driver gpio_keys_polled_driver = {
+	.probe	= gpio_keys_polled_probe,
+	.remove	= __devexit_p(gpio_keys_polled_remove),
+	.driver	= {
+		.name	= DRV_NAME,
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init gpio_keys_polled_init(void)
+{
+	return platform_driver_register(&gpio_keys_polled_driver);
+}
+
+static void __exit gpio_keys_polled_exit(void)
+{
+	platform_driver_unregister(&gpio_keys_polled_driver);
+}
+
+module_init(gpio_keys_polled_init);
+module_exit(gpio_keys_polled_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_DESCRIPTION("Polled GPIO Buttons driver");
+MODULE_ALIAS("platform:" DRV_NAME);
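
The new driver debounces in units of polls: a change is acted on only once the line has been sampled for the full debounce interval, so the threshold rounds the interval up to whole poll periods. Checking that arithmetic in isolation:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned debounce_ms = 50, poll_ms = 20;

        /* 50 ms of debounce at a 20 ms poll rate -> 3 polls, not 2 */
        printf("threshold = %u polls\n", DIV_ROUND_UP(debounce_ms, poll_ms));
        return 0;
    }
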
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 613a3652f98f..0aefaa885871 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,7 +51,8 @@
 #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
-#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100100)
+#define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
+#define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
 
 /* synaptics modes query bits */
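
The old mask OR-ed the one-button and two-button ClickPad bits together; splitting them lets the driver tell the two pad types apart. A tiny check of the new masks (the capability word below is invented):

    #include <stdio.h>

    #define SYN_CAP_CLICKPAD(ex0c)     ((ex0c) & 0x100000) /* 1-button */
    #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button */

    int main(void)
    {
        unsigned long ex0c = 0x100000;  /* example extended capability word */

        printf("clickpad=%d clickpad2btn=%d\n",
               SYN_CAP_CLICKPAD(ex0c) != 0, SYN_CAP_CLICKPAD2BTN(ex0c) != 0);
        return 0;
    }
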
diff --git a/drivers/input/serio/gscps2.c b/drivers/input/serio/gscps2.c
index 3c287dd879d3..4225f5d6b15f 100644
--- a/drivers/input/serio/gscps2.c
+++ b/drivers/input/serio/gscps2.c
@@ -358,7 +358,7 @@ static int __devinit gscps2_probe(struct parisc_device *dev)
 	gscps2_reset(ps2port);
 	ps2port->id = readb(ps2port->addr + GSC_ID) & 0x0f;
 
-	snprintf(serio->name, sizeof(serio->name), "GSC PS/2 %s",
+	snprintf(serio->name, sizeof(serio->name), "gsc-ps2-%s",
 		 (ps2port->id == GSC_ID_KEYBOARD) ? "keyboard" : "mouse");
 	strlcpy(serio->phys, dev_name(&dev->dev), sizeof(serio->phys));
 	serio->id.type = SERIO_8042;
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index b3252ef1e279..4852b440960a 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -1436,6 +1436,12 @@ static struct wacom_features wacom_features_0xD2 =
1436 { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT }; 1436 { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
1437static struct wacom_features wacom_features_0xD3 = 1437static struct wacom_features wacom_features_0xD3 =
1438 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT }; 1438 { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
1439static struct wacom_features wacom_features_0xD8 =
1440 { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
1441static struct wacom_features wacom_features_0xDA =
1442 { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 63, BAMBOO_PT };
1443static struct wacom_features wacom_features_0xDB =
1444 { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13530, 1023, 63, BAMBOO_PT };
1439 1445
1440#define USB_DEVICE_WACOM(prod) \ 1446#define USB_DEVICE_WACOM(prod) \
1441 USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ 1447 USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \
@@ -1504,6 +1510,9 @@ const struct usb_device_id wacom_ids[] = {
1504 { USB_DEVICE_WACOM(0xD1) }, 1510 { USB_DEVICE_WACOM(0xD1) },
1505 { USB_DEVICE_WACOM(0xD2) }, 1511 { USB_DEVICE_WACOM(0xD2) },
1506 { USB_DEVICE_WACOM(0xD3) }, 1512 { USB_DEVICE_WACOM(0xD3) },
1513 { USB_DEVICE_WACOM(0xD8) },
1514 { USB_DEVICE_WACOM(0xDA) },
1515 { USB_DEVICE_WACOM(0xDB) },
1507 { USB_DEVICE_WACOM(0xF0) }, 1516 { USB_DEVICE_WACOM(0xF0) },
1508 { USB_DEVICE_WACOM(0xCC) }, 1517 { USB_DEVICE_WACOM(0xCC) },
1509 { USB_DEVICE_WACOM(0x90) }, 1518 { USB_DEVICE_WACOM(0x90) },
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f45f80f6d336..73fd6642b681 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -178,6 +178,7 @@ static const struct usb_device_id usbtouch_devices[] = {
 
 #ifdef CONFIG_TOUCHSCREEN_USB_ITM
 	{USB_DEVICE(0x0403, 0xf9e9), .driver_info = DEVTYPE_ITM},
+	{USB_DEVICE(0x16e3, 0xf9e9), .driver_info = DEVTYPE_ITM},
 #endif
 
 #ifdef CONFIG_TOUCHSCREEN_USB_ETURBO
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 2e847a90bad0..f2b5bab5e6a1 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@ __setup("icn=", icn_setup);
 static int __init icn_init(void)
 {
 	char *p;
-	char rev[10];
+	char rev[20];
 
 	memset(&dev, 0, sizeof(icn_dev));
 	dev.memaddr = (membase & 0x0ffc000);
@@ -1637,9 +1637,10 @@ static int __init icn_init(void)
 	spin_lock_init(&dev.devlock);
 
 	if ((p = strchr(revision, ':'))) {
-		strcpy(rev, p + 1);
+		strncpy(rev, p + 1, 20);
 		p = strchr(rev, '$');
-		*p = 0;
+		if (p)
+			*p = 0;
 	} else
 		strcpy(rev, " ??? ");
 	printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
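
The bounded copy and the NULL check above are both needed, and one more caveat applies when reusing the pattern: strncpy() does not NUL-terminate when the source fills the buffer, so a fully defensive version also terminates by hand (the revision string below is invented):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char rev[20];
        const char *src = " 1.65.6.8 $";   /* e.g. the tail of a CVS id */

        strncpy(rev, src, sizeof(rev));
        rev[sizeof(rev) - 1] = '\0';       /* strncpy may omit the NUL */

        char *p = strchr(rev, '$');
        if (p)                             /* strchr can return NULL */
            *p = '\0';

        printf("rev='%s'\n", rev);
        return 0;
    }
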
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 77b8fd20cd90..6f190f4cdbc0 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -7,20 +7,20 @@ menuconfig NEW_LEDS
7 This is not related to standard keyboard LEDs which are controlled 7 This is not related to standard keyboard LEDs which are controlled
8 via the input system. 8 via the input system.
9 9
10if NEW_LEDS
11
12config LEDS_CLASS 10config LEDS_CLASS
13 bool "LED Class Support" 11 bool "LED Class Support"
12 depends on NEW_LEDS
14 help 13 help
15 This option enables the led sysfs class in /sys/class/leds. You'll 14 This option enables the led sysfs class in /sys/class/leds. You'll
16 need this to do anything useful with LEDs. If unsure, say N. 15 need this to do anything useful with LEDs. If unsure, say N.
17 16
18if LEDS_CLASS 17if NEW_LEDS
19 18
20comment "LED drivers" 19comment "LED drivers"
21 20
22config LEDS_88PM860X 21config LEDS_88PM860X
23 tristate "LED Support for Marvell 88PM860x PMIC" 22 tristate "LED Support for Marvell 88PM860x PMIC"
23 depends on LEDS_CLASS
24 depends on MFD_88PM860X 24 depends on MFD_88PM860X
25 help 25 help
26 This option enables support for on-chip LED drivers found on Marvell 26 This option enables support for on-chip LED drivers found on Marvell
@@ -28,6 +28,7 @@ config LEDS_88PM860X
28 28
29config LEDS_ATMEL_PWM 29config LEDS_ATMEL_PWM
30 tristate "LED Support using Atmel PWM outputs" 30 tristate "LED Support using Atmel PWM outputs"
31 depends on LEDS_CLASS
31 depends on ATMEL_PWM 32 depends on ATMEL_PWM
32 help 33 help
33 This option enables support for LEDs driven using outputs 34 This option enables support for LEDs driven using outputs
@@ -35,6 +36,7 @@ config LEDS_ATMEL_PWM
35 36
36config LEDS_LOCOMO 37config LEDS_LOCOMO
37 tristate "LED Support for Locomo device" 38 tristate "LED Support for Locomo device"
39 depends on LEDS_CLASS
38 depends on SHARP_LOCOMO 40 depends on SHARP_LOCOMO
39 help 41 help
40 This option enables support for the LEDs on Sharp Locomo. 42 This option enables support for the LEDs on Sharp Locomo.
@@ -42,6 +44,7 @@ config LEDS_LOCOMO
42 44
43config LEDS_MIKROTIK_RB532 45config LEDS_MIKROTIK_RB532
44 tristate "LED Support for Mikrotik Routerboard 532" 46 tristate "LED Support for Mikrotik Routerboard 532"
47 depends on LEDS_CLASS
45 depends on MIKROTIK_RB532 48 depends on MIKROTIK_RB532
46 help 49 help
47 This option enables support for the so called "User LED" of 50 This option enables support for the so called "User LED" of
@@ -49,6 +52,7 @@ config LEDS_MIKROTIK_RB532
49 52
50config LEDS_S3C24XX 53config LEDS_S3C24XX
51 tristate "LED Support for Samsung S3C24XX GPIO LEDs" 54 tristate "LED Support for Samsung S3C24XX GPIO LEDs"
55 depends on LEDS_CLASS
52 depends on ARCH_S3C2410 56 depends on ARCH_S3C2410
53 help 57 help
54 This option enables support for LEDs connected to GPIO lines 58 This option enables support for LEDs connected to GPIO lines
@@ -56,12 +60,14 @@ config LEDS_S3C24XX
56 60
57config LEDS_AMS_DELTA 61config LEDS_AMS_DELTA
58 tristate "LED Support for the Amstrad Delta (E3)" 62 tristate "LED Support for the Amstrad Delta (E3)"
63 depends on LEDS_CLASS
59 depends on MACH_AMS_DELTA 64 depends on MACH_AMS_DELTA
60 help 65 help
61 This option enables support for the LEDs on Amstrad Delta (E3). 66 This option enables support for the LEDs on Amstrad Delta (E3).
62 67
63config LEDS_NET48XX 68config LEDS_NET48XX
64 tristate "LED Support for Soekris net48xx series Error LED" 69 tristate "LED Support for Soekris net48xx series Error LED"
70 depends on LEDS_CLASS
65 depends on SCx200_GPIO 71 depends on SCx200_GPIO
66 help 72 help
67 This option enables support for the Soekris net4801 and net4826 error 73 This option enables support for the Soekris net4801 and net4826 error
@@ -79,18 +85,21 @@ config LEDS_NET5501
79 85
80config LEDS_FSG 86config LEDS_FSG
81 tristate "LED Support for the Freecom FSG-3" 87 tristate "LED Support for the Freecom FSG-3"
88 depends on LEDS_CLASS
82 depends on MACH_FSG 89 depends on MACH_FSG
83 help 90 help
84 This option enables support for the LEDs on the Freecom FSG-3. 91 This option enables support for the LEDs on the Freecom FSG-3.
85 92
86config LEDS_WRAP 93config LEDS_WRAP
87 tristate "LED Support for the WRAP series LEDs" 94 tristate "LED Support for the WRAP series LEDs"
95 depends on LEDS_CLASS
88 depends on SCx200_GPIO 96 depends on SCx200_GPIO
89 help 97 help
90 This option enables support for the PCEngines WRAP programmable LEDs. 98 This option enables support for the PCEngines WRAP programmable LEDs.
91 99
92config LEDS_ALIX2 100config LEDS_ALIX2
93 tristate "LED Support for ALIX.2 and ALIX.3 series" 101 tristate "LED Support for ALIX.2 and ALIX.3 series"
102 depends on LEDS_CLASS
94 depends on X86 && !GPIO_CS5535 && !CS5535_GPIO 103 depends on X86 && !GPIO_CS5535 && !CS5535_GPIO
95 help 104 help
96 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs. 105 This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
@@ -98,12 +107,14 @@ config LEDS_ALIX2
98 107
99config LEDS_H1940 108config LEDS_H1940
100 tristate "LED Support for iPAQ H1940 device" 109 tristate "LED Support for iPAQ H1940 device"
110 depends on LEDS_CLASS
101 depends on ARCH_H1940 111 depends on ARCH_H1940
102 help 112 help
103 This option enables support for the LEDs on the h1940. 113 This option enables support for the LEDs on the h1940.
104 114
105config LEDS_COBALT_QUBE 115config LEDS_COBALT_QUBE
106 tristate "LED Support for the Cobalt Qube series front LED" 116 tristate "LED Support for the Cobalt Qube series front LED"
117 depends on LEDS_CLASS
107 depends on MIPS_COBALT 118 depends on MIPS_COBALT
108 help 119 help
109 This option enables support for the front LED on Cobalt Qube series 120 This option enables support for the front LED on Cobalt Qube series
@@ -117,6 +128,7 @@ config LEDS_COBALT_RAQ
117 128
118config LEDS_SUNFIRE 129config LEDS_SUNFIRE
119 tristate "LED support for SunFire servers." 130 tristate "LED support for SunFire servers."
131 depends on LEDS_CLASS
120 depends on SPARC64 132 depends on SPARC64
121 select LEDS_TRIGGERS 133 select LEDS_TRIGGERS
122 help 134 help
@@ -125,6 +137,7 @@ config LEDS_SUNFIRE
125 137
126config LEDS_HP6XX 138config LEDS_HP6XX
127 tristate "LED Support for the HP Jornada 6xx" 139 tristate "LED Support for the HP Jornada 6xx"
140 depends on LEDS_CLASS
128 depends on SH_HP6XX 141 depends on SH_HP6XX
129 help 142 help
130 This option enables LED support for the handheld 143 This option enables LED support for the handheld
@@ -132,6 +145,7 @@ config LEDS_HP6XX
132 145
133config LEDS_PCA9532 146config LEDS_PCA9532
134 tristate "LED driver for PCA9532 dimmer" 147 tristate "LED driver for PCA9532 dimmer"
148 depends on LEDS_CLASS
135 depends on I2C && INPUT && EXPERIMENTAL 149 depends on I2C && INPUT && EXPERIMENTAL
136 help 150 help
137 This option enables support for NXP pca9532 151 This option enables support for NXP pca9532
@@ -140,6 +154,7 @@ config LEDS_PCA9532
140 154
141config LEDS_GPIO 155config LEDS_GPIO
142 tristate "LED Support for GPIO connected LEDs" 156 tristate "LED Support for GPIO connected LEDs"
157 depends on LEDS_CLASS
143 depends on GENERIC_GPIO 158 depends on GENERIC_GPIO
144 help 159 help
145 This option enables support for the LEDs connected to GPIO 160 This option enables support for the LEDs connected to GPIO
@@ -167,6 +182,7 @@ config LEDS_GPIO_OF
167 182
168config LEDS_LP3944 183config LEDS_LP3944
169 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" 184 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
185 depends on LEDS_CLASS
170 depends on I2C 186 depends on I2C
171 help 187 help
172 This option enables support for LEDs connected to the National 188 This option enables support for LEDs connected to the National
@@ -196,6 +212,7 @@ config LEDS_LP5523
196 212
197config LEDS_CLEVO_MAIL 213config LEDS_CLEVO_MAIL
198 tristate "Mail LED on Clevo notebook" 214 tristate "Mail LED on Clevo notebook"
215 depends on LEDS_CLASS
199 depends on X86 && SERIO_I8042 && DMI 216 depends on X86 && SERIO_I8042 && DMI
200 help 217 help
201 This driver makes the mail LED accessible from userspace 218 This driver makes the mail LED accessible from userspace
@@ -226,6 +243,7 @@ config LEDS_CLEVO_MAIL
226 243
227config LEDS_PCA955X 244config LEDS_PCA955X
228 tristate "LED Support for PCA955x I2C chips" 245 tristate "LED Support for PCA955x I2C chips"
246 depends on LEDS_CLASS
229 depends on I2C 247 depends on I2C
230 help 248 help
231 This option enables support for LEDs connected to PCA955x 249 This option enables support for LEDs connected to PCA955x
@@ -234,6 +252,7 @@ config LEDS_PCA955X
234 252
235config LEDS_WM831X_STATUS 253config LEDS_WM831X_STATUS
236 tristate "LED support for status LEDs on WM831x PMICs" 254 tristate "LED support for status LEDs on WM831x PMICs"
255 depends on LEDS_CLASS
237 depends on MFD_WM831X 256 depends on MFD_WM831X
238 help 257 help
239 This option enables support for the status LEDs of the WM831x 258 This option enables support for the status LEDs of the WM831x
@@ -241,6 +260,7 @@ config LEDS_WM831X_STATUS
241 260
242config LEDS_WM8350 261config LEDS_WM8350
243 tristate "LED Support for WM8350 AudioPlus PMIC" 262 tristate "LED Support for WM8350 AudioPlus PMIC"
263 depends on LEDS_CLASS
244 depends on MFD_WM8350 264 depends on MFD_WM8350
245 help 265 help
246 This option enables support for LEDs driven by the Wolfson 266 This option enables support for LEDs driven by the Wolfson
@@ -248,6 +268,7 @@ config LEDS_WM8350
248 268
249config LEDS_DA903X 269config LEDS_DA903X
250 tristate "LED Support for DA9030/DA9034 PMIC" 270 tristate "LED Support for DA9030/DA9034 PMIC"
271 depends on LEDS_CLASS
251 depends on PMIC_DA903X 272 depends on PMIC_DA903X
252 help 273 help
253 This option enables support for on-chip LED drivers found 274 This option enables support for on-chip LED drivers found
@@ -255,6 +276,7 @@ config LEDS_DA903X
255 276
256config LEDS_DAC124S085 277config LEDS_DAC124S085
257 tristate "LED Support for DAC124S085 SPI DAC" 278 tristate "LED Support for DAC124S085 SPI DAC"
279 depends on LEDS_CLASS
258 depends on SPI 280 depends on SPI
259 help 281 help
260 This option enables support for DAC124S085 SPI DAC from NatSemi, 282 This option enables support for DAC124S085 SPI DAC from NatSemi,
@@ -262,18 +284,21 @@ config LEDS_DAC124S085
 
 config LEDS_PWM
 	tristate "PWM driven LED Support"
+	depends on LEDS_CLASS
 	depends on HAVE_PWM
 	help
 	  This option enables support for pwm driven LEDs
 
 config LEDS_REGULATOR
 	tristate "REGULATOR driven LED support"
+	depends on LEDS_CLASS
 	depends on REGULATOR
 	help
 	  This option enables support for regulator driven LEDs.
 
 config LEDS_BD2802
 	tristate "LED driver for BD2802 RGB LED"
+	depends on LEDS_CLASS
 	depends on I2C
 	help
 	  This option enables support for BD2802GU RGB LED driver chips
@@ -281,6 +306,7 @@ config LEDS_BD2802
 
 config LEDS_INTEL_SS4200
 	tristate "LED driver for Intel NAS SS4200 series"
+	depends on LEDS_CLASS
 	depends on PCI && DMI
 	help
 	  This option enables support for the Intel SS4200 series of
@@ -290,6 +316,7 @@ config LEDS_INTEL_SS4200
 
 config LEDS_LT3593
 	tristate "LED driver for LT3593 controllers"
+	depends on LEDS_CLASS
 	depends on GENERIC_GPIO
 	help
 	  This option enables support for LEDs driven by a Linear Technology
@@ -298,6 +325,7 @@ config LEDS_LT3593
 
 config LEDS_ADP5520
 	tristate "LED Support for ADP5520/ADP5501 PMIC"
+	depends on LEDS_CLASS
 	depends on PMIC_ADP5520
 	help
 	  This option enables support for on-chip LED drivers found
@@ -308,6 +336,7 @@ config LEDS_ADP5520
 
 config LEDS_DELL_NETBOOKS
 	tristate "External LED on Dell Business Netbooks"
+	depends on LEDS_CLASS
 	depends on X86 && ACPI_WMI
 	help
 	  This adds support for the Latitude 2100 and similar
@@ -315,6 +344,7 @@ config LEDS_DELL_NETBOOKS
 
 config LEDS_MC13783
 	tristate "LED Support for MC13783 PMIC"
+	depends on LEDS_CLASS
 	depends on MFD_MC13783
 	help
 	  This option enable support for on-chip LED drivers found
@@ -322,6 +352,7 @@ config LEDS_MC13783
 
 config LEDS_NS2
 	tristate "LED support for Network Space v2 GPIO LEDs"
+	depends on LEDS_CLASS
 	depends on MACH_NETSPACE_V2 || MACH_INETSPACE_V2 || MACH_NETSPACE_MAX_V2 || D2NET_V2
 	default y
 	help
@@ -340,17 +371,17 @@ config LEDS_NETXBIG
 
 config LEDS_TRIGGERS
 	bool "LED Trigger support"
+	depends on LEDS_CLASS
 	help
 	  This option enables trigger support for the leds class.
 	  These triggers allow kernel events to drive the LEDs and can
 	  be configured via sysfs. If unsure, say Y.
 
-if LEDS_TRIGGERS
-
 comment "LED Triggers"
 
 config LEDS_TRIGGER_TIMER
 	tristate "LED Timer Trigger"
+	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled by a programmable timer
 	  via sysfs. Some LED hardware can be programmed to start
@@ -362,12 +393,14 @@ config LEDS_TRIGGER_TIMER
 config LEDS_TRIGGER_IDE_DISK
 	bool "LED IDE Disk Trigger"
 	depends on IDE_GD_ATA
+	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled by IDE disk activity.
 	  If unsure, say Y.
 
 config LEDS_TRIGGER_HEARTBEAT
 	tristate "LED Heartbeat Trigger"
+	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled by a CPU load average.
 	  The flash frequency is a hyperbolic function of the 1-minute
@@ -376,6 +409,7 @@ config LEDS_TRIGGER_HEARTBEAT
 
 config LEDS_TRIGGER_BACKLIGHT
 	tristate "LED backlight Trigger"
+	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled as a backlight device: they
 	  turn off and on when the display is blanked and unblanked.
@@ -384,6 +418,7 @@ config LEDS_TRIGGER_BACKLIGHT
 
 config LEDS_TRIGGER_GPIO
 	tristate "LED GPIO Trigger"
+	depends on LEDS_TRIGGERS
 	depends on GPIOLIB
 	help
 	  This allows LEDs to be controlled by gpio events. It's good
@@ -396,6 +431,7 @@ config LEDS_TRIGGER_GPIO
 
 config LEDS_TRIGGER_DEFAULT_ON
 	tristate "LED Default ON Trigger"
+	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be initialised in the ON state.
 	  If unsure, say Y.
@@ -403,8 +439,4 @@ config LEDS_TRIGGER_DEFAULT_ON
 comment "iptables trigger is under Netfilter config (LED target)"
 	depends on LEDS_TRIGGERS
 
-endif # LEDS_TRIGGERS
-
-endif # LEDS_CLASS
-
 endif # NEW_LEDS
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 3782f31f06d2..33facd0c45d1 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -125,11 +125,22 @@ struct lp5521_chip {
 	u8	num_leds;
 };
 
-#define cdev_to_led(c)		container_of(c, struct lp5521_led, cdev)
-#define engine_to_lp5521(eng)	container_of((eng), struct lp5521_chip, \
-						engines[(eng)->id - 1])
-#define led_to_lp5521(led)	container_of((led), struct lp5521_chip, \
-						leds[(led)->id])
+static inline struct lp5521_led *cdev_to_led(struct led_classdev *cdev)
+{
+	return container_of(cdev, struct lp5521_led, cdev);
+}
+
+static inline struct lp5521_chip *engine_to_lp5521(struct lp5521_engine *engine)
+{
+	return container_of(engine, struct lp5521_chip,
+			    engines[engine->id - 1]);
+}
+
+static inline struct lp5521_chip *led_to_lp5521(struct lp5521_led *led)
+{
+	return container_of(led, struct lp5521_chip,
+			    leds[led->id]);
+}
 
 static void lp5521_led_brightness_work(struct work_struct *work);
 
@@ -185,14 +196,17 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
 
 	/* move current engine to direct mode and remember the state */
 	ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
-	usleep_range(1000, 10000);
+	/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+	usleep_range(1000, 2000);
 	ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
 
 	/* For loading, all the engines to load mode */
 	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
-	usleep_range(1000, 10000);
+	/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+	usleep_range(1000, 2000);
 	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
-	usleep_range(1000, 10000);
+	/* Mode change requires min 500 us delay. 1 - 2 ms with margin */
+	usleep_range(1000, 2000);
 
 	addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
 	i2c_smbus_write_i2c_block_data(client,
@@ -231,10 +245,6 @@ static int lp5521_configure(struct i2c_client *client,
 
 	lp5521_init_engine(chip, attr_group);
 
-	lp5521_write(client, LP5521_REG_RESET, 0xff);
-
-	usleep_range(10000, 20000);
-
 	/* Set all PWMs to direct control mode */
 	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
 
@@ -251,8 +261,8 @@ static int lp5521_configure(struct i2c_client *client,
 	ret |= lp5521_write(client, LP5521_REG_ENABLE,
 			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
 			LP5521_EXEC_RUN);
-	/* enable takes 500us */
-	usleep_range(500, 20000);
+	/* enable takes 500us. 1 - 2 ms leaves some margin */
+	usleep_range(1000, 2000);
 
 	return ret;
 }
@@ -305,7 +315,8 @@ static int lp5521_detect(struct i2c_client *client)
 			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
 	if (ret)
 		return ret;
-	usleep_range(1000, 10000);
+	/* enable takes 500us. 1 - 2 ms leaves some margin */
+	usleep_range(1000, 2000);
 	ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
 	if (ret)
 		return ret;
@@ -693,11 +704,16 @@ static int lp5521_probe(struct i2c_client *client,
 
 	if (pdata->enable) {
 		pdata->enable(0);
-		usleep_range(1000, 10000);
+		usleep_range(1000, 2000); /* Keep enable down at least 1ms */
 		pdata->enable(1);
-		usleep_range(1000, 10000); /* Spec says min 500us */
+		usleep_range(1000, 2000); /* 500us abs min. */
 	}
 
+	lp5521_write(client, LP5521_REG_RESET, 0xff);
+	usleep_range(10000, 20000); /*
+				     * Exact value is not available. 10 - 20ms
+				     * appears to be enough for reset.
+				     */
 	ret = lp5521_detect(client);
 
 	if (ret) {
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
index 1e11fcc08b28..0cc4ead2fd8b 100644
--- a/drivers/leds/leds-lp5523.c
+++ b/drivers/leds/leds-lp5523.c
@@ -134,15 +134,18 @@ struct lp5523_chip {
 	u8	num_leds;
 };
 
-#define cdev_to_led(c)		container_of(c, struct lp5523_led, cdev)
+static inline struct lp5523_led *cdev_to_led(struct led_classdev *cdev)
+{
+	return container_of(cdev, struct lp5523_led, cdev);
+}
 
-static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
+static inline struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
 {
 	return container_of(engine, struct lp5523_chip,
 			    engines[engine->id - 1]);
 }
 
-static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
+static inline struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
 {
 	return container_of(led, struct lp5523_chip,
 			    leds[led->id]);
@@ -200,13 +203,9 @@ static int lp5523_configure(struct i2c_client *client)
 		{ 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
 	};
 
-	lp5523_write(client, LP5523_REG_RESET, 0xff);
-
-	usleep_range(10000, 100000);
-
 	ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
-	/* Chip startup time after reset is 500 us */
-	usleep_range(1000, 10000);
+	/* Chip startup time is 500 us, 1 - 2 ms gives some margin */
+	usleep_range(1000, 2000);
 
 	ret |= lp5523_write(client, LP5523_REG_CONFIG,
 			    LP5523_AUTO_INC | LP5523_PWR_SAVE |
@@ -243,8 +242,8 @@ static int lp5523_configure(struct i2c_client *client)
 		return -1;
 	}
 
-	/* Wait 3ms and check the engine status */
-	usleep_range(3000, 20000);
+	/* Let the programs run for couple of ms and check the engine status */
+	usleep_range(3000, 6000);
 	lp5523_read(client, LP5523_REG_STATUS, &status);
 	status &= LP5523_ENG_STATUS_MASK;
 
@@ -449,10 +448,10 @@ static ssize_t lp5523_selftest(struct device *dev,
 	/* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
 	lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
 		     LP5523_EN_LEDTEST | 16);
-	usleep_range(3000, 10000);
+	usleep_range(3000, 6000); /* ADC conversion time is typically 2.7 ms */
 	ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
 	if (!(status & LP5523_LEDTEST_DONE))
-		usleep_range(3000, 10000);
+		usleep_range(3000, 6000); /* Was not ready. Wait little bit */
 
 	ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
 	vdd--;	/* There may be some fluctuation in measurement */
@@ -468,16 +467,16 @@ static ssize_t lp5523_selftest(struct device *dev,
 			       chip->pdata->led_config[i].led_current);
 
 		lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
-		/* let current stabilize 2ms before measurements start */
-		usleep_range(2000, 10000);
+		/* let current stabilize 2 - 4ms before measurements start */
+		usleep_range(2000, 4000);
 		lp5523_write(chip->client,
 			     LP5523_REG_LED_TEST_CTRL,
 			     LP5523_EN_LEDTEST | i);
-		/* ledtest takes 2.7ms */
-		usleep_range(3000, 10000);
+		/* ADC conversion time is 2.7 ms typically */
+		usleep_range(3000, 6000);
 		ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
 		if (!(status & LP5523_LEDTEST_DONE))
-			usleep_range(3000, 10000);
+			usleep_range(3000, 6000);/* Was not ready. Wait. */
 		ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
 
 		if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
@@ -930,11 +929,16 @@ static int lp5523_probe(struct i2c_client *client,
 
 	if (pdata->enable) {
 		pdata->enable(0);
-		usleep_range(1000, 10000);
+		usleep_range(1000, 2000); /* Keep enable down at least 1ms */
 		pdata->enable(1);
-		usleep_range(1000, 10000); /* Spec says min 500us */
+		usleep_range(1000, 2000); /* 500us abs min. */
 	}
 
+	lp5523_write(client, LP5523_REG_RESET, 0xff);
+	usleep_range(10000, 20000); /*
+				     * Exact value is not available. 10 - 20ms
+				     * appears to be enough for reset.
+				     */
 	ret = lp5523_detect(client);
 	if (ret)
 		goto fail2;
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index a688293abd0b..614ebebaaa28 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -102,6 +102,7 @@ static struct dmi_system_id __initdata nas_led_whitelist[] = {
 			DMI_MATCH(DMI_PRODUCT_VERSION, "1.00.00")
 		}
 	},
+	{}
 };
 
 /*
diff --git a/drivers/macintosh/Kconfig b/drivers/macintosh/Kconfig
index 3d7355ff7308..fa51af11c6f1 100644
--- a/drivers/macintosh/Kconfig
+++ b/drivers/macintosh/Kconfig
@@ -102,6 +102,7 @@ config ADB_PMU_LED
 config ADB_PMU_LED_IDE
 	bool "Use front LED as IDE LED by default"
 	depends on ADB_PMU_LED
+	depends on LEDS_CLASS
 	select LEDS_TRIGGERS
 	select LEDS_TRIGGER_IDE_DISK
 	help
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 324a3663fcda..84c46a161927 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -1337,7 +1337,7 @@ super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
-	return num_sectors / 2; /* kB for sysfs */
+	return num_sectors;
 }
 
 
@@ -1704,7 +1704,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
 	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
 		       rdev->sb_page);
 	md_super_wait(rdev->mddev);
-	return num_sectors / 2; /* kB for sysfs */
+	return num_sectors;
 }
 
 static struct super_type super_types[] = {
@@ -4338,6 +4338,8 @@ static int md_alloc(dev_t dev, char *name)
 	if (mddev->kobj.sd &&
 	    sysfs_create_group(&mddev->kobj, &md_bitmap_group))
 		printk(KERN_DEBUG "pointless warning\n");
+
+	blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA);
  abort:
 	mutex_unlock(&disks_mutex);
 	if (!error && mddev->kobj.sd) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 45f8324196ec..845cf95b612c 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1161,6 +1161,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 	 * is not possible.
 	 */
 	if (!test_bit(Faulty, &rdev->flags) &&
+	    !mddev->recovery_disabled &&
 	    mddev->degraded < conf->raid_disks) {
 		err = -EBUSY;
 		goto abort;
diff --git a/drivers/media/common/tuners/Kconfig b/drivers/media/common/tuners/Kconfig
index 2385e6cca635..78b089526e02 100644
--- a/drivers/media/common/tuners/Kconfig
+++ b/drivers/media/common/tuners/Kconfig
@@ -31,7 +31,7 @@ config MEDIA_TUNER
 	select MEDIA_TUNER_TDA9887 if !MEDIA_TUNER_CUSTOMISE
 	select MEDIA_TUNER_MC44S803 if !MEDIA_TUNER_CUSTOMISE
 
-menuconfig MEDIA_TUNER_CUSTOMISE
+config MEDIA_TUNER_CUSTOMISE
 	bool "Customize analog and hybrid tuner modules to build"
 	depends on MEDIA_TUNER
 	default y if EMBEDDED
@@ -44,7 +44,8 @@ menuconfig MEDIA_TUNER_CUSTOMISE
 
 	  If unsure say N.
 
-if MEDIA_TUNER_CUSTOMISE
+menu "Customize TV tuners"
+	visible if MEDIA_TUNER_CUSTOMISE
 
 config MEDIA_TUNER_SIMPLE
 	tristate "Simple tuner support"
@@ -185,5 +186,4 @@ config MEDIA_TUNER_TDA18218
 	default m if MEDIA_TUNER_CUSTOMISE
 	help
 	  NXP TDA18218 silicon tuner driver.
-
-endif # MEDIA_TUNER_CUSTOMISE
+endmenu
diff --git a/drivers/media/dvb/frontends/Kconfig b/drivers/media/dvb/frontends/Kconfig
index e9062b08a485..96b27016670e 100644
--- a/drivers/media/dvb/frontends/Kconfig
+++ b/drivers/media/dvb/frontends/Kconfig
@@ -12,9 +12,8 @@ config DVB_FE_CUSTOMISE
 
 	  If unsure say N.
 
-if DVB_FE_CUSTOMISE
-
 menu "Customise DVB Frontends"
+	visible if DVB_FE_CUSTOMISE
 
 comment "Multistandard (satellite) frontends"
 	depends on DVB_CORE
@@ -619,5 +618,3 @@ config DVB_DUMMY_FE
 	tristate "Dummy frontend driver"
 	default n
 endmenu
-
-endif
diff --git a/drivers/media/radio/radio-si4713.c b/drivers/media/radio/radio-si4713.c
index 6a435786b63d..03829e6818bd 100644
--- a/drivers/media/radio/radio-si4713.c
+++ b/drivers/media/radio/radio-si4713.c
@@ -291,7 +291,7 @@ static int radio_si4713_pdriver_probe(struct platform_device *pdev)
 		goto unregister_v4l2_dev;
 	}
 
-	sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter, NULL,
+	sd = v4l2_i2c_new_subdev_board(&rsdev->v4l2_dev, adapter,
 			pdata->subdev_board_info, NULL);
 	if (!sd) {
 		dev_err(&pdev->dev, "Cannot get v4l2 subdevice\n");
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig
index ac16e815e275..6830d2848bd7 100644
--- a/drivers/media/video/Kconfig
+++ b/drivers/media/video/Kconfig
@@ -112,7 +112,7 @@ config VIDEO_IR_I2C
 #
 
 menu "Encoders/decoders and other helper chips"
-	depends on !VIDEO_HELPER_CHIPS_AUTO
+	visible if !VIDEO_HELPER_CHIPS_AUTO
 
 comment "Audio decoders"
 
diff --git a/drivers/media/video/au0828/au0828-cards.c b/drivers/media/video/au0828/au0828-cards.c
index 0453816d4ec3..01be89fa5c78 100644
--- a/drivers/media/video/au0828/au0828-cards.c
+++ b/drivers/media/video/au0828/au0828-cards.c
@@ -212,7 +212,7 @@ void au0828_card_setup(struct au0828_dev *dev)
 		   be abstracted out if we ever need to support a different
 		   demod) */
 		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-				NULL, "au8522", 0x8e >> 1, NULL);
+				"au8522", 0x8e >> 1, NULL);
 		if (sd == NULL)
 			printk(KERN_ERR "analog subdev registration failed\n");
 	}
@@ -221,7 +221,7 @@ void au0828_card_setup(struct au0828_dev *dev)
 	if (dev->board.tuner_type != TUNER_ABSENT) {
 		/* Load the tuner module, which does the attach */
 		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-				NULL, "tuner", dev->board.tuner_addr, NULL);
+				"tuner", dev->board.tuner_addr, NULL);
 		if (sd == NULL)
 			printk(KERN_ERR "tuner subdev registration fail\n");
 
diff --git a/drivers/media/video/bt8xx/bttv-cards.c b/drivers/media/video/bt8xx/bttv-cards.c
index 87d8b006ef77..49efcf660ba6 100644
--- a/drivers/media/video/bt8xx/bttv-cards.c
+++ b/drivers/media/video/bt8xx/bttv-cards.c
@@ -3529,7 +3529,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
 		struct v4l2_subdev *sd;
 
 		sd = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-			&btv->c.i2c_adap, NULL, "saa6588", 0, addrs);
+			&btv->c.i2c_adap, "saa6588", 0, addrs);
 		btv->has_saa6588 = (sd != NULL);
 	}
 
@@ -3554,7 +3554,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
 		};
 
 		btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-			&btv->c.i2c_adap, NULL, "msp3400", 0, addrs);
+			&btv->c.i2c_adap, "msp3400", 0, addrs);
 		if (btv->sd_msp34xx)
 			return;
 		goto no_audio;
@@ -3568,7 +3568,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
 		};
 
 		if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-				&btv->c.i2c_adap, NULL, "tda7432", 0, addrs))
+				&btv->c.i2c_adap, "tda7432", 0, addrs))
 			return;
 		goto no_audio;
 	}
@@ -3576,7 +3576,7 @@ void __devinit bttv_init_card2(struct bttv *btv)
 	case 3: {
 		/* The user specified that we should probe for tvaudio */
 		btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-			&btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs());
+			&btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
 		if (btv->sd_tvaudio)
 			return;
 		goto no_audio;
@@ -3596,11 +3596,11 @@ void __devinit bttv_init_card2(struct bttv *btv)
 	   found is really something else (e.g. a tea6300). */
 	if (!bttv_tvcards[btv->c.type].no_msp34xx) {
 		btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-			&btv->c.i2c_adap, NULL, "msp3400",
+			&btv->c.i2c_adap, "msp3400",
 			0, I2C_ADDRS(I2C_ADDR_MSP3400 >> 1));
 	} else if (bttv_tvcards[btv->c.type].msp34xx_alt) {
 		btv->sd_msp34xx = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-			&btv->c.i2c_adap, NULL, "msp3400",
+			&btv->c.i2c_adap, "msp3400",
 			0, I2C_ADDRS(I2C_ADDR_MSP3400_ALT >> 1));
 	}
 
@@ -3616,13 +3616,13 @@ void __devinit bttv_init_card2(struct bttv *btv)
 		};
 
 		if (v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-				&btv->c.i2c_adap, NULL, "tda7432", 0, addrs))
+				&btv->c.i2c_adap, "tda7432", 0, addrs))
 			return;
 	}
 
 	/* Now see if we can find one of the tvaudio devices. */
 	btv->sd_tvaudio = v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-		&btv->c.i2c_adap, NULL, "tvaudio", 0, tvaudio_addrs());
+		&btv->c.i2c_adap, "tvaudio", 0, tvaudio_addrs());
 	if (btv->sd_tvaudio)
 		return;
 
@@ -3646,13 +3646,13 @@ void __devinit bttv_init_tuner(struct bttv *btv)
 	/* Load tuner module before issuing tuner config call! */
 	if (bttv_tvcards[btv->c.type].has_radio)
 		v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-			&btv->c.i2c_adap, NULL, "tuner",
+			&btv->c.i2c_adap, "tuner",
 			0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
 	v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-		&btv->c.i2c_adap, NULL, "tuner",
+		&btv->c.i2c_adap, "tuner",
 		0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
 	v4l2_i2c_new_subdev(&btv->c.v4l2_dev,
-		&btv->c.i2c_adap, NULL, "tuner",
+		&btv->c.i2c_adap, "tuner",
 		0, v4l2_i2c_tuner_addrs(ADDRS_TV_WITH_DEMOD));
 
 	tun_setup.mode_mask = T_ANALOG_TV | T_DIGITAL_TV;
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 7bc36670071a..260c666ce931 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -2066,8 +2066,7 @@ static int cafe_pci_probe(struct pci_dev *pdev,
 
 	cam->sensor_addr = 0x42;
 	cam->sensor = v4l2_i2c_new_subdev_cfg(&cam->v4l2_dev, &cam->i2c_adapter,
-			"ov7670", "ov7670", 0, &sensor_cfg, cam->sensor_addr,
-			NULL);
+			"ov7670", 0, &sensor_cfg, cam->sensor_addr, NULL);
 	if (cam->sensor == NULL) {
 		ret = -ENODEV;
 		goto out_smbus;
diff --git a/drivers/media/video/cx18/cx18-i2c.c b/drivers/media/video/cx18/cx18-i2c.c
index a09caf883170..e71a026f3419 100644
--- a/drivers/media/video/cx18/cx18-i2c.c
+++ b/drivers/media/video/cx18/cx18-i2c.c
@@ -122,15 +122,15 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
 	if (hw == CX18_HW_TUNER) {
 		/* special tuner group handling */
 		sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
-				adap, NULL, type, 0, cx->card_i2c->radio);
+				adap, type, 0, cx->card_i2c->radio);
 		if (sd != NULL)
 			sd->grp_id = hw;
 		sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
-				adap, NULL, type, 0, cx->card_i2c->demod);
+				adap, type, 0, cx->card_i2c->demod);
 		if (sd != NULL)
 			sd->grp_id = hw;
 		sd = v4l2_i2c_new_subdev(&cx->v4l2_dev,
-				adap, NULL, type, 0, cx->card_i2c->tv);
+				adap, type, 0, cx->card_i2c->tv);
 		if (sd != NULL)
 			sd->grp_id = hw;
 		return sd != NULL ? 0 : -1;
@@ -144,7 +144,7 @@ int cx18_i2c_register(struct cx18 *cx, unsigned idx)
 		return -1;
 
 	/* It's an I2C device other than an analog tuner or IR chip */
-	sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, NULL, type, hw_addrs[idx],
+	sd = v4l2_i2c_new_subdev(&cx->v4l2_dev, adap, type, hw_addrs[idx],
 			NULL);
 	if (sd != NULL)
 		sd->grp_id = hw;
diff --git a/drivers/media/video/cx231xx/cx231xx-cards.c b/drivers/media/video/cx231xx/cx231xx-cards.c
index 56c2d8195ac6..2c78d188bb06 100644
--- a/drivers/media/video/cx231xx/cx231xx-cards.c
+++ b/drivers/media/video/cx231xx/cx231xx-cards.c
@@ -560,7 +560,7 @@ void cx231xx_card_setup(struct cx231xx *dev)
 	if (dev->board.decoder == CX231XX_AVDECODER) {
 		dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
 					&dev->i2c_bus[0].i2c_adap,
-					NULL, "cx25840", 0x88 >> 1, NULL);
+					"cx25840", 0x88 >> 1, NULL);
 		if (dev->sd_cx25840 == NULL)
 			cx231xx_info("cx25840 subdev registration failure\n");
 		cx25840_call(dev, core, load_fw);
@@ -571,7 +571,7 @@ void cx231xx_card_setup(struct cx231xx *dev)
 	if (dev->board.tuner_type != TUNER_ABSENT) {
 		dev->sd_tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev,
 				&dev->i2c_bus[dev->board.tuner_i2c_master].i2c_adap,
-				NULL, "tuner",
+				"tuner",
 				dev->tuner_addr, NULL);
 		if (dev->sd_tuner == NULL)
 			cx231xx_info("tuner subdev registration failure\n");
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index db054004e462..8861309268b1 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -1247,7 +1247,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
 	case CX23885_BOARD_LEADTEK_WINFAST_PXTV1200:
 		dev->sd_cx25840 = v4l2_i2c_new_subdev(&dev->v4l2_dev,
 				&dev->i2c_bus[2].i2c_adap,
-				NULL, "cx25840", 0x88 >> 1, NULL);
+				"cx25840", 0x88 >> 1, NULL);
 		if (dev->sd_cx25840) {
 			dev->sd_cx25840->grp_id = CX23885_HW_AV_CORE;
 			v4l2_subdev_call(dev->sd_cx25840, core, load_fw);
diff --git a/drivers/media/video/cx23885/cx23885-video.c b/drivers/media/video/cx23885/cx23885-video.c
index 3cc9f462d08d..8b2fb8a4375c 100644
--- a/drivers/media/video/cx23885/cx23885-video.c
+++ b/drivers/media/video/cx23885/cx23885-video.c
@@ -1507,10 +1507,10 @@ int cx23885_video_register(struct cx23885_dev *dev)
 	if (dev->tuner_addr)
 		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
 			&dev->i2c_bus[1].i2c_adap,
-			NULL, "tuner", dev->tuner_addr, NULL);
+			"tuner", dev->tuner_addr, NULL);
 	else
 		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-			&dev->i2c_bus[1].i2c_adap, NULL,
+			&dev->i2c_bus[1].i2c_adap,
 			"tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_TV));
 	if (sd) {
 		struct tuner_setup tun_setup;
diff --git a/drivers/media/video/cx88/cx88-cards.c b/drivers/media/video/cx88/cx88-cards.c
index b26fcba8600c..9b9e169cce90 100644
--- a/drivers/media/video/cx88/cx88-cards.c
+++ b/drivers/media/video/cx88/cx88-cards.c
@@ -3515,19 +3515,18 @@ struct cx88_core *cx88_core_create(struct pci_dev *pci, int nr)
 		   later code configures a tea5767.
 		 */
 		v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
-				NULL, "tuner",
-				0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
+				"tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_RADIO));
 		if (has_demod)
 			v4l2_i2c_new_subdev(&core->v4l2_dev,
-				&core->i2c_adap, NULL, "tuner",
+				&core->i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
 		if (core->board.tuner_addr == ADDR_UNSET) {
 			v4l2_i2c_new_subdev(&core->v4l2_dev,
-				&core->i2c_adap, NULL, "tuner",
+				&core->i2c_adap, "tuner",
 				0, has_demod ? tv_addrs + 4 : tv_addrs);
 		} else {
 			v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
-				NULL, "tuner", core->board.tuner_addr, NULL);
+				"tuner", core->board.tuner_addr, NULL);
 		}
 	}
 
diff --git a/drivers/media/video/cx88/cx88-video.c b/drivers/media/video/cx88/cx88-video.c
index 88b51194f917..62cea9549404 100644
--- a/drivers/media/video/cx88/cx88-video.c
+++ b/drivers/media/video/cx88/cx88-video.c
@@ -1895,14 +1895,13 @@ static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
 
 	if (core->board.audio_chip == V4L2_IDENT_WM8775)
 		v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
-				NULL, "wm8775", 0x36 >> 1, NULL);
+				"wm8775", 0x36 >> 1, NULL);
 
 	if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) {
 		/* This probes for a tda9874 as is used on some
 		   Pixelview Ultra boards. */
-		v4l2_i2c_new_subdev(&core->v4l2_dev,
-			&core->i2c_adap,
-			NULL, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
+		v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
+			"tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
 	}
 
 	switch (core->boardnr) {
diff --git a/drivers/media/video/davinci/vpfe_capture.c b/drivers/media/video/davinci/vpfe_capture.c
index d8e38cc4ec40..7333a9bb2549 100644
--- a/drivers/media/video/davinci/vpfe_capture.c
+++ b/drivers/media/video/davinci/vpfe_capture.c
@@ -1986,7 +1986,6 @@ static __init int vpfe_probe(struct platform_device *pdev)
 		vpfe_dev->sd[i] =
 			v4l2_i2c_new_subdev_board(&vpfe_dev->v4l2_dev,
 						  i2c_adap,
-						  NULL,
 						  &sdinfo->board_info,
 						  NULL);
 		if (vpfe_dev->sd[i]) {
diff --git a/drivers/media/video/davinci/vpif_capture.c b/drivers/media/video/davinci/vpif_capture.c
index 6ac6acd16352..193abab6b355 100644
--- a/drivers/media/video/davinci/vpif_capture.c
+++ b/drivers/media/video/davinci/vpif_capture.c
@@ -2013,7 +2013,6 @@ static __init int vpif_probe(struct platform_device *pdev)
 		vpif_obj.sd[i] =
 			v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
 						  i2c_adap,
-						  NULL,
 						  &subdevdata->board_info,
 						  NULL);
 
diff --git a/drivers/media/video/davinci/vpif_display.c b/drivers/media/video/davinci/vpif_display.c
index 685f6a6ee603..412c65d54fe1 100644
--- a/drivers/media/video/davinci/vpif_display.c
+++ b/drivers/media/video/davinci/vpif_display.c
@@ -1553,7 +1553,7 @@ static __init int vpif_probe(struct platform_device *pdev)
 
 	for (i = 0; i < subdev_count; i++) {
 		vpif_obj.sd[i] = v4l2_i2c_new_subdev_board(&vpif_obj.v4l2_dev,
-						i2c_adap, NULL,
+						i2c_adap,
 						&subdevdata[i].board_info,
 						NULL);
 		if (!vpif_obj.sd[i]) {
diff --git a/drivers/media/video/em28xx/em28xx-cards.c b/drivers/media/video/em28xx/em28xx-cards.c
index 54859233f311..f7e9168157a5 100644
--- a/drivers/media/video/em28xx/em28xx-cards.c
+++ b/drivers/media/video/em28xx/em28xx-cards.c
@@ -2554,39 +2554,39 @@ void em28xx_card_setup(struct em28xx *dev)
 	/* request some modules */
 	if (dev->board.has_msp34xx)
 		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-			NULL, "msp3400", 0, msp3400_addrs);
+			"msp3400", 0, msp3400_addrs);
 
 	if (dev->board.decoder == EM28XX_SAA711X)
 		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-			NULL, "saa7115_auto", 0, saa711x_addrs);
+			"saa7115_auto", 0, saa711x_addrs);
 
 	if (dev->board.decoder == EM28XX_TVP5150)
 		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-			NULL, "tvp5150", 0, tvp5150_addrs);
+			"tvp5150", 0, tvp5150_addrs);
 
 	if (dev->em28xx_sensor == EM28XX_MT9V011) {
 		struct v4l2_subdev *sd;
 
 		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-			&dev->i2c_adap, NULL, "mt9v011", 0, mt9v011_addrs);
+			&dev->i2c_adap, "mt9v011", 0, mt9v011_addrs);
 		v4l2_subdev_call(sd, core, s_config, 0, &dev->sensor_xtal);
 	}
 
 
 	if (dev->board.adecoder == EM28XX_TVAUDIO)
 		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-			NULL, "tvaudio", dev->board.tvaudio_addr, NULL);
+			"tvaudio", dev->board.tvaudio_addr, NULL);
 
 	if (dev->board.tuner_type != TUNER_ABSENT) {
 		int has_demod = (dev->tda9887_conf & TDA9887_PRESENT);
 
 		if (dev->board.radio.type)
 			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-				NULL, "tuner", dev->board.radio_addr, NULL);
+				"tuner", dev->board.radio_addr, NULL);
 
 		if (has_demod)
 			v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap, NULL, "tuner",
+				&dev->i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
 		if (dev->tuner_addr == 0) {
 			enum v4l2_i2c_tuner_type type =
@@ -2594,14 +2594,14 @@ void em28xx_card_setup(struct em28xx *dev)
 			struct v4l2_subdev *sd;
 
 			sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap, NULL, "tuner",
+				&dev->i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(type));
 
 			if (sd)
 				dev->tuner_addr = v4l2_i2c_subdev_addr(sd);
 		} else {
 			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-				NULL, "tuner", dev->tuner_addr, NULL);
+				"tuner", dev->tuner_addr, NULL);
 		}
 	}
 
diff --git a/drivers/media/video/fsl-viu.c b/drivers/media/video/fsl-viu.c
index 9a075d83dd1f..b8faff2dd711 100644
--- a/drivers/media/video/fsl-viu.c
+++ b/drivers/media/video/fsl-viu.c
@@ -1486,7 +1486,7 @@ static int __devinit viu_of_probe(struct platform_device *op,
 
 	ad = i2c_get_adapter(0);
 	viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
-			NULL, "saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
+			"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
 
 	viu_dev->vidq.timeout.function = viu_vid_timeout;
 	viu_dev->vidq.timeout.data = (unsigned long)viu_dev;
diff --git a/drivers/media/video/ivtv/ivtv-i2c.c b/drivers/media/video/ivtv/ivtv-i2c.c
index 9e8039ac909e..665191c9b407 100644
--- a/drivers/media/video/ivtv/ivtv-i2c.c
+++ b/drivers/media/video/ivtv/ivtv-i2c.c
@@ -239,19 +239,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
 		return -1;
 	if (hw == IVTV_HW_TUNER) {
 		/* special tuner handling */
-		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
-				adap, NULL, type,
-				0, itv->card_i2c->radio);
+		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
+				itv->card_i2c->radio);
 		if (sd)
 			sd->grp_id = 1 << idx;
-		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
-				adap, NULL, type,
-				0, itv->card_i2c->demod);
+		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
+				itv->card_i2c->demod);
 		if (sd)
 			sd->grp_id = 1 << idx;
-		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
-				adap, NULL, type,
-				0, itv->card_i2c->tv);
+		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev, adap, type, 0,
+				itv->card_i2c->tv);
 		if (sd)
 			sd->grp_id = 1 << idx;
 		return sd ? 0 : -1;
@@ -267,17 +264,16 @@ int ivtv_i2c_register(struct ivtv *itv, unsigned idx)
 	/* It's an I2C device other than an analog tuner or IR chip */
 	if (hw == IVTV_HW_UPD64031A || hw == IVTV_HW_UPD6408X) {
 		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
-				adap, NULL, type, 0, I2C_ADDRS(hw_addrs[idx]));
+				adap, type, 0, I2C_ADDRS(hw_addrs[idx]));
 	} else if (hw == IVTV_HW_CX25840) {
 		struct cx25840_platform_data pdata;
 
 		pdata.pvr150_workaround = itv->pvr150_workaround;
 		sd = v4l2_i2c_new_subdev_cfg(&itv->v4l2_dev,
-				adap, NULL, type, 0, &pdata, hw_addrs[idx],
-				NULL);
+				adap, type, 0, &pdata, hw_addrs[idx], NULL);
 	} else {
 		sd = v4l2_i2c_new_subdev(&itv->v4l2_dev,
-				adap, NULL, type, hw_addrs[idx], NULL);
+				adap, type, hw_addrs[idx], NULL);
 	}
 	if (sd)
 		sd->grp_id = 1 << idx;
diff --git a/drivers/media/video/mxb.c b/drivers/media/video/mxb.c
index 94ba698d0ad4..4e8fd965f151 100644
--- a/drivers/media/video/mxb.c
+++ b/drivers/media/video/mxb.c
@@ -185,17 +185,17 @@ static int mxb_probe(struct saa7146_dev *dev)
 	}
 
 	mxb->saa7111a = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
-			NULL, "saa7111", I2C_SAA7111A, NULL);
+			"saa7111", I2C_SAA7111A, NULL);
 	mxb->tea6420_1 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
-			NULL, "tea6420", I2C_TEA6420_1, NULL);
+			"tea6420", I2C_TEA6420_1, NULL);
 	mxb->tea6420_2 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
-			NULL, "tea6420", I2C_TEA6420_2, NULL);
+			"tea6420", I2C_TEA6420_2, NULL);
 	mxb->tea6415c = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
-			NULL, "tea6415c", I2C_TEA6415C, NULL);
+			"tea6415c", I2C_TEA6415C, NULL);
 	mxb->tda9840 = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
-			NULL, "tda9840", I2C_TDA9840, NULL);
+			"tda9840", I2C_TDA9840, NULL);
 	mxb->tuner = v4l2_i2c_new_subdev(&dev->v4l2_dev, &mxb->i2c_adapter,
-			NULL, "tuner", I2C_TUNER, NULL);
+			"tuner", I2C_TUNER, NULL);
 
 	/* check if all devices are present */
 	if (!mxb->tea6420_1 || !mxb->tea6420_2 || !mxb->tea6415c ||
diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw.c b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
index bef202752cc8..66ad516bdfd9 100644
--- a/drivers/media/video/pvrusb2/pvrusb2-hdw.c
+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw.c
@@ -2088,16 +2088,14 @@ static int pvr2_hdw_load_subdev(struct pvr2_hdw *hdw,
 			   " Setting up with specified i2c address 0x%x",
 			   mid, i2caddr[0]);
 		sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
-				NULL, fname,
-				i2caddr[0], NULL);
+				fname, i2caddr[0], NULL);
 	} else {
 		pvr2_trace(PVR2_TRACE_INIT,
 			   "Module ID %u:"
 			   " Setting up with address probe list",
 			   mid);
 		sd = v4l2_i2c_new_subdev(&hdw->v4l2_dev, &hdw->i2c_adap,
-				NULL, fname,
-				0, i2caddr);
+				fname, 0, i2caddr);
 	}
 
 	if (!sd) {
diff --git a/drivers/media/video/s5p-fimc/fimc-capture.c b/drivers/media/video/s5p-fimc/fimc-capture.c
index e8f13d3e2df1..1b93207c89e8 100644
--- a/drivers/media/video/s5p-fimc/fimc-capture.c
+++ b/drivers/media/video/s5p-fimc/fimc-capture.c
@@ -44,7 +44,7 @@ static struct v4l2_subdev *fimc_subdev_register(struct fimc_dev *fimc,
 		return ERR_PTR(-ENOMEM);
 
 	sd = v4l2_i2c_new_subdev_board(&vid_cap->v4l2_dev, i2c_adap,
-				       MODULE_NAME, isp_info->board_info, NULL);
+				       isp_info->board_info, NULL);
 	if (!sd) {
 		v4l2_err(&vid_cap->v4l2_dev, "failed to acquire subdev\n");
 		return NULL;
diff --git a/drivers/media/video/saa7134/saa7134-cards.c b/drivers/media/video/saa7134/saa7134-cards.c
index 0911cb580e18..1d4d0a49ea52 100644
--- a/drivers/media/video/saa7134/saa7134-cards.c
+++ b/drivers/media/video/saa7134/saa7134-cards.c
@@ -7551,22 +7551,22 @@ int saa7134_board_init2(struct saa7134_dev *dev)
 		   so we do not need to probe for a radio tuner device. */
 		if (dev->radio_type != UNSET)
 			v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap, NULL, "tuner",
+				&dev->i2c_adap, "tuner",
 				dev->radio_addr, NULL);
 		if (has_demod)
 			v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap, NULL, "tuner",
+				&dev->i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
 		if (dev->tuner_addr == ADDR_UNSET) {
 			enum v4l2_i2c_tuner_type type =
 				has_demod ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
 
 			v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap, NULL, "tuner",
+				&dev->i2c_adap, "tuner",
 				0, v4l2_i2c_tuner_addrs(type));
 		} else {
 			v4l2_i2c_new_subdev(&dev->v4l2_dev,
-				&dev->i2c_adap, NULL, "tuner",
+				&dev->i2c_adap, "tuner",
 				dev->tuner_addr, NULL);
 		}
 	}
diff --git a/drivers/media/video/saa7134/saa7134-core.c b/drivers/media/video/saa7134/saa7134-core.c
index 764d7d219fed..756a27812260 100644
--- a/drivers/media/video/saa7134/saa7134-core.c
+++ b/drivers/media/video/saa7134/saa7134-core.c
@@ -991,7 +991,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
 	if (card_is_empress(dev)) {
 		struct v4l2_subdev *sd =
 			v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-				NULL, "saa6752hs",
+				"saa6752hs",
 				saa7134_boards[dev->board].empress_addr, NULL);
 
 		if (sd)
@@ -1002,7 +1002,7 @@ static int __devinit saa7134_initdev(struct pci_dev *pci_dev,
 		struct v4l2_subdev *sd;
 
 		sd = v4l2_i2c_new_subdev(&dev->v4l2_dev,
-			&dev->i2c_adap, NULL, "saa6588",
+			&dev->i2c_adap, "saa6588",
 			0, I2C_ADDRS(saa7134_boards[dev->board].rds_addr));
 		if (sd) {
 			printk(KERN_INFO "%s: found RDS decoder\n", dev->name);
diff --git a/drivers/media/video/sh_vou.c b/drivers/media/video/sh_vou.c
index 0f4906136b8f..4e5a8cf76ded 100644
--- a/drivers/media/video/sh_vou.c
+++ b/drivers/media/video/sh_vou.c
@@ -1406,7 +1406,7 @@ static int __devinit sh_vou_probe(struct platform_device *pdev)
 		goto ereset;
 
 	subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
-			NULL, vou_pdata->board_info, NULL);
+			vou_pdata->board_info, NULL);
 	if (!subdev) {
 		ret = -ENOMEM;
 		goto ei2cnd;
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 43848a751d11..335120c2021b 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -896,7 +896,7 @@ static int soc_camera_init_i2c(struct soc_camera_device *icd,
 	icl->board_info->platform_data = icd;
 
 	subdev = v4l2_i2c_new_subdev_board(&ici->v4l2_dev, adap,
-				NULL, icl->board_info, NULL);
+				icl->board_info, NULL);
 	if (!subdev)
 		goto ei2cnd;
 
diff --git a/drivers/media/video/usbvision/usbvision-i2c.c b/drivers/media/video/usbvision/usbvision-i2c.c
index e3bbae26e3ce..81dd53bb5267 100644
--- a/drivers/media/video/usbvision/usbvision-i2c.c
+++ b/drivers/media/video/usbvision/usbvision-i2c.c
@@ -251,7 +251,7 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
251 hit-and-miss. */ 251 hit-and-miss. */
252 mdelay(10); 252 mdelay(10);
253 v4l2_i2c_new_subdev(&usbvision->v4l2_dev, 253 v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
254 &usbvision->i2c_adap, NULL, 254 &usbvision->i2c_adap,
255 "saa7115_auto", 0, saa711x_addrs); 255 "saa7115_auto", 0, saa711x_addrs);
256 break; 256 break;
257 } 257 }
@@ -261,14 +261,14 @@ int usbvision_i2c_register(struct usb_usbvision *usbvision)
261 struct tuner_setup tun_setup; 261 struct tuner_setup tun_setup;
262 262
263 sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, 263 sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
264 &usbvision->i2c_adap, NULL, 264 &usbvision->i2c_adap,
265 "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD)); 265 "tuner", 0, v4l2_i2c_tuner_addrs(ADDRS_DEMOD));
266 /* depending on whether we found a demod or not, select 266 /* depending on whether we found a demod or not, select
267 the tuner type. */ 267 the tuner type. */
268 type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV; 268 type = sd ? ADDRS_TV_WITH_DEMOD : ADDRS_TV;
269 269
270 sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev, 270 sd = v4l2_i2c_new_subdev(&usbvision->v4l2_dev,
271 &usbvision->i2c_adap, NULL, 271 &usbvision->i2c_adap,
272 "tuner", 0, v4l2_i2c_tuner_addrs(type)); 272 "tuner", 0, v4l2_i2c_tuner_addrs(type));
273 273
274 if (sd == NULL) 274 if (sd == NULL)
diff --git a/drivers/media/video/v4l2-common.c b/drivers/media/video/v4l2-common.c
index 9294282b5add..b5eb1f3950b1 100644
--- a/drivers/media/video/v4l2-common.c
+++ b/drivers/media/video/v4l2-common.c
@@ -368,18 +368,15 @@ EXPORT_SYMBOL_GPL(v4l2_i2c_subdev_init);
368 368
369/* Load an i2c sub-device. */ 369/* Load an i2c sub-device. */
370struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, 370struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
371 struct i2c_adapter *adapter, const char *module_name, 371 struct i2c_adapter *adapter, struct i2c_board_info *info,
372 struct i2c_board_info *info, const unsigned short *probe_addrs) 372 const unsigned short *probe_addrs)
373{ 373{
374 struct v4l2_subdev *sd = NULL; 374 struct v4l2_subdev *sd = NULL;
375 struct i2c_client *client; 375 struct i2c_client *client;
376 376
377 BUG_ON(!v4l2_dev); 377 BUG_ON(!v4l2_dev);
378 378
379 if (module_name) 379 request_module(I2C_MODULE_PREFIX "%s", info->type);
380 request_module(module_name);
381 else
382 request_module(I2C_MODULE_PREFIX "%s", info->type);
383 380
384 /* Create the i2c client */ 381 /* Create the i2c client */
385 if (info->addr == 0 && probe_addrs) 382 if (info->addr == 0 && probe_addrs)
@@ -432,8 +429,7 @@ error:
432EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board); 429EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_board);
433 430
434struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, 431struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
435 struct i2c_adapter *adapter, 432 struct i2c_adapter *adapter, const char *client_type,
436 const char *module_name, const char *client_type,
437 int irq, void *platform_data, 433 int irq, void *platform_data,
438 u8 addr, const unsigned short *probe_addrs) 434 u8 addr, const unsigned short *probe_addrs)
439{ 435{
@@ -447,8 +443,7 @@ struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
447 info.irq = irq; 443 info.irq = irq;
448 info.platform_data = platform_data; 444 info.platform_data = platform_data;
449 445
450 return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, module_name, 446 return v4l2_i2c_new_subdev_board(v4l2_dev, adapter, &info, probe_addrs);
451 &info, probe_addrs);
452} 447}
453EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg); 448EXPORT_SYMBOL_GPL(v4l2_i2c_new_subdev_cfg);
454 449
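
The three hunks above remove the separate module_name argument from v4l2_i2c_new_subdev_board() and v4l2_i2c_new_subdev_cfg(): the module to load is now always derived from the I2C device type via request_module(I2C_MODULE_PREFIX "%s", info->type), so callers stop passing a module name, as the converted call sites throughout this commit show. A minimal sketch of a caller after the change; attach_tuner() and the fixed address are illustrative only, while the five-argument v4l2_i2c_new_subdev() form matches the call sites above.

#include <linux/i2c.h>
#include <media/v4l2-device.h>
#include <media/v4l2-common.h>

/* Hypothetical helper: attach a tuner sub-device with the reduced
 * API. "tuner" is the I2C device type; combined with
 * I2C_MODULE_PREFIX it is also the alias that request_module()
 * resolves, so no separate module name is needed any more. */
static struct v4l2_subdev *attach_tuner(struct v4l2_device *v4l2_dev,
					struct i2c_adapter *adap)
{
	/* A zero address plus a probe list would scan instead;
	 * 0x61 here is just an example address. */
	return v4l2_i2c_new_subdev(v4l2_dev, adap, "tuner", 0x61, NULL);
}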
diff --git a/drivers/media/video/via-camera.c b/drivers/media/video/via-camera.c
index 02a21bccae18..9eda7cc03121 100644
--- a/drivers/media/video/via-camera.c
+++ b/drivers/media/video/via-camera.c
@@ -1360,7 +1360,7 @@ static __devinit int viacam_probe(struct platform_device *pdev)
1360 */ 1360 */
1361 sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31); 1361 sensor_adapter = viafb_find_i2c_adapter(VIA_PORT_31);
1362 cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter, 1362 cam->sensor = v4l2_i2c_new_subdev(&cam->v4l2_dev, sensor_adapter,
1363 "ov7670", "ov7670", 0x42 >> 1, NULL); 1363 "ov7670", 0x42 >> 1, NULL);
1364 if (cam->sensor == NULL) { 1364 if (cam->sensor == NULL) {
1365 dev_err(&pdev->dev, "Unable to find the sensor!\n"); 1365 dev_err(&pdev->dev, "Unable to find the sensor!\n");
1366 ret = -ENODEV; 1366 ret = -ENODEV;
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index e5e005dc1554..7e7eec48f8b1 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -4334,10 +4334,10 @@ static int __init vino_module_init(void)
4334 4334
4335 vino_drvdata->decoder = 4335 vino_drvdata->decoder =
4336 v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, 4336 v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
4337 NULL, "saa7191", 0, I2C_ADDRS(0x45)); 4337 "saa7191", 0, I2C_ADDRS(0x45));
4338 vino_drvdata->camera = 4338 vino_drvdata->camera =
4339 v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter, 4339 v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
4340 NULL, "indycam", 0, I2C_ADDRS(0x2b)); 4340 "indycam", 0, I2C_ADDRS(0x2b));
4341 4341
4342 dprintk("init complete!\n"); 4342 dprintk("init complete!\n");
4343 4343
diff --git a/drivers/media/video/zoran/zoran_card.c b/drivers/media/video/zoran/zoran_card.c
index 7e6d62467eaa..e520abf9f4c3 100644
--- a/drivers/media/video/zoran/zoran_card.c
+++ b/drivers/media/video/zoran/zoran_card.c
@@ -1343,13 +1343,12 @@ static int __devinit zoran_probe(struct pci_dev *pdev,
1343 } 1343 }
1344 1344
1345 zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, 1345 zr->decoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
1346 &zr->i2c_adapter, NULL, zr->card.i2c_decoder, 1346 &zr->i2c_adapter, zr->card.i2c_decoder,
1347 0, zr->card.addrs_decoder); 1347 0, zr->card.addrs_decoder);
1348 1348
1349 if (zr->card.i2c_encoder) 1349 if (zr->card.i2c_encoder)
1350 zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev, 1350 zr->encoder = v4l2_i2c_new_subdev(&zr->v4l2_dev,
1351 &zr->i2c_adapter, 1351 &zr->i2c_adapter, zr->card.i2c_encoder,
1352 NULL, zr->card.i2c_encoder,
1353 0, zr->card.addrs_encoder); 1352 0, zr->card.addrs_encoder);
1354 1353
1355 dprintk(2, 1354 dprintk(2,
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index ca47e6285075..307aada5fffe 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -183,9 +183,7 @@ static int isl29020_probe(struct i2c_client *client,
183 183
184static int isl29020_remove(struct i2c_client *client) 184static int isl29020_remove(struct i2c_client *client)
185{ 185{
186 struct als_data *data = i2c_get_clientdata(client);
187 sysfs_remove_group(&client->dev.kobj, &m_als_gr); 186 sysfs_remove_group(&client->dev.kobj, &m_als_gr);
188 kfree(data);
189 return 0; 187 return 0;
190} 188}
191 189
@@ -245,6 +243,6 @@ static void __exit sensor_isl29020_exit(void)
245module_init(sensor_isl29020_init); 243module_init(sensor_isl29020_init);
246module_exit(sensor_isl29020_exit); 244module_exit(sensor_isl29020_exit);
247 245
248MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com"); 246MODULE_AUTHOR("Kalhan Trisal <kalhan.trisal@intel.com>");
249MODULE_DESCRIPTION("Intersil isl29020 ALS Driver"); 247MODULE_DESCRIPTION("Intersil isl29020 ALS Driver");
250MODULE_LICENSE("GPL v2"); 248MODULE_LICENSE("GPL v2");
diff --git a/drivers/misc/sgi-xp/xpc_partition.c b/drivers/misc/sgi-xp/xpc_partition.c
index d551f09ccb79..6956f7e7d439 100644
--- a/drivers/misc/sgi-xp/xpc_partition.c
+++ b/drivers/misc/sgi-xp/xpc_partition.c
@@ -439,18 +439,23 @@ xpc_discovery(void)
439 * nodes that can comprise an access protection grouping. The access 439 * nodes that can comprise an access protection grouping. The access
440 * protection is in regards to memory, IOI and IPI. 440 * protection is in regards to memory, IOI and IPI.
441 */ 441 */
442 max_regions = 64;
443 region_size = xp_region_size; 442 region_size = xp_region_size;
444 443
445 switch (region_size) { 444 if (is_uv())
446 case 128: 445 max_regions = 256;
447 max_regions *= 2; 446 else {
448 case 64: 447 max_regions = 64;
449 max_regions *= 2; 448
450 case 32: 449 switch (region_size) {
451 max_regions *= 2; 450 case 128:
452 region_size = 16; 451 max_regions *= 2;
453 DBUG_ON(!is_shub2()); 452 case 64:
453 max_regions *= 2;
454 case 32:
455 max_regions *= 2;
456 region_size = 16;
457 DBUG_ON(!is_shub2());
458 }
454 } 459 }
455 460
456 for (region = 0; region < max_regions; region++) { 461 for (region = 0; region < max_regions; region++) {
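
The rewritten block above keeps the SHUB sizing logic, which relies on deliberate switch fall-through: starting from 64, each larger region_size passes through one more doubling before the switch exits, while UV hardware now gets a flat 256 regions. A standalone sketch of the fall-through arithmetic (plain user-space C, for illustration only):

#include <stdio.h>

/* Each larger region_size enters the switch one case earlier and
 * so passes through one more "*= 2": 32/64/128 yield 128/256/512
 * regions respectively; the default (16) keeps the base of 64. */
static unsigned int max_regions_for(unsigned int region_size)
{
	unsigned int max_regions = 64;

	switch (region_size) {
	case 128:
		max_regions *= 2;	/* falls through */
	case 64:
		max_regions *= 2;	/* falls through */
	case 32:
		max_regions *= 2;
		break;
	}
	return max_regions;
}

int main(void)
{
	unsigned int sizes[] = { 16, 32, 64, 128 };
	for (int i = 0; i < 4; i++)
		printf("region_size %3u -> max_regions %u\n",
		       sizes[i], max_regions_for(sizes[i]));
	return 0;
}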
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 8f86d702e46e..31ae07a36576 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1559,7 +1559,7 @@ void mmc_stop_host(struct mmc_host *host)
1559 1559
1560 if (host->caps & MMC_CAP_DISABLE) 1560 if (host->caps & MMC_CAP_DISABLE)
1561 cancel_delayed_work(&host->disable); 1561 cancel_delayed_work(&host->disable);
1562 cancel_delayed_work(&host->detect); 1562 cancel_delayed_work_sync(&host->detect);
1563 mmc_flush_scheduled_work(); 1563 mmc_flush_scheduled_work();
1564 1564
1565 /* clear pm flags now and let card drivers set them as needed */ 1565 /* clear pm flags now and let card drivers set them as needed */
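
The one-line change above matters because cancel_delayed_work() only removes a pending timer; a detect work item that is already executing keeps running and may rearm itself while the host is being torn down. cancel_delayed_work_sync() additionally waits for a running instance (including a self-requeue) to finish. A minimal sketch of the property relied on here, with stop_detect() as a hypothetical name:

#include <linux/workqueue.h>

/* After cancel_delayed_work_sync() returns, the callback is
 * guaranteed to be neither running nor queued, which is what
 * mmc_stop_host() needs before host state goes away. */
static void stop_detect(struct delayed_work *detect)
{
	cancel_delayed_work_sync(detect);
}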
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 995261f7fd70..77f93c3b8808 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -375,7 +375,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
375 struct mmc_card *oldcard) 375 struct mmc_card *oldcard)
376{ 376{
377 struct mmc_card *card; 377 struct mmc_card *card;
378 int err, ddr = MMC_SDR_MODE; 378 int err, ddr = 0;
379 u32 cid[4]; 379 u32 cid[4];
380 unsigned int max_dtr; 380 unsigned int max_dtr;
381 381
@@ -562,7 +562,11 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
562 1 << bus_width, ddr); 562 1 << bus_width, ddr);
563 err = 0; 563 err = 0;
564 } else { 564 } else {
565 mmc_card_set_ddr_mode(card); 565 if (ddr)
566 mmc_card_set_ddr_mode(card);
567 else
568 ddr = MMC_SDR_MODE;
569
566 mmc_set_bus_width_ddr(card->host, bus_width, ddr); 570 mmc_set_bus_width_ddr(card->host, bus_width, ddr);
567 } 571 }
568 } 572 }
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index c3ad1058cd31..efef5f94ac42 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -547,9 +547,11 @@ static void mmc_sdio_detect(struct mmc_host *host)
547 BUG_ON(!host->card); 547 BUG_ON(!host->card);
548 548
549 /* Make sure card is powered before detecting it */ 549 /* Make sure card is powered before detecting it */
550 err = pm_runtime_get_sync(&host->card->dev); 550 if (host->caps & MMC_CAP_POWER_OFF_CARD) {
551 if (err < 0) 551 err = pm_runtime_get_sync(&host->card->dev);
552 goto out; 552 if (err < 0)
553 goto out;
554 }
553 555
554 mmc_claim_host(host); 556 mmc_claim_host(host);
555 557
@@ -560,6 +562,20 @@ static void mmc_sdio_detect(struct mmc_host *host)
560 562
561 mmc_release_host(host); 563 mmc_release_host(host);
562 564
565 /*
566 * Tell PM core it's OK to power off the card now.
567 *
568 * The _sync variant is used in order to ensure that the card
569 * is left powered off in case an error occurred, and the card
570 * is going to be removed.
571 *
572 * Since there is no specific reason to believe a new user
573 * is about to show up at this point, the _sync variant is
574 * desirable anyway.
575 */
576 if (host->caps & MMC_CAP_POWER_OFF_CARD)
577 pm_runtime_put_sync(&host->card->dev);
578
563out: 579out:
564 if (err) { 580 if (err) {
565 mmc_sdio_remove(host); 581 mmc_sdio_remove(host);
@@ -568,9 +584,6 @@ out:
568 mmc_detach_bus(host); 584 mmc_detach_bus(host);
569 mmc_release_host(host); 585 mmc_release_host(host);
570 } 586 }
571
572 /* Tell PM core that we're done */
573 pm_runtime_put(&host->card->dev);
574} 587}
575 588
576/* 589/*
@@ -718,16 +731,21 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
718 card = host->card; 731 card = host->card;
719 732
720 /* 733 /*
721 * Let runtime PM core know our card is active 734 * Enable runtime PM only if supported by host+card+board
722 */ 735 */
723 err = pm_runtime_set_active(&card->dev); 736 if (host->caps & MMC_CAP_POWER_OFF_CARD) {
724 if (err) 737 /*
725 goto remove; 738 * Let runtime PM core know our card is active
739 */
740 err = pm_runtime_set_active(&card->dev);
741 if (err)
742 goto remove;
726 743
727 /* 744 /*
728 * Enable runtime PM for this card 745 * Enable runtime PM for this card
729 */ 746 */
730 pm_runtime_enable(&card->dev); 747 pm_runtime_enable(&card->dev);
748 }
731 749
732 /* 750 /*
733 * The number of functions on the card is encoded inside 751 * The number of functions on the card is encoded inside
@@ -745,9 +763,10 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
745 goto remove; 763 goto remove;
746 764
747 /* 765 /*
748 * Enable Runtime PM for this func 766 * Enable Runtime PM for this func (if supported)
749 */ 767 */
750 pm_runtime_enable(&card->sdio_func[i]->dev); 768 if (host->caps & MMC_CAP_POWER_OFF_CARD)
769 pm_runtime_enable(&card->sdio_func[i]->dev);
751 } 770 }
752 771
753 mmc_release_host(host); 772 mmc_release_host(host);
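
The common theme of the sdio.c hunks above (and the sdio_bus.c hunks that follow) is that runtime PM now engages only when the host declares MMC_CAP_POWER_OFF_CARD, because on many boards the card's power rail cannot actually be cut and unconditional pm_runtime calls broke those setups. A condensed sketch of the gating pattern, with powered_op() as a hypothetical wrapper:

#include <linux/mmc/host.h>
#include <linux/pm_runtime.h>

/* Only hosts that can really power the card off take the runtime
 * PM reference; everyone else skips both the get and the matching
 * put, so the reference counts stay balanced either way. */
static int powered_op(struct mmc_host *host, struct device *card_dev)
{
	int err;

	if (host->caps & MMC_CAP_POWER_OFF_CARD) {
		err = pm_runtime_get_sync(card_dev);
		if (err < 0)
			return err;
	}

	/* ... do work that requires the card to be powered ... */

	if (host->caps & MMC_CAP_POWER_OFF_CARD)
		pm_runtime_put_sync(card_dev);
	return 0;
}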
diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c
index 2716c7ab6bbf..203da443e339 100644
--- a/drivers/mmc/core/sdio_bus.c
+++ b/drivers/mmc/core/sdio_bus.c
@@ -17,6 +17,7 @@
17#include <linux/pm_runtime.h> 17#include <linux/pm_runtime.h>
18 18
19#include <linux/mmc/card.h> 19#include <linux/mmc/card.h>
20#include <linux/mmc/host.h>
20#include <linux/mmc/sdio_func.h> 21#include <linux/mmc/sdio_func.h>
21 22
22#include "sdio_cis.h" 23#include "sdio_cis.h"
@@ -132,9 +133,11 @@ static int sdio_bus_probe(struct device *dev)
132 * it should call pm_runtime_put_noidle() in its probe routine and 133 * it should call pm_runtime_put_noidle() in its probe routine and
133 * pm_runtime_get_noresume() in its remove routine. 134 * pm_runtime_get_noresume() in its remove routine.
134 */ 135 */
135 ret = pm_runtime_get_sync(dev); 136 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
136 if (ret < 0) 137 ret = pm_runtime_get_sync(dev);
137 goto out; 138 if (ret < 0)
139 goto out;
140 }
138 141
139 /* Set the default block size so the driver is sure it's something 142 /* Set the default block size so the driver is sure it's something
140 * sensible. */ 143 * sensible. */
@@ -151,7 +154,8 @@ static int sdio_bus_probe(struct device *dev)
151 return 0; 154 return 0;
152 155
153disable_runtimepm: 156disable_runtimepm:
154 pm_runtime_put_noidle(dev); 157 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
158 pm_runtime_put_noidle(dev);
155out: 159out:
156 return ret; 160 return ret;
157} 161}
@@ -160,12 +164,14 @@ static int sdio_bus_remove(struct device *dev)
160{ 164{
161 struct sdio_driver *drv = to_sdio_driver(dev->driver); 165 struct sdio_driver *drv = to_sdio_driver(dev->driver);
162 struct sdio_func *func = dev_to_sdio_func(dev); 166 struct sdio_func *func = dev_to_sdio_func(dev);
163 int ret; 167 int ret = 0;
164 168
165 /* Make sure card is powered before invoking ->remove() */ 169 /* Make sure card is powered before invoking ->remove() */
166 ret = pm_runtime_get_sync(dev); 170 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) {
167 if (ret < 0) 171 ret = pm_runtime_get_sync(dev);
168 goto out; 172 if (ret < 0)
173 goto out;
174 }
169 175
170 drv->remove(func); 176 drv->remove(func);
171 177
@@ -178,10 +184,12 @@ static int sdio_bus_remove(struct device *dev)
178 } 184 }
179 185
180 /* First, undo the increment made directly above */ 186 /* First, undo the increment made directly above */
181 pm_runtime_put_noidle(dev); 187 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
188 pm_runtime_put_noidle(dev);
182 189
183 /* Then undo the runtime PM settings in sdio_bus_probe() */ 190 /* Then undo the runtime PM settings in sdio_bus_probe() */
184 pm_runtime_put_noidle(dev); 191 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
192 pm_runtime_put_noidle(dev);
185 193
186out: 194out:
187 return ret; 195 return ret;
@@ -191,6 +199,8 @@ out:
191 199
192static int sdio_bus_pm_prepare(struct device *dev) 200static int sdio_bus_pm_prepare(struct device *dev)
193{ 201{
202 struct sdio_func *func = dev_to_sdio_func(dev);
203
194 /* 204 /*
195 * Resume an SDIO device which was suspended at run time at this 205 * Resume an SDIO device which was suspended at run time at this
196 * point, in order to allow standard SDIO suspend/resume paths 206 * point, in order to allow standard SDIO suspend/resume paths
@@ -212,7 +222,8 @@ static int sdio_bus_pm_prepare(struct device *dev)
212 * since there is little point in failing system suspend if a 222 * since there is little point in failing system suspend if a
213 * device can't be resumed. 223 * device can't be resumed.
214 */ 224 */
215 pm_runtime_resume(dev); 225 if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD)
226 pm_runtime_resume(dev);
216 227
217 return 0; 228 return 0;
218} 229}
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 82a1079bbdc7..5d46021cbb57 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1002,7 +1002,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,
1002 * Monitor a 0->1 transition first 1002 * Monitor a 0->1 transition first
1003 */ 1003 */
1004 if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { 1004 if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) {
1005 while ((!(OMAP_HSMMC_READ(host, SYSCTL) & bit)) 1005 while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))
1006 && (i++ < limit)) 1006 && (i++ < limit))
1007 cpu_relax(); 1007 cpu_relax();
1008 } 1008 }
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index 2e9cca19c90b..9b82910b9dbb 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -17,6 +17,7 @@
17#include <linux/clk.h> 17#include <linux/clk.h>
18#include <linux/mmc/host.h> 18#include <linux/mmc/host.h>
19#include <linux/mmc/sdhci-pltfm.h> 19#include <linux/mmc/sdhci-pltfm.h>
20#include <mach/hardware.h>
20#include "sdhci.h" 21#include "sdhci.h"
21#include "sdhci-pltfm.h" 22#include "sdhci-pltfm.h"
22#include "sdhci-esdhc.h" 23#include "sdhci-esdhc.h"
@@ -112,6 +113,13 @@ static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pd
112 clk_enable(clk); 113 clk_enable(clk);
113 pltfm_host->clk = clk; 114 pltfm_host->clk = clk;
114 115
116 if (cpu_is_mx35() || cpu_is_mx51())
117 host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
118
119 /* Fix errata ENGcm07207 which is present on i.MX25 and i.MX35 */
120 if (cpu_is_mx25() || cpu_is_mx35())
121 host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
122
115 return 0; 123 return 0;
116} 124}
117 125
@@ -133,10 +141,8 @@ static struct sdhci_ops sdhci_esdhc_ops = {
133}; 141};
134 142
135struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { 143struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
136 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_MULTIBLOCK 144 .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA,
137 | SDHCI_QUIRK_BROKEN_ADMA,
138 /* ADMA has issues. Might be fixable */ 145 /* ADMA has issues. Might be fixable */
139 /* NO_MULTIBLOCK might be MX35 only (Errata: ENGcm07207) */
140 .ops = &sdhci_esdhc_ops, 146 .ops = &sdhci_esdhc_ops,
141 .init = esdhc_pltfm_init, 147 .init = esdhc_pltfm_init,
142 .exit = esdhc_pltfm_exit, 148 .exit = esdhc_pltfm_exit,
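
Here the i.MX eSDHC quirks move from the static platform data into esdhc_pltfm_init(), where the cpu_is_mx*() helpers (via the <mach/hardware.h> include added above) select them at probe time; that lets one kernel image serve several SoCs and restricts SDHCI_QUIRK_NO_MULTIBLOCK to the parts actually hit by erratum ENGcm07207. The pattern, factored into a hypothetical helper for clarity:

#include <mach/hardware.h>	/* cpu_is_mx*() */
#include "sdhci.h"

/* Static pdata keeps only quirks that apply to every SoC; the
 * SoC-specific ones are ORed in once the running chip is known. */
static void apply_soc_quirks(struct sdhci_host *host)
{
	if (cpu_is_mx35() || cpu_is_mx51())
		host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;

	/* Erratum ENGcm07207: multi-block transfers are broken on
	 * i.MX25 and i.MX35 only. */
	if (cpu_is_mx25() || cpu_is_mx35())
		host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK;
}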
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 55746bac2f44..3d9c2460d437 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -149,11 +149,11 @@ static const struct sdhci_pci_fixes sdhci_cafe = {
149 * ADMA operation is disabled for Moorestown platform due to 149 * ADMA operation is disabled for Moorestown platform due to
150 * hardware bugs. 150 * hardware bugs.
151 */ 151 */
152static int mrst_hc1_probe(struct sdhci_pci_chip *chip) 152static int mrst_hc_probe(struct sdhci_pci_chip *chip)
153{ 153{
154 /* 154 /*
155 * slots number is fixed here for MRST as SDIO3 is never used and has 155 * slots number is fixed here for MRST as SDIO3/5 are never used and
156 * hardware bugs. 156 * have hardware bugs.
157 */ 157 */
158 chip->num_slots = 1; 158 chip->num_slots = 1;
159 return 0; 159 return 0;
@@ -163,9 +163,9 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {
163 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 163 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
164}; 164};
165 165
166static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = { 166static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {
167 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, 167 .quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT,
168 .probe = mrst_hc1_probe, 168 .probe = mrst_hc_probe,
169}; 169};
170 170
171static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { 171static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {
@@ -538,7 +538,15 @@ static const struct pci_device_id pci_ids[] __devinitdata = {
538 .device = PCI_DEVICE_ID_INTEL_MRST_SD1, 538 .device = PCI_DEVICE_ID_INTEL_MRST_SD1,
539 .subvendor = PCI_ANY_ID, 539 .subvendor = PCI_ANY_ID,
540 .subdevice = PCI_ANY_ID, 540 .subdevice = PCI_ANY_ID,
541 .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1, 541 .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
542 },
543
544 {
545 .vendor = PCI_VENDOR_ID_INTEL,
546 .device = PCI_DEVICE_ID_INTEL_MRST_SD2,
547 .subvendor = PCI_ANY_ID,
548 .subdevice = PCI_ANY_ID,
549 .driver_data = (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,
542 }, 550 },
543 551
544 { 552 {
@@ -637,6 +645,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
637{ 645{
638 struct sdhci_pci_chip *chip; 646 struct sdhci_pci_chip *chip;
639 struct sdhci_pci_slot *slot; 647 struct sdhci_pci_slot *slot;
648 mmc_pm_flag_t slot_pm_flags;
640 mmc_pm_flag_t pm_flags = 0; 649 mmc_pm_flag_t pm_flags = 0;
641 int i, ret; 650 int i, ret;
642 651
@@ -657,7 +666,11 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
657 return ret; 666 return ret;
658 } 667 }
659 668
660 pm_flags |= slot->host->mmc->pm_flags; 669 slot_pm_flags = slot->host->mmc->pm_flags;
670 if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ)
671 sdhci_enable_irq_wakeups(slot->host);
672
673 pm_flags |= slot_pm_flags;
661 } 674 }
662 675
663 if (chip->fixes && chip->fixes->suspend) { 676 if (chip->fixes && chip->fixes->suspend) {
@@ -671,8 +684,10 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
671 684
672 pci_save_state(pdev); 685 pci_save_state(pdev);
673 if (pm_flags & MMC_PM_KEEP_POWER) { 686 if (pm_flags & MMC_PM_KEEP_POWER) {
674 if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) 687 if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
688 pci_pme_active(pdev, true);
675 pci_enable_wake(pdev, PCI_D3hot, 1); 689 pci_enable_wake(pdev, PCI_D3hot, 1);
690 }
676 pci_set_power_state(pdev, PCI_D3hot); 691 pci_set_power_state(pdev, PCI_D3hot);
677 } else { 692 } else {
678 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); 693 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
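
The suspend-path hunks above arm wake-on-SDIO-interrupt per slot: when a slot's pm_flags include MMC_PM_WAKE_SDIO_IRQ, sdhci_enable_irq_wakeups() sets SDHCI_WAKE_ON_INT in the controller, and at the PCI level PME is activated before D3hot wake is enabled, since both layers must cooperate for a card interrupt to wake the system. A sketch of the PCI-side ordering, with arm_sdio_wake() as a hypothetical condensation:

#include <linux/pci.h>
#include <linux/mmc/pm.h>

/* Card-level wake (SDHCI_WAKE_ON_INT) is assumed to be armed
 * already; the function must then have PME active and D3hot wake
 * enabled before it drops to D3hot, or the interrupt will never
 * propagate as a wake event. */
static void arm_sdio_wake(struct pci_dev *pdev, mmc_pm_flag_t pm_flags)
{
	if (pm_flags & MMC_PM_KEEP_POWER) {
		if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) {
			pci_pme_active(pdev, true);
			pci_enable_wake(pdev, PCI_D3hot, 1);
		}
		pci_set_power_state(pdev, PCI_D3hot);
	}
}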
diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c
index fc406ac5d193..5a61208cbc66 100644
--- a/drivers/mmc/host/sdhci-pxa.c
+++ b/drivers/mmc/host/sdhci-pxa.c
@@ -141,6 +141,10 @@ static int __devinit sdhci_pxa_probe(struct platform_device *pdev)
141 if (pdata->quirks) 141 if (pdata->quirks)
142 host->quirks |= pdata->quirks; 142 host->quirks |= pdata->quirks;
143 143
144 /* If slot design supports 8 bit data, indicate this to MMC. */
145 if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT)
146 host->mmc->caps |= MMC_CAP_8_BIT_DATA;
147
144 ret = sdhci_add_host(host); 148 ret = sdhci_add_host(host);
145 if (ret) { 149 if (ret) {
146 dev_err(&pdev->dev, "failed to add host\n"); 150 dev_err(&pdev->dev, "failed to add host\n");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 782c0ee3c925..a25db426c910 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1185,17 +1185,31 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1185 if (host->ops->platform_send_init_74_clocks) 1185 if (host->ops->platform_send_init_74_clocks)
1186 host->ops->platform_send_init_74_clocks(host, ios->power_mode); 1186 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
1187 1187
1188 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); 1188 /*
1189 1189 * If your platform has 8-bit width support but is not a v3 controller,
1190 if (ios->bus_width == MMC_BUS_WIDTH_8) 1190 * or if it requires special setup code, you should implement that in
1191 ctrl |= SDHCI_CTRL_8BITBUS; 1191 * platform_8bit_width().
1192 else 1192 */
1193 ctrl &= ~SDHCI_CTRL_8BITBUS; 1193 if (host->ops->platform_8bit_width)
1194 host->ops->platform_8bit_width(host, ios->bus_width);
1195 else {
1196 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1197 if (ios->bus_width == MMC_BUS_WIDTH_8) {
1198 ctrl &= ~SDHCI_CTRL_4BITBUS;
1199 if (host->version >= SDHCI_SPEC_300)
1200 ctrl |= SDHCI_CTRL_8BITBUS;
1201 } else {
1202 if (host->version >= SDHCI_SPEC_300)
1203 ctrl &= ~SDHCI_CTRL_8BITBUS;
1204 if (ios->bus_width == MMC_BUS_WIDTH_4)
1205 ctrl |= SDHCI_CTRL_4BITBUS;
1206 else
1207 ctrl &= ~SDHCI_CTRL_4BITBUS;
1208 }
1209 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
1210 }
1194 1211
1195 if (ios->bus_width == MMC_BUS_WIDTH_4) 1212 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
1196 ctrl |= SDHCI_CTRL_4BITBUS;
1197 else
1198 ctrl &= ~SDHCI_CTRL_4BITBUS;
1199 1213
1200 if ((ios->timing == MMC_TIMING_SD_HS || 1214 if ((ios->timing == MMC_TIMING_SD_HS ||
1201 ios->timing == MMC_TIMING_MMC_HS) 1215 ios->timing == MMC_TIMING_MMC_HS)
@@ -1681,6 +1695,16 @@ int sdhci_resume_host(struct sdhci_host *host)
1681 1695
1682EXPORT_SYMBOL_GPL(sdhci_resume_host); 1696EXPORT_SYMBOL_GPL(sdhci_resume_host);
1683 1697
1698void sdhci_enable_irq_wakeups(struct sdhci_host *host)
1699{
1700 u8 val;
1701 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
1702 val |= SDHCI_WAKE_ON_INT;
1703 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
1704}
1705
1706EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups);
1707
1684#endif /* CONFIG_PM */ 1708#endif /* CONFIG_PM */
1685 1709
1686/*****************************************************************************\ 1710/*****************************************************************************\
@@ -1845,11 +1869,19 @@ int sdhci_add_host(struct sdhci_host *host)
1845 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 1869 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
1846 else 1870 else
1847 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 1871 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
1872
1848 mmc->f_max = host->max_clk; 1873 mmc->f_max = host->max_clk;
1849 mmc->caps |= MMC_CAP_SDIO_IRQ; 1874 mmc->caps |= MMC_CAP_SDIO_IRQ;
1850 1875
1876 /*
1877 * A controller may support 8-bit width, but the board itself
1878 * might not have the pins brought out. Boards that support
1879 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
1880 * their platform code before calling sdhci_add_host(), and we
1881 * won't assume 8-bit width for hosts without that CAP.
1882 */
1851 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) 1883 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
1852 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; 1884 mmc->caps |= MMC_CAP_4_BIT_DATA;
1853 1885
1854 if (caps & SDHCI_CAN_DO_HISPD) 1886 if (caps & SDHCI_CAN_DO_HISPD)
1855 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; 1887 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
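
Two related changes land in sdhci.c above: bus-width programming now defers to a platform_8bit_width() op when the host provides one (otherwise falling back to the standard SDHCI_HOST_CONTROL bits, where SDHCI_CTRL_8BITBUS is only trusted on v3 controllers), and the core stops assuming MMC_CAP_8_BIT_DATA, so boards with the pins wired up must set that cap before sdhci_add_host(). A sketch of what a platform op might look like; this hypothetical version simply mirrors the core's own fallback path, whereas a real pre-v3 controller would poke its nonstandard bits here:

#include <linux/mmc/host.h>
#include "sdhci.h"

/* Called by the core instead of its default SDHCI_HOST_CONTROL
 * handling whenever host->ops->platform_8bit_width is non-NULL. */
static int my_platform_8bit_width(struct sdhci_host *host, int width)
{
	u8 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	ctrl &= ~(SDHCI_CTRL_4BITBUS | SDHCI_CTRL_8BITBUS);
	if (width == MMC_BUS_WIDTH_8)
		ctrl |= SDHCI_CTRL_8BITBUS;
	else if (width == MMC_BUS_WIDTH_4)
		ctrl |= SDHCI_CTRL_4BITBUS;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
	return 0;
}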
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index b7b8a3b28b01..e42d7f00c060 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -76,7 +76,7 @@
76#define SDHCI_CTRL_ADMA1 0x08 76#define SDHCI_CTRL_ADMA1 0x08
77#define SDHCI_CTRL_ADMA32 0x10 77#define SDHCI_CTRL_ADMA32 0x10
78#define SDHCI_CTRL_ADMA64 0x18 78#define SDHCI_CTRL_ADMA64 0x18
79#define SDHCI_CTRL_8BITBUS 0x20 79#define SDHCI_CTRL_8BITBUS 0x20
80 80
81#define SDHCI_POWER_CONTROL 0x29 81#define SDHCI_POWER_CONTROL 0x29
82#define SDHCI_POWER_ON 0x01 82#define SDHCI_POWER_ON 0x01
@@ -87,6 +87,9 @@
87#define SDHCI_BLOCK_GAP_CONTROL 0x2A 87#define SDHCI_BLOCK_GAP_CONTROL 0x2A
88 88
89#define SDHCI_WAKE_UP_CONTROL 0x2B 89#define SDHCI_WAKE_UP_CONTROL 0x2B
90#define SDHCI_WAKE_ON_INT 0x01
91#define SDHCI_WAKE_ON_INSERT 0x02
92#define SDHCI_WAKE_ON_REMOVE 0x04
90 93
91#define SDHCI_CLOCK_CONTROL 0x2C 94#define SDHCI_CLOCK_CONTROL 0x2C
92#define SDHCI_DIVIDER_SHIFT 8 95#define SDHCI_DIVIDER_SHIFT 8
@@ -152,6 +155,7 @@
152#define SDHCI_CLOCK_BASE_SHIFT 8 155#define SDHCI_CLOCK_BASE_SHIFT 8
153#define SDHCI_MAX_BLOCK_MASK 0x00030000 156#define SDHCI_MAX_BLOCK_MASK 0x00030000
154#define SDHCI_MAX_BLOCK_SHIFT 16 157#define SDHCI_MAX_BLOCK_SHIFT 16
158#define SDHCI_CAN_DO_8BIT 0x00040000
155#define SDHCI_CAN_DO_ADMA2 0x00080000 159#define SDHCI_CAN_DO_ADMA2 0x00080000
156#define SDHCI_CAN_DO_ADMA1 0x00100000 160#define SDHCI_CAN_DO_ADMA1 0x00100000
157#define SDHCI_CAN_DO_HISPD 0x00200000 161#define SDHCI_CAN_DO_HISPD 0x00200000
@@ -212,6 +216,8 @@ struct sdhci_ops {
212 unsigned int (*get_max_clock)(struct sdhci_host *host); 216 unsigned int (*get_max_clock)(struct sdhci_host *host);
213 unsigned int (*get_min_clock)(struct sdhci_host *host); 217 unsigned int (*get_min_clock)(struct sdhci_host *host);
214 unsigned int (*get_timeout_clock)(struct sdhci_host *host); 218 unsigned int (*get_timeout_clock)(struct sdhci_host *host);
219 int (*platform_8bit_width)(struct sdhci_host *host,
220 int width);
215 void (*platform_send_init_74_clocks)(struct sdhci_host *host, 221 void (*platform_send_init_74_clocks)(struct sdhci_host *host,
216 u8 power_mode); 222 u8 power_mode);
217 unsigned int (*get_ro)(struct sdhci_host *host); 223 unsigned int (*get_ro)(struct sdhci_host *host);
@@ -317,6 +323,7 @@ extern void sdhci_remove_host(struct sdhci_host *host, int dead);
317#ifdef CONFIG_PM 323#ifdef CONFIG_PM
318extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); 324extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state);
319extern int sdhci_resume_host(struct sdhci_host *host); 325extern int sdhci_resume_host(struct sdhci_host *host);
326extern void sdhci_enable_irq_wakeups(struct sdhci_host *host);
320#endif 327#endif
321 328
322#endif /* __SDHCI_HW_H */ 329#endif /* __SDHCI_HW_H */
diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c
index b4ead4a13c98..f8f65df9b017 100644
--- a/drivers/mmc/host/ushc.c
+++ b/drivers/mmc/host/ushc.c
@@ -425,7 +425,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
425 struct usb_device *usb_dev = interface_to_usbdev(intf); 425 struct usb_device *usb_dev = interface_to_usbdev(intf);
426 struct mmc_host *mmc; 426 struct mmc_host *mmc;
427 struct ushc_data *ushc; 427 struct ushc_data *ushc;
428 int ret = -ENOMEM; 428 int ret;
429 429
430 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev); 430 mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);
431 if (mmc == NULL) 431 if (mmc == NULL)
@@ -462,11 +462,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
462 mmc->max_blk_count = 511; 462 mmc->max_blk_count = 511;
463 463
464 ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL); 464 ushc->int_urb = usb_alloc_urb(0, GFP_KERNEL);
465 if (ushc->int_urb == NULL) 465 if (ushc->int_urb == NULL) {
466 ret = -ENOMEM;
466 goto err; 467 goto err;
468 }
467 ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); 469 ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL);
468 if (ushc->int_data == NULL) 470 if (ushc->int_data == NULL) {
471 ret = -ENOMEM;
469 goto err; 472 goto err;
473 }
470 usb_fill_int_urb(ushc->int_urb, ushc->usb_dev, 474 usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,
471 usb_rcvintpipe(usb_dev, 475 usb_rcvintpipe(usb_dev,
472 intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), 476 intf->cur_altsetting->endpoint[0].desc.bEndpointAddress),
@@ -475,11 +479,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
475 intf->cur_altsetting->endpoint[0].desc.bInterval); 479 intf->cur_altsetting->endpoint[0].desc.bInterval);
476 480
477 ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); 481 ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL);
478 if (ushc->cbw_urb == NULL) 482 if (ushc->cbw_urb == NULL) {
483 ret = -ENOMEM;
479 goto err; 484 goto err;
485 }
480 ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); 486 ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
481 if (ushc->cbw == NULL) 487 if (ushc->cbw == NULL) {
488 ret = -ENOMEM;
482 goto err; 489 goto err;
490 }
483 ushc->cbw->signature = USHC_CBW_SIGNATURE; 491 ushc->cbw->signature = USHC_CBW_SIGNATURE;
484 492
485 usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), 493 usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2),
@@ -487,15 +495,21 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id
487 cbw_callback, ushc); 495 cbw_callback, ushc);
488 496
489 ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); 497 ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL);
490 if (ushc->data_urb == NULL) 498 if (ushc->data_urb == NULL) {
499 ret = -ENOMEM;
491 goto err; 500 goto err;
501 }
492 502
493 ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); 503 ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL);
494 if (ushc->csw_urb == NULL) 504 if (ushc->csw_urb == NULL) {
505 ret = -ENOMEM;
495 goto err; 506 goto err;
507 }
496 ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); 508 ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL);
497 if (ushc->csw == NULL) 509 if (ushc->csw == NULL) {
510 ret = -ENOMEM;
498 goto err; 511 goto err;
512 }
499 usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6), 513 usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),
500 ushc->csw, sizeof(struct ushc_csw), 514 ushc->csw, sizeof(struct ushc_csw),
501 csw_callback, ushc); 515 csw_callback, ushc);
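
The ushc_probe() hunks above replace a pre-seeded "ret = -ENOMEM" with an explicit assignment at every failure site. The idiom costs a few lines but guarantees that each goto carries the code for the failure that actually occurred, and it stops later non-ENOMEM failure paths from silently inheriting a stale value. A self-contained sketch of the idiom, with alloc_pair() as a hypothetical helper:

#include <linux/slab.h>

/* ret is assigned at each failure site rather than pre-seeded, so
 * every path that reaches an error label returns the code for the
 * failure that really happened. */
static int alloc_pair(void **a, void **b)
{
	int ret;

	*a = kzalloc(64, GFP_KERNEL);
	if (*a == NULL) {
		ret = -ENOMEM;
		goto err;
	}
	*b = kzalloc(64, GFP_KERNEL);
	if (*b == NULL) {
		ret = -ENOMEM;
		goto err_free_a;
	}
	return 0;

err_free_a:
	kfree(*a);
err:
	return ret;
}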
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index c2960ac9f39c..811775aa8ee8 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -482,10 +482,17 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
482 uint32_t data = 0; 482 uint32_t data = 0;
483 struct ubi_vid_hdr vid_hdr; 483 struct ubi_vid_hdr vid_hdr;
484 484
485 addr = (loff_t)pnum * ubi->peb_size + ubi->vid_hdr_aloffset; 485 /*
486 * It is important to first invalidate the EC header, and then the VID
487 * header. Otherwise a power cut may lead to valid EC header and
488 * invalid VID header, in which case UBI will treat this PEB as
489 * corrupted and will try to preserve it, and print scary warnings (see
490 * the header comment in scan.c for more information).
491 */
492 addr = (loff_t)pnum * ubi->peb_size;
486 err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data); 493 err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
487 if (!err) { 494 if (!err) {
488 addr -= ubi->vid_hdr_aloffset; 495 addr += ubi->vid_hdr_aloffset;
489 err = ubi->mtd->write(ubi->mtd, addr, 4, &written, 496 err = ubi->mtd->write(ubi->mtd, addr, 4, &written,
490 (void *)&data); 497 (void *)&data);
491 if (!err) 498 if (!err)
@@ -494,18 +501,24 @@ static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
494 501
495 /* 502 /*
496 * We failed to write to the media. This was observed with Spansion 503 * We failed to write to the media. This was observed with Spansion
497 * S29GL512N NOR flash. Most probably the eraseblock erasure was 504 * S29GL512N NOR flash. Most probably the previous eraseblock erasure
498 * interrupted at a very inappropriate moment, so it became unwritable. 505 * was interrupted at a very inappropriate moment, so it became
499 * In this case we probably anyway have garbage in this PEB. 506 * unwritable. In this case we probably anyway have garbage in this
507 * PEB.
500 */ 508 */
501 err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0); 509 err1 = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
502 if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) 510 if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR) {
503 /* 511 struct ubi_ec_hdr ec_hdr;
504 * The VID header is corrupted, so we can safely erase this 512
505 * PEB and not afraid that it will be treated as a valid PEB in 513 err1 = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
506 * case of an unclean reboot. 514 if (err1 == UBI_IO_BAD_HDR_EBADMSG || err1 == UBI_IO_BAD_HDR)
507 */ 515 /*
508 return 0; 516 * Both VID and EC headers are corrupted, so we can
517 * safely erase this PEB and not afraid that it will be
518 * treated as a valid PEB in case of an unclean reboot.
519 */
520 return 0;
521 }
509 522
510 /* 523 /*
511 * The PEB contains a valid VID header, but we cannot invalidate it. 524 * The PEB contains a valid VID header, but we cannot invalidate it.
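
The reordering above encodes an invariant: when nor_erase_prepare() invalidates a PEB, the EC header (at offset 0 of the PEB) must be zapped before the VID header (at vid_hdr_aloffset). If power is cut between the two writes, the flash can then only be left with both headers intact or with the EC header gone, never in the "valid EC, corrupt VID" state that scanning would preserve as a suspect PEB. The write order, reduced to a sketch (invalidate_headers() is a hypothetical name; the mtd->write calls mirror the hunk):

#include "ubi.h"	/* UBI-internal: struct ubi_device */

/* Zero 4 bytes of magic in each header, EC first, then VID. */
static int invalidate_headers(struct ubi_device *ubi, int pnum)
{
	uint32_t data = 0;
	size_t written;
	loff_t addr = (loff_t)pnum * ubi->peb_size;
	int err;

	err = ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
	if (err)
		return err;
	addr += ubi->vid_hdr_aloffset;
	return ubi->mtd->write(ubi->mtd, addr, 4, &written, (void *)&data);
}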
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 3c631863bf40..79ca304fc4db 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -787,16 +787,15 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
787 * erased, so it became unstable and corrupted, and should be 787 * erased, so it became unstable and corrupted, and should be
788 * erased. 788 * erased.
789 */ 789 */
790 return 0; 790 err = 0;
791 goto out_unlock;
791 } 792 }
792 793
793 if (err) 794 if (err)
794 return err; 795 goto out_unlock;
795 796
796 if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size)) { 797 if (ubi_check_pattern(ubi->peb_buf1, 0xFF, ubi->leb_size))
797 mutex_unlock(&ubi->buf_mutex); 798 goto out_unlock;
798 return 0;
799 }
800 799
801 ubi_err("PEB %d contains corrupted VID header, and the data does not " 800 ubi_err("PEB %d contains corrupted VID header, and the data does not "
802 "contain all 0xFF, this may be a non-UBI PEB or a severe VID " 801 "contain all 0xFF, this may be a non-UBI PEB or a severe VID "
@@ -806,8 +805,11 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
806 pnum, ubi->leb_start, ubi->leb_size); 805 pnum, ubi->leb_start, ubi->leb_size);
807 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 806 ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
808 ubi->peb_buf1, ubi->leb_size, 1); 807 ubi->peb_buf1, ubi->leb_size, 1);
808 err = 1;
809
810out_unlock:
809 mutex_unlock(&ubi->buf_mutex); 811 mutex_unlock(&ubi->buf_mutex);
810 return 1; 812 return err;
811} 813}
812 814
813/** 815/**
@@ -951,6 +953,10 @@ static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si,
951 * impossible to distinguish it from a PEB which just 953 * impossible to distinguish it from a PEB which just
952 * contains garbage because of a power cut during erase 954 * contains garbage because of a power cut during erase
953 * operation. So we just schedule this PEB for erasure. 955 * operation. So we just schedule this PEB for erasure.
956 *
957 * Besides, in case of NOR flash, we deliberately
958 * corrupt both headers because NOR flash erasure is
959 * slow and can start from the end.
954 */ 960 */
955 err = 0; 961 err = 0;
956 else 962 else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cdaac85..4f1755bddf6b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2543,10 +2543,10 @@ config PCH_GBE
2543 depends on PCI 2543 depends on PCI
2544 select MII 2544 select MII
2545 ---help--- 2545 ---help---
2546 This is a gigabit ethernet driver for Topcliff PCH. 2546 This is a gigabit ethernet driver for EG20T PCH.
2547 Topcliff PCH is the platform controller hub that is used in Intel's 2547 EG20T PCH is the platform controller hub that is used in Intel's
2548 general embedded platform. 2548 general embedded platform.
2549 Topcliff PCH has Gigabit Ethernet interface. 2549 EG20T PCH has Gigabit Ethernet interface.
2550 Using this interface, it is able to access system devices connected 2550 Using this interface, it is able to access system devices connected
2551 to Gigabit Ethernet. 2551 to Gigabit Ethernet.
2552 This driver enables Gigabit Ethernet function. 2552 This driver enables Gigabit Ethernet function.
@@ -2945,6 +2945,18 @@ source "drivers/s390/net/Kconfig"
2945 2945
2946source "drivers/net/caif/Kconfig" 2946source "drivers/net/caif/Kconfig"
2947 2947
2948config TILE_NET
2949 tristate "Tilera GBE/XGBE network driver support"
2950 depends on TILE
2951 default y
2952 select CRC32
2953 help
2954 This is a standard Linux network device driver for the
2955 on-chip Tilera Gigabit Ethernet and XAUI interfaces.
2956
2957 To compile this driver as a module, choose M here: the module
2958 will be called tile_net.
2959
2948config XEN_NETDEV_FRONTEND 2960config XEN_NETDEV_FRONTEND
2949 tristate "Xen network device frontend driver" 2961 tristate "Xen network device frontend driver"
2950 depends on XEN 2962 depends on XEN
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index 652fc6b98039..b90738d13994 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -301,3 +301,4 @@ obj-$(CONFIG_CAIF) += caif/
301 301
302obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ 302obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
303obj-$(CONFIG_PCH_GBE) += pch_gbe/ 303obj-$(CONFIG_PCH_GBE) += pch_gbe/
304obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b2c3a5..1bf672009948 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
82 addr[0] = addr[1] = 0; 82 addr[0] = addr[1] = 0;
83 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data); 83 AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
84 if (atl1c_check_eeprom_exist(hw)) { 84 if (atl1c_check_eeprom_exist(hw)) {
85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) { 85 if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
86 /* Enable OTP CLK */ 86 /* Enable OTP CLK */
87 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) { 87 if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
88 otp_ctrl_data |= OTP_CTRL_CLK_EN; 88 otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f89c142..53eff9ba6e95 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -155,10 +155,10 @@ static void au1000_enable_mac(struct net_device *dev, int force_reset)
155 spin_lock_irqsave(&aup->lock, flags); 155 spin_lock_irqsave(&aup->lock, flags);
156 156
157 if (force_reset || (!aup->mac_enabled)) { 157 if (force_reset || (!aup->mac_enabled)) {
158 writel(MAC_EN_CLOCK_ENABLE, &aup->enable); 158 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
159 au_sync_delay(2); 159 au_sync_delay(2);
160 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 160 writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
161 | MAC_EN_CLOCK_ENABLE), &aup->enable); 161 | MAC_EN_CLOCK_ENABLE), aup->enable);
162 au_sync_delay(2); 162 au_sync_delay(2);
163 163
164 aup->mac_enabled = 1; 164 aup->mac_enabled = 1;
@@ -503,9 +503,9 @@ static void au1000_reset_mac_unlocked(struct net_device *dev)
503 503
504 au1000_hard_stop(dev); 504 au1000_hard_stop(dev);
505 505
506 writel(MAC_EN_CLOCK_ENABLE, &aup->enable); 506 writel(MAC_EN_CLOCK_ENABLE, aup->enable);
507 au_sync_delay(2); 507 au_sync_delay(2);
508 writel(0, &aup->enable); 508 writel(0, aup->enable);
509 au_sync_delay(2); 509 au_sync_delay(2);
510 510
511 aup->tx_full = 0; 511 aup->tx_full = 0;
@@ -1119,7 +1119,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
1119 /* set a random MAC now in case platform_data doesn't provide one */ 1119 /* set a random MAC now in case platform_data doesn't provide one */
1120 random_ether_addr(dev->dev_addr); 1120 random_ether_addr(dev->dev_addr);
1121 1121
1122 writel(0, &aup->enable); 1122 writel(0, aup->enable);
1123 aup->mac_enabled = 0; 1123 aup->mac_enabled = 0;
1124 1124
1125 pd = pdev->dev.platform_data; 1125 pd = pdev->dev.platform_data;
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index c3449bbc585a..d887a76cd39d 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -816,40 +816,48 @@ static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
816} 816}
817 817
818/* 818/*
819 * Collect up to maxaddrs worth of a netdevice's unicast addresses into an 819 * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
820 * array of addrss pointers and return the number collected. 820 at a specified offset within the list, into an array of address pointers and
821 * return the number collected.
821 */ 822 */
822static inline int collect_netdev_uc_list_addrs(const struct net_device *dev, 823static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
823 const u8 **addr, 824 const u8 **addr,
824 unsigned int maxaddrs) 825 unsigned int offset,
826 unsigned int maxaddrs)
825{ 827{
828 unsigned int index = 0;
826 unsigned int naddr = 0; 829 unsigned int naddr = 0;
827 const struct netdev_hw_addr *ha; 830 const struct netdev_hw_addr *ha;
828 831
829 for_each_dev_addr(dev, ha) { 832 for_each_dev_addr(dev, ha)
830 addr[naddr++] = ha->addr; 833 if (index++ >= offset) {
831 if (naddr >= maxaddrs) 834 addr[naddr++] = ha->addr;
832 break; 835 if (naddr >= maxaddrs)
833 } 836 break;
837 }
834 return naddr; 838 return naddr;
835} 839}
836 840
837/* 841/*
838 * Collect up to maxaddrs worth of a netdevice's multicast addresses into an 842 * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
839 * array of addrss pointers and return the number collected. 843 at a specified offset within the list, into an array of address pointers and
844 * return the number collected.
840 */ 845 */
841static inline int collect_netdev_mc_list_addrs(const struct net_device *dev, 846static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
842 const u8 **addr, 847 const u8 **addr,
843 unsigned int maxaddrs) 848 unsigned int offset,
849 unsigned int maxaddrs)
844{ 850{
851 unsigned int index = 0;
845 unsigned int naddr = 0; 852 unsigned int naddr = 0;
846 const struct netdev_hw_addr *ha; 853 const struct netdev_hw_addr *ha;
847 854
848 netdev_for_each_mc_addr(ha, dev) { 855 netdev_for_each_mc_addr(ha, dev)
849 addr[naddr++] = ha->addr; 856 if (index++ >= offset) {
850 if (naddr >= maxaddrs) 857 addr[naddr++] = ha->addr;
851 break; 858 if (naddr >= maxaddrs)
852 } 859 break;
860 }
853 return naddr; 861 return naddr;
854} 862}
855 863
@@ -862,16 +870,20 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
862 u64 mhash = 0; 870 u64 mhash = 0;
863 u64 uhash = 0; 871 u64 uhash = 0;
864 bool free = true; 872 bool free = true;
865 u16 filt_idx[7]; 873 unsigned int offset, naddr;
866 const u8 *addr[7]; 874 const u8 *addr[7];
867 int ret, naddr = 0; 875 int ret;
868 const struct port_info *pi = netdev_priv(dev); 876 const struct port_info *pi = netdev_priv(dev);
869 877
870 /* first do the secondary unicast addresses */ 878 /* first do the secondary unicast addresses */
871 naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr)); 879 for (offset = 0; ; offset += naddr) {
872 if (naddr > 0) { 880 naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
881 ARRAY_SIZE(addr));
882 if (naddr == 0)
883 break;
884
873 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, 885 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
874 naddr, addr, filt_idx, &uhash, sleep); 886 naddr, addr, NULL, &uhash, sleep);
875 if (ret < 0) 887 if (ret < 0)
876 return ret; 888 return ret;
877 889
@@ -879,12 +891,17 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
879 } 891 }
880 892
881 /* next set up the multicast addresses */ 893 /* next set up the multicast addresses */
882 naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr)); 894 for (offset = 0; ; offset += naddr) {
883 if (naddr > 0) { 895 naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
896 ARRAY_SIZE(addr));
897 if (naddr == 0)
898 break;
899
884 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free, 900 ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
885 naddr, addr, filt_idx, &mhash, sleep); 901 naddr, addr, NULL, &mhash, sleep);
886 if (ret < 0) 902 if (ret < 0)
887 return ret; 903 return ret;
904 free = false;
888 } 905 }
889 906
890 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0, 907 return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index e306c20dfaee..19520afe1a12 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -1014,48 +1014,72 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
1014 unsigned int naddr, const u8 **addr, u16 *idx, 1014 unsigned int naddr, const u8 **addr, u16 *idx,
1015 u64 *hash, bool sleep_ok) 1015 u64 *hash, bool sleep_ok)
1016{ 1016{
1017 int i, ret; 1017 int offset, ret = 0;
1018 unsigned nfilters = 0;
1019 unsigned int rem = naddr;
1018 struct fw_vi_mac_cmd cmd, rpl; 1020 struct fw_vi_mac_cmd cmd, rpl;
1019 struct fw_vi_mac_exact *p;
1020 size_t len16;
1021 1021
1022 if (naddr > ARRAY_SIZE(cmd.u.exact)) 1022 if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
1023 return -EINVAL; 1023 return -EINVAL;
1024 len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1025 u.exact[naddr]), 16);
1026 1024
1027 memset(&cmd, 0, sizeof(cmd)); 1025 for (offset = 0; offset < naddr; /**/) {
1028 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) | 1026 unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
1029 FW_CMD_REQUEST | 1027 ? rem
1030 FW_CMD_WRITE | 1028 : ARRAY_SIZE(cmd.u.exact));
1031 (free ? FW_CMD_EXEC : 0) | 1029 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
1032 FW_VI_MAC_CMD_VIID(viid)); 1030 u.exact[fw_naddr]), 16);
1033 cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) | 1031 struct fw_vi_mac_exact *p;
1034 FW_CMD_LEN16(len16)); 1032 int i;
1035 1033
1036 for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) { 1034 memset(&cmd, 0, sizeof(cmd));
1037 p->valid_to_idx = 1035 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
1038 cpu_to_be16(FW_VI_MAC_CMD_VALID | 1036 FW_CMD_REQUEST |
1039 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC)); 1037 FW_CMD_WRITE |
1040 memcpy(p->macaddr, addr[i], sizeof(p->macaddr)); 1038 (free ? FW_CMD_EXEC : 0) |
1041 } 1039 FW_VI_MAC_CMD_VIID(viid));
1040 cmd.freemacs_to_len16 =
1041 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
1042 FW_CMD_LEN16(len16));
1043
1044 for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
1045 p->valid_to_idx = cpu_to_be16(
1046 FW_VI_MAC_CMD_VALID |
1047 FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
1048 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
1049 }
1050
1051
1052 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
1053 sleep_ok);
1054 if (ret && ret != -ENOMEM)
1055 break;
1042 1056
1043 ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok); 1057 for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
1044 if (ret) 1058 u16 index = FW_VI_MAC_CMD_IDX_GET(
1045 return ret; 1059 be16_to_cpu(p->valid_to_idx));
1046 1060
1047 for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) { 1061 if (idx)
1048 u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx)); 1062 idx[offset+i] =
1049 1063 (index >= FW_CLS_TCAM_NUM_ENTRIES
1050 if (idx) 1064 ? 0xffff
1051 idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES 1065 : index);
1052 ? 0xffff 1066 if (index < FW_CLS_TCAM_NUM_ENTRIES)
1053 : index); 1067 nfilters++;
1054 if (index < FW_CLS_TCAM_NUM_ENTRIES) 1068 else if (hash)
1055 ret++; 1069 *hash |= (1ULL << hash_mac_addr(addr[offset+i]));
1056 else if (hash) 1070 }
1057 *hash |= (1 << hash_mac_addr(addr[i])); 1071
1072 free = false;
1073 offset += fw_naddr;
1074 rem -= fw_naddr;
1058 } 1075 }
1076
1077 /*
1078 * If there were no errors or we merely ran out of room in our MAC
1079 * address arena, return the number of filters actually written.
1080 */
1081 if (ret == 0 || ret == -ENOMEM)
1082 ret = nfilters;
1059 return ret; 1083 return ret;
1060} 1084}
1061 1085
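
The rewritten t4vf_alloc_mac_filt() above accepts up to FW_CLS_TCAM_NUM_ENTRIES addresses but can fit only ARRAY_SIZE(cmd.u.exact) of them into one mailbox command, so it now walks the list in chunks, tolerating -ENOMEM (exact-match table full; the overflow goes into the hash) and reporting how many filters were really written. The chunking skeleton, abstracted into a hypothetical helper with the per-command capacity of 7 hard-coded for illustration:

/* Process naddr items in batches no larger than the per-command
 * capacity, advancing an offset and shrinking the remainder each
 * pass; a "table full" result is non-fatal and the walk goes on. */
static int program_in_chunks(unsigned int naddr,
			     int (*send)(unsigned int off, unsigned int n))
{
	unsigned int offset, rem = naddr;

	for (offset = 0; offset < naddr; ) {
		unsigned int chunk = rem < 7 ? rem : 7;
		int ret = send(offset, chunk);

		if (ret && ret != -ENOMEM)
			return ret;
		offset += chunk;
		rem -= chunk;
	}
	return 0;
}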
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4686c3983fc3..4d62f7bfa036 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
31 31
32char e1000_driver_name[] = "e1000"; 32char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
34#define DRV_VERSION "7.3.21-k6-NAPI" 34#define DRV_VERSION "7.3.21-k8-NAPI"
35const char e1000_driver_version[] = DRV_VERSION; 35const char e1000_driver_version[] = DRV_VERSION;
36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation."; 36static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
37 37
@@ -485,9 +485,6 @@ void e1000_down(struct e1000_adapter *adapter)
485 struct net_device *netdev = adapter->netdev; 485 struct net_device *netdev = adapter->netdev;
486 u32 rctl, tctl; 486 u32 rctl, tctl;
487 487
488 /* signal that we're down so the interrupt handler does not
489 * reschedule our watchdog timer */
490 set_bit(__E1000_DOWN, &adapter->flags);
491 488
492 /* disable receives in the hardware */ 489 /* disable receives in the hardware */
493 rctl = er32(RCTL); 490 rctl = er32(RCTL);
@@ -508,6 +505,13 @@ void e1000_down(struct e1000_adapter *adapter)
508 505
509 e1000_irq_disable(adapter); 506 e1000_irq_disable(adapter);
510 507
508 /*
509 * Setting DOWN must be after irq_disable to prevent
510 * a screaming interrupt. Setting DOWN also prevents
511 * timers and tasks from rescheduling.
512 */
513 set_bit(__E1000_DOWN, &adapter->flags);
514
511 del_timer_sync(&adapter->tx_fifo_stall_timer); 515 del_timer_sync(&adapter->tx_fifo_stall_timer);
512 del_timer_sync(&adapter->watchdog_timer); 516 del_timer_sync(&adapter->watchdog_timer);
513 del_timer_sync(&adapter->phy_info_timer); 517 del_timer_sync(&adapter->phy_info_timer);
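
The e1000_down() fix above is purely about ordering. With __E1000_DOWN set while the interrupt was still enabled, the handler could see the flag and bail out without servicing the source, leaving a level-triggered line screaming; setting the flag only after e1000_irq_disable() closes that window, while the del_timer_sync() calls that follow still observe DOWN and so nothing rearms. A condensed sketch, with e1000_quiesce() as a hypothetical name wrapping the driver-internal calls from the hunk:

#include "e1000.h"	/* driver-internal types and helpers */

static void e1000_quiesce(struct e1000_adapter *adapter)
{
	e1000_irq_disable(adapter);		  /* 1: mask the source */
	set_bit(__E1000_DOWN, &adapter->flags);	  /* 2: stop rescheduling */
	del_timer_sync(&adapter->watchdog_timer); /* 3: wait out timers */
}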
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7be8dc..3d0af08483a1 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -400,6 +400,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
400 skb_arr_rq1[index] = netdev_alloc_skb(dev, 400 skb_arr_rq1[index] = netdev_alloc_skb(dev,
401 EHEA_L_PKT_SIZE); 401 EHEA_L_PKT_SIZE);
402 if (!skb_arr_rq1[index]) { 402 if (!skb_arr_rq1[index]) {
403 ehea_info("Unable to allocate enough skbs in the array\n");
403 pr->rq1_skba.os_skbs = fill_wqes - i; 404 pr->rq1_skba.os_skbs = fill_wqes - i;
404 break; 405 break;
405 } 406 }
@@ -422,13 +423,20 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
422 struct net_device *dev = pr->port->netdev; 423 struct net_device *dev = pr->port->netdev;
423 int i; 424 int i;
424 425
425 for (i = 0; i < pr->rq1_skba.len; i++) { 426 if (nr_rq1a > pr->rq1_skba.len) {
427 ehea_error("NR_RQ1A bigger than skb array len\n");
428 return;
429 }
430
431 for (i = 0; i < nr_rq1a; i++) {
426 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 432 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
427 if (!skb_arr_rq1[i]) 433 if (!skb_arr_rq1[i]) {
434 ehea_info("Not enough memory to allocate skb array\n");
428 break; 435 break;
436 }
429 } 437 }
430 /* Ring doorbell */ 438 /* Ring doorbell */
431 ehea_update_rq1a(pr->qp, nr_rq1a); 439 ehea_update_rq1a(pr->qp, i);
432} 440}
433 441
434static int ehea_refill_rq_def(struct ehea_port_res *pr, 442static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -735,8 +743,10 @@ static int ehea_proc_rwqes(struct net_device *dev,
735 743
736 skb = netdev_alloc_skb(dev, 744 skb = netdev_alloc_skb(dev,
737 EHEA_L_PKT_SIZE); 745 EHEA_L_PKT_SIZE);
738 if (!skb) 746 if (!skb) {
747 ehea_info("Not enough memory to allocate skb\n");
739 break; 748 break;
749 }
740 } 750 }
741 skb_copy_to_linear_data(skb, ((char *)cqe) + 64, 751 skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
742 cqe->num_bytes_transfered - 4); 752 cqe->num_bytes_transfered - 4);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bccd6d0..52a7c86af663 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@ static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
258 258
259 /* Baud Rate Error Correction x 10000 */ 259 /* Baud Rate Error Correction x 10000 */
260 u32 rate_err_array[] = { 260 u32 rate_err_array[] = {
261 0000, 0625, 1250, 1875, 261 0, 625, 1250, 1875,
262 2500, 3125, 3750, 4375, 262 2500, 3125, 3750, 4375,
263 5000, 5625, 6250, 6875, 263 5000, 5625, 6250, 6875,
264 7500, 8125, 8750, 9375, 264 7500, 8125, 8750, 9375,
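[Editor's note] The sh_sir change is subtler than it looks: a C integer literal with a leading zero is octal, so 0625 is 6*64 + 2*8 + 5 = 405, not 625, and the zero-padded entries silently corrupted the correction table (0625 happens to be valid octal; a digit 8 or 9 would at least have failed to compile). A standalone demonstration:

#include <stdio.h>

int main(void)
{
	/* A leading zero selects octal: 0625 == 405 decimal. */
	printf("%d %d\n", 0625, 625);	/* prints: 405 625 */
	return 0;
}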
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index b68eee2414c2..7a7e18ba278a 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,6 +289,10 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
290 dev_cap->bf_reg_size = 1 << (field & 0x1f); 290 dev_cap->bf_reg_size = 1 << (field & 0x1f);
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) {
293 mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
294 field = 3;
295 }
292 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 296 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
293 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 297 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
294 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 298 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
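[Editor's note] The mlx4 hunk guards against a bogus firmware answer: if the reported log2 number of BlueFlame registers implies more registers than fit in one page, the value is overridden with a known-safe 3 before being expanded with a shift. The check restated as a sketch (1UL keeps the probe shift defined on 64-bit even for large field values):

static int sane_bf_regs_per_page(struct mlx4_dev *dev, int field,
				 int bf_reg_size)
{
	if ((1UL << (field & 0x3f)) > (PAGE_SIZE / bf_reg_size)) {
		mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n",
			  field & 0x1f);
		field = 3;
	}
	return 1 << (field & 0x3f);
}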
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b47440..03a1d280105f 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation. 2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. 3 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
4 * 4 *
5 * This code was derived from the Intel e1000e Linux driver. 5 * This code was derived from the Intel e1000e Linux driver.
6 * 6 *
@@ -2464,8 +2464,8 @@ static void __exit pch_gbe_exit_module(void)
2464module_init(pch_gbe_init_module); 2464module_init(pch_gbe_init_module);
2465module_exit(pch_gbe_exit_module); 2465module_exit(pch_gbe_exit_module);
2466 2466
2467MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver"); 2467MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
2468MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>"); 2468MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
2469MODULE_LICENSE("GPL"); 2469MODULE_LICENSE("GPL");
2470MODULE_VERSION(DRV_VERSION); 2470MODULE_VERSION(DRV_VERSION);
2471MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id); 2471MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146fc560..ef0996a0eaaa 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
434 .err = "using default of " 434 .err = "using default of "
435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD), 435 __MODULE_STRING(PCH_GBE_DEFAULT_TXD),
436 .def = PCH_GBE_DEFAULT_TXD, 436 .def = PCH_GBE_DEFAULT_TXD,
437 .arg = { .r = { .min = PCH_GBE_MIN_TXD } }, 437 .arg = { .r = { .min = PCH_GBE_MIN_TXD,
438 .arg = { .r = { .max = PCH_GBE_MAX_TXD } } 438 .max = PCH_GBE_MAX_TXD } }
439 }; 439 };
440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring; 440 struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
441 tx_ring->count = TxDescriptors; 441 tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
450 .err = "using default of " 450 .err = "using default of "
451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD), 451 __MODULE_STRING(PCH_GBE_DEFAULT_RXD),
452 .def = PCH_GBE_DEFAULT_RXD, 452 .def = PCH_GBE_DEFAULT_RXD,
453 .arg = { .r = { .min = PCH_GBE_MIN_RXD } }, 453 .arg = { .r = { .min = PCH_GBE_MIN_RXD,
454 .arg = { .r = { .max = PCH_GBE_MAX_RXD } } 454 .max = PCH_GBE_MAX_RXD } }
455 }; 455 };
456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring; 456 struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
457 rx_ring->count = RxDescriptors; 457 rx_ring->count = RxDescriptors;
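[Editor's note] Both pch_gbe_param hunks fix the same C gotcha: when designated initializers name the same member twice, the later one replaces the earlier one, so the first `.arg = { .r = { .min = ... } }` was silently discarded and only `.max` ever took effect (GCC's -Woverride-init warns about this). A small standalone demonstration:

#include <stdio.h>

struct range { int min, max; };
struct opt { struct range r; };

int main(void)
{
	/* Duplicate designator: the second .r wins, .min is lost. */
	struct opt bad = { .r = { .min = 80 }, .r = { .max = 4096 } };
	/* One initializer sets both fields, as in the fixed driver. */
	struct opt good = { .r = { .min = 80, .max = 4096 } };

	printf("bad:  min=%d max=%d\n", bad.r.min, bad.r.max);
	printf("good: min=%d max=%d\n", good.r.min, good.r.max);
	return 0;	/* bad: min=0 max=4096; good: min=80 max=4096 */
}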
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f0bd1a1aba3a..e8b9c53c304b 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
30#include <linux/ethtool.h> 30#include <linux/ethtool.h>
31#include <linux/phy.h> 31#include <linux/phy.h>
32#include <linux/marvell_phy.h> 32#include <linux/marvell_phy.h>
33#include <linux/of.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/irq.h> 36#include <asm/irq.h>
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37 38
39#define MII_MARVELL_PHY_PAGE 22
40
38#define MII_M1011_IEVENT 0x13 41#define MII_M1011_IEVENT 0x13
39#define MII_M1011_IEVENT_CLEAR 0x0000 42#define MII_M1011_IEVENT_CLEAR 0x0000
40 43
@@ -80,7 +83,6 @@
80#define MII_88E1121_PHY_LED_CTRL 16 83#define MII_88E1121_PHY_LED_CTRL 16
81#define MII_88E1121_PHY_LED_PAGE 3 84#define MII_88E1121_PHY_LED_PAGE 3
82#define MII_88E1121_PHY_LED_DEF 0x0030 85#define MII_88E1121_PHY_LED_DEF 0x0030
83#define MII_88E1121_PHY_PAGE 22
84 86
85#define MII_M1011_PHY_STATUS 0x11 87#define MII_M1011_PHY_STATUS 0x11
86#define MII_M1011_PHY_STATUS_1000 0x8000 88#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -186,13 +188,94 @@ static int marvell_config_aneg(struct phy_device *phydev)
186 return 0; 188 return 0;
187} 189}
188 190
191#ifdef CONFIG_OF_MDIO
192/*
193 * Set and/or override some configuration registers based on the
194 * marvell,reg-init property stored in the of_node for the phydev.
195 *
196 * marvell,reg-init = <reg-page reg mask value>,...;
197 *
198 * There may be one or more sets of <reg-page reg mask value>:
199 *
200 * reg-page: which register bank to use.
201 * reg: the register.
202 * mask: if non-zero, ANDed with existing register value.
 203 * value: ORed with the masked value and written to the register.
204 *
205 */
206static int marvell_of_reg_init(struct phy_device *phydev)
207{
208 const __be32 *paddr;
209 int len, i, saved_page, current_page, page_changed, ret;
210
211 if (!phydev->dev.of_node)
212 return 0;
213
214 paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
215 if (!paddr || len < (4 * sizeof(*paddr)))
216 return 0;
217
218 saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
219 if (saved_page < 0)
220 return saved_page;
221 page_changed = 0;
222 current_page = saved_page;
223
224 ret = 0;
225 len /= sizeof(*paddr);
226 for (i = 0; i < len - 3; i += 4) {
227 u16 reg_page = be32_to_cpup(paddr + i);
228 u16 reg = be32_to_cpup(paddr + i + 1);
229 u16 mask = be32_to_cpup(paddr + i + 2);
230 u16 val_bits = be32_to_cpup(paddr + i + 3);
231 int val;
232
233 if (reg_page != current_page) {
234 current_page = reg_page;
235 page_changed = 1;
236 ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
237 if (ret < 0)
238 goto err;
239 }
240
241 val = 0;
242 if (mask) {
243 val = phy_read(phydev, reg);
244 if (val < 0) {
245 ret = val;
246 goto err;
247 }
248 val &= mask;
249 }
250 val |= val_bits;
251
252 ret = phy_write(phydev, reg, val);
253 if (ret < 0)
254 goto err;
255
256 }
257err:
258 if (page_changed) {
259 i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
260 if (ret == 0)
261 ret = i;
262 }
263 return ret;
264}
265#else
266static int marvell_of_reg_init(struct phy_device *phydev)
267{
268 return 0;
269}
270#endif /* CONFIG_OF_MDIO */
271
189static int m88e1121_config_aneg(struct phy_device *phydev) 272static int m88e1121_config_aneg(struct phy_device *phydev)
190{ 273{
191 int err, oldpage, mscr; 274 int err, oldpage, mscr;
192 275
193 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 276 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
194 277
195 err = phy_write(phydev, MII_88E1121_PHY_PAGE, 278 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
196 MII_88E1121_PHY_MSCR_PAGE); 279 MII_88E1121_PHY_MSCR_PAGE);
197 if (err < 0) 280 if (err < 0)
198 return err; 281 return err;
@@ -218,7 +301,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
218 return err; 301 return err;
219 } 302 }
220 303
221 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 304 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
222 305
223 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 306 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
224 if (err < 0) 307 if (err < 0)
@@ -229,11 +312,11 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
229 if (err < 0) 312 if (err < 0)
230 return err; 313 return err;
231 314
232 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 315 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
233 316
234 phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE); 317 phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
235 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF); 318 phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
236 phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 319 phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
237 320
238 err = genphy_config_aneg(phydev); 321 err = genphy_config_aneg(phydev);
239 322
@@ -244,9 +327,9 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
244{ 327{
245 int err, oldpage, mscr; 328 int err, oldpage, mscr;
246 329
247 oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE); 330 oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
248 331
249 err = phy_write(phydev, MII_88E1121_PHY_PAGE, 332 err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
250 MII_88E1121_PHY_MSCR_PAGE); 333 MII_88E1121_PHY_MSCR_PAGE);
251 if (err < 0) 334 if (err < 0)
252 return err; 335 return err;
@@ -258,7 +341,7 @@ static int m88e1318_config_aneg(struct phy_device *phydev)
258 if (err < 0) 341 if (err < 0)
259 return err; 342 return err;
260 343
261 err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage); 344 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
262 if (err < 0) 345 if (err < 0)
263 return err; 346 return err;
264 347
@@ -368,6 +451,9 @@ static int m88e1111_config_init(struct phy_device *phydev)
368 return err; 451 return err;
369 } 452 }
370 453
454 err = marvell_of_reg_init(phydev);
455 if (err < 0)
456 return err;
371 457
372 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 458 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
373 if (err < 0) 459 if (err < 0)
@@ -398,7 +484,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
398 int err; 484 int err;
399 485
400 /* Change address */ 486 /* Change address */
401 err = phy_write(phydev, 0x16, 0x0002); 487 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
402 if (err < 0) 488 if (err < 0)
403 return err; 489 return err;
404 490
@@ -408,7 +494,7 @@ static int m88e1118_config_init(struct phy_device *phydev)
408 return err; 494 return err;
409 495
410 /* Change address */ 496 /* Change address */
411 err = phy_write(phydev, 0x16, 0x0003); 497 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
412 if (err < 0) 498 if (err < 0)
413 return err; 499 return err;
414 500
@@ -420,8 +506,42 @@ static int m88e1118_config_init(struct phy_device *phydev)
420 if (err < 0) 506 if (err < 0)
421 return err; 507 return err;
422 508
509 err = marvell_of_reg_init(phydev);
510 if (err < 0)
511 return err;
512
423 /* Reset address */ 513 /* Reset address */
424 err = phy_write(phydev, 0x16, 0x0); 514 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
515 if (err < 0)
516 return err;
517
518 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
519 if (err < 0)
520 return err;
521
522 return 0;
523}
524
525static int m88e1149_config_init(struct phy_device *phydev)
526{
527 int err;
528
529 /* Change address */
530 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
531 if (err < 0)
532 return err;
533
534 /* Enable 1000 Mbit */
535 err = phy_write(phydev, 0x15, 0x1048);
536 if (err < 0)
537 return err;
538
539 err = marvell_of_reg_init(phydev);
540 if (err < 0)
541 return err;
542
543 /* Reset address */
544 err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
425 if (err < 0) 545 if (err < 0)
426 return err; 546 return err;
427 547
@@ -491,6 +611,10 @@ static int m88e1145_config_init(struct phy_device *phydev)
491 } 611 }
492 } 612 }
493 613
614 err = marvell_of_reg_init(phydev);
615 if (err < 0)
616 return err;
617
494 return 0; 618 return 0;
495} 619}
496 620
@@ -685,6 +809,19 @@ static struct phy_driver marvell_drivers[] = {
685 .driver = { .owner = THIS_MODULE }, 809 .driver = { .owner = THIS_MODULE },
686 }, 810 },
687 { 811 {
812 .phy_id = MARVELL_PHY_ID_88E1149R,
813 .phy_id_mask = MARVELL_PHY_ID_MASK,
814 .name = "Marvell 88E1149R",
815 .features = PHY_GBIT_FEATURES,
816 .flags = PHY_HAS_INTERRUPT,
817 .config_init = &m88e1149_config_init,
818 .config_aneg = &m88e1118_config_aneg,
819 .read_status = &genphy_read_status,
820 .ack_interrupt = &marvell_ack_interrupt,
821 .config_intr = &marvell_config_intr,
822 .driver = { .owner = THIS_MODULE },
823 },
824 {
688 .phy_id = MARVELL_PHY_ID_88E1240, 825 .phy_id = MARVELL_PHY_ID_88E1240,
689 .phy_id_mask = MARVELL_PHY_ID_MASK, 826 .phy_id_mask = MARVELL_PHY_ID_MASK,
690 .name = "Marvell 88E1240", 827 .name = "Marvell 88E1240",
@@ -735,6 +872,7 @@ static struct mdio_device_id __maybe_unused marvell_tbl[] = {
735 { 0x01410e10, 0xfffffff0 }, 872 { 0x01410e10, 0xfffffff0 },
736 { 0x01410cb0, 0xfffffff0 }, 873 { 0x01410cb0, 0xfffffff0 },
737 { 0x01410cd0, 0xfffffff0 }, 874 { 0x01410cd0, 0xfffffff0 },
875 { 0x01410e50, 0xfffffff0 },
738 { 0x01410e30, 0xfffffff0 }, 876 { 0x01410e30, 0xfffffff0 },
739 { 0x01410e90, 0xfffffff0 }, 877 { 0x01410e90, 0xfffffff0 },
740 { } 878 { }
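[Editor's note] The heart of the new marvell_of_reg_init() is a per-tuple read-modify-write: each <reg-page reg mask value> cell group optionally switches the register page, then writes (old & mask) | value, a zero mask skipping the read entirely. A hypothetical device-tree entry such as `marvell,reg-init = <3 0x10 0 0x5577>;` would thus select page 3 and write 0x5577 to register 0x10 outright. The per-tuple update, restated in isolation:

/* One <page reg mask value> tuple, page handling omitted; a zero
 * mask means "write value outright", as in marvell_of_reg_init(). */
static int apply_reg_init_tuple(struct phy_device *phydev,
				u16 reg, u16 mask, u16 value)
{
	int val = 0;

	if (mask) {
		val = phy_read(phydev, reg);
		if (val < 0)
			return val;
		val &= mask;
	}

	return phy_write(phydev, reg, val | value);
}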
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d0416a..39659976a1ac 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2584,16 +2584,16 @@ ppp_create_interface(struct net *net, int unit, int *retp)
2584 */ 2584 */
2585 dev_net_set(dev, net); 2585 dev_net_set(dev, net);
2586 2586
2587 ret = -EEXIST;
2588 mutex_lock(&pn->all_ppp_mutex); 2587 mutex_lock(&pn->all_ppp_mutex);
2589 2588
2590 if (unit < 0) { 2589 if (unit < 0) {
2591 unit = unit_get(&pn->units_idr, ppp); 2590 unit = unit_get(&pn->units_idr, ppp);
2592 if (unit < 0) { 2591 if (unit < 0) {
2593 *retp = unit; 2592 ret = unit;
2594 goto out2; 2593 goto out2;
2595 } 2594 }
2596 } else { 2595 } else {
2596 ret = -EEXIST;
2597 if (unit_find(&pn->units_idr, unit)) 2597 if (unit_find(&pn->units_idr, unit))
2598 goto out2; /* unit already exists */ 2598 goto out2; /* unit already exists */
2599 /* 2599 /*
@@ -2668,10 +2668,10 @@ static void ppp_shutdown_interface(struct ppp *ppp)
2668 ppp->closing = 1; 2668 ppp->closing = 1;
2669 ppp_unlock(ppp); 2669 ppp_unlock(ppp);
2670 unregister_netdev(ppp->dev); 2670 unregister_netdev(ppp->dev);
2671 unit_put(&pn->units_idr, ppp->file.index);
2671 } else 2672 } else
2672 ppp_unlock(ppp); 2673 ppp_unlock(ppp);
2673 2674
2674 unit_put(&pn->units_idr, ppp->file.index);
2675 ppp->file.dead = 1; 2675 ppp->file.dead = 1;
2676 ppp->owner = NULL; 2676 ppp->owner = NULL;
2677 wake_up_interruptible(&ppp->file.rwait); 2677 wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2859,7 @@ static void __exit ppp_cleanup(void)
2859 * by holding all_ppp_mutex 2859 * by holding all_ppp_mutex
2860 */ 2860 */
2861 2861
2862/* associate pointer with specified number */ 2862static int __unit_alloc(struct idr *p, void *ptr, int n)
2863static int unit_set(struct idr *p, void *ptr, int n)
2864{ 2863{
2865 int unit, err; 2864 int unit, err;
2866 2865
@@ -2871,10 +2870,24 @@ again:
2871 } 2870 }
2872 2871
2873 err = idr_get_new_above(p, ptr, n, &unit); 2872 err = idr_get_new_above(p, ptr, n, &unit);
2874 if (err == -EAGAIN) 2873 if (err < 0) {
2875 goto again; 2874 if (err == -EAGAIN)
2875 goto again;
2876 return err;
2877 }
2878
2879 return unit;
2880}
2881
2882/* associate pointer with specified number */
2883static int unit_set(struct idr *p, void *ptr, int n)
2884{
2885 int unit;
2876 2886
2877 if (unit != n) { 2887 unit = __unit_alloc(p, ptr, n);
2888 if (unit < 0)
2889 return unit;
2890 else if (unit != n) {
2878 idr_remove(p, unit); 2891 idr_remove(p, unit);
2879 return -EINVAL; 2892 return -EINVAL;
2880 } 2893 }
@@ -2885,19 +2898,7 @@ again:
2885/* get new free unit number and associate pointer with it */ 2898/* get new free unit number and associate pointer with it */
2886static int unit_get(struct idr *p, void *ptr) 2899static int unit_get(struct idr *p, void *ptr)
2887{ 2900{
2888 int unit, err; 2901 return __unit_alloc(p, ptr, 0);
2889
2890again:
2891 if (!idr_pre_get(p, GFP_KERNEL)) {
2892 printk(KERN_ERR "PPP: No free memory for idr\n");
2893 return -ENOMEM;
2894 }
2895
2896 err = idr_get_new_above(p, ptr, 0, &unit);
2897 if (err == -EAGAIN)
2898 goto again;
2899
2900 return unit;
2901} 2902}
2902 2903
2903/* put unit number back to a pool */ 2904/* put unit number back to a pool */
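[Editor's note] The ppp_generic refactor folds the idr_pre_get()/idr_get_new_above() retry dance into __unit_alloc(): idr_pre_get() preallocates under GFP_KERNEL, but a concurrent allocator can consume the preallocation, hence the -EAGAIN loop. One subtlety worth spelling out: idr_get_new_above(p, ptr, n, ...) returns the first free id *at or above* n, so unit_set(), which must claim exactly n, has to detect a mismatch and release the id it was handed. Restated as a sketch mirroring unit_set():

/* Claim exactly id n, or fail; mirrors unit_set() above. */
static int claim_exact_unit(struct idr *p, void *ptr, int n)
{
	int unit = __unit_alloc(p, ptr, n);	/* first free id >= n */

	if (unit < 0)
		return unit;
	if (unit != n) {
		idr_remove(p, unit);	/* n was taken; undo our claim */
		return -EINVAL;
	}
	return unit;
}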
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe55a31..528eaef5308f 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@ static const u32 default_msg =
62/* NETIF_MSG_PKTDATA | */ 62/* NETIF_MSG_PKTDATA | */
63 NETIF_MSG_HW | NETIF_MSG_WOL | 0; 63 NETIF_MSG_HW | NETIF_MSG_WOL | 0;
64 64
65static int debug = 0x00007fff; /* defaults above */ 65static int debug = -1; /* defaults above */
66module_param(debug, int, 0); 66module_param(debug, int, 0664);
67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 67MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
68 68
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int qlge_irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(qlge_irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, 0664);
74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static int qlge_mpi_coredump; 76static int qlge_mpi_coredump;
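[Editor's note] Both qlge changes correct the same misuse: the third argument of module_param() is the sysfs permission mode for /sys/module/.../parameters, not a default value. Passing MSIX_IRQ (0) merely made qlge_irq_type invisible in sysfs; the default comes from the variable's C initializer. The corrected idiom:

static int qlge_irq_type = MSIX_IRQ;	/* C initializer is the default */
module_param(qlge_irq_type, int, 0664);	/* 0664: sysfs file mode */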
diff --git a/drivers/net/tile/Makefile b/drivers/net/tile/Makefile
new file mode 100644
index 000000000000..f634f142cab4
--- /dev/null
+++ b/drivers/net/tile/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for the TILE on-chip networking support.
3#
4
5obj-$(CONFIG_TILE_NET) += tile_net.o
6ifdef CONFIG_TILEGX
7tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o
8else
9tile_net-objs := tilepro.o
10endif
diff --git a/drivers/net/tile/tilepro.c b/drivers/net/tile/tilepro.c
new file mode 100644
index 000000000000..0e6bac5ec65b
--- /dev/null
+++ b/drivers/net/tile/tilepro.c
@@ -0,0 +1,2406 @@
1/*
2 * Copyright 2010 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <linux/sched.h>
19#include <linux/kernel.h> /* printk() */
20#include <linux/slab.h> /* kmalloc() */
21#include <linux/errno.h> /* error codes */
22#include <linux/types.h> /* size_t */
23#include <linux/interrupt.h>
24#include <linux/in.h>
25#include <linux/netdevice.h> /* struct device, and other headers */
26#include <linux/etherdevice.h> /* eth_type_trans */
27#include <linux/skbuff.h>
28#include <linux/ioctl.h>
29#include <linux/cdev.h>
30#include <linux/hugetlb.h>
31#include <linux/in6.h>
32#include <linux/timer.h>
33#include <linux/io.h>
34#include <asm/checksum.h>
35#include <asm/homecache.h>
36
37#include <hv/drv_xgbe_intf.h>
38#include <hv/drv_xgbe_impl.h>
39#include <hv/hypervisor.h>
40#include <hv/netio_intf.h>
41
42/* For TSO */
43#include <linux/ip.h>
44#include <linux/tcp.h>
45
46
47/* There is no singlethread_cpu, so schedule work on the current cpu. */
48#define singlethread_cpu -1
49
50
51/*
52 * First, "tile_net_init_module()" initializes all four "devices" which
53 * can be used by linux.
54 *
55 * Then, "ifconfig DEVICE up" calls "tile_net_open()", which analyzes
56 * the network cpus, then uses "tile_net_open_aux()" to initialize
57 * LIPP/LEPP, and then uses "tile_net_open_inner()" to register all
58 * the tiles, provide buffers to LIPP, allow ingress to start, and
59 * turn on hypervisor interrupt handling (and NAPI) on all tiles.
60 *
61 * If registration fails due to the link being down, then "retry_work"
62 * is used to keep calling "tile_net_open_inner()" until it succeeds.
63 *
64 * If "ifconfig DEVICE down" is called, it uses "tile_net_stop()" to
65 * stop egress, drain the LIPP buffers, unregister all the tiles, stop
66 * LIPP/LEPP, and wipe the LEPP queue.
67 *
68 * We start out with the ingress interrupt enabled on each CPU. When
69 * this interrupt fires, we disable it, and call "napi_schedule()".
70 * This will cause "tile_net_poll()" to be called, which will pull
71 * packets from the netio queue, filtering them out, or passing them
72 * to "netif_receive_skb()". If our budget is exhausted, we will
73 * return, knowing we will be called again later. Otherwise, we
74 * reenable the ingress interrupt, and call "napi_complete()".
75 *
76 *
77 * NOTE: The use of "native_driver" ensures that EPP exists, and that
78 * "epp_sendv" is legal, and that "LIPP" is being used.
79 *
80 * NOTE: Failing to free completions for an arbitrarily long time
81 * (which is defined to be illegal) does in fact cause bizarre
82 * problems. The "egress_timer" helps prevent this from happening.
83 *
84 * NOTE: The egress code can be interrupted by the interrupt handler.
85 */
86
87
88/* HACK: Allow use of "jumbo" packets. */
89/* This should be 1500 if "jumbo" is not set in LIPP. */
90/* This should be at most 10226 (10240 - 14) if "jumbo" is set in LIPP. */
91/* ISSUE: This has not been thoroughly tested (except at 1500). */
92#define TILE_NET_MTU 1500
93
94/* HACK: Define to support GSO. */
95/* ISSUE: This may actually hurt performance of the TCP blaster. */
96/* #define TILE_NET_GSO */
97
98/* Define this to collapse "duplicate" acks. */
99/* #define IGNORE_DUP_ACKS */
100
101/* HACK: Define this to verify incoming packets. */
102/* #define TILE_NET_VERIFY_INGRESS */
103
104/* Use 3000 to enable the Linux Traffic Control (QoS) layer, else 0. */
105#define TILE_NET_TX_QUEUE_LEN 0
106
107/* Define to dump packets (prints out the whole packet on tx and rx). */
108/* #define TILE_NET_DUMP_PACKETS */
109
110/* Define to enable debug spew (all PDEBUG's are enabled). */
111/* #define TILE_NET_DEBUG */
112
113
114/* Define to activate paranoia checks. */
115/* #define TILE_NET_PARANOIA */
116
117/* Default transmit lockup timeout period, in jiffies. */
118#define TILE_NET_TIMEOUT (5 * HZ)
119
120/* Default retry interval for bringing up the NetIO interface, in jiffies. */
121#define TILE_NET_RETRY_INTERVAL (5 * HZ)
122
123/* Number of ports (xgbe0, xgbe1, gbe0, gbe1). */
124#define TILE_NET_DEVS 4
125
126
127
128/* Paranoia. */
129#if NET_IP_ALIGN != LIPP_PACKET_PADDING
130#error "NET_IP_ALIGN must match LIPP_PACKET_PADDING."
131#endif
132
133
134/* Debug print. */
135#ifdef TILE_NET_DEBUG
136#define PDEBUG(fmt, args...) net_printk(fmt, ## args)
137#else
138#define PDEBUG(fmt, args...)
139#endif
140
141
142MODULE_AUTHOR("Tilera");
143MODULE_LICENSE("GPL");
144
145
146#define IS_MULTICAST(mac_addr) \
147 (((u8 *)(mac_addr))[0] & 0x01)
148
149#define IS_BROADCAST(mac_addr) \
150 (((u16 *)(mac_addr))[0] == 0xffff)
151
152
153/*
154 * Queue of incoming packets for a specific cpu and device.
155 *
156 * Includes a pointer to the "system" data, and the actual "user" data.
157 */
158struct tile_netio_queue {
159 netio_queue_impl_t *__system_part;
160 netio_queue_user_impl_t __user_part;
161
162};
163
164
165/*
166 * Statistics counters for a specific cpu and device.
167 */
168struct tile_net_stats_t {
169 u32 rx_packets;
170 u32 rx_bytes;
171 u32 tx_packets;
172 u32 tx_bytes;
173};
174
175
176/*
177 * Info for a specific cpu and device.
178 *
179 * ISSUE: There is a "dev" pointer in "napi" as well.
180 */
181struct tile_net_cpu {
182 /* The NAPI struct. */
183 struct napi_struct napi;
184 /* Packet queue. */
185 struct tile_netio_queue queue;
186 /* Statistics. */
187 struct tile_net_stats_t stats;
188 /* ISSUE: Is this needed? */
189 bool napi_enabled;
 190 /* True if this tile has successfully registered with the IPP. */
191 bool registered;
192 /* True if the link was down last time we tried to register. */
193 bool link_down;
194 /* True if "egress_timer" is scheduled. */
195 bool egress_timer_scheduled;
196 /* Number of small sk_buffs which must still be provided. */
197 unsigned int num_needed_small_buffers;
198 /* Number of large sk_buffs which must still be provided. */
199 unsigned int num_needed_large_buffers;
200 /* A timer for handling egress completions. */
201 struct timer_list egress_timer;
202};
203
204
205/*
206 * Info for a specific device.
207 */
208struct tile_net_priv {
209 /* Our network device. */
210 struct net_device *dev;
211 /* The actual egress queue. */
212 lepp_queue_t *epp_queue;
213 /* Protects "epp_queue->cmd_tail" and "epp_queue->comp_tail" */
214 spinlock_t cmd_lock;
215 /* Protects "epp_queue->comp_head". */
216 spinlock_t comp_lock;
217 /* The hypervisor handle for this interface. */
218 int hv_devhdl;
219 /* The intr bit mask that IDs this device. */
220 u32 intr_id;
221 /* True iff "tile_net_open_aux()" has succeeded. */
222 int partly_opened;
223 /* True iff "tile_net_open_inner()" has succeeded. */
224 int fully_opened;
225 /* Effective network cpus. */
226 struct cpumask network_cpus_map;
227 /* Number of network cpus. */
228 int network_cpus_count;
229 /* Credits per network cpu. */
230 int network_cpus_credits;
231 /* Network stats. */
232 struct net_device_stats stats;
233 /* For NetIO bringup retries. */
234 struct delayed_work retry_work;
235 /* Quick access to per cpu data. */
236 struct tile_net_cpu *cpu[NR_CPUS];
237};
238
239
240/*
241 * The actual devices (xgbe0, xgbe1, gbe0, gbe1).
242 */
243static struct net_device *tile_net_devs[TILE_NET_DEVS];
244
245/*
246 * The "tile_net_cpu" structures for each device.
247 */
248static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe0);
249static DEFINE_PER_CPU(struct tile_net_cpu, hv_xgbe1);
250static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe0);
251static DEFINE_PER_CPU(struct tile_net_cpu, hv_gbe1);
252
253
254/*
255 * True if "network_cpus" was specified.
256 */
257static bool network_cpus_used;
258
259/*
260 * The actual cpus in "network_cpus".
261 */
262static struct cpumask network_cpus_map;
263
264
265
266#ifdef TILE_NET_DEBUG
267/*
268 * printk with extra stuff.
269 *
270 * We print the CPU we're running in brackets.
271 */
272static void net_printk(char *fmt, ...)
273{
274 int i;
275 int len;
276 va_list args;
277 static char buf[256];
278
279 len = sprintf(buf, "tile_net[%2.2d]: ", smp_processor_id());
280 va_start(args, fmt);
281 i = vscnprintf(buf + len, sizeof(buf) - len - 1, fmt, args);
282 va_end(args);
283 buf[255] = '\0';
284 pr_notice(buf);
285}
286#endif
287
288
289#ifdef TILE_NET_DUMP_PACKETS
290/*
291 * Dump a packet.
292 */
293static void dump_packet(unsigned char *data, unsigned long length, char *s)
294{
295 unsigned long i;
296 static unsigned int count;
297
298 pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
299 data, length, s, count++);
300
301 pr_info("\n");
302
303 for (i = 0; i < length; i++) {
304 if ((i & 0xf) == 0)
305 sprintf(buf, "%8.8lx:", i);
306 sprintf(buf + strlen(buf), " %2.2x", data[i]);
307 if ((i & 0xf) == 0xf || i == length - 1)
308 pr_info("%s\n", buf);
309 }
310}
311#endif
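[Editor's note] As quoted, dump_packet() formats into buf without ever declaring it, so this file would not compile with TILE_NET_DUMP_PACKETS defined. A corrected sketch needs only a local scratch line; 16 bytes per row at 3 characters each plus the offset prefix stays well under 128:

static void dump_packet(unsigned char *data, unsigned long length, char *s)
{
	unsigned long i;
	char buf[128];		/* scratch for one hex-dump row */
	static unsigned int count;

	pr_info("dump_packet(data %p, length 0x%lx s %s count 0x%x)\n",
		data, length, s, count++);

	for (i = 0; i < length; i++) {
		if ((i & 0xf) == 0)
			sprintf(buf, "%8.8lx:", i);
		sprintf(buf + strlen(buf), " %2.2x", data[i]);
		if ((i & 0xf) == 0xf || i == length - 1)
			pr_info("%s\n", buf);
	}
}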
312
313
314/*
315 * Provide support for the __netio_fastio1() swint
316 * (see <hv/drv_xgbe_intf.h> for how it is used).
317 *
318 * The fastio swint2 call may clobber all the caller-saved registers.
319 * It rarely clobbers memory, but we allow for the possibility in
320 * the signature just to be on the safe side.
321 *
322 * Also, gcc doesn't seem to allow an input operand to be
323 * clobbered, so we fake it with dummy outputs.
324 *
325 * This function can't be static because of the way it is declared
326 * in the netio header.
327 */
328inline int __netio_fastio1(u32 fastio_index, u32 arg0)
329{
330 long result, clobber_r1, clobber_r10;
331 asm volatile("swint2"
332 : "=R00" (result),
333 "=R01" (clobber_r1), "=R10" (clobber_r10)
334 : "R10" (fastio_index), "R01" (arg0)
335 : "memory", "r2", "r3", "r4",
336 "r5", "r6", "r7", "r8", "r9",
337 "r11", "r12", "r13", "r14",
338 "r15", "r16", "r17", "r18", "r19",
339 "r20", "r21", "r22", "r23", "r24",
340 "r25", "r26", "r27", "r28", "r29");
341 return result;
342}
343
344
345/*
346 * Provide a linux buffer to LIPP.
347 */
348static void tile_net_provide_linux_buffer(struct tile_net_cpu *info,
349 void *va, bool small)
350{
351 struct tile_netio_queue *queue = &info->queue;
352
353 /* Convert "va" and "small" to "linux_buffer_t". */
354 unsigned int buffer = ((unsigned int)(__pa(va) >> 7) << 1) + small;
355
356 __netio_fastio_free_buffer(queue->__user_part.__fastio_index, buffer);
357}
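[Editor's note] The conversion above relies on the buffer being 128-byte aligned: a 38-bit client physical address with its seven always-zero low bits dropped fits in 31 bits, leaving bit 0 of the 32-bit handle free for the small/large flag. For example, pa 0x2000000080 with small == 1 encodes to (0x2000000080 >> 7) << 1 | 1 == 0x80000003, and (0x80000003 >> 1) << 7 recovers the address. Both directions as helpers:

static inline unsigned int encode_linux_buffer(phys_addr_t pa, bool small)
{
	return ((unsigned int)(pa >> 7) << 1) | small;	/* bit 0 = small */
}

static inline phys_addr_t decode_linux_buffer(unsigned int buffer)
{
	return (phys_addr_t)(buffer >> 1) << 7;		/* flag dropped */
}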
358
359
360/*
361 * Provide a linux buffer for LIPP.
362 */
363static bool tile_net_provide_needed_buffer(struct tile_net_cpu *info,
364 bool small)
365{
366 /* ISSUE: What should we use here? */
367 unsigned int large_size = NET_IP_ALIGN + TILE_NET_MTU + 100;
368
 369 /* Round up to avoid "false sharing" with the last cache line. */
370 unsigned int buffer_size =
371 (((small ? LIPP_SMALL_PACKET_SIZE : large_size) +
372 CHIP_L2_LINE_SIZE() - 1) & -CHIP_L2_LINE_SIZE());
373
374 /*
375 * ISSUE: Since CPAs are 38 bits, and we can only encode the
376 * high 31 bits in a "linux_buffer_t", the low 7 bits must be
377 * zero, and thus, we must align the actual "va" mod 128.
378 */
379 const unsigned long align = 128;
380
381 struct sk_buff *skb;
382 void *va;
383
384 struct sk_buff **skb_ptr;
385
386 /* Note that "dev_alloc_skb()" adds NET_SKB_PAD more bytes, */
387 /* and also "reserves" that many bytes. */
388 /* ISSUE: Can we "share" the NET_SKB_PAD bytes with "skb_ptr"? */
389 int len = sizeof(*skb_ptr) + align + buffer_size;
390
391 while (1) {
392
393 /* Allocate (or fail). */
394 skb = dev_alloc_skb(len);
395 if (skb == NULL)
396 return false;
397
398 /* Make room for a back-pointer to 'skb'. */
399 skb_reserve(skb, sizeof(*skb_ptr));
400
401 /* Make sure we are aligned. */
402 skb_reserve(skb, -(long)skb->data & (align - 1));
403
404 /* This address is given to IPP. */
405 va = skb->data;
406
407 if (small)
408 break;
409
410 /* ISSUE: This has never been observed! */
411 /* Large buffers must not span a huge page. */
412 if (((((long)va & ~HPAGE_MASK) + 1535) & HPAGE_MASK) == 0)
413 break;
414 pr_err("Leaking unaligned linux buffer at %p.\n", va);
415 }
416
417 /* Skip two bytes to satisfy LIPP assumptions. */
418 /* Note that this aligns IP on a 16 byte boundary. */
419 /* ISSUE: Do this when the packet arrives? */
420 skb_reserve(skb, NET_IP_ALIGN);
421
422 /* Save a back-pointer to 'skb'. */
423 skb_ptr = va - sizeof(*skb_ptr);
424 *skb_ptr = skb;
425
426 /* Invalidate the packet buffer. */
427 if (!hash_default)
428 __inv_buffer(skb->data, buffer_size);
429
430 /* Make sure "skb_ptr" has been flushed. */
431 __insn_mf();
432
433#ifdef TILE_NET_PARANOIA
434#if CHIP_HAS_CBOX_HOME_MAP()
435 if (hash_default) {
436 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)va);
437 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
438 panic("Non-coherent ingress buffer!");
439 }
440#endif
441#endif
442
443 /* Provide the new buffer. */
444 tile_net_provide_linux_buffer(info, va, small);
445
446 return true;
447}
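[Editor's note] The second skb_reserve() above uses a compact round-up idiom: for a power-of-two align, -p & (align - 1) is exactly the padding that advances p to the next boundary, and 0 when p is already aligned. For instance, with align == 128 and skb->data at ...0x1048, -0x1048 & 0x7f == 0x38, and 0x1048 + 0x38 == 0x1080. As a helper:

/* Padding needed to round p up to a power-of-two alignment. */
static inline unsigned long pad_to_align(unsigned long p,
					 unsigned long align)
{
	return -p & (align - 1);
}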
448
449
450/*
451 * Provide linux buffers for LIPP.
452 */
453static void tile_net_provide_needed_buffers(struct tile_net_cpu *info)
454{
455 while (info->num_needed_small_buffers != 0) {
456 if (!tile_net_provide_needed_buffer(info, true))
457 goto oops;
458 info->num_needed_small_buffers--;
459 }
460
461 while (info->num_needed_large_buffers != 0) {
462 if (!tile_net_provide_needed_buffer(info, false))
463 goto oops;
464 info->num_needed_large_buffers--;
465 }
466
467 return;
468
469oops:
470
471 /* Add a description to the page allocation failure dump. */
472 pr_notice("Could not provide a linux buffer to LIPP.\n");
473}
474
475
476/*
477 * Grab some LEPP completions, and store them in "comps", of size
478 * "comps_size", and return the number of completions which were
479 * stored, so the caller can free them.
480 *
481 * If "pending" is not NULL, it will be set to true if there might
482 * still be some pending completions caused by this tile, else false.
483 */
484static unsigned int tile_net_lepp_grab_comps(struct net_device *dev,
485 struct sk_buff *comps[],
486 unsigned int comps_size,
487 bool *pending)
488{
489 struct tile_net_priv *priv = netdev_priv(dev);
490
491 lepp_queue_t *eq = priv->epp_queue;
492
493 unsigned int n = 0;
494
495 unsigned int comp_head;
496 unsigned int comp_busy;
497 unsigned int comp_tail;
498
499 spin_lock(&priv->comp_lock);
500
501 comp_head = eq->comp_head;
502 comp_busy = eq->comp_busy;
503 comp_tail = eq->comp_tail;
504
505 while (comp_head != comp_busy && n < comps_size) {
506 comps[n++] = eq->comps[comp_head];
507 LEPP_QINC(comp_head);
508 }
509
510 if (pending != NULL)
511 *pending = (comp_head != comp_tail);
512
513 eq->comp_head = comp_head;
514
515 spin_unlock(&priv->comp_lock);
516
517 return n;
518}
519
520
521/*
522 * Make sure the egress timer is scheduled.
523 *
524 * Note that we use "schedule if not scheduled" logic instead of the more
525 * obvious "reschedule" logic, because "reschedule" is fairly expensive.
526 */
527static void tile_net_schedule_egress_timer(struct tile_net_cpu *info)
528{
529 if (!info->egress_timer_scheduled) {
530 mod_timer_pinned(&info->egress_timer, jiffies + 1);
531 info->egress_timer_scheduled = true;
532 }
533}
534
535
536/*
537 * The "function" for "info->egress_timer".
538 *
539 * This timer will reschedule itself as long as there are any pending
540 * completions expected (on behalf of any tile).
541 *
542 * ISSUE: Realistically, will the timer ever stop scheduling itself?
543 *
544 * ISSUE: This timer is almost never actually needed, so just use a global
545 * timer that can run on any tile.
546 *
547 * ISSUE: Maybe instead track number of expected completions, and free
548 * only that many, resetting to zero if "pending" is ever false.
549 */
550static void tile_net_handle_egress_timer(unsigned long arg)
551{
552 struct tile_net_cpu *info = (struct tile_net_cpu *)arg;
553 struct net_device *dev = info->napi.dev;
554
555 struct sk_buff *olds[32];
556 unsigned int wanted = 32;
557 unsigned int i, nolds = 0;
558 bool pending;
559
560 /* The timer is no longer scheduled. */
561 info->egress_timer_scheduled = false;
562
563 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, &pending);
564
565 for (i = 0; i < nolds; i++)
566 kfree_skb(olds[i]);
567
568 /* Reschedule timer if needed. */
569 if (pending)
570 tile_net_schedule_egress_timer(info);
571}
572
573
574#ifdef IGNORE_DUP_ACKS
575
576/*
577 * Help detect "duplicate" ACKs. These are sequential packets (for a
578 * given flow) which are exactly 66 bytes long, sharing everything but
579 * ID=2@0x12, Hsum=2@0x18, Ack=4@0x2a, WinSize=2@0x30, Csum=2@0x32,
580 * Tstamps=10@0x38. The ID's are +1, the Hsum's are -1, the Ack's are
581 * +N, and the Tstamps are usually identical.
582 *
583 * NOTE: Apparently truly duplicate acks (with identical "ack" values),
584 * should not be collapsed, as they are used for some kind of flow control.
585 */
586static bool is_dup_ack(char *s1, char *s2, unsigned int len)
587{
588 int i;
589
590 unsigned long long ignorable = 0;
591
592 /* Identification. */
593 ignorable |= (1ULL << 0x12);
594 ignorable |= (1ULL << 0x13);
595
596 /* Header checksum. */
597 ignorable |= (1ULL << 0x18);
598 ignorable |= (1ULL << 0x19);
599
600 /* ACK. */
601 ignorable |= (1ULL << 0x2a);
602 ignorable |= (1ULL << 0x2b);
603 ignorable |= (1ULL << 0x2c);
604 ignorable |= (1ULL << 0x2d);
605
606 /* WinSize. */
607 ignorable |= (1ULL << 0x30);
608 ignorable |= (1ULL << 0x31);
609
610 /* Checksum. */
611 ignorable |= (1ULL << 0x32);
612 ignorable |= (1ULL << 0x33);
613
614 for (i = 0; i < len; i++, ignorable >>= 1) {
615
616 if ((ignorable & 1) || (s1[i] == s2[i]))
617 continue;
618
619#ifdef TILE_NET_DEBUG
620 /* HACK: Mention non-timestamp diffs. */
621 if (i < 0x38 && i != 0x2f &&
622 net_ratelimit())
623 pr_info("Diff at 0x%x\n", i);
624#endif
625
626 return false;
627 }
628
629#ifdef TILE_NET_NO_SUPPRESS_DUP_ACKS
630 /* HACK: Do not suppress truly duplicate ACKs. */
631 /* ISSUE: Is this actually necessary or helpful? */
632 if (s1[0x2a] == s2[0x2a] &&
633 s1[0x2b] == s2[0x2b] &&
634 s1[0x2c] == s2[0x2c] &&
635 s1[0x2d] == s2[0x2d]) {
636 return false;
637 }
638#endif
639
640 return true;
641}
642
643#endif
644
645
646
647/*
648 * Like "tile_net_handle_packets()", but just discard packets.
649 */
650static void tile_net_discard_packets(struct net_device *dev)
651{
652 struct tile_net_priv *priv = netdev_priv(dev);
653 int my_cpu = smp_processor_id();
654 struct tile_net_cpu *info = priv->cpu[my_cpu];
655 struct tile_netio_queue *queue = &info->queue;
656 netio_queue_impl_t *qsp = queue->__system_part;
657 netio_queue_user_impl_t *qup = &queue->__user_part;
658
659 while (qup->__packet_receive_read !=
660 qsp->__packet_receive_queue.__packet_write) {
661
662 int index = qup->__packet_receive_read;
663
664 int index2_aux = index + sizeof(netio_pkt_t);
665 int index2 =
666 ((index2_aux ==
667 qsp->__packet_receive_queue.__last_packet_plus_one) ?
668 0 : index2_aux);
669
670 netio_pkt_t *pkt = (netio_pkt_t *)
671 ((unsigned long) &qsp[1] + index);
672
673 /* Extract the "linux_buffer_t". */
674 unsigned int buffer = pkt->__packet.word;
675
676 /* Convert "linux_buffer_t" to "va". */
677 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
678
679 /* Acquire the associated "skb". */
680 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
681 struct sk_buff *skb = *skb_ptr;
682
683 kfree_skb(skb);
684
685 /* Consume this packet. */
686 qup->__packet_receive_read = index2;
687 }
688}
689
690
691/*
692 * Handle the next packet. Return true if "processed", false if "filtered".
693 */
694static bool tile_net_poll_aux(struct tile_net_cpu *info, int index)
695{
696 struct net_device *dev = info->napi.dev;
697
698 struct tile_netio_queue *queue = &info->queue;
699 netio_queue_impl_t *qsp = queue->__system_part;
700 netio_queue_user_impl_t *qup = &queue->__user_part;
701 struct tile_net_stats_t *stats = &info->stats;
702
703 int filter;
704
705 int index2_aux = index + sizeof(netio_pkt_t);
706 int index2 =
707 ((index2_aux ==
708 qsp->__packet_receive_queue.__last_packet_plus_one) ?
709 0 : index2_aux);
710
711 netio_pkt_t *pkt = (netio_pkt_t *)((unsigned long) &qsp[1] + index);
712
713 netio_pkt_metadata_t *metadata = NETIO_PKT_METADATA(pkt);
714
715 /* Extract the packet size. */
716 unsigned long len =
717 (NETIO_PKT_CUSTOM_LENGTH(pkt) +
718 NET_IP_ALIGN - NETIO_PACKET_PADDING);
719
720 /* Extract the "linux_buffer_t". */
721 unsigned int buffer = pkt->__packet.word;
722
723 /* Extract "small" (vs "large"). */
724 bool small = ((buffer & 1) != 0);
725
726 /* Convert "linux_buffer_t" to "va". */
727 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
728
729 /* Extract the packet data pointer. */
730 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
731 unsigned char *buf = va + NET_IP_ALIGN;
732
733#ifdef IGNORE_DUP_ACKS
734
735 static int other;
736 static int final;
737 static int keep;
738 static int skip;
739
740#endif
741
742 /* Invalidate the packet buffer. */
743 if (!hash_default)
744 __inv_buffer(buf, len);
745
746 /* ISSUE: Is this needed? */
747 dev->last_rx = jiffies;
748
749#ifdef TILE_NET_DUMP_PACKETS
750 dump_packet(buf, len, "rx");
751#endif /* TILE_NET_DUMP_PACKETS */
752
753#ifdef TILE_NET_VERIFY_INGRESS
754 if (!NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt) &&
755 NETIO_PKT_L4_CSUM_CALCULATED_M(metadata, pkt)) {
756 /*
757 * FIXME: This complains about UDP packets
758 * with a "zero" checksum (bug 6624).
759 */
760#ifdef TILE_NET_PANIC_ON_BAD
761 dump_packet(buf, len, "rx");
762 panic("Bad L4 checksum.");
763#else
764 pr_warning("Bad L4 checksum on %d byte packet.\n", len);
765#endif
766 }
767 if (!NETIO_PKT_L3_CSUM_CORRECT_M(metadata, pkt) &&
768 NETIO_PKT_L3_CSUM_CALCULATED_M(metadata, pkt)) {
769 dump_packet(buf, len, "rx");
770 panic("Bad L3 checksum.");
771 }
772 switch (NETIO_PKT_STATUS_M(metadata, pkt)) {
773 case NETIO_PKT_STATUS_OVERSIZE:
774 if (len >= 64) {
775 dump_packet(buf, len, "rx");
776 panic("Unexpected OVERSIZE.");
777 }
778 break;
779 case NETIO_PKT_STATUS_BAD:
780#ifdef TILE_NET_PANIC_ON_BAD
781 dump_packet(buf, len, "rx");
782 panic("Unexpected BAD packet.");
783#else
784 pr_warning("Unexpected BAD %d byte packet.\n", len);
785#endif
786 }
787#endif
788
789 filter = 0;
790
791 if (!(dev->flags & IFF_UP)) {
792 /* Filter packets received before we're up. */
793 filter = 1;
794 } else if (!(dev->flags & IFF_PROMISC)) {
795 /*
796 * FIXME: Implement HW multicast filter.
797 */
798 if (!IS_MULTICAST(buf) && !IS_BROADCAST(buf)) {
799 /* Filter packets not for our address. */
800 const u8 *mine = dev->dev_addr;
801 filter = compare_ether_addr(mine, buf);
802 }
803 }
804
805#ifdef IGNORE_DUP_ACKS
806
807 if (len != 66) {
808 /* FIXME: Must check "is_tcp_ack(buf, len)" somehow. */
809
810 other++;
811
812 } else if (index2 ==
813 qsp->__packet_receive_queue.__packet_write) {
814
815 final++;
816
817 } else {
818
819 netio_pkt_t *pkt2 = (netio_pkt_t *)
820 ((unsigned long) &qsp[1] + index2);
821
822 netio_pkt_metadata_t *metadata2 =
823 NETIO_PKT_METADATA(pkt2);
824
825 /* Extract the packet size. */
826 unsigned long len2 =
827 (NETIO_PKT_CUSTOM_LENGTH(pkt2) +
828 NET_IP_ALIGN - NETIO_PACKET_PADDING);
829
830 if (len2 == 66 &&
831 NETIO_PKT_FLOW_HASH_M(metadata, pkt) ==
832 NETIO_PKT_FLOW_HASH_M(metadata2, pkt2)) {
833
834 /* Extract the "linux_buffer_t". */
835 unsigned int buffer2 = pkt2->__packet.word;
836
837 /* Convert "linux_buffer_t" to "va". */
838 void *va2 =
839 __va((phys_addr_t)(buffer2 >> 1) << 7);
840
841 /* Extract the packet data pointer. */
842 /* Compare to "NETIO_PKT_CUSTOM_DATA(pkt)". */
843 unsigned char *buf2 = va2 + NET_IP_ALIGN;
844
845 /* Invalidate the packet buffer. */
846 if (!hash_default)
847 __inv_buffer(buf2, len2);
848
849 if (is_dup_ack(buf, buf2, len)) {
850 skip++;
851 filter = 1;
852 } else {
853 keep++;
854 }
855 }
856 }
857
858 if (net_ratelimit())
859 pr_info("Other %d Final %d Keep %d Skip %d.\n",
860 other, final, keep, skip);
861
862#endif
863
864 if (filter) {
865
866 /* ISSUE: Update "drop" statistics? */
867
868 tile_net_provide_linux_buffer(info, va, small);
869
870 } else {
871
872 /* Acquire the associated "skb". */
873 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
874 struct sk_buff *skb = *skb_ptr;
875
876 /* Paranoia. */
877 if (skb->data != buf)
878 panic("Corrupt linux buffer from LIPP! "
879 "VA=%p, skb=%p, skb->data=%p\n",
880 va, skb, skb->data);
881
882 /* Encode the actual packet length. */
883 skb_put(skb, len);
884
885 /* NOTE: This call also sets "skb->dev = dev". */
886 skb->protocol = eth_type_trans(skb, dev);
887
888 /* ISSUE: Discard corrupt packets? */
889 /* ISSUE: Discard packets with bad checksums? */
890
891 /* Avoid recomputing TCP/UDP checksums. */
892 if (NETIO_PKT_L4_CSUM_CORRECT_M(metadata, pkt))
893 skb->ip_summed = CHECKSUM_UNNECESSARY;
894
895 netif_receive_skb(skb);
896
897 stats->rx_packets++;
898 stats->rx_bytes += len;
899
900 if (small)
901 info->num_needed_small_buffers++;
902 else
903 info->num_needed_large_buffers++;
904 }
905
906 /* Return four credits after every fourth packet. */
907 if (--qup->__receive_credit_remaining == 0) {
908 u32 interval = qup->__receive_credit_interval;
909 qup->__receive_credit_remaining = interval;
910 __netio_fastio_return_credits(qup->__fastio_index, interval);
911 }
912
913 /* Consume this packet. */
914 qup->__packet_receive_read = index2;
915
916 return !filter;
917}
918
919
920/*
921 * Handle some packets for the given device on the current CPU.
922 *
923 * ISSUE: The "rotting packet" race condition occurs if a packet
924 * arrives after the queue appears to be empty, and before the
925 * hypervisor interrupt is re-enabled.
926 */
927static int tile_net_poll(struct napi_struct *napi, int budget)
928{
929 struct net_device *dev = napi->dev;
930 struct tile_net_priv *priv = netdev_priv(dev);
931 int my_cpu = smp_processor_id();
932 struct tile_net_cpu *info = priv->cpu[my_cpu];
933 struct tile_netio_queue *queue = &info->queue;
934 netio_queue_impl_t *qsp = queue->__system_part;
935 netio_queue_user_impl_t *qup = &queue->__user_part;
936
937 unsigned int work = 0;
938
939 while (1) {
940 int index = qup->__packet_receive_read;
941 if (index == qsp->__packet_receive_queue.__packet_write)
942 break;
943
944 if (tile_net_poll_aux(info, index)) {
945 if (++work >= budget)
946 goto done;
947 }
948 }
949
950 napi_complete(&info->napi);
951
952 /* Re-enable hypervisor interrupts. */
953 enable_percpu_irq(priv->intr_id);
954
955 /* HACK: Avoid the "rotting packet" problem. */
956 if (qup->__packet_receive_read !=
957 qsp->__packet_receive_queue.__packet_write)
958 napi_schedule(&info->napi);
959
960 /* ISSUE: Handle completions? */
961
962done:
963
964 tile_net_provide_needed_buffers(info);
965
966 return work;
967}
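[Editor's note] tile_net_poll() is the standard NAPI shape plus the extra re-check the "rotting packet" comment calls for: after napi_complete() and re-enabling the device interrupt, the queue is inspected once more, and napi_schedule() catches any packet that arrived while the interrupt was still masked. Condensed, with illustrative my_* helpers:

static int my_poll(struct napi_struct *napi, int budget)
{
	int work = 0;

	while (work < budget && my_queue_nonempty())
		work += my_process_one_packet();

	if (work < budget) {
		napi_complete(napi);
		my_enable_device_irq();
		if (my_queue_nonempty())	/* rotting packet? */
			napi_schedule(napi);
	}

	return work;
}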
968
969
970/*
971 * Handle an ingress interrupt for the given device on the current cpu.
972 */
973static irqreturn_t tile_net_handle_ingress_interrupt(int irq, void *dev_ptr)
974{
975 struct net_device *dev = (struct net_device *)dev_ptr;
976 struct tile_net_priv *priv = netdev_priv(dev);
977 int my_cpu = smp_processor_id();
978 struct tile_net_cpu *info = priv->cpu[my_cpu];
979
980 /* Disable hypervisor interrupt. */
981 disable_percpu_irq(priv->intr_id);
982
983 napi_schedule(&info->napi);
984
985 return IRQ_HANDLED;
986}
987
988
989/*
990 * One time initialization per interface.
991 */
992static int tile_net_open_aux(struct net_device *dev)
993{
994 struct tile_net_priv *priv = netdev_priv(dev);
995
996 int ret;
997 int dummy;
998 unsigned int epp_lotar;
999
1000 /*
1001 * Find out where EPP memory should be homed.
1002 */
1003 ret = hv_dev_pread(priv->hv_devhdl, 0,
1004 (HV_VirtAddr)&epp_lotar, sizeof(epp_lotar),
1005 NETIO_EPP_SHM_OFF);
1006 if (ret < 0) {
1007 pr_err("could not read epp_shm_queue lotar.\n");
1008 return -EIO;
1009 }
1010
1011 /*
1012 * Home the page on the EPP.
1013 */
1014 {
1015 int epp_home = hv_lotar_to_cpu(epp_lotar);
1016 struct page *page = virt_to_page(priv->epp_queue);
1017 homecache_change_page_home(page, 0, epp_home);
1018 }
1019
1020 /*
1021 * Register the EPP shared memory queue.
1022 */
1023 {
1024 netio_ipp_address_t ea = {
1025 .va = 0,
1026 .pa = __pa(priv->epp_queue),
1027 .pte = hv_pte(0),
1028 .size = PAGE_SIZE,
1029 };
1030 ea.pte = hv_pte_set_lotar(ea.pte, epp_lotar);
1031 ea.pte = hv_pte_set_mode(ea.pte, HV_PTE_MODE_CACHE_TILE_L3);
1032 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
1033 (HV_VirtAddr)&ea,
1034 sizeof(ea),
1035 NETIO_EPP_SHM_OFF);
1036 if (ret < 0)
1037 return -EIO;
1038 }
1039
1040 /*
1041 * Start LIPP/LEPP.
1042 */
1043 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1044 sizeof(dummy), NETIO_IPP_START_SHIM_OFF) < 0) {
1045 pr_warning("Failed to start LIPP/LEPP.\n");
1046 return -EIO;
1047 }
1048
1049 return 0;
1050}
1051
1052
1053/*
1054 * Register with hypervisor on each CPU.
1055 *
1056 * Strangely, this function does important things even if it "fails",
1057 * which is especially common if the link is not up yet. Hopefully
1058 * these things are all "harmless" if done twice!
1059 */
1060static void tile_net_register(void *dev_ptr)
1061{
1062 struct net_device *dev = (struct net_device *)dev_ptr;
1063 struct tile_net_priv *priv = netdev_priv(dev);
1064 int my_cpu = smp_processor_id();
1065 struct tile_net_cpu *info;
1066
1067 struct tile_netio_queue *queue;
1068
1069 /* Only network cpus can receive packets. */
1070 int queue_id =
1071 cpumask_test_cpu(my_cpu, &priv->network_cpus_map) ? 0 : 255;
1072
1073 netio_input_config_t config = {
1074 .flags = 0,
1075 .num_receive_packets = priv->network_cpus_credits,
1076 .queue_id = queue_id
1077 };
1078
1079 int ret = 0;
1080 netio_queue_impl_t *queuep;
1081
1082 PDEBUG("tile_net_register(queue_id %d)\n", queue_id);
1083
1084 if (!strcmp(dev->name, "xgbe0"))
1085 info = &__get_cpu_var(hv_xgbe0);
1086 else if (!strcmp(dev->name, "xgbe1"))
1087 info = &__get_cpu_var(hv_xgbe1);
1088 else if (!strcmp(dev->name, "gbe0"))
1089 info = &__get_cpu_var(hv_gbe0);
1090 else if (!strcmp(dev->name, "gbe1"))
1091 info = &__get_cpu_var(hv_gbe1);
1092 else
1093 BUG();
1094
1095 /* Initialize the egress timer. */
1096 init_timer(&info->egress_timer);
1097 info->egress_timer.data = (long)info;
1098 info->egress_timer.function = tile_net_handle_egress_timer;
1099
1100 priv->cpu[my_cpu] = info;
1101
1102 /*
1103 * Register ourselves with the IPP.
1104 */
1105 ret = hv_dev_pwrite(priv->hv_devhdl, 0,
1106 (HV_VirtAddr)&config,
1107 sizeof(netio_input_config_t),
1108 NETIO_IPP_INPUT_REGISTER_OFF);
1109 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1110 ret);
1111 if (ret < 0) {
1112 printk(KERN_DEBUG "hv_dev_pwrite NETIO_IPP_INPUT_REGISTER_OFF"
1113 " failure %d\n", ret);
1114 info->link_down = (ret == NETIO_LINK_DOWN);
1115 return;
1116 }
1117
1118 /*
1119 * Get the pointer to our queue's system part.
1120 */
1121
1122 ret = hv_dev_pread(priv->hv_devhdl, 0,
1123 (HV_VirtAddr)&queuep,
1124 sizeof(netio_queue_impl_t *),
1125 NETIO_IPP_INPUT_REGISTER_OFF);
1126 PDEBUG("hv_dev_pread(NETIO_IPP_INPUT_REGISTER_OFF) returned %d\n",
1127 ret);
1128 PDEBUG("queuep %p\n", queuep);
1129 if (ret <= 0) {
1130 /* ISSUE: Shouldn't this be a fatal error? */
1131 pr_err("hv_dev_pread NETIO_IPP_INPUT_REGISTER_OFF failure\n");
1132 return;
1133 }
1134
1135 queue = &info->queue;
1136
1137 queue->__system_part = queuep;
1138
1139 memset(&queue->__user_part, 0, sizeof(netio_queue_user_impl_t));
1140
1141 /* This is traditionally "config.num_receive_packets / 2". */
1142 queue->__user_part.__receive_credit_interval = 4;
1143 queue->__user_part.__receive_credit_remaining =
1144 queue->__user_part.__receive_credit_interval;
1145
1146 /*
1147 * Get a fastio index from the hypervisor.
1148 * ISSUE: Shouldn't this check the result?
1149 */
1150 ret = hv_dev_pread(priv->hv_devhdl, 0,
1151 (HV_VirtAddr)&queue->__user_part.__fastio_index,
1152 sizeof(queue->__user_part.__fastio_index),
1153 NETIO_IPP_GET_FASTIO_OFF);
1154 PDEBUG("hv_dev_pread(NETIO_IPP_GET_FASTIO_OFF) returned %d\n", ret);
1155
1156 netif_napi_add(dev, &info->napi, tile_net_poll, 64);
1157
1158 /* Now we are registered. */
1159 info->registered = true;
1160}
1161
1162
1163/*
1164 * Unregister with hypervisor on each CPU.
1165 */
1166static void tile_net_unregister(void *dev_ptr)
1167{
1168 struct net_device *dev = (struct net_device *)dev_ptr;
1169 struct tile_net_priv *priv = netdev_priv(dev);
1170 int my_cpu = smp_processor_id();
1171 struct tile_net_cpu *info = priv->cpu[my_cpu];
1172
1173 int ret = 0;
1174 int dummy = 0;
1175
1176 /* Do nothing if never registered. */
1177 if (info == NULL)
1178 return;
1179
1180 /* Do nothing if already unregistered. */
1181 if (!info->registered)
1182 return;
1183
1184 /*
1185 * Unregister ourselves with LIPP.
1186 */
1187 ret = hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1188 sizeof(dummy), NETIO_IPP_INPUT_UNREGISTER_OFF);
1189 PDEBUG("hv_dev_pwrite(NETIO_IPP_INPUT_UNREGISTER_OFF) returned %d\n",
1190 ret);
1191 if (ret < 0) {
1192 /* FIXME: Just panic? */
1193 pr_err("hv_dev_pwrite NETIO_IPP_INPUT_UNREGISTER_OFF"
1194 " failure %d\n", ret);
1195 }
1196
1197 /*
1198 * Discard all packets still in our NetIO queue. Hopefully,
1199 * once the unregister call is complete, there will be no
1200 * packets still in flight on the IDN.
1201 */
1202 tile_net_discard_packets(dev);
1203
1204 /* Reset state. */
1205 info->num_needed_small_buffers = 0;
1206 info->num_needed_large_buffers = 0;
1207
1208 /* Cancel egress timer. */
1209 del_timer(&info->egress_timer);
1210 info->egress_timer_scheduled = false;
1211
1212 netif_napi_del(&info->napi);
1213
1214 /* Now we are unregistered. */
1215 info->registered = false;
1216}
1217
1218
1219/*
1220 * Helper function for "tile_net_stop()".
1221 *
1222 * Also used to handle registration failure in "tile_net_open_inner()",
1223 * when "fully_opened" is known to be false, and the various extra
1224 * steps in "tile_net_stop()" are not necessary. ISSUE: It might be
1225 * simpler if we could just call "tile_net_stop()" anyway.
1226 */
1227static void tile_net_stop_aux(struct net_device *dev)
1228{
1229 struct tile_net_priv *priv = netdev_priv(dev);
1230
1231 int dummy = 0;
1232
1233 /* Unregister all tiles, so LIPP will stop delivering packets. */
1234 on_each_cpu(tile_net_unregister, (void *)dev, 1);
1235
1236 /* Stop LIPP/LEPP. */
1237 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1238 sizeof(dummy), NETIO_IPP_STOP_SHIM_OFF) < 0)
1239 panic("Failed to stop LIPP/LEPP!\n");
1240
1241 priv->partly_opened = 0;
1242}
1243
1244
1245/*
1246 * Disable ingress interrupts for the given device on the current cpu.
1247 */
1248static void tile_net_disable_intr(void *dev_ptr)
1249{
1250 struct net_device *dev = (struct net_device *)dev_ptr;
1251 struct tile_net_priv *priv = netdev_priv(dev);
1252 int my_cpu = smp_processor_id();
1253 struct tile_net_cpu *info = priv->cpu[my_cpu];
1254
1255 /* Disable hypervisor interrupt. */
1256 disable_percpu_irq(priv->intr_id);
1257
1258 /* Disable NAPI if needed. */
1259 if (info != NULL && info->napi_enabled) {
1260 napi_disable(&info->napi);
1261 info->napi_enabled = false;
1262 }
1263}
1264
1265
1266/*
1267 * Enable ingress interrupts for the given device on the current cpu.
1268 */
1269static void tile_net_enable_intr(void *dev_ptr)
1270{
1271 struct net_device *dev = (struct net_device *)dev_ptr;
1272 struct tile_net_priv *priv = netdev_priv(dev);
1273 int my_cpu = smp_processor_id();
1274 struct tile_net_cpu *info = priv->cpu[my_cpu];
1275
1276 /* Enable hypervisor interrupt. */
1277 enable_percpu_irq(priv->intr_id);
1278
1279 /* Enable NAPI. */
1280 napi_enable(&info->napi);
1281 info->napi_enabled = true;
1282}
1283
1284
1285/*
1286 * tile_net_open_inner does most of the work of bringing up the interface.
1287 * It's called from tile_net_open(), and also from tile_net_retry_open().
1288 * The return value is 0 if the interface was brought up, < 0 if
1289 * tile_net_open() should return the return value as an error, and > 0 if
1290 * tile_net_open() should return success and schedule a work item to
1291 * periodically retry the bringup.
1292 */
1293static int tile_net_open_inner(struct net_device *dev)
1294{
1295 struct tile_net_priv *priv = netdev_priv(dev);
1296 int my_cpu = smp_processor_id();
1297 struct tile_net_cpu *info;
1298 struct tile_netio_queue *queue;
1299 unsigned int irq;
1300 int i;
1301
1302 /*
1303 * First try to register just on the local CPU, and handle any
1304 * semi-expected "link down" failure specially. Note that we
1305 * do NOT call "tile_net_stop_aux()", unlike below.
1306 */
1307 tile_net_register(dev);
1308 info = priv->cpu[my_cpu];
1309 if (!info->registered) {
1310 if (info->link_down)
1311 return 1;
1312 return -EAGAIN;
1313 }
1314
1315 /*
1316 * Now register everywhere else. If any registration fails,
1317 * even for "link down" (which might not be possible), we
1318 * clean up using "tile_net_stop_aux()".
1319 */
1320 smp_call_function(tile_net_register, (void *)dev, 1);
1321 for_each_online_cpu(i) {
1322 if (!priv->cpu[i]->registered) {
1323 tile_net_stop_aux(dev);
1324 return -EAGAIN;
1325 }
1326 }
1327
1328 queue = &info->queue;
1329
1330 /*
1331 * Set the device intr bit mask.
1332 * The tile_net_register above sets per tile __intr_id.
1333 */
1334 priv->intr_id = queue->__system_part->__intr_id;
1335 BUG_ON(!priv->intr_id);
1336
1337 /*
1338 * Register the device interrupt handler.
 1339	 * The __ffs() function returns the index of the lowest set bit in
 1340	 * the interrupt bit mask, which should have exactly one bit set
 1341	 * (e.g. __ffs(0x20) is 5); that index is used as the IRQ number.
1342 */
1343 irq = __ffs(priv->intr_id);
1344 tile_irq_activate(irq, TILE_IRQ_PERCPU);
1345 BUG_ON(request_irq(irq, tile_net_handle_ingress_interrupt,
1346 0, dev->name, (void *)dev) != 0);
1347
1348 /* ISSUE: How could "priv->fully_opened" ever be "true" here? */
1349
1350 if (!priv->fully_opened) {
1351
1352 int dummy = 0;
1353
1354 /* Allocate initial buffers. */
1355
1356 int max_buffers =
1357 priv->network_cpus_count * priv->network_cpus_credits;
1358
1359 info->num_needed_small_buffers =
1360 min(LIPP_SMALL_BUFFERS, max_buffers);
1361
1362 info->num_needed_large_buffers =
1363 min(LIPP_LARGE_BUFFERS, max_buffers);
1364
1365 tile_net_provide_needed_buffers(info);
1366
1367 if (info->num_needed_small_buffers != 0 ||
1368 info->num_needed_large_buffers != 0)
1369 panic("Insufficient memory for buffer stack!");
1370
1371 /* Start LIPP/LEPP and activate "ingress" at the shim. */
1372 if (hv_dev_pwrite(priv->hv_devhdl, 0, (HV_VirtAddr)&dummy,
1373 sizeof(dummy), NETIO_IPP_INPUT_INIT_OFF) < 0)
1374 panic("Failed to activate the LIPP Shim!\n");
1375
1376 priv->fully_opened = 1;
1377 }
1378
1379 /* On each tile, enable the hypervisor to trigger interrupts. */
1380 /* ISSUE: Do this before starting LIPP/LEPP? */
1381 on_each_cpu(tile_net_enable_intr, (void *)dev, 1);
1382
1383 /* Start our transmit queue. */
1384 netif_start_queue(dev);
1385
1386 return 0;
1387}
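
/*
 * Illustrative sketch (not part of the original file): how the callers
 * are expected to consume the tri-state result of tile_net_open_inner()
 * described above.  The helper name is hypothetical; everything it
 * calls is the driver's own.
 */
static int example_bring_up(struct net_device *dev)
{
        struct tile_net_priv *priv = netdev_priv(dev);
        int rc = tile_net_open_inner(dev);

        if (rc == 0)
                netif_carrier_on(dev);          /* fully up */
        else if (rc > 0)                        /* "link down": retry later */
                schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
                                         TILE_NET_RETRY_INTERVAL);
        return (rc > 0) ? 0 : rc;               /* > 0 still reports success */
}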
1388
1389
1390/*
1391 * Called periodically to retry bringing up the NetIO interface,
1392 * if it doesn't come up cleanly during tile_net_open().
1393 */
1394static void tile_net_open_retry(struct work_struct *w)
1395{
1396 struct delayed_work *dw =
1397 container_of(w, struct delayed_work, work);
1398
1399 struct tile_net_priv *priv =
1400 container_of(dw, struct tile_net_priv, retry_work);
1401
1402 /*
1403 * Try to bring the NetIO interface up. If it fails, reschedule
1404 * ourselves to try again later; otherwise, tell Linux we now have
1405 * a working link. ISSUE: What if the return value is negative?
1406 */
1407 if (tile_net_open_inner(priv->dev))
1408 schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
1409 TILE_NET_RETRY_INTERVAL);
1410 else
1411 netif_carrier_on(priv->dev);
1412}
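
/*
 * The double container_of() above can be collapsed into one helper; a
 * sketch, assuming the same embedding (a delayed_work named retry_work
 * inside tile_net_priv) and the stock to_delayed_work() accessor:
 */
static struct tile_net_priv *example_work_to_priv(struct work_struct *w)
{
        return container_of(to_delayed_work(w),
                            struct tile_net_priv, retry_work);
}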
1413
1414
1415/*
1416 * Called when a network interface is made active.
1417 *
1418 * Returns 0 on success, negative value on failure.
1419 *
1420 * The open entry point is called when a network interface is made
1421 * active by the system (IFF_UP). At this point all resources needed
1422 * for transmit and receive operations are allocated, the interrupt
1423 * handler is registered with the OS, the watchdog timer is started,
1424 * and the stack is notified that the interface is ready.
1425 *
1426 * If the actual link is not available yet, then we tell Linux that
1427 * we have no carrier, and we keep checking until the link comes up.
1428 */
1429static int tile_net_open(struct net_device *dev)
1430{
1431 int ret = 0;
1432 struct tile_net_priv *priv = netdev_priv(dev);
1433
1434 /*
1435 * We rely on priv->partly_opened to tell us if this is the
1436 * first time this interface is being brought up. If it is
1437 * set, the IPP was already initialized and should not be
1438 * initialized again.
1439 */
1440 if (!priv->partly_opened) {
1441
1442 int count;
1443 int credits;
1444
1445 /* Initialize LIPP/LEPP, and start the Shim. */
1446 ret = tile_net_open_aux(dev);
1447 if (ret < 0) {
1448 pr_err("tile_net_open_aux failed: %d\n", ret);
1449 return ret;
1450 }
1451
1452 /* Analyze the network cpus. */
1453
1454 if (network_cpus_used)
1455 cpumask_copy(&priv->network_cpus_map,
1456 &network_cpus_map);
1457 else
1458 cpumask_copy(&priv->network_cpus_map, cpu_online_mask);
1459
1460
1461 count = cpumask_weight(&priv->network_cpus_map);
1462
 1463	 /* Limit credits to the buffers available per cpu (rounded even), applying a floor of 16. */
1464 credits = max(16, (LIPP_LARGE_BUFFERS / count) & ~1);
1465
1466 /* Apply "GBE" max limit. */
1467 /* ISSUE: Use higher limit for XGBE? */
1468 credits = min(NETIO_MAX_RECEIVE_PKTS, credits);
1469
1470 priv->network_cpus_count = count;
1471 priv->network_cpus_credits = credits;
1472
1473#ifdef TILE_NET_DEBUG
1474 pr_info("Using %d network cpus, with %d credits each\n",
1475 priv->network_cpus_count, priv->network_cpus_credits);
1476#endif
1477
1478 priv->partly_opened = 1;
1479 }
1480
1481 /*
1482 * Attempt to bring up the link.
1483 */
1484 ret = tile_net_open_inner(dev);
1485 if (ret <= 0) {
1486 if (ret == 0)
1487 netif_carrier_on(dev);
1488 return ret;
1489 }
1490
1491 /*
1492 * We were unable to bring up the NetIO interface, but we want to
1493 * try again in a little bit. Tell Linux that we have no carrier
1494 * so it doesn't try to use the interface before the link comes up
1495 * and then remember to try again later.
1496 */
1497 netif_carrier_off(dev);
1498 schedule_delayed_work_on(singlethread_cpu, &priv->retry_work,
1499 TILE_NET_RETRY_INTERVAL);
1500
1501 return 0;
1502}
1503
1504
1505/*
1506 * Disables a network interface.
1507 *
 1508	 * Returns 0; this is not allowed to fail.
 1509	 *
 1510	 * The close entry point is called when an interface is de-activated
 1511	 * by the OS. The hardware is still under the driver's control, but
 1512	 * needs to be disabled. A global MAC reset is issued to stop the
 1513	 * hardware, and all transmit and receive resources are freed.
 1514	 *
 1515	 * ISSUE: Can this be called while "tile_net_poll()" is running?
1516 */
1517static int tile_net_stop(struct net_device *dev)
1518{
1519 struct tile_net_priv *priv = netdev_priv(dev);
1520
1521 bool pending = true;
1522
1523 PDEBUG("tile_net_stop()\n");
1524
1525 /* ISSUE: Only needed if not yet fully open. */
1526 cancel_delayed_work_sync(&priv->retry_work);
1527
1528 /* Can't transmit any more. */
1529 netif_stop_queue(dev);
1530
1531 /*
1532 * Disable hypervisor interrupts on each tile.
1533 */
1534 on_each_cpu(tile_net_disable_intr, (void *)dev, 1);
1535
1536 /*
1537 * Unregister the interrupt handler.
 1538	 * The __ffs() function returns the index of the lowest set bit in
 1539	 * the interrupt bit mask, which should have exactly one bit set,
 1540	 * and that index is the IRQ number to free.
1541 */
1542 if (priv->intr_id)
1543 free_irq(__ffs(priv->intr_id), dev);
1544
1545 /*
1546 * Drain all the LIPP buffers.
1547 */
1548
1549 while (true) {
1550 int buffer;
1551
1552 /* NOTE: This should never fail. */
1553 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&buffer,
1554 sizeof(buffer), NETIO_IPP_DRAIN_OFF) < 0)
1555 break;
1556
1557 /* Stop when done. */
1558 if (buffer == 0)
1559 break;
1560
1561 {
1562 /* Convert "linux_buffer_t" to "va". */
1563 void *va = __va((phys_addr_t)(buffer >> 1) << 7);
1564
1565 /* Acquire the associated "skb". */
1566 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
1567 struct sk_buff *skb = *skb_ptr;
1568
1569 kfree_skb(skb);
1570 }
1571 }
1572
1573 /* Stop LIPP/LEPP. */
1574 tile_net_stop_aux(dev);
1575
1576
1577 priv->fully_opened = 0;
1578
1579
1580 /*
1581 * XXX: ISSUE: It appears that, in practice anyway, by the
1582 * time we get here, there are no pending completions.
1583 */
1584 while (pending) {
1585
1586 struct sk_buff *olds[32];
1587 unsigned int wanted = 32;
1588 unsigned int i, nolds = 0;
1589
1590 nolds = tile_net_lepp_grab_comps(dev, olds,
1591 wanted, &pending);
1592
1593 /* ISSUE: We have never actually seen this debug spew. */
1594 if (nolds != 0)
1595 pr_info("During tile_net_stop(), grabbed %d comps.\n",
1596 nolds);
1597
1598 for (i = 0; i < nolds; i++)
1599 kfree_skb(olds[i]);
1600 }
1601
1602
1603 /* Wipe the EPP queue. */
1604 memset(priv->epp_queue, 0, sizeof(lepp_queue_t));
1605
1606 /* Evict the EPP queue. */
1607 finv_buffer(priv->epp_queue, PAGE_SIZE);
1608
1609 return 0;
1610}
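
/*
 * Sketch of the "linux_buffer_t" decode used in the drain loop above,
 * assuming (as the shifts suggest) that bit 0 of the handle is a
 * small/large flag and the remaining bits give the physical address in
 * 128-byte units.  The helper name is illustrative only.
 */
static inline void *example_handle_to_va(unsigned int handle)
{
        phys_addr_t pa = (phys_addr_t)(handle >> 1) << 7; /* drop flag, scale */
        return __va(pa);        /* the skb pointer is stored just below this */
}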
1611
1612
1613/*
1614 * Prepare the "frags" info for the resulting LEPP command.
1615 *
1616 * If needed, flush the memory used by the frags.
1617 */
1618static unsigned int tile_net_tx_frags(lepp_frag_t *frags,
1619 struct sk_buff *skb,
1620 void *b_data, unsigned int b_len)
1621{
1622 unsigned int i, n = 0;
1623
1624 struct skb_shared_info *sh = skb_shinfo(skb);
1625
1626 phys_addr_t cpa;
1627
1628 if (b_len != 0) {
1629
1630 if (!hash_default)
1631 finv_buffer_remote(b_data, b_len);
1632
1633 cpa = __pa(b_data);
1634 frags[n].cpa_lo = cpa;
1635 frags[n].cpa_hi = cpa >> 32;
1636 frags[n].length = b_len;
1637 frags[n].hash_for_home = hash_default;
1638 n++;
1639 }
1640
1641 for (i = 0; i < sh->nr_frags; i++) {
1642
1643 skb_frag_t *f = &sh->frags[i];
1644 unsigned long pfn = page_to_pfn(f->page);
1645
1646 /* FIXME: Compute "hash_for_home" properly. */
1647 /* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
1648 int hash_for_home = hash_default;
1649
1650 /* FIXME: Hmmm. */
1651 if (!hash_default) {
1652 void *va = pfn_to_kaddr(pfn) + f->page_offset;
1653 BUG_ON(PageHighMem(f->page));
1654 finv_buffer_remote(va, f->size);
1655 }
1656
1657 cpa = ((phys_addr_t)pfn << PAGE_SHIFT) + f->page_offset;
1658 frags[n].cpa_lo = cpa;
1659 frags[n].cpa_hi = cpa >> 32;
1660 frags[n].length = f->size;
1661 frags[n].hash_for_home = hash_for_home;
1662 n++;
1663 }
1664
1665 return n;
1666}
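
/*
 * Worked example of the cpa_lo/cpa_hi split above: a 64-bit client
 * physical address is simply cut into two 32-bit register fields
 * (the address value below is illustrative only).
 */
static void example_cpa_split(lepp_frag_t *frag)
{
        phys_addr_t cpa = 0x123456789ULL;
        frag->cpa_lo = (u32)cpa;                /* 0x23456789 */
        frag->cpa_hi = (u32)(cpa >> 32);        /* 0x00000001 */
}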
1667
1668
1669/*
1670 * This function takes "skb", consisting of a header template and a
1671 * payload, and hands it to LEPP, to emit as one or more segments,
1672 * each consisting of a possibly modified header, plus a piece of the
1673 * payload, via a process known as "tcp segmentation offload".
1674 *
1675 * Usually, "data" will contain the header template, of size "sh_len",
1676 * and "sh->frags" will contain "skb->data_len" bytes of payload, and
1677 * there will be "sh->gso_segs" segments.
1678 *
1679 * Sometimes, if "sendfile()" requires copying, we will be called with
1680 * "data" containing the header and payload, with "frags" being empty.
1681 *
1682 * In theory, "sh->nr_frags" could be 3, but in practice, it seems
1683 * that this will never actually happen.
1684 *
1685 * See "emulate_large_send_offload()" for some reference code, which
1686 * does not handle checksumming.
1687 *
1688 * ISSUE: How do we make sure that high memory DMA does not migrate?
1689 */
1690static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1691{
1692 struct tile_net_priv *priv = netdev_priv(dev);
1693 int my_cpu = smp_processor_id();
1694 struct tile_net_cpu *info = priv->cpu[my_cpu];
1695 struct tile_net_stats_t *stats = &info->stats;
1696
1697 struct skb_shared_info *sh = skb_shinfo(skb);
1698
1699 unsigned char *data = skb->data;
1700
1701 /* The ip header follows the ethernet header. */
1702 struct iphdr *ih = ip_hdr(skb);
1703 unsigned int ih_len = ih->ihl * 4;
1704
1705 /* Note that "nh == ih", by definition. */
1706 unsigned char *nh = skb_network_header(skb);
1707 unsigned int eh_len = nh - data;
1708
1709 /* The tcp header follows the ip header. */
1710 struct tcphdr *th = (struct tcphdr *)(nh + ih_len);
1711 unsigned int th_len = th->doff * 4;
1712
1713 /* The total number of header bytes. */
1714 /* NOTE: This may be less than skb_headlen(skb). */
1715 unsigned int sh_len = eh_len + ih_len + th_len;
1716
1717 /* The number of payload bytes at "skb->data + sh_len". */
1718 /* This is non-zero for sendfile() without HIGHDMA. */
1719 unsigned int b_len = skb_headlen(skb) - sh_len;
1720
1721 /* The total number of payload bytes. */
1722 unsigned int d_len = b_len + skb->data_len;
1723
1724 /* The maximum payload size. */
1725 unsigned int p_len = sh->gso_size;
1726
1727 /* The total number of segments. */
1728 unsigned int num_segs = sh->gso_segs;
1729
1730 /* The temporary copy of the command. */
1731 u32 cmd_body[(LEPP_MAX_CMD_SIZE + 3) / 4];
1732 lepp_tso_cmd_t *cmd = (lepp_tso_cmd_t *)cmd_body;
1733
1734 /* Analyze the "frags". */
1735 unsigned int num_frags =
1736 tile_net_tx_frags(cmd->frags, skb, data + sh_len, b_len);
1737
1738 /* The size of the command, including frags and header. */
1739 size_t cmd_size = LEPP_TSO_CMD_SIZE(num_frags, sh_len);
1740
1741 /* The command header. */
1742 lepp_tso_cmd_t cmd_init = {
1743 .tso = true,
1744 .header_size = sh_len,
1745 .ip_offset = eh_len,
1746 .tcp_offset = eh_len + ih_len,
1747 .payload_size = p_len,
1748 .num_frags = num_frags,
1749 };
1750
1751 unsigned long irqflags;
1752
1753 lepp_queue_t *eq = priv->epp_queue;
1754
1755 struct sk_buff *olds[4];
1756 unsigned int wanted = 4;
1757 unsigned int i, nolds = 0;
1758
1759 unsigned int cmd_head, cmd_tail, cmd_next;
1760 unsigned int comp_tail;
1761
1762 unsigned int free_slots;
1763
1764
1765 /* Paranoia. */
1766 BUG_ON(skb->protocol != htons(ETH_P_IP));
1767 BUG_ON(ih->protocol != IPPROTO_TCP);
1768 BUG_ON(skb->ip_summed != CHECKSUM_PARTIAL);
1769 BUG_ON(num_frags > LEPP_MAX_FRAGS);
1770 /*--BUG_ON(num_segs != (d_len + (p_len - 1)) / p_len); */
1771 BUG_ON(num_segs <= 1);
1772
1773
1774 /* Finish preparing the command. */
1775
1776 /* Copy the command header. */
1777 *cmd = cmd_init;
1778
1779 /* Copy the "header". */
1780 memcpy(&cmd->frags[num_frags], data, sh_len);
1781
1782
1783 /* Prefetch and wait, to minimize time spent holding the spinlock. */
1784 prefetch_L1(&eq->comp_tail);
1785 prefetch_L1(&eq->cmd_tail);
1786 mb();
1787
1788
1789 /* Enqueue the command. */
1790
1791 spin_lock_irqsave(&priv->cmd_lock, irqflags);
1792
1793 /*
1794 * Handle completions if needed to make room.
1795 * HACK: Spin until there is sufficient room.
1796 */
1797 free_slots = lepp_num_free_comp_slots(eq);
1798 if (free_slots < 1) {
1799spin:
1800 nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
1801 wanted - nolds, NULL);
1802 if (lepp_num_free_comp_slots(eq) < 1)
1803 goto spin;
1804 }
1805
1806 cmd_head = eq->cmd_head;
1807 cmd_tail = eq->cmd_tail;
1808
1809 /* NOTE: The "gotos" below are untested. */
1810
1811 /* Prepare to advance, detecting full queue. */
1812 cmd_next = cmd_tail + cmd_size;
1813 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1814 goto spin;
1815 if (cmd_next > LEPP_CMD_LIMIT) {
1816 cmd_next = 0;
1817 if (cmd_next == cmd_head)
1818 goto spin;
1819 }
1820
1821 /* Copy the command. */
1822 memcpy(&eq->cmds[cmd_tail], cmd, cmd_size);
1823
1824 /* Advance. */
1825 cmd_tail = cmd_next;
1826
1827 /* Record "skb" for eventual freeing. */
1828 comp_tail = eq->comp_tail;
1829 eq->comps[comp_tail] = skb;
1830 LEPP_QINC(comp_tail);
1831 eq->comp_tail = comp_tail;
1832
1833 /* Flush before allowing LEPP to handle the command. */
1834 __insn_mf();
1835
1836 eq->cmd_tail = cmd_tail;
1837
1838 spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
1839
1840 if (nolds == 0)
1841 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
1842
1843 /* Handle completions. */
1844 for (i = 0; i < nolds; i++)
1845 kfree_skb(olds[i]);
1846
1847 /* Update stats. */
1848 stats->tx_packets += num_segs;
1849 stats->tx_bytes += (num_segs * sh_len) + d_len;
1850
1851 /* Make sure the egress timer is scheduled. */
1852 tile_net_schedule_egress_timer(info);
1853
1854 return NETDEV_TX_OK;
1855}
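
/*
 * The queue-full test used by both egress paths, extracted as a
 * standalone predicate (a sketch, assuming the driver's byte-offset
 * head/tail semantics): the tail may never advance onto the head, and
 * a command that would run past LEPP_CMD_LIMIT wraps to offset 0.
 */
static bool example_cmd_would_block(unsigned int head, unsigned int tail,
                                    unsigned int size)
{
        unsigned int next = tail + size;

        if (tail < head && next >= head)
                return true;            /* would overrun the consumer */
        if (next > LEPP_CMD_LIMIT)
                return head == 0;       /* wrapping to 0 would hit the head */
        return false;
}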
1856
1857
1858/*
1859 * Transmit a packet (called by the kernel via "hard_start_xmit" hook).
1860 */
1861static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1862{
1863 struct tile_net_priv *priv = netdev_priv(dev);
1864 int my_cpu = smp_processor_id();
1865 struct tile_net_cpu *info = priv->cpu[my_cpu];
1866 struct tile_net_stats_t *stats = &info->stats;
1867
1868 unsigned long irqflags;
1869
1870 struct skb_shared_info *sh = skb_shinfo(skb);
1871
1872 unsigned int len = skb->len;
1873 unsigned char *data = skb->data;
1874
1875 unsigned int csum_start = skb->csum_start - skb_headroom(skb);
1876
1877 lepp_frag_t frags[LEPP_MAX_FRAGS];
1878
1879 unsigned int num_frags;
1880
1881 lepp_queue_t *eq = priv->epp_queue;
1882
1883 struct sk_buff *olds[4];
1884 unsigned int wanted = 4;
1885 unsigned int i, nolds = 0;
1886
1887 unsigned int cmd_size = sizeof(lepp_cmd_t);
1888
1889 unsigned int cmd_head, cmd_tail, cmd_next;
1890 unsigned int comp_tail;
1891
1892 lepp_cmd_t cmds[LEPP_MAX_FRAGS];
1893
1894 unsigned int free_slots;
1895
1896
1897 /*
1898 * This is paranoia, since we think that if the link doesn't come
1899 * up, telling Linux we have no carrier will keep it from trying
1900 * to transmit. If it does, though, we can't execute this routine,
1901 * since data structures we depend on aren't set up yet.
1902 */
1903 if (!info->registered)
1904 return NETDEV_TX_BUSY;
1905
1906
1907 /* Save the timestamp. */
1908 dev->trans_start = jiffies;
1909
1910
1911#ifdef TILE_NET_PARANOIA
1912#if CHIP_HAS_CBOX_HOME_MAP()
1913 if (hash_default) {
1914 HV_PTE pte = *virt_to_pte(current->mm, (unsigned long)data);
1915 if (hv_pte_get_mode(pte) != HV_PTE_MODE_CACHE_HASH_L3)
1916 panic("Non-coherent egress buffer!");
1917 }
1918#endif
1919#endif
1920
1921
1922#ifdef TILE_NET_DUMP_PACKETS
1923 /* ISSUE: Does not dump the "frags". */
1924 dump_packet(data, skb_headlen(skb), "tx");
1925#endif /* TILE_NET_DUMP_PACKETS */
1926
1927
1928 if (sh->gso_size != 0)
1929 return tile_net_tx_tso(skb, dev);
1930
1931
1932 /* Prepare the commands. */
1933
1934 num_frags = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1935
1936 for (i = 0; i < num_frags; i++) {
1937
1938 bool final = (i == num_frags - 1);
1939
1940 lepp_cmd_t cmd = {
1941 .cpa_lo = frags[i].cpa_lo,
1942 .cpa_hi = frags[i].cpa_hi,
1943 .length = frags[i].length,
1944 .hash_for_home = frags[i].hash_for_home,
1945 .send_completion = final,
1946 .end_of_packet = final
1947 };
1948
1949 if (i == 0 && skb->ip_summed == CHECKSUM_PARTIAL) {
1950 cmd.compute_checksum = 1;
1951 cmd.checksum_data.bits.start_byte = csum_start;
1952 cmd.checksum_data.bits.count = len - csum_start;
1953 cmd.checksum_data.bits.destination_byte =
1954 csum_start + skb->csum_offset;
1955 }
1956
1957 cmds[i] = cmd;
1958 }
1959
1960
1961 /* Prefetch and wait, to minimize time spent holding the spinlock. */
1962 prefetch_L1(&eq->comp_tail);
1963 prefetch_L1(&eq->cmd_tail);
1964 mb();
1965
1966
1967 /* Enqueue the commands. */
1968
1969 spin_lock_irqsave(&priv->cmd_lock, irqflags);
1970
1971 /*
1972 * Handle completions if needed to make room.
1973 * HACK: Spin until there is sufficient room.
1974 */
1975 free_slots = lepp_num_free_comp_slots(eq);
1976 if (free_slots < 1) {
1977spin:
1978 nolds += tile_net_lepp_grab_comps(dev, olds + nolds,
1979 wanted - nolds, NULL);
1980 if (lepp_num_free_comp_slots(eq) < 1)
1981 goto spin;
1982 }
1983
1984 cmd_head = eq->cmd_head;
1985 cmd_tail = eq->cmd_tail;
1986
1987 /* NOTE: The "gotos" below are untested. */
1988
1989 /* Copy the commands, or fail. */
1990 for (i = 0; i < num_frags; i++) {
1991
1992 /* Prepare to advance, detecting full queue. */
1993 cmd_next = cmd_tail + cmd_size;
1994 if (cmd_tail < cmd_head && cmd_next >= cmd_head)
1995 goto spin;
1996 if (cmd_next > LEPP_CMD_LIMIT) {
1997 cmd_next = 0;
1998 if (cmd_next == cmd_head)
1999 goto spin;
2000 }
2001
2002 /* Copy the command. */
2003 *(lepp_cmd_t *)&eq->cmds[cmd_tail] = cmds[i];
2004
2005 /* Advance. */
2006 cmd_tail = cmd_next;
2007 }
2008
2009 /* Record "skb" for eventual freeing. */
2010 comp_tail = eq->comp_tail;
2011 eq->comps[comp_tail] = skb;
2012 LEPP_QINC(comp_tail);
2013 eq->comp_tail = comp_tail;
2014
2015 /* Flush before allowing LEPP to handle the command. */
2016 __insn_mf();
2017
2018 eq->cmd_tail = cmd_tail;
2019
2020 spin_unlock_irqrestore(&priv->cmd_lock, irqflags);
2021
2022 if (nolds == 0)
2023 nolds = tile_net_lepp_grab_comps(dev, olds, wanted, NULL);
2024
2025 /* Handle completions. */
2026 for (i = 0; i < nolds; i++)
2027 kfree_skb(olds[i]);
2028
2029 /* HACK: Track "expanded" size for short packets (e.g. 42 < 60). */
2030 stats->tx_packets++;
2031 stats->tx_bytes += ((len >= ETH_ZLEN) ? len : ETH_ZLEN);
2032
2033 /* Make sure the egress timer is scheduled. */
2034 tile_net_schedule_egress_timer(info);
2035
2036 return NETDEV_TX_OK;
2037}
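
/*
 * Worked example of the checksum_data fields above, for TCP over IPv4
 * with a 14-byte Ethernet header and a 20-byte IP header (offsets are
 * illustrative, not from the original file):
 *
 *     csum_start       = 34            (the TCP header begins here)
 *     start_byte       = 34            (begin summing at the TCP header)
 *     count            = len - 34      (sum through the end of the packet)
 *     destination_byte = 34 + 16 = 50  (the TCP checksum field itself,
 *                                       since skb->csum_offset is 16)
 */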
2038
2039
2040/*
2041 * Deal with a transmit timeout.
2042 */
2043static void tile_net_tx_timeout(struct net_device *dev)
2044{
2045 PDEBUG("tile_net_tx_timeout()\n");
2046 PDEBUG("Transmit timeout at %ld, latency %ld\n", jiffies,
2047 jiffies - dev->trans_start);
2048
2049 /* XXX: ISSUE: This doesn't seem useful for us. */
2050 netif_wake_queue(dev);
2051}
2052
2053
2054/*
2055 * Ioctl commands.
2056 */
2057static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2058{
2059 return -EOPNOTSUPP;
2060}
2061
2062
2063/*
2064 * Get System Network Statistics.
2065 *
2066 * Returns the address of the device statistics structure.
2067 */
2068static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
2069{
2070 struct tile_net_priv *priv = netdev_priv(dev);
2071 u32 rx_packets = 0;
2072 u32 tx_packets = 0;
2073 u32 rx_bytes = 0;
2074 u32 tx_bytes = 0;
2075 int i;
2076
2077 for_each_online_cpu(i) {
2078 if (priv->cpu[i]) {
2079 rx_packets += priv->cpu[i]->stats.rx_packets;
2080 rx_bytes += priv->cpu[i]->stats.rx_bytes;
2081 tx_packets += priv->cpu[i]->stats.tx_packets;
2082 tx_bytes += priv->cpu[i]->stats.tx_bytes;
2083 }
2084 }
2085
2086 priv->stats.rx_packets = rx_packets;
2087 priv->stats.rx_bytes = rx_bytes;
2088 priv->stats.tx_packets = tx_packets;
2089 priv->stats.tx_bytes = tx_bytes;
2090
2091 return &priv->stats;
2092}
2093
2094
2095/*
2096 * Change the "mtu".
2097 *
2098 * The "change_mtu" method is usually not needed.
2099 * If you need it, it must be like this.
2100 */
2101static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
2102{
2103 PDEBUG("tile_net_change_mtu()\n");
2104
2105 /* Check ranges. */
2106 if ((new_mtu < 68) || (new_mtu > 1500))
2107 return -EINVAL;
2108
2109 /* Accept the value. */
2110 dev->mtu = new_mtu;
2111
2112 return 0;
2113}
2114
2115
2116/*
2117 * Change the Ethernet Address of the NIC.
2118 *
2119 * The hypervisor driver does not support changing MAC address. However,
2120 * the IPP does not do anything with the MAC address, so the address which
2121 * gets used on outgoing packets, and which is accepted on incoming packets,
2122 * is completely up to the NetIO program or kernel driver which is actually
2123 * handling them.
2124 *
2125 * Returns 0 on success, negative on failure.
2126 */
2127static int tile_net_set_mac_address(struct net_device *dev, void *p)
2128{
2129 struct sockaddr *addr = p;
2130
2131 if (!is_valid_ether_addr(addr->sa_data))
2132 return -EINVAL;
2133
2134 /* ISSUE: Note that "dev_addr" is now a pointer. */
2135 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2136
2137 return 0;
2138}
2139
2140
2141/*
2142 * Obtain the MAC address from the hypervisor.
2143 * This must be done before opening the device.
2144 */
2145static int tile_net_get_mac(struct net_device *dev)
2146{
2147 struct tile_net_priv *priv = netdev_priv(dev);
2148
2149 char hv_dev_name[32];
2150 int len;
2151
2152 __netio_getset_offset_t offset = { .word = NETIO_IPP_PARAM_OFF };
2153
2154 int ret;
2155
2156 /* For example, "xgbe0". */
2157 strcpy(hv_dev_name, dev->name);
2158 len = strlen(hv_dev_name);
2159
2160 /* For example, "xgbe/0". */
2161 hv_dev_name[len] = hv_dev_name[len - 1];
2162 hv_dev_name[len - 1] = '/';
2163 len++;
2164
2165 /* For example, "xgbe/0/native_hash". */
2166 strcpy(hv_dev_name + len, hash_default ? "/native_hash" : "/native");
2167
2168 /* Get the hypervisor handle for this device. */
2169 priv->hv_devhdl = hv_dev_open((HV_VirtAddr)hv_dev_name, 0);
2170 PDEBUG("hv_dev_open(%s) returned %d %p\n",
2171 hv_dev_name, priv->hv_devhdl, &priv->hv_devhdl);
2172 if (priv->hv_devhdl < 0) {
2173 if (priv->hv_devhdl == HV_ENODEV)
2174 printk(KERN_DEBUG "Ignoring unconfigured device %s\n",
2175 hv_dev_name);
2176 else
2177 printk(KERN_DEBUG "hv_dev_open(%s) returned %d\n",
2178 hv_dev_name, priv->hv_devhdl);
2179 return -1;
2180 }
2181
2182 /*
2183 * Read the hardware address from the hypervisor.
2184 * ISSUE: Note that "dev_addr" is now a pointer.
2185 */
2186 offset.bits.class = NETIO_PARAM;
2187 offset.bits.addr = NETIO_PARAM_MAC;
2188 ret = hv_dev_pread(priv->hv_devhdl, 0,
2189 (HV_VirtAddr)dev->dev_addr, dev->addr_len,
2190 offset.word);
2191 PDEBUG("hv_dev_pread(NETIO_PARAM_MAC) returned %d\n", ret);
2192 if (ret <= 0) {
2193 printk(KERN_DEBUG "hv_dev_pread(NETIO_PARAM_MAC) %s failed\n",
2194 dev->name);
2195 /*
2196 * Since the device is configured by the hypervisor but we
2197 * can't get its MAC address, we are most likely running
2198 * the simulator, so let's generate a random MAC address.
2199 */
2200 random_ether_addr(dev->dev_addr);
2201 }
2202
2203 return 0;
2204}
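
/*
 * Sketch of the name mangling above: the trailing digit of the Linux
 * name is moved past a '/', so "xgbe0" becomes "xgbe/0", and the suffix
 * then yields e.g. "xgbe/0/native_hash".  This assumes a single
 * trailing digit, exactly as the code above does; the helper name is
 * illustrative only.
 */
static void example_mangle(char *buf, size_t buflen, const char *name,
                           bool hash)
{
        size_t len = strlen(name);              /* e.g. 5 for "xgbe0" */

        BUG_ON(len + 14 > buflen);              /* room for suffix + NUL */
        memcpy(buf, name, len - 1);             /* "xgbe" */
        buf[len - 1] = '/';
        buf[len] = name[len - 1];               /* the trailing digit */
        buf[len + 1] = '\0';
        strcat(buf, hash ? "/native_hash" : "/native");
}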
2205
2206
2207static struct net_device_ops tile_net_ops = {
2208 .ndo_open = tile_net_open,
2209 .ndo_stop = tile_net_stop,
2210 .ndo_start_xmit = tile_net_tx,
2211 .ndo_do_ioctl = tile_net_ioctl,
2212 .ndo_get_stats = tile_net_get_stats,
2213 .ndo_change_mtu = tile_net_change_mtu,
2214 .ndo_tx_timeout = tile_net_tx_timeout,
2215 .ndo_set_mac_address = tile_net_set_mac_address
2216};
2217
2218
2219/*
2220 * The setup function.
2221 *
2222 * This uses ether_setup() to assign various fields in dev, including
2223 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
2224 */
2225static void tile_net_setup(struct net_device *dev)
2226{
2227 PDEBUG("tile_net_setup()\n");
2228
2229 ether_setup(dev);
2230
2231 dev->netdev_ops = &tile_net_ops;
2232
2233 dev->watchdog_timeo = TILE_NET_TIMEOUT;
2234
2235 /* We want lockless xmit. */
2236 dev->features |= NETIF_F_LLTX;
2237
2238 /* We support hardware tx checksums. */
2239 dev->features |= NETIF_F_HW_CSUM;
2240
2241 /* We support scatter/gather. */
2242 dev->features |= NETIF_F_SG;
2243
2244 /* We support TSO. */
2245 dev->features |= NETIF_F_TSO;
2246
2247#ifdef TILE_NET_GSO
2248 /* We support GSO. */
2249 dev->features |= NETIF_F_GSO;
2250#endif
2251
2252 if (hash_default)
2253 dev->features |= NETIF_F_HIGHDMA;
2254
2255 /* ISSUE: We should support NETIF_F_UFO. */
2256
2257 dev->tx_queue_len = TILE_NET_TX_QUEUE_LEN;
2258
2259 dev->mtu = TILE_NET_MTU;
2260}
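
/*
 * Note on NETIF_F_LLTX above: it tells the networking core not to take
 * its own per-device xmit lock, so all egress serialization in this
 * driver comes from priv->cmd_lock in tile_net_tx()/tile_net_tx_tso().
 */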
2261
2262
2263/*
2264 * Allocate the device structure, register the device, and obtain the
2265 * MAC address from the hypervisor.
2266 */
2267static struct net_device *tile_net_dev_init(const char *name)
2268{
2269 int ret;
2270 struct net_device *dev;
2271 struct tile_net_priv *priv;
2272 struct page *page;
2273
2274 /*
2275 * Allocate the device structure. This allocates "priv", calls
2276 * tile_net_setup(), and saves "name". Normally, "name" is a
2277 * template, instantiated by register_netdev(), but not for us.
2278 */
2279 dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
2280 if (!dev) {
2281 pr_err("alloc_netdev(%s) failed\n", name);
2282 return NULL;
2283 }
2284
2285 priv = netdev_priv(dev);
2286
2287 /* Initialize "priv". */
2288
2289 memset(priv, 0, sizeof(*priv));
2290
2291 /* Save "dev" for "tile_net_open_retry()". */
2292 priv->dev = dev;
2293
2294 INIT_DELAYED_WORK(&priv->retry_work, tile_net_open_retry);
2295
2296 spin_lock_init(&priv->cmd_lock);
2297 spin_lock_init(&priv->comp_lock);
2298
2299 /* Allocate "epp_queue". */
2300 BUG_ON(get_order(sizeof(lepp_queue_t)) != 0);
2301 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
2302 if (!page) {
2303 free_netdev(dev);
2304 return NULL;
2305 }
2306 priv->epp_queue = page_address(page);
2307
2308 /* Register the network device. */
2309 ret = register_netdev(dev);
2310 if (ret) {
2311 pr_err("register_netdev %s failed %d\n", dev->name, ret);
2312 free_page((unsigned long)priv->epp_queue);
2313 free_netdev(dev);
2314 return NULL;
2315 }
2316
2317 /* Get the MAC address. */
2318 ret = tile_net_get_mac(dev);
2319 if (ret < 0) {
2320 unregister_netdev(dev);
2321 free_page((unsigned long)priv->epp_queue);
2322 free_netdev(dev);
2323 return NULL;
2324 }
2325
2326 return dev;
2327}
2328
2329
2330/*
2331 * Module cleanup.
2332 */
2333static void tile_net_cleanup(void)
2334{
2335 int i;
2336
2337 for (i = 0; i < TILE_NET_DEVS; i++) {
2338 if (tile_net_devs[i]) {
2339 struct net_device *dev = tile_net_devs[i];
2340 struct tile_net_priv *priv = netdev_priv(dev);
2341 unregister_netdev(dev);
2342 finv_buffer(priv->epp_queue, PAGE_SIZE);
2343 free_page((unsigned long)priv->epp_queue);
2344 free_netdev(dev);
2345 }
2346 }
2347}
2348
2349
2350/*
2351 * Module initialization.
2352 */
2353static int tile_net_init_module(void)
2354{
2355 pr_info("Tilera IPP Net Driver\n");
2356
2357 tile_net_devs[0] = tile_net_dev_init("xgbe0");
2358 tile_net_devs[1] = tile_net_dev_init("xgbe1");
2359 tile_net_devs[2] = tile_net_dev_init("gbe0");
2360 tile_net_devs[3] = tile_net_dev_init("gbe1");
2361
2362 return 0;
2363}
2364
2365
2366#ifndef MODULE
2367/*
2368 * The "network_cpus" boot argument specifies the cpus that are dedicated
2369 * to handle ingress packets.
2370 *
 2371	 * The parameter should be in the form "network_cpus=m-n[,x-y]", where
 2372	 * m-n and x-y are ranges of cpu numbers; the listed cpus must be
 2373	 * neither dedicated cpus nor dataplane cpus.
2374 */
2375static int __init network_cpus_setup(char *str)
2376{
2377 int rc = cpulist_parse_crop(str, &network_cpus_map);
2378 if (rc != 0) {
2379 pr_warning("network_cpus=%s: malformed cpu list\n",
2380 str);
2381 } else {
2382
2383 /* Remove dedicated cpus. */
2384 cpumask_and(&network_cpus_map, &network_cpus_map,
2385 cpu_possible_mask);
2386
2387
2388 if (cpumask_empty(&network_cpus_map)) {
2389 pr_warning("Ignoring network_cpus='%s'.\n",
2390 str);
2391 } else {
2392 char buf[1024];
2393 cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
2394 pr_info("Linux network CPUs: %s\n", buf);
2395 network_cpus_used = true;
2396 }
2397 }
2398
2399 return 0;
2400}
2401__setup("network_cpus=", network_cpus_setup);
2402#endif
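
/*
 * Example boot argument (cpu numbers illustrative):
 *
 *     network_cpus=1-3,8
 *
 * cpulist_parse_crop() turns this into a mask with cpus 1, 2, 3 and 8
 * set; the mask is then intersected with cpu_possible_mask before use.
 */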
2403
2404
2405module_init(tile_net_init_module);
2406module_exit(tile_net_cleanup);
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a95586f3c5..055b87ab4f07 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@ struct ucc_geth_hardware_statistics {
 #define UCC_GETH_UTFS_INIT      512     /* Tx virtual FIFO size
                                          */
 #define UCC_GETH_UTFET_INIT     256     /* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT     512
+#define UCC_GETH_UTFTT_INIT     256     /* 1/2 utfs
+                                           due to errata */
 /* Gigabit Ethernet (1000 Mbps) */
 #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/    /* Rx virtual
                                                    FIFO size */
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94de03e..62e9e8dc8190 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2994,12 +2994,14 @@ static int hso_probe(struct usb_interface *interface,
 
         case HSO_INTF_BULK:
                 /* It's a regular bulk interface */
-                if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
-                    !disable_net)
-                        hso_dev = hso_create_net_device(interface, port_spec);
-                else
+                if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
+                        if (!disable_net)
+                                hso_dev =
+                                    hso_create_net_device(interface, port_spec);
+                } else {
                         hso_dev =
                             hso_create_bulk_serial_device(interface, port_spec);
+                }
                 if (!hso_dev)
                         goto exit;
                 break;
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad8397885..24297b274cd4 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@ norbuff:
 static int x25_asy_close(struct net_device *dev)
 {
         struct x25_asy *sl = netdev_priv(dev);
-        int err;
 
         spin_lock(&sl->lock);
         if (sl->tty)
@@ -507,10 +506,6 @@ static int x25_asy_close(struct net_device *dev)
                 netif_stop_queue(dev);
         sl->rcount = 0;
         sl->xleft  = 0;
-        err = lapb_unregister(dev);
-        if (err != LAPB_OK)
-                printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
-                       err);
         spin_unlock(&sl->lock);
         return 0;
 }
@@ -582,7 +577,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
         if (err)
                 return err;
         /* Done.  We have linked the TTY line to a channel. */
-        return sl->dev->base_addr;
+        return 0;
 }
 
 
@@ -595,6 +590,7 @@ static int x25_asy_open_tty(struct tty_struct *tty)
 static void x25_asy_close_tty(struct tty_struct *tty)
 {
         struct x25_asy *sl = tty->disc_data;
+        int err;
 
         /* First make sure we're connected. */
         if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@ static void x25_asy_close_tty(struct tty_struct *tty)
         dev_close(sl->dev);
         rtnl_unlock();
 
+        err = lapb_unregister(sl->dev);
+        if (err != LAPB_OK)
+                printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+                       err);
+
         tty->disc_data = NULL;
         sl->tty = NULL;
         x25_asy_free(sl);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index c76ea53c20ce..1a62e351ec77 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -518,7 +518,7 @@ bool ath_stoprecv(struct ath_softc *sc)
         bool stopped;
 
         spin_lock_bh(&sc->rx.rxbuflock);
-        ath9k_hw_stoppcurecv(ah);
+        ath9k_hw_abortpcurecv(ah);
         ath9k_hw_setrxfilter(ah, 0);
         stopped = ath9k_hw_stopdmarecv(ah);
 
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index 980ae70ea424..a314c2c2bfbe 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -647,7 +647,7 @@ init:
         }
 
 unlock:
-        if (err && (vif_id != -1)) {
+        if (err && (vif_id >= 0)) {
                 vif_priv->active = false;
                 bitmap_release_region(&ar->vif_bitmap, vif_id, 0);
                 ar->vifs--;
diff --git a/drivers/net/wireless/b43/sdio.c b/drivers/net/wireless/b43/sdio.c
index 9a55338d957f..09e2dfd7b175 100644
--- a/drivers/net/wireless/b43/sdio.c
+++ b/drivers/net/wireless/b43/sdio.c
@@ -163,6 +163,7 @@ static int b43_sdio_probe(struct sdio_func *func,
 err_free_ssb:
         kfree(sdio);
 err_disable_func:
+        sdio_claim_host(func);
         sdio_disable_func(func);
 err_release_host:
         sdio_release_host(func);
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index d9f51485beee..9383063d2b16 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -349,7 +349,6 @@ static struct irq_chip dino_interrupt_type = {
         .name           = "GSC-PCI",
         .unmask         = dino_unmask_irq,
         .mask           = dino_mask_irq,
-        .ack            = no_ack_irq,
 };
 
 
diff --git a/drivers/parisc/eisa.c b/drivers/parisc/eisa.c
index 1211974f55aa..e860038b0b84 100644
--- a/drivers/parisc/eisa.c
+++ b/drivers/parisc/eisa.c
@@ -186,7 +186,6 @@ static struct irq_chip eisa_interrupt_type = {
         .name           = "EISA",
         .unmask         = eisa_unmask_irq,
         .mask           = eisa_mask_irq,
-        .ack            = no_ack_irq,
 };
 
 static irqreturn_t eisa_irq(int wax_irq, void *intr_dev)
@@ -340,7 +339,7 @@ static int __init eisa_probe(struct parisc_device *dev)
         setup_irq(2, &irq2_action);
         for (i = 0; i < 16; i++) {
                 set_irq_chip_and_handler(i, &eisa_interrupt_type,
-                                         handle_level_irq);
+                                         handle_simple_irq);
         }
 
         EISA_bus = 1;
diff --git a/drivers/parisc/gsc.c b/drivers/parisc/gsc.c
index e605298e3aee..772b1939ac21 100644
--- a/drivers/parisc/gsc.c
+++ b/drivers/parisc/gsc.c
@@ -143,7 +143,6 @@ static struct irq_chip gsc_asic_interrupt_type = {
         .name           = "GSC-ASIC",
         .unmask         = gsc_asic_unmask_irq,
         .mask           = gsc_asic_mask_irq,
-        .ack            = no_ack_irq,
 };
 
 int gsc_assign_irq(struct irq_chip *type, void *data)
@@ -153,7 +152,7 @@ int gsc_assign_irq(struct irq_chip *type, void *data)
         if (irq > GSC_IRQ_MAX)
                 return NO_IRQ;
 
-        set_irq_chip_and_handler(irq, type, handle_level_irq);
+        set_irq_chip_and_handler(irq, type, handle_simple_irq);
         set_irq_chip_data(irq, data);
 
         return irq++;
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index a3120a09c43d..0327894bf235 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -669,6 +669,13 @@ printk("\n");
         DBG(KERN_DEBUG "enable_irq(%d): eoi(%p, 0x%x)\n", irq,
                         vi->eoi_addr, vi->eoi_data);
         iosapic_eoi(vi->eoi_addr, vi->eoi_data);
+}
+
+static void iosapic_eoi_irq(unsigned int irq)
+{
+        struct vector_info *vi = get_irq_chip_data(irq);
+
+        iosapic_eoi(vi->eoi_addr, vi->eoi_data);
         cpu_eoi_irq(irq);
 }
 
@@ -705,6 +712,7 @@ static struct irq_chip iosapic_interrupt_type = {
         .unmask = iosapic_unmask_irq,
         .mask   = iosapic_mask_irq,
         .ack    = cpu_ack_irq,
+        .eoi    = iosapic_eoi_irq,
 #ifdef CONFIG_SMP
         .set_affinity = iosapic_set_affinity_irq,
 #endif
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 2350e8a86eef..f2f501e5b6a0 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -64,6 +64,7 @@ static unsigned int led_diskio __read_mostly = 1;
 static unsigned int led_lanrxtx  __read_mostly = 1;
 static char lcd_text[32]         __read_mostly;
 static char lcd_text_default[32] __read_mostly;
+static int  lcd_no_led_support   __read_mostly = 0; /* KittyHawk doesn't support LED on its LCD */
 
 
 static struct workqueue_struct *led_wq;
@@ -115,7 +116,7 @@ lcd_info __attribute__((aligned(8))) __read_mostly =
         .lcd_width =            16,
         .lcd_cmd_reg_addr =     KITTYHAWK_LCD_CMD,
         .lcd_data_reg_addr =    KITTYHAWK_LCD_DATA,
-        .min_cmd_delay =        40,
+        .min_cmd_delay =        80,
         .reset_cmd1 =           0x80,
         .reset_cmd2 =           0xc0,
 };
@@ -135,6 +136,9 @@ static int start_task(void)
         /* Display the default text now */
         if (led_type == LED_HASLCD) lcd_print( lcd_text_default );
 
+        /* KittyHawk has no LED support on its LCD */
+        if (lcd_no_led_support) return 0;
+
         /* Create the work queue and queue the LED task */
         led_wq = create_singlethread_workqueue("led_wq");
         queue_delayed_work(led_wq, &led_task, 0);
@@ -248,9 +252,13 @@ static int __init led_create_procfs(void)
 
         proc_pdc_root = proc_mkdir("pdc", 0);
         if (!proc_pdc_root) return -1;
-        ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
-                               &led_proc_fops, (void *)LED_NOLCD); /* LED */
-        if (!ent) return -1;
+
+        if (!lcd_no_led_support)
+        {
+                ent = proc_create_data("led", S_IRUGO|S_IWUSR, proc_pdc_root,
+                                       &led_proc_fops, (void *)LED_NOLCD); /* LED */
+                if (!ent) return -1;
+        }
 
         if (led_type == LED_HASLCD)
         {
@@ -692,6 +700,7 @@ int __init led_init(void)
         case 0x58B:             /* KittyHawk DC2 100 (K200) */
                 printk(KERN_INFO "%s: KittyHawk-Machine (hversion 0x%x) found, "
                         "LED detection skipped.\n", __FILE__, CPU_HVERSION);
+                lcd_no_led_support = 1;
                 goto found;     /* use the preinitialized values of lcd_info */
         }
 
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index 0846dafdfff1..28241532c0fd 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -323,7 +323,6 @@ static struct irq_chip superio_interrupt_type = {
         .name           = SUPERIO,
         .unmask         = superio_unmask_irq,
         .mask           = superio_mask_irq,
-        .ack            = no_ack_irq,
 };
 
 #ifdef DEBUG_SUPERIO_INIT
@@ -354,7 +353,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
 #endif
 
         for (i = 0; i < 16; i++) {
-                set_irq_chip_and_handler(i, &superio_interrupt_type, handle_level_irq);
+                set_irq_chip_and_handler(i, &superio_interrupt_type, handle_simple_irq);
         }
 
         /*
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index f01e344cf4bd..98e6fdf34d30 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -49,6 +49,7 @@ obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
 obj-$(CONFIG_X86_VISWS) += setup-irq.o
 obj-$(CONFIG_MN10300) += setup-bus.o
 obj-$(CONFIG_MICROBLAZE) += setup-bus.o
+obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
 
 #
 # ACPI Related PCI FW Functions
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index f5c63fe9db5c..6f9350cabbd5 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2136,6 +2136,24 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82865_HB,
 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82875_HB,
                         quirk_unhide_mch_dev6);
 
+#ifdef CONFIG_TILE
+/*
+ * The Tilera TILEmpower platform needs to set the link speed
+ * to 2.5GT(Giga-Transfers)/s (Gen 1). The default link speed
+ * setting is 5GT/s (Gen 2). 0x98 is the Link Control2 PCIe
+ * capability register of the PEX8624 PCIe switch. The switch
+ * supports link speed auto negotiation, but falsely sets
+ * the link speed to 5GT/s.
+ */
+static void __devinit quirk_tile_plx_gen1(struct pci_dev *dev)
+{
+        if (tile_plx_gen1) {
+                pci_write_config_dword(dev, 0x98, 0x1);
+                mdelay(50);
+        }
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_PLX, 0x8624, quirk_tile_plx_gen1);
+#endif /* CONFIG_TILE */
 
 #ifdef CONFIG_PCI_MSI
 /* Some chipsets do not support MSI. We cannot easily rely on setting
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index 3753fd0722e7..2fe8cb8e95cd 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -70,6 +70,7 @@ void soc_pcmcia_debug(struct soc_pcmcia_socket *skt, const char *func,
                 va_end(args);
         }
 }
+EXPORT_SYMBOL(soc_pcmcia_debug);
 
 #endif
 
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 60a5a5c6b50a..d235f44fd7a3 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -81,6 +81,8 @@ MODULE_PARM_DESC(wapf, "WAPF value");
 
 static int wlan_status = 1;
 static int bluetooth_status = 1;
+static int wimax_status = -1;
+static int wwan_status = -1;
 
 module_param(wlan_status, int, 0444);
 MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
@@ -92,6 +94,16 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
92 "(0 = disabled, 1 = enabled, -1 = don't do anything). " 94 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
93 "default is 1"); 95 "default is 1");
94 96
97module_param(wimax_status, int, 0444);
98MODULE_PARM_DESC(wimax_status, "Set the wireless status on boot "
99 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
100 "default is 1");
101
102module_param(wwan_status, int, 0444);
103MODULE_PARM_DESC(wwan_status, "Set the wireless status on boot "
104 "(0 = disabled, 1 = enabled, -1 = don't do anything). "
105 "default is 1");
106
95/* 107/*
96 * Some events we use, same for all Asus 108 * Some events we use, same for all Asus
97 */ 109 */
@@ -114,6 +126,8 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
  */
 #define WL_RSTS         0x01    /* internal Wifi */
 #define BT_RSTS         0x02    /* internal Bluetooth */
+#define WM_RSTS         0x08    /* internal wimax */
+#define WW_RSTS         0x20    /* internal wwan */
 
 /* LED */
 #define METHOD_MLED             "MLED"
@@ -132,6 +146,11 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
  */
 #define METHOD_WLAN             "WLED"
 #define METHOD_BLUETOOTH        "BLED"
+
+/* WWAN and WIMAX */
+#define METHOD_WWAN             "GSMC"
+#define METHOD_WIMAX            "WMXC"
+
 #define METHOD_WL_STATUS        "RSTS"
 
 /* Brightness */
@@ -883,6 +902,64 @@ static ssize_t store_bluetooth(struct device *dev,
 }
 
 /*
+ * Wimax
+ */
+static int asus_wimax_set(struct asus_laptop *asus, int status)
+{
+        if (write_acpi_int(asus->handle, METHOD_WIMAX, !!status)) {
+                pr_warning("Error setting wimax status to %d", status);
+                return -EIO;
+        }
+        return 0;
+}
+
+static ssize_t show_wimax(struct device *dev,
+                          struct device_attribute *attr, char *buf)
+{
+        struct asus_laptop *asus = dev_get_drvdata(dev);
+
+        return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
+}
+
+static ssize_t store_wimax(struct device *dev,
+                           struct device_attribute *attr, const char *buf,
+                           size_t count)
+{
+        struct asus_laptop *asus = dev_get_drvdata(dev);
+
+        return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
+}
+
+/*
+ * Wwan
+ */
+static int asus_wwan_set(struct asus_laptop *asus, int status)
+{
+        if (write_acpi_int(asus->handle, METHOD_WWAN, !!status)) {
+                pr_warning("Error setting wwan status to %d", status);
+                return -EIO;
+        }
+        return 0;
+}
+
+static ssize_t show_wwan(struct device *dev,
+                         struct device_attribute *attr, char *buf)
+{
+        struct asus_laptop *asus = dev_get_drvdata(dev);
+
+        return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
+}
+
+static ssize_t store_wwan(struct device *dev,
+                          struct device_attribute *attr, const char *buf,
+                          size_t count)
+{
+        struct asus_laptop *asus = dev_get_drvdata(dev);
+
+        return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
+}
+
+/*
  * Display
  */
 static void asus_set_display(struct asus_laptop *asus, int value)
@@ -1202,6 +1279,8 @@ static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
 static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
 static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
                    show_bluetooth, store_bluetooth);
+static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
+static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
 static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
 static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
 static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
@@ -1212,6 +1291,8 @@ static struct attribute *asus_attributes[] = {
1212 &dev_attr_infos.attr, 1291 &dev_attr_infos.attr,
1213 &dev_attr_wlan.attr, 1292 &dev_attr_wlan.attr,
1214 &dev_attr_bluetooth.attr, 1293 &dev_attr_bluetooth.attr,
1294 &dev_attr_wimax.attr,
1295 &dev_attr_wwan.attr,
1215 &dev_attr_display.attr, 1296 &dev_attr_display.attr,
1216 &dev_attr_ledd.attr, 1297 &dev_attr_ledd.attr,
1217 &dev_attr_ls_level.attr, 1298 &dev_attr_ls_level.attr,
@@ -1239,6 +1320,13 @@ static mode_t asus_sysfs_is_visible(struct kobject *kobj,
         } else if (attr == &dev_attr_display.attr) {
                 supported = !acpi_check_handle(handle, METHOD_SWITCH_DISPLAY, NULL);
 
+        } else if (attr == &dev_attr_wimax.attr) {
+                supported =
+                        !acpi_check_handle(asus->handle, METHOD_WIMAX, NULL);
+
+        } else if (attr == &dev_attr_wwan.attr) {
+                supported = !acpi_check_handle(asus->handle, METHOD_WWAN, NULL);
+
         } else if (attr == &dev_attr_ledd.attr) {
                 supported = !acpi_check_handle(handle, METHOD_LEDD, NULL);
 
@@ -1397,7 +1485,8 @@ static int asus_laptop_get_info(struct asus_laptop *asus)
 
         /*
          * The HWRS method return informations about the hardware.
-         * 0x80 bit is for WLAN, 0x100 for Bluetooth.
+         * 0x80 bit is for WLAN, 0x100 for Bluetooth,
+         * 0x40 for WWAN, 0x10 for WIMAX.
          * The significance of others is yet to be found.
          */
         status =
@@ -1440,6 +1529,12 @@ static int __devinit asus_acpi_init(struct asus_laptop *asus)
         if (wlan_status >= 0)
                 asus_wlan_set(asus, !!wlan_status);
 
+        if (wimax_status >= 0)
+                asus_wimax_set(asus, !!wimax_status);
+
+        if (wwan_status >= 0)
+                asus_wwan_set(asus, !!wwan_status);
+
         /* Keyboard Backlight is on by default */
         if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
                 asus_kled_set(asus, 1);
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
index 462ceab93f87..0d50fbbe2478 100644
--- a/drivers/platform/x86/eeepc-wmi.c
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -298,8 +298,8 @@ static void eeepc_wmi_notify(u32 value, void *context)
         kfree(obj);
 }
 
-static int store_cpufv(struct device *dev, struct device_attribute *attr,
-                       const char *buf, size_t count)
+static ssize_t store_cpufv(struct device *dev, struct device_attribute *attr,
+                           const char *buf, size_t count)
 {
         int value;
         struct acpi_buffer input = { (acpi_size)sizeof(value), &value };
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index 1dac659b5e0c..9e05af9c41cb 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -172,6 +172,8 @@ static int hp_wmi_perform_query(int query, int write, u32 *buffer,
         bios_return = *((struct bios_return *)obj->buffer.pointer);
 
         memcpy(buffer, &bios_return.value, sizeof(bios_return.value));
+
+        kfree(obj);
         return 0;
 }
177 179
diff --git a/drivers/platform/x86/ibm_rtl.c b/drivers/platform/x86/ibm_rtl.c
index 3c2c6b91ecb3..94a114aa8e28 100644
--- a/drivers/platform/x86/ibm_rtl.c
+++ b/drivers/platform/x86/ibm_rtl.c
@@ -28,6 +28,7 @@
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/sysdev.h> 29#include <linux/sysdev.h>
30#include <linux/dmi.h> 30#include <linux/dmi.h>
31#include <linux/efi.h>
31#include <linux/mutex.h> 32#include <linux/mutex.h>
32#include <asm/bios_ebda.h> 33#include <asm/bios_ebda.h>
33 34
@@ -220,32 +221,13 @@ static void rtl_teardown_sysfs(void) {
         sysdev_class_unregister(&class_rtl);
 }
 
-static int dmi_check_cb(const struct dmi_system_id *id)
-{
-        RTL_DEBUG("found IBM server '%s'\n", id->ident);
-        return 0;
-}
-
-#define ibm_dmi_entry(NAME, TYPE)                   \
-{                                                   \
-        .ident = NAME,                              \
-        .matches = {                                \
-                DMI_MATCH(DMI_SYS_VENDOR, "IBM"),   \
-                DMI_MATCH(DMI_PRODUCT_NAME, TYPE),  \
-        },                                          \
-        .callback = dmi_check_cb                    \
-}
 
 static struct dmi_system_id __initdata ibm_rtl_dmi_table[] = {
-        ibm_dmi_entry("BladeCenter LS21", "7971"),
-        ibm_dmi_entry("BladeCenter LS22", "7901"),
-        ibm_dmi_entry("BladeCenter HS21 XM", "7995"),
-        ibm_dmi_entry("BladeCenter HS22", "7870"),
-        ibm_dmi_entry("BladeCenter HS22V", "7871"),
-        ibm_dmi_entry("System x3550 M2", "7946"),
-        ibm_dmi_entry("System x3650 M2", "7947"),
-        ibm_dmi_entry("System x3550 M3", "7944"),
-        ibm_dmi_entry("System x3650 M3", "7945"),
+        {                                                  \
+                .matches = {                               \
+                        DMI_MATCH(DMI_SYS_VENDOR, "IBM"),  \
+                },                                         \
+        },
         { }
 };
 
@@ -257,7 +239,7 @@ static int __init ibm_rtl_init(void) {
         if (force)
                 pr_warning("ibm-rtl: module loaded by force\n");
         /* first ensure that we are running on IBM HW */
-        else if (!dmi_check_system(ibm_rtl_dmi_table))
+        else if (efi_enabled || !dmi_check_system(ibm_rtl_dmi_table))
                 return -ENODEV;
 
         /* Get the address for the Extended BIOS Data Area */
@@ -302,7 +284,7 @@ static int __init ibm_rtl_init(void) {
                 RTL_DEBUG("rtl_cmd_width = %u, rtl_cmd_type = %u\n",
                         rtl_cmd_width, rtl_cmd_type);
                 addr = ioread32(&rtl_table->cmd_port_address);
-                RTL_DEBUG("addr = %#llx\n", addr);
+                RTL_DEBUG("addr = %#llx\n", (unsigned long long)addr);
                 plen = rtl_cmd_width/sizeof(char);
                 rtl_cmd_addr = rtl_port_map(addr, plen);
                 RTL_DEBUG("rtl_cmd_addr = %#llx\n", (u64)rtl_cmd_addr);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index 42a5469a2459..35278ad7e628 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -43,16 +43,18 @@ MODULE_ALIAS("wmi:B6F3EEF2-3D2F-49DC-9DE3-85BCE18C62F2");
43 43
44#define dprintk(msg...) pr_debug(DRV_PFX msg) 44#define dprintk(msg...) pr_debug(DRV_PFX msg)
45 45
46#define KEYCODE_BASE 0xD0 46#define SCANCODE_BASE 0xD0
47#define MSI_WMI_BRIGHTNESSUP KEYCODE_BASE 47#define MSI_WMI_BRIGHTNESSUP SCANCODE_BASE
48#define MSI_WMI_BRIGHTNESSDOWN (KEYCODE_BASE + 1) 48#define MSI_WMI_BRIGHTNESSDOWN (SCANCODE_BASE + 1)
49#define MSI_WMI_VOLUMEUP (KEYCODE_BASE + 2) 49#define MSI_WMI_VOLUMEUP (SCANCODE_BASE + 2)
50#define MSI_WMI_VOLUMEDOWN (KEYCODE_BASE + 3) 50#define MSI_WMI_VOLUMEDOWN (SCANCODE_BASE + 3)
51#define MSI_WMI_MUTE (SCANCODE_BASE + 4)
51static struct key_entry msi_wmi_keymap[] = { 52static struct key_entry msi_wmi_keymap[] = {
52 { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} }, 53 { KE_KEY, MSI_WMI_BRIGHTNESSUP, {KEY_BRIGHTNESSUP} },
53 { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} }, 54 { KE_KEY, MSI_WMI_BRIGHTNESSDOWN, {KEY_BRIGHTNESSDOWN} },
54 { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} }, 55 { KE_KEY, MSI_WMI_VOLUMEUP, {KEY_VOLUMEUP} },
55 { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} }, 56 { KE_KEY, MSI_WMI_VOLUMEDOWN, {KEY_VOLUMEDOWN} },
57 { KE_KEY, MSI_WMI_MUTE, {KEY_MUTE} },
56 { KE_END, 0} 58 { KE_END, 0}
57}; 59};
58static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1]; 60static ktime_t last_pressed[ARRAY_SIZE(msi_wmi_keymap) - 1];
@@ -169,7 +171,7 @@ static void msi_wmi_notify(u32 value, void *context)
169 ktime_t diff; 171 ktime_t diff;
170 cur = ktime_get_real(); 172 cur = ktime_get_real();
171 diff = ktime_sub(cur, last_pressed[key->code - 173 diff = ktime_sub(cur, last_pressed[key->code -
172 KEYCODE_BASE]); 174 SCANCODE_BASE]);
173 /* Ignore event if the same event happened in a 50 ms 175 /* Ignore event if the same event happened in a 50 ms
174 timeframe -> Key press may result in 10-20 GPEs */ 176 timeframe -> Key press may result in 10-20 GPEs */
175 if (ktime_to_us(diff) < 1000 * 50) { 177 if (ktime_to_us(diff) < 1000 * 50) {
@@ -178,7 +180,7 @@ static void msi_wmi_notify(u32 value, void *context)
178 key->code, ktime_to_us(diff)); 180 key->code, ktime_to_us(diff));
179 return; 181 return;
180 } 182 }
181 last_pressed[key->code - KEYCODE_BASE] = cur; 183 last_pressed[key->code - SCANCODE_BASE] = cur;
182 184
183 if (key->type == KE_KEY && 185 if (key->type == KE_KEY &&
184 /* Brightness is served via acpi video driver */ 186 /* Brightness is served via acpi video driver */
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 2d61186ad5a2..e8c21994b36d 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -8497,7 +8497,6 @@ static void ibm_exit(struct ibm_struct *ibm)
8497 ibm->acpi->type, 8497 ibm->acpi->type,
8498 dispatch_acpi_notify); 8498 dispatch_acpi_notify);
8499 ibm->flags.acpi_notify_installed = 0; 8499 ibm->flags.acpi_notify_installed = 0;
8500 ibm->flags.acpi_notify_installed = 0;
8501 } 8500 }
8502 8501
8503 if (ibm->flags.proc_created) { 8502 if (ibm->flags.proc_created) {
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 06f304f46e02..4276da7291b8 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,6 +135,7 @@ static const struct key_entry toshiba_acpi_keymap[] __initconst = {
135 { KE_KEY, 0x141, { KEY_BRIGHTNESSUP } }, 135 { KE_KEY, 0x141, { KEY_BRIGHTNESSUP } },
136 { KE_KEY, 0x142, { KEY_WLAN } }, 136 { KE_KEY, 0x142, { KEY_WLAN } },
137 { KE_KEY, 0x143, { KEY_PROG1 } }, 137 { KE_KEY, 0x143, { KEY_PROG1 } },
138 { KE_KEY, 0x17f, { KEY_FN } },
138 { KE_KEY, 0xb05, { KEY_PROG2 } }, 139 { KE_KEY, 0xb05, { KEY_PROG2 } },
139 { KE_KEY, 0xb06, { KEY_WWW } }, 140 { KE_KEY, 0xb06, { KEY_WWW } },
140 { KE_KEY, 0xb07, { KEY_MAIL } }, 141 { KE_KEY, 0xb07, { KEY_MAIL } },
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c
index 104b77c87ef5..aecd9a9b549f 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/x86/wmi.c
@@ -755,7 +755,7 @@ static bool guid_already_parsed(const char *guid_string)
755 struct wmi_block *wblock; 755 struct wmi_block *wblock;
756 756
757 list_for_each_entry(wblock, &wmi_block_list, list) 757 list_for_each_entry(wblock, &wmi_block_list, list)
758 if (strncmp(wblock->gblock.guid, guid_string, 16) == 0) 758 if (memcmp(wblock->gblock.guid, guid_string, 16) == 0)
759 return true; 759 return true;
760 760
761 return false; 761 return false;
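
The strncmp()-to-memcmp() change above matters because these GUIDs are 16 raw bytes, not NUL-terminated strings: strncmp() stops at the first zero byte, so two GUIDs that differ only after an embedded 0x00 would wrongly compare equal. A small standalone demonstration (the sample GUID bytes are made up):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Two 16-byte GUIDs that differ only after an embedded zero byte. */
        unsigned char a[16] = { 0xB6, 0x00, 0x11, 0x22 };
        unsigned char b[16] = { 0xB6, 0x00, 0x99, 0x22 };

        printf("strncmp: %d\n", strncmp((char *)a, (char *)b, 16)); /* 0: falsely "equal" */
        printf("memcmp:  %d\n", memcmp(a, b, 16));                  /* nonzero: differ */
        return 0;
    }
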
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f1d10c974cd4..ba521f0f0fac 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -911,7 +911,7 @@ out:
911} 911}
912 912
913/** 913/**
914 * set_consumer_device_supply: Bind a regulator to a symbolic supply 914 * set_consumer_device_supply - Bind a regulator to a symbolic supply
915 * @rdev: regulator source 915 * @rdev: regulator source
916 * @consumer_dev: device the supply applies to 916 * @consumer_dev: device the supply applies to
917 * @consumer_dev_name: dev_name() string for device supply applies to 917 * @consumer_dev_name: dev_name() string for device supply applies to
@@ -1052,7 +1052,6 @@ static struct regulator *create_regulator(struct regulator_dev *rdev,
1052 printk(KERN_WARNING 1052 printk(KERN_WARNING
1053 "%s: could not add device link %s err %d\n", 1053 "%s: could not add device link %s err %d\n",
1054 __func__, dev->kobj.name, err); 1054 __func__, dev->kobj.name, err);
1055 device_remove_file(dev, &regulator->dev_attr);
1056 goto link_name_err; 1055 goto link_name_err;
1057 } 1056 }
1058 } 1057 }
@@ -1268,13 +1267,17 @@ static int _regulator_enable(struct regulator_dev *rdev)
1268{ 1267{
1269 int ret, delay; 1268 int ret, delay;
1270 1269
1271 /* do we need to enable the supply regulator first */ 1270 if (rdev->use_count == 0) {
1272 if (rdev->supply) { 1271 /* do we need to enable the supply regulator first */
1273 ret = _regulator_enable(rdev->supply); 1272 if (rdev->supply) {
1274 if (ret < 0) { 1273 mutex_lock(&rdev->supply->mutex);
1275 printk(KERN_ERR "%s: failed to enable %s: %d\n", 1274 ret = _regulator_enable(rdev->supply);
1276 __func__, rdev_get_name(rdev), ret); 1275 mutex_unlock(&rdev->supply->mutex);
1277 return ret; 1276 if (ret < 0) {
1277 printk(KERN_ERR "%s: failed to enable %s: %d\n",
1278 __func__, rdev_get_name(rdev), ret);
1279 return ret;
1280 }
1278 } 1281 }
1279 } 1282 }
1280 1283
@@ -1313,10 +1316,12 @@ static int _regulator_enable(struct regulator_dev *rdev)
1313 if (ret < 0) 1316 if (ret < 0)
1314 return ret; 1317 return ret;
1315 1318
1316 if (delay >= 1000) 1319 if (delay >= 1000) {
1317 mdelay(delay / 1000); 1320 mdelay(delay / 1000);
1318 else if (delay) 1321 udelay(delay % 1000);
1322 } else if (delay) {
1319 udelay(delay); 1323 udelay(delay);
1324 }
1320 1325
1321 } else if (ret < 0) { 1326 } else if (ret < 0) {
1322 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n", 1327 printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
@@ -1359,6 +1364,7 @@ static int _regulator_disable(struct regulator_dev *rdev,
1359 struct regulator_dev **supply_rdev_ptr) 1364 struct regulator_dev **supply_rdev_ptr)
1360{ 1365{
1361 int ret = 0; 1366 int ret = 0;
1367 *supply_rdev_ptr = NULL;
1362 1368
1363 if (WARN(rdev->use_count <= 0, 1369 if (WARN(rdev->use_count <= 0,
1364 "unbalanced disables for %s\n", 1370 "unbalanced disables for %s\n",
@@ -2346,6 +2352,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2346 if (init_data->supply_regulator && init_data->supply_regulator_dev) { 2352 if (init_data->supply_regulator && init_data->supply_regulator_dev) {
2347 dev_err(dev, 2353 dev_err(dev,
2348 "Supply regulator specified by both name and dev\n"); 2354 "Supply regulator specified by both name and dev\n");
2355 ret = -EINVAL;
2349 goto scrub; 2356 goto scrub;
2350 } 2357 }
2351 2358
@@ -2364,6 +2371,7 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc,
2364 if (!found) { 2371 if (!found) {
2365 dev_err(dev, "Failed to find supply %s\n", 2372 dev_err(dev, "Failed to find supply %s\n",
2366 init_data->supply_regulator); 2373 init_data->supply_regulator);
2374 ret = -ENODEV;
2367 goto scrub; 2375 goto scrub;
2368 } 2376 }
2369 2377
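
Several independent fixes land in this core.c hunk; the delay change is worth spelling out. The old code rounded a ramp delay such as 1500 us down to mdelay(1), silently dropping the 500 us remainder; the new form splits the value into whole milliseconds plus the leftover microseconds. The arithmetic, as a standalone sketch with an example value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int delay_us = 1500; /* example ramp delay in microseconds */

        /* Matches the fixed code path: mdelay(delay / 1000) + udelay(delay % 1000). */
        printf("mdelay(%u) + udelay(%u)\n", delay_us / 1000, delay_us % 1000);
        return 0;
    }
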
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 4597d508a229..ecd99f59dba8 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -465,8 +465,8 @@ static struct regulator_ops mc13783_fixed_regulator_ops = {
465 .get_voltage = mc13783_fixed_regulator_get_voltage, 465 .get_voltage = mc13783_fixed_regulator_get_voltage,
466}; 466};
467 467
468int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask, 468static int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
469 u32 val) 469 u32 val)
470{ 470{
471 struct mc13783 *mc13783 = priv->mc13783; 471 struct mc13783 *mc13783 = priv->mc13783;
472 int ret; 472 int ret;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7e5892efc437..a57262a4fa6c 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -219,12 +219,12 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode)
219 return -EACCES; 219 return -EACCES;
220 220
221 status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 221 status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
222 message >> 8, 0x15 /* PB_WORD_MSB */ ); 222 message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
223 if (status >= 0) 223 if (status < 0)
224 return status; 224 return status;
225 225
226 return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 226 return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
227 message, 0x16 /* PB_WORD_LSB */ ); 227 message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
228} 228}
229 229
230/*----------------------------------------------------------------------*/ 230/*----------------------------------------------------------------------*/
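
The sign flip above is the whole twl-regulator bug fix: the old `if (status >= 0) return status;` returned after a *successful* MSB write, so the LSB half of the power-bus message was never sent. A toy model of the corrected early-return-on-error control flow (write_u8/send_word are illustrative stand-ins):

    #include <stdio.h>

    static int write_u8(unsigned char v)
    {
        printf("wrote 0x%02x\n", v);
        return 0; /* 0 = success, negative = error, as in the driver */
    }

    static int send_word(unsigned short msg)
    {
        int status = write_u8(msg >> 8); /* MSB half */
        if (status < 0)                  /* the old code bailed when status >= 0 */
            return status;
        return write_u8(msg & 0xff);     /* LSB half, now actually reached */
    }

    int main(void)
    {
        return send_word(0x1234) < 0;
    }
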
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index a5050e217150..825951b6b83f 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -635,7 +635,7 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
635 init_subchannel_id(&mchk_schid); 635 init_subchannel_id(&mchk_schid);
636 mchk_schid.sch_no = crw0->rsid; 636 mchk_schid.sch_no = crw0->rsid;
637 if (crw1) 637 if (crw1)
638 mchk_schid.ssid = (crw1->rsid >> 8) & 3; 638 mchk_schid.ssid = (crw1->rsid >> 4) & 3;
639 639
640 /* 640 /*
641 * Since we are always presented with IPI in the CRW, we have to 641 * Since we are always presented with IPI in the CRW, we have to
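
The css.c hunk corrects the shift used to extract the subchannel-set id from the second CRW's rsid: assuming the ssid occupies bits 4-5, which is what the corrected expression implies, the value must be shifted right by 4 before masking with 3, not by 8. A quick check with a made-up rsid value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int rsid = 0x35; /* example: ssid bits (4-5) set to 0b11 */

        printf("(rsid >> 8) & 3 = %u\n", (rsid >> 8) & 3); /* old shift: always 0 here */
        printf("(rsid >> 4) & 3 = %u\n", (rsid >> 4) & 3); /* fixed shift: 3 */
        return 0;
    }
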
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 752dbee06af5..5d9c66627b6e 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -292,8 +292,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
292 return; 292 return;
293 293
294 /* reset adapter interrupt indicators */ 294 /* reset adapter interrupt indicators */
295 put_indicator(irq_ptr->dsci);
296 set_subchannel_ind(irq_ptr, 1); 295 set_subchannel_ind(irq_ptr, 1);
296 put_indicator(irq_ptr->dsci);
297} 297}
298 298
299void __exit tiqdio_unregister_thinints(void) 299void __exit tiqdio_unregister_thinints(void)
diff --git a/drivers/scsi/arm/fas216.h b/drivers/scsi/arm/fas216.h
index 377cfb72cc66..f30f8d659dc4 100644
--- a/drivers/scsi/arm/fas216.h
+++ b/drivers/scsi/arm/fas216.h
@@ -345,7 +345,7 @@ extern int fas216_queue_command(struct Scsi_Host *h, struct scsi_cmnd *SCpnt);
345 * : SCpnt - Command to queue 345 * : SCpnt - Command to queue
346 * Returns : 0 - success, else error 346 * Returns : 0 - success, else error
347 */ 347 */
348extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *) 348extern int fas216_noqueue_command(struct Scsi_Host *, struct scsi_cmnd *);
349 349
350/* Function: irqreturn_t fas216_intr (FAS216_Info *info) 350/* Function: irqreturn_t fas216_intr (FAS216_Info *info)
351 * Purpose : handle interrupts from the interface to progress a command 351 * Purpose : handle interrupts from the interface to progress a command
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b7aa93..09a550860dcf 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2872,7 +2872,7 @@ static struct console serial8250_console = {
2872 .device = uart_console_device, 2872 .device = uart_console_device,
2873 .setup = serial8250_console_setup, 2873 .setup = serial8250_console_setup,
2874 .early_setup = serial8250_console_early_setup, 2874 .early_setup = serial8250_console_early_setup,
2875 .flags = CON_PRINTBUFFER, 2875 .flags = CON_PRINTBUFFER | CON_ANYTIME,
2876 .index = -1, 2876 .index = -1,
2877 .data = &serial8250_reg, 2877 .data = &serial8250_reg,
2878}; 2878};
diff --git a/drivers/serial/mfd.c b/drivers/serial/mfd.c
index 5fc699e929dc..d40010a22ecd 100644
--- a/drivers/serial/mfd.c
+++ b/drivers/serial/mfd.c
@@ -900,8 +900,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
900 unsigned char cval, fcr = 0; 900 unsigned char cval, fcr = 0;
901 unsigned long flags; 901 unsigned long flags;
902 unsigned int baud, quot; 902 unsigned int baud, quot;
903 u32 mul = 0x3600; 903 u32 ps, mul;
904 u32 ps = 0x10;
905 904
906 switch (termios->c_cflag & CSIZE) { 905 switch (termios->c_cflag & CSIZE) {
907 case CS5: 906 case CS5:
@@ -943,31 +942,24 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
943 baud = uart_get_baud_rate(port, termios, old, 0, 4000000); 942 baud = uart_get_baud_rate(port, termios, old, 0, 4000000);
944 943
945 quot = 1; 944 quot = 1;
945 ps = 0x10;
946 mul = 0x3600;
946 switch (baud) { 947 switch (baud) {
947 case 3500000: 948 case 3500000:
948 mul = 0x3345; 949 mul = 0x3345;
949 ps = 0xC; 950 ps = 0xC;
950 break; 951 break;
951 case 3000000:
952 mul = 0x2EE0;
953 break;
954 case 2500000:
955 mul = 0x2710;
956 break;
957 case 2000000:
958 mul = 0x1F40;
959 break;
960 case 1843200: 952 case 1843200:
961 mul = 0x2400; 953 mul = 0x2400;
962 break; 954 break;
955 case 3000000:
956 case 2500000:
957 case 2000000:
963 case 1500000: 958 case 1500000:
964 mul = 0x1770;
965 break;
966 case 1000000: 959 case 1000000:
967 mul = 0xFA0;
968 break;
969 case 500000: 960 case 500000:
970 mul = 0x7D0; 961 /* mul/ps/quot = 0x9C4/0x10/0x1 will make a 500000 bps */
962 mul = baud / 500000 * 0x9C4;
971 break; 963 break;
972 default: 964 default:
973 /* Use uart_get_divisor to get quot for other baud rates */ 965 /* Use uart_get_divisor to get quot for other baud rates */
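
The mfd.c rewrite collapses the per-baud multiplier table: every rate that is an integer multiple of 500000 bps now derives mul from the commented anchor (mul/ps/quot = 0x9C4/0x10/0x1 for 500000 bps), i.e. mul = baud / 500000 * 0x9C4. What that formula evaluates to for the rates listed in the hunk:

    #include <stdio.h>

    int main(void)
    {
        unsigned int bauds[] = { 3000000, 2500000, 2000000, 1500000, 1000000, 500000 };
        unsigned int i;

        for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++)
            printf("%7u bps -> mul = 0x%X\n",
                   bauds[i], bauds[i] / 500000 * 0x9C4);
        return 0;
    }
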
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index cb12a8e1466b..3f5e387ed564 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -418,8 +418,11 @@ int clk_register(struct clk *clk)
418 list_add(&clk->sibling, &root_clks); 418 list_add(&clk->sibling, &root_clks);
419 419
420 list_add(&clk->node, &clock_list); 420 list_add(&clk->node, &clock_list);
421
422#ifdef CONFIG_SH_CLK_CPG_LEGACY
421 if (clk->ops && clk->ops->init) 423 if (clk->ops && clk->ops->init)
422 clk->ops->init(clk); 424 clk->ops->init(clk);
425#endif
423 426
424out_unlock: 427out_unlock:
425 mutex_unlock(&clock_list_sem); 428 mutex_unlock(&clock_list_sem);
@@ -455,19 +458,13 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
455 458
456int clk_set_rate(struct clk *clk, unsigned long rate) 459int clk_set_rate(struct clk *clk, unsigned long rate)
457{ 460{
458 return clk_set_rate_ex(clk, rate, 0);
459}
460EXPORT_SYMBOL_GPL(clk_set_rate);
461
462int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
463{
464 int ret = -EOPNOTSUPP; 461 int ret = -EOPNOTSUPP;
465 unsigned long flags; 462 unsigned long flags;
466 463
467 spin_lock_irqsave(&clock_lock, flags); 464 spin_lock_irqsave(&clock_lock, flags);
468 465
469 if (likely(clk->ops && clk->ops->set_rate)) { 466 if (likely(clk->ops && clk->ops->set_rate)) {
470 ret = clk->ops->set_rate(clk, rate, algo_id); 467 ret = clk->ops->set_rate(clk, rate);
471 if (ret != 0) 468 if (ret != 0)
472 goto out_unlock; 469 goto out_unlock;
473 } else { 470 } else {
@@ -485,7 +482,7 @@ out_unlock:
485 482
486 return ret; 483 return ret;
487} 484}
488EXPORT_SYMBOL_GPL(clk_set_rate_ex); 485EXPORT_SYMBOL_GPL(clk_set_rate);
489 486
490int clk_set_parent(struct clk *clk, struct clk *parent) 487int clk_set_parent(struct clk *clk, struct clk *parent)
491{ 488{
@@ -653,8 +650,7 @@ static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
653 clkp->ops->set_parent(clkp, 650 clkp->ops->set_parent(clkp,
654 clkp->parent); 651 clkp->parent);
655 if (likely(clkp->ops->set_rate)) 652 if (likely(clkp->ops->set_rate))
656 clkp->ops->set_rate(clkp, 653 clkp->ops->set_rate(clkp, rate);
657 rate, NO_CHANGE);
658 else if (likely(clkp->ops->recalc)) 654 else if (likely(clkp->ops->recalc))
659 clkp->rate = clkp->ops->recalc(clkp); 655 clkp->rate = clkp->ops->recalc(clkp);
660 } 656 }
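
This is an API narrowing: clk_set_rate_ex() and its algo_id argument are removed, clk_set_rate() calls the ops->set_rate() hook directly with just (clk, rate), and the cpg.c hunks below update the div4/div6 implementations to match. A sketch of the narrowed hook, per the signatures visible in these hunks (the struct is abbreviated for illustration, not the full kernel definition):

    struct clk;

    struct clk_ops {
        int (*set_rate)(struct clk *clk, unsigned long rate); /* algo_id dropped */
    };
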
diff --git a/drivers/sh/clk/cpg.c b/drivers/sh/clk/cpg.c
index 3aea5f0ceb09..6172335ae323 100644
--- a/drivers/sh/clk/cpg.c
+++ b/drivers/sh/clk/cpg.c
@@ -110,8 +110,7 @@ static int sh_clk_div6_set_parent(struct clk *clk, struct clk *parent)
110 return 0; 110 return 0;
111} 111}
112 112
113static int sh_clk_div6_set_rate(struct clk *clk, 113static int sh_clk_div6_set_rate(struct clk *clk, unsigned long rate)
114 unsigned long rate, int algo_id)
115{ 114{
116 unsigned long value; 115 unsigned long value;
117 int idx; 116 int idx;
@@ -132,7 +131,7 @@ static int sh_clk_div6_enable(struct clk *clk)
132 unsigned long value; 131 unsigned long value;
133 int ret; 132 int ret;
134 133
135 ret = sh_clk_div6_set_rate(clk, clk->rate, 0); 134 ret = sh_clk_div6_set_rate(clk, clk->rate);
136 if (ret == 0) { 135 if (ret == 0) {
137 value = __raw_readl(clk->enable_reg); 136 value = __raw_readl(clk->enable_reg);
138 value &= ~0x100; /* clear stop bit to enable clock */ 137 value &= ~0x100; /* clear stop bit to enable clock */
@@ -253,7 +252,7 @@ static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
253 return 0; 252 return 0;
254} 253}
255 254
256static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id) 255static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate)
257{ 256{
258 struct clk_div4_table *d4t = clk->priv; 257 struct clk_div4_table *d4t = clk->priv;
259 unsigned long value; 258 unsigned long value;
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c
index 154529aacc03..a067046c9da2 100644
--- a/drivers/spi/atmel_spi.c
+++ b/drivers/spi/atmel_spi.c
@@ -352,8 +352,12 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
352 352
353 xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS; 353 xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
354 if (xfer->tx_buf) { 354 if (xfer->tx_buf) {
355 /* tx_buf is a const void* where we need a void * for the dma
356 * mapping */
357 void *nonconst_tx = (void *)xfer->tx_buf;
358
355 xfer->tx_dma = dma_map_single(dev, 359 xfer->tx_dma = dma_map_single(dev,
356 (void *) xfer->tx_buf, xfer->len, 360 nonconst_tx, xfer->len,
357 DMA_TO_DEVICE); 361 DMA_TO_DEVICE);
358 if (dma_mapping_error(dev, xfer->tx_dma)) 362 if (dma_mapping_error(dev, xfer->tx_dma))
359 return -ENOMEM; 363 return -ENOMEM;
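
spi_transfer.tx_buf is declared const void * (the SPI core promises not to modify it), while the DMA mapping call takes a plain void *, so the hunk makes the cast explicit and documents it instead of hiding it inside the call. The shape of the idiom as a self-contained sketch, with the mapping API stood in by a plain function:

    #include <stdio.h>

    static void map_for_dma(void *buf, unsigned long len) { (void)buf; (void)len; }

    static void send(const void *tx_buf, unsigned long len)
    {
        /* tx_buf is const to callers; the mapping API wants void *, so cast
         * once, visibly, with a comment - the buffer is still only read. */
        void *nonconst_tx = (void *)tx_buf;

        map_for_dma(nonconst_tx, len);
        printf("mapped %lu bytes\n", len);
    }

    int main(void)
    {
        send("hello", 5);
        return 0;
    }
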
diff --git a/drivers/ssb/b43_pci_bridge.c b/drivers/ssb/b43_pci_bridge.c
index ef9c6a04ad8f..744d3f6e4709 100644
--- a/drivers/ssb/b43_pci_bridge.c
+++ b/drivers/ssb/b43_pci_bridge.c
@@ -24,6 +24,7 @@ static const struct pci_device_id b43_pci_bridge_tbl[] = {
24 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) }, 24 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4312) },
25 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) }, 25 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4315) },
26 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) }, 26 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4318) },
27 { PCI_DEVICE(PCI_VENDOR_ID_BCM_GVC, 0x4318) },
27 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) }, 28 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4319) },
28 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) }, 29 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4320) },
29 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) }, 30 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4321) },
diff --git a/drivers/staging/asus_oled/asus_oled.c b/drivers/staging/asus_oled/asus_oled.c
index 8c95d8c2a4f4..016c6f7f8630 100644
--- a/drivers/staging/asus_oled/asus_oled.c
+++ b/drivers/staging/asus_oled/asus_oled.c
@@ -620,13 +620,13 @@ static ssize_t class_set_picture(struct device *device,
620 620
621#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file 621#define ASUS_OLED_DEVICE_ATTR(_file) dev_attr_asus_oled_##_file
622 622
623static DEVICE_ATTR(asus_oled_enabled, S_IWUGO | S_IRUGO, 623static DEVICE_ATTR(asus_oled_enabled, S_IWUSR | S_IRUGO,
624 get_enabled, set_enabled); 624 get_enabled, set_enabled);
625static DEVICE_ATTR(asus_oled_picture, S_IWUGO , NULL, set_picture); 625static DEVICE_ATTR(asus_oled_picture, S_IWUSR , NULL, set_picture);
626 626
627static DEVICE_ATTR(enabled, S_IWUGO | S_IRUGO, 627static DEVICE_ATTR(enabled, S_IWUSR | S_IRUGO,
628 class_get_enabled, class_set_enabled); 628 class_get_enabled, class_set_enabled);
629static DEVICE_ATTR(picture, S_IWUGO, NULL, class_set_picture); 629static DEVICE_ATTR(picture, S_IWUSR, NULL, class_set_picture);
630 630
631static int asus_oled_probe(struct usb_interface *interface, 631static int asus_oled_probe(struct usb_interface *interface,
632 const struct usb_device_id *id) 632 const struct usb_device_id *id)
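
This S_IWUGO-to-S_IWUSR substitution recurs through the rest of the diff (frontier/tranzport, iio/adis16220, and the long line6 control/midi/pcm/pod runs below): S_IWUGO grants owner, group, and other write (0222), so these sysfs attributes were world-writable, while S_IWUSR (0200) restricts writes to the owner and S_IRUGO keeps them world-readable. The bit arithmetic, using the userspace names for the same mode bits:

    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
        /* S_IWUGO is the kernel's shorthand for S_IWUSR | S_IWGRP | S_IWOTH. */
        unsigned int iwugo = S_IWUSR | S_IWGRP | S_IWOTH;

        printf("S_IWUGO = %04o (world-writable)\n", iwugo);                   /* 0222 */
        printf("S_IWUSR = %04o (owner-only write)\n", (unsigned int)S_IWUSR); /* 0200 */
        return 0;
    }
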
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index b68a7e5173be..d85de82f941a 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -463,9 +463,6 @@ static void hardif_remove_interface(struct batman_if *batman_if)
463 return; 463 return;
464 464
465 batman_if->if_status = IF_TO_BE_REMOVED; 465 batman_if->if_status = IF_TO_BE_REMOVED;
466
467 /* caller must take if_list_lock */
468 list_del_rcu(&batman_if->list);
469 synchronize_rcu(); 466 synchronize_rcu();
470 sysfs_del_hardif(&batman_if->hardif_obj); 467 sysfs_del_hardif(&batman_if->hardif_obj);
471 hardif_put(batman_if); 468 hardif_put(batman_if);
@@ -474,13 +471,21 @@ static void hardif_remove_interface(struct batman_if *batman_if)
474void hardif_remove_interfaces(void) 471void hardif_remove_interfaces(void)
475{ 472{
476 struct batman_if *batman_if, *batman_if_tmp; 473 struct batman_if *batman_if, *batman_if_tmp;
474 struct list_head if_queue;
475
476 INIT_LIST_HEAD(&if_queue);
477 477
478 rtnl_lock();
479 spin_lock(&if_list_lock); 478 spin_lock(&if_list_lock);
480 list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) { 479 list_for_each_entry_safe(batman_if, batman_if_tmp, &if_list, list) {
481 hardif_remove_interface(batman_if); 480 list_del_rcu(&batman_if->list);
481 list_add_tail(&batman_if->list, &if_queue);
482 } 482 }
483 spin_unlock(&if_list_lock); 483 spin_unlock(&if_list_lock);
484
485 rtnl_lock();
486 list_for_each_entry_safe(batman_if, batman_if_tmp, &if_queue, list) {
487 hardif_remove_interface(batman_if);
488 }
484 rtnl_unlock(); 489 rtnl_unlock();
485} 490}
486 491
@@ -507,8 +512,10 @@ static int hard_if_event(struct notifier_block *this,
507 break; 512 break;
508 case NETDEV_UNREGISTER: 513 case NETDEV_UNREGISTER:
509 spin_lock(&if_list_lock); 514 spin_lock(&if_list_lock);
510 hardif_remove_interface(batman_if); 515 list_del_rcu(&batman_if->list);
511 spin_unlock(&if_list_lock); 516 spin_unlock(&if_list_lock);
517
518 hardif_remove_interface(batman_if);
512 break; 519 break;
513 case NETDEV_CHANGEMTU: 520 case NETDEV_CHANGEMTU:
514 if (batman_if->soft_iface) 521 if (batman_if->soft_iface)
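
The reordering in hard-interface.c is about lock hierarchy: hardif_remove_interface() ends up taking the RTNL lock (it unregisters a net_device), so it must not run with if_list_lock held. The fix unlinks entries onto a private queue under the spinlock, drops the spinlock, and only then walks the queue under rtnl_lock. A toy model of that two-phase teardown, with the kernel locks played by mutexes and the list by an array:

    #include <pthread.h>
    #include <stdio.h>

    /* 'spin' guards the shared list; 'rtnl' is the heavyweight lock that
     * per-entry teardown needs. Teardown must never run under 'spin'. */
    static pthread_mutex_t spin = PTHREAD_MUTEX_INITIALIZER;
    static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;
    static int shared[4] = { 1, 2, 3, 4 }, nshared = 4;

    int main(void)
    {
        int queue[4], nqueue = 0;

        pthread_mutex_lock(&spin);  /* phase 1: unlink only, keep it short */
        while (nshared > 0)
            queue[nqueue++] = shared[--nshared];
        pthread_mutex_unlock(&spin);

        pthread_mutex_lock(&rtnl);  /* phase 2: slow teardown, spinlock dropped */
        while (nqueue > 0)
            printf("tearing down %d\n", queue[--nqueue]);
        pthread_mutex_unlock(&rtnl);
        return 0;
    }
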
diff --git a/drivers/staging/batman-adv/soft-interface.c b/drivers/staging/batman-adv/soft-interface.c
index 3904db9ce7b1..0e996181daf7 100644
--- a/drivers/staging/batman-adv/soft-interface.c
+++ b/drivers/staging/batman-adv/soft-interface.c
@@ -194,14 +194,15 @@ void interface_rx(struct net_device *soft_iface,
194 struct bat_priv *priv = netdev_priv(soft_iface); 194 struct bat_priv *priv = netdev_priv(soft_iface);
195 195
196 /* check if enough space is available for pulling, and pull */ 196 /* check if enough space is available for pulling, and pull */
197 if (!pskb_may_pull(skb, hdr_size)) { 197 if (!pskb_may_pull(skb, hdr_size))
198 kfree_skb(skb); 198 goto dropped;
199 return; 199
200 }
201 skb_pull_rcsum(skb, hdr_size); 200 skb_pull_rcsum(skb, hdr_size);
202/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/ 201/* skb_set_mac_header(skb, -sizeof(struct ethhdr));*/
203 202
204 /* skb->dev & skb->pkt_type are set here */ 203 /* skb->dev & skb->pkt_type are set here */
204 if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
205 goto dropped;
205 skb->protocol = eth_type_trans(skb, soft_iface); 206 skb->protocol = eth_type_trans(skb, soft_iface);
206 207
207 /* should not be necessary anymore as we use skb_pull_rcsum() 208 /* should not be necessary anymore as we use skb_pull_rcsum()
@@ -216,6 +217,11 @@ void interface_rx(struct net_device *soft_iface,
216 soft_iface->last_rx = jiffies; 217 soft_iface->last_rx = jiffies;
217 218
218 netif_rx(skb); 219 netif_rx(skb);
220 return;
221
222dropped:
223 kfree_skb(skb);
224 return;
219} 225}
220 226
221#ifdef HAVE_NET_DEVICE_OPS 227#ifdef HAVE_NET_DEVICE_OPS
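
Two things happen in interface_rx() above: the duplicated kfree_skb()/return pairs become one shared `dropped:` label, and a second pskb_may_pull(skb, ETH_HLEN) guard lands before eth_type_trans(), which dereferences the Ethernet header and could otherwise read past the end of a too-short skb. A minimal sketch of the single-exit validation idiom (buffer and checks are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static void consume(unsigned char *buf, size_t len)
    {
        if (len < 4)            /* first validation */
            goto dropped;
        if (buf[0] != 0x42)     /* second validation, same cleanup path */
            goto dropped;
        printf("accepted %zu bytes\n", len);
        return;                 /* ownership passes on success, as netif_rx() does */

    dropped:
        free(buf);              /* one place frees, no duplicated cleanup */
    }

    int main(void)
    {
        unsigned char *pkt = calloc(2, 1);
        if (pkt)
            consume(pkt, 2);    /* too short -> dropped */
        return 0;
    }
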
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c8f1cf1b4409..a27bb0b4f581 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -88,7 +88,9 @@ with the driver.
88 88
89Contact Info: 89Contact Info:
90============= 90=============
91Brett Rudley brudley@broadcom.com 91Brett Rudley brudley@broadcom.com
92Henry Ptasinski henryp@broadcom.com 92Henry Ptasinski henryp@broadcom.com
93Dowan Kim dowan@broadcom.com 93Dowan Kim dowan@broadcom.com
94Roland Vossen rvossen@broadcom.com
95Arend van Spriel arend@broadcom.com
94 96
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index dbf904184899..24ebadbe4241 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -46,4 +46,6 @@ Contact
46Brett Rudley <brudley@broadcom.com> 46Brett Rudley <brudley@broadcom.com>
47Henry Ptasinski <henryp@broadcom.com> 47Henry Ptasinski <henryp@broadcom.com>
48Dowan Kim <dowan@broadcom.com> 48Dowan Kim <dowan@broadcom.com>
49Roland Vossen <rvossen@broadcom.com>
50Arend van Spriel <arend@broadcom.com>
49 51
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 1f177a67ff11..de784ff08caa 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -2295,8 +2295,8 @@ static void tidy_up(struct usbduxsub *usbduxsub_tmp)
2295 usbduxsub_tmp->inBuffer = NULL; 2295 usbduxsub_tmp->inBuffer = NULL;
2296 kfree(usbduxsub_tmp->insnBuffer); 2296 kfree(usbduxsub_tmp->insnBuffer);
2297 usbduxsub_tmp->insnBuffer = NULL; 2297 usbduxsub_tmp->insnBuffer = NULL;
2298 kfree(usbduxsub_tmp->inBuffer); 2298 kfree(usbduxsub_tmp->outBuffer);
2299 usbduxsub_tmp->inBuffer = NULL; 2299 usbduxsub_tmp->outBuffer = NULL;
2300 kfree(usbduxsub_tmp->dac_commands); 2300 kfree(usbduxsub_tmp->dac_commands);
2301 usbduxsub_tmp->dac_commands = NULL; 2301 usbduxsub_tmp->dac_commands = NULL;
2302 kfree(usbduxsub_tmp->dux_commands); 2302 kfree(usbduxsub_tmp->dux_commands);
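
The usbdux hunk fixes a copy-and-paste bug: inBuffer was freed a second time where outBuffer should have been. Since the first free is followed by `inBuffer = NULL` and kfree(NULL) is a no-op, this never crashed, but outBuffer was leaked on every teardown. The same shape in userspace terms:

    #include <stdlib.h>

    int main(void)
    {
        char *in = malloc(8), *out = malloc(8);

        free(in);
        in = NULL;
        free(in);   /* the old code: frees NULL, a harmless no-op...        */
                    /* ...but 'out' was never freed - the leak being fixed. */
        free(out);  /* the corrected pairing */
        out = NULL;
        return 0;
    }
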
diff --git a/drivers/staging/easycap/easycap.h b/drivers/staging/easycap/easycap.h
index 25961c23dc0f..884263b2775d 100644
--- a/drivers/staging/easycap/easycap.h
+++ b/drivers/staging/easycap/easycap.h
@@ -75,6 +75,7 @@
75#include <linux/errno.h> 75#include <linux/errno.h>
76#include <linux/init.h> 76#include <linux/init.h>
77#include <linux/slab.h> 77#include <linux/slab.h>
78#include <linux/smp_lock.h>
78#include <linux/module.h> 79#include <linux/module.h>
79#include <linux/kref.h> 80#include <linux/kref.h>
80#include <linux/usb.h> 81#include <linux/usb.h>
diff --git a/drivers/staging/frontier/tranzport.c b/drivers/staging/frontier/tranzport.c
index a145a15cfdb3..8894ab14f167 100644
--- a/drivers/staging/frontier/tranzport.c
+++ b/drivers/staging/frontier/tranzport.c
@@ -204,7 +204,7 @@ static void usb_tranzport_abort_transfers(struct usb_tranzport *dev)
204 t->value = temp; \ 204 t->value = temp; \
205 return count; \ 205 return count; \
206 } \ 206 } \
207 static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value); 207 static DEVICE_ATTR(value, S_IWUSR | S_IRUGO, show_##value, set_##value);
208 208
209show_int(enable); 209show_int(enable);
210show_int(offline); 210show_int(offline);
diff --git a/drivers/staging/go7007/go7007-driver.c b/drivers/staging/go7007/go7007-driver.c
index b3f42f37a313..48d4e483d8a4 100644
--- a/drivers/staging/go7007/go7007-driver.c
+++ b/drivers/staging/go7007/go7007-driver.c
@@ -199,7 +199,7 @@ static int init_i2c_module(struct i2c_adapter *adapter, const char *type,
199 struct go7007 *go = i2c_get_adapdata(adapter); 199 struct go7007 *go = i2c_get_adapdata(adapter);
200 struct v4l2_device *v4l2_dev = &go->v4l2_dev; 200 struct v4l2_device *v4l2_dev = &go->v4l2_dev;
201 201
202 if (v4l2_i2c_new_subdev(v4l2_dev, adapter, NULL, type, addr, NULL)) 202 if (v4l2_i2c_new_subdev(v4l2_dev, adapter, type, addr, NULL))
203 return 0; 203 return 0;
204 204
205 printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type); 205 printk(KERN_INFO "go7007: probing for module i2c:%s failed\n", type);
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
index c86d1498737d..1c1e98aee2d9 100644
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ b/drivers/staging/iio/accel/adis16220_core.c
@@ -507,7 +507,7 @@ static IIO_DEVICE_ATTR(reset, S_IWUSR, NULL,
507 adis16220_write_reset, 0); 507 adis16220_write_reset, 0);
508 508
509#define IIO_DEV_ATTR_CAPTURE(_store) \ 509#define IIO_DEV_ATTR_CAPTURE(_store) \
510 IIO_DEVICE_ATTR(capture, S_IWUGO, NULL, _store, 0) 510 IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
511 511
512static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture); 512static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
513 513
diff --git a/drivers/staging/intel_sst/intel_sst_stream_encoded.c b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
index fbae39fda5c0..5c455608b024 100644
--- a/drivers/staging/intel_sst/intel_sst_stream_encoded.c
+++ b/drivers/staging/intel_sst/intel_sst_stream_encoded.c
@@ -1269,7 +1269,7 @@ finish:
1269 dbufs->output_bytes_produced = total_output; 1269 dbufs->output_bytes_produced = total_output;
1270 str_info->status = str_info->prev; 1270 str_info->status = str_info->prev;
1271 str_info->prev = STREAM_DECODE; 1271 str_info->prev = STREAM_DECODE;
1272 str_info->decode_ibuf = NULL;
1273 kfree(str_info->decode_ibuf); 1272 kfree(str_info->decode_ibuf);
1273 str_info->decode_ibuf = NULL;
1274 return retval; 1274 return retval;
1275} 1275}
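
Same family of bug as the usbdux fix, in the opposite order: the old intel_sst code set decode_ibuf to NULL *before* calling kfree() on it, turning the free into a no-op on NULL and leaking the buffer on every pass; the fix frees first, then clears the pointer. In userspace terms:

    #include <stdlib.h>

    int main(void)
    {
        char *buf = malloc(32);

        /* Broken order: the pointer is gone before free() sees it.   */
        /*   buf = NULL; free(buf);   <- frees NULL, 32 bytes leak    */

        free(buf);  /* correct: release the allocation...             */
        buf = NULL; /* ...then poison the pointer against reuse       */
        return 0;
    }
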
diff --git a/drivers/staging/line6/control.c b/drivers/staging/line6/control.c
index 040e25ca6d33..67e23b6e2d35 100644
--- a/drivers/staging/line6/control.c
+++ b/drivers/staging/line6/control.c
@@ -266,210 +266,210 @@ VARIAX_PARAM_R(float, mix2);
266VARIAX_PARAM_R(float, mix1); 266VARIAX_PARAM_R(float, mix1);
267VARIAX_PARAM_R(int, pickup_wiring); 267VARIAX_PARAM_R(int, pickup_wiring);
268 268
269static DEVICE_ATTR(tweak, S_IWUGO | S_IRUGO, pod_get_tweak, pod_set_tweak); 269static DEVICE_ATTR(tweak, S_IWUSR | S_IRUGO, pod_get_tweak, pod_set_tweak);
270static DEVICE_ATTR(wah_position, S_IWUGO | S_IRUGO, pod_get_wah_position, 270static DEVICE_ATTR(wah_position, S_IWUSR | S_IRUGO, pod_get_wah_position,
271 pod_set_wah_position); 271 pod_set_wah_position);
272static DEVICE_ATTR(compression_gain, S_IWUGO | S_IRUGO, 272static DEVICE_ATTR(compression_gain, S_IWUSR | S_IRUGO,
273 pod_get_compression_gain, pod_set_compression_gain); 273 pod_get_compression_gain, pod_set_compression_gain);
274static DEVICE_ATTR(vol_pedal_position, S_IWUGO | S_IRUGO, 274static DEVICE_ATTR(vol_pedal_position, S_IWUSR | S_IRUGO,
275 pod_get_vol_pedal_position, pod_set_vol_pedal_position); 275 pod_get_vol_pedal_position, pod_set_vol_pedal_position);
276static DEVICE_ATTR(compression_threshold, S_IWUGO | S_IRUGO, 276static DEVICE_ATTR(compression_threshold, S_IWUSR | S_IRUGO,
277 pod_get_compression_threshold, 277 pod_get_compression_threshold,
278 pod_set_compression_threshold); 278 pod_set_compression_threshold);
279static DEVICE_ATTR(pan, S_IWUGO | S_IRUGO, pod_get_pan, pod_set_pan); 279static DEVICE_ATTR(pan, S_IWUSR | S_IRUGO, pod_get_pan, pod_set_pan);
280static DEVICE_ATTR(amp_model_setup, S_IWUGO | S_IRUGO, pod_get_amp_model_setup, 280static DEVICE_ATTR(amp_model_setup, S_IWUSR | S_IRUGO, pod_get_amp_model_setup,
281 pod_set_amp_model_setup); 281 pod_set_amp_model_setup);
282static DEVICE_ATTR(amp_model, S_IWUGO | S_IRUGO, pod_get_amp_model, 282static DEVICE_ATTR(amp_model, S_IWUSR | S_IRUGO, pod_get_amp_model,
283 pod_set_amp_model); 283 pod_set_amp_model);
284static DEVICE_ATTR(drive, S_IWUGO | S_IRUGO, pod_get_drive, pod_set_drive); 284static DEVICE_ATTR(drive, S_IWUSR | S_IRUGO, pod_get_drive, pod_set_drive);
285static DEVICE_ATTR(bass, S_IWUGO | S_IRUGO, pod_get_bass, pod_set_bass); 285static DEVICE_ATTR(bass, S_IWUSR | S_IRUGO, pod_get_bass, pod_set_bass);
286static DEVICE_ATTR(mid, S_IWUGO | S_IRUGO, pod_get_mid, pod_set_mid); 286static DEVICE_ATTR(mid, S_IWUSR | S_IRUGO, pod_get_mid, pod_set_mid);
287static DEVICE_ATTR(lowmid, S_IWUGO | S_IRUGO, pod_get_lowmid, pod_set_lowmid); 287static DEVICE_ATTR(lowmid, S_IWUSR | S_IRUGO, pod_get_lowmid, pod_set_lowmid);
288static DEVICE_ATTR(treble, S_IWUGO | S_IRUGO, pod_get_treble, pod_set_treble); 288static DEVICE_ATTR(treble, S_IWUSR | S_IRUGO, pod_get_treble, pod_set_treble);
289static DEVICE_ATTR(highmid, S_IWUGO | S_IRUGO, pod_get_highmid, 289static DEVICE_ATTR(highmid, S_IWUSR | S_IRUGO, pod_get_highmid,
290 pod_set_highmid); 290 pod_set_highmid);
291static DEVICE_ATTR(chan_vol, S_IWUGO | S_IRUGO, pod_get_chan_vol, 291static DEVICE_ATTR(chan_vol, S_IWUSR | S_IRUGO, pod_get_chan_vol,
292 pod_set_chan_vol); 292 pod_set_chan_vol);
293static DEVICE_ATTR(reverb_mix, S_IWUGO | S_IRUGO, pod_get_reverb_mix, 293static DEVICE_ATTR(reverb_mix, S_IWUSR | S_IRUGO, pod_get_reverb_mix,
294 pod_set_reverb_mix); 294 pod_set_reverb_mix);
295static DEVICE_ATTR(effect_setup, S_IWUGO | S_IRUGO, pod_get_effect_setup, 295static DEVICE_ATTR(effect_setup, S_IWUSR | S_IRUGO, pod_get_effect_setup,
296 pod_set_effect_setup); 296 pod_set_effect_setup);
297static DEVICE_ATTR(band_1_frequency, S_IWUGO | S_IRUGO, 297static DEVICE_ATTR(band_1_frequency, S_IWUSR | S_IRUGO,
298 pod_get_band_1_frequency, pod_set_band_1_frequency); 298 pod_get_band_1_frequency, pod_set_band_1_frequency);
299static DEVICE_ATTR(presence, S_IWUGO | S_IRUGO, pod_get_presence, 299static DEVICE_ATTR(presence, S_IWUSR | S_IRUGO, pod_get_presence,
300 pod_set_presence); 300 pod_set_presence);
301static DEVICE_ATTR2(treble__bass, treble, S_IWUGO | S_IRUGO, 301static DEVICE_ATTR2(treble__bass, treble, S_IWUSR | S_IRUGO,
302 pod_get_treble__bass, pod_set_treble__bass); 302 pod_get_treble__bass, pod_set_treble__bass);
303static DEVICE_ATTR(noise_gate_enable, S_IWUGO | S_IRUGO, 303static DEVICE_ATTR(noise_gate_enable, S_IWUSR | S_IRUGO,
304 pod_get_noise_gate_enable, pod_set_noise_gate_enable); 304 pod_get_noise_gate_enable, pod_set_noise_gate_enable);
305static DEVICE_ATTR(gate_threshold, S_IWUGO | S_IRUGO, pod_get_gate_threshold, 305static DEVICE_ATTR(gate_threshold, S_IWUSR | S_IRUGO, pod_get_gate_threshold,
306 pod_set_gate_threshold); 306 pod_set_gate_threshold);
307static DEVICE_ATTR(gate_decay_time, S_IWUGO | S_IRUGO, pod_get_gate_decay_time, 307static DEVICE_ATTR(gate_decay_time, S_IWUSR | S_IRUGO, pod_get_gate_decay_time,
308 pod_set_gate_decay_time); 308 pod_set_gate_decay_time);
309static DEVICE_ATTR(stomp_enable, S_IWUGO | S_IRUGO, pod_get_stomp_enable, 309static DEVICE_ATTR(stomp_enable, S_IWUSR | S_IRUGO, pod_get_stomp_enable,
310 pod_set_stomp_enable); 310 pod_set_stomp_enable);
311static DEVICE_ATTR(comp_enable, S_IWUGO | S_IRUGO, pod_get_comp_enable, 311static DEVICE_ATTR(comp_enable, S_IWUSR | S_IRUGO, pod_get_comp_enable,
312 pod_set_comp_enable); 312 pod_set_comp_enable);
313static DEVICE_ATTR(stomp_time, S_IWUGO | S_IRUGO, pod_get_stomp_time, 313static DEVICE_ATTR(stomp_time, S_IWUSR | S_IRUGO, pod_get_stomp_time,
314 pod_set_stomp_time); 314 pod_set_stomp_time);
315static DEVICE_ATTR(delay_enable, S_IWUGO | S_IRUGO, pod_get_delay_enable, 315static DEVICE_ATTR(delay_enable, S_IWUSR | S_IRUGO, pod_get_delay_enable,
316 pod_set_delay_enable); 316 pod_set_delay_enable);
317static DEVICE_ATTR(mod_param_1, S_IWUGO | S_IRUGO, pod_get_mod_param_1, 317static DEVICE_ATTR(mod_param_1, S_IWUSR | S_IRUGO, pod_get_mod_param_1,
318 pod_set_mod_param_1); 318 pod_set_mod_param_1);
319static DEVICE_ATTR(delay_param_1, S_IWUGO | S_IRUGO, pod_get_delay_param_1, 319static DEVICE_ATTR(delay_param_1, S_IWUSR | S_IRUGO, pod_get_delay_param_1,
320 pod_set_delay_param_1); 320 pod_set_delay_param_1);
321static DEVICE_ATTR(delay_param_1_note_value, S_IWUGO | S_IRUGO, 321static DEVICE_ATTR(delay_param_1_note_value, S_IWUSR | S_IRUGO,
322 pod_get_delay_param_1_note_value, 322 pod_get_delay_param_1_note_value,
323 pod_set_delay_param_1_note_value); 323 pod_set_delay_param_1_note_value);
324static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUGO | S_IRUGO, 324static DEVICE_ATTR2(band_2_frequency__bass, band_2_frequency, S_IWUSR | S_IRUGO,
325 pod_get_band_2_frequency__bass, 325 pod_get_band_2_frequency__bass,
326 pod_set_band_2_frequency__bass); 326 pod_set_band_2_frequency__bass);
327static DEVICE_ATTR(delay_param_2, S_IWUGO | S_IRUGO, pod_get_delay_param_2, 327static DEVICE_ATTR(delay_param_2, S_IWUSR | S_IRUGO, pod_get_delay_param_2,
328 pod_set_delay_param_2); 328 pod_set_delay_param_2);
329static DEVICE_ATTR(delay_volume_mix, S_IWUGO | S_IRUGO, 329static DEVICE_ATTR(delay_volume_mix, S_IWUSR | S_IRUGO,
330 pod_get_delay_volume_mix, pod_set_delay_volume_mix); 330 pod_get_delay_volume_mix, pod_set_delay_volume_mix);
331static DEVICE_ATTR(delay_param_3, S_IWUGO | S_IRUGO, pod_get_delay_param_3, 331static DEVICE_ATTR(delay_param_3, S_IWUSR | S_IRUGO, pod_get_delay_param_3,
332 pod_set_delay_param_3); 332 pod_set_delay_param_3);
333static DEVICE_ATTR(reverb_enable, S_IWUGO | S_IRUGO, pod_get_reverb_enable, 333static DEVICE_ATTR(reverb_enable, S_IWUSR | S_IRUGO, pod_get_reverb_enable,
334 pod_set_reverb_enable); 334 pod_set_reverb_enable);
335static DEVICE_ATTR(reverb_type, S_IWUGO | S_IRUGO, pod_get_reverb_type, 335static DEVICE_ATTR(reverb_type, S_IWUSR | S_IRUGO, pod_get_reverb_type,
336 pod_set_reverb_type); 336 pod_set_reverb_type);
337static DEVICE_ATTR(reverb_decay, S_IWUGO | S_IRUGO, pod_get_reverb_decay, 337static DEVICE_ATTR(reverb_decay, S_IWUSR | S_IRUGO, pod_get_reverb_decay,
338 pod_set_reverb_decay); 338 pod_set_reverb_decay);
339static DEVICE_ATTR(reverb_tone, S_IWUGO | S_IRUGO, pod_get_reverb_tone, 339static DEVICE_ATTR(reverb_tone, S_IWUSR | S_IRUGO, pod_get_reverb_tone,
340 pod_set_reverb_tone); 340 pod_set_reverb_tone);
341static DEVICE_ATTR(reverb_pre_delay, S_IWUGO | S_IRUGO, 341static DEVICE_ATTR(reverb_pre_delay, S_IWUSR | S_IRUGO,
342 pod_get_reverb_pre_delay, pod_set_reverb_pre_delay); 342 pod_get_reverb_pre_delay, pod_set_reverb_pre_delay);
343static DEVICE_ATTR(reverb_pre_post, S_IWUGO | S_IRUGO, pod_get_reverb_pre_post, 343static DEVICE_ATTR(reverb_pre_post, S_IWUSR | S_IRUGO, pod_get_reverb_pre_post,
344 pod_set_reverb_pre_post); 344 pod_set_reverb_pre_post);
345static DEVICE_ATTR(band_2_frequency, S_IWUGO | S_IRUGO, 345static DEVICE_ATTR(band_2_frequency, S_IWUSR | S_IRUGO,
346 pod_get_band_2_frequency, pod_set_band_2_frequency); 346 pod_get_band_2_frequency, pod_set_band_2_frequency);
347static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUGO | S_IRUGO, 347static DEVICE_ATTR2(band_3_frequency__bass, band_3_frequency, S_IWUSR | S_IRUGO,
348 pod_get_band_3_frequency__bass, 348 pod_get_band_3_frequency__bass,
349 pod_set_band_3_frequency__bass); 349 pod_set_band_3_frequency__bass);
350static DEVICE_ATTR(wah_enable, S_IWUGO | S_IRUGO, pod_get_wah_enable, 350static DEVICE_ATTR(wah_enable, S_IWUSR | S_IRUGO, pod_get_wah_enable,
351 pod_set_wah_enable); 351 pod_set_wah_enable);
352static DEVICE_ATTR(modulation_lo_cut, S_IWUGO | S_IRUGO, 352static DEVICE_ATTR(modulation_lo_cut, S_IWUSR | S_IRUGO,
353 pod_get_modulation_lo_cut, pod_set_modulation_lo_cut); 353 pod_get_modulation_lo_cut, pod_set_modulation_lo_cut);
354static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUGO | S_IRUGO, 354static DEVICE_ATTR(delay_reverb_lo_cut, S_IWUSR | S_IRUGO,
355 pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut); 355 pod_get_delay_reverb_lo_cut, pod_set_delay_reverb_lo_cut);
356static DEVICE_ATTR(volume_pedal_minimum, S_IWUGO | S_IRUGO, 356static DEVICE_ATTR(volume_pedal_minimum, S_IWUSR | S_IRUGO,
357 pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum); 357 pod_get_volume_pedal_minimum, pod_set_volume_pedal_minimum);
358static DEVICE_ATTR(eq_pre_post, S_IWUGO | S_IRUGO, pod_get_eq_pre_post, 358static DEVICE_ATTR(eq_pre_post, S_IWUSR | S_IRUGO, pod_get_eq_pre_post,
359 pod_set_eq_pre_post); 359 pod_set_eq_pre_post);
360static DEVICE_ATTR(volume_pre_post, S_IWUGO | S_IRUGO, pod_get_volume_pre_post, 360static DEVICE_ATTR(volume_pre_post, S_IWUSR | S_IRUGO, pod_get_volume_pre_post,
361 pod_set_volume_pre_post); 361 pod_set_volume_pre_post);
362static DEVICE_ATTR(di_model, S_IWUGO | S_IRUGO, pod_get_di_model, 362static DEVICE_ATTR(di_model, S_IWUSR | S_IRUGO, pod_get_di_model,
363 pod_set_di_model); 363 pod_set_di_model);
364static DEVICE_ATTR(di_delay, S_IWUGO | S_IRUGO, pod_get_di_delay, 364static DEVICE_ATTR(di_delay, S_IWUSR | S_IRUGO, pod_get_di_delay,
365 pod_set_di_delay); 365 pod_set_di_delay);
366static DEVICE_ATTR(mod_enable, S_IWUGO | S_IRUGO, pod_get_mod_enable, 366static DEVICE_ATTR(mod_enable, S_IWUSR | S_IRUGO, pod_get_mod_enable,
367 pod_set_mod_enable); 367 pod_set_mod_enable);
368static DEVICE_ATTR(mod_param_1_note_value, S_IWUGO | S_IRUGO, 368static DEVICE_ATTR(mod_param_1_note_value, S_IWUSR | S_IRUGO,
369 pod_get_mod_param_1_note_value, 369 pod_get_mod_param_1_note_value,
370 pod_set_mod_param_1_note_value); 370 pod_set_mod_param_1_note_value);
371static DEVICE_ATTR(mod_param_2, S_IWUGO | S_IRUGO, pod_get_mod_param_2, 371static DEVICE_ATTR(mod_param_2, S_IWUSR | S_IRUGO, pod_get_mod_param_2,
372 pod_set_mod_param_2); 372 pod_set_mod_param_2);
373static DEVICE_ATTR(mod_param_3, S_IWUGO | S_IRUGO, pod_get_mod_param_3, 373static DEVICE_ATTR(mod_param_3, S_IWUSR | S_IRUGO, pod_get_mod_param_3,
374 pod_set_mod_param_3); 374 pod_set_mod_param_3);
375static DEVICE_ATTR(mod_param_4, S_IWUGO | S_IRUGO, pod_get_mod_param_4, 375static DEVICE_ATTR(mod_param_4, S_IWUSR | S_IRUGO, pod_get_mod_param_4,
376 pod_set_mod_param_4); 376 pod_set_mod_param_4);
377static DEVICE_ATTR(mod_param_5, S_IWUGO | S_IRUGO, pod_get_mod_param_5, 377static DEVICE_ATTR(mod_param_5, S_IWUSR | S_IRUGO, pod_get_mod_param_5,
378 pod_set_mod_param_5); 378 pod_set_mod_param_5);
379static DEVICE_ATTR(mod_volume_mix, S_IWUGO | S_IRUGO, pod_get_mod_volume_mix, 379static DEVICE_ATTR(mod_volume_mix, S_IWUSR | S_IRUGO, pod_get_mod_volume_mix,
380 pod_set_mod_volume_mix); 380 pod_set_mod_volume_mix);
381static DEVICE_ATTR(mod_pre_post, S_IWUGO | S_IRUGO, pod_get_mod_pre_post, 381static DEVICE_ATTR(mod_pre_post, S_IWUSR | S_IRUGO, pod_get_mod_pre_post,
382 pod_set_mod_pre_post); 382 pod_set_mod_pre_post);
383static DEVICE_ATTR(modulation_model, S_IWUGO | S_IRUGO, 383static DEVICE_ATTR(modulation_model, S_IWUSR | S_IRUGO,
384 pod_get_modulation_model, pod_set_modulation_model); 384 pod_get_modulation_model, pod_set_modulation_model);
385static DEVICE_ATTR(band_3_frequency, S_IWUGO | S_IRUGO, 385static DEVICE_ATTR(band_3_frequency, S_IWUSR | S_IRUGO,
386 pod_get_band_3_frequency, pod_set_band_3_frequency); 386 pod_get_band_3_frequency, pod_set_band_3_frequency);
387static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUGO | S_IRUGO, 387static DEVICE_ATTR2(band_4_frequency__bass, band_4_frequency, S_IWUSR | S_IRUGO,
388 pod_get_band_4_frequency__bass, 388 pod_get_band_4_frequency__bass,
389 pod_set_band_4_frequency__bass); 389 pod_set_band_4_frequency__bass);
390static DEVICE_ATTR(mod_param_1_double_precision, S_IWUGO | S_IRUGO, 390static DEVICE_ATTR(mod_param_1_double_precision, S_IWUSR | S_IRUGO,
391 pod_get_mod_param_1_double_precision, 391 pod_get_mod_param_1_double_precision,
392 pod_set_mod_param_1_double_precision); 392 pod_set_mod_param_1_double_precision);
393static DEVICE_ATTR(delay_param_1_double_precision, S_IWUGO | S_IRUGO, 393static DEVICE_ATTR(delay_param_1_double_precision, S_IWUSR | S_IRUGO,
394 pod_get_delay_param_1_double_precision, 394 pod_get_delay_param_1_double_precision,
395 pod_set_delay_param_1_double_precision); 395 pod_set_delay_param_1_double_precision);
396static DEVICE_ATTR(eq_enable, S_IWUGO | S_IRUGO, pod_get_eq_enable, 396static DEVICE_ATTR(eq_enable, S_IWUSR | S_IRUGO, pod_get_eq_enable,
397 pod_set_eq_enable); 397 pod_set_eq_enable);
398static DEVICE_ATTR(tap, S_IWUGO | S_IRUGO, pod_get_tap, pod_set_tap); 398static DEVICE_ATTR(tap, S_IWUSR | S_IRUGO, pod_get_tap, pod_set_tap);
399static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUGO | S_IRUGO, 399static DEVICE_ATTR(volume_tweak_pedal_assign, S_IWUSR | S_IRUGO,
400 pod_get_volume_tweak_pedal_assign, 400 pod_get_volume_tweak_pedal_assign,
401 pod_set_volume_tweak_pedal_assign); 401 pod_set_volume_tweak_pedal_assign);
402static DEVICE_ATTR(band_5_frequency, S_IWUGO | S_IRUGO, 402static DEVICE_ATTR(band_5_frequency, S_IWUSR | S_IRUGO,
403 pod_get_band_5_frequency, pod_set_band_5_frequency); 403 pod_get_band_5_frequency, pod_set_band_5_frequency);
404static DEVICE_ATTR(tuner, S_IWUGO | S_IRUGO, pod_get_tuner, pod_set_tuner); 404static DEVICE_ATTR(tuner, S_IWUSR | S_IRUGO, pod_get_tuner, pod_set_tuner);
405static DEVICE_ATTR(mic_selection, S_IWUGO | S_IRUGO, pod_get_mic_selection, 405static DEVICE_ATTR(mic_selection, S_IWUSR | S_IRUGO, pod_get_mic_selection,
406 pod_set_mic_selection); 406 pod_set_mic_selection);
407static DEVICE_ATTR(cabinet_model, S_IWUGO | S_IRUGO, pod_get_cabinet_model, 407static DEVICE_ATTR(cabinet_model, S_IWUSR | S_IRUGO, pod_get_cabinet_model,
408 pod_set_cabinet_model); 408 pod_set_cabinet_model);
409static DEVICE_ATTR(stomp_model, S_IWUGO | S_IRUGO, pod_get_stomp_model, 409static DEVICE_ATTR(stomp_model, S_IWUSR | S_IRUGO, pod_get_stomp_model,
410 pod_set_stomp_model); 410 pod_set_stomp_model);
411static DEVICE_ATTR(roomlevel, S_IWUGO | S_IRUGO, pod_get_roomlevel, 411static DEVICE_ATTR(roomlevel, S_IWUSR | S_IRUGO, pod_get_roomlevel,
412 pod_set_roomlevel); 412 pod_set_roomlevel);
413static DEVICE_ATTR(band_4_frequency, S_IWUGO | S_IRUGO, 413static DEVICE_ATTR(band_4_frequency, S_IWUSR | S_IRUGO,
414 pod_get_band_4_frequency, pod_set_band_4_frequency); 414 pod_get_band_4_frequency, pod_set_band_4_frequency);
415static DEVICE_ATTR(band_6_frequency, S_IWUGO | S_IRUGO, 415static DEVICE_ATTR(band_6_frequency, S_IWUSR | S_IRUGO,
416 pod_get_band_6_frequency, pod_set_band_6_frequency); 416 pod_get_band_6_frequency, pod_set_band_6_frequency);
417static DEVICE_ATTR(stomp_param_1_note_value, S_IWUGO | S_IRUGO, 417static DEVICE_ATTR(stomp_param_1_note_value, S_IWUSR | S_IRUGO,
418 pod_get_stomp_param_1_note_value, 418 pod_get_stomp_param_1_note_value,
419 pod_set_stomp_param_1_note_value); 419 pod_set_stomp_param_1_note_value);
420static DEVICE_ATTR(stomp_param_2, S_IWUGO | S_IRUGO, pod_get_stomp_param_2, 420static DEVICE_ATTR(stomp_param_2, S_IWUSR | S_IRUGO, pod_get_stomp_param_2,
421 pod_set_stomp_param_2); 421 pod_set_stomp_param_2);
422static DEVICE_ATTR(stomp_param_3, S_IWUGO | S_IRUGO, pod_get_stomp_param_3, 422static DEVICE_ATTR(stomp_param_3, S_IWUSR | S_IRUGO, pod_get_stomp_param_3,
423 pod_set_stomp_param_3); 423 pod_set_stomp_param_3);
424static DEVICE_ATTR(stomp_param_4, S_IWUGO | S_IRUGO, pod_get_stomp_param_4, 424static DEVICE_ATTR(stomp_param_4, S_IWUSR | S_IRUGO, pod_get_stomp_param_4,
425 pod_set_stomp_param_4); 425 pod_set_stomp_param_4);
426static DEVICE_ATTR(stomp_param_5, S_IWUGO | S_IRUGO, pod_get_stomp_param_5, 426static DEVICE_ATTR(stomp_param_5, S_IWUSR | S_IRUGO, pod_get_stomp_param_5,
427 pod_set_stomp_param_5); 427 pod_set_stomp_param_5);
428static DEVICE_ATTR(stomp_param_6, S_IWUGO | S_IRUGO, pod_get_stomp_param_6, 428static DEVICE_ATTR(stomp_param_6, S_IWUSR | S_IRUGO, pod_get_stomp_param_6,
429 pod_set_stomp_param_6); 429 pod_set_stomp_param_6);
430static DEVICE_ATTR(amp_switch_select, S_IWUGO | S_IRUGO, 430static DEVICE_ATTR(amp_switch_select, S_IWUSR | S_IRUGO,
431 pod_get_amp_switch_select, pod_set_amp_switch_select); 431 pod_get_amp_switch_select, pod_set_amp_switch_select);
432static DEVICE_ATTR(delay_param_4, S_IWUGO | S_IRUGO, pod_get_delay_param_4, 432static DEVICE_ATTR(delay_param_4, S_IWUSR | S_IRUGO, pod_get_delay_param_4,
433 pod_set_delay_param_4); 433 pod_set_delay_param_4);
434static DEVICE_ATTR(delay_param_5, S_IWUGO | S_IRUGO, pod_get_delay_param_5, 434static DEVICE_ATTR(delay_param_5, S_IWUSR | S_IRUGO, pod_get_delay_param_5,
435 pod_set_delay_param_5); 435 pod_set_delay_param_5);
436static DEVICE_ATTR(delay_pre_post, S_IWUGO | S_IRUGO, pod_get_delay_pre_post, 436static DEVICE_ATTR(delay_pre_post, S_IWUSR | S_IRUGO, pod_get_delay_pre_post,
437 pod_set_delay_pre_post); 437 pod_set_delay_pre_post);
438static DEVICE_ATTR(delay_model, S_IWUGO | S_IRUGO, pod_get_delay_model, 438static DEVICE_ATTR(delay_model, S_IWUSR | S_IRUGO, pod_get_delay_model,
439 pod_set_delay_model); 439 pod_set_delay_model);
440static DEVICE_ATTR(delay_verb_model, S_IWUGO | S_IRUGO, 440static DEVICE_ATTR(delay_verb_model, S_IWUSR | S_IRUGO,
441 pod_get_delay_verb_model, pod_set_delay_verb_model); 441 pod_get_delay_verb_model, pod_set_delay_verb_model);
442static DEVICE_ATTR(tempo_msb, S_IWUGO | S_IRUGO, pod_get_tempo_msb, 442static DEVICE_ATTR(tempo_msb, S_IWUSR | S_IRUGO, pod_get_tempo_msb,
443 pod_set_tempo_msb); 443 pod_set_tempo_msb);
444static DEVICE_ATTR(tempo_lsb, S_IWUGO | S_IRUGO, pod_get_tempo_lsb, 444static DEVICE_ATTR(tempo_lsb, S_IWUSR | S_IRUGO, pod_get_tempo_lsb,
445 pod_set_tempo_lsb); 445 pod_set_tempo_lsb);
446static DEVICE_ATTR(wah_model, S_IWUGO | S_IRUGO, pod_get_wah_model, 446static DEVICE_ATTR(wah_model, S_IWUSR | S_IRUGO, pod_get_wah_model,
447 pod_set_wah_model); 447 pod_set_wah_model);
448static DEVICE_ATTR(bypass_volume, S_IWUGO | S_IRUGO, pod_get_bypass_volume, 448static DEVICE_ATTR(bypass_volume, S_IWUSR | S_IRUGO, pod_get_bypass_volume,
449 pod_set_bypass_volume); 449 pod_set_bypass_volume);
450static DEVICE_ATTR(fx_loop_on_off, S_IWUGO | S_IRUGO, pod_get_fx_loop_on_off, 450static DEVICE_ATTR(fx_loop_on_off, S_IWUSR | S_IRUGO, pod_get_fx_loop_on_off,
451 pod_set_fx_loop_on_off); 451 pod_set_fx_loop_on_off);
452static DEVICE_ATTR(tweak_param_select, S_IWUGO | S_IRUGO, 452static DEVICE_ATTR(tweak_param_select, S_IWUSR | S_IRUGO,
453 pod_get_tweak_param_select, pod_set_tweak_param_select); 453 pod_get_tweak_param_select, pod_set_tweak_param_select);
454static DEVICE_ATTR(amp1_engage, S_IWUGO | S_IRUGO, pod_get_amp1_engage, 454static DEVICE_ATTR(amp1_engage, S_IWUSR | S_IRUGO, pod_get_amp1_engage,
455 pod_set_amp1_engage); 455 pod_set_amp1_engage);
456static DEVICE_ATTR(band_1_gain, S_IWUGO | S_IRUGO, pod_get_band_1_gain, 456static DEVICE_ATTR(band_1_gain, S_IWUSR | S_IRUGO, pod_get_band_1_gain,
457 pod_set_band_1_gain); 457 pod_set_band_1_gain);
458static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUGO | S_IRUGO, 458static DEVICE_ATTR2(band_2_gain__bass, band_2_gain, S_IWUSR | S_IRUGO,
459 pod_get_band_2_gain__bass, pod_set_band_2_gain__bass); 459 pod_get_band_2_gain__bass, pod_set_band_2_gain__bass);
460static DEVICE_ATTR(band_2_gain, S_IWUGO | S_IRUGO, pod_get_band_2_gain, 460static DEVICE_ATTR(band_2_gain, S_IWUSR | S_IRUGO, pod_get_band_2_gain,
461 pod_set_band_2_gain); 461 pod_set_band_2_gain);
462static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUGO | S_IRUGO, 462static DEVICE_ATTR2(band_3_gain__bass, band_3_gain, S_IWUSR | S_IRUGO,
463 pod_get_band_3_gain__bass, pod_set_band_3_gain__bass); 463 pod_get_band_3_gain__bass, pod_set_band_3_gain__bass);
464static DEVICE_ATTR(band_3_gain, S_IWUGO | S_IRUGO, pod_get_band_3_gain, 464static DEVICE_ATTR(band_3_gain, S_IWUSR | S_IRUGO, pod_get_band_3_gain,
465 pod_set_band_3_gain); 465 pod_set_band_3_gain);
466static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUGO | S_IRUGO, 466static DEVICE_ATTR2(band_4_gain__bass, band_4_gain, S_IWUSR | S_IRUGO,
467 pod_get_band_4_gain__bass, pod_set_band_4_gain__bass); 467 pod_get_band_4_gain__bass, pod_set_band_4_gain__bass);
468static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUGO | S_IRUGO, 468static DEVICE_ATTR2(band_5_gain__bass, band_5_gain, S_IWUSR | S_IRUGO,
469 pod_get_band_5_gain__bass, pod_set_band_5_gain__bass); 469 pod_get_band_5_gain__bass, pod_set_band_5_gain__bass);
470static DEVICE_ATTR(band_4_gain, S_IWUGO | S_IRUGO, pod_get_band_4_gain, 470static DEVICE_ATTR(band_4_gain, S_IWUSR | S_IRUGO, pod_get_band_4_gain,
471 pod_set_band_4_gain); 471 pod_set_band_4_gain);
472static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUGO | S_IRUGO, 472static DEVICE_ATTR2(band_6_gain__bass, band_6_gain, S_IWUSR | S_IRUGO,
473 pod_get_band_6_gain__bass, pod_set_band_6_gain__bass); 473 pod_get_band_6_gain__bass, pod_set_band_6_gain__bass);
474static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write); 474static DEVICE_ATTR(body, S_IRUGO, variax_get_body, line6_nop_write);
475static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable, 475static DEVICE_ATTR(pickup1_enable, S_IRUGO, variax_get_pickup1_enable,
diff --git a/drivers/staging/line6/midi.c b/drivers/staging/line6/midi.c
index 4304dfe6c166..ab67e889d2c4 100644
--- a/drivers/staging/line6/midi.c
+++ b/drivers/staging/line6/midi.c
@@ -350,9 +350,9 @@ static ssize_t midi_set_midi_mask_receive(struct device *dev,
350 return count; 350 return count;
351} 351}
352 352
353static DEVICE_ATTR(midi_mask_transmit, S_IWUGO | S_IRUGO, 353static DEVICE_ATTR(midi_mask_transmit, S_IWUSR | S_IRUGO,
354 midi_get_midi_mask_transmit, midi_set_midi_mask_transmit); 354 midi_get_midi_mask_transmit, midi_set_midi_mask_transmit);
355static DEVICE_ATTR(midi_mask_receive, S_IWUGO | S_IRUGO, 355static DEVICE_ATTR(midi_mask_receive, S_IWUSR | S_IRUGO,
356 midi_get_midi_mask_receive, midi_set_midi_mask_receive); 356 midi_get_midi_mask_receive, midi_set_midi_mask_receive);
357 357
358/* MIDI device destructor */ 358/* MIDI device destructor */
diff --git a/drivers/staging/line6/pcm.c b/drivers/staging/line6/pcm.c
index e54770e34d2e..b9c55f9eb501 100644
--- a/drivers/staging/line6/pcm.c
+++ b/drivers/staging/line6/pcm.c
@@ -79,9 +79,9 @@ static ssize_t pcm_set_impulse_period(struct device *dev,
79 return count; 79 return count;
80} 80}
81 81
82static DEVICE_ATTR(impulse_volume, S_IWUGO | S_IRUGO, pcm_get_impulse_volume, 82static DEVICE_ATTR(impulse_volume, S_IWUSR | S_IRUGO, pcm_get_impulse_volume,
83 pcm_set_impulse_volume); 83 pcm_set_impulse_volume);
84static DEVICE_ATTR(impulse_period, S_IWUGO | S_IRUGO, pcm_get_impulse_period, 84static DEVICE_ATTR(impulse_period, S_IWUSR | S_IRUGO, pcm_get_impulse_period,
85 pcm_set_impulse_period); 85 pcm_set_impulse_period);
86 86
87#endif 87#endif
diff --git a/drivers/staging/line6/pod.c b/drivers/staging/line6/pod.c
index 22e2cedcacf7..d9b30212585c 100644
--- a/drivers/staging/line6/pod.c
+++ b/drivers/staging/line6/pod.c
@@ -1051,48 +1051,48 @@ POD_GET_SYSTEM_PARAM(tuner_pitch, 1);
 #undef GET_SYSTEM_PARAM
 
 /* POD special files: */
-static DEVICE_ATTR(channel, S_IWUGO | S_IRUGO, pod_get_channel,
+static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel,
 		   pod_set_channel);
 static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
 static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
 static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
-static DEVICE_ATTR(dump, S_IWUGO | S_IRUGO, pod_get_dump, pod_set_dump);
-static DEVICE_ATTR(dump_buf, S_IWUGO | S_IRUGO, pod_get_dump_buf,
+static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
+static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf,
 		   pod_set_dump_buf);
-static DEVICE_ATTR(finish, S_IWUGO, line6_nop_read, pod_set_finish);
+static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
 static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version,
 		   line6_nop_write);
-static DEVICE_ATTR(midi_postprocess, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO,
 		   pod_get_midi_postprocess, pod_set_midi_postprocess);
-static DEVICE_ATTR(monitor_level, S_IWUGO | S_IRUGO, pod_get_monitor_level,
+static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level,
 		   pod_set_monitor_level);
 static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
 static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
-static DEVICE_ATTR(retrieve_amp_setup, S_IWUGO, line6_nop_read,
+static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read,
 		   pod_set_retrieve_amp_setup);
-static DEVICE_ATTR(retrieve_channel, S_IWUGO, line6_nop_read,
+static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read,
 		   pod_set_retrieve_channel);
-static DEVICE_ATTR(retrieve_effects_setup, S_IWUGO, line6_nop_read,
+static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read,
 		   pod_set_retrieve_effects_setup);
-static DEVICE_ATTR(routing, S_IWUGO | S_IRUGO, pod_get_routing,
+static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing,
 		   pod_set_routing);
 static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number,
 		   line6_nop_write);
-static DEVICE_ATTR(store_amp_setup, S_IWUGO, line6_nop_read,
+static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read,
 		   pod_set_store_amp_setup);
-static DEVICE_ATTR(store_channel, S_IWUGO, line6_nop_read,
+static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read,
 		   pod_set_store_channel);
-static DEVICE_ATTR(store_effects_setup, S_IWUGO, line6_nop_read,
+static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read,
 		   pod_set_store_effects_setup);
-static DEVICE_ATTR(tuner_freq, S_IWUGO | S_IRUGO, pod_get_tuner_freq,
+static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq,
 		   pod_set_tuner_freq);
-static DEVICE_ATTR(tuner_mute, S_IWUGO | S_IRUGO, pod_get_tuner_mute,
+static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute,
 		   pod_set_tuner_mute);
 static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
 static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);
 
 #ifdef CONFIG_LINE6_USB_RAW
-static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
 #endif
 
 /* control info callback */
diff --git a/drivers/staging/line6/toneport.c b/drivers/staging/line6/toneport.c
index 6a10b0f9749a..879e6992bbc6 100644
--- a/drivers/staging/line6/toneport.c
+++ b/drivers/staging/line6/toneport.c
@@ -154,9 +154,9 @@ static ssize_t toneport_set_led_green(struct device *dev,
 	return count;
 }
 
-static DEVICE_ATTR(led_red, S_IWUGO | S_IRUGO, line6_nop_read,
+static DEVICE_ATTR(led_red, S_IWUSR | S_IRUGO, line6_nop_read,
 		   toneport_set_led_red);
-static DEVICE_ATTR(led_green, S_IWUGO | S_IRUGO, line6_nop_read,
+static DEVICE_ATTR(led_green, S_IWUSR | S_IRUGO, line6_nop_read,
 		   toneport_set_led_green);
 
 static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
diff --git a/drivers/staging/line6/variax.c b/drivers/staging/line6/variax.c
index 894eee7f2317..81241cdf1be9 100644
--- a/drivers/staging/line6/variax.c
+++ b/drivers/staging/line6/variax.c
@@ -549,21 +549,21 @@ static ssize_t variax_set_raw2(struct device *dev,
 #endif
 
 /* Variax workbench special files: */
-static DEVICE_ATTR(model, S_IWUGO | S_IRUGO, variax_get_model,
+static DEVICE_ATTR(model, S_IWUSR | S_IRUGO, variax_get_model,
 		   variax_set_model);
-static DEVICE_ATTR(volume, S_IWUGO | S_IRUGO, variax_get_volume,
+static DEVICE_ATTR(volume, S_IWUSR | S_IRUGO, variax_get_volume,
 		   variax_set_volume);
-static DEVICE_ATTR(tone, S_IWUGO | S_IRUGO, variax_get_tone, variax_set_tone);
+static DEVICE_ATTR(tone, S_IWUSR | S_IRUGO, variax_get_tone, variax_set_tone);
 static DEVICE_ATTR(name, S_IRUGO, variax_get_name, line6_nop_write);
 static DEVICE_ATTR(bank, S_IRUGO, variax_get_bank, line6_nop_write);
 static DEVICE_ATTR(dump, S_IRUGO, variax_get_dump, line6_nop_write);
-static DEVICE_ATTR(active, S_IWUGO | S_IRUGO, variax_get_active,
+static DEVICE_ATTR(active, S_IWUSR | S_IRUGO, variax_get_active,
 		   variax_set_active);
 static DEVICE_ATTR(guitar, S_IRUGO, variax_get_guitar, line6_nop_write);
 
 #ifdef CONFIG_LINE6_USB_RAW
-static DEVICE_ATTR(raw, S_IWUGO, line6_nop_read, line6_set_raw);
-static DEVICE_ATTR(raw2, S_IWUGO, line6_nop_read, variax_set_raw2);
+static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
+static DEVICE_ATTR(raw2, S_IWUSR, line6_nop_read, variax_set_raw2);
 #endif
 
 /*
diff --git a/drivers/staging/quickstart/quickstart.c b/drivers/staging/quickstart/quickstart.c
index d746715d3d89..d83bec876d2e 100644
--- a/drivers/staging/quickstart/quickstart.c
+++ b/drivers/staging/quickstart/quickstart.c
@@ -355,7 +355,6 @@ static int quickstart_acpi_remove(struct acpi_device *device, int type)
 static void quickstart_exit(void)
 {
 	input_unregister_device(quickstart_input);
-	input_free_device(quickstart_input);
 
 	device_remove_file(&pf_device->dev, &dev_attr_pressed_button);
 	device_remove_file(&pf_device->dev, &dev_attr_buttons);
@@ -375,6 +374,7 @@ static int __init quickstart_init_input(void)
 {
 	struct quickstart_btn **ptr = &quickstart_data.btn_lst;
 	int count;
+	int ret;
 
 	quickstart_input = input_allocate_device();
 
@@ -391,7 +391,13 @@ static int __init quickstart_init_input(void)
 		ptr = &((*ptr)->next);
 	}
 
-	return input_register_device(quickstart_input);
+	ret = input_register_device(quickstart_input);
+	if (ret) {
+		input_free_device(quickstart_input);
+		return ret;
+	}
+
+	return 0;
 }
 
 static int __init quickstart_init(void)
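
The quickstart change fixes a common input-device lifecycle bug: input_unregister_device() already drops the device's last reference, so calling input_free_device() after it is a double free. input_free_device() is only correct when registration never happened or failed. A condensed sketch of the intended pattern (device and function names here are hypothetical):

	#include <linux/input.h>

	static struct input_dev *example_input;

	static int example_register(void)
	{
		int ret;

		example_input = input_allocate_device();
		if (!example_input)
			return -ENOMEM;

		example_input->name = "example";

		ret = input_register_device(example_input);
		if (ret) {
			/* never registered: freeing is our job */
			input_free_device(example_input);
			return ret;
		}
		return 0;
	}

	static void example_unregister(void)
	{
		/* registered: unregister drops the reference, never free again */
		input_unregister_device(example_input);
	}
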
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ddacfc6c4861..cd15daae5412 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -182,6 +182,7 @@ struct usb_device_id rtusb_usb_id[] = {
 	{USB_DEVICE(0x2001, 0x3C09)}, /* D-Link */
 	{USB_DEVICE(0x2001, 0x3C0A)}, /* D-Link 3072 */
 	{USB_DEVICE(0x2019, 0xED14)}, /* Planex Communications, Inc. */
+	{USB_DEVICE(0x0411, 0x015D)}, /* Buffalo Airstation WLI-UC-GN */
 	{} /* Terminating entry */
 };
 
diff --git a/drivers/staging/rtl8187se/r8185b_init.c b/drivers/staging/rtl8187se/r8185b_init.c
index 46000d72f4c4..3bdf9b31cc4e 100644
--- a/drivers/staging/rtl8187se/r8185b_init.c
+++ b/drivers/staging/rtl8187se/r8185b_init.c
@@ -264,8 +264,12 @@ HwHSSIThreeWire(
 
 		udelay(10);
 	}
-	if (TryCnt == TC_3W_POLL_MAX_TRY_CNT)
-		panic("HwThreeWire(): CmdReg: %#X RE|WE bits are not clear!!\n", u1bTmp);
+	if (TryCnt == TC_3W_POLL_MAX_TRY_CNT) {
+		printk(KERN_ERR "rtl8187se: HwThreeWire(): CmdReg:"
+		       " %#X RE|WE bits are not clear!!\n", u1bTmp);
+		dump_stack();
+		return 0;
+	}
 
 	/* RTL8187S HSSI Read/Write Function */
 	u1bTmp = read_nic_byte(dev, RF_SW_CONFIG);
@@ -298,13 +302,23 @@ HwHSSIThreeWire(
 		int idx;
 		int ByteCnt = nDataBufBitCnt / 8;
 		/* printk("%d\n",nDataBufBitCnt); */
-		if ((nDataBufBitCnt % 8) != 0)
-			panic("HwThreeWire(): nDataBufBitCnt(%d) should be multiple of 8!!!\n",
-			      nDataBufBitCnt);
+		if ((nDataBufBitCnt % 8) != 0) {
+			printk(KERN_ERR "rtl8187se: "
+			       "HwThreeWire(): nDataBufBitCnt(%d)"
+			       " should be multiple of 8!!!\n",
+			       nDataBufBitCnt);
+			dump_stack();
+			nDataBufBitCnt += 8;
+			nDataBufBitCnt &= ~7;
+		}
 
-		if (nDataBufBitCnt > 64)
-			panic("HwThreeWire(): nDataBufBitCnt(%d) should <= 64!!!\n",
-			      nDataBufBitCnt);
+		if (nDataBufBitCnt > 64) {
+			printk(KERN_ERR "rtl8187se: HwThreeWire():"
+			       " nDataBufBitCnt(%d) should <= 64!!!\n",
+			       nDataBufBitCnt);
+			dump_stack();
+			nDataBufBitCnt = 64;
+		}
 
 		for (idx = 0; idx < ByteCnt; idx++)
 			write_nic_byte(dev, (SW_3W_DB0+idx), *(pDataBuf+idx));
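
Replacing panic() with printk() plus dump_stack() here turns an unrecoverable, machine-halting assertion into a logged error the driver can survive; the code then clamps the bad bit count instead of taking the whole system down. A sketch of the general pattern (names and limits are illustrative, not from this driver):

	/* Validate a caller-supplied bit count without halting the machine:
	 * log loudly, record the call path, then clamp to something safe.
	 */
	static int sanitize_bit_count(int nbits)
	{
		if (nbits % 8) {
			printk(KERN_ERR "example: bit count %d not byte aligned\n",
			       nbits);
			dump_stack();
			nbits = (nbits + 7) & ~7;	/* round up to whole bytes */
		}
		if (nbits > 64) {
			printk(KERN_ERR "example: bit count %d exceeds 64\n",
			       nbits);
			dump_stack();
			nbits = 64;			/* clamp to the hardware limit */
		}
		return nbits;
	}
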
diff --git a/drivers/staging/rtl8712/usb_halinit.c b/drivers/staging/rtl8712/usb_halinit.c
index f6569dce3012..0e9483bbabe1 100644
--- a/drivers/staging/rtl8712/usb_halinit.c
+++ b/drivers/staging/rtl8712/usb_halinit.c
@@ -37,7 +37,7 @@ u8 r8712_usb_hal_bus_init(struct _adapter *padapter)
 {
 	u8 val8 = 0;
 	u8 ret = _SUCCESS;
-	u8 PollingCnt = 20;
+	int PollingCnt = 20;
 	struct registry_priv *pregistrypriv = &padapter->registrypriv;
 
 	if (pregistrypriv->chip_version == RTL8712_FPGA) {
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index eb44b60e1eb5..ac2bf11e1119 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -356,7 +356,7 @@ static ssize_t set_silent_state(struct device *dev,
 	}
 	return count;
 }
-static DEVICE_ATTR(silent, S_IWUGO | S_IRUGO,
+static DEVICE_ATTR(silent, S_IWUSR | S_IRUGO,
 		   get_silent_state, set_silent_state);
 
 
diff --git a/drivers/staging/speakup/fakekey.c b/drivers/staging/speakup/fakekey.c
index adb93f21c0d6..65b231178f05 100644
--- a/drivers/staging/speakup/fakekey.c
+++ b/drivers/staging/speakup/fakekey.c
@@ -62,7 +62,6 @@ void speakup_remove_virtual_keyboard(void)
 {
 	if (virt_keyboard != NULL) {
 		input_unregister_device(virt_keyboard);
-		input_free_device(virt_keyboard);
 		virt_keyboard = NULL;
 	}
 }
diff --git a/drivers/staging/spectra/ffsport.c b/drivers/staging/spectra/ffsport.c
index c7932da03c56..63a9d0adf32d 100644
--- a/drivers/staging/spectra/ffsport.c
+++ b/drivers/staging/spectra/ffsport.c
@@ -656,7 +656,7 @@ static int SBD_setup_device(struct spectra_nand_dev *dev, int which)
 	/* Here we force report 512 byte hardware sector size to Kernel */
 	blk_queue_logical_block_size(dev->queue, 512);
 
-	blk_queue_ordered(dev->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+	blk_queue_flush(dev->queue, REQ_FLUSH);
 
 	dev->thread = kthread_run(spectra_trans_thread, dev, "nand_thd");
 	if (IS_ERR(dev->thread)) {
diff --git a/drivers/staging/tm6000/tm6000-cards.c b/drivers/staging/tm6000/tm6000-cards.c
index 664e6038090d..b143258f094a 100644
--- a/drivers/staging/tm6000/tm6000-cards.c
+++ b/drivers/staging/tm6000/tm6000-cards.c
@@ -545,7 +545,7 @@ static void tm6000_config_tuner(struct tm6000_core *dev)
 
 	/* Load tuner module */
 	v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-		NULL, "tuner", dev->tuner_addr, NULL);
+		"tuner", dev->tuner_addr, NULL);
 
 	memset(&tun_setup, 0, sizeof(tun_setup));
 	tun_setup.type = dev->tuner_type;
@@ -683,7 +683,7 @@ static int tm6000_init_dev(struct tm6000_core *dev)
 
 	if (dev->caps.has_tda9874)
 		v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap,
-			NULL, "tvaudio", I2C_ADDR_TDA9874, NULL);
+			"tvaudio", I2C_ADDR_TDA9874, NULL);
 
 	/* register and initialize V4L2 */
 	rc = tm6000_v4l2_register(dev);
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index fed25105970a..b7ac16005265 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -1441,7 +1441,7 @@ static struct device_attribute fb_device_attrs[] = {
 	__ATTR_RO(metrics_bytes_identical),
 	__ATTR_RO(metrics_bytes_sent),
 	__ATTR_RO(metrics_cpu_kcycles_used),
-	__ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store),
+	__ATTR(metrics_reset, S_IWUSR, NULL, metrics_reset_store),
 };
 
 /*
diff --git a/drivers/staging/winbond/sysdef.h b/drivers/staging/winbond/sysdef.h
index 9195adf98e14..d0d71f69bc8c 100644
--- a/drivers/staging/winbond/sysdef.h
+++ b/drivers/staging/winbond/sysdef.h
@@ -2,6 +2,9 @@
 
 #ifndef SYS_DEF_H
 #define SYS_DEF_H
+
+#include <linux/delay.h>
+
 #define WB_LINUX
 #define WB_LINUX_WPA_PSK
 
diff --git a/drivers/staging/zram/zram_sysfs.c b/drivers/staging/zram/zram_sysfs.c
index 6c574a994d11..6b3cf00b0ff4 100644
--- a/drivers/staging/zram/zram_sysfs.c
+++ b/drivers/staging/zram/zram_sysfs.c
@@ -189,10 +189,10 @@ static ssize_t mem_used_total_show(struct device *dev,
 	return sprintf(buf, "%llu\n", val);
 }
 
-static DEVICE_ATTR(disksize, S_IRUGO | S_IWUGO,
+static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
 		disksize_show, disksize_store);
 static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
-static DEVICE_ATTR(reset, S_IWUGO, NULL, reset_store);
+static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
 static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
 static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
 static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index c05c5af5aa04..35480dd57a30 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -559,6 +559,9 @@ void __tty_hangup(struct tty_struct *tty)
 
 	tty_lock();
 
+	/* some functions below drop BTM, so we need this bit */
+	set_bit(TTY_HUPPING, &tty->flags);
+
 	/* inuse_filps is protected by the single tty lock,
 	   this really needs to change if we want to flush the
 	   workqueue with the lock held */
@@ -578,6 +581,10 @@ void __tty_hangup(struct tty_struct *tty)
 	}
 	spin_unlock(&tty_files_lock);
 
+	/*
+	 * it drops BTM and thus races with reopen
+	 * we protect the race by TTY_HUPPING
+	 */
 	tty_ldisc_hangup(tty);
 
 	read_lock(&tasklist_lock);
@@ -615,7 +622,6 @@ void __tty_hangup(struct tty_struct *tty)
 	tty->session = NULL;
 	tty->pgrp = NULL;
 	tty->ctrl_status = 0;
-	set_bit(TTY_HUPPED, &tty->flags);
 	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
 
 	/* Account for the p->signal references we killed */
@@ -641,6 +647,7 @@ void __tty_hangup(struct tty_struct *tty)
 	 * can't yet guarantee all that.
 	 */
 	set_bit(TTY_HUPPED, &tty->flags);
+	clear_bit(TTY_HUPPING, &tty->flags);
 	tty_ldisc_enable(tty);
 
 	tty_unlock();
@@ -1310,7 +1317,9 @@ static int tty_reopen(struct tty_struct *tty)
 {
 	struct tty_driver *driver = tty->driver;
 
-	if (test_bit(TTY_CLOSING, &tty->flags))
+	if (test_bit(TTY_CLOSING, &tty->flags) ||
+	    test_bit(TTY_HUPPING, &tty->flags) ||
+	    test_bit(TTY_LDISC_CHANGING, &tty->flags))
 		return -EIO;
 
 	if (driver->type == TTY_DRIVER_TYPE_PTY &&
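
The TTY_HUPPING bit added above is a classic "operation in progress" guard: __tty_hangup() must drop the big TTY mutex around tty_ldisc_hangup(), and without the flag a concurrent tty_reopen() could slip in mid-hangup. The reopen path now refuses while the flag is set. A stripped-down sketch of the idiom, with generic object and flag names (not the actual tty structures):

	#include <linux/bitops.h>

	#define OP_IN_PROGRESS	0	/* bit number in obj->flags */

	struct obj {
		unsigned long flags;
	};

	/* Teardown side: mark the window in which the lock is dropped. */
	static void do_teardown(struct obj *o)
	{
		set_bit(OP_IN_PROGRESS, &o->flags);
		/* ... drop the lock, do blocking work, retake the lock ... */
		clear_bit(OP_IN_PROGRESS, &o->flags);
	}

	/* Reopen side: refuse to race with the teardown window. */
	static int do_reopen(struct obj *o)
	{
		if (test_bit(OP_IN_PROGRESS, &o->flags))
			return -EIO;
		return 0;
	}
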
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index d8e96b005023..4214d58276f7 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -454,6 +454,8 @@ static int tty_ldisc_open(struct tty_struct *tty, struct tty_ldisc *ld)
 		/* BTM here locks versus a hangup event */
 		WARN_ON(!tty_locked());
 		ret = ld->ops->open(tty);
+		if (ret)
+			clear_bit(TTY_LDISC_OPEN, &tty->flags);
 		return ret;
 	}
 	return 0;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a858d2b87b94..51fe1795d5a8 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -3,7 +3,7 @@
  *
  * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
  * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
- * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de>
+ * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
  * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
  *
  * Userspace IO
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index a8ea2f19a0cc..a84a451159ed 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -1,7 +1,7 @@
 /*
  * UIO Hilscher CIF card driver
  *
- * (C) 2007 Hans J. Koch <hjk@linutronix.de>
+ * (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
  * Original code (C) 2005 Benedikt Spranger <b.spranger@linutronix.de>
  *
  * Licensed under GPL version 2 only.
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index 5a18e9f7b836..5ffdb483b015 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -2,7 +2,7 @@
  * UIO driver for Hilscher NetX based fieldbus cards (cifX, comX).
  * See http://www.hilscher.com for details.
  *
- * (C) 2007 Hans J. Koch <hjk@linutronix.de>
+ * (C) 2007 Hans J. Koch <hjk@hansjkoch.de>
  * (C) 2008 Manuel Traut <manut@linutronix.de>
  *
  * Licensed under GPL version 2 only.
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c
index ea071a5b6eee..44447f54942f 100644
--- a/drivers/usb/atm/ueagle-atm.c
+++ b/drivers/usb/atm/ueagle-atm.c
@@ -2301,7 +2301,7 @@ out:
 	return ret;
 }
 
-static DEVICE_ATTR(stat_status, S_IWUGO | S_IRUGO, read_status, reboot);
+static DEVICE_ATTR(stat_status, S_IWUSR | S_IRUGO, read_status, reboot);
 
 static ssize_t read_human_status(struct device *dev,
 		struct device_attribute *attr, char *buf)
@@ -2364,8 +2364,7 @@ out:
 	return ret;
 }
 
-static DEVICE_ATTR(stat_human_status, S_IWUGO | S_IRUGO,
-		   read_human_status, NULL);
+static DEVICE_ATTR(stat_human_status, S_IRUGO, read_human_status, NULL);
 
 static ssize_t read_delin(struct device *dev, struct device_attribute *attr,
 		char *buf)
@@ -2397,7 +2396,7 @@ out:
 	return ret;
 }
 
-static DEVICE_ATTR(stat_delin, S_IWUGO | S_IRUGO, read_delin, NULL);
+static DEVICE_ATTR(stat_delin, S_IRUGO, read_delin, NULL);
 
 #define UEA_ATTR(name, reset)					\
 								\
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 61800f77dac8..ced846ac4141 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1330,6 +1330,8 @@ static int map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
 	 */
 
 	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
+		if (hcd->self.uses_pio_for_control)
+			return ret;
 		if (hcd->self.uses_dma) {
 			urb->setup_dma = dma_map_single(
 					hcd->self.controller,
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index b5e20e873cba..717ff653fa23 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -2017,7 +2017,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
 		}
 	} else {
 		/* gpio_request fail so use -EINVAL for gpio_is_valid */
-		ubc->vbus_pin = -EINVAL;
+		udc->vbus_pin = -EINVAL;
 	}
 }
 
diff --git a/drivers/usb/host/ehci-dbg.c b/drivers/usb/host/ehci-dbg.c
index 86afdc73322f..6e2599661b5b 100644
--- a/drivers/usb/host/ehci-dbg.c
+++ b/drivers/usb/host/ehci-dbg.c
@@ -1067,7 +1067,7 @@ static inline void create_debug_files (struct ehci_hcd *ehci)
 						&debug_registers_fops))
 		goto file_error;
 
-	if (!debugfs_create_file("lpm", S_IRUGO|S_IWUGO, ehci->debug_dir, bus,
+	if (!debugfs_create_file("lpm", S_IRUGO|S_IWUSR, ehci->debug_dir, bus,
 						&debug_lpm_fops))
 		goto file_error;
 
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 502a7e6fef42..e9062806d4a2 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1063,10 +1063,11 @@ rescan:
 				tmp && tmp != qh;
 				tmp = tmp->qh_next.qh)
 			continue;
-		/* periodic qh self-unlinks on empty */
-		if (!tmp)
-			goto nogood;
-		unlink_async (ehci, qh);
+		/* periodic qh self-unlinks on empty, and a COMPLETING qh
+		 * may already be unlinked.
+		 */
+		if (tmp)
+			unlink_async(ehci, qh);
 		/* FALL THROUGH */
 	case QH_STATE_UNLINK:		/* wait for hw to finish? */
 	case QH_STATE_UNLINK_WAIT:
@@ -1083,7 +1084,6 @@ idle_timeout:
 		}
 		/* else FALL THROUGH */
 	default:
-nogood:
 		/* caller was supposed to have unlinked any requests;
 		 * that's not our job.  just leak this memory.
 		 */
diff --git a/drivers/usb/host/ehci-mem.c b/drivers/usb/host/ehci-mem.c
index d36e4e75e08d..12f70c302b0b 100644
--- a/drivers/usb/host/ehci-mem.c
+++ b/drivers/usb/host/ehci-mem.c
@@ -141,6 +141,10 @@ static void ehci_mem_cleanup (struct ehci_hcd *ehci)
 		qh_put (ehci->async);
 	ehci->async = NULL;
 
+	if (ehci->dummy)
+		qh_put(ehci->dummy);
+	ehci->dummy = NULL;
+
 	/* DMA consistent memory and pools */
 	if (ehci->qtd_pool)
 		dma_pool_destroy (ehci->qtd_pool);
@@ -227,8 +231,26 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
 	if (ehci->periodic == NULL) {
 		goto fail;
 	}
-	for (i = 0; i < ehci->periodic_size; i++)
-		ehci->periodic [i] = EHCI_LIST_END(ehci);
+
+	if (ehci->use_dummy_qh) {
+		struct ehci_qh_hw	*hw;
+		ehci->dummy = ehci_qh_alloc(ehci, flags);
+		if (!ehci->dummy)
+			goto fail;
+
+		hw = ehci->dummy->hw;
+		hw->hw_next = EHCI_LIST_END(ehci);
+		hw->hw_qtd_next = EHCI_LIST_END(ehci);
+		hw->hw_alt_next = EHCI_LIST_END(ehci);
+		hw->hw_token &= ~QTD_STS_ACTIVE;
+		ehci->dummy->hw = hw;
+
+		for (i = 0; i < ehci->periodic_size; i++)
+			ehci->periodic[i] = ehci->dummy->qh_dma;
+	} else {
+		for (i = 0; i < ehci->periodic_size; i++)
+			ehci->periodic[i] = EHCI_LIST_END(ehci);
+	}
 
 	/* software shadow of hardware table */
 	ehci->pshadow = kcalloc(ehci->periodic_size, sizeof(void *), flags);
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index a1e8d273103f..655f3c9f88bf 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -103,6 +103,19 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 	if (retval)
 		return retval;
 
+	if ((pdev->vendor == PCI_VENDOR_ID_AMD && pdev->device == 0x7808) ||
+	    (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x4396)) {
+		/* EHCI controller on AMD SB700/SB800/Hudson-2/3 platforms may
+		 * read/write memory space which does not belong to it when
+		 * there is NULL pointer with T-bit set to 1 in the frame list
+		 * table. To avoid the issue, the frame list link pointer
+		 * should always contain a valid pointer to a inactive qh.
+		 */
+		ehci->use_dummy_qh = 1;
+		ehci_info(ehci, "applying AMD SB700/SB800/Hudson-2/3 EHCI "
+				"dummy qh workaround\n");
+	}
+
 	/* data structure init */
 	retval = ehci_init(hcd);
 	if (retval)
@@ -148,6 +161,18 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 		if (pdev->revision < 0xa4)
 			ehci->no_selective_suspend = 1;
 		break;
+
+	/* MCP89 chips on the MacBookAir3,1 give EPROTO when
+	 * fetching device descriptors unless LPM is disabled.
+	 * There are also intermittent problems enumerating
+	 * devices with PPCD enabled.
+	 */
+	case 0x0d9d:
+		ehci_info(ehci, "disable lpm/ppcd for nvidia mcp89");
+		ehci->has_lpm = 0;
+		ehci->has_ppcd = 0;
+		ehci->command &= ~CMD_PPCEE;
+		break;
 	}
 	break;
 case PCI_VENDOR_ID_VIA:
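
Both EHCI fixups in this file follow the same quirk shape: match PCI vendor/device IDs once at setup time, flip a flag on the driver-private struct, and let the generic code honor the flag later (here, ehci-mem.c and ehci-sched.c consult use_dummy_qh). A minimal sketch of that detection step, using the AMD/ATI IDs from the hunk above (the helper name is hypothetical):

	#include <linux/pci.h>

	/* Does this controller need the inactive-dummy-qh frame list quirk? */
	static bool needs_dummy_qh_quirk(struct pci_dev *pdev)
	{
		return (pdev->vendor == PCI_VENDOR_ID_AMD &&
			pdev->device == 0x7808) ||
		       (pdev->vendor == PCI_VENDOR_ID_ATI &&
			pdev->device == 0x4396);
	}
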
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index a92526d6e5ae..d9f78eb26572 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -98,7 +98,14 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
 	 */
 	*prev_p = *periodic_next_shadow(ehci, &here,
 			Q_NEXT_TYPE(ehci, *hw_p));
-	*hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p));
+
+	if (!ehci->use_dummy_qh ||
+	    *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p))
+			!= EHCI_LIST_END(ehci))
+		*hw_p = *shadow_next_periodic(ehci, &here,
+				Q_NEXT_TYPE(ehci, *hw_p));
+	else
+		*hw_p = ehci->dummy->qh_dma;
 }
 
 /* how many of the uframe's 125 usecs are allocated? */
@@ -2335,7 +2342,11 @@ restart:
 			 * pointer for much longer, if at all.
 			 */
 			*q_p = q.itd->itd_next;
-			*hw_p = q.itd->hw_next;
+			if (!ehci->use_dummy_qh ||
+			    q.itd->hw_next != EHCI_LIST_END(ehci))
+				*hw_p = q.itd->hw_next;
+			else
+				*hw_p = ehci->dummy->qh_dma;
 			type = Q_NEXT_TYPE(ehci, q.itd->hw_next);
 			wmb();
 			modified = itd_complete (ehci, q.itd);
@@ -2368,7 +2379,11 @@ restart:
 			 * URB completion.
 			 */
 			*q_p = q.sitd->sitd_next;
-			*hw_p = q.sitd->hw_next;
+			if (!ehci->use_dummy_qh ||
+			    q.sitd->hw_next != EHCI_LIST_END(ehci))
+				*hw_p = q.sitd->hw_next;
+			else
+				*hw_p = ehci->dummy->qh_dma;
 			type = Q_NEXT_TYPE(ehci, q.sitd->hw_next);
 			wmb();
 			modified = sitd_complete (ehci, q.sitd);
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index bde823f704e9..ba8eab366b82 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -73,6 +73,7 @@ struct ehci_hcd { /* one per controller */
 
 	/* async schedule support */
 	struct ehci_qh		*async;
+	struct ehci_qh		*dummy;		/* For AMD quirk use */
 	struct ehci_qh		*reclaim;
 	unsigned		scanning : 1;
 
@@ -131,6 +132,7 @@ struct ehci_hcd { /* one per controller */
 	unsigned		need_io_watchdog:1;
 	unsigned		broken_periodic:1;
 	unsigned		fs_i_thresh:1;	/* Intel iso scheduling */
+	unsigned		use_dummy_qh:1;	/* AMD Frame List table quirk*/
 
 	/* required for usb32 quirk */
 	#define OHCI_CTRL_HCFS		(3 << 6)
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 6c4fb4efb4bb..43a39eb56cc6 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -2683,7 +2683,7 @@ static int __devexit isp1362_remove(struct platform_device *pdev)
 	return 0;
 }
 
-static int __init isp1362_probe(struct platform_device *pdev)
+static int __devinit isp1362_probe(struct platform_device *pdev)
 {
 	struct usb_hcd *hcd;
 	struct isp1362_hcd *isp1362_hcd;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index fef5a1f9d483..5d963e350494 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -229,6 +229,13 @@ void xhci_ring_device(struct xhci_hcd *xhci, int slot_id)
 static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
 		u32 __iomem *addr, u32 port_status)
 {
+	/* Don't allow the USB core to disable SuperSpeed ports. */
+	if (xhci->port_array[wIndex] == 0x03) {
+		xhci_dbg(xhci, "Ignoring request to disable "
+				"SuperSpeed port.\n");
+		return;
+	}
+
 	/* Write 1 to disable the port */
 	xhci_writel(xhci, port_status | PORT_PE, addr);
 	port_status = xhci_readl(xhci, addr);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 202770676da3..0fae58ef8afe 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1045,7 +1045,7 @@ static inline u32 xhci_get_max_esit_payload(struct xhci_hcd *xhci,
 	if (udev->speed == USB_SPEED_SUPER)
 		return ep->ss_ep_comp.wBytesPerInterval;
 
-	max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+	max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
 	max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11;
 	/* A 0 in max burst means 1 transfer per ESIT */
 	return max_packet * (max_burst + 1);
@@ -1135,7 +1135,7 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
 		/* Fall through */
 	case USB_SPEED_FULL:
 	case USB_SPEED_LOW:
-		max_packet = ep->desc.wMaxPacketSize & 0x3ff;
+		max_packet = GET_MAX_PACKET(ep->desc.wMaxPacketSize);
 		ep_ctx->ep_info2 |= MAX_PACKET(max_packet);
 		break;
 	default:
@@ -1443,6 +1443,13 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
 	xhci->dcbaa = NULL;
 
 	scratchpad_free(xhci);
+
+	xhci->num_usb2_ports = 0;
+	xhci->num_usb3_ports = 0;
+	kfree(xhci->usb2_ports);
+	kfree(xhci->usb3_ports);
+	kfree(xhci->port_array);
+
 	xhci->page_size = 0;
 	xhci->page_shift = 0;
 	xhci->bus_suspended = 0;
@@ -1627,6 +1634,161 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
 			&xhci->ir_set->erst_dequeue);
 }
 
+static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
+		u32 __iomem *addr, u8 major_revision)
+{
+	u32 temp, port_offset, port_count;
+	int i;
+
+	if (major_revision > 0x03) {
+		xhci_warn(xhci, "Ignoring unknown port speed, "
+				"Ext Cap %p, revision = 0x%x\n",
+				addr, major_revision);
+		/* Ignoring port protocol we can't understand. FIXME */
+		return;
+	}
+
+	/* Port offset and count in the third dword, see section 7.2 */
+	temp = xhci_readl(xhci, addr + 2);
+	port_offset = XHCI_EXT_PORT_OFF(temp);
+	port_count = XHCI_EXT_PORT_COUNT(temp);
+	xhci_dbg(xhci, "Ext Cap %p, port offset = %u, "
+			"count = %u, revision = 0x%x\n",
+			addr, port_offset, port_count, major_revision);
+	/* Port count includes the current port offset */
+	if (port_offset == 0 || (port_offset + port_count - 1) > num_ports)
+		/* WTF? "Valid values are ‘1’ to MaxPorts" */
+		return;
+	port_offset--;
+	for (i = port_offset; i < (port_offset + port_count); i++) {
+		/* Duplicate entry.  Ignore the port if the revisions differ. */
+		if (xhci->port_array[i] != 0) {
+			xhci_warn(xhci, "Duplicate port entry, Ext Cap %p,"
+					" port %u\n", addr, i);
+			xhci_warn(xhci, "Port was marked as USB %u, "
+					"duplicated as USB %u\n",
+					xhci->port_array[i], major_revision);
+			/* Only adjust the roothub port counts if we haven't
+			 * found a similar duplicate.
+			 */
+			if (xhci->port_array[i] != major_revision &&
+					xhci->port_array[i] != (u8) -1) {
+				if (xhci->port_array[i] == 0x03)
+					xhci->num_usb3_ports--;
+				else
+					xhci->num_usb2_ports--;
+				xhci->port_array[i] = (u8) -1;
+			}
+			/* FIXME: Should we disable the port? */
+		}
+		xhci->port_array[i] = major_revision;
+		if (major_revision == 0x03)
+			xhci->num_usb3_ports++;
+		else
+			xhci->num_usb2_ports++;
+	}
+	/* FIXME: Should we disable ports not in the Extended Capabilities? */
+}
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
+ * specify what speeds each port is supposed to be.  We can't count on the port
+ * speed bits in the PORTSC register being correct until a device is connected,
+ * but we need to set up the two fake roothubs with the correct number of USB
+ * 3.0 and USB 2.0 ports at host controller initialization time.
+ */
+static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
+{
+	u32 __iomem *addr;
+	u32 offset;
+	unsigned int num_ports;
+	int i, port_index;
+
+	addr = &xhci->cap_regs->hcc_params;
+	offset = XHCI_HCC_EXT_CAPS(xhci_readl(xhci, addr));
+	if (offset == 0) {
+		xhci_err(xhci, "No Extended Capability registers, "
+				"unable to set up roothub.\n");
+		return -ENODEV;
+	}
+
+	num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
+	xhci->port_array = kzalloc(sizeof(*xhci->port_array)*num_ports, flags);
+	if (!xhci->port_array)
+		return -ENOMEM;
+
+	/*
+	 * For whatever reason, the first capability offset is from the
+	 * capability register base, not from the HCCPARAMS register.
+	 * See section 5.3.6 for offset calculation.
+	 */
+	addr = &xhci->cap_regs->hc_capbase + offset;
+	while (1) {
+		u32 cap_id;
+
+		cap_id = xhci_readl(xhci, addr);
+		if (XHCI_EXT_CAPS_ID(cap_id) == XHCI_EXT_CAPS_PROTOCOL)
+			xhci_add_in_port(xhci, num_ports, addr,
+					(u8) XHCI_EXT_PORT_MAJOR(cap_id));
+		offset = XHCI_EXT_CAPS_NEXT(cap_id);
+		if (!offset || (xhci->num_usb2_ports + xhci->num_usb3_ports)
+				== num_ports)
+			break;
+		/*
+		 * Once you're into the Extended Capabilities, the offset is
+		 * always relative to the register holding the offset.
+		 */
+		addr += offset;
+	}
+
+	if (xhci->num_usb2_ports == 0 && xhci->num_usb3_ports == 0) {
+		xhci_warn(xhci, "No ports on the roothubs?\n");
+		return -ENODEV;
+	}
+	xhci_dbg(xhci, "Found %u USB 2.0 ports and %u USB 3.0 ports.\n",
+			xhci->num_usb2_ports, xhci->num_usb3_ports);
+	/*
+	 * Note we could have all USB 3.0 ports, or all USB 2.0 ports.
+	 * Not sure how the USB core will handle a hub with no ports...
+	 */
+	if (xhci->num_usb2_ports) {
+		xhci->usb2_ports = kmalloc(sizeof(*xhci->usb2_ports)*
+				xhci->num_usb2_ports, flags);
+		if (!xhci->usb2_ports)
+			return -ENOMEM;
+
+		port_index = 0;
+		for (i = 0; i < num_ports; i++)
+			if (xhci->port_array[i] != 0x03) {
+				xhci->usb2_ports[port_index] =
+					&xhci->op_regs->port_status_base +
+					NUM_PORT_REGS*i;
+				xhci_dbg(xhci, "USB 2.0 port at index %u, "
+						"addr = %p\n", i,
+						xhci->usb2_ports[port_index]);
+				port_index++;
+			}
+	}
+	if (xhci->num_usb3_ports) {
+		xhci->usb3_ports = kmalloc(sizeof(*xhci->usb3_ports)*
+				xhci->num_usb3_ports, flags);
+		if (!xhci->usb3_ports)
+			return -ENOMEM;
+
+		port_index = 0;
+		for (i = 0; i < num_ports; i++)
+			if (xhci->port_array[i] == 0x03) {
+				xhci->usb3_ports[port_index] =
+					&xhci->op_regs->port_status_base +
+					NUM_PORT_REGS*i;
+				xhci_dbg(xhci, "USB 3.0 port at index %u, "
+						"addr = %p\n", i,
+						xhci->usb3_ports[port_index]);
+				port_index++;
+			}
+	}
+	return 0;
+}
 
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 {
@@ -1809,6 +1971,8 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
 
 	if (scratchpad_alloc(xhci, flags))
 		goto fail;
+	if (xhci_setup_port_arrays(xhci, flags))
+		goto fail;
 
 	return 0;
 
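
xhci_setup_port_arrays() walks the xHCI Extended Capabilities list: each "Supported Protocol" capability carries the major revision in bits 31:24 of its first dword, and the port offset/count in the third dword, which is exactly what the XHCI_EXT_PORT_* macros extract. A worked decoding example in plain C (the register values here are made up for illustration):

	#include <stdio.h>
	#include <stdint.h>

	#define XHCI_EXT_PORT_MAJOR(x)	(((x) >> 24) & 0xff)
	#define XHCI_EXT_PORT_OFF(x)	((x) & 0xff)
	#define XHCI_EXT_PORT_COUNT(x)	(((x) >> 8) & 0xff)

	int main(void)
	{
		uint32_t cap = 0x03000502;	/* hypothetical: major rev 3 (USB 3.0),
						 * next ptr 0x05, cap ID 2 (protocol) */
		uint32_t port_info = 0x00000205;	/* offset 5, count 2 */

		printf("major revision: USB %u.0\n", XHCI_EXT_PORT_MAJOR(cap));
		printf("ports %u..%u\n", XHCI_EXT_PORT_OFF(port_info),
		       XHCI_EXT_PORT_OFF(port_info) +
		       XHCI_EXT_PORT_COUNT(port_info) - 1);
		return 0;
	}

Running this would report a USB 3.0 protocol capability covering roothub ports 5 through 6, which is the information the driver uses to split ports between the USB 2.0 and USB 3.0 arrays.
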
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9f3115e729b1..df558f6f84e3 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -2104,7 +2104,6 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
 
 	if (!(status & STS_EINT)) {
 		spin_unlock(&xhci->lock);
-		xhci_warn(xhci, "Spurious interrupt.\n");
 		return IRQ_NONE;
 	}
 	xhci_dbg(xhci, "op reg status = %08x\n", status);
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 5d7d4e951ea4..45e4a3108cc3 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -577,6 +577,65 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
 	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
 }
 
+static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
+{
+	u64	val_64;
+
+	/* step 2: initialize command ring buffer */
+	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
+	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
+		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
+				       xhci->cmd_ring->dequeue) &
+		 (u64) ~CMD_RING_RSVD_BITS) |
+		 xhci->cmd_ring->cycle_state;
+	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
+			(long unsigned long) val_64);
+	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+}
+
+/*
+ * The whole command ring must be cleared to zero when we suspend the host.
+ *
+ * The host doesn't save the command ring pointer in the suspend well, so we
+ * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
+ * aligned, because of the reserved bits in the command ring dequeue pointer
+ * register.  Therefore, we can't just set the dequeue pointer back in the
+ * middle of the ring (TRBs are 16-byte aligned).
+ */
+static void xhci_clear_command_ring(struct xhci_hcd *xhci)
+{
+	struct xhci_ring *ring;
+	struct xhci_segment *seg;
+
+	ring = xhci->cmd_ring;
+	seg = ring->deq_seg;
+	do {
+		memset(seg->trbs, 0, SEGMENT_SIZE);
+		seg = seg->next;
+	} while (seg != ring->deq_seg);
+
+	/* Reset the software enqueue and dequeue pointers */
+	ring->deq_seg = ring->first_seg;
+	ring->dequeue = ring->first_seg->trbs;
+	ring->enq_seg = ring->deq_seg;
+	ring->enqueue = ring->dequeue;
+
+	/*
+	 * Ring is now zeroed, so the HW should look for change of ownership
+	 * when the cycle bit is set to 1.
+	 */
+	ring->cycle_state = 1;
+
+	/*
+	 * Reset the hardware dequeue pointer.
+	 * Yes, this will need to be re-written after resume, but we're paranoid
+	 * and want to make sure the hardware doesn't access bogus memory
+	 * because, say, the BIOS or an SMI started the host without changing
+	 * the command ring pointers.
+	 */
+	xhci_set_cmd_ring_deq(xhci);
+}
+
 /*
  * Stop HC (not bus-specific)
  *
@@ -604,6 +663,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
 		spin_unlock_irq(&xhci->lock);
 		return -ETIMEDOUT;
 	}
+	xhci_clear_command_ring(xhci);
 
 	/* step 3: save registers */
 	xhci_save_registers(xhci);
@@ -635,7 +695,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	u32			command, temp = 0;
 	struct usb_hcd		*hcd = xhci_to_hcd(xhci);
 	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
-	u64			val_64;
 	int			old_state, retval;
 
 	old_state = hcd->state;
@@ -648,15 +707,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 	/* step 1: restore register */
 	xhci_restore_registers(xhci);
 	/* step 2: initialize command ring buffer */
-	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
-	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
-		 (xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
-				       xhci->cmd_ring->dequeue) &
-		 (u64) ~CMD_RING_RSVD_BITS) |
-		 xhci->cmd_ring->cycle_state;
-	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
-			(long unsigned long) val_64);
-	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
+	xhci_set_cmd_ring_deq(xhci);
 	/* step 3: restore state and start state*/
 	/* step 3: set CRS flag */
 	command = xhci_readl(xhci, &xhci->op_regs->command);
@@ -714,6 +765,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		return retval;
 	}
 
+	spin_unlock_irq(&xhci->lock);
 	/* Re-setup MSI-X */
 	if (hcd->irq)
 		free_irq(hcd->irq, hcd);
@@ -736,6 +788,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 		hcd->irq = pdev->irq;
 	}
 
+	spin_lock_irq(&xhci->lock);
 	/* step 4: set Run/Stop bit */
 	command = xhci_readl(xhci, &xhci->op_regs->command);
 	command |= CMD_RUN;
@@ -1496,6 +1549,15 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
 		cmd_completion = command->completion;
 		cmd_status = &command->status;
 		command->command_trb = xhci->cmd_ring->enqueue;
+
+		/* Enqueue pointer can be left pointing to the link TRB,
+		 * we must handle that
+		 */
+		if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
+				== TRB_TYPE(TRB_LINK))
+			command->command_trb =
+				xhci->cmd_ring->enq_seg->next->trbs;
+
 		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
 	} else {
 		in_ctx = virt_dev->in_ctx;
@@ -2219,6 +2281,15 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
 	/* Attempt to submit the Reset Device command to the command ring */
 	spin_lock_irqsave(&xhci->lock, flags);
 	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
+
+	/* Enqueue pointer can be left pointing to the link TRB,
+	 * we must handle that
+	 */
+	if ((reset_device_cmd->command_trb->link.control & TRB_TYPE_BITMASK)
+			== TRB_TYPE(TRB_LINK))
+		reset_device_cmd->command_trb =
+			xhci->cmd_ring->enq_seg->next->trbs;
+
 	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
 	ret = xhci_queue_reset_device(xhci, slot_id);
 	if (ret) {
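
Two distinct fixes land in xhci.c here. First, the command ring is zeroed on suspend and its dequeue pointer is reprogrammed through the new xhci_set_cmd_ring_deq() helper on both the suspend and resume paths. Second, before reusing cmd_ring->enqueue as a command TRB pointer, the code now checks whether enqueue was left parked on a link TRB and, if so, advances to the first TRB of the next segment. A reduced sketch of that link-TRB check, a hypothetical helper built from the types and macros used in the hunks above:

	/* If the enqueue pointer rests on a link TRB, the next real command
	 * will actually be written at the start of the following segment,
	 * so point the bookkeeping there.  Sketch only; assumes the xhci
	 * ring structures from this patch.
	 */
	static union xhci_trb *real_command_trb(struct xhci_ring *ring)
	{
		union xhci_trb *trb = ring->enqueue;

		if ((trb->link.control & TRB_TYPE_BITMASK) ==
				TRB_TYPE(TRB_LINK))
			trb = ring->enq_seg->next->trbs;
		return trb;
	}
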
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 93d3bf4d213c..170c367112d2 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -454,6 +454,24 @@ struct xhci_doorbell_array {
 
 
 /**
+ * struct xhci_protocol_caps
+ * @revision:		major revision, minor revision, capability ID,
+ *			and next capability pointer.
+ * @name_string:	Four ASCII characters to say which spec this xHC
+ *			follows, typically "USB ".
+ * @port_info:		Port offset, count, and protocol-defined information.
+ */
+struct xhci_protocol_caps {
+	u32	revision;
+	u32	name_string;
+	u32	port_info;
+};
+
+#define	XHCI_EXT_PORT_MAJOR(x)	(((x) >> 24) & 0xff)
+#define	XHCI_EXT_PORT_OFF(x)	((x) & 0xff)
+#define	XHCI_EXT_PORT_COUNT(x)	(((x) >> 8) & 0xff)
+
+/**
  * struct xhci_container_ctx
  * @type: Type of context.  Used to calculated offsets to contained contexts.
  * @size: Size of the context data
@@ -621,6 +639,11 @@ struct xhci_ep_ctx {
 #define	MAX_PACKET_MASK		(0xffff << 16)
 #define MAX_PACKET_DECODED(p)	(((p) >> 16) & 0xffff)
 
+/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
+ * USB2.0 spec 9.6.6.
+ */
+#define GET_MAX_PACKET(p)	((p) & 0x7ff)
+
 /* tx_info bitmasks */
 #define AVG_TRB_LENGTH_FOR_EP(p)	((p) & 0xffff)
 #define MAX_ESIT_PAYLOAD_FOR_EP(p)	(((p) & 0xffff) << 16)
@@ -1235,6 +1258,14 @@ struct xhci_hcd {
 	u32			suspended_ports[8];	/* which ports are
 							   suspended */
 	unsigned long		resume_done[MAX_HC_PORTS];
+	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
+	u8			*port_array;
+	/* Array of pointers to USB 3.0 PORTSC registers */
+	u32 __iomem		**usb3_ports;
+	unsigned int		num_usb3_ports;
+	/* Array of pointers to USB 2.0 PORTSC registers */
+	u32 __iomem		**usb2_ports;
+	unsigned int		num_usb2_ports;
 };
 
 /* For testing purposes */
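
GET_MAX_PACKET() exists because wMaxPacketSize packs two fields: bits 10:0 are the max packet size and, for high-speed periodic endpoints, bits 12:11 encode additional transactions per microframe (USB 2.0 spec 9.6.6). Masking with 0x3ff, as the old xhci-mem.c code did, keeps only bits 9:0 and silently truncates a 1024-byte packet size to zero. A quick arithmetic check in plain C:

	#include <stdio.h>

	#define GET_MAX_PACKET(p)	((p) & 0x7ff)

	int main(void)
	{
		/* High-bandwidth isoc endpoint: 1024-byte packets with two
		 * extra transactions, so wMaxPacketSize = (2 << 11) | 1024.
		 */
		unsigned int wMaxPacketSize = 0x1400;

		printf("old mask 0x3ff -> %u (wrong, truncated)\n",
		       wMaxPacketSize & 0x3ff);
		printf("GET_MAX_PACKET -> %u\n",
		       GET_MAX_PACKET(wMaxPacketSize));
		return 0;
	}

The old mask yields 0 for this endpoint; GET_MAX_PACKET() correctly yields 1024.
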
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 2f43c57743c9..9251773ecef4 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -196,11 +196,9 @@ static ssize_t get_port1_handler(struct device *dev,
 	return read_port(dev, attr, buf, 1, CYPRESS_READ_PORT_ID1);
 }
 
-static DEVICE_ATTR(port0, S_IWUGO | S_IRUGO,
-		   get_port0_handler, set_port0_handler);
+static DEVICE_ATTR(port0, S_IRUGO | S_IWUSR, get_port0_handler, set_port0_handler);
 
-static DEVICE_ATTR(port1, S_IWUGO | S_IRUGO,
-		   get_port1_handler, set_port1_handler);
+static DEVICE_ATTR(port1, S_IRUGO | S_IWUSR, get_port1_handler, set_port1_handler);
 
 
 static int cypress_probe(struct usb_interface *interface,
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index d77aba46ae85..f63776a48e2a 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -86,7 +86,7 @@ static ssize_t set_speed(struct device *dev, struct device_attribute *attr,
 	return count;
 }
 
-static DEVICE_ATTR(speed, S_IWUGO | S_IRUGO, show_speed, set_speed);
+static DEVICE_ATTR(speed, S_IRUGO | S_IWUSR, show_speed, set_speed);
 
 static int tv_probe(struct usb_interface *interface,
 		    const struct usb_device_id *id)
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 63da2c3c838f..c96f51de1696 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -94,7 +94,7 @@ static ssize_t set_##value(struct device *dev, struct device_attribute *attr, co
 	change_color(led);					\
 	return count;						\
 }								\
-static DEVICE_ATTR(value, S_IWUGO | S_IRUGO, show_##value, set_##value);
+static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, show_##value, set_##value);
 show_set(blue);
 show_set(red);
 show_set(green);
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index de8ef945b536..417b8f207e8b 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -192,7 +192,7 @@ static ssize_t set_attr_##name(struct device *dev, \
 								\
 	return count;						\
 }								\
-static DEVICE_ATTR(name, S_IWUGO | S_IRUGO, show_attr_##name, set_attr_##name);
+static DEVICE_ATTR(name, S_IRUGO | S_IWUSR, show_attr_##name, set_attr_##name);
 
 static ssize_t show_attr_text(struct device *dev,
 	struct device_attribute *attr, char *buf)
@@ -223,7 +223,7 @@ static ssize_t set_attr_text(struct device *dev,
 	return count;
 }
 
-static DEVICE_ATTR(text, S_IWUGO | S_IRUGO, show_attr_text, set_attr_text);
+static DEVICE_ATTR(text, S_IRUGO | S_IWUSR, show_attr_text, set_attr_text);
 
 static ssize_t show_attr_decimals(struct device *dev,
 	struct device_attribute *attr, char *buf)
@@ -272,8 +272,7 @@ static ssize_t set_attr_decimals(struct device *dev,
 	return count;
 }
 
-static DEVICE_ATTR(decimals, S_IWUGO | S_IRUGO,
-	show_attr_decimals, set_attr_decimals);
+static DEVICE_ATTR(decimals, S_IRUGO | S_IWUSR, show_attr_decimals, set_attr_decimals);
 
 static ssize_t show_attr_textmode(struct device *dev,
 	struct device_attribute *attr, char *buf)
@@ -319,8 +318,7 @@ static ssize_t set_attr_textmode(struct device *dev,
 	return -EINVAL;
 }
 
-static DEVICE_ATTR(textmode, S_IWUGO | S_IRUGO,
-	show_attr_textmode, set_attr_textmode);
+static DEVICE_ATTR(textmode, S_IRUGO | S_IWUSR, show_attr_textmode, set_attr_textmode);
 
 
 MYDEV_ATTR_SIMPLE_UNSIGNED(powered, update_display_powered);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 719c6180b31f..ac5bfd619e62 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -536,6 +536,7 @@ static const struct file_operations yurex_fops = {
 	.open		= yurex_open,
 	.release	= yurex_release,
 	.fasync		= yurex_fasync,
+	.llseek		= default_llseek,
 };
 
 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index e6669fc3b804..99beebce8550 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2116,12 +2116,15 @@ bad_config:
 	 * Otherwise, wait till the gadget driver hooks up.
 	 */
 	if (!is_otg_enabled(musb) && is_host_enabled(musb)) {
+		struct usb_hcd *hcd = musb_to_hcd(musb);
+
 		MUSB_HST_MODE(musb);
 		musb->xceiv->default_a = 1;
 		musb->xceiv->state = OTG_STATE_A_IDLE;
 
 		status = usb_add_hcd(musb_to_hcd(musb), -1, 0);
 
+		hcd->self.uses_pio_for_control = 1;
 		DBG(1, "%s mode, status %d, devctl %02x %c\n",
 			"HOST", status,
 			musb_readb(musb->mregs, MUSB_DEVCTL),
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 36cfd060dbe5..9d6ade82b9f2 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -92,6 +92,59 @@
 
 /* ----------------------------------------------------------------------- */
 
+/* Maps the buffer to dma */
+
+static inline void map_dma_buffer(struct musb_request *request,
+				struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		request->request.dma = dma_map_single(
+				musb->controller,
+				request->request.buf,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+		request->mapped = 1;
+	} else {
+		dma_sync_single_for_device(musb->controller,
+				request->request.dma,
+				request->request.length,
+				request->tx
+					? DMA_TO_DEVICE
+					: DMA_FROM_DEVICE);
+		request->mapped = 0;
+	}
+}
+
+/* Unmap the buffer from dma and maps it back to cpu */
+static inline void unmap_dma_buffer(struct musb_request *request,
+				struct musb *musb)
+{
+	if (request->request.dma == DMA_ADDR_INVALID) {
+		DBG(20, "not unmapping a never mapped buffer\n");
+		return;
+	}
+	if (request->mapped) {
+		dma_unmap_single(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+		request->request.dma = DMA_ADDR_INVALID;
+		request->mapped = 0;
+	} else {
+		dma_sync_single_for_cpu(musb->controller,
+			request->request.dma,
+			request->request.length,
+			request->tx
+				? DMA_TO_DEVICE
+				: DMA_FROM_DEVICE);
+
+	}
+}
+
 /*
  * Immediately complete a request.
  *
@@ -119,24 +172,8 @@ __acquires(ep->musb->lock)
 
 	ep->busy = 1;
 	spin_unlock(&musb->lock);
-	if (is_dma_capable()) {
-		if (req->mapped) {
-			dma_unmap_single(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-			req->request.dma = DMA_ADDR_INVALID;
-			req->mapped = 0;
-		} else if (req->request.dma != DMA_ADDR_INVALID)
-			dma_sync_single_for_cpu(musb->controller,
-				req->request.dma,
-				req->request.length,
-				req->tx
-					? DMA_TO_DEVICE
-					: DMA_FROM_DEVICE);
-	}
+	if (is_dma_capable() && ep->dma)
+		unmap_dma_buffer(req, musb);
 	if (request->status == 0)
 		DBG(5, "%s done request %p, %d/%d\n",
 			ep->end_point.name, request,
@@ -395,6 +432,13 @@ static void txstate(struct musb *musb, struct musb_request *req)
 #endif
 
 	if (!use_dma) {
+		/*
+		 * Unmap the dma buffer back to cpu if dma channel
+		 * programming fails
+		 */
+		if (is_dma_capable() && musb_ep->dma)
+			unmap_dma_buffer(req, musb);
+
 		musb_write_fifo(musb_ep->hw_ep, fifo_count,
 				(u8 *) (request->buf + request->actual));
 		request->actual += fifo_count;
@@ -713,6 +757,21 @@ static void rxstate(struct musb *musb, struct musb_request *req)
 			return;
 		}
 #endif
+	/*
+	 * Unmap the dma buffer back to cpu if dma channel
+	 * programming fails. This buffer is mapped if the
+	 * channel allocation is successful
+	 */
+	if (is_dma_capable() && musb_ep->dma) {
+		unmap_dma_buffer(req, musb);
+
+		/*
+		 * Clear DMAENAB and AUTOCLEAR for the
+		 * PIO mode transfer
+		 */
+		csr &= ~(MUSB_RXCSR_DMAENAB | MUSB_RXCSR_AUTOCLEAR);
+		musb_writew(epio, MUSB_RXCSR, csr);
+	}
 
 	musb_read_fifo(musb_ep->hw_ep, fifo_count, (u8 *)
 			(request->buf + request->actual));
@@ -837,7 +896,9 @@ void musb_g_rx(struct musb *musb, u8 epnum)
 		if (!request)
 			return;
 	}
+#if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
 exit:
+#endif
 	/* Analyze request */
 	rxstate(musb, to_musb_request(request));
 }
@@ -1150,26 +1211,9 @@ static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
 	request->epnum = musb_ep->current_epnum;
 	request->tx = musb_ep->is_in;
 
-	if (is_dma_capable() && musb_ep->dma) {
-		if (request->request.dma == DMA_ADDR_INVALID) {
-			request->request.dma = dma_map_single(
-					musb->controller,
-					request->request.buf,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 1;
-		} else {
-			dma_sync_single_for_device(musb->controller,
-					request->request.dma,
-					request->request.length,
-					request->tx
-						? DMA_TO_DEVICE
-						: DMA_FROM_DEVICE);
-			request->mapped = 0;
-		}
-	} else
+	if (is_dma_capable() && musb_ep->dma)
+		map_dma_buffer(request, musb);
+	else
 		request->mapped = 0;
 
 	spin_lock_irqsave(&musb->lock, lockflags);
@@ -1789,6 +1833,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 	spin_unlock_irqrestore(&musb->lock, flags);
 
 	if (is_otg_enabled(musb)) {
+		struct usb_hcd *hcd = musb_to_hcd(musb);
+
 		DBG(3, "OTG startup...\n");
 
 		/* REVISIT: funcall to other code, which also
@@ -1803,6 +1849,8 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
 			musb->gadget_driver = NULL;
 			musb->g.dev.driver = NULL;
 			spin_unlock_irqrestore(&musb->lock, flags);
+		} else {
+			hcd->self.uses_pio_for_control = 1;
 		}
 	}
 }
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
index bdc3ea66be69..9fea48264fa2 100644
--- a/drivers/usb/otg/langwell_otg.c
+++ b/drivers/usb/otg/langwell_otg.c
@@ -1896,7 +1896,7 @@ set_a_bus_req(struct device *dev, struct device_attribute *attr,
 	}
 	return count;
 }
-static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUGO, get_a_bus_req, set_a_bus_req);
+static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req);
 
 static ssize_t
 get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1942,8 +1942,7 @@ set_a_bus_drop(struct device *dev, struct device_attribute *attr,
 	}
 	return count;
 }
-static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUGO,
-	get_a_bus_drop, set_a_bus_drop);
+static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop);
 
 static ssize_t
 get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
@@ -1988,7 +1987,7 @@ set_b_bus_req(struct device *dev, struct device_attribute *attr,
 	}
 	return count;
 }
-static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUGO, get_b_bus_req, set_b_bus_req);
+static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req);
 
 static ssize_t
 set_a_clr_err(struct device *dev, struct device_attribute *attr,
@@ -2012,7 +2011,7 @@ set_a_clr_err(struct device *dev, struct device_attribute *attr,
 	}
 	return count;
 }
-static DEVICE_ATTR(a_clr_err, S_IWUGO, NULL, set_a_clr_err);
+static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
 
 static struct attribute *inputs_attrs[] = {
 	&dev_attr_a_bus_req.attr,
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 76f8b3556672..6a50965e23f2 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -201,6 +201,7 @@ static struct usb_device_id id_table_combined [] = {
 	{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_5_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_MTXORB_6_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_R2000KU_TRUE_RNG) },
+	{ USB_DEVICE(FTDI_VID, FTDI_VARDAAN_PID) },
 	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0100_PID) },
 	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0101_PID) },
 	{ USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_0102_PID) },
@@ -696,6 +697,7 @@ static struct usb_device_id id_table_combined [] = {
 		.driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
 	{ USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
 	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
+	{ USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
 	{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 263f62551197..1286f1e23d8c 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -114,6 +114,9 @@
 /* Lenz LI-USB Computer Interface. */
 #define FTDI_LENZ_LIUSB_PID	0xD780
 
+/* Vardaan Enterprises Serial Interface VEUSB422R3 */
+#define FTDI_VARDAAN_PID	0xF070
+
 /*
  * Xsens Technologies BV products (http://www.xsens.com).
  */
@@ -721,6 +724,7 @@
  */
 #define RTSYSTEMS_VID		0x2100	/* Vendor ID */
 #define RTSYSTEMS_SERIAL_VX7_PID	0x9e52	/* Serial converter for VX-7 Radios using FT232RL */
+#define RTSYSTEMS_CT29B_PID	0x9e54	/* CT29B Radio Cable */
 
 /*
  * Bayer Ascensia Contour blood glucose meter USB-converter cable.
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 861223f2af6e..6954de50c0ff 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -51,6 +51,7 @@ static struct usb_driver usb_serial_driver = {
 	.suspend =	usb_serial_suspend,
 	.resume =	usb_serial_resume,
 	.no_dynamic_id =	1,
+	.supports_autosuspend =	1,
 };
 
 /* There is no MODULE_DEVICE_TABLE for usbserial.c.  Instead
@@ -1343,6 +1344,8 @@ int usb_serial_register(struct usb_serial_driver *driver)
 		return -ENODEV;
 
 	fixup_generic(driver);
+	if (driver->usb_driver)
+		driver->usb_driver->supports_autosuspend = 1;
 
 	if (!driver->description)
 		driver->description = driver->driver.name;
diff --git a/drivers/usb/storage/sierra_ms.c b/drivers/usb/storage/sierra_ms.c
index 57fc2f532cab..ceba512f84d0 100644
--- a/drivers/usb/storage/sierra_ms.c
+++ b/drivers/usb/storage/sierra_ms.c
@@ -121,7 +121,7 @@ static ssize_t show_truinst(struct device *dev, struct device_attribute *attr,
 	}
 	return result;
 }
-static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL);
+static DEVICE_ATTR(truinst, S_IRUGO, show_truinst, NULL);
 
 int sierra_ms_init(struct us_data *us)
 {
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4b4da5b86ff9..f442668a1e52 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -129,8 +129,9 @@ static void handle_tx(struct vhost_net *net)
 	size_t hdr_size;
 	struct socket *sock;
 
-	sock = rcu_dereference_check(vq->private_data,
-				     lockdep_is_held(&vq->mutex));
+	/* TODO: check that we are running from vhost_worker?
+	 * Not sure it's worth it, it's straight-forward enough. */
+	sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock)
 		return;
 
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index e207810bba3c..08703299ef61 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -197,12 +197,12 @@ static int backlight_suspend(struct device *dev, pm_message_t state)
 {
 	struct backlight_device *bd = to_backlight_device(dev);
 
-	if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
-		mutex_lock(&bd->ops_lock);
+	mutex_lock(&bd->ops_lock);
+	if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
 		bd->props.state |= BL_CORE_SUSPENDED;
 		backlight_update_status(bd);
-		mutex_unlock(&bd->ops_lock);
 	}
+	mutex_unlock(&bd->ops_lock);
 
 	return 0;
 }
@@ -211,12 +211,12 @@ static int backlight_resume(struct device *dev)
 {
 	struct backlight_device *bd = to_backlight_device(dev);
 
-	if (bd->ops->options & BL_CORE_SUSPENDRESUME) {
-		mutex_lock(&bd->ops_lock);
+	mutex_lock(&bd->ops_lock);
+	if (bd->ops && bd->ops->options & BL_CORE_SUSPENDRESUME) {
 		bd->props.state &= ~BL_CORE_SUSPENDED;
 		backlight_update_status(bd);
-		mutex_unlock(&bd->ops_lock);
 	}
+	mutex_unlock(&bd->ops_lock);
 
 	return 0;
 }
diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c
index cad7d45c8bac..c265aed09e04 100644
--- a/drivers/video/da8xx-fb.c
+++ b/drivers/video/da8xx-fb.c
@@ -1029,10 +1029,6 @@ static int __init fb_probe(struct platform_device *device)
 		goto err_release_pl_mem;
 	}
 
-	ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
-	if (ret)
-		goto err_release_pl_mem;
-
 	/* Initialize par */
 	da8xx_fb_info->var.bits_per_pixel = lcd_cfg->bpp;
 
@@ -1060,7 +1056,7 @@ static int __init fb_probe(struct platform_device *device)
 
 	ret = fb_alloc_cmap(&da8xx_fb_info->cmap, PALETTE_SIZE, 0);
 	if (ret)
-		goto err_free_irq;
+		goto err_release_pl_mem;
 	da8xx_fb_info->cmap.len = par->palette_sz;
 
 	/* initialize var_screeninfo */
@@ -1088,8 +1084,13 @@ static int __init fb_probe(struct platform_device *device)
 		goto err_cpu_freq;
 	}
 #endif
+
+	ret = request_irq(par->irq, lcdc_irq_handler, 0, DRIVER_NAME, par);
+	if (ret)
+		goto irq_freq;
 	return 0;
 
+irq_freq:
 #ifdef CONFIG_CPU_FREQ
 err_cpu_freq:
 	unregister_framebuffer(da8xx_fb_info);
@@ -1098,9 +1099,6 @@ err_cpu_freq:
 err_dealloc_cmap:
 	fb_dealloc_cmap(&da8xx_fb_info->cmap);
 
-err_free_irq:
-	free_irq(par->irq, par);
-
 err_release_pl_mem:
 	dma_free_coherent(NULL, PALETTE_SIZE, par->v_palette_base,
 			  par->p_palette_base);
diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
index f53b9f1d6aba..5c3960da755a 100644
--- a/drivers/video/fbcmap.c
+++ b/drivers/video/fbcmap.c
@@ -80,6 +80,7 @@ static const struct fb_cmap default_16_colors = {
  * @cmap: frame buffer colormap structure
  * @len: length of @cmap
  * @transp: boolean, 1 if there is transparency, 0 otherwise
+ * @flags: flags for kmalloc memory allocation
  *
  * Allocates memory for a colormap @cmap.  @len is the
  * number of entries in the palette.
@@ -88,34 +89,48 @@ static const struct fb_cmap default_16_colors = {
  *
  */
 
-int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp)
+int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
 {
-	int size = len*sizeof(u16);
-
-	if (cmap->len != len) {
-		fb_dealloc_cmap(cmap);
-		if (!len)
-			return 0;
-		if (!(cmap->red = kmalloc(size, GFP_ATOMIC)))
-			goto fail;
-		if (!(cmap->green = kmalloc(size, GFP_ATOMIC)))
-			goto fail;
-		if (!(cmap->blue = kmalloc(size, GFP_ATOMIC)))
-			goto fail;
-		if (transp) {
-			if (!(cmap->transp = kmalloc(size, GFP_ATOMIC)))
+	int size = len * sizeof(u16);
+	int ret = -ENOMEM;
+
+	if (cmap->len != len) {
+		fb_dealloc_cmap(cmap);
+		if (!len)
+			return 0;
+
+		cmap->red = kmalloc(size, flags);
+		if (!cmap->red)
+			goto fail;
+		cmap->green = kmalloc(size, flags);
+		if (!cmap->green)
+			goto fail;
+		cmap->blue = kmalloc(size, flags);
+		if (!cmap->blue)
+			goto fail;
+		if (transp) {
+			cmap->transp = kmalloc(size, flags);
+			if (!cmap->transp)
+				goto fail;
+		} else {
+			cmap->transp = NULL;
+		}
+	}
+	cmap->start = 0;
+	cmap->len = len;
+	ret = fb_copy_cmap(fb_default_cmap(len), cmap);
+	if (ret)
 			goto fail;
-	} else
-		cmap->transp = NULL;
-	}
-	cmap->start = 0;
-	cmap->len = len;
-	fb_copy_cmap(fb_default_cmap(len), cmap);
-	return 0;
+	return 0;
 
 fail:
 	fb_dealloc_cmap(cmap);
-	return -ENOMEM;
+	return ret;
+}
+
+int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp)
+{
+	return fb_alloc_cmap_gfp(cmap, len, transp, GFP_ATOMIC);
 }
 
 /**
@@ -250,8 +265,12 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
 	int rc, size = cmap->len * sizeof(u16);
 	struct fb_cmap umap;
 
+	if (size < 0 || size < cmap->len)
+		return -E2BIG;
+
 	memset(&umap, 0, sizeof(struct fb_cmap));
-	rc = fb_alloc_cmap(&umap, cmap->len, cmap->transp != NULL);
+	rc = fb_alloc_cmap_gfp(&umap, cmap->len, cmap->transp != NULL,
+				GFP_KERNEL);
 	if (rc)
 		return rc;
 	if (copy_from_user(umap.red, cmap->red, size) ||
diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h
index e4c4d89b7860..be8ccb47ebe0 100644
--- a/drivers/video/geode/lxfb.h
+++ b/drivers/video/geode/lxfb.h
@@ -22,6 +22,7 @@
 #define DC_HFILT_COUNT		0x100
 #define DC_VFILT_COUNT		0x100
 #define VP_COEFF_SIZE		0x1000
+#define VP_PAL_COUNT		0x100
 
 #define OUTPUT_CRT   0x01
 #define OUTPUT_PANEL 0x02
@@ -48,7 +49,8 @@ struct lxfb_par {
 	uint64_t vp[VP_REG_COUNT];
 	uint64_t fp[FP_REG_COUNT];
 
-	uint32_t pal[DC_PAL_COUNT];
+	uint32_t dc_pal[DC_PAL_COUNT];
+	uint32_t vp_pal[VP_PAL_COUNT];
 	uint32_t hcoeff[DC_HFILT_COUNT * 2];
 	uint32_t vcoeff[DC_VFILT_COUNT];
 	uint32_t vp_coeff[VP_COEFF_SIZE / 4];
diff --git a/drivers/video/geode/lxfb_ops.c b/drivers/video/geode/lxfb_ops.c
index bc35a95e59d4..79e9abc72b83 100644
--- a/drivers/video/geode/lxfb_ops.c
+++ b/drivers/video/geode/lxfb_ops.c
@@ -276,10 +276,10 @@ static void lx_graphics_enable(struct fb_info *info)
 	write_fp(par, FP_PT1, 0);
 	temp = FP_PT2_SCRC;
 
-	if (info->var.sync & FB_SYNC_HOR_HIGH_ACT)
+	if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
 		temp |= FP_PT2_HSP;
 
-	if (info->var.sync & FB_SYNC_VERT_HIGH_ACT)
+	if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
 		temp |= FP_PT2_VSP;
 
 	write_fp(par, FP_PT2, temp);
@@ -610,10 +610,15 @@ static void lx_save_regs(struct lxfb_par *par)
 	memcpy(par->vp, par->vp_regs, sizeof(par->vp));
 	memcpy(par->fp, par->vp_regs + VP_FP_START, sizeof(par->fp));
 
-	/* save the palette */
+	/* save the display controller palette */
 	write_dc(par, DC_PAL_ADDRESS, 0);
-	for (i = 0; i < ARRAY_SIZE(par->pal); i++)
-		par->pal[i] = read_dc(par, DC_PAL_DATA);
+	for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++)
+		par->dc_pal[i] = read_dc(par, DC_PAL_DATA);
+
+	/* save the video processor palette */
+	write_vp(par, VP_PAR, 0);
+	for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++)
+		par->vp_pal[i] = read_vp(par, VP_PDR);
 
 	/* save the horizontal filter coefficients */
 	filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
@@ -706,8 +711,8 @@ static void lx_restore_display_ctlr(struct lxfb_par *par)
 
 	/* restore the palette */
 	write_dc(par, DC_PAL_ADDRESS, 0);
-	for (i = 0; i < ARRAY_SIZE(par->pal); i++)
-		write_dc(par, DC_PAL_DATA, par->pal[i]);
+	for (i = 0; i < ARRAY_SIZE(par->dc_pal); i++)
+		write_dc(par, DC_PAL_DATA, par->dc_pal[i]);
 
 	/* restore the horizontal filter coefficients */
 	filt = par->dc[DC_IRQ_FILT_CTL] | DC_IRQ_FILT_CTL_H_FILT_SEL;
@@ -751,6 +756,11 @@ static void lx_restore_video_proc(struct lxfb_par *par)
 		}
 	}
 
+	/* restore video processor palette */
+	write_vp(par, VP_PAR, 0);
+	for (i = 0; i < ARRAY_SIZE(par->vp_pal); i++)
+		write_vp(par, VP_PDR, par->vp_pal[i]);
+
 	/* restore video coeff ram */
 	memcpy(par->vp_regs + VP_VCR, par->vp_coeff, sizeof(par->vp_coeff));
 }
diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c
index 7cfc170bce19..ca0f6be9d12e 100644
--- a/drivers/video/mx3fb.c
+++ b/drivers/video/mx3fb.c
@@ -27,6 +27,7 @@
 #include <linux/clk.h>
 #include <linux/mutex.h>
 
+#include <mach/dma.h>
 #include <mach/hardware.h>
 #include <mach/ipu.h>
 #include <mach/mx3fb.h>
@@ -1420,6 +1421,9 @@ static bool chan_filter(struct dma_chan *chan, void *arg)
 	struct device *dev;
 	struct mx3fb_platform_data *mx3fb_pdata;
 
+	if (!imx_dma_is_ipu(chan))
+		return false;
+
 	if (!rq)
 		return false;
 
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 9b1364723c65..b02d97a879d6 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -860,7 +860,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
 		/* Couldn't reconfigure, hopefully, can continue as before */
 		return;
 
-	info->fix.line_length = mode2.xres * (ch->cfg.bpp / 8);
+	info->fix.line_length = mode1.xres * (ch->cfg.bpp / 8);
 
 	/*
 	 * fb_set_var() calls the notifier change internally, only if
@@ -868,7 +868,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info)
 	 * user event, we have to call the chain ourselves.
 	 */
 	event.info = info;
-	event.data = &mode2;
+	event.data = &mode1;
 	fb_notifier_call_chain(evnt, &event);
 }
 
diff --git a/drivers/video/sis/init.c b/drivers/video/sis/init.c
index c311ad3c3687..31137adc8fba 100644
--- a/drivers/video/sis/init.c
+++ b/drivers/video/sis/init.c
@@ -62,11 +62,11 @@
 
 #include "init.h"
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 #include "300vtbl.h"
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 #include "310vtbl.h"
 #endif
 
@@ -78,7 +78,7 @@
 /* POINTER INITIALIZATION */
 /*********************************************/
 
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
 static void
 InitCommonPointer(struct SiS_Private *SiS_Pr)
 {
@@ -160,7 +160,7 @@ InitCommonPointer(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 static void
 InitTo300Pointer(struct SiS_Private *SiS_Pr)
 {
@@ -237,7 +237,7 @@ InitTo300Pointer(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static void
 InitTo310Pointer(struct SiS_Private *SiS_Pr)
 {
@@ -321,13 +321,13 @@ bool
 SiSInitPtr(struct SiS_Private *SiS_Pr)
 {
 	if(SiS_Pr->ChipType < SIS_315H) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 		InitTo300Pointer(SiS_Pr);
 #else
 		return false;
 #endif
 	} else {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 		InitTo310Pointer(SiS_Pr);
 #else
 		return false;
@@ -340,9 +340,7 @@ SiSInitPtr(struct SiS_Private *SiS_Pr)
 /* HELPER: Get ModeID */
 /*********************************************/
 
-#ifndef SIS_XORG_XF86
 static
-#endif
 unsigned short
 SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
 		int Depth, bool FSTN, int LCDwidth, int LCDheight)
@@ -884,51 +882,51 @@ SiS_GetModeID_VGA2(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDispl
 void
 SiS_SetReg(SISIOADDRESS port, unsigned short index, unsigned short data)
 {
-	OutPortByte(port, index);
-	OutPortByte(port + 1, data);
+	outb((u8)index, port);
+	outb((u8)data, port + 1);
 }
 
 void
 SiS_SetRegByte(SISIOADDRESS port, unsigned short data)
 {
-	OutPortByte(port, data);
+	outb((u8)data, port);
 }
 
 void
 SiS_SetRegShort(SISIOADDRESS port, unsigned short data)
 {
-	OutPortWord(port, data);
+	outw((u16)data, port);
 }
 
 void
 SiS_SetRegLong(SISIOADDRESS port, unsigned int data)
 {
-	OutPortLong(port, data);
+	outl((u32)data, port);
 }
 
 unsigned char
 SiS_GetReg(SISIOADDRESS port, unsigned short index)
 {
-	OutPortByte(port, index);
-	return(InPortByte(port + 1));
+	outb((u8)index, port);
+	return inb(port + 1);
 }
 
 unsigned char
 SiS_GetRegByte(SISIOADDRESS port)
 {
-	return(InPortByte(port));
+	return inb(port);
 }
 
 unsigned short
 SiS_GetRegShort(SISIOADDRESS port)
 {
-	return(InPortWord(port));
+	return inw(port);
 }
 
 unsigned int
 SiS_GetRegLong(SISIOADDRESS port)
 {
-	return(InPortLong(port));
+	return inl(port);
 }
 
 void
@@ -1089,7 +1087,7 @@ static void
 SiSInitPCIetc(struct SiS_Private *SiS_Pr)
 {
 	switch(SiS_Pr->ChipType) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 	case SIS_300:
 	case SIS_540:
 	case SIS_630:
@@ -1108,7 +1106,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr)
 		SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x5A);
 		break;
 #endif
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	case SIS_315H:
 	case SIS_315:
 	case SIS_315PRO:
@@ -1152,9 +1150,7 @@ SiSInitPCIetc(struct SiS_Private *SiS_Pr)
 /* HELPER: SetLVDSetc */
 /*********************************************/
 
-#ifdef SIS_LINUX_KERNEL
 static
-#endif
 void
 SiSSetLVDSetc(struct SiS_Private *SiS_Pr)
 {
@@ -1174,7 +1170,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr)
 	if((temp == 1) || (temp == 2)) return;
 
 	switch(SiS_Pr->ChipType) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 	case SIS_540:
 	case SIS_630:
 	case SIS_730:
@@ -1188,7 +1184,7 @@ SiSSetLVDSetc(struct SiS_Private *SiS_Pr)
 		}
 		break;
 #endif
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	case SIS_550:
 	case SIS_650:
 	case SIS_740:
@@ -1420,9 +1416,7 @@ SiS_ResetSegmentRegisters(struct SiS_Private *SiS_Pr)
 /* HELPER: GetVBType */
 /*********************************************/
 
-#ifdef SIS_LINUX_KERNEL
 static
-#endif
 void
 SiS_GetVBType(struct SiS_Private *SiS_Pr)
 {
@@ -1487,7 +1481,6 @@ SiS_GetVBType(struct SiS_Private *SiS_Pr)
 /* HELPER: Check RAM size */
 /*********************************************/
 
-#ifdef SIS_LINUX_KERNEL
 static bool
 SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		unsigned short ModeIdIndex)
@@ -1501,13 +1494,12 @@ SiS_CheckMemorySize(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 	if(AdapterMemSize < memorysize) return false;
 	return true;
 }
-#endif
 
 /*********************************************/
 /* HELPER: Get DRAM type */
 /*********************************************/
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static unsigned char
 SiS_Get310DRAMType(struct SiS_Private *SiS_Pr)
 {
@@ -1574,7 +1566,6 @@ SiS_GetMCLK(struct SiS_Private *SiS_Pr)
 /* HELPER: ClearBuffer */
 /*********************************************/
 
-#ifdef SIS_LINUX_KERNEL
 static void
 SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 {
@@ -1587,7 +1578,7 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 
 	if(SiS_Pr->SiS_ModeType >= ModeEGA) {
 		if(ModeNo > 0x13) {
-			SiS_SetMemory(memaddr, memsize, 0);
+			memset_io(memaddr, 0, memsize);
 		} else {
 			pBuffer = (unsigned short SISIOMEMTYPE *)memaddr;
 			for(i = 0; i < 0x4000; i++) writew(0x0000, &pBuffer[i]);
@@ -1596,10 +1587,9 @@ SiS_ClearBuffer(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 		pBuffer = (unsigned short SISIOMEMTYPE *)memaddr;
 		for(i = 0; i < 0x4000; i++) writew(0x0720, &pBuffer[i]);
 	} else {
-		SiS_SetMemory(memaddr, 0x8000, 0);
+		memset_io(memaddr, 0, 0x8000);
 	}
 }
-#endif
 
 /*********************************************/
 /* HELPER: SearchModeID */
@@ -2132,7 +2122,7 @@ SiS_SetCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		SiS_SetReg(SiS_Pr->SiS_P3d4,0x14,0x4F);
 	}
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	if(SiS_Pr->ChipType == XGI_20) {
 		SiS_SetReg(SiS_Pr->SiS_P3d4,0x04,crt1data[4] - 1);
 		if(!(temp = crt1data[5] & 0x1f)) {
@@ -2215,7 +2205,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 	SiS_SetReg(SiS_Pr->SiS_P3c4,0x2c,clkb);
 
 	if(SiS_Pr->ChipType >= SIS_315H) {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 		SiS_SetReg(SiS_Pr->SiS_P3c4,0x2D,0x01);
 		if(SiS_Pr->ChipType == XGI_20) {
 			unsigned short mf = SiS_GetModeFlag(SiS_Pr, ModeNo, ModeIdIndex);
@@ -2236,7 +2226,7 @@ SiS_SetCRT1VCLK(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 /* FIFO */
 /*********************************************/
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 void
 SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1,
 		unsigned short *idx2)
@@ -2506,11 +2496,7 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 	SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x09,0x80,data);
 
 	/* Write foreground and background queue */
-#ifdef SIS_LINUX_KERNEL
 	templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50);
-#else
-	templ = pciReadLong(0x00000000, 0x50);
-#endif
 
 	if(SiS_Pr->ChipType == SIS_730) {
 
@@ -2530,13 +2516,8 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 
 	}
 
-#ifdef SIS_LINUX_KERNEL
 	sisfb_write_nbridge_pci_dword(SiS_Pr, 0x50, templ);
 	templ = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xA0);
-#else
-	pciWriteLong(0x00000000, 0x50, templ);
-	templ = pciReadLong(0x00000000, 0xA0);
-#endif
 
 	/* GUI grant timer (PCI config 0xA3) */
 	if(SiS_Pr->ChipType == SIS_730) {
@@ -2552,15 +2533,11 @@ SiS_SetCRT1FIFO_630(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 
 	}
 
-#ifdef SIS_LINUX_KERNEL
 	sisfb_write_nbridge_pci_dword(SiS_Pr, 0xA0, templ);
-#else
-	pciWriteLong(0x00000000, 0xA0, templ);
-#endif
 }
-#endif /* SIS300 */
+#endif /* CONFIG_FB_SIS_300 */
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static void
 SiS_SetCRT1FIFO_310(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex)
 {
@@ -2612,7 +2589,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 	}
 
 	if(SiS_Pr->ChipType < SIS_315H) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 		if(VCLK > 150) data |= 0x80;
 		SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x07,0x7B,data);
 
@@ -2621,7 +2598,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xF7,data);
 #endif
 	} else if(SiS_Pr->ChipType < XGI_20) {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 		if(VCLK >= 166) data |= 0x0c;
 		SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data);
 
@@ -2630,7 +2607,7 @@ SiS_SetVCLKState(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		}
 #endif
 	} else {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 		if(VCLK >= 200) data |= 0x0c;
 		if(SiS_Pr->ChipType == XGI_20) data &= ~0x04;
 		SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x32,0xf3,data);
@@ -2675,7 +2652,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		unsigned short ModeIdIndex, unsigned short RRTI)
 {
 	unsigned short data, infoflag = 0, modeflag, resindex;
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
 	unsigned short data2, data3;
 #endif
@@ -2736,7 +2713,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 		SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0F,0xB7,data);
 	}
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	if(SiS_Pr->ChipType >= SIS_315H) {
 		SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x31,0xfb);
 	}
@@ -2826,7 +2803,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 
 	SiS_SetVCLKState(SiS_Pr, ModeNo, RRTI, ModeIdIndex);
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	if(((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->ChipType < SIS_661)) ||
 	   (SiS_Pr->ChipType == XGI_40)) {
 		if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x31) & 0x40) {
@@ -2845,7 +2822,7 @@ SiS_SetCRT1ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 #endif
 }
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static void
 SiS_SetupDualChip(struct SiS_Private *SiS_Pr)
 {
@@ -2999,11 +2976,6 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho
 	SiS_Pr->SiS_SelectCRT2Rate = 0;
 	SiS_Pr->SiS_SetFlag &= (~ProgrammingCRT2);
 
-#ifdef SIS_XORG_XF86
-	xf86DrvMsgVerb(0, X_PROBED, 4, "(init: VBType=0x%04x, VBInfo=0x%04x)\n",
-			SiS_Pr->SiS_VBType, SiS_Pr->SiS_VBInfo);
-#endif
-
 	if(SiS_Pr->SiS_VBInfo & SetSimuScanMode) {
 		if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) {
 			SiS_Pr->SiS_SetFlag |= ProgrammingCRT2;
@@ -3028,7 +3000,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho
 	}
 
 	switch(SiS_Pr->ChipType) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 	case SIS_300:
 		SiS_SetCRT1FIFO_300(SiS_Pr, ModeNo, RefreshRateTableIndex);
 		break;
@@ -3039,7 +3011,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho
 		break;
 #endif
 	default:
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 		if(SiS_Pr->ChipType == XGI_20) {
 			unsigned char sr2b = 0, sr2c = 0;
 			switch(ModeNo) {
@@ -3062,7 +3034,7 @@ SiS_SetCRT1Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sho
 
 	SiS_SetCRT1ModeRegs(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	if(SiS_Pr->ChipType == XGI_40) {
 		SiS_SetupDualChip(SiS_Pr);
 	}
@@ -3070,11 +3042,9 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 
 	SiS_LoadDAC(SiS_Pr, ModeNo, ModeIdIndex);
 
-#ifdef SIS_LINUX_KERNEL
 	if(SiS_Pr->SiS_flag_clearbuffer) {
 		SiS_ClearBuffer(SiS_Pr, ModeNo);
 	}
-#endif
 
 	if(!(SiS_Pr->SiS_VBInfo & (SetSimuScanMode | SwitchCRT2 | SetCRT2ToLCDA))) {
 		SiS_WaitRetrace1(SiS_Pr);
@@ -3104,7 +3074,7 @@ SiS_InitVB(struct SiS_Private *SiS_Pr)
 static void
 SiS_ResetVB(struct SiS_Private *SiS_Pr)
 {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
 	unsigned short temp;
 
@@ -3139,7 +3109,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr)
 	 * which locks CRT2 in some way to CRT1 timing. Disable
 	 * this here.
 	 */
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	if((IS_SIS651) || (IS_SISM650) ||
 	   SiS_Pr->ChipType == SIS_340 ||
 	   SiS_Pr->ChipType == XGI_40) {
@@ -3160,7 +3130,7 @@ SiS_StrangeStuff(struct SiS_Private *SiS_Pr)
 static void
 SiS_Handle760(struct SiS_Private *SiS_Pr)
 {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	unsigned int somebase;
 	unsigned char temp1, temp2, temp3;
 
@@ -3170,11 +3140,7 @@ SiS_Handle760(struct SiS_Private *SiS_Pr)
 	   (!(SiS_Pr->SiS_SysFlags & SF_760UMA)) )
 		return;
 
-#ifdef SIS_LINUX_KERNEL
 	somebase = sisfb_read_mio_pci_word(SiS_Pr, 0x74);
-#else
-	somebase = pciReadWord(0x00001000, 0x74);
-#endif
 	somebase &= 0xffff;
 
 	if(somebase == 0) return;
@@ -3190,105 +3156,34 @@ SiS_Handle760(struct SiS_Private *SiS_Pr)
 		temp2 = 0x0b;
 	}
 
-#ifdef SIS_LINUX_KERNEL
 	sisfb_write_nbridge_pci_byte(SiS_Pr, 0x7e, temp1);
 	sisfb_write_nbridge_pci_byte(SiS_Pr, 0x8d, temp2);
-#else
-	pciWriteByte(0x00000000, 0x7e, temp1);
-	pciWriteByte(0x00000000, 0x8d, temp2);
-#endif
 
 	SiS_SetRegByte((somebase + 0x85), temp3);
 #endif
 }
 
 /*********************************************/
-/* X.org/XFree86: SET SCREEN PITCH */
-/*********************************************/
-
-#ifdef SIS_XORG_XF86
-static void
-SiS_SetPitchCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn)
-{
-	SISPtr pSiS = SISPTR(pScrn);
-	unsigned short HDisplay = pSiS->scrnPitch >> 3;
-
-	SiS_SetReg(SiS_Pr->SiS_P3d4,0x13,(HDisplay & 0xFF));
-	SiS_SetRegANDOR(SiS_Pr->SiS_P3c4,0x0E,0xF0,(HDisplay >> 8));
-}
-
-static void
-SiS_SetPitchCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn)
-{
-	SISPtr pSiS = SISPTR(pScrn);
-	unsigned short HDisplay = pSiS->scrnPitch2 >> 3;
-
-	/* Unlock CRT2 */
-	if(pSiS->VGAEngine == SIS_315_VGA)
-		SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2F, 0x01);
-	else
-		SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24, 0x01);
-
-	SiS_SetReg(SiS_Pr->SiS_Part1Port,0x07,(HDisplay & 0xFF));
-	SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x09,0xF0,(HDisplay >> 8));
-}
-
-static void
-SiS_SetPitch(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn)
-{
-	SISPtr pSiS = SISPTR(pScrn);
-	bool isslavemode = false;
-
-	if( (pSiS->VBFlags2 & VB2_VIDEOBRIDGE) &&
-	    ( ((pSiS->VGAEngine == SIS_300_VGA) &&
-	       (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0xa0) == 0x20) ||
-	      ((pSiS->VGAEngine == SIS_315_VGA) &&
-	       (SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x50) == 0x10) ) ) {
-		isslavemode = true;
-	}
-
-	/* We need to set pitch for CRT1 if bridge is in slave mode, too */
-	if((pSiS->VBFlags & DISPTYPE_DISP1) || (isslavemode)) {
-		SiS_SetPitchCRT1(SiS_Pr, pScrn);
-	}
-	/* We must not set the pitch for CRT2 if bridge is in slave mode */
-	if((pSiS->VBFlags & DISPTYPE_DISP2) && (!isslavemode)) {
-		SiS_SetPitchCRT2(SiS_Pr, pScrn);
-	}
-}
-#endif
-
-/*********************************************/
 /* SiSSetMode() */
 /*********************************************/
 
-#ifdef SIS_XORG_XF86
-/* We need pScrn for setting the pitch correctly */
-bool
-SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo, bool dosetpitch)
-#else
 bool
 SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
-#endif
 {
 	SISIOADDRESS BaseAddr = SiS_Pr->IOAddress;
 	unsigned short RealModeNo, ModeIdIndex;
 	unsigned char backupreg = 0;
-#ifdef SIS_LINUX_KERNEL
 	unsigned short KeepLockReg;
 
 	SiS_Pr->UseCustomMode = false;
 	SiS_Pr->CRT1UsesCustomMode = false;
-#endif
 
 	SiS_Pr->SiS_flag_clearbuffer = 0;
 
 	if(SiS_Pr->UseCustomMode) {
 		ModeNo = 0xfe;
 	} else {
-#ifdef SIS_LINUX_KERNEL
 		if(!(ModeNo & 0x80)) SiS_Pr->SiS_flag_clearbuffer = 1;
-#endif
 		ModeNo &= 0x7f;
 	}
 
@@ -3301,13 +3196,8 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 	SiS_GetSysFlags(SiS_Pr);
 
 	SiS_Pr->SiS_VGAINFO = 0x11;
-#if defined(SIS_XORG_XF86) && (defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__))
-	if(pScrn) SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff);
-#endif
 
-#ifdef SIS_LINUX_KERNEL
 	KeepLockReg = SiS_GetReg(SiS_Pr->SiS_P3c4,0x05);
-#endif
 	SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86);
 
 	SiSInitPCIetc(SiS_Pr);
@@ -3344,12 +3234,10 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 	SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex);
 	SiS_SetLowModeTest(SiS_Pr, ModeNo);
 
-#ifdef SIS_LINUX_KERNEL
 	/* Check memory size (kernel framebuffer driver only) */
 	if(!SiS_CheckMemorySize(SiS_Pr, ModeNo, ModeIdIndex)) {
 		return false;
 	}
-#endif
 
 	SiS_OpenCRTC(SiS_Pr);
 
@@ -3384,7 +3272,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 	SiS_DisplayOn(SiS_Pr);
 	SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF);
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 	if(SiS_Pr->ChipType >= SIS_315H) {
 		if(SiS_Pr->SiS_IF_DEF_LVDS == 1) {
 			if(!(SiS_IsDualEdge(SiS_Pr))) {
@@ -3396,7 +3284,7 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 
 	if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
 		if(SiS_Pr->ChipType >= SIS_315H) {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 			if(!SiS_Pr->SiS_ROMNew) {
 				if(SiS_IsVAMode(SiS_Pr)) {
 					SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01);
@@ -3424,424 +3312,16 @@ SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 		}
 	}
 
-#ifdef SIS_XORG_XF86
-	if(pScrn) {
-		/* SetPitch: Adapt to virtual size & position */
-		if((ModeNo > 0x13) && (dosetpitch)) {
-			SiS_SetPitch(SiS_Pr, pScrn);
-		}
-
-		/* Backup/Set ModeNo in BIOS scratch area */
-		SiS_GetSetModeID(pScrn, ModeNo);
-	}
-#endif
-
 	SiS_CloseCRTC(SiS_Pr);
 
 	SiS_Handle760(SiS_Pr);
 
-#ifdef SIS_LINUX_KERNEL
 	/* We never lock registers in XF86 */
 	if(KeepLockReg != 0xA1) SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x00);
-#endif
 
 	return true;
 }
 
3451/*********************************************/
3452/* X.org/XFree86: SiSBIOSSetMode() */
3453/* for non-Dual-Head mode */
3454/*********************************************/
3455
3456#ifdef SIS_XORG_XF86
3457bool
3458SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn,
3459 DisplayModePtr mode, bool IsCustom)
3460{
3461 SISPtr pSiS = SISPTR(pScrn);
3462 unsigned short ModeNo = 0;
3463
3464 SiS_Pr->UseCustomMode = false;
3465
3466 if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) {
3467
3468 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting custom mode %dx%d\n",
3469 SiS_Pr->CHDisplay,
3470 (mode->Flags & V_INTERLACE ? SiS_Pr->CVDisplay * 2 :
3471 (mode->Flags & V_DBLSCAN ? SiS_Pr->CVDisplay / 2 :
3472 SiS_Pr->CVDisplay)));
3473
3474 } else {
3475
3476 /* Don't need vbflags here; checks done earlier */
3477 ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags);
3478 if(!ModeNo) return false;
3479
3480 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3, "Setting standard mode 0x%x\n", ModeNo);
3481
3482 }
3483
3484 return(SiSSetMode(SiS_Pr, pScrn, ModeNo, true));
3485}
3486
3487/*********************************************/
3488/* X.org/XFree86: SiSBIOSSetModeCRT2() */
3489/* for Dual-Head modes */
3490/*********************************************/
3491
3492bool
3493SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn,
3494 DisplayModePtr mode, bool IsCustom)
3495{
3496 SISIOADDRESS BaseAddr = SiS_Pr->IOAddress;
3497 SISPtr pSiS = SISPTR(pScrn);
3498#ifdef SISDUALHEAD
3499 SISEntPtr pSiSEnt = pSiS->entityPrivate;
3500#endif
3501 unsigned short ModeIdIndex;
3502 unsigned short ModeNo = 0;
3503 unsigned char backupreg = 0;
3504
3505 SiS_Pr->UseCustomMode = false;
3506
3507 /* Remember: Custom modes for CRT2 are ONLY supported
3508 * -) on the 30x/B/C, and
3509 * -) if CRT2 is LCD or VGA, or CRT1 is LCDA
3510 */
3511
3512 if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) {
3513
3514 ModeNo = 0xfe;
3515
3516 } else {
3517
3518 ModeNo = SiS_GetModeNumber(pScrn, mode, pSiS->VBFlags);
3519 if(!ModeNo) return false;
3520
3521 }
3522
3523 SiSRegInit(SiS_Pr, BaseAddr);
3524 SiSInitPtr(SiS_Pr);
3525 SiS_GetSysFlags(SiS_Pr);
3526#if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__)
3527 SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff);
3528#else
3529 SiS_Pr->SiS_VGAINFO = 0x11;
3530#endif
3531
3532 SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86);
3533
3534 SiSInitPCIetc(SiS_Pr);
3535 SiSSetLVDSetc(SiS_Pr);
3536 SiSDetermineROMUsage(SiS_Pr);
3537
3538 /* Save mode info so we can set it from within SetMode for CRT1 */
3539#ifdef SISDUALHEAD
3540 if(pSiS->DualHeadMode) {
3541 pSiSEnt->CRT2ModeNo = ModeNo;
3542 pSiSEnt->CRT2DMode = mode;
3543 pSiSEnt->CRT2IsCustom = IsCustom;
3544 pSiSEnt->CRT2CR30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30);
3545 pSiSEnt->CRT2CR31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31);
3546 pSiSEnt->CRT2CR35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35);
3547 pSiSEnt->CRT2CR38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38);
3548#if 0
3549 /* We can't set CRT2 mode before CRT1 mode is set - says who...? */
3550 if(pSiSEnt->CRT1ModeNo == -1) {
3551 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
3552 "Setting CRT2 mode delayed until after setting CRT1 mode\n");
3553 return true;
3554 }
3555#endif
3556 pSiSEnt->CRT2ModeSet = true;
3557 }
3558#endif
3559
3560 if(SiS_Pr->UseCustomMode) {
3561
3562 unsigned short temptemp = SiS_Pr->CVDisplay;
3563
3564 if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1;
3565 else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1;
3566
3567 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
3568 "Setting custom mode %dx%d on CRT2\n",
3569 SiS_Pr->CHDisplay, temptemp);
3570
3571 } else {
3572
3573 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
3574 "Setting standard mode 0x%x on CRT2\n", ModeNo);
3575
3576 }
3577
3578 SiS_UnLockCRT2(SiS_Pr);
3579
3580 if(!SiS_Pr->UseCustomMode) {
3581 if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false;
3582 } else {
3583 ModeIdIndex = 0;
3584 }
3585
3586 SiS_GetVBType(SiS_Pr);
3587
3588 SiS_InitVB(SiS_Pr);
3589 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
3590 if(SiS_Pr->ChipType >= SIS_315H) {
3591 SiS_ResetVB(SiS_Pr);
3592 SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x32,0x10);
3593 SiS_SetRegOR(SiS_Pr->SiS_Part2Port,0x00,0x0c);
3594 backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38);
3595 } else {
3596 backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35);
3597 }
3598 }
3599
3600 /* Get VB information (connectors, connected devices) */
3601 if(!SiS_Pr->UseCustomMode) {
3602 SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 1);
3603 } else {
3604 /* If this is a custom mode, we don't check the modeflag for CRT2Mode */
3605 SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0);
3606 }
3607 SiS_SetYPbPr(SiS_Pr);
3608 SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex);
3609 SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex);
3610 SiS_SetLowModeTest(SiS_Pr, ModeNo);
3611
3612 SiS_ResetSegmentRegisters(SiS_Pr);
3613
3614 /* Set mode on CRT2 */
3615 if( (SiS_Pr->SiS_VBType & VB_SISVB) ||
3616 (SiS_Pr->SiS_IF_DEF_LVDS == 1) ||
3617 (SiS_Pr->SiS_IF_DEF_CH70xx != 0) ||
3618 (SiS_Pr->SiS_IF_DEF_TRUMPION != 0) ) {
3619 SiS_SetCRT2Group(SiS_Pr, ModeNo);
3620 }
3621
3622 SiS_StrangeStuff(SiS_Pr);
3623
3624 SiS_DisplayOn(SiS_Pr);
3625 SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF);
3626
3627 if(SiS_Pr->ChipType >= SIS_315H) {
3628 if(SiS_Pr->SiS_IF_DEF_LVDS == 1) {
3629 if(!(SiS_IsDualEdge(SiS_Pr))) {
3630 SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x13,0xfb);
3631 }
3632 }
3633 }
3634
3635 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
3636 if(SiS_Pr->ChipType >= SIS_315H) {
3637 if(!SiS_Pr->SiS_ROMNew) {
3638 if(SiS_IsVAMode(SiS_Pr)) {
3639 SiS_SetRegOR(SiS_Pr->SiS_P3d4,0x35,0x01);
3640 } else {
3641 SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x35,0xFE);
3642 }
3643 }
3644
3645 SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg);
3646
3647 if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x30) & SetCRT2ToLCD) {
3648 SiS_SetRegAND(SiS_Pr->SiS_P3d4,0x38,0xfc);
3649 }
3650 } else if((SiS_Pr->ChipType == SIS_630) ||
3651 (SiS_Pr->ChipType == SIS_730)) {
3652 SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg);
3653 }
3654 }
3655
3656 /* SetPitch: Adapt to virtual size & position */
3657 SiS_SetPitchCRT2(SiS_Pr, pScrn);
3658
3659 SiS_Handle760(SiS_Pr);
3660
3661 return true;
3662}
3663
3664/*********************************************/
3665/* X.org/XFree86: SiSBIOSSetModeCRT1() */
3666/* for Dual-Head modes */
3667/*********************************************/
3668
3669bool
3670SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn,
3671 DisplayModePtr mode, bool IsCustom)
3672{
3673 SISIOADDRESS BaseAddr = SiS_Pr->IOAddress;
3674 SISPtr pSiS = SISPTR(pScrn);
3675 unsigned short ModeIdIndex, ModeNo = 0;
3676 unsigned char backupreg = 0;
3677#ifdef SISDUALHEAD
3678 SISEntPtr pSiSEnt = pSiS->entityPrivate;
3679 unsigned char backupcr30, backupcr31, backupcr38, backupcr35, backupp40d=0;
3680 bool backupcustom;
3681#endif
3682
3683 SiS_Pr->UseCustomMode = false;
3684
3685 if((IsCustom) && (SiS_CheckBuildCustomMode(pScrn, mode, pSiS->VBFlags))) {
3686
3687 unsigned short temptemp = SiS_Pr->CVDisplay;
3688
3689 if(SiS_Pr->CModeFlag & DoubleScanMode) temptemp >>= 1;
3690 else if(SiS_Pr->CInfoFlag & InterlaceMode) temptemp <<= 1;
3691
3692 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
3693 "Setting custom mode %dx%d on CRT1\n",
3694 SiS_Pr->CHDisplay, temptemp);
3695 ModeNo = 0xfe;
3696
3697 } else {
3698
3699 ModeNo = SiS_GetModeNumber(pScrn, mode, 0); /* don't give VBFlags */
3700 if(!ModeNo) return false;
3701
3702 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
3703 "Setting standard mode 0x%x on CRT1\n", ModeNo);
3704 }
3705
3706 SiSInitPtr(SiS_Pr);
3707 SiSRegInit(SiS_Pr, BaseAddr);
3708 SiS_GetSysFlags(SiS_Pr);
3709#if defined(i386) || defined(__i386) || defined(__i386__) || defined(__AMD64__) || defined(__amd64__) || defined(__x86_64__)
3710 SiS_Pr->SiS_VGAINFO = SiS_GetSetBIOSScratch(pScrn, 0x489, 0xff);
3711#else
3712 SiS_Pr->SiS_VGAINFO = 0x11;
3713#endif
3714
3715 SiS_SetReg(SiS_Pr->SiS_P3c4,0x05,0x86);
3716
3717 SiSInitPCIetc(SiS_Pr);
3718 SiSSetLVDSetc(SiS_Pr);
3719 SiSDetermineROMUsage(SiS_Pr);
3720
3721 SiS_UnLockCRT2(SiS_Pr);
3722
3723 if(!SiS_Pr->UseCustomMode) {
3724 if(!(SiS_SearchModeID(SiS_Pr, &ModeNo, &ModeIdIndex))) return false;
3725 } else {
3726 ModeIdIndex = 0;
3727 }
3728
3729 /* Determine VBType */
3730 SiS_GetVBType(SiS_Pr);
3731
3732 SiS_InitVB(SiS_Pr);
3733 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
3734 if(SiS_Pr->ChipType >= SIS_315H) {
3735 backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38);
3736 } else {
3737 backupreg = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35);
3738 }
3739 }
3740
3741 /* Get VB information (connectors, connected devices) */
3742 /* (We don't care if the current mode is a CRT2 mode) */
3743 SiS_GetVBInfo(SiS_Pr, ModeNo, ModeIdIndex, 0);
3744 SiS_SetYPbPr(SiS_Pr);
3745 SiS_SetTVMode(SiS_Pr, ModeNo, ModeIdIndex);
3746 SiS_GetLCDResInfo(SiS_Pr, ModeNo, ModeIdIndex);
3747 SiS_SetLowModeTest(SiS_Pr, ModeNo);
3748
3749 SiS_OpenCRTC(SiS_Pr);
3750
3751 /* Set mode on CRT1 */
3752 SiS_SetCRT1Group(SiS_Pr, ModeNo, ModeIdIndex);
3753 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) {
3754 SiS_SetCRT2Group(SiS_Pr, ModeNo);
3755 }
3756
3757 /* SetPitch: Adapt to virtual size & position */
3758 SiS_SetPitchCRT1(SiS_Pr, pScrn);
3759
3760 SiS_HandleCRT1(SiS_Pr);
3761
3762 SiS_StrangeStuff(SiS_Pr);
3763
3764 SiS_CloseCRTC(SiS_Pr);
3765
3766#ifdef SISDUALHEAD
3767 if(pSiS->DualHeadMode) {
3768 pSiSEnt->CRT1ModeNo = ModeNo;
3769 pSiSEnt->CRT1DMode = mode;
3770 }
3771#endif
3772
3773 if(SiS_Pr->UseCustomMode) {
3774 SiS_Pr->CRT1UsesCustomMode = true;
3775 SiS_Pr->CSRClock_CRT1 = SiS_Pr->CSRClock;
3776 SiS_Pr->CModeFlag_CRT1 = SiS_Pr->CModeFlag;
3777 } else {
3778 SiS_Pr->CRT1UsesCustomMode = false;
3779 }
3780
3781 /* Reset CRT2 if changing mode on CRT1 */
3782#ifdef SISDUALHEAD
3783 if(pSiS->DualHeadMode) {
3784 if(pSiSEnt->CRT2ModeNo != -1) {
3785 xf86DrvMsgVerb(pScrn->scrnIndex, X_INFO, 3,
3786 "(Re-)Setting mode for CRT2\n");
3787 backupcustom = SiS_Pr->UseCustomMode;
3788 backupcr30 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x30);
3789 backupcr31 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x31);
3790 backupcr35 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x35);
3791 backupcr38 = SiS_GetReg(SiS_Pr->SiS_P3d4,0x38);
3792 if(SiS_Pr->SiS_VBType & VB_SISVB) {
3793 /* Backup LUT-enable */
3794 if(pSiSEnt->CRT2ModeSet) {
3795 backupp40d = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x0d) & 0x08;
3796 }
3797 }
3798 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) {
3799 SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,pSiSEnt->CRT2CR30);
3800 SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,pSiSEnt->CRT2CR31);
3801 SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,pSiSEnt->CRT2CR35);
3802 SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,pSiSEnt->CRT2CR38);
3803 }
3804
3805 SiSBIOSSetModeCRT2(SiS_Pr, pSiSEnt->pScrn_1,
3806 pSiSEnt->CRT2DMode, pSiSEnt->CRT2IsCustom);
3807
3808 SiS_SetReg(SiS_Pr->SiS_P3d4,0x30,backupcr30);
3809 SiS_SetReg(SiS_Pr->SiS_P3d4,0x31,backupcr31);
3810 SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupcr35);
3811 SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupcr38);
3812 if(SiS_Pr->SiS_VBType & VB_SISVB) {
3813 SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x0d, ~0x08, backupp40d);
3814 }
3815 SiS_Pr->UseCustomMode = backupcustom;
3816 }
3817 }
3818#endif
3819
3820 /* Warning: From here, the custom mode entries in SiS_Pr are
3821 * possibly overwritten
3822 */
3823
3824 SiS_DisplayOn(SiS_Pr);
3825 SiS_SetRegByte(SiS_Pr->SiS_P3c6,0xFF);
3826
3827 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
3828 if(SiS_Pr->ChipType >= SIS_315H) {
3829 SiS_SetReg(SiS_Pr->SiS_P3d4,0x38,backupreg);
3830 } else if((SiS_Pr->ChipType == SIS_630) ||
3831 (SiS_Pr->ChipType == SIS_730)) {
3832 SiS_SetReg(SiS_Pr->SiS_P3d4,0x35,backupreg);
3833 }
3834 }
3835
3836 SiS_Handle760(SiS_Pr);
3837
3838 /* Backup/Set ModeNo in BIOS scratch area */
3839 SiS_GetSetModeID(pScrn,ModeNo);
3840
3841 return true;
3842}
3843#endif /* Linux_XF86 */
3844
3845#ifndef GETBITSTR 3325#ifndef GETBITSTR
3846#define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l)) 3326#define BITMASK(h,l) (((unsigned)(1U << ((h)-(l)+1))-1)<<(l))
3847#define GENMASK(mask) BITMASK(1?mask,0?mask) 3327#define GENMASK(mask) BITMASK(1?mask,0?mask)
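
[Note: the GETBITSTR helper macros kept above are terse. A standalone sketch (illustration only, not part of the patch) of how they expand; the only non-obvious part is GENMASK's use of the conditional operator, since a range written as 7:4 is a single macro argument and 1?7:4 / 0?7:4 select its two halves.]

#include <stdio.h>

#define BITMASK(h,l)  (((unsigned)(1U << ((h)-(l)+1))-1)<<(l))
#define GENMASK(mask) BITMASK(1?mask,0?mask)

int main(void)
{
	/* BITMASK(7,4): (1U << 4) - 1 = 0x0F, shifted left by 4 -> 0xF0 */
	printf("BITMASK(7,4) = 0x%02X\n", BITMASK(7,4));
	/* GENMASK(7:4) expands to BITMASK(1?7:4, 0?7:4) = BITMASK(7,4) */
	printf("GENMASK(7:4) = 0x%02X\n", GENMASK(7:4));
	return 0;
}
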
@@ -3927,7 +3407,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
    SiS_Pr->CVBlankStart = SiS_Pr->SiS_VGAVDE;
 
    if(SiS_Pr->ChipType < SIS_315H) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
       tempbx = SiS_Pr->SiS_VGAHT;
       if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) {
          tempbx = SiS_Pr->PanelHT;
@@ -3936,7 +3416,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
       remaining = tempbx % 8;
 #endif
    } else {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
       /* OK for LCDA, LVDS */
       tempbx = SiS_Pr->PanelHT - SiS_Pr->PanelXRes;
       tempax = SiS_Pr->SiS_VGAHDE; /* not /2 ! */
@@ -3950,7 +3430,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
    SiS_Pr->CHTotal = SiS_Pr->CHBlankEnd = tempbx;
 
    if(SiS_Pr->ChipType < SIS_315H) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
       if(SiS_Pr->SiS_VGAHDE == SiS_Pr->PanelXRes) {
          SiS_Pr->CHSyncStart = SiS_Pr->SiS_VGAHDE + ((SiS_Pr->PanelHRS + 1) & ~1);
          SiS_Pr->CHSyncEnd = SiS_Pr->CHSyncStart + SiS_Pr->PanelHRE;
@@ -3982,7 +3462,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
       }
 #endif
    } else {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
       tempax = VGAHDE;
       if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) {
          tempbx = SiS_Pr->PanelXRes;
@@ -4001,7 +3481,7 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
    if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) {
       tempax = SiS_Pr->PanelYRes;
    } else if(SiS_Pr->ChipType < SIS_315H) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
       /* Stupid hack for 640x400/320x200 */
       if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) {
          if((tempax + tempbx) == 438) tempbx += 16;
@@ -4054,36 +3534,12 @@ SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
    if(modeflag & DoubleScanMode) tempax |= 0x80;
    SiS_SetRegANDOR(SiS_Pr->SiS_P3d4,0x09,0x5F,tempax);
 
-#ifdef SIS_XORG_XF86
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n",
-      SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal,
-      SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal,
-      SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd);
-   xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n",
-      SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1],
-      SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3],
-      SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5],
-      SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]);
-   xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n",
-      SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9],
-      SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11],
-      SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13],
-      SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]);
-   xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]);
-#endif
-#endif
 }
 
 void
 SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
       int xres, int yres,
-#ifdef SIS_XORG_XF86
-      DisplayModePtr current
-#endif
-#ifdef SIS_LINUX_KERNEL
      struct fb_var_screeninfo *var, bool writeres
-#endif
 )
 {
    unsigned short HRE, HBE, HRS, HBS, HDE, HT;
@@ -4127,25 +3583,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
 
    D = B - F - C;
 
-#ifdef SIS_XORG_XF86
-   current->HDisplay = (E * 8);
-   current->HSyncStart = (E * 8) + (F * 8);
-   current->HSyncEnd = (E * 8) + (F * 8) + (C * 8);
-   current->HTotal = (E * 8) + (F * 8) + (C * 8) + (D * 8);
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_INFO,
-      "H: A %d B %d C %d D %d E %d F %d HT %d HDE %d HRS %d HBS %d HBE %d HRE %d\n",
-      A, B, C, D, E, F, HT, HDE, HRS, HBS, HBE, HRE);
-#else
-   (void)VBS; (void)HBS; (void)A;
-#endif
-#endif
-#ifdef SIS_LINUX_KERNEL
    if(writeres) var->xres = xres = E * 8;
    var->left_margin = D * 8;
    var->right_margin = F * 8;
    var->hsync_len = C * 8;
-#endif
 
    /* Vertical */
    sr_data = crdata[13];
@@ -4192,30 +3633,10 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
 
    D = B - F - C;
 
-#ifdef SIS_XORG_XF86
-   current->VDisplay = VDE + 1;
-   current->VSyncStart = VRS + 1;
-   current->VSyncEnd = ((VRS & ~0x1f) | VRE) + 1;
-   if(VRE <= (VRS & 0x1f)) current->VSyncEnd += 32;
-   current->VTotal = E + D + C + F;
-#if 0
-   current->VDisplay = E;
-   current->VSyncStart = E + D;
-   current->VSyncEnd = E + D + C;
-   current->VTotal = E + D + C + F;
-#endif
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_INFO,
-      "V: A %d B %d C %d D %d E %d F %d VT %d VDE %d VRS %d VBS %d VBE %d VRE %d\n",
-      A, B, C, D, E, F, VT, VDE, VRS, VBS, VBE, VRE);
-#endif
-#endif
-#ifdef SIS_LINUX_KERNEL
    if(writeres) var->yres = yres = E;
    var->upper_margin = D;
    var->lower_margin = F;
    var->vsync_len = C;
-#endif
 
    if((xres == 320) && ((yres == 200) || (yres == 240))) {
       /* Terrible hack, but correct CRTC data for
@@ -4224,17 +3645,9 @@ SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata,
        * a negative D. The CRT controller does not
        * seem to like correcting HRE to 50)
        */
-#ifdef SIS_XORG_XF86
-      current->HDisplay = 320;
-      current->HSyncStart = 328;
-      current->HSyncEnd = 376;
-      current->HTotal = 400;
-#endif
-#ifdef SIS_LINUX_KERNEL
       var->left_margin = (400 - 376);
       var->right_margin = (328 - 320);
       var->hsync_len = (376 - 328);
-#endif
 
    }
 
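
[Note: with the X.org branches removed, SiS_Generic_ConvertCRData() above fills only the kernel's struct fb_var_screeninfo. As a reading aid — illustration only, reusing the 320x200 fallback constants hardcoded in the last hunk — the fbdev margin fields derive from classic CRTC timing as in this sketch:]

#include <stdio.h>

int main(void)
{
	/* CRTC view of the hardcoded 320x200 fallback mode above */
	unsigned hdisp = 320, hsyncstart = 328, hsyncend = 376, htotal = 400;

	/* fbdev counts outward from the visible area */
	unsigned right_margin = hsyncstart - hdisp;    /* front porch:  8 */
	unsigned hsync_len    = hsyncend - hsyncstart; /* sync pulse:  48 */
	unsigned left_margin  = htotal - hsyncend;     /* back porch:  24 */

	printf("left %u, right %u, hsync %u\n",
	       left_margin, right_margin, hsync_len);
	return 0;
}
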
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index b96005c39c67..ee8ed3c203da 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -53,21 +53,8 @@
 #ifndef _INIT_H_
 #define _INIT_H_
 
-#include "osdef.h"
 #include "initdef.h"
 
-#ifdef SIS_XORG_XF86
-#include "sis.h"
-#define SIS_NEED_inSISREG
-#define SIS_NEED_inSISREGW
-#define SIS_NEED_inSISREGL
-#define SIS_NEED_outSISREG
-#define SIS_NEED_outSISREGW
-#define SIS_NEED_outSISREGL
-#include "sis_regs.h"
-#endif
-
-#ifdef SIS_LINUX_KERNEL
 #include "vgatypes.h"
 #include "vstruct.h"
 #ifdef SIS_CP
@@ -78,7 +65,6 @@
 #include <linux/fb.h>
 #include "sis.h"
 #include <video/sisfb.h>
-#endif
 
 /* Mode numbers */
 static const unsigned short ModeIndex_320x200[] = {0x59, 0x41, 0x00, 0x4f};
@@ -286,7 +272,7 @@ static const struct SiS_ModeResInfo_S SiS_ModeResInfo[] =
    { 1280, 854, 8,16} /* 0x22 */
 };
 
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
 static const struct SiS_StandTable_S SiS_StandTable[]=
 {
 /* 0x00: MD_0_200 */
@@ -1521,10 +1507,6 @@ static const struct SiS_LVDSCRT1Data SiS_LVDSCRT1640x480_1_H[] =
 };
 
 bool SiSInitPtr(struct SiS_Private *SiS_Pr);
-#ifdef SIS_XORG_XF86
-unsigned short SiS_GetModeID(int VGAEngine, unsigned int VBFlags, int HDisplay, int VDisplay,
-        int Depth, bool FSTN, int LCDwith, int LCDheight);
-#endif
 unsigned short SiS_GetModeID_LCD(int VGAEngine, unsigned int VBFlags, int HDisplay,
         int VDisplay, int Depth, bool FSTN,
         unsigned short CustomT, int LCDwith, int LCDheight,
@@ -1550,17 +1532,11 @@ void SiS_SetRegOR(SISIOADDRESS Port,unsigned short Index, unsigned short DataOR
 void SiS_DisplayOn(struct SiS_Private *SiS_Pr);
 void SiS_DisplayOff(struct SiS_Private *SiS_Pr);
 void SiSRegInit(struct SiS_Private *SiS_Pr, SISIOADDRESS BaseAddr);
-#ifndef SIS_LINUX_KERNEL
-void SiSSetLVDSetc(struct SiS_Private *SiS_Pr);
-#endif
 void SiS_SetEnableDstn(struct SiS_Private *SiS_Pr, int enable);
 void SiS_SetEnableFstn(struct SiS_Private *SiS_Pr, int enable);
 unsigned short SiS_GetModeFlag(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
         unsigned short ModeIdIndex);
 bool SiSDetermineROMLayout661(struct SiS_Private *SiS_Pr);
-#ifndef SIS_LINUX_KERNEL
-void SiS_GetVBType(struct SiS_Private *SiS_Pr);
-#endif
 
 bool SiS_SearchModeID(struct SiS_Private *SiS_Pr, unsigned short *ModeNo,
         unsigned short *ModeIdIndex);
@@ -1572,37 +1548,19 @@ unsigned short SiS_GetColorDepth(struct SiS_Private *SiS_Pr, unsigned short Mode
         unsigned short ModeIdIndex);
 unsigned short SiS_GetOffset(struct SiS_Private *SiS_Pr,unsigned short ModeNo,
         unsigned short ModeIdIndex, unsigned short RRTI);
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *idx1,
         unsigned short *idx2);
 unsigned short SiS_GetFIFOThresholdB300(unsigned short idx1, unsigned short idx2);
 unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index);
 #endif
 void SiS_LoadDAC(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex);
-#ifdef SIS_XORG_XF86
-bool SiSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn, unsigned short ModeNo,
-        bool dosetpitch);
-bool SiSBIOSSetMode(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn,
-        DisplayModePtr mode, bool IsCustom);
-bool SiSBIOSSetModeCRT2(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn,
-        DisplayModePtr mode, bool IsCustom);
-bool SiSBIOSSetModeCRT1(struct SiS_Private *SiS_Pr, ScrnInfoPtr pScrn,
-        DisplayModePtr mode, bool IsCustom);
-#endif
-#ifdef SIS_LINUX_KERNEL
 bool SiSSetMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
-#endif
 void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth);
 void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
         unsigned short ModeIdIndex);
-#ifdef SIS_XORG_XF86
-void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres,
-        int yres, DisplayModePtr current);
-#endif
-#ifdef SIS_LINUX_KERNEL
 void SiS_Generic_ConvertCRData(struct SiS_Private *SiS_Pr, unsigned char *crdata, int xres,
         int yres, struct fb_var_screeninfo *var, bool writeres);
-#endif
 
 /* From init301.c: */
 extern void SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
@@ -1626,29 +1584,16 @@ extern unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short
 extern bool SiS_IsVAMode(struct SiS_Private *);
 extern bool SiS_IsDualEdge(struct SiS_Private *);
 
-#ifdef SIS_XORG_XF86
-/* From other modules: */
-extern unsigned short SiS_CheckBuildCustomMode(ScrnInfoPtr pScrn, DisplayModePtr mode,
-        unsigned int VBFlags);
-extern unsigned char SiS_GetSetBIOSScratch(ScrnInfoPtr pScrn, unsigned short offset,
-        unsigned char value);
-extern unsigned char SiS_GetSetModeID(ScrnInfoPtr pScrn, unsigned char id);
-extern unsigned short SiS_GetModeNumber(ScrnInfoPtr pScrn, DisplayModePtr mode,
-        unsigned int VBFlags);
-#endif
-
-#ifdef SIS_LINUX_KERNEL
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg);
 extern void sisfb_write_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg,
         unsigned int val);
 #endif
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 extern void sisfb_write_nbridge_pci_byte(struct SiS_Private *SiS_Pr, int reg,
         unsigned char val);
 extern unsigned int sisfb_read_mio_pci_word(struct SiS_Private *SiS_Pr, int reg);
 #endif
-#endif
 
 #endif
 
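
[Note: init.h now gates the chip-family sections directly on CONFIG_FB_SIS_300/CONFIG_FB_SIS_315, the macros the kernel generates from the sisfb Kconfig options, rather than on the driver-private SIS300/SIS315H symbols previously arranged by the deleted osdef.h. A minimal sketch of the recurring rename; the helper name is invented for illustration:]

/* Before: driver-private symbol, set up by the removed osdef.h */
#ifdef SIS315H
static void sis315_apply_quirk(void);	/* hypothetical helper */
#endif

/* After: the Kconfig-generated symbol, no osdef.h needed */
#ifdef CONFIG_FB_SIS_315
static void sis315_apply_quirk(void);	/* hypothetical helper */
#endif
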
diff --git a/drivers/video/sis/init301.c b/drivers/video/sis/init301.c
index da33d801c22e..9fa66fd4052a 100644
--- a/drivers/video/sis/init301.c
+++ b/drivers/video/sis/init301.c
@@ -75,11 +75,11 @@
 
 #include "init301.h"
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 #include "oem300.h"
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 #include "oem310.h"
 #endif
 
@@ -87,9 +87,7 @@
 #define SiS_I2CDELAYSHORT 150
 
 static unsigned short SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr);
-#ifdef SIS_LINUX_KERNEL
 static void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val);
-#endif
 
 /*********************************************/
 /* HELPER: Lock/Unlock CRT2 */
@@ -106,9 +104,7 @@ SiS_UnLockCRT2(struct SiS_Private *SiS_Pr)
    SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x24,0x01);
 }
 
-#ifdef SIS_LINUX_KERNEL
 static
-#endif
 void
 SiS_LockCRT2(struct SiS_Private *SiS_Pr)
 {
@@ -138,7 +134,7 @@ SiS_SetRegSR11ANDOR(struct SiS_Private *SiS_Pr, unsigned short DataAND, unsigned
 /* HELPER: Get Pointer to LCD structure */
 /*********************************************/
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static unsigned char *
 GetLCDStructPtr661(struct SiS_Private *SiS_Pr)
 {
@@ -404,7 +400,7 @@ SiS_SaveCRT2Info(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
 /* HELPER: GET SOME DATA FROM BIOS ROM */
 /*********************************************/
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 static bool
 SiS_CR36BIOSWord23b(struct SiS_Private *SiS_Pr)
 {
@@ -449,7 +445,7 @@ SiS_DDC2Delay(struct SiS_Private *SiS_Pr, unsigned int delaytime)
    SiS_GetReg(SiS_Pr->SiS_P3c4, 0x05);
 }
 
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
 static void
 SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay)
 {
@@ -457,7 +453,7 @@ SiS_GenericDelay(struct SiS_Private *SiS_Pr, unsigned short delay)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static void
 SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay)
 {
@@ -467,7 +463,7 @@ SiS_LongDelay(struct SiS_Private *SiS_Pr, unsigned short delay)
 }
 #endif
 
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
 static void
 SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay)
 {
@@ -480,14 +476,14 @@ SiS_ShortDelay(struct SiS_Private *SiS_Pr, unsigned short delay)
 static void
 SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime)
 {
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
    unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
    unsigned short PanelID, DelayIndex, Delay=0;
 #endif
 
    if(SiS_Pr->ChipType < SIS_315H) {
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 
       PanelID = SiS_GetReg(SiS_Pr->SiS_P3d4,0x36);
       if(SiS_Pr->SiS_VBType & VB_SISVB) {
@@ -513,11 +509,11 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime)
       }
       SiS_ShortDelay(SiS_Pr, Delay);
 
-#endif /* SIS300 */
+#endif /* CONFIG_FB_SIS_300 */
 
    } else {
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 
       if((SiS_Pr->ChipType >= SIS_661) ||
          (SiS_Pr->ChipType <= SIS_315PRO) ||
@@ -579,12 +575,12 @@ SiS_PanelDelay(struct SiS_Private *SiS_Pr, unsigned short DelayTime)
 
       }
 
-#endif /* SIS315H */
+#endif /* CONFIG_FB_SIS_315 */
 
    }
 }
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static void
 SiS_PanelDelayLoop(struct SiS_Private *SiS_Pr, unsigned short DelayTime, unsigned short DelayLoop)
 {
@@ -613,7 +609,7 @@ SiS_WaitRetrace1(struct SiS_Private *SiS_Pr)
    while((!(SiS_GetRegByte(SiS_Pr->SiS_P3da) & 0x08)) && --watchdog);
 }
 
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
 static void
 SiS_WaitRetrace2(struct SiS_Private *SiS_Pr, unsigned short reg)
 {
@@ -630,7 +626,7 @@ static void
 SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr)
 {
    if(SiS_Pr->ChipType < SIS_315H) {
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
       if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
          if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x20)) return;
       }
@@ -641,7 +637,7 @@ SiS_WaitVBRetrace(struct SiS_Private *SiS_Pr)
       }
 #endif
    } else {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
       if(!(SiS_GetReg(SiS_Pr->SiS_Part1Port,0x00) & 0x40)) {
          SiS_WaitRetrace1(SiS_Pr);
       } else {
@@ -686,7 +682,7 @@ SiS_VBLongWait(struct SiS_Private *SiS_Pr)
 /* HELPER: MISC */
 /*********************************************/
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 static bool
 SiS_Is301B(struct SiS_Private *SiS_Pr)
 {
@@ -708,7 +704,7 @@ SiS_CRT2IsLCD(struct SiS_Private *SiS_Pr)
 bool
 SiS_IsDualEdge(struct SiS_Private *SiS_Pr)
 {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    if(SiS_Pr->ChipType >= SIS_315H) {
       if((SiS_Pr->ChipType != SIS_650) || (SiS_GetReg(SiS_Pr->SiS_P3d4,0x5f) & 0xf0)) {
          if(SiS_GetReg(SiS_Pr->SiS_P3d4,0x38) & EnableDualEdge) return true;
@@ -721,7 +717,7 @@ SiS_IsDualEdge(struct SiS_Private *SiS_Pr)
 bool
 SiS_IsVAMode(struct SiS_Private *SiS_Pr)
 {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    unsigned short flag;
 
    if(SiS_Pr->ChipType >= SIS_315H) {
@@ -732,7 +728,7 @@ SiS_IsVAMode(struct SiS_Private *SiS_Pr)
    return false;
 }
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_IsVAorLCD(struct SiS_Private *SiS_Pr)
 {
@@ -745,7 +741,7 @@ SiS_IsVAorLCD(struct SiS_Private *SiS_Pr)
 static bool
 SiS_IsDualLink(struct SiS_Private *SiS_Pr)
 {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    if(SiS_Pr->ChipType >= SIS_315H) {
       if((SiS_CRT2IsLCD(SiS_Pr)) ||
          (SiS_IsVAMode(SiS_Pr))) {
@@ -756,7 +752,7 @@ SiS_IsDualLink(struct SiS_Private *SiS_Pr)
    return false;
 }
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_TVEnabled(struct SiS_Private *SiS_Pr)
 {
@@ -768,7 +764,7 @@ SiS_TVEnabled(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_LCDAEnabled(struct SiS_Private *SiS_Pr)
 {
@@ -777,7 +773,7 @@ SiS_LCDAEnabled(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr)
 {
@@ -788,7 +784,7 @@ SiS_WeHaveBacklightCtrl(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr)
 {
@@ -804,7 +800,7 @@ SiS_IsNotM650orLater(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_IsYPbPr(struct SiS_Private *SiS_Pr)
 {
@@ -816,7 +812,7 @@ SiS_IsYPbPr(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_IsChScart(struct SiS_Private *SiS_Pr)
 {
@@ -828,7 +824,7 @@ SiS_IsChScart(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr)
 {
@@ -848,7 +844,7 @@ SiS_IsTVOrYPbPrOrScart(struct SiS_Private *SiS_Pr)
 }
 #endif
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
 static bool
 SiS_IsLCDOrLCDA(struct SiS_Private *SiS_Pr)
 {
@@ -914,7 +910,7 @@ SiS_BridgeInSlavemode(struct SiS_Private *SiS_Pr)
 /*********************************************/
 
 /* Setup general purpose IO for Chrontel communication */
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
 void
 SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo)
 {
@@ -923,11 +919,7 @@ SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo)
 
    if(!(SiS_Pr->SiS_ChSW)) return;
 
-#ifdef SIS_LINUX_KERNEL
    acpibase = sisfb_read_lpc_pci_dword(SiS_Pr, 0x74);
-#else
-   acpibase = pciReadLong(0x00000800, 0x74);
-#endif
    acpibase &= 0xFFFF;
    if(!acpibase) return;
    temp = SiS_GetRegShort((acpibase + 0x3c)); /* ACPI register 0x3c: GP Event 1 I/O mode select */
@@ -969,7 +961,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
    tempax &= (DriverMode | LoadDACFlag | SetNotSimuMode | SetPALTV);
    tempbx |= tempax;
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    if(SiS_Pr->ChipType >= SIS_315H) {
       if(SiS_Pr->SiS_VBType & VB_SISLCDA) {
          if(ModeNo == 0x03) {
@@ -1019,7 +1011,7 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
       }
    }
 
-#endif /* SIS315H */
+#endif /* CONFIG_FB_SIS_315 */
 
    if(!(SiS_Pr->SiS_VBType & VB_SISVGA2)) {
       tempbx &= ~(SetCRT2ToRAMDAC);
@@ -1154,24 +1146,16 @@ SiS_GetVBInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo,
 
    SiS_Pr->SiS_VBInfo = tempbx;
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
    if(SiS_Pr->ChipType == SIS_630) {
       SiS_SetChrontelGPIO(SiS_Pr, SiS_Pr->SiS_VBInfo);
    }
 #endif
 
-#ifdef SIS_LINUX_KERNEL
 #if 0
    printk(KERN_DEBUG "sisfb: (init301: VBInfo= 0x%04x, SetFlag=0x%04x)\n",
       SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag);
 #endif
-#endif
-#ifdef SIS_XORG_XF86
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_PROBED, "(init301: VBInfo=0x%04x, SetFlag=0x%04x)\n",
-      SiS_Pr->SiS_VBInfo, SiS_Pr->SiS_SetFlag);
-#endif
-#endif
 }
 
 /*********************************************/
@@ -1415,12 +1399,6 @@ SiS_SetTVMode(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
    }
 
    SiS_Pr->SiS_VBInfo &= ~SetPALTV;
-
-#ifdef SIS_XORG_XF86
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_INFO, "(init301: TVMode %x, VBInfo %x)\n", SiS_Pr->SiS_TVMode, SiS_Pr->SiS_VBInfo);
-#endif
-#endif
 }
 
 /*********************************************/
@@ -1443,22 +1421,10 @@ SiS_GetBIOSLCDResInfo(struct SiS_Private *SiS_Pr)
 static void
 SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr)
 {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    unsigned char *ROMAddr;
    unsigned short temp;
 
-#ifdef SIS_XORG_XF86
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_INFO, "Paneldata driver: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n",
-      SiS_Pr->PanelHT, SiS_Pr->PanelVT,
-      SiS_Pr->PanelHRS, SiS_Pr->PanelHRE,
-      SiS_Pr->PanelVRS, SiS_Pr->PanelVRE,
-      SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK,
-      SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A,
-      SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B);
-#endif
-#endif
-
    if((ROMAddr = GetLCDStructPtr661(SiS_Pr))) {
       if((temp = SISGETROMW(6)) != SiS_Pr->PanelHT) {
          SiS_Pr->SiS_NeedRomModeData = true;
@@ -1480,18 +1446,6 @@ SiS_GetLCDInfoBIOS(struct SiS_Private *SiS_Pr)
       SiS_Pr->SiS_VCLKData[VCLK_CUSTOM_315].SR2C =
          SiS_Pr->SiS_VBVCLKData[VCLK_CUSTOM_315].Part4_B = ROMAddr[20];
 
-#ifdef SIS_XORG_XF86
-#ifdef TWDEBUG
-      xf86DrvMsg(0, X_INFO, "Paneldata BIOS: [%d %d] [H %d %d] [V %d %d] [C %d 0x%02x 0x%02x]\n",
-         SiS_Pr->PanelHT, SiS_Pr->PanelVT,
-         SiS_Pr->PanelHRS, SiS_Pr->PanelHRE,
-         SiS_Pr->PanelVRS, SiS_Pr->PanelVRE,
-         SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].CLOCK,
-         SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_A,
-         SiS_Pr->SiS_VBVCLKData[SiS_Pr->PanelVCLKIdx315].Part4_B);
-#endif
-#endif
-
    }
 #endif
 }
@@ -1517,13 +1471,13 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
 {
    unsigned short temp,modeflag,resinfo=0,modexres=0,modeyres=0;
    bool panelcanscale = false;
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
    unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
    static const unsigned char SiS300SeriesLCDRes[] =
       { 0, 1, 2, 3, 7, 4, 5, 8,
         0, 0, 10, 0, 0, 0, 0, 15 };
 #endif
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    unsigned char *myptr = NULL;
 #endif
 
@@ -1562,7 +1516,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
       SiS_Pr->SiS_LCDTypeInfo = (temp & 0x0F) - 1;
    }
    temp &= 0x0f;
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
    if(SiS_Pr->ChipType < SIS_315H) {
       /* Very old BIOSes only know 7 sizes (NetVista 2179, 1.01g) */
       if(SiS_Pr->SiS_VBType & VB_SIS301) {
@@ -1574,7 +1528,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
 #endif
 
    /* Translate to our internal types */
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    if(SiS_Pr->ChipType == SIS_550) {
       if (temp == Panel310_1152x768) temp = Panel_320x240_2; /* Verified working */
       else if(temp == Panel310_320x240_2) temp = Panel_320x240_2;
@@ -1597,7 +1551,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
 
    SiS_Pr->SiS_LCDResInfo = temp;
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
    if(SiS_Pr->SiS_IF_DEF_LVDS == 1) {
       if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) {
          SiS_Pr->SiS_LCDResInfo = Panel_Barco1366;
@@ -1639,7 +1593,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
    else if(SiS_Pr->UsePanelScaler == 1) SiS_Pr->SiS_LCDInfo |= DontExpandLCD;
 
    /* Dual link, Pass 1:1 BIOS default, etc. */
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    if(SiS_Pr->ChipType >= SIS_661) {
       if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) {
          if(temp & 0x08) SiS_Pr->SiS_LCDInfo |= LCDPass11;
@@ -2076,7 +2030,7 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
       }
    }
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
    if(SiS_Pr->SiS_IF_DEF_LVDS == 1) {
       if(SiS_Pr->SiS_CustomT == CUT_PANEL848 || SiS_Pr->SiS_CustomT == CUT_PANEL856) {
          SiS_Pr->SiS_LCDInfo = 0x80 | 0x40 | 0x20; /* neg h/v sync, RGB24(D0 = 0) */
@@ -2186,17 +2140,10 @@ SiS_GetLCDResInfo(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned sh
       SiS_Pr->SiS_SetFlag |= LCDVESATiming;
    }
 
-#ifdef SIS_LINUX_KERNEL
 #if 0
    printk(KERN_DEBUG "sisfb: (LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x)\n",
       SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo);
 #endif
-#endif
-#ifdef SIS_XORG_XF86
-   xf86DrvMsgVerb(0, X_PROBED, 4,
-      "(init301: LCDInfo=0x%04x LCDResInfo=0x%02x LCDTypeInfo=0x%02x SetFlag=0x%04x)\n",
-      SiS_Pr->SiS_LCDInfo, SiS_Pr->SiS_LCDResInfo, SiS_Pr->SiS_LCDTypeInfo, SiS_Pr->SiS_SetFlag);
-#endif
 }
 
 /*********************************************/
@@ -2359,7 +2306,7 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
       VCLKIndex = SiS_Pr->PanelVCLKIdx315;
    }
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
    /* Special Timing: Barco iQ Pro R series */
    if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) VCLKIndex = 0x44;
 
@@ -2410,12 +2357,6 @@ SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
 
    }
 
-#ifdef SIS_XORG_XF86
-#ifdef TWDEBUG
-   xf86DrvMsg(0, X_INFO, "VCLKIndex %d (0x%x)\n", VCLKIndex, VCLKIndex);
-#endif
-#endif
-
    return VCLKIndex;
 }
 
@@ -2428,10 +2369,10 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 {
    unsigned short i, j, modeflag, tempah=0;
    short tempcl;
-#if defined(SIS300) || defined(SIS315H)
+#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
    unsigned short tempbl;
 #endif
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
    unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
    unsigned short tempah2, tempbl2;
 #endif
@@ -2454,7 +2395,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 
    if(SiS_Pr->ChipType < SIS_315H) {
 
-#ifdef SIS300 /* ---- 300 series ---- */
+#ifdef CONFIG_FB_SIS_300 /* ---- 300 series ---- */
 
       /* For 301BDH: (with LCD via LVDS) */
       if(SiS_Pr->SiS_VBType & VB_NoLCD) {
@@ -2477,11 +2418,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 
       if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0xA0;
 
-#endif /* SIS300 */
+#endif /* CONFIG_FB_SIS_300 */
 
    } else {
 
-#ifdef SIS315H /* ------- 315/330 series ------ */
+#ifdef CONFIG_FB_SIS_315 /* ------- 315/330 series ------ */
 
       if(ModeNo > 0x13) {
          tempcl -= ModeVGA;
@@ -2494,7 +2435,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 
       if(SiS_Pr->SiS_VBInfo & SetInSlaveMode) tempah ^= 0x50;
 
-#endif /* SIS315H */
+#endif /* CONFIG_FB_SIS_315 */
 
    }
 
@@ -2503,7 +2444,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
    if(SiS_Pr->ChipType < SIS_315H) {
       SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,tempah);
    } else {
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
       if(SiS_Pr->SiS_IF_DEF_LVDS == 1) {
          SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x00,0xa0,tempah);
       } else if(SiS_Pr->SiS_VBType & VB_SISVB) {
@@ -2584,7 +2525,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 
    if(SiS_Pr->ChipType >= SIS_315H) {
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
       /* LVDS can only be slave in 8bpp modes */
       tempah = 0x80;
       if((modeflag & CRT2Mode) && (SiS_Pr->SiS_ModeType > ModeVGA)) {
@@ -2604,7 +2545,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 
    } else {
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
       tempah = 0;
       if( (!(SiS_Pr->SiS_VBInfo & SetInSlaveMode)) && (SiS_Pr->SiS_ModeType > ModeVGA) ) {
          tempah |= 0x02;
@@ -2626,7 +2567,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
 
    if(SiS_Pr->ChipType >= SIS_315H) {
 
-#ifdef SIS315H
+#ifdef CONFIG_FB_SIS_315
       /* unsigned char bridgerev = SiS_GetReg(SiS_Pr->SiS_Part4Port,0x01); */
 
       /* The following is nearly unpreditable and varies from machine
@@ -2718,11 +2659,11 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
       SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x23,tempbl,tempah);
    }
 
-#endif /* SIS315H */
+#endif /* CONFIG_FB_SIS_315 */
 
    } else if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) {
 
-#ifdef SIS300
+#ifdef CONFIG_FB_SIS_300
       SiS_SetRegAND(SiS_Pr->SiS_Part4Port,0x21,0x3f);
 
       if((SiS_Pr->SiS_VBInfo & DisableCRT2Display) ||
@@ -2745,7 +2686,7 @@ SiS_SetCRT2ModeRegs(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
2745 2686
2746 } else { /* LVDS */ 2687 } else { /* LVDS */
2747 2688
2748#ifdef SIS315H 2689#ifdef CONFIG_FB_SIS_315
2749 if(SiS_Pr->ChipType >= SIS_315H) { 2690 if(SiS_Pr->ChipType >= SIS_315H) {
2750 2691
2751 if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { 2692 if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) {
@@ -2931,7 +2872,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
2931 } 2872 }
2932 } 2873 }
2933 2874
2934#ifdef SIS315H 2875#ifdef CONFIG_FB_SIS_315
2935 if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) { 2876 if(SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) {
2936 if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) { 2877 if(SiS_Pr->SiS_LCDResInfo == Panel_1280x1024) {
2937 if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) { 2878 if(!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) {
@@ -3036,7 +2977,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
3036 case Panel_1280x1024: tempbx = 24; break; 2977 case Panel_1280x1024: tempbx = 24; break;
3037 case Panel_1400x1050: tempbx = 26; break; 2978 case Panel_1400x1050: tempbx = 26; break;
3038 case Panel_1600x1200: tempbx = 28; break; 2979 case Panel_1600x1200: tempbx = 28; break;
3039#ifdef SIS300 2980#ifdef CONFIG_FB_SIS_300
3040 case Panel_Barco1366: tempbx = 80; break; 2981 case Panel_Barco1366: tempbx = 80; break;
3041#endif 2982#endif
3042 } 2983 }
@@ -3053,7 +2994,7 @@ SiS_GetCRT2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
3053 2994
3054 if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30; 2995 if(SiS_Pr->SiS_LCDInfo & LCDPass11) tempbx = 30;
3055 2996
3056#ifdef SIS300 2997#ifdef CONFIG_FB_SIS_300
3057 if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) { 2998 if(SiS_Pr->SiS_CustomT == CUT_BARCO1024) {
3058 tempbx = 82; 2999 tempbx = 82;
3059 if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++; 3000 if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) tempbx++;
@@ -3189,7 +3130,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
3189 3130
3190 if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { 3131 if((SiS_Pr->SiS_VBType & VB_SISVB) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) {
3191 3132
3192#ifdef SIS315H 3133#ifdef CONFIG_FB_SIS_315
3193 SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); 3134 SiS_CalcPanelLinkTiming(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
3194 SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex); 3135 SiS_CalcLCDACRT1Timing(SiS_Pr, ModeNo, ModeIdIndex);
3195#endif 3136#endif
@@ -3214,7 +3155,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
3214 case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break; 3155 case 16: LVDSData = SiS_Pr->SiS_LVDS800x600Data_1; break;
3215 case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break; 3156 case 18: LVDSData = SiS_Pr->SiS_LVDS1024x600Data_1; break;
3216 case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break; 3157 case 20: LVDSData = SiS_Pr->SiS_LVDS1024x768Data_1; break;
3217#ifdef SIS300 3158#ifdef CONFIG_FB_SIS_300
3218 case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break; 3159 case 80: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_1; break;
3219 case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break; 3160 case 81: LVDSData = SiS_Pr->SiS_LVDSBARCO1366Data_2; break;
3220 case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break; 3161 case 82: LVDSData = SiS_Pr->SiS_LVDSBARCO1024Data_1; break;
@@ -3248,7 +3189,7 @@ SiS_GetCRT2DataLVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned
3248 (SiS_Pr->SiS_SetFlag & SetDOSMode) ) { 3189 (SiS_Pr->SiS_SetFlag & SetDOSMode) ) {
3249 SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes; 3190 SiS_Pr->SiS_HDE = SiS_Pr->PanelXRes;
3250 SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes; 3191 SiS_Pr->SiS_VDE = SiS_Pr->PanelYRes;
3251#ifdef SIS300 3192#ifdef CONFIG_FB_SIS_300
3252 if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) { 3193 if(SiS_Pr->SiS_CustomT == CUT_BARCO1366) {
3253 if(ResIndex < 0x08) { 3194 if(ResIndex < 0x08) {
3254 SiS_Pr->SiS_HDE = 1280; 3195 SiS_Pr->SiS_HDE = 1280;
@@ -3270,7 +3211,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3270 unsigned short resinfo, CRT2Index, ResIndex; 3211 unsigned short resinfo, CRT2Index, ResIndex;
3271 const struct SiS_LCDData *LCDPtr = NULL; 3212 const struct SiS_LCDData *LCDPtr = NULL;
3272 const struct SiS_TVData *TVPtr = NULL; 3213 const struct SiS_TVData *TVPtr = NULL;
3273#ifdef SIS315H 3214#ifdef CONFIG_FB_SIS_315
3274 short resinfo661; 3215 short resinfo661;
3275#endif 3216#endif
3276 3217
@@ -3283,7 +3224,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3283 } else { 3224 } else {
3284 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; 3225 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
3285 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; 3226 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
3286#ifdef SIS315H 3227#ifdef CONFIG_FB_SIS_315
3287 resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661; 3228 resinfo661 = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].ROMMODEIDX661;
3288 if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) && 3229 if( (SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) &&
3289 (SiS_Pr->SiS_SetFlag & LCDVESATiming) && 3230 (SiS_Pr->SiS_SetFlag & LCDVESATiming) &&
@@ -3460,7 +3401,7 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3460 3401
3461 } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) { 3402 } else if( (!(SiS_Pr->SiS_LCDInfo & DontExpandLCD)) && (romptr) && (ROMAddr) ) {
3462 3403
3463#ifdef SIS315H 3404#ifdef CONFIG_FB_SIS_315
3464 SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr]; 3405 SiS_Pr->SiS_RVBHCMAX = ROMAddr[romptr];
3465 SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1]; 3406 SiS_Pr->SiS_RVBHCFACT = ROMAddr[romptr+1];
3466 SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8); 3407 SiS_Pr->SiS_VGAHT = ROMAddr[romptr+2] | ((ROMAddr[romptr+3] & 0x0f) << 8);
@@ -3520,19 +3461,13 @@ SiS_GetCRT2Data301(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3520 case Panel_1680x1050 : 3461 case Panel_1680x1050 :
3521 case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break; 3462 case Panel_1680x1050 + 32: LCDPtr = SiS_Pr->SiS_LCD1680x1050Data; break;
3522 case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break; 3463 case 100 : LCDPtr = SiS_Pr->SiS_NoScaleData; break;
3523#ifdef SIS315H 3464#ifdef CONFIG_FB_SIS_315
3524 case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break; 3465 case 200 : LCDPtr = SiS310_ExtCompaq1280x1024Data; break;
3525 case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break; 3466 case 201 : LCDPtr = SiS_Pr->SiS_St2LCD1280x1024Data; break;
3526#endif 3467#endif
3527 default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break; 3468 default : LCDPtr = SiS_Pr->SiS_ExtLCD1024x768Data; break;
3528 } 3469 }
3529 3470
3530#ifdef SIS_XORG_XF86
3531#ifdef TWDEBUG
3532 xf86DrvMsg(0, X_INFO, "GetCRT2Data: Index %d ResIndex %d\n", CRT2Index, ResIndex);
3533#endif
3534#endif
3535
3536 SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX; 3471 SiS_Pr->SiS_RVBHCMAX = (LCDPtr+ResIndex)->RVBHCMAX;
3537 SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT; 3472 SiS_Pr->SiS_RVBHCFACT = (LCDPtr+ResIndex)->RVBHCFACT;
3538 SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT; 3473 SiS_Pr->SiS_VGAHT = (LCDPtr+ResIndex)->VGAHT;
@@ -3624,7 +3559,7 @@ SiS_GetLVDSDesPtr(struct SiS_Private *SiS_Pr)
3624{ 3559{
3625 const struct SiS_LVDSDes *PanelDesPtr = NULL; 3560 const struct SiS_LVDSDes *PanelDesPtr = NULL;
3626 3561
3627#ifdef SIS300 3562#ifdef CONFIG_FB_SIS_300
3628 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) { 3563 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCD) {
3629 3564
3630 if(SiS_Pr->ChipType < SIS_315H) { 3565 if(SiS_Pr->ChipType < SIS_315H) {
@@ -3696,7 +3631,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3696 3631
3697 if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { 3632 if((SiS_Pr->SiS_VBType & VB_SIS30xBLV) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) {
3698 3633
3699#ifdef SIS315H 3634#ifdef CONFIG_FB_SIS_315
3700 if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) { 3635 if(SiS_Pr->SiS_LCDInfo & DontExpandLCD) {
3701 /* non-pass 1:1 only, see above */ 3636 /* non-pass 1:1 only, see above */
3702 if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) { 3637 if(SiS_Pr->SiS_VGAHDE != SiS_Pr->PanelXRes) {
@@ -3771,7 +3706,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3771 } else { 3706 } else {
3772 3707
3773 if(SiS_Pr->ChipType < SIS_315H) { 3708 if(SiS_Pr->ChipType < SIS_315H) {
3774#ifdef SIS300 3709#ifdef CONFIG_FB_SIS_300
3775 switch(SiS_Pr->SiS_LCDResInfo) { 3710 switch(SiS_Pr->SiS_LCDResInfo) {
3776 case Panel_800x600: 3711 case Panel_800x600:
3777 if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) { 3712 if(SiS_Pr->SiS_VGAVDE == SiS_Pr->PanelYRes) {
@@ -3816,7 +3751,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3816 } 3751 }
3817#endif 3752#endif
3818 } else { 3753 } else {
3819#ifdef SIS315H 3754#ifdef CONFIG_FB_SIS_315
3820 switch(SiS_Pr->SiS_LCDResInfo) { 3755 switch(SiS_Pr->SiS_LCDResInfo) {
3821 case Panel_1024x768: 3756 case Panel_1024x768:
3822 case Panel_1280x1024: 3757 case Panel_1280x1024:
@@ -3844,7 +3779,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3844 if(SiS_Pr->ChipType < SIS_315H) { 3779 if(SiS_Pr->ChipType < SIS_315H) {
3845 if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320; 3780 if(!(modeflag & HalfDCLK)) SiS_Pr->SiS_LCDHDES = 320;
3846 } else { 3781 } else {
3847#ifdef SIS315H 3782#ifdef CONFIG_FB_SIS_315
3848 if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480; 3783 if(SiS_Pr->SiS_LCDResInfo == Panel_1024x768) SiS_Pr->SiS_LCDHDES = 480;
3849 if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804; 3784 if(SiS_Pr->SiS_LCDResInfo == Panel_1400x1050) SiS_Pr->SiS_LCDHDES = 804;
3850 if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704; 3785 if(SiS_Pr->SiS_LCDResInfo == Panel_1600x1200) SiS_Pr->SiS_LCDHDES = 704;
@@ -3866,7 +3801,7 @@ SiS_GetLVDSDesData(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
3866/* DISABLE VIDEO BRIDGE */ 3801/* DISABLE VIDEO BRIDGE */
3867/*********************************************/ 3802/*********************************************/
3868 3803
3869#ifdef SIS315H 3804#ifdef CONFIG_FB_SIS_315
3870static int 3805static int
3871SiS_HandlePWD(struct SiS_Private *SiS_Pr) 3806SiS_HandlePWD(struct SiS_Private *SiS_Pr)
3872{ 3807{
@@ -3891,11 +3826,6 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr)
3891 ret = 1; 3826 ret = 1;
3892 } 3827 }
3893 SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp); 3828 SiS_SetRegANDOR(SiS_Pr->SiS_Part4Port,0x27,0x7f,temp);
3894#ifdef SIS_XORG_XF86
3895#ifdef TWDEBUG
3896 xf86DrvMsg(0, 0, "Setting PWD %x\n", temp);
3897#endif
3898#endif
3899 } 3829 }
3900#endif 3830#endif
3901 return ret; 3831 return ret;
@@ -3909,7 +3839,7 @@ SiS_HandlePWD(struct SiS_Private *SiS_Pr)
3909void 3839void
3910SiS_DisableBridge(struct SiS_Private *SiS_Pr) 3840SiS_DisableBridge(struct SiS_Private *SiS_Pr)
3911{ 3841{
3912#ifdef SIS315H 3842#ifdef CONFIG_FB_SIS_315
3913 unsigned short tempah, pushax=0, modenum; 3843 unsigned short tempah, pushax=0, modenum;
3914#endif 3844#endif
3915 unsigned short temp=0; 3845 unsigned short temp=0;
@@ -3920,7 +3850,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
3920 3850
3921 if(SiS_Pr->ChipType < SIS_315H) { 3851 if(SiS_Pr->ChipType < SIS_315H) {
3922 3852
3923#ifdef SIS300 /* 300 series */ 3853#ifdef CONFIG_FB_SIS_300 /* 300 series */
3924 3854
3925 if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { 3855 if(!(SiS_CR36BIOSWord23b(SiS_Pr))) {
3926 if(SiS_Pr->SiS_VBType & VB_SISLVDS) { 3856 if(SiS_Pr->SiS_VBType & VB_SISLVDS) {
@@ -3953,11 +3883,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
3953 } 3883 }
3954 } 3884 }
3955 3885
3956#endif /* SIS300 */ 3886#endif /* CONFIG_FB_SIS_300 */
3957 3887
3958 } else { 3888 } else {
3959 3889
3960#ifdef SIS315H /* 315 series */ 3890#ifdef CONFIG_FB_SIS_315 /* 315 series */
3961 3891
3962 int didpwd = 0; 3892 int didpwd = 0;
3963 bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) || 3893 bool custom1 = (SiS_Pr->SiS_CustomT == CUT_COMPAQ1280) ||
@@ -4081,14 +4011,14 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
4081 4011
4082 } 4012 }
4083 4013
4084#endif /* SIS315H */ 4014#endif /* CONFIG_FB_SIS_315 */
4085 4015
4086 } 4016 }
4087 4017
4088 } else { /* ============ For 301 ================ */ 4018 } else { /* ============ For 301 ================ */
4089 4019
4090 if(SiS_Pr->ChipType < SIS_315H) { 4020 if(SiS_Pr->ChipType < SIS_315H) {
4091#ifdef SIS300 4021#ifdef CONFIG_FB_SIS_300
4092 if(!(SiS_CR36BIOSWord23b(SiS_Pr))) { 4022 if(!(SiS_CR36BIOSWord23b(SiS_Pr))) {
4093 SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08); 4023 SiS_SetRegSR11ANDOR(SiS_Pr,0xF7,0x08);
4094 SiS_PanelDelay(SiS_Pr, 3); 4024 SiS_PanelDelay(SiS_Pr, 3);
@@ -4111,7 +4041,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
4111 SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20); 4041 SiS_SetRegOR(SiS_Pr->SiS_P3c4,0x1E,0x20);
4112 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp); 4042 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x00,temp);
4113 } else { 4043 } else {
4114#ifdef SIS300 4044#ifdef CONFIG_FB_SIS_300
4115 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */ 4045 SiS_SetRegAND(SiS_Pr->SiS_P3c4,0x1E,0xDF); /* disable CRT2 */
4116 if( (!(SiS_CRT2IsLCD(SiS_Pr))) || 4046 if( (!(SiS_CRT2IsLCD(SiS_Pr))) ||
4117 (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) { 4047 (!(SiS_CR36BIOSWord23d(SiS_Pr))) ) {
@@ -4127,7 +4057,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
4127 4057
4128 if(SiS_Pr->ChipType < SIS_315H) { 4058 if(SiS_Pr->ChipType < SIS_315H) {
4129 4059
4130#ifdef SIS300 /* 300 series */ 4060#ifdef CONFIG_FB_SIS_300 /* 300 series */
4131 4061
4132 if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { 4062 if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) {
4133 SiS_SetCH700x(SiS_Pr,0x0E,0x09); 4063 SiS_SetCH700x(SiS_Pr,0x0E,0x09);
@@ -4171,11 +4101,11 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
4171 SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04); 4101 SiS_SetRegSR11ANDOR(SiS_Pr,0xFB,0x04);
4172 } 4102 }
4173 4103
4174#endif /* SIS300 */ 4104#endif /* CONFIG_FB_SIS_300 */
4175 4105
4176 } else { 4106 } else {
4177 4107
4178#ifdef SIS315H /* 315 series */ 4108#ifdef CONFIG_FB_SIS_315 /* 315 series */
4179 4109
4180 if(!(SiS_IsNotM650orLater(SiS_Pr))) { 4110 if(!(SiS_IsNotM650orLater(SiS_Pr))) {
4181 /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */ 4111 /*if(SiS_Pr->ChipType < SIS_340) { */ /* XGI needs this */
@@ -4288,7 +4218,7 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
4288 } 4218 }
4289 } 4219 }
4290 4220
4291#endif /* SIS315H */ 4221#endif /* CONFIG_FB_SIS_315 */
4292 4222
4293 } /* 315 series */ 4223 } /* 315 series */
4294 4224
@@ -4304,14 +4234,12 @@ SiS_DisableBridge(struct SiS_Private *SiS_Pr)
4304 * from outside the context of a mode switch! 4234 * from outside the context of a mode switch!
4305 * MUST call getVBType before calling this 4235 * MUST call getVBType before calling this
4306 */ 4236 */
4307#ifdef SIS_LINUX_KERNEL
4308static 4237static
4309#endif
4310void 4238void
4311SiS_EnableBridge(struct SiS_Private *SiS_Pr) 4239SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4312{ 4240{
4313 unsigned short temp=0, tempah; 4241 unsigned short temp=0, tempah;
4314#ifdef SIS315H 4242#ifdef CONFIG_FB_SIS_315
4315 unsigned short temp1, pushax=0; 4243 unsigned short temp1, pushax=0;
4316 bool delaylong = false; 4244 bool delaylong = false;
4317#endif 4245#endif
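
The hunk above also shows why the SIS_LINUX_KERNEL guards could simply be dropped: the storage class itself used to depend on the build environment, since the X.org driver linked these helpers externally while the kernel build kept them file-local. A minimal before/after sketch of that change, using SiS_EnableBridge from this diff (surrounding context omitted):

    /* Before: internal linkage only in kernel builds */
    #ifdef SIS_LINUX_KERNEL
    static
    #endif
    void
    SiS_EnableBridge(struct SiS_Private *SiS_Pr)

    /* After: kernel-only code, so unconditionally static */
    static
    void
    SiS_EnableBridge(struct SiS_Private *SiS_Pr)
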
@@ -4322,7 +4250,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4322 4250
4323 if(SiS_Pr->ChipType < SIS_315H) { 4251 if(SiS_Pr->ChipType < SIS_315H) {
4324 4252
4325#ifdef SIS300 /* 300 series */ 4253#ifdef CONFIG_FB_SIS_300 /* 300 series */
4326 4254
4327 if(SiS_CRT2IsLCD(SiS_Pr)) { 4255 if(SiS_CRT2IsLCD(SiS_Pr)) {
4328 if(SiS_Pr->SiS_VBType & VB_SISLVDS) { 4256 if(SiS_Pr->SiS_VBType & VB_SISLVDS) {
@@ -4385,11 +4313,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4385 } 4313 }
4386 4314
4387 4315
4388#endif /* SIS300 */ 4316#endif /* CONFIG_FB_SIS_300 */
4389 4317
4390 } else { 4318 } else {
4391 4319
4392#ifdef SIS315H /* 315 series */ 4320#ifdef CONFIG_FB_SIS_315 /* 315 series */
4393 4321
4394#ifdef SET_EMI 4322#ifdef SET_EMI
4395 unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0; 4323 unsigned char r30=0, r31=0, r32=0, r33=0, cr36=0;
@@ -4688,7 +4616,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4688 SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f); 4616 SiS_SetRegAND(SiS_Pr->SiS_Part1Port,0x00,0x7f);
4689 } 4617 }
4690 4618
4691#endif /* SIS315H */ 4619#endif /* CONFIG_FB_SIS_315 */
4692 4620
4693 } 4621 }
4694 4622
@@ -4739,7 +4667,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4739 4667
4740 if(SiS_Pr->ChipType < SIS_315H) { 4668 if(SiS_Pr->ChipType < SIS_315H) {
4741 4669
4742#ifdef SIS300 /* 300 series */ 4670#ifdef CONFIG_FB_SIS_300 /* 300 series */
4743 4671
4744 if(SiS_CRT2IsLCD(SiS_Pr)) { 4672 if(SiS_CRT2IsLCD(SiS_Pr)) {
4745 if(SiS_Pr->ChipType == SIS_730) { 4673 if(SiS_Pr->ChipType == SIS_730) {
@@ -4783,11 +4711,11 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4783 } 4711 }
4784 } 4712 }
4785 4713
4786#endif /* SIS300 */ 4714#endif /* CONFIG_FB_SIS_300 */
4787 4715
4788 } else { 4716 } else {
4789 4717
4790#ifdef SIS315H /* 315 series */ 4718#ifdef CONFIG_FB_SIS_315 /* 315 series */
4791 4719
4792 if(!(SiS_IsNotM650orLater(SiS_Pr))) { 4720 if(!(SiS_IsNotM650orLater(SiS_Pr))) {
4793 /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */ 4721 /*if(SiS_Pr->ChipType < SIS_340) {*/ /* XGI needs this */
@@ -4881,7 +4809,7 @@ SiS_EnableBridge(struct SiS_Private *SiS_Pr)
4881 } 4809 }
4882 } 4810 }
4883 4811
4884#endif /* SIS315H */ 4812#endif /* CONFIG_FB_SIS_315 */
4885 4813
4886 } /* 310 series */ 4814 } /* 310 series */
4887 4815
@@ -4971,7 +4899,7 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
4971 4899
4972 if(SiS_Pr->ChipType < SIS_315H) { 4900 if(SiS_Pr->ChipType < SIS_315H) {
4973 4901
4974#ifdef SIS300 /* ---- 300 series --- */ 4902#ifdef CONFIG_FB_SIS_300 /* ---- 300 series --- */
4975 4903
4976 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */ 4904 if(SiS_Pr->SiS_VBType & VB_SIS30xBLV) { /* 630 - 301B(-DH) */
4977 4905
@@ -5000,11 +4928,11 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
5000 4928
5001 } 4929 }
5002 4930
5003#endif /* SIS300 */ 4931#endif /* CONFIG_FB_SIS_300 */
5004 4932
5005 } else { 4933 } else {
5006 4934
5007#ifdef SIS315H /* ------- 315 series ------ */ 4935#ifdef CONFIG_FB_SIS_315 /* ------- 315 series ------ */
5008 4936
5009 if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */ 4937 if(SiS_Pr->SiS_VBType & VB_SISLVDS) { /* 315 - LVDS */
5010 4938
@@ -5076,13 +5004,13 @@ SiS_SetCRT2Sync(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
5076 } 5004 }
5077 5005
5078 } 5006 }
5079#endif /* SIS315H */ 5007#endif /* CONFIG_FB_SIS_315 */
5080 } 5008 }
5081 } 5009 }
5082} 5010}
5083 5011
5084/* Set CRT2 FIFO on 300/540/630/730 */ 5012/* Set CRT2 FIFO on 300/540/630/730 */
5085#ifdef SIS300 5013#ifdef CONFIG_FB_SIS_300
5086static void 5014static void
5087SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo) 5015SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo)
5088{ 5016{
@@ -5154,13 +5082,8 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo)
5154 5082
5155 } else { 5083 } else {
5156 5084
5157#ifdef SIS_LINUX_KERNEL
5158 pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50); 5085 pci50 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0x50);
5159 pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0); 5086 pciA0 = sisfb_read_nbridge_pci_dword(SiS_Pr, 0xa0);
5160#else
5161 pci50 = pciReadLong(0x00000000, 0x50);
5162 pciA0 = pciReadLong(0x00000000, 0xA0);
5163#endif
5164 5087
5165 if(SiS_Pr->ChipType == SIS_730) { 5088 if(SiS_Pr->ChipType == SIS_730) {
5166 5089
@@ -5262,7 +5185,7 @@ SiS_SetCRT2FIFO_300(struct SiS_Private *SiS_Pr,unsigned short ModeNo)
5262#endif 5185#endif
5263 5186
5264/* Set CRT2 FIFO on 315/330 series */ 5187/* Set CRT2 FIFO on 315/330 series */
5265#ifdef SIS315H 5188#ifdef CONFIG_FB_SIS_315
5266static void 5189static void
5267SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr) 5190SiS_SetCRT2FIFO_310(struct SiS_Private *SiS_Pr)
5268{ 5191{
@@ -5420,27 +5343,6 @@ SiS_SetGroup1_301(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned sho
5420 5343
5421 temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02)); 5344 temp = SiS_GetRegByte((SiS_Pr->SiS_P3ca+0x02));
5422 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? */ 5345 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1b,temp); /* ? */
5423
5424#ifdef SIS_XORG_XF86
5425#ifdef TWDEBUG
5426 xf86DrvMsg(0, X_INFO, "%d %d %d %d %d %d %d %d (%d %d %d %d)\n",
5427 SiS_Pr->CHDisplay, SiS_Pr->CHSyncStart, SiS_Pr->CHSyncEnd, SiS_Pr->CHTotal,
5428 SiS_Pr->CVDisplay, SiS_Pr->CVSyncStart, SiS_Pr->CVSyncEnd, SiS_Pr->CVTotal,
5429 SiS_Pr->CHBlankStart, SiS_Pr->CHBlankEnd, SiS_Pr->CVBlankStart, SiS_Pr->CVBlankEnd);
5430
5431 xf86DrvMsg(0, X_INFO, " {{0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n",
5432 SiS_Pr->CCRT1CRTC[0], SiS_Pr->CCRT1CRTC[1],
5433 SiS_Pr->CCRT1CRTC[2], SiS_Pr->CCRT1CRTC[3],
5434 SiS_Pr->CCRT1CRTC[4], SiS_Pr->CCRT1CRTC[5],
5435 SiS_Pr->CCRT1CRTC[6], SiS_Pr->CCRT1CRTC[7]);
5436 xf86DrvMsg(0, X_INFO, " 0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,0x%02x,\n",
5437 SiS_Pr->CCRT1CRTC[8], SiS_Pr->CCRT1CRTC[9],
5438 SiS_Pr->CCRT1CRTC[10], SiS_Pr->CCRT1CRTC[11],
5439 SiS_Pr->CCRT1CRTC[12], SiS_Pr->CCRT1CRTC[13],
5440 SiS_Pr->CCRT1CRTC[14], SiS_Pr->CCRT1CRTC[15]);
5441 xf86DrvMsg(0, X_INFO, " 0x%02x}},\n", SiS_Pr->CCRT1CRTC[16]);
5442#endif
5443#endif
5444} 5346}
5445 5347
5446/* Setup panel link 5348/* Setup panel link
@@ -5455,17 +5357,17 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5455 unsigned short push2, tempax, tempbx, tempcx, temp; 5357 unsigned short push2, tempax, tempbx, tempcx, temp;
5456 unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0; 5358 unsigned int tempeax = 0, tempebx, tempecx, tempvcfact = 0;
5457 bool islvds = false, issis = false, chkdclkfirst = false; 5359 bool islvds = false, issis = false, chkdclkfirst = false;
5458#ifdef SIS300 5360#ifdef CONFIG_FB_SIS_300
5459 unsigned short crt2crtc = 0; 5361 unsigned short crt2crtc = 0;
5460#endif 5362#endif
5461#ifdef SIS315H 5363#ifdef CONFIG_FB_SIS_315
5462 unsigned short pushcx; 5364 unsigned short pushcx;
5463#endif 5365#endif
5464 5366
5465 if(ModeNo <= 0x13) { 5367 if(ModeNo <= 0x13) {
5466 modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag; 5368 modeflag = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ModeFlag;
5467 resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo; 5369 resinfo = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_ResInfo;
5468#ifdef SIS300 5370#ifdef CONFIG_FB_SIS_300
5469 crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC; 5371 crt2crtc = SiS_Pr->SiS_SModeIDTable[ModeIdIndex].St_CRT2CRTC;
5470#endif 5372#endif
5471 } else if(SiS_Pr->UseCustomMode) { 5373 } else if(SiS_Pr->UseCustomMode) {
@@ -5473,7 +5375,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5473 } else { 5375 } else {
5474 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag; 5376 modeflag = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_ModeFlag;
5475 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO; 5377 resinfo = SiS_Pr->SiS_EModeIDTable[ModeIdIndex].Ext_RESINFO;
5476#ifdef SIS300 5378#ifdef CONFIG_FB_SIS_300
5477 crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC; 5379 crt2crtc = SiS_Pr->SiS_RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
5478#endif 5380#endif
5479 } 5381 }
@@ -5494,7 +5396,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5494 } 5396 }
5495 } 5397 }
5496 5398
5497#ifdef SIS315H 5399#ifdef CONFIG_FB_SIS_315
5498 if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) { 5400 if((SiS_Pr->ChipType >= SIS_315H) && (SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA)) {
5499 if(IS_SIS330) { 5401 if(IS_SIS330) {
5500 SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10); 5402 SiS_SetRegOR(SiS_Pr->SiS_Part1Port,0x2D,0x10);
@@ -5744,7 +5646,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5744 5646
5745 if(SiS_Pr->ChipType < SIS_315H) { 5647 if(SiS_Pr->ChipType < SIS_315H) {
5746 5648
5747#ifdef SIS300 /* 300 series */ 5649#ifdef CONFIG_FB_SIS_300 /* 300 series */
5748 tempeax = SiS_Pr->SiS_VGAVDE << 6; 5650 tempeax = SiS_Pr->SiS_VGAVDE << 6;
5749 temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE); 5651 temp = (tempeax % (unsigned int)SiS_Pr->SiS_VDE);
5750 tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE; 5652 tempeax = tempeax / (unsigned int)SiS_Pr->SiS_VDE;
@@ -5755,11 +5657,11 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5755 temp = (unsigned short)(tempeax & 0x00FF); 5657 temp = (unsigned short)(tempeax & 0x00FF);
5756 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */ 5658 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x1E,temp); /* BPLVCFACT */
5757 tempvcfact = temp; 5659 tempvcfact = temp;
5758#endif /* SIS300 */ 5660#endif /* CONFIG_FB_SIS_300 */
5759 5661
5760 } else { 5662 } else {
5761 5663
5762#ifdef SIS315H /* 315 series */ 5664#ifdef CONFIG_FB_SIS_315 /* 315 series */
5763 tempeax = SiS_Pr->SiS_VGAVDE << 18; 5665 tempeax = SiS_Pr->SiS_VGAVDE << 18;
5764 tempebx = SiS_Pr->SiS_VDE; 5666 tempebx = SiS_Pr->SiS_VDE;
5765 temp = (tempeax % tempebx); 5667 temp = (tempeax % tempebx);
@@ -5845,7 +5747,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5845 temp = (unsigned short)(tempecx & 0x00FF); 5747 temp = (unsigned short)(tempecx & 0x00FF);
5846 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp); 5748 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x23,temp);
5847 5749
5848#ifdef SIS315H 5750#ifdef CONFIG_FB_SIS_315
5849 if(SiS_Pr->ChipType >= SIS_315H) { 5751 if(SiS_Pr->ChipType >= SIS_315H) {
5850 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) { 5752 if(SiS_Pr->SiS_VBInfo & SetCRT2ToLCDA) {
5851 if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) { 5753 if((islvds) || (SiS_Pr->SiS_VBInfo & VB_SISLVDS)) {
@@ -5863,7 +5765,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5863 } 5765 }
5864#endif 5766#endif
5865 5767
5866#ifdef SIS300 5768#ifdef CONFIG_FB_SIS_300
5867 if(SiS_Pr->SiS_IF_DEF_TRUMPION) { 5769 if(SiS_Pr->SiS_IF_DEF_TRUMPION) {
5868 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; 5770 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
5869 unsigned char *trumpdata; 5771 unsigned char *trumpdata;
@@ -5899,7 +5801,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5899 } 5801 }
5900#endif 5802#endif
5901 5803
5902#ifdef SIS315H 5804#ifdef CONFIG_FB_SIS_315
5903 if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) { 5805 if(SiS_Pr->SiS_IF_DEF_FSTN || SiS_Pr->SiS_IF_DEF_DSTN) {
5904 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00); 5806 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x25,0x00);
5905 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00); 5807 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x26,0x00);
@@ -5999,7 +5901,7 @@ SiS_SetGroup1_LVDS(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned s
5999 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a); 5901 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x45,0x0a);
6000 } 5902 }
6001 } 5903 }
6002#endif /* SIS315H */ 5904#endif /* CONFIG_FB_SIS_315 */
6003} 5905}
6004 5906
6005/* Set Part 1 */ 5907/* Set Part 1 */
@@ -6007,12 +5909,12 @@ static void
6007SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex, 5909SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex,
6008 unsigned short RefreshRateTableIndex) 5910 unsigned short RefreshRateTableIndex)
6009{ 5911{
6010#if defined(SIS300) || defined(SIS315H) 5912#if defined(CONFIG_FB_SIS_300) || defined(CONFIG_FB_SIS_315)
6011 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; 5913 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
6012#endif 5914#endif
6013 unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0; 5915 unsigned short temp=0, tempax=0, tempbx=0, tempcx=0, bridgeadd=0;
6014 unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0; 5916 unsigned short pushbx=0, CRT1Index=0, modeflag, resinfo=0;
6015#ifdef SIS315H 5917#ifdef CONFIG_FB_SIS_315
6016 unsigned short tempbl=0; 5918 unsigned short tempbl=0;
6017#endif 5919#endif
6018 5920
@@ -6038,11 +5940,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6038 (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) { 5940 (SiS_Pr->SiS_VBInfo & SetInSlaveMode)) ) {
6039 5941
6040 if(SiS_Pr->ChipType < SIS_315H ) { 5942 if(SiS_Pr->ChipType < SIS_315H ) {
6041#ifdef SIS300 5943#ifdef CONFIG_FB_SIS_300
6042 SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo); 5944 SiS_SetCRT2FIFO_300(SiS_Pr, ModeNo);
6043#endif 5945#endif
6044 } else { 5946 } else {
6045#ifdef SIS315H 5947#ifdef CONFIG_FB_SIS_315
6046 SiS_SetCRT2FIFO_310(SiS_Pr); 5948 SiS_SetCRT2FIFO_310(SiS_Pr);
6047#endif 5949#endif
6048 } 5950 }
@@ -6051,7 +5953,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6051 5953
6052 if(SiS_Pr->ChipType < SIS_315H ) { 5954 if(SiS_Pr->ChipType < SIS_315H ) {
6053 5955
6054#ifdef SIS300 /* ------------- 300 series --------------*/ 5956#ifdef CONFIG_FB_SIS_300 /* ------------- 300 series --------------*/
6055 5957
6056 temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */ 5958 temp = (SiS_Pr->SiS_VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */
6057 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */ 5959 SiS_SetReg(SiS_Pr->SiS_Part1Port,0x08,temp); /* CRT2 Horizontal Total */
@@ -6070,11 +5972,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6070 5972
6071 bridgeadd = 12; 5973 bridgeadd = 12;
6072 5974
6073#endif /* SIS300 */ 5975#endif /* CONFIG_FB_SIS_300 */
6074 5976
6075 } else { 5977 } else {
6076 5978
6077#ifdef SIS315H /* ------------------- 315/330 series --------------- */ 5979#ifdef CONFIG_FB_SIS_315 /* ------------------- 315/330 series --------------- */
6078 5980
6079 tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */ 5981 tempcx = SiS_Pr->SiS_VGAHT; /* BTVGA2HT 0x08,0x09 */
6080 if(modeflag & HalfDCLK) { 5982 if(modeflag & HalfDCLK) {
@@ -6125,7 +6027,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6125 } 6027 }
6126 } 6028 }
6127 6029
6128#endif /* SIS315H */ 6030#endif /* CONFIG_FB_SIS_315 */
6129 6031
6130 } /* 315/330 series */ 6032 } /* 315/330 series */
6131 6033
@@ -6256,7 +6158,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6256 6158
6257 if(SiS_Pr->ChipType < SIS_315H) { 6159 if(SiS_Pr->ChipType < SIS_315H) {
6258 6160
6259#ifdef SIS300 /* ---------- 300 series -------------- */ 6161#ifdef CONFIG_FB_SIS_300 /* ---------- 300 series -------------- */
6260 6162
6261 if(SiS_Pr->SiS_VBType & VB_SISVB) { 6163 if(SiS_Pr->SiS_VBType & VB_SISVB) {
6262 temp = 0x20; 6164 temp = 0x20;
@@ -6310,11 +6212,11 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6310 6212
6311 SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */ 6213 SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x13,~0x3C,temp); /* Panel Link Delay Compensation; (Software Command Reset; Power Saving) */
6312 6214
6313#endif /* SIS300 */ 6215#endif /* CONFIG_FB_SIS_300 */
6314 6216
6315 } else { 6217 } else {
6316 6218
6317#ifdef SIS315H /* --------------- 315/330 series ---------------*/ 6219#ifdef CONFIG_FB_SIS_315 /* --------------- 315/330 series ---------------*/
6318 6220
6319 if(SiS_Pr->ChipType < SIS_661) { 6221 if(SiS_Pr->ChipType < SIS_661) {
6320 6222
@@ -6349,7 +6251,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6349 if(modeflag & HalfDCLK) tempax |= 0x40; 6251 if(modeflag & HalfDCLK) tempax |= 0x40;
6350 SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax); 6252 SiS_SetRegANDOR(SiS_Pr->SiS_Part1Port,0x2C,0x3f,tempax);
6351 6253
6352#endif /* SIS315H */ 6254#endif /* CONFIG_FB_SIS_315 */
6353 6255
6354 } 6256 }
6355 6257
@@ -6381,7 +6283,7 @@ SiS_SetGroup1(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6381/* SET PART 2 REGISTER GROUP */ 6283/* SET PART 2 REGISTER GROUP */
6382/*********************************************/ 6284/*********************************************/
6383 6285
6384#ifdef SIS315H 6286#ifdef CONFIG_FB_SIS_315
6385static unsigned char * 6287static unsigned char *
6386SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype) 6288SiS_GetGroup2CLVXPtr(struct SiS_Private *SiS_Pr, int tabletype)
6387{ 6289{
@@ -6478,7 +6380,7 @@ SiS_GetCRT2Part2Ptr(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned sh
6478} 6380}
6479#endif 6381#endif
6480 6382
6481#ifdef SIS300 6383#ifdef CONFIG_FB_SIS_300
6482static void 6384static void
6483SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc) 6385SiS_Group2LCDSpecial(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short crt2crtc)
6484{ 6386{
@@ -6690,7 +6592,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
6690 unsigned int longtemp, PhaseIndex; 6592 unsigned int longtemp, PhaseIndex;
6691 bool newtvphase; 6593 bool newtvphase;
6692 const unsigned char *TimingPoint; 6594 const unsigned char *TimingPoint;
6693#ifdef SIS315H 6595#ifdef CONFIG_FB_SIS_315
6694 unsigned short resindex, CRT2Index; 6596 unsigned short resindex, CRT2Index;
6695 const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL; 6597 const struct SiS_Part2PortTbl *CRT2Part2Ptr = NULL;
6696 6598
@@ -7069,7 +6971,7 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7069 SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB); 6971 SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x17,0xFB);
7070 SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF); 6972 SiS_SetRegAND(SiS_Pr->SiS_Part2Port,0x18,0xDF);
7071 6973
7072#ifdef SIS315H 6974#ifdef CONFIG_FB_SIS_315
7073 if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex, 6975 if(SiS_GetCRT2Part2Ptr(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex,
7074 &CRT2Index, &resindex)) { 6976 &CRT2Index, &resindex)) {
7075 switch(CRT2Index) { 6977 switch(CRT2Index) {
@@ -7130,12 +7032,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7130 7032
7131 /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */ 7033 /* Non-expanding: lcdvdes = tempcx = VT-1; lcdvdee = tempbx = VDE-1 */
7132 7034
7133#ifdef SIS_XORG_XF86
7134#ifdef TWDEBUG
7135 xf86DrvMsg(0, X_INFO, "lcdvdes 0x%x lcdvdee 0x%x\n", tempcx, tempbx);
7136#endif
7137#endif
7138
7139 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */ 7035 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x05,tempcx); /* lcdvdes */
7140 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */ 7036 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x06,tempbx); /* lcdvdee */
7141 7037
@@ -7184,12 +7080,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7184 tempbx = SiS_Pr->CVSyncStart; 7080 tempbx = SiS_Pr->CVSyncStart;
7185 } 7081 }
7186 7082
7187#ifdef SIS_XORG_XF86
7188#ifdef TWDEBUG
7189 xf86DrvMsg(0, X_INFO, "lcdvrs 0x%x\n", tempbx);
7190#endif
7191#endif
7192
7193 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */ 7083 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x04,tempbx); /* lcdvrs */
7194 7084
7195 temp = (tempbx >> 4) & 0xF0; 7085 temp = (tempbx >> 4) & 0xF0;
@@ -7201,15 +7091,9 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7201 temp |= (SiS_Pr->CVSyncEnd & 0x0f); 7091 temp |= (SiS_Pr->CVSyncEnd & 0x0f);
7202 } 7092 }
7203 7093
7204#ifdef SIS_XORG_XF86
7205#ifdef TWDEBUG
7206 xf86DrvMsg(0, X_INFO, "lcdvre[3:0] 0x%x\n", (temp & 0x0f));
7207#endif
7208#endif
7209
7210 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp); 7094 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x01,temp);
7211 7095
7212#ifdef SIS300 7096#ifdef CONFIG_FB_SIS_300
7213 SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc); 7097 SiS_Group2LCDSpecial(SiS_Pr, ModeNo, crt2crtc);
7214#endif 7098#endif
7215 7099
@@ -7245,12 +7129,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7245 tempax >>= 1; 7129 tempax >>= 1;
7246 } 7130 }
7247 7131
7248#ifdef SIS_XORG_XF86
7249#ifdef TWDEBUG
7250 xf86DrvMsg(0, X_INFO, "lcdhdee 0x%x\n", tempbx);
7251#endif
7252#endif
7253
7254 tempbx += bridgeoffset; 7132 tempbx += bridgeoffset;
7255 7133
7256 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */ 7134 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x23,tempbx); /* lcdhdee */
@@ -7276,12 +7154,6 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7276 tempbx += bridgeoffset; 7154 tempbx += bridgeoffset;
7277 } 7155 }
7278 7156
7279#ifdef SIS_XORG_XF86
7280#ifdef TWDEBUG
7281 xf86DrvMsg(0, X_INFO, "lcdhrs 0x%x\n", tempbx);
7282#endif
7283#endif
7284
7285 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */ 7157 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x1C,tempbx); /* lcdhrs */
7286 SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0)); 7158 SiS_SetRegANDOR(SiS_Pr->SiS_Part2Port,0x1D,0x0F,((tempbx >> 4) & 0xf0));
7287 7159
@@ -7300,20 +7172,14 @@ SiS_SetGroup2(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7300 tempbx += bridgeoffset; 7172 tempbx += bridgeoffset;
7301 } 7173 }
7302 7174
7303#ifdef SIS_XORG_XF86
7304#ifdef TWDEBUG
7305 xf86DrvMsg(0, X_INFO, "lcdhre 0x%x\n", tempbx);
7306#endif
7307#endif
7308
7309 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */ 7175 SiS_SetReg(SiS_Pr->SiS_Part2Port,0x21,tempbx); /* lcdhre */
7310 7176
7311 SiS_SetGroup2_Tail(SiS_Pr, ModeNo); 7177 SiS_SetGroup2_Tail(SiS_Pr, ModeNo);
7312 7178
7313#ifdef SIS300 7179#ifdef CONFIG_FB_SIS_300
7314 SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo); 7180 SiS_Set300Part2Regs(SiS_Pr, ModeIdIndex, RefreshRateTableIndex, ModeNo);
7315#endif 7181#endif
7316#ifdef SIS315H 7182#ifdef CONFIG_FB_SIS_315
7317 } /* CRT2-LCD from table */ 7183 } /* CRT2-LCD from table */
7318#endif 7184#endif
7319} 7185}
@@ -7382,7 +7248,7 @@ SiS_SetGroup3(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
7382/* SET PART 4 REGISTER GROUP */ 7248/* SET PART 4 REGISTER GROUP */
7383/*********************************************/ 7249/*********************************************/
7384 7250
7385#ifdef SIS315H 7251#ifdef CONFIG_FB_SIS_315
7386#if 0 7252#if 0
7387static void 7253static void
7388SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift) 7254SiS_ShiftXPos(struct SiS_Private *SiS_Pr, int shift)
@@ -8011,7 +7877,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
8011 7877
8012 if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) { 7878 if(SiS_Pr->SiS_IF_DEF_CH70xx == 1) {
8013 7879
8014#ifdef SIS300 7880#ifdef CONFIG_FB_SIS_300
8015 7881
8016 /* Chrontel 7005 - I assume that it does not come with a 315 series chip */ 7882 /* Chrontel 7005 - I assume that it does not come with a 315 series chip */
8017 7883
@@ -8124,7 +7990,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
8124 7990
8125 /* Chrontel 7019 - assumed that it does not come with a 300 series chip */ 7991 /* Chrontel 7019 - assumed that it does not come with a 300 series chip */
8126 7992
8127#ifdef SIS315H 7993#ifdef CONFIG_FB_SIS_315
8128 7994
8129 unsigned short temp; 7995 unsigned short temp;
8130 7996
@@ -8175,7 +8041,7 @@ SiS_SetCHTVReg(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short
8175 8041
8176} 8042}
8177 8043
8178#ifdef SIS315H /* ----------- 315 series only ---------- */ 8044#ifdef CONFIG_FB_SIS_315 /* ----------- 315 series only ---------- */
8179 8045
8180void 8046void
8181SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr) 8047SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr)
@@ -8657,7 +8523,7 @@ SiS_ChrontelDoSomething1(struct SiS_Private *SiS_Pr)
8657bool 8523bool
8658SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo) 8524SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8659{ 8525{
8660#ifdef SIS300 8526#ifdef CONFIG_FB_SIS_300
8661 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase; 8527 unsigned char *ROMAddr = SiS_Pr->VirtualRomBase;
8662#endif 8528#endif
8663 unsigned short ModeIdIndex, RefreshRateTableIndex; 8529 unsigned short ModeIdIndex, RefreshRateTableIndex;
@@ -8703,16 +8569,6 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8703 SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); 8569 SiS_GetLVDSDesData(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
8704 } 8570 }
8705 8571
8706#ifdef SIS_XORG_XF86
8707#ifdef TWDEBUG
8708 xf86DrvMsg(0, X_INFO, "(init301: LCDHDES 0x%03x LCDVDES 0x%03x)\n", SiS_Pr->SiS_LCDHDES, SiS_Pr->SiS_LCDVDES);
8709 xf86DrvMsg(0, X_INFO, "(init301: HDE 0x%03x VDE 0x%03x)\n", SiS_Pr->SiS_HDE, SiS_Pr->SiS_VDE);
8710 xf86DrvMsg(0, X_INFO, "(init301: VGAHDE 0x%03x VGAVDE 0x%03x)\n", SiS_Pr->SiS_VGAHDE, SiS_Pr->SiS_VGAVDE);
8711 xf86DrvMsg(0, X_INFO, "(init301: HT 0x%03x VT 0x%03x)\n", SiS_Pr->SiS_HT, SiS_Pr->SiS_VT);
8712 xf86DrvMsg(0, X_INFO, "(init301: VGAHT 0x%03x VGAVT 0x%03x)\n", SiS_Pr->SiS_VGAHT, SiS_Pr->SiS_VGAVT);
8713#endif
8714#endif
8715
8716 if(SiS_Pr->SiS_SetFlag & LowModeTests) { 8572 if(SiS_Pr->SiS_SetFlag & LowModeTests) {
8717 SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); 8573 SiS_SetGroup1(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
8718 } 8574 }
@@ -8722,12 +8578,12 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8722 if(SiS_Pr->SiS_SetFlag & LowModeTests) { 8578 if(SiS_Pr->SiS_SetFlag & LowModeTests) {
8723 8579
8724 SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); 8580 SiS_SetGroup2(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
8725#ifdef SIS315H 8581#ifdef CONFIG_FB_SIS_315
8726 SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); 8582 SiS_SetGroup2_C_ELV(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
8727#endif 8583#endif
8728 SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex); 8584 SiS_SetGroup3(SiS_Pr, ModeNo, ModeIdIndex);
8729 SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex); 8585 SiS_SetGroup4(SiS_Pr, ModeNo, ModeIdIndex, RefreshRateTableIndex);
8730#ifdef SIS315H 8586#ifdef CONFIG_FB_SIS_315
8731 SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex); 8587 SiS_SetGroup4_C_ELV(SiS_Pr, ModeNo, ModeIdIndex);
8732#endif 8588#endif
8733 SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex); 8589 SiS_SetGroup5(SiS_Pr, ModeNo, ModeIdIndex);
@@ -8758,7 +8614,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8758 if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) { 8614 if(SiS_Pr->SiS_IF_DEF_CH70xx != 0) {
8759 if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { 8615 if(SiS_Pr->SiS_VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
8760 if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) { 8616 if(SiS_Pr->SiS_IF_DEF_CH70xx == 2) {
8761#ifdef SIS315H 8617#ifdef CONFIG_FB_SIS_315
8762 SiS_SetCH701xForLCD(SiS_Pr); 8618 SiS_SetCH701xForLCD(SiS_Pr);
8763#endif 8619#endif
8764 } 8620 }
@@ -8771,7 +8627,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8771 8627
8772 } 8628 }
8773 8629
8774#ifdef SIS300 8630#ifdef CONFIG_FB_SIS_300
8775 if(SiS_Pr->ChipType < SIS_315H) { 8631 if(SiS_Pr->ChipType < SIS_315H) {
8776 if(SiS_Pr->SiS_SetFlag & LowModeTests) { 8632 if(SiS_Pr->SiS_SetFlag & LowModeTests) {
8777 if(SiS_Pr->SiS_UseOEM) { 8633 if(SiS_Pr->SiS_UseOEM) {
@@ -8794,7 +8650,7 @@ SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo)
8794 } 8650 }
8795#endif 8651#endif
8796 8652
8797#ifdef SIS315H 8653#ifdef CONFIG_FB_SIS_315
8798 if(SiS_Pr->ChipType >= SIS_315H) { 8654 if(SiS_Pr->ChipType >= SIS_315H) {
8799 if(SiS_Pr->SiS_SetFlag & LowModeTests) { 8655 if(SiS_Pr->SiS_SetFlag & LowModeTests) {
8800 if(SiS_Pr->ChipType < SIS_661) { 8656 if(SiS_Pr->ChipType < SIS_661) {
@@ -8873,7 +8729,7 @@ SiS_SetupDDCN(struct SiS_Private *SiS_Pr)
8873 } 8729 }
8874} 8730}
8875 8731
8876#ifdef SIS300 8732#ifdef CONFIG_FB_SIS_300
8877static unsigned char * 8733static unsigned char *
8878SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr) 8734SiS_SetTrumpBlockLoop(struct SiS_Private *SiS_Pr, unsigned char *dataptr)
8879{ 8735{
@@ -8923,11 +8779,6 @@ SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr)
8923 dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr); 8779 dataptr = SiS_SetTrumpBlockLoop(SiS_Pr, dataptr);
8924 if(!dataptr) return false; 8780 if(!dataptr) return false;
8925 } 8781 }
8926#ifdef SIS_XORG_XF86
8927#ifdef TWDEBUG
8928 xf86DrvMsg(0, X_INFO, "Trumpion block success\n");
8929#endif
8930#endif
8931 return true; 8782 return true;
8932} 8783}
8933#endif 8784#endif
@@ -9002,9 +8853,7 @@ SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val)
9002 SiS_SetChReg(SiS_Pr, reg, val, 0); 8853 SiS_SetChReg(SiS_Pr, reg, val, 0);
9003} 8854}
9004 8855
9005#ifdef SIS_LINUX_KERNEL
9006static 8856static
9007#endif
9008void 8857void
9009SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val) 8858SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val)
9010{ 8859{
@@ -9091,9 +8940,7 @@ SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempbx)
9091 8940
9092/* Read from Chrontel 70xx */ 8941/* Read from Chrontel 70xx */
9093/* Parameter is [Register no (S7-S0)] */ 8942/* Parameter is [Register no (S7-S0)] */
9094#ifdef SIS_LINUX_KERNEL
9095static 8943static
9096#endif
9097unsigned short 8944unsigned short
9098SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx) 8945SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempbx)
9099{ 8946{
@@ -9114,9 +8961,7 @@ SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg,
9114} 8961}
9115 8962
9116/* Our own DDC functions */ 8963/* Our own DDC functions */
9117#ifndef SIS_XORG_XF86
9118static 8964static
9119#endif
9120unsigned short 8965unsigned short
9121SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine, 8966SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine,
9122 unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32, 8967 unsigned short adaptnum, unsigned short DDCdatatype, bool checkcr32,
@@ -9224,12 +9069,6 @@ SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, int VGAEngine,
9224 9069
9225 SiS_SetupDDCN(SiS_Pr); 9070 SiS_SetupDDCN(SiS_Pr);
9226 9071
9227#ifdef SIS_XORG_XF86
9228#ifdef TWDEBUG
9229 xf86DrvMsg(0, X_INFO, "DDC Port %x Index %x Shift %d\n",
9230 SiS_Pr->SiS_DDC_Port, SiS_Pr->SiS_DDC_Index, temp);
9231#endif
9232#endif
9233 return 0; 9072 return 0;
9234} 9073}
9235 9074
@@ -9292,11 +9131,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr)
9292 SiS_SetSwitchDDC2(SiS_Pr); 9131 SiS_SetSwitchDDC2(SiS_Pr);
9293 if(SiS_PrepareDDC(SiS_Pr)) { 9132 if(SiS_PrepareDDC(SiS_Pr)) {
9294 SiS_SetStop(SiS_Pr); 9133 SiS_SetStop(SiS_Pr);
9295#ifdef SIS_XORG_XF86
9296#ifdef TWDEBUG
9297 xf86DrvMsg(0, X_INFO, "Probe: Prepare failed\n");
9298#endif
9299#endif
9300 return 0xFFFF; 9134 return 0xFFFF;
9301 } 9135 }
9302 mask = 0xf0; 9136 mask = 0xf0;
@@ -9310,11 +9144,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr)
9310 } else { 9144 } else {
9311 failed = true; 9145 failed = true;
9312 ret = 0xFFFF; 9146 ret = 0xFFFF;
9313#ifdef SIS_XORG_XF86
9314#ifdef TWDEBUG
9315 xf86DrvMsg(0, X_INFO, "Probe: Read 1 failed\n");
9316#endif
9317#endif
9318 } 9147 }
9319 } 9148 }
9320 if(!failed) { 9149 if(!failed) {
@@ -9324,11 +9153,6 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr)
9324 if(temp == value) ret = 0; 9153 if(temp == value) ret = 0;
9325 else { 9154 else {
9326 ret = 0xFFFF; 9155 ret = 0xFFFF;
9327#ifdef SIS_XORG_XF86
9328#ifdef TWDEBUG
9329 xf86DrvMsg(0, X_INFO, "Probe: Read 2 failed\n");
9330#endif
9331#endif
9332 if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) { 9156 if(SiS_Pr->SiS_DDC_DeviceAddr == 0xa0) {
9333 if(temp == 0x30) ret = 0; 9157 if(temp == 0x30) ret = 0;
9334 } 9158 }
@@ -9338,9 +9162,7 @@ SiS_DoProbeDDC(struct SiS_Private *SiS_Pr)
9338 return ret; 9162 return ret;
9339} 9163}
9340 9164
9341#ifndef SIS_XORG_XF86
9342static 9165static
9343#endif
9344unsigned short 9166unsigned short
9345SiS_ProbeDDC(struct SiS_Private *SiS_Pr) 9167SiS_ProbeDDC(struct SiS_Private *SiS_Pr)
9346{ 9168{
@@ -9357,9 +9179,7 @@ SiS_ProbeDDC(struct SiS_Private *SiS_Pr)
9357 return flag; 9179 return flag;
9358} 9180}
9359 9181
9360#ifndef SIS_XORG_XF86
9361static 9182static
9362#endif
9363unsigned short 9183unsigned short
9364SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer) 9184SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, unsigned char *buffer)
9365{ 9185{
@@ -9606,11 +9426,6 @@ SiS_SetSCLKHigh(struct SiS_Private *SiS_Pr)
9606 temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index); 9426 temp = SiS_GetReg(SiS_Pr->SiS_DDC_Port,SiS_Pr->SiS_DDC_Index);
9607 } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog); 9427 } while((!(temp & SiS_Pr->SiS_DDC_Clk)) && --watchdog);
9608 if (!watchdog) { 9428 if (!watchdog) {
9609#ifdef SIS_XORG_XF86
9610#ifdef TWDEBUG
9611 xf86DrvMsg(0, X_INFO, "SetClkHigh failed\n");
9612#endif
9613#endif
9614 return 0xFFFF; 9429 return 0xFFFF;
9615 } 9430 }
9616 SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT); 9431 SiS_DDC2Delay(SiS_Pr,SiS_I2CDELAYSHORT);
@@ -9641,7 +9456,7 @@ SiS_CheckACK(struct SiS_Private *SiS_Pr)
9641 9456
9642/* =============== SiS 315/330 O.E.M. ================= */ 9457/* =============== SiS 315/330 O.E.M. ================= */
9643 9458
9644#ifdef SIS315H 9459#ifdef CONFIG_FB_SIS_315
9645 9460
9646static unsigned short 9461static unsigned short
9647GetRAMDACromptr(struct SiS_Private *SiS_Pr) 9462GetRAMDACromptr(struct SiS_Private *SiS_Pr)
@@ -10829,7 +10644,7 @@ SiS_FinalizeLCD(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned shor
10829 10644
10830/* ================= SiS 300 O.E.M. ================== */ 10645/* ================= SiS 300 O.E.M. ================== */
10831 10646
10832#ifdef SIS300 10647#ifdef CONFIG_FB_SIS_300
10833 10648
10834static void 10649static void
10835SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex, 10650SetOEMLCDData2(struct SiS_Private *SiS_Pr, unsigned short ModeNo,unsigned short ModeIdIndex,
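
Every hunk in init301.c above follows the same mechanical pattern: the driver-private SIS300/SIS315H guards (previously arranged through the driver's own osdef.h) become the corresponding Kconfig symbols, and the SIS_XORG_XF86/TWDEBUG debug branches drop out entirely because a kernel build never defines them. A minimal sketch of the transformation, reusing identifiers from the hunks above:

    /* Before: guard macros supplied by the driver itself */
    #ifdef SIS315H
            SiS_SetCRT2FIFO_310(SiS_Pr);
    #endif

    /* After: guards track the Kconfig options directly, so the
     * 300- and 315-series code paths are compiled in only when
     * CONFIG_FB_SIS_300 / CONFIG_FB_SIS_315 are enabled.
     */
    #ifdef CONFIG_FB_SIS_315
            SiS_SetCRT2FIFO_310(SiS_Pr);
    #endif
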
diff --git a/drivers/video/sis/init301.h b/drivers/video/sis/init301.h
index 51d99222375d..e1fd31d0fddf 100644
--- a/drivers/video/sis/init301.h
+++ b/drivers/video/sis/init301.h
@@ -53,15 +53,8 @@
53#ifndef _INIT301_H_ 53#ifndef _INIT301_H_
54#define _INIT301_H_ 54#define _INIT301_H_
55 55
56#include "osdef.h"
57#include "initdef.h" 56#include "initdef.h"
58 57
59#ifdef SIS_XORG_XF86
60#include "sis.h"
61#include "sis_regs.h"
62#endif
63
64#ifdef SIS_LINUX_KERNEL
65#include "vgatypes.h" 58#include "vgatypes.h"
66#include "vstruct.h" 59#include "vstruct.h"
67#ifdef SIS_CP 60#ifdef SIS_CP
@@ -72,7 +65,6 @@
72#include <linux/fb.h> 65#include <linux/fb.h>
73#include "sis.h" 66#include "sis.h"
74#include <video/sisfb.h> 67#include <video/sisfb.h>
75#endif
76 68
77static const unsigned char SiS_YPbPrTable[3][64] = { 69static const unsigned char SiS_YPbPrTable[3][64] = {
78 { 70 {
@@ -237,7 +229,7 @@ static const unsigned char SiS_Part2CLVX_6[] = { /* 1080i */
237 0xFF,0xFF, 229 0xFF,0xFF,
238}; 230};
239 231
240#ifdef SIS315H 232#ifdef CONFIG_FB_SIS_315
241/* 661 et al LCD data structure (2.03.00) */ 233/* 661 et al LCD data structure (2.03.00) */
242static const unsigned char SiS_LCDStruct661[] = { 234static const unsigned char SiS_LCDStruct661[] = {
243 /* 1024x768 */ 235 /* 1024x768 */
@@ -279,7 +271,7 @@ static const unsigned char SiS_LCDStruct661[] = {
279}; 271};
280#endif 272#endif
281 273
282#ifdef SIS300 274#ifdef CONFIG_FB_SIS_300
283static unsigned char SiS300_TrumpionData[14][80] = { 275static unsigned char SiS300_TrumpionData[14][80] = {
284 { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02, 276 { 0x02,0x0A,0x0A,0x01,0x04,0x01,0x00,0x03,0x0D,0x00,0x0D,0x10,0x7F,0x00,0x80,0x02,
285 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23, 277 0x20,0x03,0x0B,0x00,0x90,0x01,0xC1,0x01,0x60,0x0C,0x30,0x10,0x00,0x00,0x04,0x23,
@@ -356,9 +348,6 @@ static unsigned char SiS300_TrumpionData[14][80] = {
356#endif 348#endif
357 349
358void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr); 350void SiS_UnLockCRT2(struct SiS_Private *SiS_Pr);
359#ifndef SIS_LINUX_KERNEL
360void SiS_LockCRT2(struct SiS_Private *SiS_Pr);
361#endif
362void SiS_EnableCRT2(struct SiS_Private *SiS_Pr); 351void SiS_EnableCRT2(struct SiS_Private *SiS_Pr);
363unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex); 352unsigned short SiS_GetRatePtr(struct SiS_Private *SiS_Pr, unsigned short ModeNo, unsigned short ModeIdIndex);
364void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr); 353void SiS_WaitRetrace1(struct SiS_Private *SiS_Pr);
@@ -375,9 +364,6 @@ unsigned short SiS_GetVCLK2Ptr(struct SiS_Private *SiS_Pr, unsigned short ModeNo
375 unsigned short RefreshRateTableIndex); 364 unsigned short RefreshRateTableIndex);
376unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex); 365unsigned short SiS_GetResInfo(struct SiS_Private *SiS_Pr,unsigned short ModeNo,unsigned short ModeIdIndex);
377void SiS_DisableBridge(struct SiS_Private *SiS_Pr); 366void SiS_DisableBridge(struct SiS_Private *SiS_Pr);
378#ifndef SIS_LINUX_KERNEL
379void SiS_EnableBridge(struct SiS_Private *SiS_Pr);
380#endif
381bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo); 367bool SiS_SetCRT2Group(struct SiS_Private *SiS_Pr, unsigned short ModeNo);
382void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr); 368void SiS_SiS30xBLOn(struct SiS_Private *SiS_Pr);
383void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr); 369void SiS_SiS30xBLOff(struct SiS_Private *SiS_Pr);
@@ -386,13 +372,9 @@ void SiS_SetCH700x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned cha
386unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax); 372unsigned short SiS_GetCH700x(struct SiS_Private *SiS_Pr, unsigned short tempax);
387void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val); 373void SiS_SetCH701x(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val);
388unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax); 374unsigned short SiS_GetCH701x(struct SiS_Private *SiS_Pr, unsigned short tempax);
389#ifndef SIS_LINUX_KERNEL
390void SiS_SetCH70xx(struct SiS_Private *SiS_Pr, unsigned short reg, unsigned char val);
391unsigned short SiS_GetCH70xx(struct SiS_Private *SiS_Pr, unsigned short tempax);
392#endif
393void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg, 375void SiS_SetCH70xxANDOR(struct SiS_Private *SiS_Pr, unsigned short reg,
394 unsigned char orval,unsigned short andval); 376 unsigned char orval,unsigned short andval);
395#ifdef SIS315H 377#ifdef CONFIG_FB_SIS_315
396static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr); 378static void SiS_Chrontel701xOn(struct SiS_Private *SiS_Pr);
397static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr); 379static void SiS_Chrontel701xOff(struct SiS_Private *SiS_Pr);
398static void SiS_ChrontelInitTVVSync(struct SiS_Private *SiS_Pr); 380static void SiS_ChrontelInitTVVSync(struct SiS_Private *SiS_Pr);
@@ -401,7 +383,7 @@ void SiS_Chrontel701xBLOn(struct SiS_Private *SiS_Pr);
401void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr); 383void SiS_Chrontel701xBLOff(struct SiS_Private *SiS_Pr);
402#endif /* 315 */ 384#endif /* 315 */
403 385
404#ifdef SIS300 386#ifdef CONFIG_FB_SIS_300
405static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr); 387static bool SiS_SetTrumpionBlock(struct SiS_Private *SiS_Pr, unsigned char *dataptr);
406void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo); 388void SiS_SetChrontelGPIO(struct SiS_Private *SiS_Pr, unsigned short myvbinfo);
407#endif 389#endif
@@ -412,21 +394,12 @@ unsigned short SiS_HandleDDC(struct SiS_Private *SiS_Pr, unsigned int VBFlags, i
412 unsigned short adaptnum, unsigned short DDCdatatype, 394 unsigned short adaptnum, unsigned short DDCdatatype,
413 unsigned char *buffer, unsigned int VBFlags2); 395 unsigned char *buffer, unsigned int VBFlags2);
414 396
415#ifdef SIS_XORG_XF86
416unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags,
417 int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype,
418 bool checkcr32, unsigned int VBFlags2);
419unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr);
420unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype,
421 unsigned char *buffer);
422#else
423static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags, 397static unsigned short SiS_InitDDCRegs(struct SiS_Private *SiS_Pr, unsigned int VBFlags,
424 int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype, 398 int VGAEngine, unsigned short adaptnum, unsigned short DDCdatatype,
425 bool checkcr32, unsigned int VBFlags2); 399 bool checkcr32, unsigned int VBFlags2);
426static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr); 400static unsigned short SiS_ProbeDDC(struct SiS_Private *SiS_Pr);
427static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype, 401static unsigned short SiS_ReadDDC(struct SiS_Private *SiS_Pr, unsigned short DDCdatatype,
428 unsigned char *buffer); 402 unsigned char *buffer);
429#endif
430static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr); 403static void SiS_SetSwitchDDC2(struct SiS_Private *SiS_Pr);
431static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr); 404static unsigned short SiS_SetStart(struct SiS_Private *SiS_Pr);
432static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr); 405static unsigned short SiS_SetStop(struct SiS_Private *SiS_Pr);
@@ -441,13 +414,13 @@ static unsigned short SiS_PrepareDDC(struct SiS_Private *SiS_Pr);
441static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno); 414static void SiS_SendACK(struct SiS_Private *SiS_Pr, unsigned short yesno);
442static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr); 415static unsigned short SiS_DoProbeDDC(struct SiS_Private *SiS_Pr);
443 416
444#ifdef SIS300 417#ifdef CONFIG_FB_SIS_300
445static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr, 418static void SiS_OEM300Setting(struct SiS_Private *SiS_Pr,
446 unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex); 419 unsigned short ModeNo, unsigned short ModeIdIndex, unsigned short RefTabindex);
447static void SetOEMLCDData2(struct SiS_Private *SiS_Pr, 420static void SetOEMLCDData2(struct SiS_Private *SiS_Pr,
448 unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex); 421 unsigned short ModeNo, unsigned short ModeIdIndex,unsigned short RefTableIndex);
449#endif 422#endif
450#ifdef SIS315H 423#ifdef CONFIG_FB_SIS_315
451static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr, 424static void SiS_OEM310Setting(struct SiS_Private *SiS_Pr,
452 unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI); 425 unsigned short ModeNo,unsigned short ModeIdIndex, unsigned short RRTI);
453static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr, 426static void SiS_OEM661Setting(struct SiS_Private *SiS_Pr,
@@ -482,15 +455,13 @@ extern void SiS_CalcLCDACRT1Timing(struct SiS_Private *SiS_Pr, unsigned short M
482extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth); 455extern void SiS_CalcCRRegisters(struct SiS_Private *SiS_Pr, int depth);
483extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); 456extern unsigned short SiS_GetRefCRTVCLK(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide);
484extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide); 457extern unsigned short SiS_GetRefCRT1CRTC(struct SiS_Private *SiS_Pr, unsigned short Index, int UseWide);
485#ifdef SIS300 458#ifdef CONFIG_FB_SIS_300
486extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx, 459extern void SiS_GetFIFOThresholdIndex300(struct SiS_Private *SiS_Pr, unsigned short *tempbx,
487 unsigned short *tempcl); 460 unsigned short *tempcl);
488extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl); 461extern unsigned short SiS_GetFIFOThresholdB300(unsigned short tempbx, unsigned short tempcl);
489extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index); 462extern unsigned short SiS_GetLatencyFactor630(struct SiS_Private *SiS_Pr, unsigned short index);
490#ifdef SIS_LINUX_KERNEL
491extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg); 463extern unsigned int sisfb_read_nbridge_pci_dword(struct SiS_Private *SiS_Pr, int reg);
492extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg); 464extern unsigned int sisfb_read_lpc_pci_dword(struct SiS_Private *SiS_Pr, int reg);
493#endif 465#endif
494#endif
495 466
496#endif 467#endif
diff --git a/drivers/video/sis/initextlfb.c b/drivers/video/sis/initextlfb.c
index 99c04a4855d1..9dec64da4015 100644
--- a/drivers/video/sis/initextlfb.c
+++ b/drivers/video/sis/initextlfb.c
@@ -25,7 +25,6 @@
25 * Author: Thomas Winischhofer <thomas@winischhofer.net> 25 * Author: Thomas Winischhofer <thomas@winischhofer.net>
26 */ 26 */
27 27
28#include "osdef.h"
29#include "initdef.h" 28#include "initdef.h"
30#include "vgatypes.h" 29#include "vgatypes.h"
31#include "vstruct.h" 30#include "vstruct.h"
@@ -59,7 +58,7 @@ sisfb_mode_rate_to_dclock(struct SiS_Private *SiS_Pr, unsigned char modeno,
59 58
60 if(rateindex > 0) rateindex--; 59 if(rateindex > 0) rateindex--;
61 60
62#ifdef SIS315H 61#ifdef CONFIG_FB_SIS_315
63 switch(ModeNo) { 62 switch(ModeNo) {
64 case 0x5a: ModeNo = 0x50; break; 63 case 0x5a: ModeNo = 0x50; break;
65 case 0x5b: ModeNo = 0x56; 64 case 0x5b: ModeNo = 0x56;
@@ -103,7 +102,7 @@ sisfb_mode_rate_to_ddata(struct SiS_Private *SiS_Pr, unsigned char modeno,
103 102
104 if(rateindex > 0) rateindex--; 103 if(rateindex > 0) rateindex--;
105 104
106#ifdef SIS315H 105#ifdef CONFIG_FB_SIS_315
107 switch(ModeNo) { 106 switch(ModeNo) {
108 case 0x5a: ModeNo = 0x50; break; 107 case 0x5a: ModeNo = 0x50; break;
109 case 0x5b: ModeNo = 0x56; 108 case 0x5b: ModeNo = 0x56;
@@ -187,7 +186,7 @@ sisfb_gettotalfrommode(struct SiS_Private *SiS_Pr, unsigned char modeno, int *ht
187 186
188 if(rateindex > 0) rateindex--; 187 if(rateindex > 0) rateindex--;
189 188
190#ifdef SIS315H 189#ifdef CONFIG_FB_SIS_315
191 switch(ModeNo) { 190 switch(ModeNo) {
192 case 0x5a: ModeNo = 0x50; break; 191 case 0x5a: ModeNo = 0x50; break;
193 case 0x5b: ModeNo = 0x56; 192 case 0x5b: ModeNo = 0x56;
diff --git a/drivers/video/sis/osdef.h b/drivers/video/sis/osdef.h
deleted file mode 100644
index 6ff8f988a1a7..000000000000
--- a/drivers/video/sis/osdef.h
+++ /dev/null
@@ -1,133 +0,0 @@
1/* $XFree86$ */
2/* $XdotOrg$ */
3/*
4 * OS depending defines
5 *
6 * Copyright (C) 2001-2005 by Thomas Winischhofer, Vienna, Austria
7 *
8 * If distributed as part of the Linux kernel, the following license terms
9 * apply:
10 *
11 * * This program is free software; you can redistribute it and/or modify
12 * * it under the terms of the GNU General Public License as published by
13 * * the Free Software Foundation; either version 2 of the named License,
14 * * or any later version.
15 * *
16 * * This program is distributed in the hope that it will be useful,
17 * * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * * GNU General Public License for more details.
20 * *
21 * * You should have received a copy of the GNU General Public License
22 * * along with this program; if not, write to the Free Software
23 * * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA
24 *
25 * Otherwise, the following license terms apply:
26 *
27 * * Redistribution and use in source and binary forms, with or without
28 * * modification, are permitted provided that the following conditions
29 * * are met:
30 * * 1) Redistributions of source code must retain the above copyright
31 * * notice, this list of conditions and the following disclaimer.
32 * * 2) Redistributions in binary form must reproduce the above copyright
33 * * notice, this list of conditions and the following disclaimer in the
34 * * documentation and/or other materials provided with the distribution.
35 * * 3) The name of the author may not be used to endorse or promote products
36 * * derived from this software without specific prior written permission.
37 * *
38 * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
39 * * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
40 * * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
41 * * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
42 * * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
43 * * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
44 * * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
45 * * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
46 * * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
47 * * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
48 *
49 * Author: Thomas Winischhofer <thomas@winischhofer.net>
50 * Silicon Integrated Systems, Inc. (used by permission)
51 *
52 */
53
54#ifndef _SIS_OSDEF_H_
55#define _SIS_OSDEF_H_
56
57/* The choices are: */
58#define SIS_LINUX_KERNEL /* Linux kernel framebuffer */
59#undef SIS_XORG_XF86 /* XFree86/X.org */
60
61#ifdef OutPortByte
62#undef OutPortByte
63#endif
64
65#ifdef OutPortWord
66#undef OutPortWord
67#endif
68
69#ifdef OutPortLong
70#undef OutPortLong
71#endif
72
73#ifdef InPortByte
74#undef InPortByte
75#endif
76
77#ifdef InPortWord
78#undef InPortWord
79#endif
80
81#ifdef InPortLong
82#undef InPortLong
83#endif
84
85/**********************************************************************/
86/* LINUX KERNEL */
87/**********************************************************************/
88
89#ifdef SIS_LINUX_KERNEL
90
91#ifdef CONFIG_FB_SIS_300
92#define SIS300
93#endif
94
95#ifdef CONFIG_FB_SIS_315
96#define SIS315H
97#endif
98
99#if !defined(SIS300) && !defined(SIS315H)
100#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set
101#warning sisfb will not work!
102#endif
103
104#define OutPortByte(p,v) outb((u8)(v),(SISIOADDRESS)(p))
105#define OutPortWord(p,v) outw((u16)(v),(SISIOADDRESS)(p))
106#define OutPortLong(p,v) outl((u32)(v),(SISIOADDRESS)(p))
107#define InPortByte(p) inb((SISIOADDRESS)(p))
108#define InPortWord(p) inw((SISIOADDRESS)(p))
109#define InPortLong(p) inl((SISIOADDRESS)(p))
110#define SiS_SetMemory(MemoryAddress,MemorySize,value) memset_io(MemoryAddress, value, MemorySize)
111
112#endif /* LINUX_KERNEL */
113
114/**********************************************************************/
115/* XFree86/X.org */
116/**********************************************************************/
117
118#ifdef SIS_XORG_XF86
119
120#define SIS300
121#define SIS315H
122
123#define OutPortByte(p,v) outSISREG((IOADDRESS)(p),(CARD8)(v))
124#define OutPortWord(p,v) outSISREGW((IOADDRESS)(p),(CARD16)(v))
125#define OutPortLong(p,v) outSISREGL((IOADDRESS)(p),(CARD32)(v))
126#define InPortByte(p) inSISREG((IOADDRESS)(p))
127#define InPortWord(p) inSISREGW((IOADDRESS)(p))
128#define InPortLong(p) inSISREGL((IOADDRESS)(p))
129#define SiS_SetMemory(MemoryAddress,MemorySize,value) memset(MemoryAddress, value, MemorySize)
130
131#endif /* XF86 */
132
133#endif /* _OSDEF_H_ */
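
The deleted osdef.h existed to pick a register-access back end at compile time: kernel builds routed OutPortByte() and friends to outb()/inb(), while XFree86/X.org builds routed them to the SIS register helpers, so identical driver call sites compiled in either environment. With the X.org half dropped, the indirection no longer earns its keep. A minimal userspace sketch of that macro-indirection pattern (names and back ends here are illustrative, not the driver's):

    /* One call site, a back end chosen by the build. */
    #include <stdio.h>

    #ifdef BUILD_ENV_A
    #define OutPortByte(p, v) \
        printf("env A: write 0x%02x to port 0x%x\n", (unsigned)(v), (unsigned)(p))
    #else
    #define OutPortByte(p, v) \
        printf("env B: write 0x%02x to port 0x%x\n", (unsigned)(v), (unsigned)(p))
    #endif

    int main(void)
    {
        /* The driver source stays identical either way. */
        OutPortByte(0x3c4, 0x05);
        return 0;
    }
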
diff --git a/drivers/video/sis/sis.h b/drivers/video/sis/sis.h
index 7c5710e3fb56..80d89d37c414 100644
--- a/drivers/video/sis/sis.h
+++ b/drivers/video/sis/sis.h
@@ -24,7 +24,6 @@
24#ifndef _SIS_H_ 24#ifndef _SIS_H_
25#define _SIS_H_ 25#define _SIS_H_
26 26
27#include "osdef.h"
28#include <video/sisfb.h> 27#include <video/sisfb.h>
29 28
30#include "vgatypes.h" 29#include "vgatypes.h"
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 3dde12b0ab06..7e3370f115b6 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -60,6 +60,11 @@
60#include "sis.h" 60#include "sis.h"
61#include "sis_main.h" 61#include "sis_main.h"
62 62
63#if !defined(CONFIG_FB_SIS_300) && !defined(CONFIG_FB_SIS_315)
64#warning Neither CONFIG_FB_SIS_300 nor CONFIG_FB_SIS_315 is set
65#warning sisfb will not work!
66#endif
67
63static void sisfb_handle_command(struct sis_video_info *ivideo, 68static void sisfb_handle_command(struct sis_video_info *ivideo,
64 struct sisfb_cmd *sisfb_command); 69 struct sisfb_cmd *sisfb_command);
65 70
@@ -4114,14 +4119,6 @@ sisfb_find_rom(struct pci_dev *pdev)
4114 if(sisfb_check_rom(rom_base, ivideo)) { 4119 if(sisfb_check_rom(rom_base, ivideo)) {
4115 4120
4116 if((myrombase = vmalloc(65536))) { 4121 if((myrombase = vmalloc(65536))) {
4117
4118 /* Work around bug in pci/rom.c: Folks forgot to check
4119 * whether the size retrieved from the BIOS image eventually
4120 * is larger than the mapped size
4121 */
4122 if(pci_resource_len(pdev, PCI_ROM_RESOURCE) < romsize)
4123 romsize = pci_resource_len(pdev, PCI_ROM_RESOURCE);
4124
4125 memcpy_fromio(myrombase, rom_base, 4122 memcpy_fromio(myrombase, rom_base,
4126 (romsize > 65536) ? 65536 : romsize); 4123 (romsize > 65536) ? 65536 : romsize);
4127 } 4124 }
@@ -4155,23 +4152,6 @@ sisfb_find_rom(struct pci_dev *pdev)
4155 4152
4156 } 4153 }
4157 4154
4158#else
4159
4160 pci_read_config_dword(pdev, PCI_ROM_ADDRESS, &temp);
4161 pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
4162 (ivideo->video_base & PCI_ROM_ADDRESS_MASK) | PCI_ROM_ADDRESS_ENABLE);
4163
4164 rom_base = ioremap(ivideo->video_base, 65536);
4165 if(rom_base) {
4166 if(sisfb_check_rom(rom_base, ivideo)) {
4167 if((myrombase = vmalloc(65536)))
4168 memcpy_fromio(myrombase, rom_base, 65536);
4169 }
4170 iounmap(rom_base);
4171 }
4172
4173 pci_write_config_dword(pdev, PCI_ROM_ADDRESS, temp);
4174
4175#endif 4155#endif
4176 4156
4177 return myrombase; 4157 return myrombase;
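
Two things disappear from sisfb_find_rom(): the clamp of romsize against pci_resource_len() (the removed comment itself calls it a workaround for a pci/rom.c bug, presumably fixed upstream by now) and the legacy config-space ROM-enable fallback in the #else branch. The copy still caps at the driver's 64 KiB buffer. A toy model of the double clamp the old code performed, with invented numbers:

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long romsize = 128 * 1024; /* size claimed by the BIOS image */
        unsigned long mapped  =  64 * 1024; /* pci_resource_len() result      */

        romsize = min_ul(romsize, mapped);  /* the removed workaround          */
        printf("copy %lu bytes\n", min_ul(romsize, 65536)); /* buffer cap      */
        return 0;
    }
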
diff --git a/drivers/video/sis/vgatypes.h b/drivers/video/sis/vgatypes.h
index 81a22eaabfde..12c0dfaf2518 100644
--- a/drivers/video/sis/vgatypes.h
+++ b/drivers/video/sis/vgatypes.h
@@ -55,21 +55,10 @@
55 55
56#define SISIOMEMTYPE 56#define SISIOMEMTYPE
57 57
58#ifdef SIS_LINUX_KERNEL
59typedef unsigned long SISIOADDRESS; 58typedef unsigned long SISIOADDRESS;
60#include <linux/types.h> /* Need __iomem */ 59#include <linux/types.h> /* Need __iomem */
61#undef SISIOMEMTYPE 60#undef SISIOMEMTYPE
62#define SISIOMEMTYPE __iomem 61#define SISIOMEMTYPE __iomem
63#endif
64
65#ifdef SIS_XORG_XF86
66#if XF86_VERSION_CURRENT < XF86_VERSION_NUMERIC(4,2,0,0,0)
67typedef unsigned long IOADDRESS;
68typedef unsigned long SISIOADDRESS;
69#else
70typedef IOADDRESS SISIOADDRESS;
71#endif
72#endif
73 62
74typedef enum _SIS_CHIP_TYPE { 63typedef enum _SIS_CHIP_TYPE {
75 SIS_VGALegacy = 0, 64 SIS_VGALegacy = 0,
diff --git a/drivers/video/sis/vstruct.h b/drivers/video/sis/vstruct.h
index bef4aae388d0..ea94d214dcff 100644
--- a/drivers/video/sis/vstruct.h
+++ b/drivers/video/sis/vstruct.h
@@ -233,24 +233,15 @@ struct SiS_Private
233{ 233{
234 unsigned char ChipType; 234 unsigned char ChipType;
235 unsigned char ChipRevision; 235 unsigned char ChipRevision;
236#ifdef SIS_XORG_XF86
237 PCITAG PciTag;
238#endif
239#ifdef SIS_LINUX_KERNEL
240 void *ivideo; 236 void *ivideo;
241#endif
242 unsigned char *VirtualRomBase; 237 unsigned char *VirtualRomBase;
243 bool UseROM; 238 bool UseROM;
244#ifdef SIS_LINUX_KERNEL
245 unsigned char SISIOMEMTYPE *VideoMemoryAddress; 239 unsigned char SISIOMEMTYPE *VideoMemoryAddress;
246 unsigned int VideoMemorySize; 240 unsigned int VideoMemorySize;
247#endif
248 SISIOADDRESS IOAddress; 241 SISIOADDRESS IOAddress;
249 SISIOADDRESS IOAddress2; /* For dual chip XGI volari */ 242 SISIOADDRESS IOAddress2; /* For dual chip XGI volari */
250 243
251#ifdef SIS_LINUX_KERNEL
252 SISIOADDRESS RelIO; 244 SISIOADDRESS RelIO;
253#endif
254 SISIOADDRESS SiS_P3c4; 245 SISIOADDRESS SiS_P3c4;
255 SISIOADDRESS SiS_P3d4; 246 SISIOADDRESS SiS_P3d4;
256 SISIOADDRESS SiS_P3c0; 247 SISIOADDRESS SiS_P3c0;
@@ -280,9 +271,6 @@ struct SiS_Private
280 unsigned short SiS_IF_DEF_FSTN; 271 unsigned short SiS_IF_DEF_FSTN;
281 unsigned short SiS_SysFlags; 272 unsigned short SiS_SysFlags;
282 unsigned char SiS_VGAINFO; 273 unsigned char SiS_VGAINFO;
283#ifdef SIS_XORG_XF86
284 unsigned short SiS_CP1, SiS_CP2, SiS_CP3, SiS_CP4;
285#endif
286 bool SiS_UseROM; 274 bool SiS_UseROM;
287 bool SiS_ROMNew; 275 bool SiS_ROMNew;
288 bool SiS_XGIROM; 276 bool SiS_XGIROM;
diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
index 3a43ebf83a49..efb35aa8309a 100644
--- a/drivers/virtio/virtio.c
+++ b/drivers/virtio/virtio.c
@@ -9,19 +9,19 @@ static ssize_t device_show(struct device *_d,
9 struct device_attribute *attr, char *buf) 9 struct device_attribute *attr, char *buf)
10{ 10{
11 struct virtio_device *dev = container_of(_d,struct virtio_device,dev); 11 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
12 return sprintf(buf, "%hu", dev->id.device); 12 return sprintf(buf, "0x%04x\n", dev->id.device);
13} 13}
14static ssize_t vendor_show(struct device *_d, 14static ssize_t vendor_show(struct device *_d,
15 struct device_attribute *attr, char *buf) 15 struct device_attribute *attr, char *buf)
16{ 16{
17 struct virtio_device *dev = container_of(_d,struct virtio_device,dev); 17 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
18 return sprintf(buf, "%hu", dev->id.vendor); 18 return sprintf(buf, "0x%04x\n", dev->id.vendor);
19} 19}
20static ssize_t status_show(struct device *_d, 20static ssize_t status_show(struct device *_d,
21 struct device_attribute *attr, char *buf) 21 struct device_attribute *attr, char *buf)
22{ 22{
23 struct virtio_device *dev = container_of(_d,struct virtio_device,dev); 23 struct virtio_device *dev = container_of(_d,struct virtio_device,dev);
24 return sprintf(buf, "0x%08x", dev->config->get_status(dev)); 24 return sprintf(buf, "0x%08x\n", dev->config->get_status(dev));
25} 25}
26static ssize_t modalias_show(struct device *_d, 26static ssize_t modalias_show(struct device *_d,
27 struct device_attribute *attr, char *buf) 27 struct device_attribute *attr, char *buf)
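
The virtio sysfs attributes switch from bare decimal with no newline ("%hu") to zero-padded hex with a trailing newline, and status gains its missing newline — presumably so that cat output ends cleanly and the IDs read in the fixed-width hex form they are usually quoted in. A standalone illustration of the output difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned short id = 1;     /* e.g. a virtio device ID */

        printf("%hu", id);         /* old: "1", no newline */
        printf("\n--\n");
        printf("0x%04x\n", id);    /* new: "0x0001\n"      */
        return 0;
    }
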
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 1475ed6b575f..cc2f73e03475 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -230,9 +230,6 @@ add_head:
230 pr_debug("Added buffer head %i to %p\n", head, vq); 230 pr_debug("Added buffer head %i to %p\n", head, vq);
231 END_USE(vq); 231 END_USE(vq);
232 232
233 /* If we're indirect, we can fit many (assuming not OOM). */
234 if (vq->indirect)
235 return vq->num_free ? vq->vring.num : 0;
236 return vq->num_free; 233 return vq->num_free;
237} 234}
238EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp); 235EXPORT_SYMBOL_GPL(virtqueue_add_buf_gfp);
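
The removed branch made virtqueue_add_buf_gfp() report vq->vring.num remaining buffers whenever the ring used indirect descriptors and had any free slot at all. Since each add consumes exactly one ring slot even in indirect mode, the plain free-slot count is the honest capacity. A toy model of the two answers, with illustrative numbers:

    #include <stdio.h>

    struct toy_vq {
        unsigned num;      /* ring size           */
        unsigned num_free; /* free ring slots     */
        int indirect;      /* indirect descs on?  */
    };

    static unsigned capacity_old(const struct toy_vq *vq)
    {
        if (vq->indirect)
            return vq->num_free ? vq->num : 0; /* over-promises */
        return vq->num_free;
    }

    static unsigned capacity_new(const struct toy_vq *vq)
    {
        return vq->num_free; /* one slot per add, always */
    }

    int main(void)
    {
        struct toy_vq vq = { .num = 256, .num_free = 3, .indirect = 1 };

        printf("old: %u, new: %u\n", capacity_old(&vq), capacity_new(&vq));
        return 0;
    }
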
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4a291045ebac..a5ad77ef4266 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -558,6 +558,9 @@ config IT8712F_WDT
558 This is the driver for the built-in watchdog timer on the IT8712F 558 This is the driver for the built-in watchdog timer on the IT8712F
559 Super I/0 chipset used on many motherboards. 559 Super I/0 chipset used on many motherboards.
Note: "Super I/0" in the Kconfig help text above is a long-standing typo for "Super I/O".
560 560
561 If the driver does not work, then make sure that the game port in
562 the BIOS is enabled.
563
561 To compile this driver as a module, choose M here: the 564 To compile this driver as a module, choose M here: the
562 module will be called it8712f_wdt. 565 module will be called it8712f_wdt.
563 566
diff --git a/drivers/watchdog/bcm63xx_wdt.c b/drivers/watchdog/bcm63xx_wdt.c
index a1debc89356b..3c5045a206dd 100644
--- a/drivers/watchdog/bcm63xx_wdt.c
+++ b/drivers/watchdog/bcm63xx_wdt.c
@@ -18,7 +18,6 @@
18#include <linux/miscdevice.h> 18#include <linux/miscdevice.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/moduleparam.h> 20#include <linux/moduleparam.h>
21#include <linux/reboot.h>
22#include <linux/types.h> 21#include <linux/types.h>
23#include <linux/uaccess.h> 22#include <linux/uaccess.h>
24#include <linux/watchdog.h> 23#include <linux/watchdog.h>
@@ -220,14 +219,6 @@ static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd,
220 } 219 }
221} 220}
222 221
223static int bcm63xx_wdt_notify_sys(struct notifier_block *this,
224 unsigned long code, void *unused)
225{
226 if (code == SYS_DOWN || code == SYS_HALT)
227 bcm63xx_wdt_pause();
228 return NOTIFY_DONE;
229}
230
231static const struct file_operations bcm63xx_wdt_fops = { 222static const struct file_operations bcm63xx_wdt_fops = {
232 .owner = THIS_MODULE, 223 .owner = THIS_MODULE,
233 .llseek = no_llseek, 224 .llseek = no_llseek,
@@ -243,12 +234,8 @@ static struct miscdevice bcm63xx_wdt_miscdev = {
243 .fops = &bcm63xx_wdt_fops, 234 .fops = &bcm63xx_wdt_fops,
244}; 235};
245 236
246static struct notifier_block bcm63xx_wdt_notifier = {
247 .notifier_call = bcm63xx_wdt_notify_sys,
248};
249 237
250 238static int __devinit bcm63xx_wdt_probe(struct platform_device *pdev)
251static int bcm63xx_wdt_probe(struct platform_device *pdev)
252{ 239{
253 int ret; 240 int ret;
254 struct resource *r; 241 struct resource *r;
@@ -280,16 +267,10 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
280 wdt_time); 267 wdt_time);
281 } 268 }
282 269
283 ret = register_reboot_notifier(&bcm63xx_wdt_notifier);
284 if (ret) {
285 dev_err(&pdev->dev, "failed to register reboot_notifier\n");
286 goto unregister_timer;
287 }
288
289 ret = misc_register(&bcm63xx_wdt_miscdev); 270 ret = misc_register(&bcm63xx_wdt_miscdev);
290 if (ret < 0) { 271 if (ret < 0) {
291 dev_err(&pdev->dev, "failed to register watchdog device\n"); 272 dev_err(&pdev->dev, "failed to register watchdog device\n");
292 goto unregister_reboot_notifier; 273 goto unregister_timer;
293 } 274 }
294 275
295 dev_info(&pdev->dev, " started, timer margin: %d sec\n", 276 dev_info(&pdev->dev, " started, timer margin: %d sec\n",
@@ -297,8 +278,6 @@ static int bcm63xx_wdt_probe(struct platform_device *pdev)
297 278
298 return 0; 279 return 0;
299 280
300unregister_reboot_notifier:
301 unregister_reboot_notifier(&bcm63xx_wdt_notifier);
302unregister_timer: 281unregister_timer:
303 bcm63xx_timer_unregister(TIMER_WDT_ID); 282 bcm63xx_timer_unregister(TIMER_WDT_ID);
304unmap: 283unmap:
@@ -306,25 +285,28 @@ unmap:
306 return ret; 285 return ret;
307} 286}
308 287
309static int bcm63xx_wdt_remove(struct platform_device *pdev) 288static int __devexit bcm63xx_wdt_remove(struct platform_device *pdev)
310{ 289{
311 if (!nowayout) 290 if (!nowayout)
312 bcm63xx_wdt_pause(); 291 bcm63xx_wdt_pause();
313 292
314 misc_deregister(&bcm63xx_wdt_miscdev); 293 misc_deregister(&bcm63xx_wdt_miscdev);
315
316 iounmap(bcm63xx_wdt_device.regs);
317
318 unregister_reboot_notifier(&bcm63xx_wdt_notifier);
319 bcm63xx_timer_unregister(TIMER_WDT_ID); 294 bcm63xx_timer_unregister(TIMER_WDT_ID);
320 295 iounmap(bcm63xx_wdt_device.regs);
321 return 0; 296 return 0;
322} 297}
323 298
299static void bcm63xx_wdt_shutdown(struct platform_device *pdev)
300{
301 bcm63xx_wdt_pause();
302}
303
324static struct platform_driver bcm63xx_wdt = { 304static struct platform_driver bcm63xx_wdt = {
325 .probe = bcm63xx_wdt_probe, 305 .probe = bcm63xx_wdt_probe,
326 .remove = bcm63xx_wdt_remove, 306 .remove = __devexit_p(bcm63xx_wdt_remove),
307 .shutdown = bcm63xx_wdt_shutdown,
327 .driver = { 308 .driver = {
309 .owner = THIS_MODULE,
328 .name = "bcm63xx-wdt", 310 .name = "bcm63xx-wdt",
329 } 311 }
330}; 312};
diff --git a/drivers/watchdog/gef_wdt.c b/drivers/watchdog/gef_wdt.c
index 9c21d19043a6..f6bd6f10fcec 100644
--- a/drivers/watchdog/gef_wdt.c
+++ b/drivers/watchdog/gef_wdt.c
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/miscdevice.h> 31#include <linux/miscdevice.h>
32#include <linux/watchdog.h> 32#include <linux/watchdog.h>
33#include <linux/fs.h>
33#include <linux/of.h> 34#include <linux/of.h>
34#include <linux/of_platform.h> 35#include <linux/of_platform.h>
35#include <linux/io.h> 36#include <linux/io.h>
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index f7e90fe47b71..b8838d2c67a6 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -32,6 +32,7 @@
32 * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH) 32 * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
33 * document number 320066-003, 320257-008: EP80597 (IICH) 33 * document number 320066-003, 320257-008: EP80597 (IICH)
34 * document number TBD : Cougar Point (CPT) 34 * document number TBD : Cougar Point (CPT)
35 * document number TBD : Patsburg (PBG)
35 */ 36 */
36 37
37/* 38/*
@@ -146,7 +147,8 @@ enum iTCO_chipsets {
146 TCO_CPT29, /* Cougar Point */ 147 TCO_CPT29, /* Cougar Point */
147 TCO_CPT30, /* Cougar Point */ 148 TCO_CPT30, /* Cougar Point */
148 TCO_CPT31, /* Cougar Point */ 149 TCO_CPT31, /* Cougar Point */
149 TCO_PBG, /* Patsburg */ 150 TCO_PBG1, /* Patsburg */
151 TCO_PBG2, /* Patsburg */
150}; 152};
151 153
152static struct { 154static struct {
@@ -235,6 +237,7 @@ static struct {
235 {"Cougar Point", 2}, 237 {"Cougar Point", 2},
236 {"Cougar Point", 2}, 238 {"Cougar Point", 2},
237 {"Patsburg", 2}, 239 {"Patsburg", 2},
240 {"Patsburg", 2},
238 {NULL, 0} 241 {NULL, 0}
239}; 242};
240 243
@@ -350,7 +353,8 @@ static struct pci_device_id iTCO_wdt_pci_tbl[] = {
350 { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)}, 353 { ITCO_PCI_DEVICE(0x1c5d, TCO_CPT29)},
351 { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)}, 354 { ITCO_PCI_DEVICE(0x1c5e, TCO_CPT30)},
352 { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)}, 355 { ITCO_PCI_DEVICE(0x1c5f, TCO_CPT31)},
353 { ITCO_PCI_DEVICE(0x1d40, TCO_PBG)}, 356 { ITCO_PCI_DEVICE(0x1d40, TCO_PBG1)},
357 { ITCO_PCI_DEVICE(0x1d41, TCO_PBG2)},
354 { 0, }, /* End of list */ 358 { 0, }, /* End of list */
355}; 359};
356MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); 360MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
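
Patsburg turns out to ship under two PCI device IDs, so the single TCO_PBG entry splits into TCO_PBG1/TCO_PBG2 and all three tables — the enum, the board-info array, and the PCI ID list — grow in lockstep. A sketch of that keep-in-sync pattern, where the enum value is carried in driver_data and indexes the info table at probe time (the struct layouts here are illustrative, not the driver's):

    #include <stdio.h>

    enum chip { TCO_CPT31, TCO_PBG1, TCO_PBG2 };

    static const struct { const char *name; int version; } chip_info[] = {
        [TCO_CPT31] = { "Cougar Point", 2 },
        [TCO_PBG1]  = { "Patsburg",     2 },
        [TCO_PBG2]  = { "Patsburg",     2 },
    };

    static const struct { unsigned short device; enum chip data; } ids[] = {
        { 0x1c5f, TCO_CPT31 },
        { 0x1d40, TCO_PBG1 },  /* first Patsburg ID  */
        { 0x1d41, TCO_PBG2 },  /* second Patsburg ID */
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(ids) / sizeof(ids[0]); i++)
            printf("0x%04x -> %s v%d\n", ids[i].device,
                   chip_info[ids[i].data].name,
                   chip_info[ids[i].data].version);
        return 0;
    }
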
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index eb8a78d77d9d..533a199e7a3f 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -8,9 +8,12 @@ obj-$(CONFIG_BLOCK) += biomerge.o
8obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o 8obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
9obj-$(CONFIG_XEN_XENCOMM) += xencomm.o 9obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
10obj-$(CONFIG_XEN_BALLOON) += balloon.o 10obj-$(CONFIG_XEN_BALLOON) += balloon.o
11obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o 11obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
12obj-$(CONFIG_XENFS) += xenfs/ 12obj-$(CONFIG_XENFS) += xenfs/
13obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o 13obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
14obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o 14obj-$(CONFIG_XEN_PLATFORM_PCI) += platform-pci.o
15obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o 15obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
16obj-$(CONFIG_XEN_DOM0) += pci.o 16obj-$(CONFIG_XEN_DOM0) += pci.o
17
18xen-evtchn-y := evtchn.o
19
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 500290b150bb..43f9f02c7db0 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -50,6 +50,7 @@
50#include <asm/pgtable.h> 50#include <asm/pgtable.h>
51#include <asm/uaccess.h> 51#include <asm/uaccess.h>
52#include <asm/tlb.h> 52#include <asm/tlb.h>
53#include <asm/e820.h>
53 54
54#include <asm/xen/hypervisor.h> 55#include <asm/xen/hypervisor.h>
55#include <asm/xen/hypercall.h> 56#include <asm/xen/hypercall.h>
@@ -119,7 +120,7 @@ static void scrub_page(struct page *page)
119} 120}
120 121
121/* balloon_append: add the given page to the balloon. */ 122/* balloon_append: add the given page to the balloon. */
122static void balloon_append(struct page *page) 123static void __balloon_append(struct page *page)
123{ 124{
124 /* Lowmem is re-populated first, so highmem pages go at list tail. */ 125 /* Lowmem is re-populated first, so highmem pages go at list tail. */
125 if (PageHighMem(page)) { 126 if (PageHighMem(page)) {
@@ -130,7 +131,11 @@ static void balloon_append(struct page *page)
130 list_add(&page->lru, &ballooned_pages); 131 list_add(&page->lru, &ballooned_pages);
131 balloon_stats.balloon_low++; 132 balloon_stats.balloon_low++;
132 } 133 }
134}
133 135
136static void balloon_append(struct page *page)
137{
138 __balloon_append(page);
134 totalram_pages--; 139 totalram_pages--;
135} 140}
136 141
@@ -191,7 +196,7 @@ static unsigned long current_target(void)
191 196
192static int increase_reservation(unsigned long nr_pages) 197static int increase_reservation(unsigned long nr_pages)
193{ 198{
194 unsigned long pfn, i, flags; 199 unsigned long pfn, i;
195 struct page *page; 200 struct page *page;
196 long rc; 201 long rc;
197 struct xen_memory_reservation reservation = { 202 struct xen_memory_reservation reservation = {
@@ -203,8 +208,6 @@ static int increase_reservation(unsigned long nr_pages)
203 if (nr_pages > ARRAY_SIZE(frame_list)) 208 if (nr_pages > ARRAY_SIZE(frame_list))
204 nr_pages = ARRAY_SIZE(frame_list); 209 nr_pages = ARRAY_SIZE(frame_list);
205 210
206 spin_lock_irqsave(&xen_reservation_lock, flags);
207
208 page = balloon_first_page(); 211 page = balloon_first_page();
209 for (i = 0; i < nr_pages; i++) { 212 for (i = 0; i < nr_pages; i++) {
210 BUG_ON(page == NULL); 213 BUG_ON(page == NULL);
@@ -247,14 +250,12 @@ static int increase_reservation(unsigned long nr_pages)
247 balloon_stats.current_pages += rc; 250 balloon_stats.current_pages += rc;
248 251
249 out: 252 out:
250 spin_unlock_irqrestore(&xen_reservation_lock, flags);
251
252 return rc < 0 ? rc : rc != nr_pages; 253 return rc < 0 ? rc : rc != nr_pages;
253} 254}
254 255
255static int decrease_reservation(unsigned long nr_pages) 256static int decrease_reservation(unsigned long nr_pages)
256{ 257{
257 unsigned long pfn, i, flags; 258 unsigned long pfn, i;
258 struct page *page; 259 struct page *page;
259 int need_sleep = 0; 260 int need_sleep = 0;
260 int ret; 261 int ret;
@@ -292,8 +293,6 @@ static int decrease_reservation(unsigned long nr_pages)
292 kmap_flush_unused(); 293 kmap_flush_unused();
293 flush_tlb_all(); 294 flush_tlb_all();
294 295
295 spin_lock_irqsave(&xen_reservation_lock, flags);
296
297 /* No more mappings: invalidate P2M and add to balloon. */ 296 /* No more mappings: invalidate P2M and add to balloon. */
298 for (i = 0; i < nr_pages; i++) { 297 for (i = 0; i < nr_pages; i++) {
299 pfn = mfn_to_pfn(frame_list[i]); 298 pfn = mfn_to_pfn(frame_list[i]);
@@ -308,8 +307,6 @@ static int decrease_reservation(unsigned long nr_pages)
308 307
309 balloon_stats.current_pages -= nr_pages; 308 balloon_stats.current_pages -= nr_pages;
310 309
311 spin_unlock_irqrestore(&xen_reservation_lock, flags);
312
313 return need_sleep; 310 return need_sleep;
314} 311}
315 312
@@ -395,7 +392,7 @@ static struct notifier_block xenstore_notifier;
395 392
396static int __init balloon_init(void) 393static int __init balloon_init(void)
397{ 394{
398 unsigned long pfn; 395 unsigned long pfn, extra_pfn_end;
399 struct page *page; 396 struct page *page;
400 397
401 if (!xen_pv_domain()) 398 if (!xen_pv_domain())
@@ -415,11 +412,24 @@ static int __init balloon_init(void)
415 412
416 register_balloon(&balloon_sysdev); 413 register_balloon(&balloon_sysdev);
417 414
418 /* Initialise the balloon with excess memory space. */ 415 /*
419 for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) { 416 * Initialise the balloon with excess memory space. We need
417 * to make sure we don't add memory which doesn't exist or
418 * logically exist. The E820 map can be trimmed to be smaller
419 * than the amount of physical memory due to the mem= command
420 * line parameter. And if this is a 32-bit non-HIGHMEM kernel
421 * on a system with memory which requires highmem to access,
422 * don't try to use it.
423 */
424 extra_pfn_end = min(min(max_pfn, e820_end_of_ram_pfn()),
425 (unsigned long)PFN_DOWN(xen_extra_mem_start + xen_extra_mem_size));
426 for (pfn = PFN_UP(xen_extra_mem_start);
427 pfn < extra_pfn_end;
428 pfn++) {
420 page = pfn_to_page(pfn); 429 page = pfn_to_page(pfn);
421 if (!PageReserved(page)) 430 /* totalram_pages doesn't include the boot-time
422 balloon_append(page); 431 balloon extension, so don't subtract from it. */
432 __balloon_append(page);
423 } 433 }
424 434
425 target_watch.callback = watch_target; 435 target_watch.callback = watch_target;
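
The rewritten loop walks only PFNs guaranteed to exist: it starts at the beginning of the Xen extra-memory region and stops at the smallest of max_pfn, the E820 end of RAM, and the end of that extra region. The __balloon_append() split supports this: boot-time ballooned pages were never counted in totalram_pages, so the helper skips the decrement. The clamp arithmetic in a standalone sketch (all values invented):

    #include <stdio.h>

    #define PAGE_SHIFT 12UL
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned long max_pfn      = 0x40000;             /* illustrative   */
        unsigned long e820_end_pfn = 0x3f000;             /* trimmed by mem= */
        unsigned long extra_start  = 0x3e000UL << PAGE_SHIFT;
        unsigned long extra_size   = 0x04000UL << PAGE_SHIFT;

        unsigned long extra_pfn_end =
            min_ul(min_ul(max_pfn, e820_end_pfn),
                   PFN_DOWN(extra_start + extra_size));

        printf("balloon PFNs [%#lx, %#lx)\n",
               PFN_UP(extra_start), extra_pfn_end);
        return 0;
    }
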
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 321a0c8346e5..31af0ac31a98 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -105,7 +105,6 @@ struct irq_info
105 105
106static struct irq_info *irq_info; 106static struct irq_info *irq_info;
107static int *pirq_to_irq; 107static int *pirq_to_irq;
108static int nr_pirqs;
109 108
110static int *evtchn_to_irq; 109static int *evtchn_to_irq;
111struct cpu_evtchn_s { 110struct cpu_evtchn_s {
@@ -278,17 +277,17 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
278 cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); 277 cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
279#endif 278#endif
280 279
281 __clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); 280 clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
282 __set_bit(chn, cpu_evtchn_mask(cpu)); 281 set_bit(chn, cpu_evtchn_mask(cpu));
283 282
284 irq_info[irq].cpu = cpu; 283 irq_info[irq].cpu = cpu;
285} 284}
286 285
287static void init_evtchn_cpu_bindings(void) 286static void init_evtchn_cpu_bindings(void)
288{ 287{
288 int i;
289#ifdef CONFIG_SMP 289#ifdef CONFIG_SMP
290 struct irq_desc *desc; 290 struct irq_desc *desc;
291 int i;
292 291
293 /* By default all event channels notify CPU#0. */ 292 /* By default all event channels notify CPU#0. */
294 for_each_irq_desc(i, desc) { 293 for_each_irq_desc(i, desc) {
@@ -296,7 +295,10 @@ static void init_evtchn_cpu_bindings(void)
296 } 295 }
297#endif 296#endif
298 297
299 memset(cpu_evtchn_mask(0), ~0, sizeof(struct cpu_evtchn_s)); 298 for_each_possible_cpu(i)
299 memset(cpu_evtchn_mask(i),
300 (i == 0) ? ~0 : 0, sizeof(struct cpu_evtchn_s));
301
300} 302}
301 303
302static inline void clear_evtchn(int port) 304static inline void clear_evtchn(int port)
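
Every event channel notifies CPU 0 by default, so CPU 0's mask starts all-ones and every other CPU's starts all-zero. Doing this with an explicit per-CPU loop, rather than touching only CPU 0's mask, matters because this function also reinitializes state that is no longer freshly zeroed — xen_irq_resume() calls it after suspend. A sketch of the idea with stale state deliberately left behind:

    #include <stdio.h>
    #include <string.h>

    #define NR_CPUS    4
    #define MASK_BYTES 8

    static unsigned char cpu_evtchn_mask[NR_CPUS][MASK_BYTES];

    static void init_bindings(void)
    {
        /* CPU 0 owns every channel by default; every other mask must
         * be cleared explicitly, since this may run on used state. */
        for (int i = 0; i < NR_CPUS; i++)
            memset(cpu_evtchn_mask[i], i == 0 ? 0xff : 0x00, MASK_BYTES);
    }

    int main(void)
    {
        memset(cpu_evtchn_mask, 0xaa, sizeof(cpu_evtchn_mask)); /* stale */
        init_bindings();
        printf("cpu0=%#x cpu1=%#x\n",
               cpu_evtchn_mask[0][0], cpu_evtchn_mask[1][0]);
        return 0;
    }
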
@@ -382,12 +384,17 @@ static int get_nr_hw_irqs(void)
382 return ret; 384 return ret;
383} 385}
384 386
385/* callers of this function should make sure that PHYSDEVOP_get_nr_pirqs 387static int find_unbound_pirq(int type)
386 * succeeded otherwise nr_pirqs won't hold the right value */
387static int find_unbound_pirq(void)
388{ 388{
389 int i; 389 int rc, i;
390 for (i = nr_pirqs-1; i >= 0; i--) { 390 struct physdev_get_free_pirq op_get_free_pirq;
391 op_get_free_pirq.type = type;
392
393 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
394 if (!rc)
395 return op_get_free_pirq.pirq;
396
397 for (i = 0; i < nr_irqs; i++) {
391 if (pirq_to_irq[i] < 0) 398 if (pirq_to_irq[i] < 0)
392 return i; 399 return i;
393 } 400 }
@@ -420,7 +427,7 @@ static int find_unbound_irq(void)
420 if (irq == start) 427 if (irq == start)
421 goto no_irqs; 428 goto no_irqs;
422 429
423 res = irq_alloc_desc_at(irq, 0); 430 res = irq_alloc_desc_at(irq, -1);
424 431
425 if (WARN_ON(res != irq)) 432 if (WARN_ON(res != irq))
426 return -1; 433 return -1;
@@ -608,10 +615,10 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
608 615
609 spin_lock(&irq_mapping_update_lock); 616 spin_lock(&irq_mapping_update_lock);
610 617
611 if ((pirq > nr_pirqs) || (gsi > nr_irqs)) { 618 if ((pirq > nr_irqs) || (gsi > nr_irqs)) {
612 printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n", 619 printk(KERN_WARNING "xen_map_pirq_gsi: %s %s is incorrect!\n",
613 pirq > nr_pirqs ? "nr_pirqs" :"", 620 pirq > nr_irqs ? "pirq" :"",
614 gsi > nr_irqs ? "nr_irqs" : ""); 621 gsi > nr_irqs ? "gsi" : "");
615 goto out; 622 goto out;
616 } 623 }
617 624
@@ -627,7 +634,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
627 if (identity_mapped_irq(gsi) || (!xen_initial_domain() && 634 if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
628 xen_pv_domain())) { 635 xen_pv_domain())) {
629 irq = gsi; 636 irq = gsi;
630 irq_alloc_desc_at(irq, 0); 637 irq_alloc_desc_at(irq, -1);
631 } else 638 } else
632 irq = find_unbound_irq(); 639 irq = find_unbound_irq();
633 640
@@ -661,17 +668,21 @@ out:
661#include <linux/msi.h> 668#include <linux/msi.h>
662#include "../pci/msi.h" 669#include "../pci/msi.h"
663 670
664void xen_allocate_pirq_msi(char *name, int *irq, int *pirq) 671void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
665{ 672{
666 spin_lock(&irq_mapping_update_lock); 673 spin_lock(&irq_mapping_update_lock);
667 674
668 *irq = find_unbound_irq(); 675 if (alloc & XEN_ALLOC_IRQ) {
669 if (*irq == -1) 676 *irq = find_unbound_irq();
670 goto out; 677 if (*irq == -1)
678 goto out;
679 }
671 680
672 *pirq = find_unbound_pirq(); 681 if (alloc & XEN_ALLOC_PIRQ) {
673 if (*pirq == -1) 682 *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI);
674 goto out; 683 if (*pirq == -1)
684 goto out;
685 }
675 686
676 set_irq_chip_and_handler_name(*irq, &xen_pirq_chip, 687 set_irq_chip_and_handler_name(*irq, &xen_pirq_chip,
677 handle_level_irq, name); 688 handle_level_irq, name);
@@ -752,13 +763,14 @@ int xen_destroy_irq(int irq)
752 goto out; 763 goto out;
753 764
754 if (xen_initial_domain()) { 765 if (xen_initial_domain()) {
755 unmap_irq.pirq = info->u.pirq.gsi; 766 unmap_irq.pirq = info->u.pirq.pirq;
756 unmap_irq.domid = DOMID_SELF; 767 unmap_irq.domid = DOMID_SELF;
757 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq); 768 rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
758 if (rc) { 769 if (rc) {
759 printk(KERN_WARNING "unmap irq failed %d\n", rc); 770 printk(KERN_WARNING "unmap irq failed %d\n", rc);
760 goto out; 771 goto out;
761 } 772 }
773 pirq_to_irq[info->u.pirq.pirq] = -1;
762 } 774 }
763 irq_info[irq] = mk_unbound_info(); 775 irq_info[irq] = mk_unbound_info();
764 776
@@ -779,6 +791,11 @@ int xen_gsi_from_irq(unsigned irq)
779 return gsi_from_irq(irq); 791 return gsi_from_irq(irq);
780} 792}
781 793
794int xen_irq_from_pirq(unsigned pirq)
795{
796 return pirq_to_irq[pirq];
797}
798
782int bind_evtchn_to_irq(unsigned int evtchn) 799int bind_evtchn_to_irq(unsigned int evtchn)
783{ 800{
784 int irq; 801 int irq;
@@ -1276,6 +1293,42 @@ static int retrigger_dynirq(unsigned int irq)
1276 return ret; 1293 return ret;
1277} 1294}
1278 1295
1296static void restore_cpu_pirqs(void)
1297{
1298 int pirq, rc, irq, gsi;
1299 struct physdev_map_pirq map_irq;
1300
1301 for (pirq = 0; pirq < nr_irqs; pirq++) {
1302 irq = pirq_to_irq[pirq];
1303 if (irq == -1)
1304 continue;
1305
1306 /* save/restore of PT devices doesn't work, so at this point the
1307 * only devices present are GSI based emulated devices */
1308 gsi = gsi_from_irq(irq);
1309 if (!gsi)
1310 continue;
1311
1312 map_irq.domid = DOMID_SELF;
1313 map_irq.type = MAP_PIRQ_TYPE_GSI;
1314 map_irq.index = gsi;
1315 map_irq.pirq = pirq;
1316
1317 rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
1318 if (rc) {
1319 printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
1320 gsi, irq, pirq, rc);
1321 irq_info[irq] = mk_unbound_info();
1322 pirq_to_irq[pirq] = -1;
1323 continue;
1324 }
1325
1326 printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
1327
1328 startup_pirq(irq);
1329 }
1330}
1331
1279static void restore_cpu_virqs(unsigned int cpu) 1332static void restore_cpu_virqs(unsigned int cpu)
1280{ 1333{
1281 struct evtchn_bind_virq bind_virq; 1334 struct evtchn_bind_virq bind_virq;
@@ -1419,6 +1472,8 @@ void xen_irq_resume(void)
1419 1472
1420 unmask_evtchn(evtchn); 1473 unmask_evtchn(evtchn);
1421 } 1474 }
1475
1476 restore_cpu_pirqs();
1422} 1477}
1423 1478
1424static struct irq_chip xen_dynamic_chip __read_mostly = { 1479static struct irq_chip xen_dynamic_chip __read_mostly = {
@@ -1503,26 +1558,17 @@ void xen_callback_vector(void) {}
1503 1558
1504void __init xen_init_IRQ(void) 1559void __init xen_init_IRQ(void)
1505{ 1560{
1506 int i, rc; 1561 int i;
1507 struct physdev_nr_pirqs op_nr_pirqs;
1508 1562
1509 cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s), 1563 cpu_evtchn_mask_p = kcalloc(nr_cpu_ids, sizeof(struct cpu_evtchn_s),
1510 GFP_KERNEL); 1564 GFP_KERNEL);
1511 irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL); 1565 irq_info = kcalloc(nr_irqs, sizeof(*irq_info), GFP_KERNEL);
1512 1566
1513 rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_nr_pirqs, &op_nr_pirqs); 1567 /* We are using nr_irqs as the maximum number of pirq available but
1514 if (rc < 0) { 1568 * that number is actually chosen by Xen and we don't know exactly
1515 nr_pirqs = nr_irqs; 1569 * what it is. Be careful choosing high pirq numbers. */
1516 if (rc != -ENOSYS) 1570 pirq_to_irq = kcalloc(nr_irqs, sizeof(*pirq_to_irq), GFP_KERNEL);
1517 printk(KERN_WARNING "PHYSDEVOP_get_nr_pirqs returned rc=%d\n", rc); 1571 for (i = 0; i < nr_irqs; i++)
1518 } else {
1519 if (xen_pv_domain() && !xen_initial_domain())
1520 nr_pirqs = max((int)op_nr_pirqs.nr_pirqs, nr_irqs);
1521 else
1522 nr_pirqs = op_nr_pirqs.nr_pirqs;
1523 }
1524 pirq_to_irq = kcalloc(nr_pirqs, sizeof(*pirq_to_irq), GFP_KERNEL);
1525 for (i = 0; i < nr_pirqs; i++)
1526 pirq_to_irq[i] = -1; 1572 pirq_to_irq[i] = -1;
1527 1573
1528 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq), 1574 evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
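
With PHYSDEVOP_get_nr_pirqs gone, the table is simply sized by nr_irqs and every slot is filled with -1, the "unbound" sentinel — zero cannot serve that role because 0 is a valid IRQ number, and the new comment warns that Xen, not the kernel, picks the real PIRQ limit. The sentinel-fill idiom in isolation:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        int nr_irqs = 16; /* stand-in value */
        int *pirq_to_irq = calloc(nr_irqs, sizeof(*pirq_to_irq));

        if (!pirq_to_irq)
            return 1;

        /* calloc zeroes the table, but 0 is a valid IRQ, so mark
         * unbound entries with -1 instead. */
        for (int i = 0; i < nr_irqs; i++)
            pirq_to_irq[i] = -1;

        printf("pirq 5 %s\n", pirq_to_irq[5] == -1 ? "unbound" : "bound");
        free(pirq_to_irq);
        return 0;
    }
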
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index fec6ba3c08a8..ef11daf0cafe 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -69,20 +69,51 @@ struct per_user_data {
69 const char *name; 69 const char *name;
70}; 70};
71 71
72/* Who's bound to each port? */ 72/*
73static struct per_user_data *port_user[NR_EVENT_CHANNELS]; 73 * Who's bound to each port? This is logically an array of struct
74 * per_user_data *, but we encode the current enabled-state in bit 0.
75 */
76static unsigned long *port_user;
74static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */ 77static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
75 78
76irqreturn_t evtchn_interrupt(int irq, void *data) 79static inline struct per_user_data *get_port_user(unsigned port)
80{
81 return (struct per_user_data *)(port_user[port] & ~1);
82}
83
84static inline void set_port_user(unsigned port, struct per_user_data *u)
85{
86 port_user[port] = (unsigned long)u;
87}
88
89static inline bool get_port_enabled(unsigned port)
90{
91 return port_user[port] & 1;
92}
93
94static inline void set_port_enabled(unsigned port, bool enabled)
95{
96 if (enabled)
97 port_user[port] |= 1;
98 else
99 port_user[port] &= ~1;
100}
101
102static irqreturn_t evtchn_interrupt(int irq, void *data)
77{ 103{
78 unsigned int port = (unsigned long)data; 104 unsigned int port = (unsigned long)data;
79 struct per_user_data *u; 105 struct per_user_data *u;
80 106
81 spin_lock(&port_user_lock); 107 spin_lock(&port_user_lock);
82 108
83 u = port_user[port]; 109 u = get_port_user(port);
110
111 WARN(!get_port_enabled(port),
112 "Interrupt for port %d, but apparently not enabled; per-user %p\n",
113 port, u);
84 114
85 disable_irq_nosync(irq); 115 disable_irq_nosync(irq);
116 set_port_enabled(port, false);
86 117
87 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) { 118 if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
88 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port; 119 u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
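
The port_user rework is a classic pointer-tagging trick: each slot becomes an unsigned long holding a struct per_user_data pointer with the port's enabled flag stashed in bit 0, which is safe because allocated structs are at least word-aligned and so never have bit 0 set. Tracking the flag per port lets the driver pair disable_irq_nosync() and enable_irq() exactly once each. A standalone mirror of the accessors for a single slot:

    #include <assert.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct per_user { int id; };

    static unsigned long port_entry; /* one port's slot */

    static struct per_user *get_port_user(void)
    {
        return (struct per_user *)(port_entry & ~1UL);
    }

    static void set_port_user(struct per_user *u)
    {
        port_entry = (unsigned long)u; /* clears the flag, as in the patch */
    }

    static int get_port_enabled(void)
    {
        return port_entry & 1UL;
    }

    static void set_port_enabled(int enabled)
    {
        if (enabled)
            port_entry |= 1UL;
        else
            port_entry &= ~1UL;
    }

    int main(void)
    {
        struct per_user *u = malloc(sizeof(*u));

        assert(u && ((unsigned long)u & 1UL) == 0); /* malloc is aligned */
        set_port_user(u);
        set_port_enabled(1);
        printf("user=%p enabled=%d\n", (void *)get_port_user(),
               get_port_enabled());
        free(u);
        return 0;
    }
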
@@ -92,9 +123,8 @@ irqreturn_t evtchn_interrupt(int irq, void *data)
92 kill_fasync(&u->evtchn_async_queue, 123 kill_fasync(&u->evtchn_async_queue,
93 SIGIO, POLL_IN); 124 SIGIO, POLL_IN);
94 } 125 }
95 } else { 126 } else
96 u->ring_overflow = 1; 127 u->ring_overflow = 1;
97 }
98 128
99 spin_unlock(&port_user_lock); 129 spin_unlock(&port_user_lock);
100 130
@@ -198,9 +228,18 @@ static ssize_t evtchn_write(struct file *file, const char __user *buf,
198 goto out; 228 goto out;
199 229
200 spin_lock_irq(&port_user_lock); 230 spin_lock_irq(&port_user_lock);
201 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) 231
202 if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u)) 232 for (i = 0; i < (count/sizeof(evtchn_port_t)); i++) {
203 enable_irq(irq_from_evtchn(kbuf[i])); 233 unsigned port = kbuf[i];
234
235 if (port < NR_EVENT_CHANNELS &&
236 get_port_user(port) == u &&
237 !get_port_enabled(port)) {
238 set_port_enabled(port, true);
239 enable_irq(irq_from_evtchn(port));
240 }
241 }
242
204 spin_unlock_irq(&port_user_lock); 243 spin_unlock_irq(&port_user_lock);
205 244
206 rc = count; 245 rc = count;
@@ -222,8 +261,9 @@ static int evtchn_bind_to_user(struct per_user_data *u, int port)
222 * interrupt handler yet, and our caller has already 261 * interrupt handler yet, and our caller has already
223 * serialized bind operations.) 262 * serialized bind operations.)
224 */ 263 */
225 BUG_ON(port_user[port] != NULL); 264 BUG_ON(get_port_user(port) != NULL);
226 port_user[port] = u; 265 set_port_user(port, u);
266 set_port_enabled(port, true); /* start enabled */
227 267
228 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED, 268 rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
229 u->name, (void *)(unsigned long)port); 269 u->name, (void *)(unsigned long)port);
@@ -239,10 +279,7 @@ static void evtchn_unbind_from_user(struct per_user_data *u, int port)
239 279
240 unbind_from_irqhandler(irq, (void *)(unsigned long)port); 280 unbind_from_irqhandler(irq, (void *)(unsigned long)port);
241 281
242 /* make sure we unbind the irq handler before clearing the port */ 282 set_port_user(port, NULL);
243 barrier();
244
245 port_user[port] = NULL;
246} 283}
247 284
248static long evtchn_ioctl(struct file *file, 285static long evtchn_ioctl(struct file *file,
@@ -333,15 +370,17 @@ static long evtchn_ioctl(struct file *file,
333 spin_lock_irq(&port_user_lock); 370 spin_lock_irq(&port_user_lock);
334 371
335 rc = -ENOTCONN; 372 rc = -ENOTCONN;
336 if (port_user[unbind.port] != u) { 373 if (get_port_user(unbind.port) != u) {
337 spin_unlock_irq(&port_user_lock); 374 spin_unlock_irq(&port_user_lock);
338 break; 375 break;
339 } 376 }
340 377
341 evtchn_unbind_from_user(u, unbind.port); 378 disable_irq(irq_from_evtchn(unbind.port));
342 379
343 spin_unlock_irq(&port_user_lock); 380 spin_unlock_irq(&port_user_lock);
344 381
382 evtchn_unbind_from_user(u, unbind.port);
383
345 rc = 0; 384 rc = 0;
346 break; 385 break;
347 } 386 }
@@ -355,7 +394,7 @@ static long evtchn_ioctl(struct file *file,
355 394
356 if (notify.port >= NR_EVENT_CHANNELS) { 395 if (notify.port >= NR_EVENT_CHANNELS) {
357 rc = -EINVAL; 396 rc = -EINVAL;
358 } else if (port_user[notify.port] != u) { 397 } else if (get_port_user(notify.port) != u) {
359 rc = -ENOTCONN; 398 rc = -ENOTCONN;
360 } else { 399 } else {
361 notify_remote_via_evtchn(notify.port); 400 notify_remote_via_evtchn(notify.port);
@@ -431,7 +470,7 @@ static int evtchn_open(struct inode *inode, struct file *filp)
431 470
432 filp->private_data = u; 471 filp->private_data = u;
433 472
434	return 0;	 473	return nonseekable_open(inode, filp);
435} 474}
436 475
437static int evtchn_release(struct inode *inode, struct file *filp) 476static int evtchn_release(struct inode *inode, struct file *filp)
@@ -444,14 +483,21 @@ static int evtchn_release(struct inode *inode, struct file *filp)
444 free_page((unsigned long)u->ring); 483 free_page((unsigned long)u->ring);
445 484
446 for (i = 0; i < NR_EVENT_CHANNELS; i++) { 485 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
447 if (port_user[i] != u) 486 if (get_port_user(i) != u)
448 continue; 487 continue;
449 488
450 evtchn_unbind_from_user(port_user[i], i); 489 disable_irq(irq_from_evtchn(i));
451 } 490 }
452 491
453 spin_unlock_irq(&port_user_lock); 492 spin_unlock_irq(&port_user_lock);
454 493
494 for (i = 0; i < NR_EVENT_CHANNELS; i++) {
495 if (get_port_user(i) != u)
496 continue;
497
498 evtchn_unbind_from_user(get_port_user(i), i);
499 }
500
455 kfree(u->name); 501 kfree(u->name);
456 kfree(u); 502 kfree(u);
457 503
@@ -467,12 +513,12 @@ static const struct file_operations evtchn_fops = {
467 .fasync = evtchn_fasync, 513 .fasync = evtchn_fasync,
468 .open = evtchn_open, 514 .open = evtchn_open,
469 .release = evtchn_release, 515 .release = evtchn_release,
470 .llseek = noop_llseek, 516 .llseek = no_llseek,
471}; 517};
472 518
473static struct miscdevice evtchn_miscdev = { 519static struct miscdevice evtchn_miscdev = {
474 .minor = MISC_DYNAMIC_MINOR, 520 .minor = MISC_DYNAMIC_MINOR,
475 .name = "evtchn", 521 .name = "xen/evtchn",
476 .fops = &evtchn_fops, 522 .fops = &evtchn_fops,
477}; 523};
478static int __init evtchn_init(void) 524static int __init evtchn_init(void)
@@ -482,8 +528,11 @@ static int __init evtchn_init(void)
482 if (!xen_domain()) 528 if (!xen_domain())
483 return -ENODEV; 529 return -ENODEV;
484 530
531 port_user = kcalloc(NR_EVENT_CHANNELS, sizeof(*port_user), GFP_KERNEL);
532 if (port_user == NULL)
533 return -ENOMEM;
534
485 spin_lock_init(&port_user_lock); 535 spin_lock_init(&port_user_lock);
486 memset(port_user, 0, sizeof(port_user));
487 536
488 /* Create '/dev/misc/evtchn'. */ 537 /* Create '/dev/misc/evtchn'. */
489 err = misc_register(&evtchn_miscdev); 538 err = misc_register(&evtchn_miscdev);
@@ -499,6 +548,9 @@ static int __init evtchn_init(void)
499 548
500static void __exit evtchn_cleanup(void) 549static void __exit evtchn_cleanup(void)
501{ 550{
551 kfree(port_user);
552 port_user = NULL;
553
502 misc_deregister(&evtchn_miscdev); 554 misc_deregister(&evtchn_miscdev);
503} 555}
504 556
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index ef9c7db52077..db8c4c4ac880 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -49,6 +49,7 @@ static int xen_hvm_suspend(void *data)
49 49
50 if (!*cancelled) { 50 if (!*cancelled) {
51 xen_irq_resume(); 51 xen_irq_resume();
52 xen_console_resume();
52 xen_timer_resume(); 53 xen_timer_resume();
53 } 54 }
54 55
diff --git a/drivers/xen/xenfs/privcmd.c b/drivers/xen/xenfs/privcmd.c
index 0f5d4162b22d..dbd3b16fd131 100644
--- a/drivers/xen/xenfs/privcmd.c
+++ b/drivers/xen/xenfs/privcmd.c
@@ -265,9 +265,7 @@ static int mmap_return_errors(void *data, void *state)
265 xen_pfn_t *mfnp = data; 265 xen_pfn_t *mfnp = data;
266 struct mmap_batch_state *st = state; 266 struct mmap_batch_state *st = state;
267 267
268 put_user(*mfnp, st->user++); 268 return put_user(*mfnp, st->user++);
269
270 return 0;
271} 269}
272 270
273static struct vm_operations_struct privcmd_vm_ops; 271static struct vm_operations_struct privcmd_vm_ops;
@@ -322,10 +320,8 @@ static long privcmd_ioctl_mmap_batch(void __user *udata)
322 up_write(&mm->mmap_sem); 320 up_write(&mm->mmap_sem);
323 321
324 if (state.err > 0) { 322 if (state.err > 0) {
325 ret = 0;
326
327 state.user = m.arr; 323 state.user = m.arr;
328 traverse_pages(m.num, sizeof(xen_pfn_t), 324 ret = traverse_pages(m.num, sizeof(xen_pfn_t),
329 &pagelist, 325 &pagelist,
330 mmap_return_errors, &state); 326 mmap_return_errors, &state);
331 } 327 }
@@ -383,8 +379,9 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma)
383 if (xen_feature(XENFEAT_auto_translated_physmap)) 379 if (xen_feature(XENFEAT_auto_translated_physmap))
384 return -ENOSYS; 380 return -ENOSYS;
385 381
386 /* DONTCOPY is essential for Xen as copy_page_range is broken. */ 382 /* DONTCOPY is essential for Xen because copy_page_range doesn't know
387 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY; 383 * how to recreate these mappings */
384 vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
388 vma->vm_ops = &privcmd_vm_ops; 385 vma->vm_ops = &privcmd_vm_ops;
389 vma->vm_private_data = NULL; 386 vma->vm_private_data = NULL;
390 387
diff --git a/drivers/xen/xenfs/super.c b/drivers/xen/xenfs/super.c
index f6339d11d59c..1aa389719846 100644
--- a/drivers/xen/xenfs/super.c
+++ b/drivers/xen/xenfs/super.c
@@ -12,8 +12,6 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/magic.h> 14#include <linux/magic.h>
15#include <linux/mm.h>
16#include <linux/backing-dev.h>
17 15
18#include <xen/xen.h> 16#include <xen/xen.h>
19 17
@@ -24,28 +22,12 @@
24MODULE_DESCRIPTION("Xen filesystem"); 22MODULE_DESCRIPTION("Xen filesystem");
25MODULE_LICENSE("GPL"); 23MODULE_LICENSE("GPL");
26 24
27static int xenfs_set_page_dirty(struct page *page)
28{
29 return !TestSetPageDirty(page);
30}
31
32static const struct address_space_operations xenfs_aops = {
33 .set_page_dirty = xenfs_set_page_dirty,
34};
35
36static struct backing_dev_info xenfs_backing_dev_info = {
37 .ra_pages = 0, /* No readahead */
38 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK,
39};
40
41static struct inode *xenfs_make_inode(struct super_block *sb, int mode) 25static struct inode *xenfs_make_inode(struct super_block *sb, int mode)
42{ 26{
43 struct inode *ret = new_inode(sb); 27 struct inode *ret = new_inode(sb);
44 28
45 if (ret) { 29 if (ret) {
46 ret->i_mode = mode; 30 ret->i_mode = mode;
47 ret->i_mapping->a_ops = &xenfs_aops;
48 ret->i_mapping->backing_dev_info = &xenfs_backing_dev_info;
49 ret->i_uid = ret->i_gid = 0; 31 ret->i_uid = ret->i_gid = 0;
50 ret->i_blocks = 0; 32 ret->i_blocks = 0;
51 ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; 33 ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME;
@@ -121,9 +103,9 @@ static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
121 return rc; 103 return rc;
122} 104}
123 105
124static int xenfs_mount(struct file_system_type *fs_type, 106static struct dentry *xenfs_mount(struct file_system_type *fs_type,
125 int flags, const char *dev_name, 107 int flags, const char *dev_name,
126 void *data) 108 void *data)
127{ 109{
128 return mount_single(fs_type, flags, data, xenfs_fill_super); 110 return mount_single(fs_type, flags, data, xenfs_fill_super);
129} 111}
@@ -137,25 +119,11 @@ static struct file_system_type xenfs_type = {
137 119
138static int __init xenfs_init(void) 120static int __init xenfs_init(void)
139{ 121{
140 int err; 122 if (xen_domain())
141 if (!xen_domain()) { 123 return register_filesystem(&xenfs_type);
142 printk(KERN_INFO "xenfs: not registering filesystem on non-xen platform\n");
143 return 0;
144 }
145
146 err = register_filesystem(&xenfs_type);
147 if (err) {
148 printk(KERN_ERR "xenfs: Unable to register filesystem!\n");
149 goto out;
150 }
151
152 err = bdi_init(&xenfs_backing_dev_info);
153 if (err)
154 unregister_filesystem(&xenfs_type);
155
156 out:
157 124
158 return err; 125 printk(KERN_INFO "XENFS: not registering filesystem on non-xen platform\n");
126 return 0;
159} 127}
160 128
161static void __exit xenfs_exit(void) 129static void __exit xenfs_exit(void)
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index d5c1401f0031..d34896cfb19f 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -980,19 +980,11 @@ static int autofs4_root_ioctl_unlocked(struct inode *inode, struct file *filp,
980 } 980 }
981} 981}
982 982
983static DEFINE_MUTEX(autofs4_ioctl_mutex);
984
985static long autofs4_root_ioctl(struct file *filp, 983static long autofs4_root_ioctl(struct file *filp,
986 unsigned int cmd, unsigned long arg) 984 unsigned int cmd, unsigned long arg)
987{ 985{
988 long ret;
989 struct inode *inode = filp->f_dentry->d_inode; 986 struct inode *inode = filp->f_dentry->d_inode;
990 987 return autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
991 mutex_lock(&autofs4_ioctl_mutex);
992 ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
993 mutex_unlock(&autofs4_ioctl_mutex);
994
995 return ret;
996} 988}
997 989
998#ifdef CONFIG_COMPAT 990#ifdef CONFIG_COMPAT
@@ -1002,13 +994,11 @@ static long autofs4_root_compat_ioctl(struct file *filp,
 	struct inode *inode = filp->f_path.dentry->d_inode;
 	int ret;
 
-	mutex_lock(&autofs4_ioctl_mutex);
 	if (cmd == AUTOFS_IOC_READY || cmd == AUTOFS_IOC_FAIL)
 		ret = autofs4_root_ioctl_unlocked(inode, filp, cmd, arg);
 	else
 		ret = autofs4_root_ioctl_unlocked(inode, filp, cmd,
 			(unsigned long)compat_ptr(arg));
-	mutex_unlock(&autofs4_ioctl_mutex);
 
 	return ret;
 }
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 7845d1f7d1d9..b50bc4bd5c56 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -91,23 +91,10 @@ static inline int compressed_bio_size(struct btrfs_root *root,
 static struct bio *compressed_bio_alloc(struct block_device *bdev,
 					u64 first_byte, gfp_t gfp_flags)
 {
-	struct bio *bio;
 	int nr_vecs;
 
 	nr_vecs = bio_get_nr_vecs(bdev);
-	bio = bio_alloc(gfp_flags, nr_vecs);
-
-	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
-		while (!bio && (nr_vecs /= 2))
-			bio = bio_alloc(gfp_flags, nr_vecs);
-	}
-
-	if (bio) {
-		bio->bi_size = 0;
-		bio->bi_bdev = bdev;
-		bio->bi_sector = first_byte >> 9;
-	}
-	return bio;
+	return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
 }
 
 static int check_compressed_csum(struct inode *inode,
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8db9234f6b41..af52f6d7a4d8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -808,9 +808,9 @@ struct btrfs_block_group_cache {
 	int extents_thresh;
 	int free_extents;
 	int total_bitmaps;
-	int ro:1;
-	int dirty:1;
-	int iref:1;
+	unsigned int ro:1;
+	unsigned int dirty:1;
+	unsigned int iref:1;
 
 	int disk_cache_state;
 
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fb827d0d7181..c547cca26a26 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -28,6 +28,7 @@
 #include <linux/freezer.h>
 #include <linux/crc32c.h>
 #include <linux/slab.h>
+#include <linux/migrate.h>
 #include "compat.h"
 #include "ctree.h"
 #include "disk-io.h"
@@ -355,6 +356,8 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
 	ret = btree_read_extent_buffer_pages(root, eb, start + PAGE_CACHE_SIZE,
 					     btrfs_header_generation(eb));
 	BUG_ON(ret);
+	WARN_ON(!btrfs_header_flag(eb, BTRFS_HEADER_FLAG_WRITTEN));
+
 	found_start = btrfs_header_bytenr(eb);
 	if (found_start != start) {
 		WARN_ON(1);
@@ -693,6 +696,29 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
 					__btree_submit_bio_done);
 }
 
+static int btree_migratepage(struct address_space *mapping,
+			struct page *newpage, struct page *page)
+{
+	/*
+	 * we can't safely write a btree page from here,
+	 * we haven't done the locking hook
+	 */
+	if (PageDirty(page))
+		return -EAGAIN;
+	/*
+	 * Buffers may be managed in a filesystem specific way.
+	 * We must have no buffers or drop them.
+	 */
+	if (page_has_private(page) &&
+	    !try_to_release_page(page, GFP_KERNEL))
+		return -EAGAIN;
+#ifdef CONFIG_MIGRATION
+	return migrate_page(mapping, newpage, page);
+#else
+	return -ENOSYS;
+#endif
+}
+
 static int btree_writepage(struct page *page, struct writeback_control *wbc)
 {
 	struct extent_io_tree *tree;
@@ -707,8 +733,7 @@ static int btree_writepage(struct page *page, struct writeback_control *wbc)
 	}
 
 	redirty_page_for_writepage(wbc, page);
-	eb = btrfs_find_tree_block(root, page_offset(page),
-				   PAGE_CACHE_SIZE);
+	eb = btrfs_find_tree_block(root, page_offset(page), PAGE_CACHE_SIZE);
 	WARN_ON(!eb);
 
 	was_dirty = test_and_set_bit(EXTENT_BUFFER_DIRTY, &eb->bflags);
@@ -799,6 +824,9 @@ static const struct address_space_operations btree_aops = {
 	.releasepage	= btree_releasepage,
 	.invalidatepage = btree_invalidatepage,
 	.sync_page	= block_sync_page,
+#ifdef CONFIG_MIGRATION
+	.migratepage	= btree_migratepage,
+#endif
 };
 
 int readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize,
@@ -1538,10 +1566,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 						 GFP_NOFS);
 	struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root),
 					       GFP_NOFS);
-	struct btrfs_root *tree_root = kzalloc(sizeof(struct btrfs_root),
-					       GFP_NOFS);
-	struct btrfs_fs_info *fs_info = kzalloc(sizeof(*fs_info),
-						GFP_NOFS);
+	struct btrfs_root *tree_root = btrfs_sb(sb);
+	struct btrfs_fs_info *fs_info = tree_root->fs_info;
 	struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root),
 					       GFP_NOFS);
 	struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root),
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index 951ef09b82f4..6f0444473594 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -232,9 +232,85 @@ fail:
 	return ERR_PTR(ret);
 }
 
+static int btrfs_get_name(struct dentry *parent, char *name,
+			  struct dentry *child)
+{
+	struct inode *inode = child->d_inode;
+	struct inode *dir = parent->d_inode;
+	struct btrfs_path *path;
+	struct btrfs_root *root = BTRFS_I(dir)->root;
+	struct btrfs_inode_ref *iref;
+	struct btrfs_root_ref *rref;
+	struct extent_buffer *leaf;
+	unsigned long name_ptr;
+	struct btrfs_key key;
+	int name_len;
+	int ret;
+
+	if (!dir || !inode)
+		return -EINVAL;
+
+	if (!S_ISDIR(dir->i_mode))
+		return -EINVAL;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->leave_spinning = 1;
+
+	if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+		key.objectid = BTRFS_I(inode)->root->root_key.objectid;
+		key.type = BTRFS_ROOT_BACKREF_KEY;
+		key.offset = (u64)-1;
+		root = root->fs_info->tree_root;
+	} else {
+		key.objectid = inode->i_ino;
+		key.offset = dir->i_ino;
+		key.type = BTRFS_INODE_REF_KEY;
+	}
+
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0) {
+		btrfs_free_path(path);
+		return ret;
+	} else if (ret > 0) {
+		if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+			path->slots[0]--;
+		} else {
+			btrfs_free_path(path);
+			return -ENOENT;
+		}
+	}
+	leaf = path->nodes[0];
+
+	if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) {
+		rref = btrfs_item_ptr(leaf, path->slots[0],
+				     struct btrfs_root_ref);
+		name_ptr = (unsigned long)(rref + 1);
+		name_len = btrfs_root_ref_name_len(leaf, rref);
+	} else {
+		iref = btrfs_item_ptr(leaf, path->slots[0],
+				      struct btrfs_inode_ref);
+		name_ptr = (unsigned long)(iref + 1);
+		name_len = btrfs_inode_ref_name_len(leaf, iref);
+	}
+
+	read_extent_buffer(leaf, name, name_ptr, name_len);
+	btrfs_free_path(path);
+
+	/*
+	 * have to add the null termination to make sure that reconnect_path
+	 * gets the right len for strlen
+	 */
+	name[name_len] = '\0';
+
+	return 0;
+}
+
 const struct export_operations btrfs_export_ops = {
 	.encode_fh	= btrfs_encode_fh,
 	.fh_to_dentry	= btrfs_fh_to_dentry,
 	.fh_to_parent	= btrfs_fh_to_parent,
 	.get_parent	= btrfs_get_parent,
+	.get_name	= btrfs_get_name,
 };
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0c097f3aec41..bcd59c7dfb57 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3412,7 +3412,7 @@ again:
 	 * our reservation.
 	 */
 	if (unused <= space_info->total_bytes) {
-		unused -= space_info->total_bytes;
+		unused = space_info->total_bytes - unused;
 		if (unused >= num_bytes) {
 			if (!reserved)
 				space_info->bytes_reserved += orig_bytes;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eac10e3260a9..3e86b9f36507 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1828,9 +1828,9 @@ static void end_bio_extent_preparewrite(struct bio *bio, int err)
 	bio_put(bio);
 }
 
-static struct bio *
-extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+struct bio *
+btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
 		gfp_t gfp_flags)
 {
 	struct bio *bio;
 
@@ -1919,7 +1919,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
 	else
 		nr = bio_get_nr_vecs(bdev);
 
-	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+	bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
 
 	bio_add_page(bio, page, page_size, offset);
 	bio->bi_end_io = end_io_func;
@@ -2901,21 +2901,53 @@ out:
 int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		__u64 start, __u64 len, get_extent_t *get_extent)
 {
-	int ret;
+	int ret = 0;
 	u64 off = start;
 	u64 max = start + len;
 	u32 flags = 0;
+	u32 found_type;
+	u64 last;
 	u64 disko = 0;
+	struct btrfs_key found_key;
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_path *path;
+	struct btrfs_file_extent_item *item;
 	int end = 0;
 	u64 em_start = 0, em_len = 0;
 	unsigned long emflags;
-	ret = 0;
+	int hole = 0;
 
 	if (len == 0)
 		return -EINVAL;
 
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+	path->leave_spinning = 1;
+
+	ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
+				       path, inode->i_ino, -1, 0);
+	if (ret < 0) {
+		btrfs_free_path(path);
+		return ret;
+	}
+	WARN_ON(!ret);
+	path->slots[0]--;
+	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+			      struct btrfs_file_extent_item);
+	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
+	found_type = btrfs_key_type(&found_key);
+
+	/* No extents, just return */
+	if (found_key.objectid != inode->i_ino ||
+	    found_type != BTRFS_EXTENT_DATA_KEY) {
+		btrfs_free_path(path);
+		return 0;
+	}
+	last = found_key.offset;
+	btrfs_free_path(path);
+
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
 		&cached_state, GFP_NOFS);
 	em = get_extent(inode, NULL, 0, off, max - off, 0);
@@ -2925,11 +2957,18 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		ret = PTR_ERR(em);
 		goto out;
 	}
+
 	while (!end) {
+		hole = 0;
 		off = em->start + em->len;
 		if (off >= max)
 			end = 1;
 
+		if (em->block_start == EXTENT_MAP_HOLE) {
+			hole = 1;
+			goto next;
+		}
+
 		em_start = em->start;
 		em_len = em->len;
 
@@ -2939,8 +2978,6 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		if (em->block_start == EXTENT_MAP_LAST_BYTE) {
 			end = 1;
 			flags |= FIEMAP_EXTENT_LAST;
-		} else if (em->block_start == EXTENT_MAP_HOLE) {
-			flags |= FIEMAP_EXTENT_UNWRITTEN;
 		} else if (em->block_start == EXTENT_MAP_INLINE) {
 			flags |= (FIEMAP_EXTENT_DATA_INLINE |
 				  FIEMAP_EXTENT_NOT_ALIGNED);
@@ -2953,10 +2990,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
 			flags |= FIEMAP_EXTENT_ENCODED;
 
+next:
 		emflags = em->flags;
 		free_extent_map(em);
 		em = NULL;
-
 		if (!end) {
 			em = get_extent(inode, NULL, 0, off, max - off, 0);
 			if (!em)
@@ -2967,15 +3004,23 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 			}
 			emflags = em->flags;
 		}
+
 		if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
 			flags |= FIEMAP_EXTENT_LAST;
 			end = 1;
 		}
 
-		ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
-					em_len, flags);
-		if (ret)
-			goto out_free;
+		if (em_start == last) {
+			flags |= FIEMAP_EXTENT_LAST;
+			end = 1;
+		}
+
+		if (!hole) {
+			ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
+						em_len, flags);
+			if (ret)
+				goto out_free;
+		}
 	}
 out_free:
 	free_extent_map(em);
@@ -3836,8 +3881,10 @@ int try_release_extent_buffer(struct extent_io_tree *tree, struct page *page)
 
 	spin_lock(&tree->buffer_lock);
 	eb = radix_tree_lookup(&tree->buffer, start >> PAGE_CACHE_SHIFT);
-	if (!eb)
-		goto out;
+	if (!eb) {
+		spin_unlock(&tree->buffer_lock);
+		return ret;
+	}
 
 	if (test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)) {
 		ret = 0;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 1c6d4f342ef7..4183c8178f01 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -310,4 +310,7 @@ int extent_clear_unlock_delalloc(struct inode *inode,
 			       struct extent_io_tree *tree,
 			       u64 start, u64 end, struct page *locked_page,
 			       unsigned long op);
+struct bio *
+btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
+		gfp_t gfp_flags);
 #endif
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index e354c33df082..c1faded5fca0 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1047,8 +1047,14 @@ out:
 
 	if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
 		trans = btrfs_start_transaction(root, 0);
+		if (IS_ERR(trans)) {
+			num_written = PTR_ERR(trans);
+			goto done;
+		}
+		mutex_lock(&inode->i_mutex);
 		ret = btrfs_log_dentry_safe(trans, root,
 					    file->f_dentry);
+		mutex_unlock(&inode->i_mutex);
 		if (ret == 0) {
 			ret = btrfs_sync_log(trans, root);
 			if (ret == 0)
@@ -1067,6 +1073,7 @@ out:
 				(start_pos + num_written - 1) >> PAGE_CACHE_SHIFT);
 		}
 	}
+done:
 	current->backing_dev_info = NULL;
 	return num_written ? num_written : err;
 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 558cac2dfa54..8039390bd6a6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4501,6 +4501,7 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 	BTRFS_I(inode)->index_cnt = 2;
 	BTRFS_I(inode)->root = root;
 	BTRFS_I(inode)->generation = trans->transid;
+	inode->i_generation = BTRFS_I(inode)->generation;
 	btrfs_set_inode_space_info(root, inode);
 
 	if (mode & S_IFDIR)
@@ -4622,12 +4623,12 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
 }
 
 static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
-			    struct dentry *dentry, struct inode *inode,
-			    int backref, u64 index)
+			    struct inode *dir, struct dentry *dentry,
+			    struct inode *inode, int backref, u64 index)
 {
-	int err = btrfs_add_link(trans, dentry->d_parent->d_inode,
-				 inode, dentry->d_name.name,
-				 dentry->d_name.len, backref, index);
+	int err = btrfs_add_link(trans, dir, inode,
+				 dentry->d_name.name, dentry->d_name.len,
+				 backref, index);
 	if (!err) {
 		d_instantiate(dentry, inode);
 		return 0;
@@ -4668,8 +4669,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 	btrfs_set_trans_block_group(trans, dir);
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-				dentry->d_name.len,
-				dentry->d_parent->d_inode->i_ino, objectid,
+				dentry->d_name.len, dir->i_ino, objectid,
 				BTRFS_I(dir)->block_group, mode, &index);
 	err = PTR_ERR(inode);
 	if (IS_ERR(inode))
@@ -4682,7 +4682,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
 	}
 
 	btrfs_set_trans_block_group(trans, inode);
-	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
 	if (err)
 		drop_inode = 1;
 	else {
@@ -4730,10 +4730,8 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 	btrfs_set_trans_block_group(trans, dir);
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-				dentry->d_name.len,
-				dentry->d_parent->d_inode->i_ino,
-				objectid, BTRFS_I(dir)->block_group, mode,
-				&index);
+				dentry->d_name.len, dir->i_ino, objectid,
+				BTRFS_I(dir)->block_group, mode, &index);
 	err = PTR_ERR(inode);
 	if (IS_ERR(inode))
 		goto out_unlock;
@@ -4745,7 +4743,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
 	}
 
 	btrfs_set_trans_block_group(trans, inode);
-	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
 	if (err)
 		drop_inode = 1;
 	else {
@@ -4787,6 +4785,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 		return -EPERM;
 
 	btrfs_inc_nlink(inode);
+	inode->i_ctime = CURRENT_TIME;
 
 	err = btrfs_set_inode_index(dir, &index);
 	if (err)
@@ -4805,15 +4804,17 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
 	btrfs_set_trans_block_group(trans, dir);
 	ihold(inode);
 
-	err = btrfs_add_nondir(trans, dentry, inode, 1, index);
+	err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
 
 	if (err) {
 		drop_inode = 1;
 	} else {
+		struct dentry *parent = dget_parent(dentry);
 		btrfs_update_inode_block_group(trans, dir);
 		err = btrfs_update_inode(trans, root, inode);
 		BUG_ON(err);
-		btrfs_log_new_name(trans, inode, NULL, dentry->d_parent);
+		btrfs_log_new_name(trans, inode, NULL, parent);
+		dput(parent);
 	}
 
 	nr = trans->blocks_used;
@@ -4853,8 +4854,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	btrfs_set_trans_block_group(trans, dir);
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-				dentry->d_name.len,
-				dentry->d_parent->d_inode->i_ino, objectid,
+				dentry->d_name.len, dir->i_ino, objectid,
 				BTRFS_I(dir)->block_group, S_IFDIR | mode,
 				&index);
 	if (IS_ERR(inode)) {
@@ -4877,9 +4877,8 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
 	if (err)
 		goto out_fail;
 
-	err = btrfs_add_link(trans, dentry->d_parent->d_inode,
-			     inode, dentry->d_name.name,
-			     dentry->d_name.len, 0, index);
+	err = btrfs_add_link(trans, dir, inode, dentry->d_name.name,
+			     dentry->d_name.len, 0, index);
 	if (err)
 		goto out_fail;
 
@@ -5535,13 +5534,21 @@ struct btrfs_dio_private {
 	u64 bytes;
 	u32 *csums;
 	void *private;
+
+	/* number of bios pending for this dio */
+	atomic_t pending_bios;
+
+	/* IO errors */
+	int errors;
+
+	struct bio *orig_bio;
 };
 
 static void btrfs_endio_direct_read(struct bio *bio, int err)
 {
+	struct btrfs_dio_private *dip = bio->bi_private;
 	struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
 	struct bio_vec *bvec = bio->bi_io_vec;
-	struct btrfs_dio_private *dip = bio->bi_private;
 	struct inode *inode = dip->inode;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 start;
@@ -5595,15 +5602,18 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
 	struct btrfs_trans_handle *trans;
 	struct btrfs_ordered_extent *ordered = NULL;
 	struct extent_state *cached_state = NULL;
+	u64 ordered_offset = dip->logical_offset;
+	u64 ordered_bytes = dip->bytes;
 	int ret;
 
 	if (err)
 		goto out_done;
-
-	ret = btrfs_dec_test_ordered_pending(inode, &ordered,
-					     dip->logical_offset, dip->bytes);
+again:
+	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
+						   &ordered_offset,
+						   ordered_bytes);
 	if (!ret)
-		goto out_done;
+		goto out_test;
 
 	BUG_ON(!ordered);
 
@@ -5663,8 +5673,20 @@ out_unlock:
 out:
 	btrfs_delalloc_release_metadata(inode, ordered->len);
 	btrfs_end_transaction(trans, root);
+	ordered_offset = ordered->file_offset + ordered->len;
 	btrfs_put_ordered_extent(ordered);
 	btrfs_put_ordered_extent(ordered);
+
+out_test:
+	/*
+	 * our bio might span multiple ordered extents. If we haven't
+	 * completed the accounting for the whole dio, go back and try again
+	 */
+	if (ordered_offset < dip->logical_offset + dip->bytes) {
+		ordered_bytes = dip->logical_offset + dip->bytes -
+			ordered_offset;
+		goto again;
+	}
 out_done:
 	bio->bi_private = dip->private;
 
@@ -5684,6 +5706,176 @@ static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
 	return 0;
 }
 
+static void btrfs_end_dio_bio(struct bio *bio, int err)
+{
+	struct btrfs_dio_private *dip = bio->bi_private;
+
+	if (err) {
+		printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu "
+		      "disk_bytenr %lu len %u err no %d\n",
+		      dip->inode->i_ino, bio->bi_rw, bio->bi_sector,
+		      bio->bi_size, err);
+		dip->errors = 1;
+
+		/*
+		 * before atomic variable goto zero, we must make sure
+		 * dip->errors is perceived to be set.
+		 */
+		smp_mb__before_atomic_dec();
+	}
+
+	/* if there are more bios still pending for this dio, just exit */
+	if (!atomic_dec_and_test(&dip->pending_bios))
+		goto out;
+
+	if (dip->errors)
+		bio_io_error(dip->orig_bio);
+	else {
+		set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags);
+		bio_endio(dip->orig_bio, 0);
+	}
+out:
+	bio_put(bio);
+}
+
+static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
+				       u64 first_sector, gfp_t gfp_flags)
+{
+	int nr_vecs = bio_get_nr_vecs(bdev);
+	return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
+}
+
+static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
+					 int rw, u64 file_offset, int skip_sum,
+					 u32 *csums)
+{
+	int write = rw & REQ_WRITE;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	int ret;
+
+	bio_get(bio);
+	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
+	if (ret)
+		goto err;
+
+	if (write && !skip_sum) {
+		ret = btrfs_wq_submit_bio(root->fs_info,
+					  inode, rw, bio, 0, 0,
+					  file_offset,
+					  __btrfs_submit_bio_start_direct_io,
+					  __btrfs_submit_bio_done);
+		goto err;
+	} else if (!skip_sum)
+		btrfs_lookup_bio_sums_dio(root, inode, bio,
+					  file_offset, csums);
+
+	ret = btrfs_map_bio(root, rw, bio, 0, 1);
+err:
+	bio_put(bio);
+	return ret;
+}
+
+static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
+				    int skip_sum)
+{
+	struct inode *inode = dip->inode;
+	struct btrfs_root *root = BTRFS_I(inode)->root;
+	struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
+	struct bio *bio;
+	struct bio *orig_bio = dip->orig_bio;
+	struct bio_vec *bvec = orig_bio->bi_io_vec;
+	u64 start_sector = orig_bio->bi_sector;
+	u64 file_offset = dip->logical_offset;
+	u64 submit_len = 0;
+	u64 map_length;
+	int nr_pages = 0;
+	u32 *csums = dip->csums;
+	int ret = 0;
+
+	bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+	if (!bio)
+		return -ENOMEM;
+	bio->bi_private = dip;
+	bio->bi_end_io = btrfs_end_dio_bio;
+	atomic_inc(&dip->pending_bios);
+
+	map_length = orig_bio->bi_size;
+	ret = btrfs_map_block(map_tree, READ, start_sector << 9,
+			      &map_length, NULL, 0);
+	if (ret) {
+		bio_put(bio);
+		return -EIO;
+	}
+
+	while (bvec <= (orig_bio->bi_io_vec + orig_bio->bi_vcnt - 1)) {
+		if (unlikely(map_length < submit_len + bvec->bv_len ||
+		    bio_add_page(bio, bvec->bv_page, bvec->bv_len,
+				 bvec->bv_offset) < bvec->bv_len)) {
+			/*
+			 * inc the count before we submit the bio so
+			 * we know the end IO handler won't happen before
+			 * we inc the count. Otherwise, the dip might get freed
+			 * before we're done setting it up
+			 */
+			atomic_inc(&dip->pending_bios);
+			ret = __btrfs_submit_dio_bio(bio, inode, rw,
+						     file_offset, skip_sum,
+						     csums);
+			if (ret) {
+				bio_put(bio);
+				atomic_dec(&dip->pending_bios);
+				goto out_err;
+			}
+
+			if (!skip_sum)
+				csums = csums + nr_pages;
+			start_sector += submit_len >> 9;
+			file_offset += submit_len;
+
+			submit_len = 0;
+			nr_pages = 0;
+
+			bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
+						  start_sector, GFP_NOFS);
+			if (!bio)
+				goto out_err;
+			bio->bi_private = dip;
+			bio->bi_end_io = btrfs_end_dio_bio;
+
+			map_length = orig_bio->bi_size;
+			ret = btrfs_map_block(map_tree, READ, start_sector << 9,
+					      &map_length, NULL, 0);
+			if (ret) {
+				bio_put(bio);
+				goto out_err;
+			}
+		} else {
+			submit_len += bvec->bv_len;
+			nr_pages ++;
+			bvec++;
+		}
+	}
+
+	ret = __btrfs_submit_dio_bio(bio, inode, rw, file_offset, skip_sum,
+				     csums);
+	if (!ret)
+		return 0;
+
+	bio_put(bio);
+out_err:
+	dip->errors = 1;
+	/*
+	 * before atomic variable goto zero, we must
+	 * make sure dip->errors is perceived to be set.
+	 */
+	smp_mb__before_atomic_dec();
+	if (atomic_dec_and_test(&dip->pending_bios))
+		bio_io_error(dip->orig_bio);
+
+	/* bio_end_io() will handle error, so we needn't return it */
+	return 0;
+}
+
 static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
 				loff_t file_offset)
 {
@@ -5723,36 +5915,18 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
 
 	dip->disk_bytenr = (u64)bio->bi_sector << 9;
 	bio->bi_private = dip;
+	dip->errors = 0;
+	dip->orig_bio = bio;
+	atomic_set(&dip->pending_bios, 0);
 
 	if (write)
 		bio->bi_end_io = btrfs_endio_direct_write;
 	else
 		bio->bi_end_io = btrfs_endio_direct_read;
 
-	ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
-	if (ret)
-		goto out_err;
-
-	if (write && !skip_sum) {
-		ret = btrfs_wq_submit_bio(BTRFS_I(inode)->root->fs_info,
-				   inode, rw, bio, 0, 0,
-				   dip->logical_offset,
-				   __btrfs_submit_bio_start_direct_io,
-				   __btrfs_submit_bio_done);
-		if (ret)
-			goto out_err;
+	ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
+	if (!ret)
 		return;
-	} else if (!skip_sum)
-		btrfs_lookup_bio_sums_dio(root, inode, bio,
-					  dip->logical_offset, dip->csums);
-
-	ret = btrfs_map_bio(root, rw, bio, 0, 1);
-	if (ret)
-		goto out_err;
-	return;
-out_err:
-	kfree(dip->csums);
-	kfree(dip);
 free_ordered:
 	/*
 	 * If this is a write, we need to clean up the reserved space and kill
@@ -6607,8 +6781,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	BUG_ON(ret);
 
 	if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) {
-		btrfs_log_new_name(trans, old_inode, old_dir,
-				   new_dentry->d_parent);
+		struct dentry *parent = dget_parent(new_dentry);
+		btrfs_log_new_name(trans, old_inode, old_dir, parent);
+		dput(parent);
 		btrfs_end_log_trans(root);
 	}
 out_fail:
@@ -6758,8 +6933,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 	btrfs_set_trans_block_group(trans, dir);
 
 	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
-				dentry->d_name.len,
-				dentry->d_parent->d_inode->i_ino, objectid,
+				dentry->d_name.len, dir->i_ino, objectid,
 				BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO,
 				&index);
 	err = PTR_ERR(inode);
@@ -6773,7 +6947,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
 	}
 
 	btrfs_set_trans_block_group(trans, inode);
-	err = btrfs_add_nondir(trans, dentry, inode, 0, index);
+	err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
 	if (err)
 		drop_inode = 1;
 	else {
@@ -6844,6 +7018,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_key ins;
 	u64 cur_offset = start;
+	u64 i_size;
 	int ret = 0;
 	bool own_trans = true;
 
@@ -6885,11 +7060,11 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 		    (actual_len > inode->i_size) &&
 		    (cur_offset > inode->i_size)) {
 			if (cur_offset > actual_len)
-				i_size_write(inode, actual_len);
+				i_size = actual_len;
 			else
-				i_size_write(inode, cur_offset);
-			i_size_write(inode, cur_offset);
-			btrfs_ordered_update_i_size(inode, cur_offset, NULL);
+				i_size = cur_offset;
+			i_size_write(inode, i_size);
+			btrfs_ordered_update_i_size(inode, i_size, NULL);
 		}
 
 		ret = btrfs_update_inode(trans, root, inode);
@@ -6943,6 +7118,10 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);
 
 	mutex_lock(&inode->i_mutex);
+	ret = inode_newsize_ok(inode, alloc_end);
+	if (ret)
+		goto out;
+
 	if (alloc_start > inode->i_size) {
 		ret = btrfs_cont_expand(inode, alloc_start);
 		if (ret)
@@ -7139,6 +7318,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
 	.readlink	= generic_readlink,
 	.follow_link	= page_follow_link_light,
 	.put_link	= page_put_link,
+	.getattr	= btrfs_getattr,
 	.permission	= btrfs_permission,
 	.setxattr	= btrfs_setxattr,
 	.getxattr	= btrfs_getxattr,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 463d91b4dd3a..f1c9bb4079ed 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -233,7 +233,8 @@ static noinline int create_subvol(struct btrfs_root *root,
 	struct btrfs_inode_item *inode_item;
 	struct extent_buffer *leaf;
 	struct btrfs_root *new_root;
-	struct inode *dir = dentry->d_parent->d_inode;
+	struct dentry *parent = dget_parent(dentry);
+	struct inode *dir;
 	int ret;
 	int err;
 	u64 objectid;
@@ -242,8 +243,13 @@ static noinline int create_subvol(struct btrfs_root *root,
 
 	ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root,
 				       0, &objectid);
-	if (ret)
+	if (ret) {
+		dput(parent);
 		return ret;
+	}
+
+	dir = parent->d_inode;
+
 	/*
 	 * 1 - inode item
 	 * 2 - refs
@@ -251,8 +257,10 @@ static noinline int create_subvol(struct btrfs_root *root,
 	 * 2 - dir items
 	 */
 	trans = btrfs_start_transaction(root, 6);
-	if (IS_ERR(trans))
+	if (IS_ERR(trans)) {
+		dput(parent);
 		return PTR_ERR(trans);
+	}
 
 	leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
 				      0, objectid, NULL, 0, 0, 0);
@@ -339,6 +347,7 @@ static noinline int create_subvol(struct btrfs_root *root,
 
 	d_instantiate(dentry, btrfs_lookup_dentry(dir, dentry));
 fail:
+	dput(parent);
 	if (async_transid) {
 		*async_transid = trans->transid;
 		err = btrfs_commit_transaction_async(trans, root, 1);
@@ -354,6 +363,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
 			   char *name, int namelen, u64 *async_transid)
 {
 	struct inode *inode;
+	struct dentry *parent;
 	struct btrfs_pending_snapshot *pending_snapshot;
 	struct btrfs_trans_handle *trans;
 	int ret;
@@ -396,7 +406,9 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
 
 	btrfs_orphan_cleanup(pending_snapshot->snap);
 
-	inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
+	parent = dget_parent(dentry);
+	inode = btrfs_lookup_dentry(parent->d_inode, dentry);
+	dput(parent);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
 		goto fail;
@@ -1669,12 +1681,11 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 		olen = len = src->i_size - off;
 		/* if we extend to eof, continue to block boundary */
 		if (off + len == src->i_size)
-			len = ((src->i_size + bs-1) & ~(bs-1))
-				- off;
+			len = ALIGN(src->i_size, bs) - off;
 
 	/* verify the end result is block aligned */
-	if ((off & (bs-1)) ||
-	    ((off + len) & (bs-1)))
+	if (!IS_ALIGNED(off, bs) || !IS_ALIGNED(off + len, bs) ||
+	    !IS_ALIGNED(destoff, bs))
 		goto out_unlock;
 
 	/* do any pending delalloc/csum calc on src, one way or
@@ -1874,8 +1885,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
 	 * but shouldn't round up the file size
 	 */
 	endoff = new_key.offset + datal;
-	if (endoff > off+olen)
-		endoff = off+olen;
+	if (endoff > destoff+olen)
+		endoff = destoff+olen;
 	if (endoff > inode->i_size)
 		btrfs_i_size_write(inode, endoff);
 
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index f4621f6deca1..ae7737e352c9 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -250,6 +250,73 @@ int btrfs_add_ordered_sum(struct inode *inode,
 
 /*
  * this is used to account for finished IO across a given range
+ * of the file. The IO may span ordered extents. If
+ * a given ordered_extent is completely done, 1 is returned, otherwise
+ * 0.
+ *
+ * test_and_set_bit on a flag in the struct btrfs_ordered_extent is used
+ * to make sure this function only returns 1 once for a given ordered extent.
+ *
+ * file_offset is updated to one byte past the range that is recorded as
+ * complete. This allows you to walk forward in the file.
+ */
+int btrfs_dec_test_first_ordered_pending(struct inode *inode,
+				   struct btrfs_ordered_extent **cached,
+				   u64 *file_offset, u64 io_size)
+{
+	struct btrfs_ordered_inode_tree *tree;
+	struct rb_node *node;
+	struct btrfs_ordered_extent *entry = NULL;
+	int ret;
+	u64 dec_end;
+	u64 dec_start;
+	u64 to_dec;
+
+	tree = &BTRFS_I(inode)->ordered_tree;
+	spin_lock(&tree->lock);
+	node = tree_search(tree, *file_offset);
+	if (!node) {
+		ret = 1;
+		goto out;
+	}
+
+	entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
+	if (!offset_in_entry(entry, *file_offset)) {
+		ret = 1;
+		goto out;
+	}
+
+	dec_start = max(*file_offset, entry->file_offset);
+	dec_end = min(*file_offset + io_size, entry->file_offset +
+		      entry->len);
+	*file_offset = dec_end;
+	if (dec_start > dec_end) {
+		printk(KERN_CRIT "bad ordering dec_start %llu end %llu\n",
+		       (unsigned long long)dec_start,
+		       (unsigned long long)dec_end);
+	}
+	to_dec = dec_end - dec_start;
+	if (to_dec > entry->bytes_left) {
+		printk(KERN_CRIT "bad ordered accounting left %llu size %llu\n",
+		       (unsigned long long)entry->bytes_left,
+		       (unsigned long long)to_dec);
+	}
+	entry->bytes_left -= to_dec;
+	if (entry->bytes_left == 0)
+		ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
+	else
+		ret = 1;
+out:
+	if (!ret && cached && entry) {
+		*cached = entry;
+		atomic_inc(&entry->refs);
+	}
+	spin_unlock(&tree->lock);
+	return ret == 0;
+}
+
+/*
+ * this is used to account for finished IO across a given range
  * of the file. The IO should not span ordered extents. If
  * a given ordered_extent is completely done, 1 is returned, otherwise
  * 0.
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index 8ac365492a3f..61dca83119dd 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -141,6 +141,9 @@ int btrfs_remove_ordered_extent(struct inode *inode,
 int btrfs_dec_test_ordered_pending(struct inode *inode,
 				   struct btrfs_ordered_extent **cached,
 				   u64 file_offset, u64 io_size);
+int btrfs_dec_test_first_ordered_pending(struct inode *inode,
+				   struct btrfs_ordered_extent **cached,
+				   u64 *file_offset, u64 io_size);
 int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 			     u64 start, u64 len, u64 disk_len, int type);
 int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 8299a25ffc8f..dbb51ea7a13c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -244,6 +244,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 	case Opt_space_cache:
 		printk(KERN_INFO "btrfs: enabling disk space caching\n");
 		btrfs_set_opt(info->mount_opt, SPACE_CACHE);
+		break;
 	case Opt_clear_cache:
 		printk(KERN_INFO "btrfs: force clearing of disk cache\n");
 		btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
@@ -562,12 +563,26 @@ static int btrfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
 
 static int btrfs_test_super(struct super_block *s, void *data)
 {
-	struct btrfs_fs_devices *test_fs_devices = data;
+	struct btrfs_root *test_root = data;
 	struct btrfs_root *root = btrfs_sb(s);
 
-	return root->fs_info->fs_devices == test_fs_devices;
+	/*
+	 * If this super block is going away, return false as it
+	 * can't match as an existing super block.
+	 */
+	if (!atomic_read(&s->s_active))
+		return 0;
+	return root->fs_info->fs_devices == test_root->fs_info->fs_devices;
+}
+
+static int btrfs_set_super(struct super_block *s, void *data)
+{
+	s->s_fs_info = data;
+
+	return set_anon_super(s, data);
 }
 
+
 /*
  * Find a superblock for the given device / mount point.
  *
@@ -581,6 +596,8 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 	struct super_block *s;
 	struct dentry *root;
 	struct btrfs_fs_devices *fs_devices = NULL;
+	struct btrfs_root *tree_root = NULL;
+	struct btrfs_fs_info *fs_info = NULL;
 	fmode_t mode = FMODE_READ;
 	char *subvol_name = NULL;
 	u64 subvol_objectid = 0;
@@ -608,8 +625,24 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
 		goto error_close_devices;
 	}
 
+	/*
+	 * Setup a dummy root and fs_info for test/set super. This is because
+	 * we don't actually fill this stuff out until open_ctree, but we need
+	 * it for searching for existing supers, so this lets us do that and
+	 * then open_ctree will properly initialize everything later.
+	 */
+	fs_info = kzalloc(sizeof(struct btrfs_fs_info), GFP_NOFS);
+	tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+	if (!fs_info || !tree_root) {
+		error = -ENOMEM;
+		goto error_close_devices;
+	}
+	fs_info->tree_root = tree_root;
+	fs_info->fs_devices = fs_devices;
+	tree_root->fs_info = fs_info;
+
 	bdev = fs_devices->latest_bdev;
-	s = sget(fs_type, btrfs_test_super, set_anon_super, fs_devices);
+	s = sget(fs_type, btrfs_test_super, btrfs_set_super, tree_root);
 	if (IS_ERR(s))
 		goto error_s;
 
@@ -675,6 +708,8 @@ error_s:
 	error = PTR_ERR(s);
 error_close_devices:
 	btrfs_close_devices(fs_devices);
+	kfree(fs_info);
+	kfree(tree_root);
 error_free_subvol_name:
 	kfree(subvol_name);
 	return ERR_PTR(error);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 1fffbc017bdf..f50e931fc217 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -902,6 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	struct btrfs_root *root = pending->root;
 	struct btrfs_root *parent_root;
 	struct inode *parent_inode;
+	struct dentry *parent;
 	struct dentry *dentry;
 	struct extent_buffer *tmp;
 	struct extent_buffer *old;
@@ -941,7 +942,8 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 	trans->block_rsv = &pending->block_rsv;
 
 	dentry = pending->dentry;
-	parent_inode = dentry->d_parent->d_inode;
+	parent = dget_parent(dentry);
+	parent_inode = parent->d_inode;
 	parent_root = BTRFS_I(parent_inode)->root;
 	record_root_in_trans(trans, parent_root);
 
@@ -989,6 +991,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
 				 parent_inode->i_ino, index,
 				 dentry->d_name.name, dentry->d_name.len);
 	BUG_ON(ret);
+	dput(parent);
 
 	key.offset = (u64)-1;
 	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index a29f19384a27..054744ac5719 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2869,6 +2869,7 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 {
 	int ret = 0;
 	struct btrfs_root *root;
+	struct dentry *old_parent = NULL;
 
 	/*
 	 * for regular files, if its inode is already on disk, we don't
@@ -2910,10 +2911,13 @@ static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
 		if (IS_ROOT(parent))
 			break;
 
-		parent = parent->d_parent;
+		parent = dget_parent(parent);
+		dput(old_parent);
+		old_parent = parent;
 		inode = parent->d_inode;
 
 	}
+	dput(old_parent);
 out:
 	return ret;
 }
@@ -2945,6 +2949,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 {
 	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
 	struct super_block *sb;
+	struct dentry *old_parent = NULL;
 	int ret = 0;
 	u64 last_committed = root->fs_info->last_trans_committed;
 
@@ -3016,10 +3021,13 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
 		if (IS_ROOT(parent))
 			break;
 
-		parent = parent->d_parent;
+		parent = dget_parent(parent);
+		dput(old_parent);
+		old_parent = parent;
 	}
 	ret = 0;
 end_trans:
+	dput(old_parent);
 	if (ret < 0) {
 		BUG_ON(ret != -ENOSPC);
 		root->fs_info->last_trans_log_full_commit = trans->transid;
@@ -3039,8 +3047,13 @@ end_no_trans:
 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
 			  struct btrfs_root *root, struct dentry *dentry)
 {
-	return btrfs_log_inode_parent(trans, root, dentry->d_inode,
-				      dentry->d_parent, 0);
+	struct dentry *parent = dget_parent(dentry);
+	int ret;
+
+	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
+	dput(parent);
+
+	return ret;
 }
 
 /*
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 0ed213970ced..ee45648b0d1a 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -4,6 +4,7 @@ config CIFS
 	select NLS
 	select CRYPTO
 	select CRYPTO_MD5
+	select CRYPTO_HMAC
 	select CRYPTO_ARC4
 	help
 	  This is the client VFS module for the Common Internet File System
@@ -143,6 +144,13 @@ config CIFS_FSCACHE
 	  to be cached locally on disk through the general filesystem cache
 	  manager. If unsure, say N.
 
+config CIFS_ACL
+	  bool "Provide CIFS ACL support (EXPERIMENTAL)"
+	  depends on EXPERIMENTAL && CIFS_XATTR
+	  help
+	    Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
+	    is handed over to the application/caller.
+
 config CIFS_EXPERIMENTAL
 	  bool "CIFS Experimental Features (EXPERIMENTAL)"
 	  depends on CIFS && EXPERIMENTAL
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index c9b4792ae825..c6ebea088ac7 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -560,7 +560,7 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
 
 	if (IS_ERR(tlink))
-		return NULL;
+		return ERR_CAST(tlink);
 
 	xid = GetXid();
 	rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
@@ -568,7 +568,9 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
 
 	cifs_put_tlink(tlink);
 
-	cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
+	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
+	if (rc)
+		return ERR_PTR(rc);
 	return pntsd;
 }
 
@@ -583,7 +585,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
 	struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
 
 	if (IS_ERR(tlink))
-		return NULL;
+		return ERR_CAST(tlink);
 
 	tcon = tlink_tcon(tlink);
 	xid = GetXid();
@@ -591,23 +593,22 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
 	rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
 			 &fid, &oplock, NULL, cifs_sb->local_nls,
 			 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
-	if (rc) {
-		cERROR(1, "Unable to open file to get ACL");
-		goto out;
+	if (!rc) {
+		rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
+		CIFSSMBClose(xid, tcon, fid);
 	}
 
-	rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
-	cFYI(1, "GetCIFSACL rc = %d ACL len %d", rc, *pacllen);
-
-	CIFSSMBClose(xid, tcon, fid);
- out:
 	cifs_put_tlink(tlink);
 	FreeXid(xid);
+
+	cFYI(1, "%s: rc = %d ACL len %d", __func__, rc, *pacllen);
+	if (rc)
+		return ERR_PTR(rc);
 	return pntsd;
 }
 
 /* Retrieve an ACL from the server */
-static struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
+struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
 		struct inode *inode, const char *path,
 		u32 *pacllen)
 {
@@ -695,7 +696,7 @@ static int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
695} 696}
696 697
697/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ 698/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
698void 699int
699cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr, 700cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
700 struct inode *inode, const char *path, const __u16 *pfid) 701 struct inode *inode, const char *path, const __u16 *pfid)
701{ 702{
@@ -711,17 +712,21 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
711 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen); 712 pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
712 713
713 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */ 714 /* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
714 if (pntsd) 715 if (IS_ERR(pntsd)) {
716 rc = PTR_ERR(pntsd);
717 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
718 } else {
715 rc = parse_sec_desc(pntsd, acllen, fattr); 719 rc = parse_sec_desc(pntsd, acllen, fattr);
716 if (rc) 720 kfree(pntsd);
717 cFYI(1, "parse sec desc failed rc = %d", rc); 721 if (rc)
722 cERROR(1, "parse sec desc failed rc = %d", rc);
723 }
718 724
719 kfree(pntsd); 725 return rc;
720 return;
721} 726}
722 727
723/* Convert mode bits to an ACL so we can update the ACL on the server */ 728/* Convert mode bits to an ACL so we can update the ACL on the server */
724int mode_to_acl(struct inode *inode, const char *path, __u64 nmode) 729int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
725{ 730{
726 int rc = 0; 731 int rc = 0;
727 __u32 secdesclen = 0; 732 __u32 secdesclen = 0;
@@ -736,7 +741,10 @@ int mode_to_acl(struct inode *inode, const char *path, __u64 nmode)
736 /* Add three ACEs for owner, group, everyone getting rid of 741 /* Add three ACEs for owner, group, everyone getting rid of
737 other ACEs as chmod disables ACEs and set the security descriptor */ 742 other ACEs as chmod disables ACEs and set the security descriptor */
738 743
739 if (pntsd) { 744 if (IS_ERR(pntsd)) {
745 rc = PTR_ERR(pntsd);
746 cERROR(1, "%s: error %d getting sec desc", __func__, rc);
747 } else {
740 /* allocate memory for the smb header, 748 /* allocate memory for the smb header,
741 set security descriptor request security descriptor 749 set security descriptor request security descriptor
742 	   parameters, and security descriptor itself */ 750 	   parameters, and security descriptor itself */
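
Taken together, the cifsacl.c hunks switch the ACL fetch helpers from returning NULL on any failure to returning ERR_PTR(-errno), so callers can tell a transport error from an empty ACL and propagate the code. The ERR_PTR()/IS_ERR()/PTR_ERR() convention packs small negative errnos into the top of the pointer range. A self-contained userspace sketch; the helpers are re-implemented here purely for illustration (in the kernel they come from linux/err.h), and fetch_acl() is a stand-in for get_cifs_acl():

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Minimal userspace re-implementation of the kernel's err.h helpers. */
    #define MAX_ERRNO 4095
    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Stand-in for get_cifs_acl(): fails with -EIO instead of NULL. */
    static void *fetch_acl(int fail)
    {
            if (fail)
                    return ERR_PTR(-EIO);
            return malloc(16);
    }

    int main(void)
    {
            void *acl = fetch_acl(1);

            if (IS_ERR(acl))
                    printf("error %ld getting sec desc\n", PTR_ERR(acl));
            else
                    free(acl);
            return 0;
    }
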
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 9c3789762ab7..76c8a906a63e 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -458,6 +458,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
458 seq_printf(s, ",acl"); 458 seq_printf(s, ",acl");
459 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) 459 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
460 seq_printf(s, ",mfsymlinks"); 460 seq_printf(s, ",mfsymlinks");
461 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
462 seq_printf(s, ",fsc");
461 463
462 seq_printf(s, ",rsize=%d", cifs_sb->rsize); 464 seq_printf(s, ",rsize=%d", cifs_sb->rsize);
463 seq_printf(s, ",wsize=%d", cifs_sb->wsize); 465 seq_printf(s, ",wsize=%d", cifs_sb->wsize);
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 7ed69b6b5fe6..db961dc4fd3d 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -130,10 +130,12 @@ extern int cifs_get_file_info_unix(struct file *filp);
130extern int cifs_get_inode_info_unix(struct inode **pinode, 130extern int cifs_get_inode_info_unix(struct inode **pinode,
131 const unsigned char *search_path, 131 const unsigned char *search_path,
132 struct super_block *sb, int xid); 132 struct super_block *sb, int xid);
133extern void cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, 133extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
134 struct cifs_fattr *fattr, struct inode *inode, 134 struct cifs_fattr *fattr, struct inode *inode,
135 const char *path, const __u16 *pfid); 135 const char *path, const __u16 *pfid);
136extern int mode_to_acl(struct inode *inode, const char *path, __u64); 136extern int mode_to_cifs_acl(struct inode *inode, const char *path, __u64);
137extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
138 const char *, u32 *);
137 139
138extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, 140extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *,
139 const char *); 141 const char *);
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 251a17c03545..32fa4d9b5dbc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1352,6 +1352,11 @@ cifs_parse_mount_options(char *options, const char *devname,
1352 "supported. Instead set " 1352 "supported. Instead set "
1353 "/proc/fs/cifs/LookupCacheEnabled to 0\n"); 1353 "/proc/fs/cifs/LookupCacheEnabled to 0\n");
1354 } else if (strnicmp(data, "fsc", 3) == 0) { 1354 } else if (strnicmp(data, "fsc", 3) == 0) {
1355#ifndef CONFIG_CIFS_FSCACHE
1356 cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE"
1357 "kernel config option set");
1358 return 1;
1359#endif
1355 vol->fsc = true; 1360 vol->fsc = true;
1356 } else if (strnicmp(data, "mfsymlinks", 10) == 0) { 1361 } else if (strnicmp(data, "mfsymlinks", 10) == 0) {
1357 vol->mfsymlinks = true; 1362 vol->mfsymlinks = true;
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 0eb87026cad3..548f06230a6d 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -66,7 +66,7 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
66 /* Search for server name delimiter */ 66 /* Search for server name delimiter */
67 sep = memchr(hostname, '\\', len); 67 sep = memchr(hostname, '\\', len);
68 if (sep) 68 if (sep)
69 len = sep - unc; 69 len = sep - hostname;
70 else 70 else
71 cFYI(1, "%s: probably server name is whole unc: %s", 71 cFYI(1, "%s: probably server name is whole unc: %s",
72 __func__, unc); 72 __func__, unc);
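
The dns_resolve.c change is a one-token pointer-arithmetic fix: the server-name length must be measured from hostname, where the memchr() search started, not from unc, which points two characters earlier at the leading "\\". A small runnable illustration of the difference:

    #include <assert.h>
    #include <string.h>

    int main(void)
    {
            const char *unc = "\\\\server\\share";
            const char *hostname = unc + 2;         /* skip the leading "\\" */
            size_t len = strlen(hostname);
            const char *sep = memchr(hostname, '\\', len);

            if (sep)
                    len = sep - hostname;   /* correct: 6, i.e. "server" */
            /* sep - unc would give 8 and overrun the name by two bytes */
            assert(len == 6);
            return 0;
    }
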
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 06c3e83fa387..b857ce5db775 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2271,8 +2271,10 @@ void cifs_oplock_break_get(struct cifsFileInfo *cfile)
2271 2271
2272void cifs_oplock_break_put(struct cifsFileInfo *cfile) 2272void cifs_oplock_break_put(struct cifsFileInfo *cfile)
2273{ 2273{
2274 struct super_block *sb = cfile->dentry->d_sb;
2275
2274 cifsFileInfo_put(cfile); 2276 cifsFileInfo_put(cfile);
2275 cifs_sb_deactive(cfile->dentry->d_sb); 2277 cifs_sb_deactive(sb);
2276} 2278}
2277 2279
2278const struct address_space_operations cifs_addr_ops = { 2280const struct address_space_operations cifs_addr_ops = {
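
The cifs_oplock_break_put() hunk closes a potential use-after-free: cifsFileInfo_put() can drop the last reference and free cfile, after which cfile->dentry->d_sb must not be touched, so the superblock pointer is saved first. The same rule in a self-contained refcounting sketch (obj_put() and the struct layout are illustrative, not CIFS code):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refs;
            int payload;    /* stand-in for cfile->dentry->d_sb */
    };

    static void obj_put(struct obj *o)
    {
            if (--o->refs == 0)
                    free(o);        /* o is gone after this */
    }

    static void release(struct obj *o)
    {
            int payload = o->payload;       /* read while still alive */

            obj_put(o);                     /* may free o */
            printf("payload %d\n", payload); /* safe: uses the saved copy */
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            o->refs = 1;
            o->payload = 42;
            release(o);
            return 0;
    }
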
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index a2ad94efcfe6..297a43d0ff7f 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -2,7 +2,7 @@
2 * fs/cifs/fscache.c - CIFS filesystem cache interface 2 * fs/cifs/fscache.c - CIFS filesystem cache interface
3 * 3 *
4 * Copyright (c) 2010 Novell, Inc. 4 * Copyright (c) 2010 Novell, Inc.
5 * Author(s): Suresh Jayaraman (sjayaraman@suse.de> 5 * Author(s): Suresh Jayaraman <sjayaraman@suse.de>
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU Lesser General Public License as published 8 * it under the terms of the GNU Lesser General Public License as published
@@ -67,10 +67,12 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
67 if (cifsi->fscache) 67 if (cifsi->fscache)
68 return; 68 return;
69 69
70 cifsi->fscache = fscache_acquire_cookie(tcon->fscache, 70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
71 &cifs_fscache_inode_object_def, cifsi); 72 &cifs_fscache_inode_object_def, cifsi);
72 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache, 73 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache,
73 cifsi->fscache); 74 cifsi->fscache);
75 }
74} 76}
75 77
76void cifs_fscache_release_inode_cookie(struct inode *inode) 78void cifs_fscache_release_inode_cookie(struct inode *inode)
@@ -101,10 +103,8 @@ void cifs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
101{ 103{
102 if ((filp->f_flags & O_ACCMODE) != O_RDONLY) 104 if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
103 cifs_fscache_disable_inode_cookie(inode); 105 cifs_fscache_disable_inode_cookie(inode);
104 else { 106 else
105 cifs_fscache_enable_inode_cookie(inode); 107 cifs_fscache_enable_inode_cookie(inode);
106 cFYI(1, "CIFS: fscache inode cookie set");
107 }
108} 108}
109 109
110void cifs_fscache_reset_inode_cookie(struct inode *inode) 110void cifs_fscache_reset_inode_cookie(struct inode *inode)
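
The fscache.c test (filp->f_flags & O_ACCMODE) != O_RDONLY is the only reliable way to ask "was this opened read-only?": on Linux O_RDONLY is defined as 0, so masking with O_RDONLY alone can never match anything. A runnable demonstration:

    #include <assert.h>
    #include <fcntl.h>

    int main(void)
    {
            int flags = O_WRONLY | O_APPEND;

            assert(O_RDONLY == 0);                   /* why & O_RDONLY fails */
            assert((flags & O_ACCMODE) == O_WRONLY); /* correct mode test */

            flags = O_RDONLY | O_NONBLOCK;
            assert((flags & O_ACCMODE) == O_RDONLY);
            return 0;
    }
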
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index ef3a55bf86b6..28cb6e735943 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -689,8 +689,13 @@ int cifs_get_inode_info(struct inode **pinode,
689#ifdef CONFIG_CIFS_EXPERIMENTAL 689#ifdef CONFIG_CIFS_EXPERIMENTAL
690 /* fill in 0777 bits from ACL */ 690 /* fill in 0777 bits from ACL */
691 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { 691 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
692 cFYI(1, "Getting mode bits from ACL"); 692 rc = cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path,
693 cifs_acl_to_fattr(cifs_sb, &fattr, *pinode, full_path, pfid); 693 pfid);
694 if (rc) {
695 cFYI(1, "%s: Getting ACL failed with error: %d",
696 __func__, rc);
697 goto cgii_exit;
698 }
694 } 699 }
695#endif 700#endif
696 701
@@ -881,8 +886,10 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
881 rc = cifs_get_inode_info(&inode, full_path, NULL, sb, 886 rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
882 xid, NULL); 887 xid, NULL);
883 888
884 if (!inode) 889 if (!inode) {
885 return ERR_PTR(rc); 890 inode = ERR_PTR(rc);
891 goto out;
892 }
886 893
887#ifdef CONFIG_CIFS_FSCACHE 894#ifdef CONFIG_CIFS_FSCACHE
888 /* populate tcon->resource_id */ 895 /* populate tcon->resource_id */
@@ -898,13 +905,11 @@ struct inode *cifs_root_iget(struct super_block *sb, unsigned long ino)
898 inode->i_uid = cifs_sb->mnt_uid; 905 inode->i_uid = cifs_sb->mnt_uid;
899 inode->i_gid = cifs_sb->mnt_gid; 906 inode->i_gid = cifs_sb->mnt_gid;
900 } else if (rc) { 907 } else if (rc) {
901 kfree(full_path);
902 _FreeXid(xid);
903 iget_failed(inode); 908 iget_failed(inode);
904 return ERR_PTR(rc); 909 inode = ERR_PTR(rc);
905 } 910 }
906 911
907 912out:
908 kfree(full_path); 913 kfree(full_path);
909 /* can not call macro FreeXid here since in a void func 914 /* can not call macro FreeXid here since in a void func
910 * TODO: This is no longer true 915 * TODO: This is no longer true
@@ -1670,7 +1675,9 @@ cifs_inode_needs_reval(struct inode *inode)
1670 return false; 1675 return false;
1671} 1676}
1672 1677
1673/* check invalid_mapping flag and zap the cache if it's set */ 1678/*
1679 * Zap the cache. Called when invalid_mapping flag is set.
1680 */
1674static void 1681static void
1675cifs_invalidate_mapping(struct inode *inode) 1682cifs_invalidate_mapping(struct inode *inode)
1676{ 1683{
@@ -2115,9 +2122,14 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
2115 if (attrs->ia_valid & ATTR_MODE) { 2122 if (attrs->ia_valid & ATTR_MODE) {
2116 rc = 0; 2123 rc = 0;
2117#ifdef CONFIG_CIFS_EXPERIMENTAL 2124#ifdef CONFIG_CIFS_EXPERIMENTAL
2118 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) 2125 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
2119 rc = mode_to_acl(inode, full_path, mode); 2126 rc = mode_to_cifs_acl(inode, full_path, mode);
2120 else 2127 if (rc) {
2128 cFYI(1, "%s: Setting ACL failed with error: %d",
2129 __func__, rc);
2130 goto cifs_setattr_exit;
2131 }
2132 } else
2121#endif 2133#endif
2122 if (((mode & S_IWUGO) == 0) && 2134 if (((mode & S_IWUGO) == 0) &&
2123 (cifsInode->cifsAttrs & ATTR_READONLY) == 0) { 2135 (cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index ef7bb7b50f58..32d300e8f20e 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -226,26 +226,29 @@ static int initiate_cifs_search(const int xid, struct file *file)
226 char *full_path = NULL; 226 char *full_path = NULL;
227 struct cifsFileInfo *cifsFile; 227 struct cifsFileInfo *cifsFile;
228 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 228 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
229 struct tcon_link *tlink; 229 struct tcon_link *tlink = NULL;
230 struct cifsTconInfo *pTcon; 230 struct cifsTconInfo *pTcon;
231 231
232 tlink = cifs_sb_tlink(cifs_sb);
233 if (IS_ERR(tlink))
234 return PTR_ERR(tlink);
235 pTcon = tlink_tcon(tlink);
236
237 if (file->private_data == NULL)
238 file->private_data =
239 kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
240 if (file->private_data == NULL) { 232 if (file->private_data == NULL) {
241 rc = -ENOMEM; 233 tlink = cifs_sb_tlink(cifs_sb);
242 goto error_exit; 234 if (IS_ERR(tlink))
235 return PTR_ERR(tlink);
236
237 cifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
238 if (cifsFile == NULL) {
239 rc = -ENOMEM;
240 goto error_exit;
241 }
242 file->private_data = cifsFile;
243 cifsFile->tlink = cifs_get_tlink(tlink);
244 pTcon = tlink_tcon(tlink);
245 } else {
246 cifsFile = file->private_data;
247 pTcon = tlink_tcon(cifsFile->tlink);
243 } 248 }
244 249
245 cifsFile = file->private_data;
246 cifsFile->invalidHandle = true; 250 cifsFile->invalidHandle = true;
247 cifsFile->srch_inf.endOfSearch = false; 251 cifsFile->srch_inf.endOfSearch = false;
248 cifsFile->tlink = cifs_get_tlink(tlink);
249 252
250 full_path = build_path_from_dentry(file->f_path.dentry); 253 full_path = build_path_from_dentry(file->f_path.dentry);
251 if (full_path == NULL) { 254 if (full_path == NULL) {
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index a264b744bb41..eae2a1491608 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -30,10 +30,11 @@
30 30
31#define MAX_EA_VALUE_SIZE 65535 31#define MAX_EA_VALUE_SIZE 65535
32#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib" 32#define CIFS_XATTR_DOS_ATTRIB "user.DosAttrib"
33#define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
33#define CIFS_XATTR_USER_PREFIX "user." 34#define CIFS_XATTR_USER_PREFIX "user."
34#define CIFS_XATTR_SYSTEM_PREFIX "system." 35#define CIFS_XATTR_SYSTEM_PREFIX "system."
35#define CIFS_XATTR_OS2_PREFIX "os2." 36#define CIFS_XATTR_OS2_PREFIX "os2."
36#define CIFS_XATTR_SECURITY_PREFIX ".security" 37#define CIFS_XATTR_SECURITY_PREFIX "security."
37#define CIFS_XATTR_TRUSTED_PREFIX "trusted." 38#define CIFS_XATTR_TRUSTED_PREFIX "trusted."
38#define XATTR_TRUSTED_PREFIX_LEN 8 39#define XATTR_TRUSTED_PREFIX_LEN 8
39#define XATTR_SECURITY_PREFIX_LEN 9 40#define XATTR_SECURITY_PREFIX_LEN 9
@@ -277,29 +278,8 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
277 cifs_sb->local_nls, 278 cifs_sb->local_nls,
278 cifs_sb->mnt_cifs_flags & 279 cifs_sb->mnt_cifs_flags &
279 CIFS_MOUNT_MAP_SPECIAL_CHR); 280 CIFS_MOUNT_MAP_SPECIAL_CHR);
280#ifdef CONFIG_CIFS_EXPERIMENTAL
281 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
282 __u16 fid;
283 int oplock = 0;
284 struct cifs_ntsd *pacl = NULL;
285 __u32 buflen = 0;
286 if (experimEnabled)
287 rc = CIFSSMBOpen(xid, pTcon, full_path,
288 FILE_OPEN, GENERIC_READ, 0, &fid,
289 &oplock, NULL, cifs_sb->local_nls,
290 cifs_sb->mnt_cifs_flags &
291 CIFS_MOUNT_MAP_SPECIAL_CHR);
292 /* else rc is EOPNOTSUPP from above */
293
294 if (rc == 0) {
295 rc = CIFSSMBGetCIFSACL(xid, pTcon, fid, &pacl,
296 &buflen);
297 CIFSSMBClose(xid, pTcon, fid);
298 }
299 }
300#endif /* EXPERIMENTAL */
301#else 281#else
302 cFYI(1, "query POSIX ACL not supported yet"); 282 cFYI(1, "Query POSIX ACL not supported yet");
303#endif /* CONFIG_CIFS_POSIX */ 283#endif /* CONFIG_CIFS_POSIX */
304 } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT, 284 } else if (strncmp(ea_name, POSIX_ACL_XATTR_DEFAULT,
305 strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) { 285 strlen(POSIX_ACL_XATTR_DEFAULT)) == 0) {
@@ -311,8 +291,33 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
311 cifs_sb->mnt_cifs_flags & 291 cifs_sb->mnt_cifs_flags &
312 CIFS_MOUNT_MAP_SPECIAL_CHR); 292 CIFS_MOUNT_MAP_SPECIAL_CHR);
313#else 293#else
314 cFYI(1, "query POSIX default ACL not supported yet"); 294 cFYI(1, "Query POSIX default ACL not supported yet");
315#endif 295#endif /* CONFIG_CIFS_POSIX */
296 } else if (strncmp(ea_name, CIFS_XATTR_CIFS_ACL,
297 strlen(CIFS_XATTR_CIFS_ACL)) == 0) {
298#ifdef CONFIG_CIFS_ACL
299 u32 acllen;
300 struct cifs_ntsd *pacl;
301
302 pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
303 full_path, &acllen);
304 if (IS_ERR(pacl)) {
305 rc = PTR_ERR(pacl);
306 cERROR(1, "%s: error %zd getting sec desc",
307 __func__, rc);
308 } else {
309 if (ea_value) {
310 if (acllen > buf_size)
311 acllen = -ERANGE;
312 else
313 memcpy(ea_value, pacl, acllen);
314 }
315 rc = acllen;
316 kfree(pacl);
317 }
318#else
319 cFYI(1, "Query CIFS ACL not supported yet");
320#endif /* CONFIG_CIFS_ACL */
316 } else if (strncmp(ea_name, 321 } else if (strncmp(ea_name,
317 CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) { 322 CIFS_XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0) {
318 cFYI(1, "Trusted xattr namespace not supported yet"); 323 cFYI(1, "Trusted xattr namespace not supported yet");
diff --git a/fs/compat.c b/fs/compat.c
index c580c322fa6b..eb1740ac8c0a 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1350,6 +1350,10 @@ static int compat_count(compat_uptr_t __user *argv, int max)
1350 argv++; 1350 argv++;
1351 if (i++ >= max) 1351 if (i++ >= max)
1352 return -E2BIG; 1352 return -E2BIG;
1353
1354 if (fatal_signal_pending(current))
1355 return -ERESTARTNOHAND;
1356 cond_resched();
1353 } 1357 }
1354 } 1358 }
1355 return i; 1359 return i;
@@ -1391,6 +1395,12 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
1391 while (len > 0) { 1395 while (len > 0) {
1392 int offset, bytes_to_copy; 1396 int offset, bytes_to_copy;
1393 1397
1398 if (fatal_signal_pending(current)) {
1399 ret = -ERESTARTNOHAND;
1400 goto out;
1401 }
1402 cond_resched();
1403
1394 offset = pos % PAGE_SIZE; 1404 offset = pos % PAGE_SIZE;
1395 if (offset == 0) 1405 if (offset == 0)
1396 offset = PAGE_SIZE; 1406 offset = PAGE_SIZE;
@@ -1407,18 +1417,8 @@ static int compat_copy_strings(int argc, compat_uptr_t __user *argv,
1407 if (!kmapped_page || kpos != (pos & PAGE_MASK)) { 1417 if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
1408 struct page *page; 1418 struct page *page;
1409 1419
1410#ifdef CONFIG_STACK_GROWSUP 1420 page = get_arg_page(bprm, pos, 1);
1411 ret = expand_stack_downwards(bprm->vma, pos); 1421 if (!page) {
1412 if (ret < 0) {
1413 /* We've exceed the stack rlimit. */
1414 ret = -E2BIG;
1415 goto out;
1416 }
1417#endif
1418 ret = get_user_pages(current, bprm->mm, pos,
1419 1, 1, 1, &page, NULL);
1420 if (ret <= 0) {
1421 /* We've exceed the stack rlimit. */
1422 ret = -E2BIG; 1422 ret = -E2BIG;
1423 goto out; 1423 goto out;
1424 } 1424 }
@@ -1539,8 +1539,10 @@ int compat_do_execve(char * filename,
1539 return retval; 1539 return retval;
1540 1540
1541out: 1541out:
1542 if (bprm->mm) 1542 if (bprm->mm) {
1543 acct_arg_size(bprm, 0);
1543 mmput(bprm->mm); 1544 mmput(bprm->mm);
1545 }
1544 1546
1545out_file: 1547out_file:
1546 if (bprm->file) { 1548 if (bprm->file) {
diff --git a/fs/exec.c b/fs/exec.c
index 99d33a1371e9..d68c378a3137 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -164,7 +164,26 @@ out:
164 164
165#ifdef CONFIG_MMU 165#ifdef CONFIG_MMU
166 166
167static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, 167void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
168{
169 struct mm_struct *mm = current->mm;
170 long diff = (long)(pages - bprm->vma_pages);
171
172 if (!mm || !diff)
173 return;
174
175 bprm->vma_pages = pages;
176
177#ifdef SPLIT_RSS_COUNTING
178 add_mm_counter(mm, MM_ANONPAGES, diff);
179#else
180 spin_lock(&mm->page_table_lock);
181 add_mm_counter(mm, MM_ANONPAGES, diff);
182 spin_unlock(&mm->page_table_lock);
183#endif
184}
185
186struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
168 int write) 187 int write)
169{ 188{
170 struct page *page; 189 struct page *page;
@@ -186,6 +205,8 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
186 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start; 205 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
187 struct rlimit *rlim; 206 struct rlimit *rlim;
188 207
208 acct_arg_size(bprm, size / PAGE_SIZE);
209
189 /* 210 /*
190 * We've historically supported up to 32 pages (ARG_MAX) 211 * We've historically supported up to 32 pages (ARG_MAX)
191 * of argument strings even with small stacks 212 * of argument strings even with small stacks
@@ -276,7 +297,11 @@ static bool valid_arg_len(struct linux_binprm *bprm, long len)
276 297
277#else 298#else
278 299
279static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, 300void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
301{
302}
303
304struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
280 int write) 305 int write)
281{ 306{
282 struct page *page; 307 struct page *page;
@@ -1003,6 +1028,7 @@ int flush_old_exec(struct linux_binprm * bprm)
1003 /* 1028 /*
1004 * Release all of the old mmap stuff 1029 * Release all of the old mmap stuff
1005 */ 1030 */
1031 acct_arg_size(bprm, 0);
1006 retval = exec_mmap(bprm->mm); 1032 retval = exec_mmap(bprm->mm);
1007 if (retval) 1033 if (retval)
1008 goto out; 1034 goto out;
@@ -1426,8 +1452,10 @@ int do_execve(const char * filename,
1426 return retval; 1452 return retval;
1427 1453
1428out: 1454out:
1429 if (bprm->mm) 1455 if (bprm->mm) {
1430 mmput (bprm->mm); 1456 acct_arg_size(bprm, 0);
1457 mmput(bprm->mm);
1458 }
1431 1459
1432out_file: 1460out_file:
1433 if (bprm->file) { 1461 if (bprm->file) {
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index c8224587123f..9242d294fe90 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -134,6 +134,7 @@ EXPORT_SYMBOL_GPL(fuse_do_open);
134void fuse_finish_open(struct inode *inode, struct file *file) 134void fuse_finish_open(struct inode *inode, struct file *file)
135{ 135{
136 struct fuse_file *ff = file->private_data; 136 struct fuse_file *ff = file->private_data;
137 struct fuse_conn *fc = get_fuse_conn(inode);
137 138
138 if (ff->open_flags & FOPEN_DIRECT_IO) 139 if (ff->open_flags & FOPEN_DIRECT_IO)
139 file->f_op = &fuse_direct_io_file_operations; 140 file->f_op = &fuse_direct_io_file_operations;
@@ -141,6 +142,15 @@ void fuse_finish_open(struct inode *inode, struct file *file)
141 invalidate_inode_pages2(inode->i_mapping); 142 invalidate_inode_pages2(inode->i_mapping);
142 if (ff->open_flags & FOPEN_NONSEEKABLE) 143 if (ff->open_flags & FOPEN_NONSEEKABLE)
143 nonseekable_open(inode, file); 144 nonseekable_open(inode, file);
145 if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
146 struct fuse_inode *fi = get_fuse_inode(inode);
147
148 spin_lock(&fc->lock);
149 fi->attr_version = ++fc->attr_version;
150 i_size_write(inode, 0);
151 spin_unlock(&fc->lock);
152 fuse_invalidate_attr(inode);
153 }
144} 154}
145 155
146int fuse_open_common(struct inode *inode, struct file *file, bool isdir) 156int fuse_open_common(struct inode *inode, struct file *file, bool isdir)
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index 58a9b9998b42..f606baf9ba72 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -631,6 +631,7 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
631 struct fs_disk_quota *fdq) 631 struct fs_disk_quota *fdq)
632{ 632{
633 struct inode *inode = &ip->i_inode; 633 struct inode *inode = &ip->i_inode;
634 struct gfs2_sbd *sdp = GFS2_SB(inode);
634 struct address_space *mapping = inode->i_mapping; 635 struct address_space *mapping = inode->i_mapping;
635 unsigned long index = loc >> PAGE_CACHE_SHIFT; 636 unsigned long index = loc >> PAGE_CACHE_SHIFT;
636 unsigned offset = loc & (PAGE_CACHE_SIZE - 1); 637 unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
@@ -658,11 +659,11 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
658 qd->qd_qb.qb_value = qp->qu_value; 659 qd->qd_qb.qb_value = qp->qu_value;
659 if (fdq) { 660 if (fdq) {
660 if (fdq->d_fieldmask & FS_DQ_BSOFT) { 661 if (fdq->d_fieldmask & FS_DQ_BSOFT) {
661 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit); 662 qp->qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
662 qd->qd_qb.qb_warn = qp->qu_warn; 663 qd->qd_qb.qb_warn = qp->qu_warn;
663 } 664 }
664 if (fdq->d_fieldmask & FS_DQ_BHARD) { 665 if (fdq->d_fieldmask & FS_DQ_BHARD) {
665 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit); 666 qp->qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
666 qd->qd_qb.qb_limit = qp->qu_limit; 667 qd->qd_qb.qb_limit = qp->qu_limit;
667 } 668 }
668 } 669 }
@@ -1497,9 +1498,9 @@ static int gfs2_get_dqblk(struct super_block *sb, int type, qid_t id,
1497 fdq->d_version = FS_DQUOT_VERSION; 1498 fdq->d_version = FS_DQUOT_VERSION;
1498 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA; 1499 fdq->d_flags = (type == QUOTA_USER) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
1499 fdq->d_id = id; 1500 fdq->d_id = id;
1500 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit); 1501 fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
1501 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn); 1502 fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
1502 fdq->d_bcount = be64_to_cpu(qlvb->qb_value); 1503 fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
1503 1504
1504 gfs2_glock_dq_uninit(&q_gh); 1505 gfs2_glock_dq_uninit(&q_gh);
1505out: 1506out:
@@ -1566,10 +1567,10 @@ static int gfs2_set_dqblk(struct super_block *sb, int type, qid_t id,
1566 1567
1567 /* If nothing has changed, this is a no-op */ 1568 /* If nothing has changed, this is a no-op */
1568 if ((fdq->d_fieldmask & FS_DQ_BSOFT) && 1569 if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
1569 (fdq->d_blk_softlimit == be64_to_cpu(qd->qd_qb.qb_warn))) 1570 ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
1570 fdq->d_fieldmask ^= FS_DQ_BSOFT; 1571 fdq->d_fieldmask ^= FS_DQ_BSOFT;
1571 if ((fdq->d_fieldmask & FS_DQ_BHARD) && 1572 if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
1572 (fdq->d_blk_hardlimit == be64_to_cpu(qd->qd_qb.qb_limit))) 1573 ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
1573 fdq->d_fieldmask ^= FS_DQ_BHARD; 1574 fdq->d_fieldmask ^= FS_DQ_BHARD;
1574 if (fdq->d_fieldmask == 0) 1575 if (fdq->d_fieldmask == 0)
1575 goto out_i; 1576 goto out_i;
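
Every gfs2 hunk above corrects the same unit mismatch: on-disk quota limits are in filesystem blocks while struct fs_disk_quota speaks 512-byte basic blocks, so values must be shifted by sd_fsb2bb_shift on the way out and shifted back before any comparison. A small runnable model of the conversion (a 4 KiB block size gives a shift of 3):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* 4 KiB fs blocks -> 512-byte basic blocks: shift by 3. */
            unsigned int fsb2bb_shift = 3;

            uint64_t limit_fsb = 1000;                      /* on disk */
            uint64_t limit_bb = limit_fsb << fsb2bb_shift;  /* to user */
            assert(limit_bb == 8000);

            /* A user value must be shifted back before comparing. */
            uint64_t user_bb = 8000;
            assert((user_bb >> fsb2bb_shift) == limit_fsb); /* no-op seen */
            return 0;
    }
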
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 2f7d05c89922..7da2a06508e5 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -103,22 +103,15 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
103 } 103 }
104 104
105 ret = -ESRCH; 105 ret = -ESRCH;
106 /* 106 rcu_read_lock();
107 * We want IOPRIO_WHO_PGRP/IOPRIO_WHO_USER to be "atomic",
108 * so we can't use rcu_read_lock(). See re-copy of ->ioprio
109 * in copy_process().
110 */
111 read_lock(&tasklist_lock);
112 switch (which) { 107 switch (which) {
113 case IOPRIO_WHO_PROCESS: 108 case IOPRIO_WHO_PROCESS:
114 rcu_read_lock();
115 if (!who) 109 if (!who)
116 p = current; 110 p = current;
117 else 111 else
118 p = find_task_by_vpid(who); 112 p = find_task_by_vpid(who);
119 if (p) 113 if (p)
120 ret = set_task_ioprio(p, ioprio); 114 ret = set_task_ioprio(p, ioprio);
121 rcu_read_unlock();
122 break; 115 break;
123 case IOPRIO_WHO_PGRP: 116 case IOPRIO_WHO_PGRP:
124 if (!who) 117 if (!who)
@@ -141,12 +134,7 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
141 break; 134 break;
142 135
143 do_each_thread(g, p) { 136 do_each_thread(g, p) {
144 int match; 137 if (__task_cred(p)->uid != who)
145
146 rcu_read_lock();
147 match = __task_cred(p)->uid == who;
148 rcu_read_unlock();
149 if (!match)
150 continue; 138 continue;
151 ret = set_task_ioprio(p, ioprio); 139 ret = set_task_ioprio(p, ioprio);
152 if (ret) 140 if (ret)
@@ -160,7 +148,7 @@ free_uid:
160 ret = -EINVAL; 148 ret = -EINVAL;
161 } 149 }
162 150
163 read_unlock(&tasklist_lock); 151 rcu_read_unlock();
164 return ret; 152 return ret;
165} 153}
166 154
@@ -204,17 +192,15 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
204 int ret = -ESRCH; 192 int ret = -ESRCH;
205 int tmpio; 193 int tmpio;
206 194
207 read_lock(&tasklist_lock); 195 rcu_read_lock();
208 switch (which) { 196 switch (which) {
209 case IOPRIO_WHO_PROCESS: 197 case IOPRIO_WHO_PROCESS:
210 rcu_read_lock();
211 if (!who) 198 if (!who)
212 p = current; 199 p = current;
213 else 200 else
214 p = find_task_by_vpid(who); 201 p = find_task_by_vpid(who);
215 if (p) 202 if (p)
216 ret = get_task_ioprio(p); 203 ret = get_task_ioprio(p);
217 rcu_read_unlock();
218 break; 204 break;
219 case IOPRIO_WHO_PGRP: 205 case IOPRIO_WHO_PGRP:
220 if (!who) 206 if (!who)
@@ -241,12 +227,7 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
241 break; 227 break;
242 228
243 do_each_thread(g, p) { 229 do_each_thread(g, p) {
244 int match; 230 if (__task_cred(p)->uid != user->uid)
245
246 rcu_read_lock();
247 match = __task_cred(p)->uid == user->uid;
248 rcu_read_unlock();
249 if (!match)
250 continue; 231 continue;
251 tmpio = get_task_ioprio(p); 232 tmpio = get_task_ioprio(p);
252 if (tmpio < 0) 233 if (tmpio < 0)
@@ -264,6 +245,6 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
264 ret = -EINVAL; 245 ret = -EINVAL;
265 } 246 }
266 247
267 read_unlock(&tasklist_lock); 248 rcu_read_unlock();
268 return ret; 249 return ret;
269} 250}
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 662df2a5fad5..f0a384e2ae63 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -162,6 +162,7 @@ struct nfs_cache_array_entry {
162 u64 cookie; 162 u64 cookie;
163 u64 ino; 163 u64 ino;
164 struct qstr string; 164 struct qstr string;
165 unsigned char d_type;
165}; 166};
166 167
167struct nfs_cache_array { 168struct nfs_cache_array {
@@ -171,8 +172,6 @@ struct nfs_cache_array {
171 struct nfs_cache_array_entry array[0]; 172 struct nfs_cache_array_entry array[0];
172}; 173};
173 174
174#define MAX_READDIR_ARRAY ((PAGE_SIZE - sizeof(struct nfs_cache_array)) / sizeof(struct nfs_cache_array_entry))
175
176typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int); 175typedef __be32 * (*decode_dirent_t)(struct xdr_stream *, struct nfs_entry *, struct nfs_server *, int);
177typedef struct { 176typedef struct {
178 struct file *file; 177 struct file *file;
@@ -257,13 +256,17 @@ int nfs_readdir_add_to_array(struct nfs_entry *entry, struct page *page)
257 256
258 if (IS_ERR(array)) 257 if (IS_ERR(array))
259 return PTR_ERR(array); 258 return PTR_ERR(array);
259
260 cache_entry = &array->array[array->size];
261
262 /* Check that this entry lies within the page bounds */
260 ret = -ENOSPC; 263 ret = -ENOSPC;
261 if (array->size >= MAX_READDIR_ARRAY) 264 if ((char *)&cache_entry[1] - (char *)page_address(page) > PAGE_SIZE)
262 goto out; 265 goto out;
263 266
264 cache_entry = &array->array[array->size];
265 cache_entry->cookie = entry->prev_cookie; 267 cache_entry->cookie = entry->prev_cookie;
266 cache_entry->ino = entry->ino; 268 cache_entry->ino = entry->ino;
269 cache_entry->d_type = entry->d_type;
267 ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len); 270 ret = nfs_readdir_make_qstr(&cache_entry->string, entry->name, entry->len);
268 if (ret) 271 if (ret)
269 goto out; 272 goto out;
@@ -392,13 +395,9 @@ int xdr_decode(nfs_readdir_descriptor_t *desc, struct nfs_entry *entry, struct x
392static 395static
393int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry) 396int nfs_same_file(struct dentry *dentry, struct nfs_entry *entry)
394{ 397{
395 struct nfs_inode *node;
396 if (dentry->d_inode == NULL) 398 if (dentry->d_inode == NULL)
397 goto different; 399 goto different;
398 node = NFS_I(dentry->d_inode); 400 if (nfs_compare_fh(entry->fh, NFS_FH(dentry->d_inode)) != 0)
399 if (node->fh.size != entry->fh->size)
400 goto different;
401 if (strncmp(node->fh.data, entry->fh->data, node->fh.size) != 0)
402 goto different; 401 goto different;
403 return 1; 402 return 1;
404different: 403different:
@@ -466,8 +465,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
466 struct xdr_stream stream; 465 struct xdr_stream stream;
467 struct xdr_buf buf; 466 struct xdr_buf buf;
468 __be32 *ptr = xdr_page; 467 __be32 *ptr = xdr_page;
469 int status;
470 struct nfs_cache_array *array; 468 struct nfs_cache_array *array;
469 unsigned int count = 0;
470 int status;
471 471
472 buf.head->iov_base = xdr_page; 472 buf.head->iov_base = xdr_page;
473 buf.head->iov_len = buflen; 473 buf.head->iov_len = buflen;
@@ -488,6 +488,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
488 break; 488 break;
489 } 489 }
490 490
491 count++;
492
491 if (desc->plus == 1) 493 if (desc->plus == 1)
492 nfs_prime_dcache(desc->file->f_path.dentry, entry); 494 nfs_prime_dcache(desc->file->f_path.dentry, entry);
493 495
@@ -496,13 +498,14 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
496 break; 498 break;
497 } while (!entry->eof); 499 } while (!entry->eof);
498 500
499 if (status == -EBADCOOKIE && entry->eof) { 501 if (count == 0 || (status == -EBADCOOKIE && entry->eof == 1)) {
500 array = nfs_readdir_get_array(page); 502 array = nfs_readdir_get_array(page);
501 if (!IS_ERR(array)) { 503 if (!IS_ERR(array)) {
502 array->eof_index = array->size; 504 array->eof_index = array->size;
503 status = 0; 505 status = 0;
504 nfs_readdir_release_array(page); 506 nfs_readdir_release_array(page);
505 } 507 } else
508 status = PTR_ERR(array);
506 } 509 }
507 return status; 510 return status;
508} 511}
@@ -696,21 +699,23 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
696 int i = 0; 699 int i = 0;
697 int res = 0; 700 int res = 0;
698 struct nfs_cache_array *array = NULL; 701 struct nfs_cache_array *array = NULL;
699 unsigned int d_type = DT_UNKNOWN;
700 struct dentry *dentry = NULL;
701 702
702 array = nfs_readdir_get_array(desc->page); 703 array = nfs_readdir_get_array(desc->page);
703 if (IS_ERR(array)) 704 if (IS_ERR(array)) {
704 return PTR_ERR(array); 705 res = PTR_ERR(array);
706 goto out;
707 }
705 708
706 for (i = desc->cache_entry_index; i < array->size; i++) { 709 for (i = desc->cache_entry_index; i < array->size; i++) {
707 d_type = DT_UNKNOWN; 710 struct nfs_cache_array_entry *ent;
708 711
709 res = filldir(dirent, array->array[i].string.name, 712 ent = &array->array[i];
710 array->array[i].string.len, file->f_pos, 713 if (filldir(dirent, ent->string.name, ent->string.len,
711 nfs_compat_user_ino64(array->array[i].ino), d_type); 714 file->f_pos, nfs_compat_user_ino64(ent->ino),
712 if (res < 0) 715 ent->d_type) < 0) {
716 desc->eof = 1;
713 break; 717 break;
718 }
714 file->f_pos++; 719 file->f_pos++;
715 desc->cache_entry_index = i; 720 desc->cache_entry_index = i;
716 if (i < (array->size-1)) 721 if (i < (array->size-1))
@@ -722,9 +727,8 @@ int nfs_do_filldir(nfs_readdir_descriptor_t *desc, void *dirent,
722 desc->eof = 1; 727 desc->eof = 1;
723 728
724 nfs_readdir_release_array(desc->page); 729 nfs_readdir_release_array(desc->page);
730out:
725 cache_page_release(desc); 731 cache_page_release(desc);
726 if (dentry != NULL)
727 dput(dentry);
728 dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n", 732 dfprintk(DIRCACHE, "NFS: nfs_do_filldir() filling ended @ cookie %Lu; returning = %d\n",
729 (unsigned long long)*desc->dir_cookie, res); 733 (unsigned long long)*desc->dir_cookie, res);
730 return res; 734 return res;
@@ -759,13 +763,13 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
759 goto out; 763 goto out;
760 } 764 }
761 765
762 if (nfs_readdir_xdr_to_array(desc, page, inode) == -1) {
763 status = -EIO;
764 goto out_release;
765 }
766
767 desc->page_index = 0; 766 desc->page_index = 0;
768 desc->page = page; 767 desc->page = page;
768
769 status = nfs_readdir_xdr_to_array(desc, page, inode);
770 if (status < 0)
771 goto out_release;
772
769 status = nfs_do_filldir(desc, dirent, filldir); 773 status = nfs_do_filldir(desc, dirent, filldir);
770 774
771 out: 775 out:
@@ -816,14 +820,14 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
816 res = readdir_search_pagecache(desc); 820 res = readdir_search_pagecache(desc);
817 821
818 if (res == -EBADCOOKIE) { 822 if (res == -EBADCOOKIE) {
823 res = 0;
819 /* This means either end of directory */ 824 /* This means either end of directory */
820 if (*desc->dir_cookie && desc->eof == 0) { 825 if (*desc->dir_cookie && desc->eof == 0) {
821 /* Or that the server has 'lost' a cookie */ 826 /* Or that the server has 'lost' a cookie */
822 res = uncached_readdir(desc, dirent, filldir); 827 res = uncached_readdir(desc, dirent, filldir);
823 if (res >= 0) 828 if (res == 0)
824 continue; 829 continue;
825 } 830 }
826 res = 0;
827 break; 831 break;
828 } 832 }
829 if (res == -ETOOSMALL && desc->plus) { 833 if (res == -ETOOSMALL && desc->plus) {
@@ -838,10 +842,8 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
838 break; 842 break;
839 843
840 res = nfs_do_filldir(desc, dirent, filldir); 844 res = nfs_do_filldir(desc, dirent, filldir);
841 if (res < 0) { 845 if (res < 0)
842 res = 0;
843 break; 846 break;
844 }
845 } 847 }
846out: 848out:
847 nfs_unblock_sillyrename(dentry); 849 nfs_unblock_sillyrename(dentry);
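
Caching d_type in the nfs readdir array (and threading it through filldir) is what lets userspace learn entry types without issuing a stat() per name. A runnable consumer of that information via plain readdir(); note that DT_UNKNOWN still means the filesystem declined to say, and a stat() fallback is then needed:

    #define _DEFAULT_SOURCE         /* expose d_type and the DT_* macros */
    #include <dirent.h>
    #include <stdio.h>

    int main(void)
    {
            DIR *d = opendir(".");
            struct dirent *e;

            if (!d)
                    return 1;
            while ((e = readdir(d)) != NULL)
                    printf("%-20s %s\n", e->d_name,
                           e->d_type == DT_DIR ? "dir" :
                           e->d_type == DT_REG ? "file" : "other/unknown");
            closedir(d);
            return 0;
    }
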
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 84d3c8b90206..e6ace0d93c71 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -867,7 +867,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
867 goto out; 867 goto out;
868 nfs_alloc_commit_data(dreq); 868 nfs_alloc_commit_data(dreq);
869 869
870 if (dreq->commit_data == NULL || count < wsize) 870 if (dreq->commit_data == NULL || count <= wsize)
871 sync = NFS_FILE_SYNC; 871 sync = NFS_FILE_SYNC;
872 872
873 dreq->inode = inode; 873 dreq->inode = inode;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index db08ff3ff454..e6356b750b77 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -362,6 +362,15 @@ unsigned int nfs_page_length(struct page *page)
362} 362}
363 363
364/* 364/*
365 * Convert a umode to a dirent->d_type
366 */
367static inline
368unsigned char nfs_umode_to_dtype(umode_t mode)
369{
370 return (mode >> 12) & 15;
371}
372
373/*
365 * Determine the number of pages in an array of length 'len' and 374 * Determine the number of pages in an array of length 'len' and
366 * with a base offset of 'base' 375 * with a base offset of 'base'
367 */ 376 */
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 2563f765c9b4..5914a1911c95 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -485,6 +485,8 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
485 entry->prev_cookie = entry->cookie; 485 entry->prev_cookie = entry->cookie;
486 entry->cookie = ntohl(*p++); 486 entry->cookie = ntohl(*p++);
487 487
488 entry->d_type = DT_UNKNOWN;
489
488 p = xdr_inline_peek(xdr, 8); 490 p = xdr_inline_peek(xdr, 8);
489 if (p != NULL) 491 if (p != NULL)
490 entry->eof = !p[0] && p[1]; 492 entry->eof = !p[0] && p[1];
@@ -495,7 +497,7 @@ nfs_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_se
495 497
496out_overflow: 498out_overflow:
497 print_overflow_msg(__func__, xdr); 499 print_overflow_msg(__func__, xdr);
498 return ERR_PTR(-EIO); 500 return ERR_PTR(-EAGAIN);
499} 501}
500 502
501/* 503/*
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index 748dc91a4a14..f6cc60f06dac 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -622,11 +622,13 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
622 entry->prev_cookie = entry->cookie; 622 entry->prev_cookie = entry->cookie;
623 p = xdr_decode_hyper(p, &entry->cookie); 623 p = xdr_decode_hyper(p, &entry->cookie);
624 624
625 entry->d_type = DT_UNKNOWN;
625 if (plus) { 626 if (plus) {
626 entry->fattr->valid = 0; 627 entry->fattr->valid = 0;
627 p = xdr_decode_post_op_attr_stream(xdr, entry->fattr); 628 p = xdr_decode_post_op_attr_stream(xdr, entry->fattr);
628 if (IS_ERR(p)) 629 if (IS_ERR(p))
629 goto out_overflow_exit; 630 goto out_overflow_exit;
631 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
630 /* In fact, a post_op_fh3: */ 632 /* In fact, a post_op_fh3: */
631 p = xdr_inline_decode(xdr, 4); 633 p = xdr_inline_decode(xdr, 4);
632 if (unlikely(!p)) 634 if (unlikely(!p))
@@ -656,7 +658,7 @@ nfs3_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry, struct nfs_s
656out_overflow: 658out_overflow:
657 print_overflow_msg(__func__, xdr); 659 print_overflow_msg(__func__, xdr);
658out_overflow_exit: 660out_overflow_exit:
659 return ERR_PTR(-EIO); 661 return ERR_PTR(-EAGAIN);
660} 662}
661 663
662/* 664/*
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index b7a204ff6fe1..9f1826b012e6 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -6208,6 +6208,10 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
6208 if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID) 6208 if (entry->fattr->valid & NFS_ATTR_FATTR_FILEID)
6209 entry->ino = entry->fattr->fileid; 6209 entry->ino = entry->fattr->fileid;
6210 6210
6211 entry->d_type = DT_UNKNOWN;
6212 if (entry->fattr->valid & NFS_ATTR_FATTR_TYPE)
6213 entry->d_type = nfs_umode_to_dtype(entry->fattr->mode);
6214
6211 if (verify_attr_len(xdr, p, len) < 0) 6215 if (verify_attr_len(xdr, p, len) < 0)
6212 goto out_overflow; 6216 goto out_overflow;
6213 6217
@@ -6221,7 +6225,7 @@ __be32 *nfs4_decode_dirent(struct xdr_stream *xdr, struct nfs_entry *entry,
6221 6225
6222out_overflow: 6226out_overflow:
6223 print_overflow_msg(__func__, xdr); 6227 print_overflow_msg(__func__, xdr);
6224 return ERR_PTR(-EIO); 6228 return ERR_PTR(-EAGAIN);
6225} 6229}
6226 6230
6227/* 6231/*
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 49c844dab33a..59e5fe742f7b 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -335,7 +335,7 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
335 * the device at this point. 335 * the device at this point.
336 * 336 *
337 * To prevent nilfs_dat_translate() from returning the 337 * To prevent nilfs_dat_translate() from returning the
338 * uncommited block number, this makes a copy of the entry 338 * uncommitted block number, this makes a copy of the entry
339 * buffer and redirects nilfs_dat_translate() to the copy. 339 * buffer and redirects nilfs_dat_translate() to the copy.
340 */ 340 */
341 if (!buffer_nilfs_redirected(entry_bh)) { 341 if (!buffer_nilfs_redirected(entry_bh)) {
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 3e90f86d5bfe..e00d9457c256 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -349,8 +349,8 @@ static int nilfs_ioctl_move_blocks(struct super_block *sb,
349 ino = vdesc->vd_ino; 349 ino = vdesc->vd_ino;
350 cno = vdesc->vd_cno; 350 cno = vdesc->vd_cno;
351 inode = nilfs_iget_for_gc(sb, ino, cno); 351 inode = nilfs_iget_for_gc(sb, ino, cno);
352 if (unlikely(inode == NULL)) { 352 if (IS_ERR(inode)) {
353 ret = -ENOMEM; 353 ret = PTR_ERR(inode);
354 goto failed; 354 goto failed;
355 } 355 }
356 do { 356 do {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 52c7557f3e25..9f26ac9be2a4 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1964,8 +1964,10 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1964 if (reg == NULL) 1964 if (reg == NULL)
1965 return ERR_PTR(-ENOMEM); 1965 return ERR_PTR(-ENOMEM);
1966 1966
1967 if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) 1967 if (strlen(name) > O2HB_MAX_REGION_NAME_LEN) {
1968 return ERR_PTR(-ENAMETOOLONG); 1968 ret = -ENAMETOOLONG;
1969 goto free;
1970 }
1969 1971
1970 spin_lock(&o2hb_live_lock); 1972 spin_lock(&o2hb_live_lock);
1971 reg->hr_region_num = 0; 1973 reg->hr_region_num = 0;
@@ -1974,7 +1976,8 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1974 O2NM_MAX_REGIONS); 1976 O2NM_MAX_REGIONS);
1975 if (reg->hr_region_num >= O2NM_MAX_REGIONS) { 1977 if (reg->hr_region_num >= O2NM_MAX_REGIONS) {
1976 spin_unlock(&o2hb_live_lock); 1978 spin_unlock(&o2hb_live_lock);
1977 return ERR_PTR(-EFBIG); 1979 ret = -EFBIG;
1980 goto free;
1978 } 1981 }
1979 set_bit(reg->hr_region_num, o2hb_region_bitmap); 1982 set_bit(reg->hr_region_num, o2hb_region_bitmap);
1980 } 1983 }
@@ -1986,10 +1989,13 @@ static struct config_item *o2hb_heartbeat_group_make_item(struct config_group *g
1986 ret = o2hb_debug_region_init(reg, o2hb_debug_dir); 1989 ret = o2hb_debug_region_init(reg, o2hb_debug_dir);
1987 if (ret) { 1990 if (ret) {
1988 config_item_put(&reg->hr_item); 1991 config_item_put(&reg->hr_item);
1989 return ERR_PTR(ret); 1992 goto free;
1990 } 1993 }
1991 1994
1992 return &reg->hr_item; 1995 return &reg->hr_item;
1996free:
1997 kfree(reg);
1998 return ERR_PTR(ret);
1993} 1999}
1994 2000
1995static void o2hb_heartbeat_group_drop_item(struct config_group *group, 2001static void o2hb_heartbeat_group_drop_item(struct config_group *group,
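
The o2hb fix replaces early returns that leaked reg with a single free: label, the usual kernel idiom for unwinding a partially constructed object on every error path. The shape of the idiom, reduced to a self-contained sketch (make_region() is illustrative):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct region { char name[16]; };

    /* Returns NULL and sets *err on failure; errors funnel to one label. */
    static struct region *make_region(const char *name, int *err)
    {
            struct region *reg = calloc(1, sizeof(*reg));
            int ret;

            if (!reg) {
                    *err = -ENOMEM;
                    return NULL;
            }
            if (strlen(name) >= sizeof(reg->name)) {
                    ret = -ENAMETOOLONG;
                    goto free;      /* was: a bare return, leaking reg */
            }
            strcpy(reg->name, name);
            return reg;
    free:
            free(reg);
            *err = ret;
            return NULL;
    }
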
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index edaded48e7e9..895532ac4d98 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -476,7 +476,6 @@ static void ocfs2_dentry_iput(struct dentry *dentry, struct inode *inode)
476 476
477out: 477out:
478 iput(inode); 478 iput(inode);
479 ocfs2_dentry_attach_gen(dentry);
480} 479}
481 480
482/* 481/*
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 58a93b953735..cc2aaa96cfe5 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -959,7 +959,7 @@ static int dlm_match_regions(struct dlm_ctxt *dlm,
959 r += O2HB_MAX_REGION_NAME_LEN; 959 r += O2HB_MAX_REGION_NAME_LEN;
960 } 960 }
961 961
962 local = kmalloc(sizeof(qr->qr_regions), GFP_KERNEL); 962 local = kmalloc(sizeof(qr->qr_regions), GFP_ATOMIC);
963 if (!local) { 963 if (!local) {
964 status = -ENOMEM; 964 status = -ENOMEM;
965 goto bail; 965 goto bail;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 1efea3615589..70dd3b1798f1 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -159,9 +159,9 @@ struct ocfs2_lock_res {
159 char l_name[OCFS2_LOCK_ID_MAX_LEN]; 159 char l_name[OCFS2_LOCK_ID_MAX_LEN];
160 unsigned int l_ro_holders; 160 unsigned int l_ro_holders;
161 unsigned int l_ex_holders; 161 unsigned int l_ex_holders;
162 char l_level; 162 signed char l_level;
163 char l_requested; 163 signed char l_requested;
164 char l_blocking; 164 signed char l_blocking;
165 165
166 /* Data packed - type enum ocfs2_lock_type */ 166 /* Data packed - type enum ocfs2_lock_type */
167 unsigned char l_type; 167 unsigned char l_type;
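
The ocfs2 lock-level fields store small negative sentinels, and plain char is not guaranteed to be signed: C leaves its signedness implementation-defined, and on ARM and PowerPC plain char is unsigned, so (char)-1 reads back as 255 and tests like l_level < 0 can never fire. A runnable demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
            char plain = -1;        /* implementation-defined: may hold 255 */
            signed char fixed = -1; /* guaranteed to hold -1 everywhere */

            /* On ARM/PowerPC, plain char is unsigned: prints 255 and 0. */
            printf("plain = %d, plain < 0: %d\n", plain, plain < 0);
            printf("fixed = %d, fixed < 0: %d\n", fixed, fixed < 0);
            return 0;
    }
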
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index 252e7c82f929..a5ebe421195f 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -190,7 +190,7 @@ static struct ocfs2_live_connection *ocfs2_connection_find(const char *name)
190 return c; 190 return c;
191 } 191 }
192 192
193 return c; 193 return NULL;
194} 194}
195 195
196/* 196/*
diff --git a/fs/pipe.c b/fs/pipe.c
index a8012a955720..04629f36e397 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -1199,12 +1199,24 @@ int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
1199 return ret; 1199 return ret;
1200} 1200}
1201 1201
1202/*
1203 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1204 * location, so checking ->i_pipe is not enough to verify that this is a
1205 * pipe.
1206 */
1207struct pipe_inode_info *get_pipe_info(struct file *file)
1208{
1209 struct inode *i = file->f_path.dentry->d_inode;
1210
1211 return S_ISFIFO(i->i_mode) ? i->i_pipe : NULL;
1212}
1213
1202long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg) 1214long pipe_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
1203{ 1215{
1204 struct pipe_inode_info *pipe; 1216 struct pipe_inode_info *pipe;
1205 long ret; 1217 long ret;
1206 1218
1207 pipe = file->f_path.dentry->d_inode->i_pipe; 1219 pipe = get_pipe_info(file);
1208 if (!pipe) 1220 if (!pipe)
1209 return -EBADF; 1221 return -EBADF;
1210 1222
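
get_pipe_info() checks S_ISFIFO before touching i_pipe because, after the inode-slimming work, i_pipe, i_bdev and i_cdev share storage and only the mode bits say which member is live. Userspace does the equivalent classification with fstat(); a runnable check:

    #include <assert.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            int fds[2];
            struct stat st;

            assert(pipe(fds) == 0);
            assert(fstat(fds[0], &st) == 0);
            assert(S_ISFIFO(st.st_mode));   /* mode bits identify the FIFO */

            close(fds[0]);
            close(fds[1]);
            return 0;
    }
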
diff --git a/fs/proc/base.c b/fs/proc/base.c
index f3d02ca461ec..182845147fe4 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1574,7 +1574,7 @@ static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
1574 if (!tmp) 1574 if (!tmp)
1575 return -ENOMEM; 1575 return -ENOMEM;
1576 1576
1577 pathname = d_path_with_unreachable(path, tmp, PAGE_SIZE); 1577 pathname = d_path(path, tmp, PAGE_SIZE);
1578 len = PTR_ERR(pathname); 1578 len = PTR_ERR(pathname);
1579 if (IS_ERR(pathname)) 1579 if (IS_ERR(pathname))
1580 goto out; 1580 goto out;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index da6b01d70f01..c126c83b9a45 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -706,6 +706,7 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
706 * skip over unmapped regions. 706 * skip over unmapped regions.
707 */ 707 */
708#define PAGEMAP_WALK_SIZE (PMD_SIZE) 708#define PAGEMAP_WALK_SIZE (PMD_SIZE)
709#define PAGEMAP_WALK_MASK (PMD_MASK)
709static ssize_t pagemap_read(struct file *file, char __user *buf, 710static ssize_t pagemap_read(struct file *file, char __user *buf,
710 size_t count, loff_t *ppos) 711 size_t count, loff_t *ppos)
711{ 712{
@@ -776,7 +777,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
776 unsigned long end; 777 unsigned long end;
777 778
778 pm.pos = 0; 779 pm.pos = 0;
779 end = start_vaddr + PAGEMAP_WALK_SIZE; 780 end = (start_vaddr + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK;
780 /* overflow ? */ 781 /* overflow ? */
781 if (end < start_vaddr || end > end_vaddr) 782 if (end < start_vaddr || end > end_vaddr)
782 end = end_vaddr; 783 end = end_vaddr;
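
The pagemap fix rounds each walk chunk's end down to a PMD boundary, end = (start + PAGEMAP_WALK_SIZE) & PAGEMAP_WALK_MASK, so after a possibly unaligned first chunk every later chunk covers whole PMDs and huge-page entries are never split across walks. The arithmetic in isolation (2 MiB stands in for PMD_SIZE):

    #include <assert.h>
    #include <stdint.h>

    #define WALK_SIZE ((uint64_t)2 << 20)   /* 2 MiB, a typical PMD span */
    #define WALK_MASK (~(WALK_SIZE - 1))    /* clears the low 21 bits */

    int main(void)
    {
            uint64_t start = 0x12345678;    /* unaligned start address */
            uint64_t end = (start + WALK_SIZE) & WALK_MASK;

            assert(end % WALK_SIZE == 0);   /* first chunk ends aligned */
            assert(end > start);            /* and still makes progress */

            start = end;                    /* later chunks are full PMDs */
            end = (start + WALK_SIZE) & WALK_MASK;
            assert(end - start == WALK_SIZE);
            return 0;
    }
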
diff --git a/fs/reiserfs/ioctl.c b/fs/reiserfs/ioctl.c
index bd9763e76bae..79265fdc317a 100644
--- a/fs/reiserfs/ioctl.c
+++ b/fs/reiserfs/ioctl.c
@@ -183,12 +183,11 @@ int reiserfs_unpack(struct inode *inode, struct file *filp)
183 return 0; 183 return 0;
184 } 184 }
185 185
186 /* we need to make sure nobody is changing the file size beneath
187 ** us
188 */
189 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
190 depth = reiserfs_write_lock_once(inode->i_sb); 186 depth = reiserfs_write_lock_once(inode->i_sb);
191 187
188 /* we need to make sure nobody is changing the file size beneath us */
189 reiserfs_mutex_lock_safe(&inode->i_mutex, inode->i_sb);
190
192 write_from = inode->i_size & (blocksize - 1); 191 write_from = inode->i_size & (blocksize - 1);
193 /* if we are on a block boundary, we are already unpacked. */ 192 /* if we are on a block boundary, we are already unpacked. */
194 if (write_from == 0) { 193 if (write_from == 0) {
diff --git a/fs/reiserfs/xattr_acl.c b/fs/reiserfs/xattr_acl.c
index 536d697a8a28..90d2fcb67a31 100644
--- a/fs/reiserfs/xattr_acl.c
+++ b/fs/reiserfs/xattr_acl.c
@@ -472,7 +472,9 @@ int reiserfs_acl_chmod(struct inode *inode)
472 struct reiserfs_transaction_handle th; 472 struct reiserfs_transaction_handle th;
473 size_t size = reiserfs_xattr_nblocks(inode, 473 size_t size = reiserfs_xattr_nblocks(inode,
474 reiserfs_acl_size(clone->a_count)); 474 reiserfs_acl_size(clone->a_count));
475 reiserfs_write_lock(inode->i_sb); 475 int depth;
476
477 depth = reiserfs_write_lock_once(inode->i_sb);
476 error = journal_begin(&th, inode->i_sb, size * 2); 478 error = journal_begin(&th, inode->i_sb, size * 2);
477 if (!error) { 479 if (!error) {
478 int error2; 480 int error2;
@@ -482,7 +484,7 @@ int reiserfs_acl_chmod(struct inode *inode)
482 if (error2) 484 if (error2)
483 error = error2; 485 error = error2;
484 } 486 }
485 reiserfs_write_unlock(inode->i_sb); 487 reiserfs_write_unlock_once(inode->i_sb, depth);
486 } 488 }
487 posix_acl_release(clone); 489 posix_acl_release(clone);
488 return error; 490 return error;
diff --git a/fs/splice.c b/fs/splice.c
index 8f1dfaecc8f0..ce2f02579e35 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1311,18 +1311,6 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1311static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, 1311static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1312 struct pipe_inode_info *opipe, 1312 struct pipe_inode_info *opipe,
1313 size_t len, unsigned int flags); 1313 size_t len, unsigned int flags);
1314/*
1315 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
1316 * location, so checking ->i_pipe is not enough to verify that this is a
1317 * pipe.
1318 */
1319static inline struct pipe_inode_info *pipe_info(struct inode *inode)
1320{
1321 if (S_ISFIFO(inode->i_mode))
1322 return inode->i_pipe;
1323
1324 return NULL;
1325}
1326 1314
1327/* 1315/*
1328 * Determine where to splice to/from. 1316 * Determine where to splice to/from.
@@ -1336,8 +1324,8 @@ static long do_splice(struct file *in, loff_t __user *off_in,
1336 loff_t offset, *off; 1324 loff_t offset, *off;
1337 long ret; 1325 long ret;
1338 1326
1339 ipipe = pipe_info(in->f_path.dentry->d_inode); 1327 ipipe = get_pipe_info(in);
1340 opipe = pipe_info(out->f_path.dentry->d_inode); 1328 opipe = get_pipe_info(out);
1341 1329
1342 if (ipipe && opipe) { 1330 if (ipipe && opipe) {
1343 if (off_in || off_out) 1331 if (off_in || off_out)
@@ -1555,7 +1543,7 @@ static long vmsplice_to_user(struct file *file, const struct iovec __user *iov,
1555 int error; 1543 int error;
1556 long ret; 1544 long ret;
1557 1545
1558 pipe = pipe_info(file->f_path.dentry->d_inode); 1546 pipe = get_pipe_info(file);
1559 if (!pipe) 1547 if (!pipe)
1560 return -EBADF; 1548 return -EBADF;
1561 1549
@@ -1642,7 +1630,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
1642 }; 1630 };
1643 long ret; 1631 long ret;
1644 1632
1645 pipe = pipe_info(file->f_path.dentry->d_inode); 1633 pipe = get_pipe_info(file);
1646 if (!pipe) 1634 if (!pipe)
1647 return -EBADF; 1635 return -EBADF;
1648 1636
@@ -2022,8 +2010,8 @@ static int link_pipe(struct pipe_inode_info *ipipe,
2022static long do_tee(struct file *in, struct file *out, size_t len, 2010static long do_tee(struct file *in, struct file *out, size_t len,
2023 unsigned int flags) 2011 unsigned int flags)
2024{ 2012{
2025 struct pipe_inode_info *ipipe = pipe_info(in->f_path.dentry->d_inode); 2013 struct pipe_inode_info *ipipe = get_pipe_info(in);
2026 struct pipe_inode_info *opipe = pipe_info(out->f_path.dentry->d_inode); 2014 struct pipe_inode_info *opipe = get_pipe_info(out);
2027 int ret = -EINVAL; 2015 int ret = -EINVAL;
2028 2016
2029 /* 2017 /*
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 7d287afccde5..691f61223ed6 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -934,7 +934,6 @@ xfs_aops_discard_page(
934 struct xfs_inode *ip = XFS_I(inode); 934 struct xfs_inode *ip = XFS_I(inode);
935 struct buffer_head *bh, *head; 935 struct buffer_head *bh, *head;
936 loff_t offset = page_offset(page); 936 loff_t offset = page_offset(page);
937 ssize_t len = 1 << inode->i_blkbits;
938 937
939 if (!xfs_is_delayed_page(page, IO_DELAY)) 938 if (!xfs_is_delayed_page(page, IO_DELAY))
940 goto out_invalidate; 939 goto out_invalidate;
@@ -949,58 +948,14 @@ xfs_aops_discard_page(
949 xfs_ilock(ip, XFS_ILOCK_EXCL); 948 xfs_ilock(ip, XFS_ILOCK_EXCL);
950 bh = head = page_buffers(page); 949 bh = head = page_buffers(page);
951 do { 950 do {
952 int done;
953 xfs_fileoff_t offset_fsb;
954 xfs_bmbt_irec_t imap;
955 int nimaps = 1;
956 int error; 951 int error;
957 xfs_fsblock_t firstblock; 952 xfs_fileoff_t start_fsb;
958 xfs_bmap_free_t flist;
959 953
960 if (!buffer_delay(bh)) 954 if (!buffer_delay(bh))
961 goto next_buffer; 955 goto next_buffer;
962 956
963 offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset); 957 start_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
964 958 error = xfs_bmap_punch_delalloc_range(ip, start_fsb, 1);
965 /*
966 * Map the range first and check that it is a delalloc extent
967 * before trying to unmap the range. Otherwise we will be
968 * trying to remove a real extent (which requires a
969 * transaction) or a hole, which is probably a bad idea...
970 */
971 error = xfs_bmapi(NULL, ip, offset_fsb, 1,
972 XFS_BMAPI_ENTIRE, NULL, 0, &imap,
973 &nimaps, NULL);
974
975 if (error) {
976 /* something screwed, just bail */
977 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
978 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
979 "page discard failed delalloc mapping lookup.");
980 }
981 break;
982 }
983 if (!nimaps) {
984 /* nothing there */
985 goto next_buffer;
986 }
987 if (imap.br_startblock != DELAYSTARTBLOCK) {
988 /* been converted, ignore */
989 goto next_buffer;
990 }
991 WARN_ON(imap.br_blockcount == 0);
992
993 /*
994 * Note: while we initialise the firstblock/flist pair, they
995 * should never be used because blocks should never be
996 * allocated or freed for a delalloc extent and hence we need
997 * don't cancel or finish them after the xfs_bunmapi() call.
998 */
999 xfs_bmap_init(&flist, &firstblock);
1000 error = xfs_bunmapi(NULL, ip, offset_fsb, 1, 0, 1, &firstblock,
1001 &flist, &done);
1002
1003 ASSERT(!flist.xbf_count && !flist.xbf_first);
1004 if (error) { 959 if (error) {
1005 /* something screwed, just bail */ 960 /* something screwed, just bail */
1006 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) { 961 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
@@ -1010,7 +965,7 @@ xfs_aops_discard_page(
1010 break; 965 break;
1011 } 966 }
1012next_buffer: 967next_buffer:
1013 offset += len; 968 offset += 1 << inode->i_blkbits;
1014 969
1015 } while ((bh = bh->b_this_page) != head); 970 } while ((bh = bh->b_this_page) != head);
1016 971
@@ -1505,11 +1460,42 @@ xfs_vm_write_failed(
1505 struct inode *inode = mapping->host; 1460 struct inode *inode = mapping->host;
1506 1461
1507 if (to > inode->i_size) { 1462 if (to > inode->i_size) {
1508 struct iattr ia = { 1463 /*
1509 .ia_valid = ATTR_SIZE | ATTR_FORCE, 1464 * punch out the delalloc blocks we have already allocated. We
1510 .ia_size = inode->i_size, 1465 * don't call xfs_setattr() to do this as we may be in the
1511 }; 1466 * middle of a multi-iovec write and so the vfs inode->i_size
1512 xfs_setattr(XFS_I(inode), &ia, XFS_ATTR_NOLOCK); 1467 * will not match the xfs ip->i_size and so it will zero too
1468 * much. Hence we just truncate the page cache to zero what is
1469 * necessary and punch the delalloc blocks directly.
1470 */
1471 struct xfs_inode *ip = XFS_I(inode);
1472 xfs_fileoff_t start_fsb;
1473 xfs_fileoff_t end_fsb;
1474 int error;
1475
1476 truncate_pagecache(inode, to, inode->i_size);
1477
1478 /*
1479 * Check if there are any blocks that are outside of i_size
1480 * that need to be trimmed back.
1481 */
1482 start_fsb = XFS_B_TO_FSB(ip->i_mount, inode->i_size) + 1;
1483 end_fsb = XFS_B_TO_FSB(ip->i_mount, to);
1484 if (end_fsb <= start_fsb)
1485 return;
1486
1487 xfs_ilock(ip, XFS_ILOCK_EXCL);
1488 error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
1489 end_fsb - start_fsb);
1490 if (error) {
1491 /* something screwed, just bail */
1492 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1493 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
1494 "xfs_vm_write_failed: unable to clean up ino %lld",
1495 ip->i_ino);
1496 }
1497 }
1498 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1513 } 1499 }
1514} 1500}
1515 1501
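
A note on the block-range arithmetic in xfs_vm_write_failed() above: XFS_B_TO_FSB() converts bytes to filesystem blocks rounding up, while XFS_B_TO_FSBT() (used in xfs_aops_discard_page()) truncates. A rough standalone model, assuming 4KiB blocks (sb_blocklog == 12); the macro bodies here are illustrative, not the real mount-parameterised versions:

#define BLOCKLOG	12
#define B_TO_FSB(b)	(((b) + (1ULL << BLOCKLOG) - 1) >> BLOCKLOG)	/* round up */
#define B_TO_FSBT(b)	((b) >> BLOCKLOG)				/* truncate */

/*
 * e.g. i_size = 10000 bytes, failed write ends at to = 50000:
 *	start_fsb = B_TO_FSB(10000) + 1 = 3 + 1 = 4
 *	end_fsb   = B_TO_FSB(50000)     = 13
 * so blocks 4..12 are punched; the + 1 leaves an extra block
 * beyond EOF untouched rather than risk punching live data.
 */
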
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index aa1d353def29..4c5deb6e9e31 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -488,29 +488,16 @@ found:
488 spin_unlock(&pag->pag_buf_lock); 488 spin_unlock(&pag->pag_buf_lock);
489 xfs_perag_put(pag); 489 xfs_perag_put(pag);
490 490
491 /* Attempt to get the semaphore without sleeping, 491 if (xfs_buf_cond_lock(bp)) {
492 * if this does not work then we need to drop the 492 /* failed, so wait for the lock if requested. */
493 * spinlock and do a hard attempt on the semaphore.
494 */
495 if (down_trylock(&bp->b_sema)) {
496 if (!(flags & XBF_TRYLOCK)) { 493 if (!(flags & XBF_TRYLOCK)) {
497 /* wait for buffer ownership */
498 xfs_buf_lock(bp); 494 xfs_buf_lock(bp);
499 XFS_STATS_INC(xb_get_locked_waited); 495 XFS_STATS_INC(xb_get_locked_waited);
500 } else { 496 } else {
501 /* We asked for a trylock and failed, no need
502 * to look at file offset and length here, we
503 * know that this buffer at least overlaps our
504 * buffer and is locked, therefore our buffer
505 * either does not exist, or is this buffer.
506 */
507 xfs_buf_rele(bp); 497 xfs_buf_rele(bp);
508 XFS_STATS_INC(xb_busy_locked); 498 XFS_STATS_INC(xb_busy_locked);
509 return NULL; 499 return NULL;
510 } 500 }
511 } else {
512 /* trylock worked */
513 XB_SET_OWNER(bp);
514 } 501 }
515 502
516 if (bp->b_flags & XBF_STALE) { 503 if (bp->b_flags & XBF_STALE) {
@@ -876,10 +863,18 @@ xfs_buf_rele(
876 */ 863 */
877 864
878/* 865/*
879 * Locks a buffer object, if it is not already locked. 866 * Locks a buffer object, if it is not already locked. Note that this in
880 * Note that this in no way locks the underlying pages, so it is only 867 * no way locks the underlying pages, so it is only useful for
881 * useful for synchronizing concurrent use of buffer objects, not for 868 * synchronizing concurrent use of buffer objects, not for synchronizing
882 * synchronizing independent access to the underlying pages. 869 * independent access to the underlying pages.
870 *
871 * If we come across a stale, pinned, locked buffer, we know that we are
872 * being asked to lock a buffer that has been reallocated. Because it is
873 * pinned, we know that the log has not been pushed to disk and hence it
874 * will still be locked. Rather than continuing to have trylock attempts
875 * fail until someone else pushes the log, push it ourselves before
876 * returning. This means that the xfsaild will not get stuck trying
877 * to push on stale inode buffers.
883 */ 878 */
884int 879int
885xfs_buf_cond_lock( 880xfs_buf_cond_lock(
@@ -890,6 +885,8 @@ xfs_buf_cond_lock(
890 locked = down_trylock(&bp->b_sema) == 0; 885 locked = down_trylock(&bp->b_sema) == 0;
891 if (locked) 886 if (locked)
892 XB_SET_OWNER(bp); 887 XB_SET_OWNER(bp);
888 else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
889 xfs_log_force(bp->b_target->bt_mount, 0);
893 890
894 trace_xfs_buf_cond_lock(bp, _RET_IP_); 891 trace_xfs_buf_cond_lock(bp, _RET_IP_);
895 return locked ? 0 : -EBUSY; 892 return locked ? 0 : -EBUSY;
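
Pieced together from the two hunks above, the post-patch xfs_buf_cond_lock() reads roughly as follows; the only new behaviour is the log push when the trylock fails on a stale, pinned buffer:

int
xfs_buf_cond_lock(
	xfs_buf_t	*bp)
{
	int	locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		/* the holder is waiting on the log: push it ourselves */
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_cond_lock(bp, _RET_IP_);
	return locked ? 0 : -EBUSY;
}
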
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 8abd12e32e13..4111cd3966c7 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5471,8 +5471,13 @@ xfs_getbmap(
5471 if (error) 5471 if (error)
5472 goto out_unlock_iolock; 5472 goto out_unlock_iolock;
5473 } 5473 }
5474 5474 /*
5475 ASSERT(ip->i_delayed_blks == 0); 5475 * even after flushing the inode, there can still be delalloc
5476 * blocks on the inode beyond EOF due to speculative
5477 * preallocation. These are not removed until the release
5478 * function is called or the inode is inactivated. Hence we
5479 * cannot assert here that ip->i_delayed_blks == 0.
5480 */
5476 } 5481 }
5477 5482
5478 lock = xfs_ilock_map_shared(ip); 5483 lock = xfs_ilock_map_shared(ip);
@@ -6070,3 +6075,79 @@ xfs_bmap_disk_count_leaves(
6070 *count += xfs_bmbt_disk_get_blockcount(frp); 6075 *count += xfs_bmbt_disk_get_blockcount(frp);
6071 } 6076 }
6072} 6077}
6078
6079/*
6080 * Dead simple method of punching delayed allocation blocks from a range in
6081 * the inode. Walks a block at a time so will be slow, but is only executed in
6082 * rare error cases so the overhead is not critical. This will always punch out
6083 * both the start and end blocks, even if the ranges only partially overlap
6084 * them, so it is up to the caller to ensure that partial blocks are not
6085 * passed in.
6086 */
6087int
6088xfs_bmap_punch_delalloc_range(
6089 struct xfs_inode *ip,
6090 xfs_fileoff_t start_fsb,
6091 xfs_fileoff_t length)
6092{
6093 xfs_fileoff_t remaining = length;
6094 int error = 0;
6095
6096 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6097
6098 do {
6099 int done;
6100 xfs_bmbt_irec_t imap;
6101 int nimaps = 1;
6102 xfs_fsblock_t firstblock;
6103 xfs_bmap_free_t flist;
6104
6105 /*
6106 * Map the range first and check that it is a delalloc extent
6107 * before trying to unmap the range. Otherwise we will be
6108 * trying to remove a real extent (which requires a
6109 * transaction) or a hole, which is probably a bad idea...
6110 */
6111 error = xfs_bmapi(NULL, ip, start_fsb, 1,
6112 XFS_BMAPI_ENTIRE, NULL, 0, &imap,
6113 &nimaps, NULL);
6114
6115 if (error) {
6116 /* something screwed, just bail */
6117 if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
6118 xfs_fs_cmn_err(CE_ALERT, ip->i_mount,
6119 "Failed delalloc mapping lookup ino %lld fsb %lld.",
6120 ip->i_ino, start_fsb);
6121 }
6122 break;
6123 }
6124 if (!nimaps) {
6125 /* nothing there */
6126 goto next_block;
6127 }
6128 if (imap.br_startblock != DELAYSTARTBLOCK) {
6129 /* been converted, ignore */
6130 goto next_block;
6131 }
6132 WARN_ON(imap.br_blockcount == 0);
6133
6134 /*
6135 * Note: while we initialise the firstblock/flist pair, they
6136 * should never be used because blocks should never be
6137 * allocated or freed for a delalloc extent and hence we don't
6138 * need to cancel or finish them after the xfs_bunmapi() call.
6139 */
6140 xfs_bmap_init(&flist, &firstblock);
6141 error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
6142 &flist, &done);
6143 if (error)
6144 break;
6145
6146 ASSERT(!flist.xbf_count && !flist.xbf_first);
6147next_block:
6148 start_fsb++;
6149 remaining--;
6150 } while (remaining > 0);
6151
6152 return error;
6153}
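
Both call sites added in this series use the same pattern: take the inode lock exclusively, punch whole blocks, drop the lock. As in xfs_aops_discard_page() and xfs_vm_write_failed() above:

xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_bmap_punch_delalloc_range(ip, start_fsb,
				      end_fsb - start_fsb);
xfs_iunlock(ip, XFS_ILOCK_EXCL);

Per the function's comment, partial blocks at either end are punched in full, so it is up to the caller to pass block-aligned ranges.
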
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 71ec9b6ecdfc..3651191daea1 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -394,6 +394,11 @@ xfs_bmap_count_blocks(
394 int whichfork, 394 int whichfork,
395 int *count); 395 int *count);
396 396
397int
398xfs_bmap_punch_delalloc_range(
399 struct xfs_inode *ip,
400 xfs_fileoff_t start_fsb,
401 xfs_fileoff_t length);
397#endif /* __KERNEL__ */ 402#endif /* __KERNEL__ */
398 403
399#endif /* __XFS_BMAP_H__ */ 404#endif /* __XFS_BMAP_H__ */
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 3b9582c60a22..e60490bc00a6 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -377,6 +377,19 @@ xfs_swap_extents(
377 ip->i_d.di_format = tip->i_d.di_format; 377 ip->i_d.di_format = tip->i_d.di_format;
378 tip->i_d.di_format = tmp; 378 tip->i_d.di_format = tmp;
379 379
380 /*
381 * The extents in the source inode could still contain speculative
382 * preallocation beyond EOF (e.g. the file is open but not modified
383 * while defrag is in progress). In that case, we need to copy over the
384 * number of delalloc blocks the data fork in the source inode is
385 * tracking beyond EOF so that when the fork is truncated away when the
386 * temporary inode is unlinked we don't underrun the i_delayed_blks
387 * counter on that inode.
388 */
389 ASSERT(tip->i_delayed_blks == 0);
390 tip->i_delayed_blks = ip->i_delayed_blks;
391 ip->i_delayed_blks = 0;
392
380 ilf_fields = XFS_ILOG_CORE; 393 ilf_fields = XFS_ILOG_CORE;
381 394
382 switch(ip->i_d.di_format) { 395 switch(ip->i_d.di_format) {
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index ed9990267661..c78cc6a3d87c 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -58,6 +58,7 @@ xfs_error_trap(int e)
58int xfs_etest[XFS_NUM_INJECT_ERROR]; 58int xfs_etest[XFS_NUM_INJECT_ERROR];
59int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR]; 59int64_t xfs_etest_fsid[XFS_NUM_INJECT_ERROR];
60char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR]; 60char * xfs_etest_fsname[XFS_NUM_INJECT_ERROR];
61int xfs_error_test_active;
61 62
62int 63int
63xfs_error_test(int error_tag, int *fsidp, char *expression, 64xfs_error_test(int error_tag, int *fsidp, char *expression,
@@ -108,6 +109,7 @@ xfs_errortag_add(int error_tag, xfs_mount_t *mp)
108 len = strlen(mp->m_fsname); 109 len = strlen(mp->m_fsname);
109 xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP); 110 xfs_etest_fsname[i] = kmem_alloc(len + 1, KM_SLEEP);
110 strcpy(xfs_etest_fsname[i], mp->m_fsname); 111 strcpy(xfs_etest_fsname[i], mp->m_fsname);
112 xfs_error_test_active++;
111 return 0; 113 return 0;
112 } 114 }
113 } 115 }
@@ -137,6 +139,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
137 xfs_etest_fsid[i] = 0LL; 139 xfs_etest_fsid[i] = 0LL;
138 kmem_free(xfs_etest_fsname[i]); 140 kmem_free(xfs_etest_fsname[i]);
139 xfs_etest_fsname[i] = NULL; 141 xfs_etest_fsname[i] = NULL;
142 xfs_error_test_active--;
140 } 143 }
141 } 144 }
142 145
diff --git a/fs/xfs/xfs_error.h b/fs/xfs/xfs_error.h
index c2c1a072bb82..f338847f80b8 100644
--- a/fs/xfs/xfs_error.h
+++ b/fs/xfs/xfs_error.h
@@ -127,13 +127,14 @@ extern void xfs_corruption_error(const char *tag, int level,
127#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT 127#define XFS_RANDOM_BMAPIFORMAT XFS_RANDOM_DEFAULT
128 128
129#ifdef DEBUG 129#ifdef DEBUG
130extern int xfs_error_test_active;
130extern int xfs_error_test(int, int *, char *, int, char *, unsigned long); 131extern int xfs_error_test(int, int *, char *, int, char *, unsigned long);
131 132
132#define XFS_NUM_INJECT_ERROR 10 133#define XFS_NUM_INJECT_ERROR 10
133#define XFS_TEST_ERROR(expr, mp, tag, rf) \ 134#define XFS_TEST_ERROR(expr, mp, tag, rf) \
134 ((expr) || \ 135 ((expr) || (xfs_error_test_active && \
135 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \ 136 xfs_error_test((tag), (mp)->m_fixedfsid, "expr", __LINE__, __FILE__, \
136 (rf))) 137 (rf))))
137 138
138extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp); 139extern int xfs_errortag_add(int error_tag, xfs_mount_t *mp);
139extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud); 140extern int xfs_errortag_clearall(xfs_mount_t *mp, int loud);
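
The point of xfs_error_test_active is that XFS_TEST_ERROR() sites are scattered across hot paths; with the counter guard, a DEBUG kernel with no armed error tags now pays only a global load instead of an out-of-line xfs_error_test() call. The generic shape of the idiom, with hypothetical names:

extern int hook_active;		/* incremented on arm, decremented on disarm */
int slow_debug_hook(int tag);	/* the expensive out-of-line check */

#define CHECKED(expr, tag) \
	((expr) || (hook_active && slow_debug_hook(tag)))
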
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index c7ac020705df..7c8d30c453c3 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -657,18 +657,37 @@ xfs_inode_item_unlock(
657} 657}
658 658
659/* 659/*
660 * This is called to find out where the oldest active copy of the 660 * This is called to find out where the oldest active copy of the inode log
661 * inode log item in the on disk log resides now that the last log 661 * item in the on disk log resides now that the last log write of it completed
662 * write of it completed at the given lsn. Since we always re-log 662 * at the given lsn. Since we always re-log all dirty data in an inode, the
663 * all dirty data in an inode, the latest copy in the on disk log 663 * latest copy in the on disk log is the only one that matters. Therefore,
664 * is the only one that matters. Therefore, simply return the 664 * simply return the given lsn.
665 * given lsn. 665 *
666 * If the inode has been marked stale because the cluster is being freed, we
667 * don't want to (re-)insert this inode into the AIL. There is a race condition
668 * where the cluster buffer may be unpinned before the inode is inserted into
669 * the AIL during transaction committed processing. If the buffer is unpinned
670 * before the inode item has been committed and inserted, then it is possible
671 * for the buffer to be written and its IO completion run before the inode is
672 * into the AIL. In that case, we'd be inserting a clean, stale inode into the
673 * AIL which will never get removed. It will, however, get reclaimed which
674 * triggers an assert in xfs_inode_free() complaining about freeing an inode
675 * still in the AIL.
676 *
677 * To avoid this, return a lower LSN than the one passed in so that the
678 * transaction committed code will not move the inode forward in the AIL but
679 * will still unpin it properly.
666 */ 680 */
667STATIC xfs_lsn_t 681STATIC xfs_lsn_t
668xfs_inode_item_committed( 682xfs_inode_item_committed(
669 struct xfs_log_item *lip, 683 struct xfs_log_item *lip,
670 xfs_lsn_t lsn) 684 xfs_lsn_t lsn)
671{ 685{
686 struct xfs_inode_log_item *iip = INODE_ITEM(lip);
687 struct xfs_inode *ip = iip->ili_inode;
688
689 if (xfs_iflags_test(ip, XFS_ISTALE))
690 return lsn - 1;
672 return lsn; 691 return lsn;
673} 692}
674 693
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2b3398004aa5..0f14f94ed8f4 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1120,8 +1120,6 @@ struct drm_device {
1120 /*@{ */ 1120 /*@{ */
1121 spinlock_t object_name_lock; 1121 spinlock_t object_name_lock;
1122 struct idr object_name_idr; 1122 struct idr object_name_idr;
1123 uint32_t invalidate_domains; /* domains pending invalidation */
1124 uint32_t flush_domains; /* domains pending flush */
1125 /*@} */ 1123 /*@} */
1126 1124
1127}; 1125};
@@ -1411,7 +1409,6 @@ extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
1411extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); 1409extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
1412extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, 1410extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
1413 struct drm_file *file_priv); 1411 struct drm_file *file_priv);
1414extern void drm_agp_chipset_flush(struct drm_device *dev);
1415 1412
1416 /* Stub support (drm_stub.h) */ 1413 /* Stub support (drm_stub.h) */
1417extern int drm_setmaster_ioctl(struct drm_device *dev, void *data, 1414extern int drm_setmaster_ioctl(struct drm_device *dev, void *data,
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index bf01531193d5..e39177778601 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -62,11 +62,14 @@ struct drm_mm {
62 struct list_head unused_nodes; 62 struct list_head unused_nodes;
63 int num_unused; 63 int num_unused;
64 spinlock_t unused_lock; 64 spinlock_t unused_lock;
65 unsigned int scan_check_range : 1;
65 unsigned scan_alignment; 66 unsigned scan_alignment;
66 unsigned long scan_size; 67 unsigned long scan_size;
67 unsigned long scan_hit_start; 68 unsigned long scan_hit_start;
68 unsigned scan_hit_size; 69 unsigned scan_hit_size;
69 unsigned scanned_blocks; 70 unsigned scanned_blocks;
71 unsigned long scan_start;
72 unsigned long scan_end;
70}; 73};
71 74
72/* 75/*
@@ -145,6 +148,10 @@ static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
145 148
146void drm_mm_init_scan(struct drm_mm *mm, unsigned long size, 149void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
147 unsigned alignment); 150 unsigned alignment);
151void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
152 unsigned alignment,
153 unsigned long start,
154 unsigned long end);
148int drm_mm_scan_add_block(struct drm_mm_node *node); 155int drm_mm_scan_add_block(struct drm_mm_node *node);
149int drm_mm_scan_remove_block(struct drm_mm_node *node); 156int drm_mm_scan_remove_block(struct drm_mm_node *node);
150 157
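
A hedged sketch of how the new range-restricted scan is presumably driven, following the established drm_mm scan protocol: add blocks from an eviction list until a hole is found, then remove every scanned block again in reverse order, evicting exactly those that report a hit. obj, lru_list and evict_one() are hypothetical:

drm_mm_init_scan_with_range(mm, size, alignment, range_start, range_end);

list_for_each_entry(obj, &lru_list, lru) {
	if (drm_mm_scan_add_block(obj->mm_node))
		break;				/* suitable hole found */
}

/* every block added to the scan must be removed again */
list_for_each_entry_safe_reverse(obj, next, &lru_list, lru) {
	if (drm_mm_scan_remove_block(obj->mm_node))
		evict_one(obj);			/* part of the hole */
}
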
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8c641bed9bbd..0039f1f97ad8 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -287,6 +287,9 @@ typedef struct drm_i915_irq_wait {
287#define I915_PARAM_HAS_EXECBUF2 9 287#define I915_PARAM_HAS_EXECBUF2 9
288#define I915_PARAM_HAS_BSD 10 288#define I915_PARAM_HAS_BSD 10
289#define I915_PARAM_HAS_BLT 11 289#define I915_PARAM_HAS_BLT 11
290#define I915_PARAM_HAS_RELAXED_FENCING 12
291#define I915_PARAM_HAS_COHERENT_RINGS 13
292#define I915_PARAM_HAS_EXEC_CONSTANTS 14
290 293
291typedef struct drm_i915_getparam { 294typedef struct drm_i915_getparam {
292 int param; 295 int param;
@@ -633,6 +636,17 @@ struct drm_i915_gem_execbuffer2 {
633#define I915_EXEC_RENDER (1<<0) 636#define I915_EXEC_RENDER (1<<0)
634#define I915_EXEC_BSD (2<<0) 637#define I915_EXEC_BSD (2<<0)
635#define I915_EXEC_BLT (3<<0) 638#define I915_EXEC_BLT (3<<0)
639
640/* Used for switching the constants addressing mode on gen4+ RENDER ring.
641 * Gen6+ only supports relative addressing to dynamic state (default) and
642 * absolute addressing.
643 *
644 * These flags are ignored for the BSD and BLT rings.
645 */
646#define I915_EXEC_CONSTANTS_MASK (3<<6)
647#define I915_EXEC_CONSTANTS_REL_GENERAL (0<<6) /* default */
648#define I915_EXEC_CONSTANTS_ABSOLUTE (1<<6)
649#define I915_EXEC_CONSTANTS_REL_SURFACE (2<<6) /* gen4/5 only */
636 __u64 flags; 650 __u64 flags;
637 __u64 rsvd1; 651 __u64 rsvd1;
638 __u64 rsvd2; 652 __u64 rsvd2;
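
From userspace the new constants-addressing bits occupy flags bits 6-7 of the execbuffer2 ioctl. A hedged fragment (buffer list, batch length and error handling elided; drmIoctl() is the usual libdrm wrapper):

struct drm_i915_gem_execbuffer2 execbuf;

memset(&execbuf, 0, sizeof(execbuf));
/* ... buffers_ptr, buffer_count, batch_len set up here ... */
execbuf.flags = I915_EXEC_RENDER | I915_EXEC_CONSTANTS_ABSOLUTE;

ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);

On gen4/5 I915_EXEC_CONSTANTS_REL_SURFACE is also accepted; on the BSD and BLT rings the bits are ignored, per the comment above.
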
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h
index d3c81946f613..9e343c0998b4 100644
--- a/include/drm/intel-gtt.h
+++ b/include/drm/intel-gtt.h
@@ -2,17 +2,40 @@
2 2
3#ifndef _DRM_INTEL_GTT_H 3#ifndef _DRM_INTEL_GTT_H
4#define _DRM_INTEL_GTT_H 4#define _DRM_INTEL_GTT_H
5struct intel_gtt { 5
6 /* Number of stolen gtt entries at the beginning. */ 6const struct intel_gtt {
7 unsigned int gtt_stolen_entries; 7 /* Size of memory reserved for graphics by the BIOS */
8 unsigned int stolen_size;
8 /* Total number of gtt entries. */ 9 /* Total number of gtt entries. */
9 unsigned int gtt_total_entries; 10 unsigned int gtt_total_entries;
10 /* Part of the gtt that is mappable by the cpu, for those chips where 11 /* Part of the gtt that is mappable by the cpu, for those chips where
11 * this is not the full gtt. */ 12 * this is not the full gtt. */
12 unsigned int gtt_mappable_entries; 13 unsigned int gtt_mappable_entries;
13}; 14 /* Whether i915 needs to use the dmar apis or not. */
15 unsigned int needs_dmar : 1;
16} *intel_gtt_get(void);
14 17
15struct intel_gtt *intel_gtt_get(void); 18void intel_gtt_chipset_flush(void);
19void intel_gtt_unmap_memory(struct scatterlist *sg_list, int num_sg);
20void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries);
21int intel_gtt_map_memory(struct page **pages, unsigned int num_entries,
22 struct scatterlist **sg_list, int *num_sg);
23void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
24 unsigned int sg_len,
25 unsigned int pg_start,
26 unsigned int flags);
27void intel_gtt_insert_pages(unsigned int first_entry, unsigned int num_entries,
28 struct page **pages, unsigned int flags);
16 29
17#endif 30/* Special gtt memory types */
31#define AGP_DCACHE_MEMORY 1
32#define AGP_PHYS_MEMORY 2
33
34/* New caching attributes for gen6/sandybridge */
35#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
36#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
18 37
38/* flag for GFDT type */
39#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
40
41#endif
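
The header now exposes the whole GTT binding flow rather than just a size description. A hedged sketch of the intended calling sequence for a driver binding a page array (pg_start, num_entries, pages and flags are assumptions; error handling elided):

const struct intel_gtt *gtt = intel_gtt_get();
struct scatterlist *sg_list;
int num_sg;

if (gtt->needs_dmar) {
	if (intel_gtt_map_memory(pages, num_entries, &sg_list, &num_sg))
		return -ENOMEM;
	intel_gtt_insert_sg_entries(sg_list, num_sg, pg_start, flags);
} else {
	intel_gtt_insert_pages(pg_start, num_entries, pages, flags);
}
intel_gtt_chipset_flush();
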
diff --git a/include/linux/agp_backend.h b/include/linux/agp_backend.h
index 09ea4a1e9505..eaf6cd75a1b1 100644
--- a/include/linux/agp_backend.h
+++ b/include/linux/agp_backend.h
@@ -102,10 +102,8 @@ extern struct agp_memory *agp_allocate_memory(struct agp_bridge_data *, size_t,
102extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *); 102extern int agp_copy_info(struct agp_bridge_data *, struct agp_kern_info *);
103extern int agp_bind_memory(struct agp_memory *, off_t); 103extern int agp_bind_memory(struct agp_memory *, off_t);
104extern int agp_unbind_memory(struct agp_memory *); 104extern int agp_unbind_memory(struct agp_memory *);
105extern int agp_rebind_memory(void);
106extern void agp_enable(struct agp_bridge_data *, u32); 105extern void agp_enable(struct agp_bridge_data *, u32);
107extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *); 106extern struct agp_bridge_data *agp_backend_acquire(struct pci_dev *);
108extern void agp_backend_release(struct agp_bridge_data *); 107extern void agp_backend_release(struct agp_bridge_data *);
109extern void agp_flush_chipset(struct agp_bridge_data *);
110 108
111#endif /* _AGP_BACKEND_H */ 109#endif /* _AGP_BACKEND_H */
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index a065612fc928..64a7114a9394 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -29,6 +29,7 @@ struct linux_binprm{
29 char buf[BINPRM_BUF_SIZE]; 29 char buf[BINPRM_BUF_SIZE];
30#ifdef CONFIG_MMU 30#ifdef CONFIG_MMU
31 struct vm_area_struct *vma; 31 struct vm_area_struct *vma;
32 unsigned long vma_pages;
32#else 33#else
33# define MAX_ARG_PAGES 32 34# define MAX_ARG_PAGES 32
34 struct page *page[MAX_ARG_PAGES]; 35 struct page *page[MAX_ARG_PAGES];
@@ -59,6 +60,10 @@ struct linux_binprm{
59 unsigned long loader, exec; 60 unsigned long loader, exec;
60}; 61};
61 62
63extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
64extern struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
65 int write);
66
62#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 67#define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
63#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT) 68#define BINPRM_FLAGS_ENFORCE_NONDUMP (1 << BINPRM_FLAGS_ENFORCE_NONDUMP_BIT)
64 69
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4823af64e9db..5f09323ee880 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -10,11 +10,6 @@
10 * 10 *
11 * CPUs are exported via sysfs in the class/cpu/devices/ 11 * CPUs are exported via sysfs in the class/cpu/devices/
12 * directory. 12 * directory.
13 *
14 * Per-cpu interfaces can be implemented using a struct device_interface.
15 * See the following for how to do this:
16 * - drivers/base/intf.c
17 * - Documentation/driver-model/interface.txt
18 */ 13 */
19#ifndef _LINUX_CPU_H_ 14#ifndef _LINUX_CPU_H_
20#define _LINUX_CPU_H_ 15#define _LINUX_CPU_H_
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index a7d9dc21391d..7b776d71d36d 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -175,10 +175,21 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
175 return 0; 175 return 0;
176} 176}
177 177
178#define enable_intr_remapping(mode) (-1)
179#define disable_intr_remapping() (0)
180#define reenable_intr_remapping(mode) (0)
181#define intr_remapping_enabled (0) 178#define intr_remapping_enabled (0)
179
180static inline int enable_intr_remapping(int eim)
181{
182 return -1;
183}
184
185static inline void disable_intr_remapping(void)
186{
187}
188
189static inline int reenable_intr_remapping(int eim)
190{
191 return 0;
192}
182#endif 193#endif
183 194
184/* Can't use the common MSI interrupt functions 195/* Can't use the common MSI interrupt functions
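
Converting the !CONFIG_INTR_REMAP stubs from macros to static inlines keeps the disabled build type-checked: the macro versions discarded their arguments entirely, so a call like

	enable_intr_remapping("not an int");	/* compiled cleanly as a macro */

went unnoticed, and variables used only as stub arguments could trip set-but-unused warnings. The inline stubs evaluate and type-check their arguments exactly like the real functions while still compiling away to nothing.
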
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 7fca3dc4e475..d1631d37e9e0 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -1122,6 +1122,7 @@ extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs
1122 1122
1123/* drivers/video/fbcmap.c */ 1123/* drivers/video/fbcmap.c */
1124extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp); 1124extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
1125extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
1125extern void fb_dealloc_cmap(struct fb_cmap *cmap); 1126extern void fb_dealloc_cmap(struct fb_cmap *cmap);
1126extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to); 1127extern int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to);
1127extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to); 1128extern int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index eedc00b7b1ee..c9e06cc70dad 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -34,9 +34,9 @@
34#define SEEK_MAX SEEK_END 34#define SEEK_MAX SEEK_END
35 35
36struct fstrim_range { 36struct fstrim_range {
37 uint64_t start; 37 __u64 start;
38 uint64_t len; 38 __u64 len;
39 uint64_t minlen; 39 __u64 minlen;
40}; 40};
41 41
42/* And dynamically-tunable limits and defaults: */ 42/* And dynamically-tunable limits and defaults: */
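
fstrim_range is an ioctl payload shared with userspace, so it must use the kernel's exported __u64 rather than uint64_t, which kernel headers cannot assume is defined. A hedged userspace fragment, assuming the FITRIM ioctl from the same release cycle and an fd open on any file in the target filesystem:

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

struct fstrim_range range = {
	.start	= 0,
	.len	= UINT64_MAX,	/* trim the whole filesystem */
	.minlen	= 0,
};

if (ioctl(fd, FITRIM, &range) == 0)
	printf("trimmed %llu bytes\n", (unsigned long long)range.len);
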
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index e8713d55360a..f54adfcbec9c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -360,7 +360,7 @@ void drain_local_pages(void *dummy);
360 360
361extern gfp_t gfp_allowed_mask; 361extern gfp_t gfp_allowed_mask;
362 362
363extern void set_gfp_allowed_mask(gfp_t mask); 363extern void pm_restrict_gfp_mask(void);
364extern gfp_t clear_gfp_allowed_mask(gfp_t mask); 364extern void pm_restore_gfp_mask(void);
365 365
366#endif /* __LINUX_GFP_H */ 366#endif /* __LINUX_GFP_H */
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index ce73a30113b4..dd1a56fbe924 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -16,6 +16,8 @@ struct gpio_keys_button {
16struct gpio_keys_platform_data { 16struct gpio_keys_platform_data {
17 struct gpio_keys_button *buttons; 17 struct gpio_keys_button *buttons;
18 int nbuttons; 18 int nbuttons;
19 unsigned int poll_interval; /* polling interval in msecs -
20 for polling driver only */
19 unsigned int rep:1; /* enable input subsystem auto repeat */ 21 unsigned int rep:1; /* enable input subsystem auto repeat */
20 int (*enable)(struct device *dev); 22 int (*enable)(struct device *dev);
21 void (*disable)(struct device *dev); 23 void (*disable)(struct device *dev);
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index a2d6ea49ec56..d1e55fed2c7d 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -33,6 +33,8 @@ enum bp_type_idx {
33 33
34#ifdef CONFIG_HAVE_HW_BREAKPOINT 34#ifdef CONFIG_HAVE_HW_BREAKPOINT
35 35
36extern int __init init_hw_breakpoint(void);
37
36static inline void hw_breakpoint_init(struct perf_event_attr *attr) 38static inline void hw_breakpoint_init(struct perf_event_attr *attr)
37{ 39{
38 memset(attr, 0, sizeof(*attr)); 40 memset(attr, 0, sizeof(*attr));
@@ -108,6 +110,8 @@ static inline struct arch_hw_breakpoint *counter_arch_bp(struct perf_event *bp)
108 110
109#else /* !CONFIG_HAVE_HW_BREAKPOINT */ 111#else /* !CONFIG_HAVE_HW_BREAKPOINT */
110 112
113static inline int __init init_hw_breakpoint(void) { return 0; }
114
111static inline struct perf_event * 115static inline struct perf_event *
112register_user_hw_breakpoint(struct perf_event_attr *attr, 116register_user_hw_breakpoint(struct perf_event_attr *attr,
113 perf_overflow_handler_t triggered, 117 perf_overflow_handler_t triggered,
diff --git a/include/linux/input.h b/include/linux/input.h
index 6ef44465db8d..a8af21d42bc1 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -47,6 +47,25 @@ struct input_id {
47 __u16 version; 47 __u16 version;
48}; 48};
49 49
50/**
51 * struct input_absinfo - used by EVIOCGABS/EVIOCSABS ioctls
52 * @value: latest reported value for the axis.
53 * @minimum: specifies minimum value for the axis.
54 * @maximum: specifies maximum value for the axis.
55 * @fuzz: specifies fuzz value that is used to filter noise from
56 * the event stream.
 57 * @flat: values within this flat range will be discarded by the
 58 * joydev interface and reported as 0 instead.
59 * @resolution: specifies resolution for the values reported for
60 * the axis.
61 *
 62 * Note that the input core does not clamp reported values to the
 63 * [minimum, maximum] limits; that task is left to userspace.
64 *
65 * Resolution for main axes (ABS_X, ABS_Y, ABS_Z) is reported in
66 * units per millimeter (units/mm), resolution for rotational axes
67 * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
68 */
50struct input_absinfo { 69struct input_absinfo {
51 __s32 value; 70 __s32 value;
52 __s32 minimum; 71 __s32 minimum;
@@ -624,6 +643,10 @@ struct input_keymap_entry {
624#define KEY_CAMERA_FOCUS 0x210 643#define KEY_CAMERA_FOCUS 0x210
625#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */ 644#define KEY_WPS_BUTTON 0x211 /* WiFi Protected Setup key */
626 645
646#define KEY_TOUCHPAD_TOGGLE 0x212 /* Request switch touchpad on or off */
647#define KEY_TOUCHPAD_ON 0x213
648#define KEY_TOUCHPAD_OFF 0x214
649
627#define BTN_TRIGGER_HAPPY 0x2c0 650#define BTN_TRIGGER_HAPPY 0x2c0
628#define BTN_TRIGGER_HAPPY1 0x2c0 651#define BTN_TRIGGER_HAPPY1 0x2c0
629#define BTN_TRIGGER_HAPPY2 0x2c1 652#define BTN_TRIGGER_HAPPY2 0x2c1
@@ -1130,7 +1153,7 @@ struct input_mt_slot {
1130 * of tracked contacts 1153 * of tracked contacts
1131 * @mtsize: number of MT slots the device uses 1154 * @mtsize: number of MT slots the device uses
1132 * @slot: MT slot currently being transmitted 1155 * @slot: MT slot currently being transmitted
1133 * @absinfo: array of &struct absinfo elements holding information 1156 * @absinfo: array of &struct input_absinfo elements holding information
1134 * about absolute axes (current value, min, max, flat, fuzz, 1157 * about absolute axes (current value, min, max, flat, fuzz,
1135 * resolution) 1158 * resolution)
1136 * @key: reflects current state of device's keys/buttons 1159 * @key: reflects current state of device's keys/buttons
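
The clamping caveat in the new input_absinfo kernel-doc matters to consumers: userspace must clamp values itself. A short fragment querying ABS_X via the EVIOCGABS ioctl (fd is an open evdev device node):

#include <linux/input.h>
#include <sys/ioctl.h>

struct input_absinfo abs;

if (ioctl(fd, EVIOCGABS(ABS_X), &abs) == 0) {
	int v = abs.value;
	/* the input core does not clamp; do it here */
	if (v < abs.minimum) v = abs.minimum;
	if (v > abs.maximum) v = abs.maximum;
}
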
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h
deleted file mode 100644
index 1d19ab2afa39..000000000000
--- a/include/linux/intel-gtt.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * Common Intel AGPGART and GTT definitions.
3 */
4#ifndef _INTEL_GTT_H
5#define _INTEL_GTT_H
6
7#include <linux/agp_backend.h>
8
9/* This is for Intel only GTT controls.
10 *
11 * Sandybridge: AGP_USER_CACHED_MEMORY default to LLC only
12 */
13
14#define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2)
15#define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4)
16
17/* flag for GFDT type */
18#define AGP_USER_CACHED_MEMORY_GFDT (1 << 3)
19
20#endif
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 1ff81b51b656..dd3c34ebca9a 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -11,6 +11,7 @@
11#define MARVELL_PHY_ID_88E1118 0x01410e10 11#define MARVELL_PHY_ID_88E1118 0x01410e10
12#define MARVELL_PHY_ID_88E1121R 0x01410cb0 12#define MARVELL_PHY_ID_88E1121R 0x01410cb0
13#define MARVELL_PHY_ID_88E1145 0x01410cd0 13#define MARVELL_PHY_ID_88E1145 0x01410cd0
14#define MARVELL_PHY_ID_88E1149R 0x01410e50
14#define MARVELL_PHY_ID_88E1240 0x01410e30 15#define MARVELL_PHY_ID_88E1240 0x01410e30
15#define MARVELL_PHY_ID_88E1318S 0x01410e90 16#define MARVELL_PHY_ID_88E1318S 0x01410e90
16 17
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 4307231bd22f..31c237a00c48 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -161,6 +161,9 @@ extern void register_page_bootmem_info_node(struct pglist_data *pgdat);
161extern void put_page_bootmem(struct page *page); 161extern void put_page_bootmem(struct page *page);
162#endif 162#endif
163 163
164void lock_memory_hotplug(void);
165void unlock_memory_hotplug(void);
166
164#else /* ! CONFIG_MEMORY_HOTPLUG */ 167#else /* ! CONFIG_MEMORY_HOTPLUG */
165/* 168/*
166 * Stub functions for when hotplug is off 169 * Stub functions for when hotplug is off
@@ -192,6 +195,9 @@ static inline void register_page_bootmem_info_node(struct pglist_data *pgdat)
192{ 195{
193} 196}
194 197
198static inline void lock_memory_hotplug(void) {}
199static inline void unlock_memory_hotplug(void) {}
200
195#endif /* ! CONFIG_MEMORY_HOTPLUG */ 201#endif /* ! CONFIG_MEMORY_HOTPLUG */
196 202
197#ifdef CONFIG_MEMORY_HOTREMOVE 203#ifdef CONFIG_MEMORY_HOTREMOVE
diff --git a/include/linux/mfd/wm8350/audio.h b/include/linux/mfd/wm8350/audio.h
index a95141eafce3..bd581c6fa085 100644
--- a/include/linux/mfd/wm8350/audio.h
+++ b/include/linux/mfd/wm8350/audio.h
@@ -522,9 +522,6 @@
522#define WM8350_MCLK_SEL_PLL_32K 3 522#define WM8350_MCLK_SEL_PLL_32K 3
523#define WM8350_MCLK_SEL_MCLK 5 523#define WM8350_MCLK_SEL_MCLK 5
524 524
525#define WM8350_MCLK_DIR_OUT 0
526#define WM8350_MCLK_DIR_IN 1
527
528/* clock divider id's */ 525/* clock divider id's */
529#define WM8350_ADC_CLKDIV 0 526#define WM8350_ADC_CLKDIV 0
530#define WM8350_DAC_CLKDIV 1 527#define WM8350_DAC_CLKDIV 1
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 6d87f68ce4b6..30f6fad99a58 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -168,6 +168,7 @@ struct mmc_host {
168 /* DDR mode at 1.8V */ 168 /* DDR mode at 1.8V */
169#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */ 169#define MMC_CAP_1_2V_DDR (1 << 12) /* can support */
170 /* DDR mode at 1.2V */ 170 /* DDR mode at 1.2V */
171#define MMC_CAP_POWER_OFF_CARD (1 << 13) /* Can power off after boot */
171 172
172 mmc_pm_flag_t pm_caps; /* supported pm features */ 173 mmc_pm_flag_t pm_caps; /* supported pm features */
173 174
diff --git a/include/linux/module.h b/include/linux/module.h
index b29e7458b966..7575bbbdf2a2 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -517,7 +517,7 @@ static inline void __module_get(struct module *module)
517#define symbol_put_addr(p) do { } while(0) 517#define symbol_put_addr(p) do { } while(0)
518 518
519#endif /* CONFIG_MODULE_UNLOAD */ 519#endif /* CONFIG_MODULE_UNLOAD */
520int use_module(struct module *a, struct module *b); 520int ref_module(struct module *a, struct module *b);
521 521
522/* This is a #define so the string doesn't get put in every .o file */ 522/* This is a #define so the string doesn't get put in every .o file */
523#define module_name(mod) \ 523#define module_name(mod) \
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index ba6cc8f223c9..80f07198a31a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -483,6 +483,7 @@ struct nfs_entry {
483 int eof; 483 int eof;
484 struct nfs_fh * fh; 484 struct nfs_fh * fh;
485 struct nfs_fattr * fattr; 485 struct nfs_fattr * fattr;
486 unsigned char d_type;
486}; 487};
487 488
488/* 489/*
diff --git a/include/linux/node.h b/include/linux/node.h
index 06292dac3eab..1466945cc9ef 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -10,11 +10,6 @@
10 * 10 *
11 * Nodes are exported via driverfs in the class/node/devices/ 11 * Nodes are exported via driverfs in the class/node/devices/
12 * directory. 12 * directory.
13 *
14 * Per-node interfaces can be implemented using a struct device_interface.
15 * See the following for how to do this:
16 * - drivers/base/intf.c
17 * - Documentation/driver-model/interface.txt
18 */ 13 */
19#ifndef _LINUX_NODE_H_ 14#ifndef _LINUX_NODE_H_
20#define _LINUX_NODE_H_ 15#define _LINUX_NODE_H_
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 5bb13b3db84d..b02195dfc1b0 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -59,8 +59,6 @@ static inline void ClearPageCgroup##uname(struct page_cgroup *pc) \
59static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \ 59static inline int TestClearPageCgroup##uname(struct page_cgroup *pc) \
60 { return test_and_clear_bit(PCG_##lname, &pc->flags); } 60 { return test_and_clear_bit(PCG_##lname, &pc->flags); }
61 61
62TESTPCGFLAG(Locked, LOCK)
63
64/* Cache flag is set only once (at allocation) */ 62/* Cache flag is set only once (at allocation) */
65TESTPCGFLAG(Cache, CACHE) 63TESTPCGFLAG(Cache, CACHE)
66CLEARPCGFLAG(Cache, CACHE) 64CLEARPCGFLAG(Cache, CACHE)
@@ -104,6 +102,11 @@ static inline void unlock_page_cgroup(struct page_cgroup *pc)
104 bit_spin_unlock(PCG_LOCK, &pc->flags); 102 bit_spin_unlock(PCG_LOCK, &pc->flags);
105} 103}
106 104
105static inline int page_is_cgroup_locked(struct page_cgroup *pc)
106{
107 return bit_spin_is_locked(PCG_LOCK, &pc->flags);
108}
109
107#else /* CONFIG_CGROUP_MEM_RES_CTLR */ 110#else /* CONFIG_CGROUP_MEM_RES_CTLR */
108struct page_cgroup; 111struct page_cgroup;
109 112
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index c6bcfe93b9ca..cb845c16ad7d 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2047,6 +2047,7 @@
2047#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182 2047#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
2048#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150 2048#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
2049 2049
2050#define PCI_VENDOR_ID_BCM_GVC 0x14a4
2050#define PCI_VENDOR_ID_BROADCOM 0x14e4 2051#define PCI_VENDOR_ID_BROADCOM 0x14e4
2051#define PCI_DEVICE_ID_TIGON3_5752 0x1600 2052#define PCI_DEVICE_ID_TIGON3_5752 0x1600
2052#define PCI_DEVICE_ID_TIGON3_5752M 0x1601 2053#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
@@ -2441,6 +2442,7 @@
2441#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822 2442#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
2442#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823 2443#define PCI_DEVICE_ID_INTEL_MFD_EMMC0 0x0823
2443#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824 2444#define PCI_DEVICE_ID_INTEL_MFD_EMMC1 0x0824
2445#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
2444#define PCI_DEVICE_ID_INTEL_I960 0x0960 2446#define PCI_DEVICE_ID_INTEL_I960 0x0960
2445#define PCI_DEVICE_ID_INTEL_I960RM 0x0962 2447#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
2446#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 2448#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 40150f345982..de2c41758e29 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -850,6 +850,7 @@ struct perf_event_context {
850 int nr_active; 850 int nr_active;
851 int is_active; 851 int is_active;
852 int nr_stat; 852 int nr_stat;
853 int rotate_disable;
853 atomic_t refcount; 854 atomic_t refcount;
854 struct task_struct *task; 855 struct task_struct *task;
855 856
@@ -908,20 +909,6 @@ extern int perf_num_counters(void);
908extern const char *perf_pmu_name(void); 909extern const char *perf_pmu_name(void);
909extern void __perf_event_task_sched_in(struct task_struct *task); 910extern void __perf_event_task_sched_in(struct task_struct *task);
910extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); 911extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
911
912extern atomic_t perf_task_events;
913
914static inline void perf_event_task_sched_in(struct task_struct *task)
915{
916 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
917}
918
919static inline
920void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
921{
922 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
923}
924
925extern int perf_event_init_task(struct task_struct *child); 912extern int perf_event_init_task(struct task_struct *child);
926extern void perf_event_exit_task(struct task_struct *child); 913extern void perf_event_exit_task(struct task_struct *child);
927extern void perf_event_free_task(struct task_struct *task); 914extern void perf_event_free_task(struct task_struct *task);
@@ -1030,6 +1017,21 @@ have_event:
1030 __perf_sw_event(event_id, nr, nmi, regs, addr); 1017 __perf_sw_event(event_id, nr, nmi, regs, addr);
1031} 1018}
1032 1019
1020extern atomic_t perf_task_events;
1021
1022static inline void perf_event_task_sched_in(struct task_struct *task)
1023{
1024 COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
1025}
1026
1027static inline
1028void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
1029{
1030 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1031
1032 COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
1033}
1034
1033extern void perf_event_mmap(struct vm_area_struct *vma); 1035extern void perf_event_mmap(struct vm_area_struct *vma);
1034extern struct perf_guest_info_callbacks *perf_guest_cbs; 1036extern struct perf_guest_info_callbacks *perf_guest_cbs;
1035extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); 1037extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 445796945ac9..bb27d7ec2fb9 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -160,5 +160,6 @@ void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
160 160
161/* for F_SETPIPE_SZ and F_GETPIPE_SZ */ 161/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
162long pipe_fcntl(struct file *, unsigned int, unsigned long arg); 162long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
163struct pipe_inode_info *get_pipe_info(struct file *file);
163 164
164#endif 165#endif
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index cea0c38e7a63..9a52f72527dc 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -19,11 +19,13 @@ struct clk_mapping {
19}; 19};
20 20
21struct clk_ops { 21struct clk_ops {
22#ifdef CONFIG_SH_CLK_CPG_LEGACY
22 void (*init)(struct clk *clk); 23 void (*init)(struct clk *clk);
24#endif
23 int (*enable)(struct clk *clk); 25 int (*enable)(struct clk *clk);
24 void (*disable)(struct clk *clk); 26 void (*disable)(struct clk *clk);
25 unsigned long (*recalc)(struct clk *clk); 27 unsigned long (*recalc)(struct clk *clk);
26 int (*set_rate)(struct clk *clk, unsigned long rate, int algo_id); 28 int (*set_rate)(struct clk *clk, unsigned long rate);
27 int (*set_parent)(struct clk *clk, struct clk *parent); 29 int (*set_parent)(struct clk *clk, struct clk *parent);
28 long (*round_rate)(struct clk *clk, unsigned long rate); 30 long (*round_rate)(struct clk *clk, unsigned long rate);
29}; 31};
@@ -67,36 +69,6 @@ int clk_register(struct clk *);
67void clk_unregister(struct clk *); 69void clk_unregister(struct clk *);
68void clk_enable_init_clocks(void); 70void clk_enable_init_clocks(void);
69 71
70/**
71 * clk_set_rate_ex - set the clock rate for a clock source, with additional parameter
72 * @clk: clock source
73 * @rate: desired clock rate in Hz
74 * @algo_id: algorithm id to be passed down to ops->set_rate
75 *
76 * Returns success (0) or negative errno.
77 */
78int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id);
79
80enum clk_sh_algo_id {
81 NO_CHANGE = 0,
82
83 IUS_N1_N1,
84 IUS_322,
85 IUS_522,
86 IUS_N11,
87
88 SB_N1,
89
90 SB3_N1,
91 SB3_32,
92 SB3_43,
93 SB3_54,
94
95 BP_N1,
96
97 IP_N1,
98};
99
100struct clk_div_mult_table { 72struct clk_div_mult_table {
101 unsigned int *divisors; 73 unsigned int *divisors;
102 unsigned int nr_divisors; 74 unsigned int nr_divisors;
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 032d79ff1d9d..54e4eaaa0561 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -366,6 +366,7 @@ struct tty_file_private {
366#define TTY_HUPPED 18 /* Post driver->hangup() */ 366#define TTY_HUPPED 18 /* Post driver->hangup() */
367#define TTY_FLUSHING 19 /* Flushing to ldisc in progress */ 367#define TTY_FLUSHING 19 /* Flushing to ldisc in progress */
368#define TTY_FLUSHPENDING 20 /* Queued buffer flush pending */ 368#define TTY_FLUSHPENDING 20 /* Queued buffer flush pending */
369#define TTY_HUPPING 21 /* ->hangup() in progress */
369 370
370#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty)) 371#define TTY_WRITE_FLUSH(tty) tty_write_flush((tty))
371 372
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index d6188e5a52df..665517c05eaf 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -3,7 +3,7 @@
3 * 3 *
4 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de> 4 * Copyright(C) 2005, Benedikt Spranger <b.spranger@linutronix.de>
5 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de> 5 * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
6 * Copyright(C) 2006, Hans J. Koch <hjk@linutronix.de> 6 * Copyright(C) 2006, Hans J. Koch <hjk@hansjkoch.de>
7 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com> 7 * Copyright(C) 2006, Greg Kroah-Hartman <greg@kroah.com>
8 * 8 *
9 * Userspace IO driver. 9 * Userspace IO driver.
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 24300d8a1bc1..a28eb2592577 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -313,6 +313,10 @@ struct usb_bus {
313 int busnum; /* Bus number (in order of reg) */ 313 int busnum; /* Bus number (in order of reg) */
314 const char *bus_name; /* stable id (PCI slot_name etc) */ 314 const char *bus_name; /* stable id (PCI slot_name etc) */
315 u8 uses_dma; /* Does the host controller use DMA? */ 315 u8 uses_dma; /* Does the host controller use DMA? */
316 u8 uses_pio_for_control; /*
317 * Does the host controller use PIO
318 * for control transfers?
319 */
316 u8 otg_port; /* 0, or number of OTG/HNP port */ 320 u8 otg_port; /* 0, or number of OTG/HNP port */
317 unsigned is_b_host:1; /* true during some HNP roleswitches */ 321 unsigned is_b_host:1; /* true during some HNP roleswitches */
318 unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */ 322 unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index a03dcf62ca9d..44b54f619ac6 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -7,8 +7,6 @@
7 7
8struct vm_area_struct; /* vma defining user mapping in mm_types.h */ 8struct vm_area_struct; /* vma defining user mapping in mm_types.h */
9 9
10extern bool vmap_lazy_unmap;
11
12/* bits in flags of vmalloc's vm_struct below */ 10/* bits in flags of vmalloc's vm_struct below */
13#define VM_IOREMAP 0x00000001 /* ioremap() and friends */ 11#define VM_IOREMAP 0x00000001 /* ioremap() and friends */
14#define VM_ALLOC 0x00000002 /* vmalloc() */ 12#define VM_ALLOC 0x00000002 /* vmalloc() */
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 41dd480e45f1..239125af3ea3 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -137,31 +137,27 @@ struct v4l2_subdev_ops;
137 137
138 138
139/* Load an i2c module and return an initialized v4l2_subdev struct. 139/* Load an i2c module and return an initialized v4l2_subdev struct.
140 Only call request_module if module_name != NULL.
141 The client_type argument is the name of the chip that's on the adapter. */ 140 The client_type argument is the name of the chip that's on the adapter. */
142struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev, 141struct v4l2_subdev *v4l2_i2c_new_subdev_cfg(struct v4l2_device *v4l2_dev,
143 struct i2c_adapter *adapter, 142 struct i2c_adapter *adapter, const char *client_type,
144 const char *module_name, const char *client_type,
145 int irq, void *platform_data, 143 int irq, void *platform_data,
146 u8 addr, const unsigned short *probe_addrs); 144 u8 addr, const unsigned short *probe_addrs);
147 145
148/* Load an i2c module and return an initialized v4l2_subdev struct. 146/* Load an i2c module and return an initialized v4l2_subdev struct.
149 Only call request_module if module_name != NULL.
150 The client_type argument is the name of the chip that's on the adapter. */ 147 The client_type argument is the name of the chip that's on the adapter. */
151static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev, 148static inline struct v4l2_subdev *v4l2_i2c_new_subdev(struct v4l2_device *v4l2_dev,
152 struct i2c_adapter *adapter, 149 struct i2c_adapter *adapter, const char *client_type,
153 const char *module_name, const char *client_type,
154 u8 addr, const unsigned short *probe_addrs) 150 u8 addr, const unsigned short *probe_addrs)
155{ 151{
156 return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, module_name, 152 return v4l2_i2c_new_subdev_cfg(v4l2_dev, adapter, client_type, 0, NULL,
157 client_type, 0, NULL, addr, probe_addrs); 153 addr, probe_addrs);
158} 154}
159 155
160struct i2c_board_info; 156struct i2c_board_info;
161 157
162struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev, 158struct v4l2_subdev *v4l2_i2c_new_subdev_board(struct v4l2_device *v4l2_dev,
163 struct i2c_adapter *adapter, const char *module_name, 159 struct i2c_adapter *adapter, struct i2c_board_info *info,
164 struct i2c_board_info *info, const unsigned short *probe_addrs); 160 const unsigned short *probe_addrs);
165 161
166/* Initialize an v4l2_subdev with data from an i2c_client struct */ 162/* Initialize an v4l2_subdev with data from an i2c_client struct */
167void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client, 163void v4l2_i2c_subdev_init(struct v4l2_subdev *sd, struct i2c_client *client,
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 90c9e2872f27..18e5c3f67580 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -10,6 +10,7 @@ extern void unix_inflight(struct file *fp);
10extern void unix_notinflight(struct file *fp); 10extern void unix_notinflight(struct file *fp);
11extern void unix_gc(void); 11extern void unix_gc(void);
12extern void wait_for_unix_gc(void); 12extern void wait_for_unix_gc(void);
13extern struct sock *unix_get_socket(struct file *filp);
13 14
14#define UNIX_HASH_SIZE 256 15#define UNIX_HASH_SIZE 256
15 16
@@ -56,6 +57,7 @@ struct unix_sock {
56 spinlock_t lock; 57 spinlock_t lock;
57 unsigned int gc_candidate : 1; 58 unsigned int gc_candidate : 1;
58 unsigned int gc_maybe_cycle : 1; 59 unsigned int gc_maybe_cycle : 1;
60 unsigned char recursion_level;
59 struct socket_wq peer_wq; 61 struct socket_wq peer_wq;
60}; 62};
61#define unix_sk(__sk) ((struct unix_sock *)__sk) 63#define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index fa60cbda90a4..d79894192ae3 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -85,7 +85,9 @@
85 * ACK_MD (FSI2) 85 * ACK_MD (FSI2)
86 * CKG1 (FSI) 86 * CKG1 (FSI)
87 * 87 *
88 * err: return value < 0 88 * err : return value < 0
89 * no change : return value == 0
90 * change xMD : return value > 0
89 * 91 *
90 * 0x-00000AB 92 * 0x-00000AB
91 * 93 *
@@ -111,7 +113,7 @@
111struct sh_fsi_platform_info { 113struct sh_fsi_platform_info {
112 unsigned long porta_flags; 114 unsigned long porta_flags;
113 unsigned long portb_flags; 115 unsigned long portb_flags;
114 int (*set_rate)(int is_porta, int rate); /* for master mode */ 116 int (*set_rate)(struct device *dev, int is_porta, int rate, int enable);
115}; 117};
116 118
117#endif /* __SOUND_FSI_H */ 119#endif /* __SOUND_FSI_H */
diff --git a/include/video/da8xx-fb.h b/include/video/da8xx-fb.h
index 6316cdabf73f..89d43b3d4cb9 100644
--- a/include/video/da8xx-fb.h
+++ b/include/video/da8xx-fb.h
@@ -99,7 +99,6 @@ struct lcd_sync_arg {
99#define FBIPUT_COLOR _IOW('F', 6, int) 99#define FBIPUT_COLOR _IOW('F', 6, int)
100#define FBIPUT_HSYNC _IOW('F', 9, int) 100#define FBIPUT_HSYNC _IOW('F', 9, int)
101#define FBIPUT_VSYNC _IOW('F', 10, int) 101#define FBIPUT_VSYNC _IOW('F', 10, int)
102#define FBIO_WAITFORVSYNC _IOW('F', 0x20, u_int32_t)
103 102
104#endif /* ifndef DA8XX_FB_H */ 103#endif /* ifndef DA8XX_FB_H */
105 104
diff --git a/include/xen/events.h b/include/xen/events.h
index 646dd17d3aa4..00f53ddcc062 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -76,7 +76,9 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name);
76 76
77#ifdef CONFIG_PCI_MSI 77#ifdef CONFIG_PCI_MSI
78/* Allocate an irq and a pirq to be used with MSIs. */ 78/* Allocate an irq and a pirq to be used with MSIs. */
79void xen_allocate_pirq_msi(char *name, int *irq, int *pirq); 79#define XEN_ALLOC_PIRQ (1 << 0)
80#define XEN_ALLOC_IRQ (1 << 1)
81void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc_mask);
80int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type); 82int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type);
81#endif 83#endif
82 84
@@ -89,4 +91,7 @@ int xen_vector_from_irq(unsigned pirq);
89/* Return gsi allocated to pirq */ 91/* Return gsi allocated to pirq */
90int xen_gsi_from_irq(unsigned pirq); 92int xen_gsi_from_irq(unsigned pirq);
91 93
94/* Return irq from pirq */
95int xen_irq_from_pirq(unsigned pirq);
96
92#endif /* _XEN_EVENTS_H */ 97#endif /* _XEN_EVENTS_H */
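
A hedged guess at how the extended xen_allocate_pirq_msi() is meant to be called: the mask presumably lets a caller that already holds one of the two identifiers request only the other. Requesting both:

int irq, pirq;

xen_allocate_pirq_msi("msi", &irq, &pirq,
		      XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ);
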
diff --git a/include/xen/interface/memory.h b/include/xen/interface/memory.h
index d7a6c13bde69..eac3ce153719 100644
--- a/include/xen/interface/memory.h
+++ b/include/xen/interface/memory.h
@@ -141,6 +141,19 @@ struct xen_machphys_mfn_list {
141DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list); 141DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mfn_list);
142 142
143/* 143/*
144 * Returns the location in virtual address space of the machine_to_phys
145 * mapping table. Architectures which do not have a m2p table, or which do not
146 * map it by default into guest address space, do not implement this command.
147 * arg == addr of xen_machphys_mapping_t.
148 */
149#define XENMEM_machphys_mapping 12
150struct xen_machphys_mapping {
151 unsigned long v_start, v_end; /* Start and end virtual addresses. */
152 unsigned long max_mfn; /* Maximum MFN that can be looked up. */
153};
154DEFINE_GUEST_HANDLE_STRUCT(xen_machphys_mapping_t);
155
156/*
144 * Sets the GPFN at which a particular page appears in the specified guest's 157 * Sets the GPFN at which a particular page appears in the specified guest's
145 * pseudophysical address space. 158 * pseudophysical address space.
146 * arg == addr of xen_add_to_physmap_t. 159 * arg == addr of xen_add_to_physmap_t.
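
A guest would issue XENMEM_machphys_mapping through the generic memory_op hypercall. An illustrative use, error handling elided; hypervisors without an m2p table simply fail the call:

        struct xen_machphys_mapping mapping;
        unsigned long *m2p;

        if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
                /* table spans mapping.v_start..v_end, valid up to max_mfn */
                m2p = (unsigned long *)mapping.v_start;
        }
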
diff --git a/include/xen/interface/physdev.h b/include/xen/interface/physdev.h
index 2b2c66c3df00..534cac89a77d 100644
--- a/include/xen/interface/physdev.h
+++ b/include/xen/interface/physdev.h
@@ -188,6 +188,16 @@ struct physdev_nr_pirqs {
188 uint32_t nr_pirqs; 188 uint32_t nr_pirqs;
189}; 189};
190 190
191/* type is MAP_PIRQ_TYPE_GSI or MAP_PIRQ_TYPE_MSI
192 * the hypercall returns a free pirq */
193#define PHYSDEVOP_get_free_pirq 23
194struct physdev_get_free_pirq {
195 /* IN */
196 int type;
197 /* OUT */
198 uint32_t pirq;
199};
200
191/* 201/*
192 * Notify that some PIRQ-bound event channels have been unmasked. 202 * Notify that some PIRQ-bound event channels have been unmasked.
193 * ** This command is obsolete since interface version 0x00030202 and is ** 203 * ** This command is obsolete since interface version 0x00030202 and is **
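
PHYSDEVOP_get_free_pirq is invoked through HYPERVISOR_physdev_op(); type is the IN parameter and pirq the OUT parameter. A sketch:

        struct physdev_get_free_pirq op = {
                .type = MAP_PIRQ_TYPE_MSI,
        };
        int pirq;

        if (HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op) == 0)
                pirq = op.pirq;         /* a pirq the guest may now bind */
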
diff --git a/include/xen/page.h b/include/xen/page.h
index eaf85fab1263..0be36b976f4b 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -1 +1,8 @@
1#ifndef _XEN_PAGE_H
2#define _XEN_PAGE_H
3
1#include <asm/xen/page.h> 4#include <asm/xen/page.h>
5
6extern phys_addr_t xen_extra_mem_start, xen_extra_mem_size;
7
8#endif /* _XEN_PAGE_H */
diff --git a/include/xen/privcmd.h b/include/xen/privcmd.h
index b42cdfd92fee..17857fb4d550 100644
--- a/include/xen/privcmd.h
+++ b/include/xen/privcmd.h
@@ -34,13 +34,10 @@
34#define __LINUX_PUBLIC_PRIVCMD_H__ 34#define __LINUX_PUBLIC_PRIVCMD_H__
35 35
36#include <linux/types.h> 36#include <linux/types.h>
37#include <linux/compiler.h>
37 38
38typedef unsigned long xen_pfn_t; 39typedef unsigned long xen_pfn_t;
39 40
40#ifndef __user
41#define __user
42#endif
43
44struct privcmd_hypercall { 41struct privcmd_hypercall {
45 __u64 op; 42 __u64 op;
46 __u64 arg[5]; 43 __u64 arg[5];
diff --git a/init/Kconfig b/init/Kconfig
index 88c10468db46..c9728992a776 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -613,6 +613,19 @@ config CGROUP_MEM_RES_CTLR_SWAP
613 if boot option "noswapaccount" is set, swap will not be accounted. 613 if boot option "noswapaccount" is set, swap will not be accounted.
614 Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page 614 Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page
615 size is 4096 bytes, 512k per 1GB of swap. 615 size is 4096 bytes, 512k per 1GB of swap.
616config CGROUP_MEM_RES_CTLR_SWAP_ENABLED
617 bool "Memory Resource Controller Swap Extension enabled by default"
618 depends on CGROUP_MEM_RES_CTLR_SWAP
619 default y
620 help
621 Memory Resource Controller Swap Extension comes at the price of
622 bigger memory consumption. General purpose distribution kernels
623 which want to ship the feature but keep it disabled by default,
624 letting the user enable it with the swapaccount boot command line
625 parameter, should leave this option unselected.
626 Those who want the feature enabled by default should select this
627 option (if, for some reason, they need to disable it later,
628 noswapaccount does the trick).
616 629
617menuconfig CGROUP_SCHED 630menuconfig CGROUP_SCHED
618 bool "Group CPU scheduler" 631 bool "Group CPU scheduler"
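
The companion swapaccount= parser for this option lands in the mm/memcontrol.c hunk further down: booting with swapaccount (or swapaccount=1) turns the accounting on when it was configured off by default, and swapaccount=0 (or the older noswapaccount) turns it off when it was configured on.
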
diff --git a/kernel/exit.c b/kernel/exit.c
index 21aa7b3001fb..676149a4ac5f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -914,6 +914,15 @@ NORET_TYPE void do_exit(long code)
914 if (unlikely(!tsk->pid)) 914 if (unlikely(!tsk->pid))
915 panic("Attempted to kill the idle task!"); 915 panic("Attempted to kill the idle task!");
916 916
917 /*
918 * If do_exit is called because this process oopsed, it's possible
919 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
920 * continuing. Amongst other possible reasons, this is to prevent
921 * mm_release()->clear_child_tid() from writing to a user-controlled
922 * kernel address.
923 */
924 set_fs(USER_DS);
925
917 tracehook_report_exit(&code); 926 tracehook_report_exit(&code);
918 927
919 validate_creds_for_do_exit(tsk); 928 validate_creds_for_do_exit(tsk);
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 2c9120f0afca..e5325825aeb6 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -620,7 +620,7 @@ static struct pmu perf_breakpoint = {
620 .read = hw_breakpoint_pmu_read, 620 .read = hw_breakpoint_pmu_read,
621}; 621};
622 622
623static int __init init_hw_breakpoint(void) 623int __init init_hw_breakpoint(void)
624{ 624{
625 unsigned int **task_bp_pinned; 625 unsigned int **task_bp_pinned;
626 int cpu, err_cpu; 626 int cpu, err_cpu;
@@ -655,6 +655,5 @@ static int __init init_hw_breakpoint(void)
655 655
656 return -ENOMEM; 656 return -ENOMEM;
657} 657}
658core_initcall(init_hw_breakpoint);
659 658
660 659
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 01b1d3a88983..6c8a2a9f8a7b 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -214,7 +214,7 @@ static int irq_spurious_proc_show(struct seq_file *m, void *v)
214 214
215static int irq_spurious_proc_open(struct inode *inode, struct file *file) 215static int irq_spurious_proc_open(struct inode *inode, struct file *file)
216{ 216{
217 return single_open(file, irq_spurious_proc_show, NULL); 217 return single_open(file, irq_spurious_proc_show, PDE(inode)->data);
218} 218}
219 219
220static const struct file_operations irq_spurious_proc_fops = { 220static const struct file_operations irq_spurious_proc_fops = {
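
single_open() stashes its third argument in the seq_file's private field, which is what makes the per-irq data reachable again. A sketch of the consumer side under that assumption:

        static int irq_spurious_proc_show(struct seq_file *m, void *v)
        {
                struct irq_desc *desc = m->private;     /* from single_open() */

                seq_printf(m, "count %u\n", desc->irq_count);
                return 0;
        }
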
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index f16763ff8481..90f881904bb1 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -145,7 +145,9 @@ void irq_work_run(void)
145 * Clear the BUSY bit and return to the free state if 145 * Clear the BUSY bit and return to the free state if
146 * no-one else claimed it meanwhile. 146 * no-one else claimed it meanwhile.
147 */ 147 */
148 cmpxchg(&entry->next, next_flags(NULL, IRQ_WORK_BUSY), NULL); 148 (void)cmpxchg(&entry->next,
149 next_flags(NULL, IRQ_WORK_BUSY),
150 NULL);
149 } 151 }
150} 152}
151EXPORT_SYMBOL_GPL(irq_work_run); 153EXPORT_SYMBOL_GPL(irq_work_run);
diff --git a/kernel/module.c b/kernel/module.c
index 437a74a7524a..d190664f25ff 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2326,6 +2326,18 @@ static void find_module_sections(struct module *mod, struct load_info *info)
2326 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) * 2326 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2327 mod->num_trace_events, GFP_KERNEL); 2327 mod->num_trace_events, GFP_KERNEL);
2328#endif 2328#endif
2329#ifdef CONFIG_TRACING
2330 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2331 sizeof(*mod->trace_bprintk_fmt_start),
2332 &mod->num_trace_bprintk_fmt);
2333 /*
2334 * This section contains pointers to allocated objects in the trace
2335 * code and not scanning it leads to false positives.
2336 */
2337 kmemleak_scan_area(mod->trace_bprintk_fmt_start,
2338 sizeof(*mod->trace_bprintk_fmt_start) *
2339 mod->num_trace_bprintk_fmt, GFP_KERNEL);
2340#endif
2329#ifdef CONFIG_FTRACE_MCOUNT_RECORD 2341#ifdef CONFIG_FTRACE_MCOUNT_RECORD
2330 /* sechdrs[0].sh_size is always zero */ 2342 /* sechdrs[0].sh_size is always zero */
2331 mod->ftrace_callsites = section_objs(info, "__mcount_loc", 2343 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index cb6c0d2af68f..eac7e3364335 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -31,6 +31,7 @@
31#include <linux/kernel_stat.h> 31#include <linux/kernel_stat.h>
32#include <linux/perf_event.h> 32#include <linux/perf_event.h>
33#include <linux/ftrace_event.h> 33#include <linux/ftrace_event.h>
34#include <linux/hw_breakpoint.h>
34 35
35#include <asm/irq_regs.h> 36#include <asm/irq_regs.h>
36 37
@@ -1286,8 +1287,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
1286{ 1287{
1287 int ctxn; 1288 int ctxn;
1288 1289
1289 perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
1290
1291 for_each_task_context_nr(ctxn) 1290 for_each_task_context_nr(ctxn)
1292 perf_event_context_sched_out(task, ctxn, next); 1291 perf_event_context_sched_out(task, ctxn, next);
1293} 1292}
@@ -1621,8 +1620,12 @@ static void rotate_ctx(struct perf_event_context *ctx)
1621{ 1620{
1622 raw_spin_lock(&ctx->lock); 1621 raw_spin_lock(&ctx->lock);
1623 1622
1624 /* Rotate the first entry last of non-pinned groups */ 1623 /*
1625 list_rotate_left(&ctx->flexible_groups); 1624 * Rotate the first entry last of non-pinned groups. Rotation might be
1625 * disabled by the inheritance code.
1626 */
1627 if (!ctx->rotate_disable)
1628 list_rotate_left(&ctx->flexible_groups);
1626 1629
1627 raw_spin_unlock(&ctx->lock); 1630 raw_spin_unlock(&ctx->lock);
1628} 1631}
@@ -2234,11 +2237,6 @@ int perf_event_release_kernel(struct perf_event *event)
2234 raw_spin_unlock_irq(&ctx->lock); 2237 raw_spin_unlock_irq(&ctx->lock);
2235 mutex_unlock(&ctx->mutex); 2238 mutex_unlock(&ctx->mutex);
2236 2239
2237 mutex_lock(&event->owner->perf_event_mutex);
2238 list_del_init(&event->owner_entry);
2239 mutex_unlock(&event->owner->perf_event_mutex);
2240 put_task_struct(event->owner);
2241
2242 free_event(event); 2240 free_event(event);
2243 2241
2244 return 0; 2242 return 0;
@@ -2251,9 +2249,43 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);
2251static int perf_release(struct inode *inode, struct file *file) 2249static int perf_release(struct inode *inode, struct file *file)
2252{ 2250{
2253 struct perf_event *event = file->private_data; 2251 struct perf_event *event = file->private_data;
2252 struct task_struct *owner;
2254 2253
2255 file->private_data = NULL; 2254 file->private_data = NULL;
2256 2255
2256 rcu_read_lock();
2257 owner = ACCESS_ONCE(event->owner);
2258 /*
2259 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
2260 * !owner it means the list deletion is complete and we can indeed
2261 * free this event; otherwise we need to serialize on
2262 * owner->perf_event_mutex.
2263 */
2264 smp_read_barrier_depends();
2265 if (owner) {
2266 /*
2267 * Since delayed_put_task_struct() also drops the last
2268 * task reference we can safely take a new reference
2269 * while holding the rcu_read_lock().
2270 */
2271 get_task_struct(owner);
2272 }
2273 rcu_read_unlock();
2274
2275 if (owner) {
2276 mutex_lock(&owner->perf_event_mutex);
2277 /*
2278 * We have to re-check the event->owner field: if it is cleared
2279 * we raced with perf_event_exit_task(); acquiring the mutex
2280 * ensured they're done, and we can proceed with freeing the
2281 * event.
2282 */
2283 if (event->owner)
2284 list_del_init(&event->owner_entry);
2285 mutex_unlock(&owner->perf_event_mutex);
2286 put_task_struct(owner);
2287 }
2288
2257 return perf_event_release_kernel(event); 2289 return perf_event_release_kernel(event);
2258} 2290}
2259 2291
@@ -5677,7 +5709,7 @@ SYSCALL_DEFINE5(perf_event_open,
5677 mutex_unlock(&ctx->mutex); 5709 mutex_unlock(&ctx->mutex);
5678 5710
5679 event->owner = current; 5711 event->owner = current;
5680 get_task_struct(current); 5712
5681 mutex_lock(&current->perf_event_mutex); 5713 mutex_lock(&current->perf_event_mutex);
5682 list_add_tail(&event->owner_entry, &current->perf_event_list); 5714 list_add_tail(&event->owner_entry, &current->perf_event_list);
5683 mutex_unlock(&current->perf_event_mutex); 5715 mutex_unlock(&current->perf_event_mutex);
@@ -5745,12 +5777,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
5745 ++ctx->generation; 5777 ++ctx->generation;
5746 mutex_unlock(&ctx->mutex); 5778 mutex_unlock(&ctx->mutex);
5747 5779
5748 event->owner = current;
5749 get_task_struct(current);
5750 mutex_lock(&current->perf_event_mutex);
5751 list_add_tail(&event->owner_entry, &current->perf_event_list);
5752 mutex_unlock(&current->perf_event_mutex);
5753
5754 return event; 5780 return event;
5755 5781
5756err_free: 5782err_free:
@@ -5901,8 +5927,24 @@ again:
5901 */ 5927 */
5902void perf_event_exit_task(struct task_struct *child) 5928void perf_event_exit_task(struct task_struct *child)
5903{ 5929{
5930 struct perf_event *event, *tmp;
5904 int ctxn; 5931 int ctxn;
5905 5932
5933 mutex_lock(&child->perf_event_mutex);
5934 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
5935 owner_entry) {
5936 list_del_init(&event->owner_entry);
5937
5938 /*
5939 * Ensure the list deletion is visible before we clear
5940 * the owner, closes a race against perf_release() where
5941 * we need to serialize on the owner->perf_event_mutex.
5942 */
5943 smp_wmb();
5944 event->owner = NULL;
5945 }
5946 mutex_unlock(&child->perf_event_mutex);
5947
5906 for_each_task_context_nr(ctxn) 5948 for_each_task_context_nr(ctxn)
5907 perf_event_exit_task_context(child, ctxn); 5949 perf_event_exit_task_context(child, ctxn);
5908} 5950}
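
The two halves of this fix pair up as a publish/consume handshake on event->owner; condensed into one sketch:

        /* exiting task: unlink, then publish NULL */
        list_del_init(&event->owner_entry);
        smp_wmb();                      /* order unlink before the store */
        event->owner = NULL;

        /* perf_release(): pin the owner under RCU, then re-check */
        rcu_read_lock();
        owner = ACCESS_ONCE(event->owner);
        smp_read_barrier_depends();
        if (owner)
                get_task_struct(owner); /* safe: delayed_put_task_struct()
                                           drops the last task reference */
        rcu_read_unlock();
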
@@ -6122,6 +6164,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
6122 struct perf_event *event; 6164 struct perf_event *event;
6123 struct task_struct *parent = current; 6165 struct task_struct *parent = current;
6124 int inherited_all = 1; 6166 int inherited_all = 1;
6167 unsigned long flags;
6125 int ret = 0; 6168 int ret = 0;
6126 6169
6127 child->perf_event_ctxp[ctxn] = NULL; 6170 child->perf_event_ctxp[ctxn] = NULL;
@@ -6162,6 +6205,15 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
6162 break; 6205 break;
6163 } 6206 }
6164 6207
6208 /*
6209 * We can't hold ctx->lock when iterating the ->flexible_group list due
6210 * to allocations, but we need to prevent rotation because
6211 * rotate_ctx() will change the list from interrupt context.
6212 */
6213 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6214 parent_ctx->rotate_disable = 1;
6215 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6216
6165 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) { 6217 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
6166 ret = inherit_task_group(event, parent, parent_ctx, 6218 ret = inherit_task_group(event, parent, parent_ctx,
6167 child, ctxn, &inherited_all); 6219 child, ctxn, &inherited_all);
@@ -6169,6 +6221,10 @@ int perf_event_init_context(struct task_struct *child, int ctxn)
6169 break; 6221 break;
6170 } 6222 }
6171 6223
6224 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
6225 parent_ctx->rotate_disable = 0;
6226 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
6227
6172 child_ctx = child->perf_event_ctxp[ctxn]; 6228 child_ctx = child->perf_event_ctxp[ctxn];
6173 6229
6174 if (child_ctx && inherited_all) { 6230 if (child_ctx && inherited_all) {
@@ -6321,6 +6377,8 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
6321 6377
6322void __init perf_event_init(void) 6378void __init perf_event_init(void)
6323{ 6379{
6380 int ret;
6381
6324 perf_event_init_all_cpus(); 6382 perf_event_init_all_cpus();
6325 init_srcu_struct(&pmus_srcu); 6383 init_srcu_struct(&pmus_srcu);
6326 perf_pmu_register(&perf_swevent); 6384 perf_pmu_register(&perf_swevent);
@@ -6328,4 +6386,7 @@ void __init perf_event_init(void)
6328 perf_pmu_register(&perf_task_clock); 6386 perf_pmu_register(&perf_task_clock);
6329 perf_tp_register(); 6387 perf_tp_register();
6330 perf_cpu_notifier(perf_cpu_notify); 6388 perf_cpu_notifier(perf_cpu_notify);
6389
6390 ret = init_hw_breakpoint();
6391 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
6331} 6392}
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 6842eeba5879..05bb7173850e 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -37,13 +37,13 @@ static int check_clock(const clockid_t which_clock)
37 if (pid == 0) 37 if (pid == 0)
38 return 0; 38 return 0;
39 39
40 read_lock(&tasklist_lock); 40 rcu_read_lock();
41 p = find_task_by_vpid(pid); 41 p = find_task_by_vpid(pid);
42 if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? 42 if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
43 same_thread_group(p, current) : thread_group_leader(p))) { 43 same_thread_group(p, current) : has_group_leader_pid(p))) {
44 error = -EINVAL; 44 error = -EINVAL;
45 } 45 }
46 read_unlock(&tasklist_lock); 46 rcu_read_unlock();
47 47
48 return error; 48 return error;
49} 49}
@@ -390,7 +390,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
390 390
391 INIT_LIST_HEAD(&new_timer->it.cpu.entry); 391 INIT_LIST_HEAD(&new_timer->it.cpu.entry);
392 392
393 read_lock(&tasklist_lock); 393 rcu_read_lock();
394 if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) { 394 if (CPUCLOCK_PERTHREAD(new_timer->it_clock)) {
395 if (pid == 0) { 395 if (pid == 0) {
396 p = current; 396 p = current;
@@ -404,7 +404,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
404 p = current->group_leader; 404 p = current->group_leader;
405 } else { 405 } else {
406 p = find_task_by_vpid(pid); 406 p = find_task_by_vpid(pid);
407 if (p && !thread_group_leader(p)) 407 if (p && !has_group_leader_pid(p))
408 p = NULL; 408 p = NULL;
409 } 409 }
410 } 410 }
@@ -414,7 +414,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
414 } else { 414 } else {
415 ret = -EINVAL; 415 ret = -EINVAL;
416 } 416 }
417 read_unlock(&tasklist_lock); 417 rcu_read_unlock();
418 418
419 return ret; 419 return ret;
420} 420}
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 657272e91d0a..048d0b514831 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -327,7 +327,6 @@ static int create_image(int platform_mode)
327int hibernation_snapshot(int platform_mode) 327int hibernation_snapshot(int platform_mode)
328{ 328{
329 int error; 329 int error;
330 gfp_t saved_mask;
331 330
332 error = platform_begin(platform_mode); 331 error = platform_begin(platform_mode);
333 if (error) 332 if (error)
@@ -339,7 +338,7 @@ int hibernation_snapshot(int platform_mode)
339 goto Close; 338 goto Close;
340 339
341 suspend_console(); 340 suspend_console();
342 saved_mask = clear_gfp_allowed_mask(GFP_IOFS); 341 pm_restrict_gfp_mask();
343 error = dpm_suspend_start(PMSG_FREEZE); 342 error = dpm_suspend_start(PMSG_FREEZE);
344 if (error) 343 if (error)
345 goto Recover_platform; 344 goto Recover_platform;
@@ -348,7 +347,10 @@ int hibernation_snapshot(int platform_mode)
348 goto Recover_platform; 347 goto Recover_platform;
349 348
350 error = create_image(platform_mode); 349 error = create_image(platform_mode);
351 /* Control returns here after successful restore */ 350 /*
351 * Control returns here (1) after the image has been created or the
352 * image creation has failed and (2) after a successful restore.
353 */
352 354
353 Resume_devices: 355 Resume_devices:
354 /* We may need to release the preallocated image pages here. */ 356 /* We may need to release the preallocated image pages here. */
@@ -357,7 +359,10 @@ int hibernation_snapshot(int platform_mode)
357 359
358 dpm_resume_end(in_suspend ? 360 dpm_resume_end(in_suspend ?
359 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE); 361 (error ? PMSG_RECOVER : PMSG_THAW) : PMSG_RESTORE);
360 set_gfp_allowed_mask(saved_mask); 362
363 if (error || !in_suspend)
364 pm_restore_gfp_mask();
365
361 resume_console(); 366 resume_console();
362 Close: 367 Close:
363 platform_end(platform_mode); 368 platform_end(platform_mode);
@@ -452,17 +457,16 @@ static int resume_target_kernel(bool platform_mode)
452int hibernation_restore(int platform_mode) 457int hibernation_restore(int platform_mode)
453{ 458{
454 int error; 459 int error;
455 gfp_t saved_mask;
456 460
457 pm_prepare_console(); 461 pm_prepare_console();
458 suspend_console(); 462 suspend_console();
459 saved_mask = clear_gfp_allowed_mask(GFP_IOFS); 463 pm_restrict_gfp_mask();
460 error = dpm_suspend_start(PMSG_QUIESCE); 464 error = dpm_suspend_start(PMSG_QUIESCE);
461 if (!error) { 465 if (!error) {
462 error = resume_target_kernel(platform_mode); 466 error = resume_target_kernel(platform_mode);
463 dpm_resume_end(PMSG_RECOVER); 467 dpm_resume_end(PMSG_RECOVER);
464 } 468 }
465 set_gfp_allowed_mask(saved_mask); 469 pm_restore_gfp_mask();
466 resume_console(); 470 resume_console();
467 pm_restore_console(); 471 pm_restore_console();
468 return error; 472 return error;
@@ -476,7 +480,6 @@ int hibernation_restore(int platform_mode)
476int hibernation_platform_enter(void) 480int hibernation_platform_enter(void)
477{ 481{
478 int error; 482 int error;
479 gfp_t saved_mask;
480 483
481 if (!hibernation_ops) 484 if (!hibernation_ops)
482 return -ENOSYS; 485 return -ENOSYS;
@@ -492,7 +495,6 @@ int hibernation_platform_enter(void)
492 495
493 entering_platform_hibernation = true; 496 entering_platform_hibernation = true;
494 suspend_console(); 497 suspend_console();
495 saved_mask = clear_gfp_allowed_mask(GFP_IOFS);
496 error = dpm_suspend_start(PMSG_HIBERNATE); 498 error = dpm_suspend_start(PMSG_HIBERNATE);
497 if (error) { 499 if (error) {
498 if (hibernation_ops->recover) 500 if (hibernation_ops->recover)
@@ -536,7 +538,6 @@ int hibernation_platform_enter(void)
536 Resume_devices: 538 Resume_devices:
537 entering_platform_hibernation = false; 539 entering_platform_hibernation = false;
538 dpm_resume_end(PMSG_RESTORE); 540 dpm_resume_end(PMSG_RESTORE);
539 set_gfp_allowed_mask(saved_mask);
540 resume_console(); 541 resume_console();
541 542
542 Close: 543 Close:
@@ -646,6 +647,7 @@ int hibernate(void)
646 swsusp_free(); 647 swsusp_free();
647 if (!error) 648 if (!error)
648 power_down(); 649 power_down();
650 pm_restore_gfp_mask();
649 } else { 651 } else {
650 pr_debug("PM: Image restored successfully.\n"); 652 pr_debug("PM: Image restored successfully.\n");
651 } 653 }
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 7335952ee473..ecf770509d0d 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -197,7 +197,6 @@ static int suspend_enter(suspend_state_t state)
197int suspend_devices_and_enter(suspend_state_t state) 197int suspend_devices_and_enter(suspend_state_t state)
198{ 198{
199 int error; 199 int error;
200 gfp_t saved_mask;
201 200
202 if (!suspend_ops) 201 if (!suspend_ops)
203 return -ENOSYS; 202 return -ENOSYS;
@@ -208,7 +207,7 @@ int suspend_devices_and_enter(suspend_state_t state)
208 goto Close; 207 goto Close;
209 } 208 }
210 suspend_console(); 209 suspend_console();
211 saved_mask = clear_gfp_allowed_mask(GFP_IOFS); 210 pm_restrict_gfp_mask();
212 suspend_test_start(); 211 suspend_test_start();
213 error = dpm_suspend_start(PMSG_SUSPEND); 212 error = dpm_suspend_start(PMSG_SUSPEND);
214 if (error) { 213 if (error) {
@@ -225,7 +224,7 @@ int suspend_devices_and_enter(suspend_state_t state)
225 suspend_test_start(); 224 suspend_test_start();
226 dpm_resume_end(PMSG_RESUME); 225 dpm_resume_end(PMSG_RESUME);
227 suspend_test_finish("resume devices"); 226 suspend_test_finish("resume devices");
228 set_gfp_allowed_mask(saved_mask); 227 pm_restore_gfp_mask();
229 resume_console(); 228 resume_console();
230 Close: 229 Close:
231 if (suspend_ops->end) 230 if (suspend_ops->end)
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index a0e4a86ccf94..baf667bb2794 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -6,6 +6,7 @@
6 * 6 *
7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> 7 * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz>
8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> 8 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
9 * Copyright (C) 2010 Bojan Smojver <bojan@rexursive.com>
9 * 10 *
10 * This file is released under the GPLv2. 11 * This file is released under the GPLv2.
11 * 12 *
@@ -753,30 +754,43 @@ static int load_image_lzo(struct swap_map_handle *handle,
753{ 754{
754 unsigned int m; 755 unsigned int m;
755 int error = 0; 756 int error = 0;
757 struct bio *bio;
756 struct timeval start; 758 struct timeval start;
757 struct timeval stop; 759 struct timeval stop;
758 unsigned nr_pages; 760 unsigned nr_pages;
759 size_t off, unc_len, cmp_len; 761 size_t i, off, unc_len, cmp_len;
760 unsigned char *unc, *cmp, *page; 762 unsigned char *unc, *cmp, *page[LZO_CMP_PAGES];
761 763
762 page = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH); 764 for (i = 0; i < LZO_CMP_PAGES; i++) {
763 if (!page) { 765 page[i] = (void *)__get_free_page(__GFP_WAIT | __GFP_HIGH);
764 printk(KERN_ERR "PM: Failed to allocate LZO page\n"); 766 if (!page[i]) {
765 return -ENOMEM; 767 printk(KERN_ERR "PM: Failed to allocate LZO page\n");
768
769 while (i)
770 free_page((unsigned long)page[--i]);
771
772 return -ENOMEM;
773 }
766 } 774 }
767 775
768 unc = vmalloc(LZO_UNC_SIZE); 776 unc = vmalloc(LZO_UNC_SIZE);
769 if (!unc) { 777 if (!unc) {
770 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n"); 778 printk(KERN_ERR "PM: Failed to allocate LZO uncompressed\n");
771 free_page((unsigned long)page); 779
780 for (i = 0; i < LZO_CMP_PAGES; i++)
781 free_page((unsigned long)page[i]);
782
772 return -ENOMEM; 783 return -ENOMEM;
773 } 784 }
774 785
775 cmp = vmalloc(LZO_CMP_SIZE); 786 cmp = vmalloc(LZO_CMP_SIZE);
776 if (!cmp) { 787 if (!cmp) {
777 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n"); 788 printk(KERN_ERR "PM: Failed to allocate LZO compressed\n");
789
778 vfree(unc); 790 vfree(unc);
779 free_page((unsigned long)page); 791 for (i = 0; i < LZO_CMP_PAGES; i++)
792 free_page((unsigned long)page[i]);
793
780 return -ENOMEM; 794 return -ENOMEM;
781 } 795 }
782 796
@@ -787,6 +801,7 @@ static int load_image_lzo(struct swap_map_handle *handle,
787 if (!m) 801 if (!m)
788 m = 1; 802 m = 1;
789 nr_pages = 0; 803 nr_pages = 0;
804 bio = NULL;
790 do_gettimeofday(&start); 805 do_gettimeofday(&start);
791 806
792 error = snapshot_write_next(snapshot); 807 error = snapshot_write_next(snapshot);
@@ -794,11 +809,11 @@ static int load_image_lzo(struct swap_map_handle *handle,
794 goto out_finish; 809 goto out_finish;
795 810
796 for (;;) { 811 for (;;) {
797 error = swap_read_page(handle, page, NULL); /* sync */ 812 error = swap_read_page(handle, page[0], NULL); /* sync */
798 if (error) 813 if (error)
799 break; 814 break;
800 815
801 cmp_len = *(size_t *)page; 816 cmp_len = *(size_t *)page[0];
802 if (unlikely(!cmp_len || 817 if (unlikely(!cmp_len ||
803 cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { 818 cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) {
804 printk(KERN_ERR "PM: Invalid LZO compressed length\n"); 819 printk(KERN_ERR "PM: Invalid LZO compressed length\n");
@@ -806,13 +821,20 @@ static int load_image_lzo(struct swap_map_handle *handle,
806 break; 821 break;
807 } 822 }
808 823
809 memcpy(cmp, page, PAGE_SIZE); 824 for (off = PAGE_SIZE, i = 1;
810 for (off = PAGE_SIZE; off < LZO_HEADER + cmp_len; off += PAGE_SIZE) { 825 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
811 error = swap_read_page(handle, page, NULL); /* sync */ 826 error = swap_read_page(handle, page[i], &bio);
812 if (error) 827 if (error)
813 goto out_finish; 828 goto out_finish;
829 }
814 830
815 memcpy(cmp + off, page, PAGE_SIZE); 831 error = hib_wait_on_bio_chain(&bio); /* need all data now */
832 if (error)
833 goto out_finish;
834
835 for (off = 0, i = 0;
836 off < LZO_HEADER + cmp_len; off += PAGE_SIZE, i++) {
837 memcpy(cmp + off, page[i], PAGE_SIZE);
816 } 838 }
817 839
818 unc_len = LZO_UNC_SIZE; 840 unc_len = LZO_UNC_SIZE;
@@ -857,7 +879,8 @@ out_finish:
857 879
858 vfree(cmp); 880 vfree(cmp);
859 vfree(unc); 881 vfree(unc);
860 free_page((unsigned long)page); 882 for (i = 0; i < LZO_CMP_PAGES; i++)
883 free_page((unsigned long)page[i]);
861 884
862 return error; 885 return error;
863} 886}
diff --git a/kernel/power/user.c b/kernel/power/user.c
index e819e17877ca..1b2ea31e6bd8 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -263,6 +263,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
263 case SNAPSHOT_UNFREEZE: 263 case SNAPSHOT_UNFREEZE:
264 if (!data->frozen || data->ready) 264 if (!data->frozen || data->ready)
265 break; 265 break;
266 pm_restore_gfp_mask();
266 thaw_processes(); 267 thaw_processes();
267 usermodehelper_enable(); 268 usermodehelper_enable();
268 data->frozen = 0; 269 data->frozen = 0;
@@ -275,6 +276,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
275 error = -EPERM; 276 error = -EPERM;
276 break; 277 break;
277 } 278 }
279 pm_restore_gfp_mask();
278 error = hibernation_snapshot(data->platform_support); 280 error = hibernation_snapshot(data->platform_support);
279 if (!error) 281 if (!error)
280 error = put_user(in_suspend, (int __user *)arg); 282 error = put_user(in_suspend, (int __user *)arg);
diff --git a/kernel/printk.c b/kernel/printk.c
index 9a2264fc42ca..a23315dc4498 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1082,13 +1082,15 @@ void printk_tick(void)
1082 1082
1083int printk_needs_cpu(int cpu) 1083int printk_needs_cpu(int cpu)
1084{ 1084{
1085 if (unlikely(cpu_is_offline(cpu)))
1086 printk_tick();
1085 return per_cpu(printk_pending, cpu); 1087 return per_cpu(printk_pending, cpu);
1086} 1088}
1087 1089
1088void wake_up_klogd(void) 1090void wake_up_klogd(void)
1089{ 1091{
1090 if (waitqueue_active(&log_wait)) 1092 if (waitqueue_active(&log_wait))
1091 __raw_get_cpu_var(printk_pending) = 1; 1093 this_cpu_write(printk_pending, 1);
1092} 1094}
1093 1095
1094/** 1096/**
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 52ab113d8bb9..00ebd7686676 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1758,10 +1758,6 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
1758 set_task_cpu(p, this_cpu); 1758 set_task_cpu(p, this_cpu);
1759 activate_task(this_rq, p, 0); 1759 activate_task(this_rq, p, 0);
1760 check_preempt_curr(this_rq, p, 0); 1760 check_preempt_curr(this_rq, p, 0);
1761
1762 /* re-arm NEWIDLE balancing when moving tasks */
1763 src_rq->avg_idle = this_rq->avg_idle = 2*sysctl_sched_migration_cost;
1764 this_rq->idle_stamp = 0;
1765} 1761}
1766 1762
1767/* 1763/*
@@ -3219,8 +3215,10 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
3219 interval = msecs_to_jiffies(sd->balance_interval); 3215 interval = msecs_to_jiffies(sd->balance_interval);
3220 if (time_after(next_balance, sd->last_balance + interval)) 3216 if (time_after(next_balance, sd->last_balance + interval))
3221 next_balance = sd->last_balance + interval; 3217 next_balance = sd->last_balance + interval;
3222 if (pulled_task) 3218 if (pulled_task) {
3219 this_rq->idle_stamp = 0;
3223 break; 3220 break;
3221 }
3224 } 3222 }
3225 3223
3226 raw_spin_lock(&this_rq->lock); 3224 raw_spin_lock(&this_rq->lock);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 042084157980..c380612273bf 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1283,6 +1283,8 @@ void trace_dump_stack(void)
1283 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count()); 1283 __ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count());
1284} 1284}
1285 1285
1286static DEFINE_PER_CPU(int, user_stack_count);
1287
1286void 1288void
1287ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc) 1289ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1288{ 1290{
@@ -1301,6 +1303,18 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1301 if (unlikely(in_nmi())) 1303 if (unlikely(in_nmi()))
1302 return; 1304 return;
1303 1305
1306 /*
1307 * prevent recursion, since the user stack tracing may
1308 * trigger other kernel events.
1309 */
1310 preempt_disable();
1311 if (__this_cpu_read(user_stack_count))
1312 goto out;
1313
1314 __this_cpu_inc(user_stack_count);
1315
1316
1317
1304 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK, 1318 event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
1305 sizeof(*entry), flags, pc); 1319 sizeof(*entry), flags, pc);
1306 if (!event) 1320 if (!event)
@@ -1318,6 +1332,11 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
1318 save_stack_trace_user(&trace); 1332 save_stack_trace_user(&trace);
1319 if (!filter_check_discard(call, entry, buffer, event)) 1333 if (!filter_check_discard(call, entry, buffer, event))
1320 ring_buffer_unlock_commit(buffer, event); 1334 ring_buffer_unlock_commit(buffer, event);
1335
1336 __this_cpu_dec(user_stack_count);
1337
1338 out:
1339 preempt_enable();
1321} 1340}
1322 1341
1323#ifdef UNUSED 1342#ifdef UNUSED
diff --git a/lib/debug_locks.c b/lib/debug_locks.c
index 5bf0020b9248..b1c177307677 100644
--- a/lib/debug_locks.c
+++ b/lib/debug_locks.c
@@ -8,7 +8,6 @@
8 * 8 *
9 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> 9 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 */ 10 */
11#include <linux/kernel.h>
12#include <linux/rwsem.h> 11#include <linux/rwsem.h>
13#include <linux/mutex.h> 12#include <linux/mutex.h>
14#include <linux/module.h> 13#include <linux/module.h>
@@ -39,7 +38,6 @@ int debug_locks_off(void)
39{ 38{
40 if (__debug_locks_off()) { 39 if (__debug_locks_off()) {
41 if (!debug_locks_silent) { 40 if (!debug_locks_silent) {
42 oops_in_progress = 1;
43 console_verbose(); 41 console_verbose();
44 return 1; 42 return 1;
45 } 43 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index c4a3558589ab..85855240933d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2738,7 +2738,8 @@ out_page_table_lock:
2738 unlock_page(pagecache_page); 2738 unlock_page(pagecache_page);
2739 put_page(pagecache_page); 2739 put_page(pagecache_page);
2740 } 2740 }
2741 unlock_page(page); 2741 if (page != pagecache_page)
2742 unlock_page(page);
2742 2743
2743out_mutex: 2744out_mutex:
2744 mutex_unlock(&hugetlb_instantiation_mutex); 2745 mutex_unlock(&hugetlb_instantiation_mutex);
diff --git a/mm/ksm.c b/mm/ksm.c
index 65ab5c7067d9..43bc893470b4 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1724,8 +1724,13 @@ static int ksm_memory_callback(struct notifier_block *self,
1724 /* 1724 /*
1725 * Keep it very simple for now: just lock out ksmd and 1725 * Keep it very simple for now: just lock out ksmd and
1726 * MADV_UNMERGEABLE while any memory is going offline. 1726 * MADV_UNMERGEABLE while any memory is going offline.
1727 * mutex_lock_nested() is necessary because lockdep was alarmed
1728 * that here we take ksm_thread_mutex inside notifier chain
1729 * mutex, and later take notifier chain mutex inside
1730 * ksm_thread_mutex to unlock it. But that's safe because both
1731 * are inside mem_hotplug_mutex.
1727 */ 1732 */
1728 mutex_lock(&ksm_thread_mutex); 1733 mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING);
1729 break; 1734 break;
1730 1735
1731 case MEM_OFFLINE: 1736 case MEM_OFFLINE:
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2efa8ea07ff7..7a22b4129211 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -61,7 +61,14 @@ struct mem_cgroup *root_mem_cgroup __read_mostly;
61#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 61#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
62/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 62/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
63int do_swap_account __read_mostly; 63int do_swap_account __read_mostly;
64static int really_do_swap_account __initdata = 1; /* for remember boot option*/ 64
65/* remembers the boot option */
66#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP_ENABLED
67static int really_do_swap_account __initdata = 1;
68#else
69static int really_do_swap_account __initdata = 0;
70#endif
71
65#else 72#else
66#define do_swap_account (0) 73#define do_swap_account (0)
67#endif 74#endif
@@ -278,13 +285,14 @@ enum move_type {
278 285
279/* "mc" and its members are protected by cgroup_mutex */ 286/* "mc" and its members are protected by cgroup_mutex */
280static struct move_charge_struct { 287static struct move_charge_struct {
281 spinlock_t lock; /* for from, to, moving_task */ 288 spinlock_t lock; /* for from, to */
282 struct mem_cgroup *from; 289 struct mem_cgroup *from;
283 struct mem_cgroup *to; 290 struct mem_cgroup *to;
284 unsigned long precharge; 291 unsigned long precharge;
285 unsigned long moved_charge; 292 unsigned long moved_charge;
286 unsigned long moved_swap; 293 unsigned long moved_swap;
287 struct task_struct *moving_task; /* a task moving charges */ 294 struct task_struct *moving_task; /* a task moving charges */
295 struct mm_struct *mm;
288 wait_queue_head_t waitq; /* a waitq for other context */ 296 wait_queue_head_t waitq; /* a waitq for other context */
289} mc = { 297} mc = {
290 .lock = __SPIN_LOCK_UNLOCKED(mc.lock), 298 .lock = __SPIN_LOCK_UNLOCKED(mc.lock),
@@ -2152,7 +2160,7 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
2152{ 2160{
2153 VM_BUG_ON(from == to); 2161 VM_BUG_ON(from == to);
2154 VM_BUG_ON(PageLRU(pc->page)); 2162 VM_BUG_ON(PageLRU(pc->page));
2155 VM_BUG_ON(!PageCgroupLocked(pc)); 2163 VM_BUG_ON(!page_is_cgroup_locked(pc));
2156 VM_BUG_ON(!PageCgroupUsed(pc)); 2164 VM_BUG_ON(!PageCgroupUsed(pc));
2157 VM_BUG_ON(pc->mem_cgroup != from); 2165 VM_BUG_ON(pc->mem_cgroup != from);
2158 2166
@@ -4631,7 +4639,7 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4631 unsigned long precharge; 4639 unsigned long precharge;
4632 struct vm_area_struct *vma; 4640 struct vm_area_struct *vma;
4633 4641
4634 down_read(&mm->mmap_sem); 4642 /* We already hold the mmap_sem */
4635 for (vma = mm->mmap; vma; vma = vma->vm_next) { 4643 for (vma = mm->mmap; vma; vma = vma->vm_next) {
4636 struct mm_walk mem_cgroup_count_precharge_walk = { 4644 struct mm_walk mem_cgroup_count_precharge_walk = {
4637 .pmd_entry = mem_cgroup_count_precharge_pte_range, 4645 .pmd_entry = mem_cgroup_count_precharge_pte_range,
@@ -4643,7 +4651,6 @@ static unsigned long mem_cgroup_count_precharge(struct mm_struct *mm)
4643 walk_page_range(vma->vm_start, vma->vm_end, 4651 walk_page_range(vma->vm_start, vma->vm_end,
4644 &mem_cgroup_count_precharge_walk); 4652 &mem_cgroup_count_precharge_walk);
4645 } 4653 }
4646 up_read(&mm->mmap_sem);
4647 4654
4648 precharge = mc.precharge; 4655 precharge = mc.precharge;
4649 mc.precharge = 0; 4656 mc.precharge = 0;
@@ -4694,11 +4701,16 @@ static void mem_cgroup_clear_mc(void)
4694 4701
4695 mc.moved_swap = 0; 4702 mc.moved_swap = 0;
4696 } 4703 }
4704 if (mc.mm) {
4705 up_read(&mc.mm->mmap_sem);
4706 mmput(mc.mm);
4707 }
4697 spin_lock(&mc.lock); 4708 spin_lock(&mc.lock);
4698 mc.from = NULL; 4709 mc.from = NULL;
4699 mc.to = NULL; 4710 mc.to = NULL;
4700 mc.moving_task = NULL;
4701 spin_unlock(&mc.lock); 4711 spin_unlock(&mc.lock);
4712 mc.moving_task = NULL;
4713 mc.mm = NULL;
4702 mem_cgroup_end_move(from); 4714 mem_cgroup_end_move(from);
4703 memcg_oom_recover(from); 4715 memcg_oom_recover(from);
4704 memcg_oom_recover(to); 4716 memcg_oom_recover(to);
@@ -4724,12 +4736,21 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4724 return 0; 4736 return 0;
4725 /* We move charges only when we move a owner of the mm */ 4737 /* We move charges only when we move a owner of the mm */
4726 if (mm->owner == p) { 4738 if (mm->owner == p) {
4739 /*
4740 * We do all the move charge work under one mmap_sem to
4741 * avoid deadlock with down_write(&mmap_sem)
4742 * -> try_charge() -> if (mc.moving_task) -> sleep.
4743 */
4744 down_read(&mm->mmap_sem);
4745
4727 VM_BUG_ON(mc.from); 4746 VM_BUG_ON(mc.from);
4728 VM_BUG_ON(mc.to); 4747 VM_BUG_ON(mc.to);
4729 VM_BUG_ON(mc.precharge); 4748 VM_BUG_ON(mc.precharge);
4730 VM_BUG_ON(mc.moved_charge); 4749 VM_BUG_ON(mc.moved_charge);
4731 VM_BUG_ON(mc.moved_swap); 4750 VM_BUG_ON(mc.moved_swap);
4732 VM_BUG_ON(mc.moving_task); 4751 VM_BUG_ON(mc.moving_task);
4752 VM_BUG_ON(mc.mm);
4753
4733 mem_cgroup_start_move(from); 4754 mem_cgroup_start_move(from);
4734 spin_lock(&mc.lock); 4755 spin_lock(&mc.lock);
4735 mc.from = from; 4756 mc.from = from;
@@ -4737,14 +4758,16 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
4737 mc.precharge = 0; 4758 mc.precharge = 0;
4738 mc.moved_charge = 0; 4759 mc.moved_charge = 0;
4739 mc.moved_swap = 0; 4760 mc.moved_swap = 0;
4740 mc.moving_task = current;
4741 spin_unlock(&mc.lock); 4761 spin_unlock(&mc.lock);
4762 mc.moving_task = current;
4763 mc.mm = mm;
4742 4764
4743 ret = mem_cgroup_precharge_mc(mm); 4765 ret = mem_cgroup_precharge_mc(mm);
4744 if (ret) 4766 if (ret)
4745 mem_cgroup_clear_mc(); 4767 mem_cgroup_clear_mc();
4746 } 4768 /* We call up_read() and mmput() in clear_mc(). */
4747 mmput(mm); 4769 } else
4770 mmput(mm);
4748 } 4771 }
4749 return ret; 4772 return ret;
4750} 4773}
@@ -4832,7 +4855,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
4832 struct vm_area_struct *vma; 4855 struct vm_area_struct *vma;
4833 4856
4834 lru_add_drain_all(); 4857 lru_add_drain_all();
4835 down_read(&mm->mmap_sem); 4858 /* We already hold the mmap_sem */
4836 for (vma = mm->mmap; vma; vma = vma->vm_next) { 4859 for (vma = mm->mmap; vma; vma = vma->vm_next) {
4837 int ret; 4860 int ret;
4838 struct mm_walk mem_cgroup_move_charge_walk = { 4861 struct mm_walk mem_cgroup_move_charge_walk = {
@@ -4851,7 +4874,6 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
4851 */ 4874 */
4852 break; 4875 break;
4853 } 4876 }
4854 up_read(&mm->mmap_sem);
4855} 4877}
4856 4878
4857static void mem_cgroup_move_task(struct cgroup_subsys *ss, 4879static void mem_cgroup_move_task(struct cgroup_subsys *ss,
@@ -4860,17 +4882,11 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
4860 struct task_struct *p, 4882 struct task_struct *p,
4861 bool threadgroup) 4883 bool threadgroup)
4862{ 4884{
4863 struct mm_struct *mm; 4885 if (!mc.mm)
4864
4865 if (!mc.to)
4866 /* no need to move charge */ 4886 /* no need to move charge */
4867 return; 4887 return;
4868 4888
4869 mm = get_task_mm(p); 4889 mem_cgroup_move_charge(mc.mm);
4870 if (mm) {
4871 mem_cgroup_move_charge(mm);
4872 mmput(mm);
4873 }
4874 mem_cgroup_clear_mc(); 4890 mem_cgroup_clear_mc();
4875} 4891}
4876#else /* !CONFIG_MMU */ 4892#else /* !CONFIG_MMU */
@@ -4911,10 +4927,20 @@ struct cgroup_subsys mem_cgroup_subsys = {
4911}; 4927};
4912 4928
4913#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4929#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
4930static int __init enable_swap_account(char *s)
4931{
4932 /* consider enabled if no parameter or 1 is given */
4933 if (!s || !strcmp(s, "1"))
4934 really_do_swap_account = 1;
4935 else if (!strcmp(s, "0"))
4936 really_do_swap_account = 0;
4937 return 1;
4938}
4939__setup("swapaccount", enable_swap_account);
4914 4940
4915static int __init disable_swap_account(char *s) 4941static int __init disable_swap_account(char *s)
4916{ 4942{
4917 really_do_swap_account = 0; 4943 enable_swap_account("0");
4918 return 1; 4944 return 1;
4919} 4945}
4920__setup("noswapaccount", disable_swap_account); 4946__setup("noswapaccount", disable_swap_account);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 124324134ff6..46ab2c044b0e 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -51,6 +51,7 @@
51#include <linux/slab.h> 51#include <linux/slab.h>
52#include <linux/swapops.h> 52#include <linux/swapops.h>
53#include <linux/hugetlb.h> 53#include <linux/hugetlb.h>
54#include <linux/memory_hotplug.h>
54#include "internal.h" 55#include "internal.h"
55 56
56int sysctl_memory_failure_early_kill __read_mostly = 0; 57int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -1230,11 +1231,10 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
1230 return 1; 1231 return 1;
1231 1232
1232 /* 1233 /*
1233 * The lock_system_sleep prevents a race with memory hotplug, 1234 * The lock_memory_hotplug prevents a race with memory hotplug.
1234 * because the isolation assumes there's only a single user.
1235 * This is a big hammer, a better fix would be nicer. 1235 * This is a big hammer, a better fix would be nicer.
1236 */ 1236 */
1237 lock_system_sleep(); 1237 lock_memory_hotplug();
1238 1238
1239 /* 1239 /*
1240 * Isolate the page, so that it doesn't get reallocated if it 1240 * Isolate the page, so that it doesn't get reallocated if it
@@ -1264,7 +1264,7 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
1264 ret = 1; 1264 ret = 1;
1265 } 1265 }
1266 unset_migratetype_isolate(p); 1266 unset_migratetype_isolate(p);
1267 unlock_system_sleep(); 1267 unlock_memory_hotplug();
1268 return ret; 1268 return ret;
1269} 1269}
1270 1270
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9260314a221e..2c6523af5473 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -34,6 +34,23 @@
34 34
35#include "internal.h" 35#include "internal.h"
36 36
37DEFINE_MUTEX(mem_hotplug_mutex);
38
39void lock_memory_hotplug(void)
40{
41 mutex_lock(&mem_hotplug_mutex);
42
43 /* for exclusive hibernation if CONFIG_HIBERNATION=y */
44 lock_system_sleep();
45}
46
47void unlock_memory_hotplug(void)
48{
49 unlock_system_sleep();
50 mutex_unlock(&mem_hotplug_mutex);
51}
52
53
37/* add this memory to iomem resource */ 54/* add this memory to iomem resource */
38static struct resource *register_memory_resource(u64 start, u64 size) 55static struct resource *register_memory_resource(u64 start, u64 size)
39{ 56{
@@ -493,7 +510,7 @@ int mem_online_node(int nid)
493 pg_data_t *pgdat; 510 pg_data_t *pgdat;
494 int ret; 511 int ret;
495 512
496 lock_system_sleep(); 513 lock_memory_hotplug();
497 pgdat = hotadd_new_pgdat(nid, 0); 514 pgdat = hotadd_new_pgdat(nid, 0);
498 if (pgdat) { 515 if (pgdat) {
499 ret = -ENOMEM; 516 ret = -ENOMEM;
@@ -504,7 +521,7 @@ int mem_online_node(int nid)
504 BUG_ON(ret); 521 BUG_ON(ret);
505 522
506out: 523out:
507 unlock_system_sleep(); 524 unlock_memory_hotplug();
508 return ret; 525 return ret;
509} 526}
510 527
@@ -516,7 +533,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
516 struct resource *res; 533 struct resource *res;
517 int ret; 534 int ret;
518 535
519 lock_system_sleep(); 536 lock_memory_hotplug();
520 537
521 res = register_memory_resource(start, size); 538 res = register_memory_resource(start, size);
522 ret = -EEXIST; 539 ret = -EEXIST;
@@ -563,7 +580,7 @@ error:
563 release_memory_resource(res); 580 release_memory_resource(res);
564 581
565out: 582out:
566 unlock_system_sleep(); 583 unlock_memory_hotplug();
567 return ret; 584 return ret;
568} 585}
569EXPORT_SYMBOL_GPL(add_memory); 586EXPORT_SYMBOL_GPL(add_memory);
@@ -791,7 +808,7 @@ static int offline_pages(unsigned long start_pfn,
791 if (!test_pages_in_a_zone(start_pfn, end_pfn)) 808 if (!test_pages_in_a_zone(start_pfn, end_pfn))
792 return -EINVAL; 809 return -EINVAL;
793 810
794 lock_system_sleep(); 811 lock_memory_hotplug();
795 812
796 zone = page_zone(pfn_to_page(start_pfn)); 813 zone = page_zone(pfn_to_page(start_pfn));
797 node = zone_to_nid(zone); 814 node = zone_to_nid(zone);
@@ -880,7 +897,7 @@ repeat:
880 writeback_set_ratelimit(); 897 writeback_set_ratelimit();
881 898
882 memory_notify(MEM_OFFLINE, &arg); 899 memory_notify(MEM_OFFLINE, &arg);
883 unlock_system_sleep(); 900 unlock_memory_hotplug();
884 return 0; 901 return 0;
885 902
886failed_removal: 903failed_removal:
@@ -891,7 +908,7 @@ failed_removal:
891 undo_isolate_page_range(start_pfn, end_pfn); 908 undo_isolate_page_range(start_pfn, end_pfn);
892 909
893out: 910out:
894 unlock_system_sleep(); 911 unlock_memory_hotplug();
895 return ret; 912 return ret;
896} 913}
897 914
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 4a57f135b76e..11ff260fb282 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1307,15 +1307,18 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1307 goto out; 1307 goto out;
1308 1308
1309 /* Find the mm_struct */ 1309 /* Find the mm_struct */
1310 rcu_read_lock();
1310 read_lock(&tasklist_lock); 1311 read_lock(&tasklist_lock);
1311 task = pid ? find_task_by_vpid(pid) : current; 1312 task = pid ? find_task_by_vpid(pid) : current;
1312 if (!task) { 1313 if (!task) {
1313 read_unlock(&tasklist_lock); 1314 read_unlock(&tasklist_lock);
1315 rcu_read_unlock();
1314 err = -ESRCH; 1316 err = -ESRCH;
1315 goto out; 1317 goto out;
1316 } 1318 }
1317 mm = get_task_mm(task); 1319 mm = get_task_mm(task);
1318 read_unlock(&tasklist_lock); 1320 read_unlock(&tasklist_lock);
1321 rcu_read_unlock();
1319 1322
1320 err = -EINVAL; 1323 err = -EINVAL;
1321 if (!mm) 1324 if (!mm)
diff --git a/mm/nommu.c b/mm/nommu.c
index 3613517c7592..27a9ac588516 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1717,6 +1717,7 @@ void exit_mmap(struct mm_struct *mm)
1717 mm->mmap = vma->vm_next; 1717 mm->mmap = vma->vm_next;
1718 delete_vma_from_mm(vma); 1718 delete_vma_from_mm(vma);
1719 delete_vma(mm, vma); 1719 delete_vma(mm, vma);
1720 cond_resched();
1720 } 1721 }
1721 1722
1722 kleave(""); 1723 kleave("");
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 07a654486f75..ff7e15872398 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -104,19 +104,24 @@ gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
104 * only be modified with pm_mutex held, unless the suspend/hibernate code is 104 * only be modified with pm_mutex held, unless the suspend/hibernate code is
105 * guaranteed not to run in parallel with that modification). 105 * guaranteed not to run in parallel with that modification).
106 */ 106 */
107void set_gfp_allowed_mask(gfp_t mask) 107
108static gfp_t saved_gfp_mask;
109
110void pm_restore_gfp_mask(void)
108{ 111{
109 WARN_ON(!mutex_is_locked(&pm_mutex)); 112 WARN_ON(!mutex_is_locked(&pm_mutex));
110 gfp_allowed_mask = mask; 113 if (saved_gfp_mask) {
114 gfp_allowed_mask = saved_gfp_mask;
115 saved_gfp_mask = 0;
116 }
111} 117}
112 118
113gfp_t clear_gfp_allowed_mask(gfp_t mask) 119void pm_restrict_gfp_mask(void)
114{ 120{
115 gfp_t ret = gfp_allowed_mask;
116
117 WARN_ON(!mutex_is_locked(&pm_mutex)); 121 WARN_ON(!mutex_is_locked(&pm_mutex));
118 gfp_allowed_mask &= ~mask; 122 WARN_ON(saved_gfp_mask);
119 return ret; 123 saved_gfp_mask = gfp_allowed_mask;
124 gfp_allowed_mask &= ~GFP_IOFS;
120} 125}
121#endif /* CONFIG_PM_SLEEP */ 126#endif /* CONFIG_PM_SLEEP */
122 127
@@ -3008,14 +3013,6 @@ static __init_refok int __build_all_zonelists(void *data)
3008 build_zonelist_cache(pgdat); 3013 build_zonelist_cache(pgdat);
3009 } 3014 }
3010 3015
3011#ifdef CONFIG_MEMORY_HOTPLUG
3012 /* Setup real pagesets for the new zone */
3013 if (data) {
3014 struct zone *zone = data;
3015 setup_zone_pageset(zone);
3016 }
3017#endif
3018
3019 /* 3016 /*
3020 * Initialize the boot_pagesets that are going to be used 3017 * Initialize the boot_pagesets that are going to be used
3021 * for bootstrapping processors. The real pagesets for 3018 * for bootstrapping processors. The real pagesets for
@@ -3064,7 +3061,11 @@ void build_all_zonelists(void *data)
3064 } else { 3061 } else {
3065 /* we have to stop all cpus to guarantee there is no user 3062 /* we have to stop all cpus to guarantee there is no user
3066 of zonelist */ 3063 of zonelist */
3067 stop_machine(__build_all_zonelists, data, NULL); 3064#ifdef CONFIG_MEMORY_HOTPLUG
3065 if (data)
3066 setup_zone_pageset((struct zone *)data);
3067#endif
3068 stop_machine(__build_all_zonelists, NULL, NULL);
3068 /* cpuset refresh routine should be here */ 3069 /* cpuset refresh routine should be here */
3069 } 3070 }
3070 vm_total_pages = nr_free_pagecache_pages(); 3071 vm_total_pages = nr_free_pagecache_pages();
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 8b1a2ce21ee5..38cc58b8b2b0 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -139,7 +139,6 @@ int walk_page_range(unsigned long addr, unsigned long end,
139 pgd_t *pgd; 139 pgd_t *pgd;
140 unsigned long next; 140 unsigned long next;
141 int err = 0; 141 int err = 0;
142 struct vm_area_struct *vma;
143 142
144 if (addr >= end) 143 if (addr >= end)
145 return err; 144 return err;
@@ -149,15 +148,17 @@ int walk_page_range(unsigned long addr, unsigned long end,
149 148
150 pgd = pgd_offset(walk->mm, addr); 149 pgd = pgd_offset(walk->mm, addr);
151 do { 150 do {
151 struct vm_area_struct *uninitialized_var(vma);
152
152 next = pgd_addr_end(addr, end); 153 next = pgd_addr_end(addr, end);
153 154
155#ifdef CONFIG_HUGETLB_PAGE
154 /* 156 /*
155 * handle hugetlb vma individually because pagetable walk for 157 * handle hugetlb vma individually because pagetable walk for
156 * the hugetlb page is dependent on the architecture and 158 * the hugetlb page is dependent on the architecture and
157 * we can't handle it in the same manner as non-huge pages. 159 * we can't handle it in the same manner as non-huge pages.
158 */ 160 */
159 vma = find_vma(walk->mm, addr); 161 vma = find_vma(walk->mm, addr);
160#ifdef CONFIG_HUGETLB_PAGE
161 if (vma && is_vm_hugetlb_page(vma)) { 162 if (vma && is_vm_hugetlb_page(vma)) {
162 if (vma->vm_end < next) 163 if (vma->vm_end < next)
163 next = vma->vm_end; 164 next = vma->vm_end;
diff --git a/mm/slub.c b/mm/slub.c
index 981fb730aa04..bec0e355fbad 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3401,13 +3401,13 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
3401 3401
3402 for_each_free_object(p, s, page->freelist) { 3402 for_each_free_object(p, s, page->freelist) {
3403 set_bit(slab_index(p, s, addr), map); 3403 set_bit(slab_index(p, s, addr), map);
3404 if (!check_object(s, page, p, 0)) 3404 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3405 return 0; 3405 return 0;
3406 } 3406 }
3407 3407
3408 for_each_object(p, s, addr, page->objects) 3408 for_each_object(p, s, addr, page->objects)
3409 if (!test_bit(slab_index(p, s, addr), map)) 3409 if (!test_bit(slab_index(p, s, addr), map))
3410 if (!check_object(s, page, p, 1)) 3410 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
3411 return 0; 3411 return 0;
3412 return 1; 3412 return 1;
3413} 3413}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a3d66b3dc5cb..eb5cc7d00c5a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -31,8 +31,6 @@
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
 
-bool vmap_lazy_unmap __read_mostly = true;
-
 /*** Page table manipulation functions ***/
 
 static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
@@ -503,9 +501,6 @@ static unsigned long lazy_max_pages(void)
 {
 	unsigned int log;
 
-	if (!vmap_lazy_unmap)
-		return 0;
-
 	log = fls(num_online_cpus());
 
 	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
@@ -566,7 +561,6 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 		if (va->va_end > *end)
 			*end = va->va_end;
 		nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
-		unmap_vmap_area(va);
 		list_add_tail(&va->purge_list, &valist);
 		va->flags |= VM_LAZY_FREEING;
 		va->flags &= ~VM_LAZY_FREE;
@@ -611,10 +605,11 @@ static void purge_vmap_area_lazy(void)
 }
 
 /*
- * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
- * called for the correct range previously.
+ * Free a vmap area, caller ensuring that the area has been unmapped
+ * and flush_cache_vunmap had been called for the correct range
+ * previously.
  */
-static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+static void free_vmap_area_noflush(struct vmap_area *va)
 {
 	va->flags |= VM_LAZY_FREE;
 	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
@@ -623,6 +618,16 @@ static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 }
 
 /*
+ * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
+ * called for the correct range previously.
+ */
+static void free_unmap_vmap_area_noflush(struct vmap_area *va)
+{
+	unmap_vmap_area(va);
+	free_vmap_area_noflush(va);
+}
+
+/*
  * Free and unmap a vmap area
  */
 static void free_unmap_vmap_area(struct vmap_area *va)
@@ -798,7 +803,7 @@ static void free_vmap_block(struct vmap_block *vb)
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
-	free_unmap_vmap_area_noflush(vb->va);
+	free_vmap_area_noflush(vb->va);
 	call_rcu(&vb->rcu_head, rcu_free_vb);
 }
 
@@ -936,6 +941,8 @@ static void vb_free(const void *addr, unsigned long size)
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
+	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
+
 	spin_lock(&vb->lock);
 	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 
@@ -988,7 +995,6 @@ void vm_unmap_aliases(void)
 
 			s = vb->va->va_start + (i << PAGE_SHIFT);
 			e = vb->va->va_start + (j << PAGE_SHIFT);
-			vunmap_page_range(s, e);
 			flush = 1;
 
 			if (s < start)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 42eac4d33216..8f62f17ee1c7 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -750,8 +750,6 @@ static const char * const vmstat_text[] = {
 	"nr_shmem",
 	"nr_dirtied",
 	"nr_written",
-	"nr_dirty_threshold",
-	"nr_dirty_background_threshold",
 
 #ifdef CONFIG_NUMA
 	"numa_hit",
@@ -761,6 +759,8 @@ static const char * const vmstat_text[] = {
 	"numa_local",
 	"numa_other",
 #endif
+	"nr_dirty_threshold",
+	"nr_dirty_background_threshold",
 
 #ifdef CONFIG_VM_EVENT_COUNTERS
 	"pgpgin",
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index aab1cabb8035..5f19415ec9c0 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -1,9 +1,6 @@
 #
 # Makefile for CEPH filesystem.
 #
-
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_LIB) += libceph.o
 
 libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
@@ -16,22 +13,3 @@ libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
 	ceph_fs.o ceph_strings.o ceph_hash.o \
 	pagevec.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
-
-modules_install:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
-
-clean:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 53d8abfa25d5..bf3e6a13c215 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -19,7 +19,7 @@ struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
 	if (b->vec.iov_base) {
 		b->is_vmalloc = false;
 	} else {
-		b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
+		b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
 		if (!b->vec.iov_base) {
 			kfree(b);
 			return NULL;
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 7552495aff7a..fceeb37d7161 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -45,9 +45,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
 	if (lopt_size > PAGE_SIZE)
-		lopt = __vmalloc(lopt_size,
-			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+		lopt = vzalloc(lopt_size);
 	else
 		lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (lopt == NULL)
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 265985370fa1..e424a09e83f6 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -239,7 +239,8 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
 			dccp_update_gsr(sk, seqno);
 
 			if (dh->dccph_type != DCCP_PKT_SYNC &&
-			    (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
+			    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
+			    after48(ackno, dp->dccps_gar))
 				dp->dccps_gar = ackno;
 		} else {
 			unsigned long now = jiffies;
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index a76b78de679f..6f97268ed85f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1556,6 +1556,8 @@ static int __dn_getsockopt(struct socket *sock, int level,int optname, char __us
 	if (r_len > sizeof(struct linkinfo_dn))
 		r_len = sizeof(struct linkinfo_dn);
 
+	memset(&link, 0, sizeof(link));
+
 	switch(sock->state) {
 	case SS_CONNECTING:
 		link.idn_linkstate = LL_CONNECTING;
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index f8c1ae4b41f0..13992e1d2726 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/udp.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <linux/stat.h>
@@ -276,12 +277,12 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 #endif
 #ifdef CONFIG_ECONET_AUNUDP
 	struct msghdr udpmsg;
-	struct iovec iov[msg->msg_iovlen+1];
+	struct iovec iov[2];
 	struct aunhdr ah;
 	struct sockaddr_in udpdest;
 	__kernel_size_t size;
-	int i;
 	mm_segment_t oldfs;
+	char *userbuf;
 #endif
 
 	/*
@@ -297,23 +298,14 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	mutex_lock(&econet_mutex);
 
-	if (saddr == NULL) {
-		struct econet_sock *eo = ec_sk(sk);
-
-		addr.station = eo->station;
-		addr.net = eo->net;
-		port = eo->port;
-		cb = eo->cb;
-	} else {
-		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
-			mutex_unlock(&econet_mutex);
-			return -EINVAL;
-		}
-		addr.station = saddr->addr.station;
-		addr.net = saddr->addr.net;
-		port = saddr->port;
-		cb = saddr->cb;
-	}
+	if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+		mutex_unlock(&econet_mutex);
+		return -EINVAL;
+	}
+	addr.station = saddr->addr.station;
+	addr.net = saddr->addr.net;
+	port = saddr->port;
+	cb = saddr->cb;
 
 	/* Look for a device with the right network number. */
 	dev = net2dev_map[addr.net];
@@ -328,17 +320,17 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 		}
 	}
 
-	if (len + 15 > dev->mtu) {
-		mutex_unlock(&econet_mutex);
-		return -EMSGSIZE;
-	}
-
 	if (dev->type == ARPHRD_ECONET) {
 		/* Real hardware Econet. We're not worthy etc. */
 #ifdef CONFIG_ECONET_NATIVE
 		unsigned short proto = 0;
 		int res;
 
+		if (len + 15 > dev->mtu) {
+			mutex_unlock(&econet_mutex);
+			return -EMSGSIZE;
+		}
+
 		dev_hold(dev);
 
 		skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
@@ -351,7 +343,6 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 		eb = (struct ec_cb *)&skb->cb;
 
-		/* BUG: saddr may be NULL */
 		eb->cookie = saddr->cookie;
 		eb->sec = *saddr;
 		eb->sent = ec_tx_done;
@@ -415,6 +406,11 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 		return -ENETDOWN;		/* No socket - can't send */
 	}
 
+	if (len > 32768) {
+		err = -E2BIG;
+		goto error;
+	}
+
 	/* Make up a UDP datagram and hand it off to some higher intellect. */
 
 	memset(&udpdest, 0, sizeof(udpdest));
@@ -446,36 +442,26 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 
 	/* tack our header on the front of the iovec */
 	size = sizeof(struct aunhdr);
-	/*
-	 * XXX: that is b0rken. We can't mix userland and kernel pointers
-	 * in iovec, since on a lot of platforms copy_from_user() will
-	 * *not* work with the kernel and userland ones at the same time,
-	 * regardless of what we do with set_fs(). And we are talking about
-	 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
-	 * apply. Any suggestions on fixing that code? -- AV
-	 */
 	iov[0].iov_base = (void *)&ah;
 	iov[0].iov_len = size;
-	for (i = 0; i < msg->msg_iovlen; i++) {
-		void __user *base = msg->msg_iov[i].iov_base;
-		size_t iov_len = msg->msg_iov[i].iov_len;
-		/* Check it now since we switch to KERNEL_DS later. */
-		if (!access_ok(VERIFY_READ, base, iov_len)) {
-			mutex_unlock(&econet_mutex);
-			return -EFAULT;
-		}
-		iov[i+1].iov_base = base;
-		iov[i+1].iov_len = iov_len;
-		size += iov_len;
-	}
+
+	userbuf = vmalloc(len);
+	if (userbuf == NULL) {
+		err = -ENOMEM;
+		goto error;
+	}
 
+	iov[1].iov_base = userbuf;
+	iov[1].iov_len = len;
+	err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
+	if (err)
+		goto error_free_buf;
+
 	/* Get a skbuff (no data, just holds our cb information) */
 	if ((skb = sock_alloc_send_skb(sk, 0,
 				       msg->msg_flags & MSG_DONTWAIT,
-				       &err)) == NULL) {
-		mutex_unlock(&econet_mutex);
-		return err;
-	}
+				       &err)) == NULL)
+		goto error_free_buf;
 
 	eb = (struct ec_cb *)&skb->cb;
 
@@ -491,7 +477,7 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 	udpmsg.msg_name = (void *)&udpdest;
 	udpmsg.msg_namelen = sizeof(udpdest);
 	udpmsg.msg_iov = &iov[0];
-	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
+	udpmsg.msg_iovlen = 2;
 	udpmsg.msg_control = NULL;
 	udpmsg.msg_controllen = 0;
 	udpmsg.msg_flags=0;
@@ -499,9 +485,13 @@ static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
 	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
 	err = sock_sendmsg(udpsock, &udpmsg, size);
 	set_fs(oldfs);
+
+error_free_buf:
+	vfree(userbuf);
 #else
 	err = -EPROTOTYPE;
 #endif
+	error:
 	mutex_unlock(&econet_mutex);
 
 	return err;
@@ -671,6 +661,9 @@ static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
 	err = 0;
 	switch (cmd) {
 	case SIOCSIFADDR:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+
 		edev = dev->ec_ptr;
 		if (edev == NULL) {
 			/* Magic up a new one. */
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 200eb538fbb3..0f280348e0fd 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -365,7 +365,7 @@ static struct tnode *tnode_alloc(size_t size)
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
 	else
-		return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		return vzalloc(size);
 }
 
 static void __tnode_vfree(struct work_struct *arg)
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1b344f30b463..3c0369a3a663 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -133,8 +133,7 @@ int __inet_inherit_port(struct sock *sk, struct sock *child)
 			}
 		}
 	}
-	sk_add_bind_node(child, &tb->owners);
-	inet_csk(child)->icsk_bind_hash = tb;
+	inet_bind_hash(child, tb, port);
 	spin_unlock(&head->lock);
 
 	return 0;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index e91911d7aae2..1b4ec21497a4 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -26,6 +26,8 @@ static int zero;
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
+static int tcp_adv_win_scale_min = -31;
+static int tcp_adv_win_scale_max = 31;
 
 /* Update system visible IP port range */
 static void set_local_port_range(int range[2])
@@ -426,7 +428,9 @@ static struct ctl_table ipv4_table[] = {
 		.data		= &sysctl_tcp_adv_win_scale,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &tcp_adv_win_scale_min,
+		.extra2		= &tcp_adv_win_scale_max,
 	},
 	{
 		.procname	= "tcp_tw_reuse",
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 081419969485..f15c36a706ec 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2246,7 +2246,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		/* Values greater than interface MTU won't take effect. However
 		 * at the point when this call is done we typically don't yet
 		 * know which interface is going to be used */
-		if (val < 64 || val > MAX_TCP_WINDOW) {
+		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
 			err = -EINVAL;
 			break;
 		}
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 69ccbc1dde9c..e13da6de1fc7 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2043,7 +2043,9 @@ get_req:
 	}
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
-		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
+		if (!net_eq(sock_net(sk), net))
+			continue;
+		if (sk->sk_family == st->family) {
 			cur = sk;
 			goto out;
 		}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 2fc35b32df9e..23cc8e1ce8d4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2758,13 +2758,13 @@ static int addrconf_ifdown(struct net_device *dev, int how)
 			ifa->state = INET6_IFADDR_STATE_DEAD;
 			spin_unlock_bh(&ifa->state_lock);
 
-			if (state == INET6_IFADDR_STATE_DEAD) {
-				in6_ifa_put(ifa);
-			} else {
+			if (state != INET6_IFADDR_STATE_DEAD) {
 				__ipv6_ifa_notify(RTM_DELADDR, ifa);
 				atomic_notifier_call_chain(&inet6addr_chain,
 							   NETDEV_DOWN, ifa);
 			}
+
+			in6_ifa_put(ifa);
 			write_lock_bh(&idev->lock);
 		}
 	}
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index 4d6f8653ec88..8e8ea9cb7093 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -92,7 +92,7 @@ config MAC80211_MESH
 config MAC80211_LEDS
 	bool "Enable LED triggers"
 	depends on MAC80211
-	select NEW_LEDS
+	depends on LEDS_CLASS
 	select LEDS_TRIGGERS
 	---help---
 	  This option enables a few LED triggers for different
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 9dab9573be41..92ce94f5146b 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -989,20 +989,26 @@ call_refreshresult(struct rpc_task *task)
 	dprint_status(task);
 
 	task->tk_status = 0;
-	task->tk_action = call_allocate;
-	if (status >= 0 && rpcauth_uptodatecred(task))
-		return;
+	task->tk_action = call_refresh;
 	switch (status) {
-	case -EACCES:
-		rpc_exit(task, -EACCES);
-		return;
-	case -ENOMEM:
-		rpc_exit(task, -ENOMEM);
+	case 0:
+		if (rpcauth_uptodatecred(task))
+			task->tk_action = call_allocate;
 		return;
 	case -ETIMEDOUT:
 		rpc_delay(task, 3*HZ);
+	case -EAGAIN:
+		status = -EACCES;
+		if (!task->tk_cred_retry)
+			break;
+		task->tk_cred_retry--;
+		dprintk("RPC: %5u %s: retry refresh creds\n",
+				task->tk_pid, __func__);
+		return;
 	}
-	task->tk_action = call_refresh;
+	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
+			task->tk_pid, __func__, status);
+	rpc_exit(task, status);
 }
 
 /*
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3c95304a0817..2268e6798124 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1343,9 +1343,25 @@ static void unix_destruct_scm(struct sk_buff *skb)
 	sock_wfree(skb);
 }
 
+#define MAX_RECURSION_LEVEL 4
+
 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
 	int i;
+	unsigned char max_level = 0;
+	int unix_sock_count = 0;
+
+	for (i = scm->fp->count - 1; i >= 0; i--) {
+		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+
+		if (sk) {
+			unix_sock_count++;
+			max_level = max(max_level,
+					unix_sk(sk)->recursion_level);
+		}
+	}
+	if (unlikely(max_level > MAX_RECURSION_LEVEL))
+		return -ETOOMANYREFS;
 
 	/*
 	 * Need to duplicate file references for the sake of garbage
@@ -1356,9 +1372,11 @@ static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 	if (!UNIXCB(skb).fp)
 		return -ENOMEM;
 
-	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_inflight(scm->fp->fp[i]);
-	return 0;
+	if (unix_sock_count) {
+		for (i = scm->fp->count - 1; i >= 0; i--)
+			unix_inflight(scm->fp->fp[i]);
+	}
+	return max_level;
 }
 
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -1393,6 +1411,7 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	struct sk_buff *skb;
 	long timeo;
 	struct scm_cookie tmp_scm;
+	int max_level;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1431,8 +1450,9 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		goto out;
 
 	err = unix_scm_to_skb(siocb->scm, skb, true);
-	if (err)
+	if (err < 0)
 		goto out_free;
+	max_level = err + 1;
 	unix_get_secdata(siocb->scm, skb);
 
 	skb_reset_transport_header(skb);
@@ -1514,6 +1534,8 @@ restart:
 	if (sock_flag(other, SOCK_RCVTSTAMP))
 		__net_timestamp(skb);
 	skb_queue_tail(&other->sk_receive_queue, skb);
+	if (max_level > unix_sk(other)->recursion_level)
+		unix_sk(other)->recursion_level = max_level;
 	unix_state_unlock(other);
 	other->sk_data_ready(other, len);
 	sock_put(other);
@@ -1544,6 +1566,7 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	int sent = 0;
 	struct scm_cookie tmp_scm;
 	bool fds_sent = false;
+	int max_level;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1607,10 +1630,11 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
 		/* Only send the fds in the first buffer */
 		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
-		if (err) {
+		if (err < 0) {
 			kfree_skb(skb);
 			goto out_err;
 		}
+		max_level = err + 1;
 		fds_sent = true;
 
 		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
@@ -1626,6 +1650,8 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
 			goto pipe_err_free;
 
 		skb_queue_tail(&other->sk_receive_queue, skb);
+		if (max_level > unix_sk(other)->recursion_level)
+			unix_sk(other)->recursion_level = max_level;
 		unix_state_unlock(other);
 		other->sk_data_ready(other, size);
 		sent += size;
@@ -1845,6 +1871,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 		unix_state_lock(sk);
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		if (skb == NULL) {
+			unix_sk(sk)->recursion_level = 0;
 			if (copied >= target)
 				goto unlock;
 
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index c8df6fda0b1f..f89f83bf828e 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -96,7 +96,7 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
 unsigned int unix_tot_inflight;
 
 
-static struct sock *unix_get_socket(struct file *filp)
+struct sock *unix_get_socket(struct file *filp)
 {
 	struct sock *u_sock = NULL;
 	struct inode *inode = filp->f_path.dentry->d_inode;
@@ -259,9 +259,16 @@ static void inc_inflight_move_tail(struct unix_sock *u)
 }
 
 static bool gc_in_progress = false;
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
 
 void wait_for_unix_gc(void)
 {
+	/*
+	 * If number of inflight sockets is insane,
+	 * force a garbage collect right now.
+	 */
+	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
+		unix_gc();
 	wait_event(unix_gc_wait, gc_in_progress == false);
 }
 
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index a2023ec52329..1e98bc0fe0a5 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -19,7 +19,7 @@ struct hlist_head *xfrm_hash_alloc(unsigned int sz)
 	if (sz <= PAGE_SIZE)
 		n = kzalloc(sz, GFP_KERNEL);
 	else if (hashdist)
-		n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		n = vzalloc(sz);
 	else
 		n = (struct hlist_head *)
 			__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
diff --git a/scripts/gfp-translate b/scripts/gfp-translate
index d81b968d864e..c9230e158a8f 100644
--- a/scripts/gfp-translate
+++ b/scripts/gfp-translate
@@ -63,7 +63,12 @@ fi
 
 # Extract GFP flags from the kernel source
 TMPFILE=`mktemp -t gfptranslate-XXXXXX` || exit 1
-grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
+grep -q ___GFP $SOURCE/include/linux/gfp.h
+if [ $? -eq 0 ]; then
+	grep "^#define ___GFP" $SOURCE/include/linux/gfp.h | sed -e 's/u$//' | grep -v GFP_BITS > $TMPFILE
+else
+	grep "^#define __GFP" $SOURCE/include/linux/gfp.h | sed -e 's/(__force gfp_t)//' | sed -e 's/u)/)/' | grep -v GFP_BITS | sed -e 's/)\//) \//' > $TMPFILE
+fi
 
 # Parse the flags
 IFS="
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 184eb6a0b505..e57826ced380 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -164,6 +164,7 @@ struct menu {
 	struct menu *list;
 	struct symbol *sym;
 	struct property *prompt;
+	struct expr *visibility;
 	struct expr *dep;
 	unsigned int flags;
 	char *help;
diff --git a/scripts/kconfig/lkc.h b/scripts/kconfig/lkc.h
index 753cdbd7b805..3f7240df0f3b 100644
--- a/scripts/kconfig/lkc.h
+++ b/scripts/kconfig/lkc.h
@@ -107,6 +107,7 @@ void menu_end_menu(void);
 void menu_add_entry(struct symbol *sym);
 void menu_end_entry(void);
 void menu_add_dep(struct expr *dep);
+void menu_add_visibility(struct expr *dep);
 struct property *menu_add_prop(enum prop_type type, char *prompt, struct expr *expr, struct expr *dep);
 struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr *dep);
 void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep);
diff --git a/scripts/kconfig/menu.c b/scripts/kconfig/menu.c
index 7e83aef42c6d..b9d9aa18e6d6 100644
--- a/scripts/kconfig/menu.c
+++ b/scripts/kconfig/menu.c
@@ -152,6 +152,12 @@ struct property *menu_add_prompt(enum prop_type type, char *prompt, struct expr
 	return menu_add_prop(type, prompt, NULL, dep);
 }
 
+void menu_add_visibility(struct expr *expr)
+{
+	current_entry->visibility = expr_alloc_and(current_entry->visibility,
+	    expr);
+}
+
 void menu_add_expr(enum prop_type type, struct expr *expr, struct expr *dep)
 {
 	menu_add_prop(type, NULL, expr, dep);
@@ -410,6 +416,11 @@ bool menu_is_visible(struct menu *menu)
 	if (!menu->prompt)
 		return false;
 
+	if (menu->visibility) {
+		if (expr_calc_value(menu->visibility) == no)
+			return no;
+	}
+
 	sym = menu->sym;
 	if (sym) {
 		sym_calc_value(sym);
diff --git a/scripts/kconfig/zconf.gperf b/scripts/kconfig/zconf.gperf
index d8bc74249622..c9e690eb7545 100644
--- a/scripts/kconfig/zconf.gperf
+++ b/scripts/kconfig/zconf.gperf
@@ -38,6 +38,7 @@ hex, T_TYPE, TF_COMMAND, S_HEX
 string, T_TYPE, TF_COMMAND, S_STRING
 select, T_SELECT, TF_COMMAND
 range, T_RANGE, TF_COMMAND
+visible, T_VISIBLE, TF_COMMAND
 option, T_OPTION, TF_COMMAND
 on, T_ON, TF_PARAM
 modules, T_OPT_MODULES, TF_OPTION
diff --git a/scripts/kconfig/zconf.hash.c_shipped b/scripts/kconfig/zconf.hash.c_shipped
index c1748faf4634..4055d5de1750 100644
--- a/scripts/kconfig/zconf.hash.c_shipped
+++ b/scripts/kconfig/zconf.hash.c_shipped
@@ -32,7 +32,7 @@
 struct kconf_id;
 
 static struct kconf_id *kconf_id_lookup(register const char *str, register unsigned int len);
-/* maximum key range = 47, duplicates = 0 */
+/* maximum key range = 50, duplicates = 0 */
 
 #ifdef __GNUC__
 __inline
@@ -46,32 +46,32 @@ kconf_id_hash (register const char *str, register unsigned int len)
 {
   static unsigned char asso_values[] =
     {
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 11, 5,
-      0, 0, 5, 49, 5, 20, 49, 49, 5, 20,
-      5, 0, 30, 49, 0, 15, 0, 10, 0, 49,
-      25, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49, 49, 49, 49, 49,
-      49, 49, 49, 49, 49, 49
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 40, 5,
+      0, 0, 5, 52, 0, 20, 52, 52, 10, 20,
+      5, 0, 35, 52, 0, 30, 0, 15, 0, 52,
+      15, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52, 52, 52, 52, 52,
+      52, 52, 52, 52, 52, 52
     };
   register int hval = len;
 
@@ -102,25 +102,26 @@ struct kconf_id_strings_t
   char kconf_id_strings_str12[sizeof("default")];
   char kconf_id_strings_str13[sizeof("def_bool")];
   char kconf_id_strings_str14[sizeof("help")];
-  char kconf_id_strings_str15[sizeof("bool")];
   char kconf_id_strings_str16[sizeof("config")];
   char kconf_id_strings_str17[sizeof("def_tristate")];
-  char kconf_id_strings_str18[sizeof("boolean")];
+  char kconf_id_strings_str18[sizeof("hex")];
   char kconf_id_strings_str19[sizeof("defconfig_list")];
-  char kconf_id_strings_str21[sizeof("string")];
   char kconf_id_strings_str22[sizeof("if")];
   char kconf_id_strings_str23[sizeof("int")];
-  char kconf_id_strings_str26[sizeof("select")];
   char kconf_id_strings_str27[sizeof("modules")];
   char kconf_id_strings_str28[sizeof("tristate")];
  char kconf_id_strings_str29[sizeof("menu")];
-  char kconf_id_strings_str31[sizeof("source")];
   char kconf_id_strings_str32[sizeof("comment")];
-  char kconf_id_strings_str33[sizeof("hex")];
   char kconf_id_strings_str35[sizeof("menuconfig")];
-  char kconf_id_strings_str36[sizeof("prompt")];
-  char kconf_id_strings_str37[sizeof("depends")];
+  char kconf_id_strings_str36[sizeof("string")];
+  char kconf_id_strings_str37[sizeof("visible")];
+  char kconf_id_strings_str41[sizeof("prompt")];
+  char kconf_id_strings_str42[sizeof("depends")];
+  char kconf_id_strings_str44[sizeof("bool")];
+  char kconf_id_strings_str46[sizeof("select")];
+  char kconf_id_strings_str47[sizeof("boolean")];
   char kconf_id_strings_str48[sizeof("mainmenu")];
+  char kconf_id_strings_str51[sizeof("source")];
 };
@@ -136,25 +137,26 @@ static struct kconf_id_strings_t kconf_id_strings_contents =
   "default",
   "def_bool",
   "help",
-  "bool",
   "config",
   "def_tristate",
-  "boolean",
+  "hex",
   "defconfig_list",
-  "string",
   "if",
   "int",
-  "select",
   "modules",
   "tristate",
   "menu",
-  "source",
   "comment",
-  "hex",
   "menuconfig",
+  "string",
+  "visible",
   "prompt",
   "depends",
-  "mainmenu"
+  "bool",
+  "select",
+  "boolean",
+  "mainmenu",
+  "source"
 };
#define kconf_id_strings ((const char *) &kconf_id_strings_contents)
 #ifdef __GNUC__
@@ -168,11 +170,11 @@ kconf_id_lookup (register const char *str, register unsigned int len)
 {
   enum
     {
-      TOTAL_KEYWORDS = 31,
+      TOTAL_KEYWORDS = 32,
       MIN_WORD_LENGTH = 2,
       MAX_WORD_LENGTH = 14,
       MIN_HASH_VALUE = 2,
-      MAX_HASH_VALUE = 48
+      MAX_HASH_VALUE = 51
     };
 
   static struct kconf_id wordlist[] =
@@ -191,31 +193,35 @@ kconf_id_lookup (register const char *str, register unsigned int len)
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str12, T_DEFAULT, TF_COMMAND, S_UNKNOWN},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str13, T_DEFAULT, TF_COMMAND, S_BOOLEAN},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str14, T_HELP, TF_COMMAND},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str15, T_TYPE, TF_COMMAND, S_BOOLEAN},
+      {-1},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str16, T_CONFIG, TF_COMMAND},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str17, T_DEFAULT, TF_COMMAND, S_TRISTATE},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_TYPE, TF_COMMAND, S_BOOLEAN},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str18, T_TYPE, TF_COMMAND, S_HEX},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str19, T_OPT_DEFCONFIG_LIST,TF_OPTION},
-      {-1},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str21, T_TYPE, TF_COMMAND, S_STRING},
+      {-1}, {-1},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str22, T_IF, TF_COMMAND|TF_PARAM},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str23, T_TYPE, TF_COMMAND, S_INT},
-      {-1}, {-1},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str26, T_SELECT, TF_COMMAND},
+      {-1}, {-1}, {-1},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str27, T_OPT_MODULES, TF_OPTION},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str28, T_TYPE, TF_COMMAND, S_TRISTATE},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str29, T_MENU, TF_COMMAND},
-      {-1},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str31, T_SOURCE, TF_COMMAND},
+      {-1}, {-1},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str32, T_COMMENT, TF_COMMAND},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str33, T_TYPE, TF_COMMAND, S_HEX},
-      {-1},
+      {-1}, {-1},
       {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str35, T_MENUCONFIG, TF_COMMAND},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_PROMPT, TF_COMMAND},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str37, T_DEPENDS, TF_COMMAND},
-      {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1}, {-1},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str36, T_TYPE, TF_COMMAND, S_STRING},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str37, T_VISIBLE, TF_COMMAND},
+      {-1}, {-1}, {-1},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str41, T_PROMPT, TF_COMMAND},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str42, T_DEPENDS, TF_COMMAND},
       {-1},
-      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str48, T_MAINMENU, TF_COMMAND}
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str44, T_TYPE, TF_COMMAND, S_BOOLEAN},
+      {-1},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str46, T_SELECT, TF_COMMAND},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str47, T_TYPE, TF_COMMAND, S_BOOLEAN},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str48, T_MAINMENU, TF_COMMAND},
+      {-1}, {-1},
+      {(int)(long)&((struct kconf_id_strings_t *)0)->kconf_id_strings_str51, T_SOURCE, TF_COMMAND}
     };
 
  if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
diff --git a/scripts/kconfig/zconf.tab.c_shipped b/scripts/kconfig/zconf.tab.c_shipped
index 699d4b265186..4c5495ea205e 100644
--- a/scripts/kconfig/zconf.tab.c_shipped
+++ b/scripts/kconfig/zconf.tab.c_shipped
@@ -160,18 +160,19 @@ static struct menu *current_menu, *current_entry;
      T_DEFAULT = 275,
      T_SELECT = 276,
      T_RANGE = 277,
-     T_OPTION = 278,
-     T_ON = 279,
-     T_WORD = 280,
-     T_WORD_QUOTE = 281,
-     T_UNEQUAL = 282,
-     T_CLOSE_PAREN = 283,
-     T_OPEN_PAREN = 284,
-     T_EOL = 285,
-     T_OR = 286,
-     T_AND = 287,
-     T_EQUAL = 288,
-     T_NOT = 289
+     T_VISIBLE = 278,
+     T_OPTION = 279,
+     T_ON = 280,
+     T_WORD = 281,
+     T_WORD_QUOTE = 282,
+     T_UNEQUAL = 283,
+     T_CLOSE_PAREN = 284,
+     T_OPEN_PAREN = 285,
+     T_EOL = 286,
+     T_OR = 287,
+     T_AND = 288,
+     T_EQUAL = 289,
+     T_NOT = 290
    };
 #endif
 
@@ -419,20 +420,20 @@ union yyalloc
 /* YYFINAL -- State number of the termination state. */
 #define YYFINAL 11
 /* YYLAST -- Last index in YYTABLE. */
-#define YYLAST 277
+#define YYLAST 290
 
 /* YYNTOKENS -- Number of terminals. */
-#define YYNTOKENS 35
+#define YYNTOKENS 36
 /* YYNNTS -- Number of nonterminals. */
-#define YYNNTS 48
+#define YYNNTS 50
 /* YYNRULES -- Number of rules. */
-#define YYNRULES 113
+#define YYNRULES 118
 /* YYNRULES -- Number of states. */
-#define YYNSTATES 185
+#define YYNSTATES 191
 
 /* YYTRANSLATE(YYLEX) -- Bison symbol number corresponding to YYLEX. */
 #define YYUNDEFTOK 2
-#define YYMAXUTOK 289
+#define YYMAXUTOK 290
 
 #define YYTRANSLATE(YYX) \
   ((unsigned int) (YYX) <= YYMAXUTOK ? yytranslate[YYX] : YYUNDEFTOK)
@@ -468,7 +469,8 @@ static const yytype_uint8 yytranslate[] =
       2, 2, 2, 2, 2, 2, 1, 2, 3, 4,
       5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
      15, 16, 17, 18, 19, 20, 21, 22, 23, 24,
-     25, 26, 27, 28, 29, 30, 31, 32, 33, 34
+     25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+     35
 };
 
 #if YYDEBUG
@@ -478,72 +480,73 @@ static const yytype_uint16 yyprhs[] =
 {
       0, 0, 3, 6, 8, 11, 13, 14, 17, 20,
      23, 26, 31, 36, 40, 42, 44, 46, 48, 50,
-     52, 54, 56, 58, 60, 62, 64, 66, 70, 73,
-     77, 80, 84, 87, 88, 91, 94, 97, 100, 103,
-    106, 110, 115, 120, 125, 131, 135, 136, 140, 141,
-    144, 148, 151, 153, 157, 158, 161, 164, 167, 170,
-    173, 178, 182, 185, 190, 191, 194, 198, 200, 204,
-    205, 208, 211, 214, 218, 222, 225, 227, 231, 232,
-    235, 238, 241, 245, 249, 252, 255, 258, 259, 262,
-    265, 268, 273, 274, 277, 279, 281, 284, 287, 290,
-    292, 295, 296, 299, 301, 305, 309, 313, 316, 320,
-    324, 326, 328, 329
+     52, 54, 56, 58, 60, 62, 64, 66, 68, 72,
+     75, 79, 82, 86, 89, 90, 93, 96, 99, 102,
+    105, 108, 112, 117, 122, 127, 133, 137, 138, 142,
+    143, 146, 150, 153, 155, 159, 160, 163, 166, 169,
+    172, 175, 180, 184, 187, 192, 193, 196, 200, 202,
+    206, 207, 210, 213, 216, 220, 224, 228, 230, 234,
+    235, 238, 241, 244, 248, 252, 255, 258, 261, 262,
+    265, 268, 271, 276, 277, 280, 283, 286, 287, 290,
+    292, 294, 297, 300, 303, 305, 308, 309, 312, 314,
+    318, 322, 326, 329, 333, 337, 339, 341, 342
 };
 
 /* YYRHS -- A `-1'-separated list of the rules' RHS. */
 static const yytype_int8 yyrhs[] =
 {
-     36, 0, -1, 78, 37, -1, 37, -1, 62, 38,
-     -1, 38, -1, -1, 38, 40, -1, 38, 54, -1,
-     38, 66, -1, 38, 77, -1, 38, 25, 1, 30,
-     -1, 38, 39, 1, 30, -1, 38, 1, 30, -1,
-     16, -1, 18, -1, 19, -1, 21, -1, 17, -1,
-     22, -1, 20, -1, 30, -1, 60, -1, 70, -1,
-     43, -1, 45, -1, 68, -1, 25, 1, 30, -1,
-     1, 30, -1, 10, 25, 30, -1, 42, 46, -1,
-     11, 25, 30, -1, 44, 46, -1, -1, 46, 47,
-     -1, 46, 48, -1, 46, 74, -1, 46, 72, -1,
-     46, 41, -1, 46, 30, -1, 19, 75, 30, -1,
-     18, 76, 79, 30, -1, 20, 80, 79, 30, -1,
-     21, 25, 79, 30, -1, 22, 81, 81, 79, 30,
-     -1, 23, 49, 30, -1, -1, 49, 25, 50, -1,
-     -1, 33, 76, -1, 7, 82, 30, -1, 51, 55,
-     -1, 77, -1, 52, 57, 53, -1, -1, 55, 56,
-     -1, 55, 74, -1, 55, 72, -1, 55, 30, -1,
-     55, 41, -1, 18, 76, 79, 30, -1, 19, 75,
-     30, -1, 17, 30, -1, 20, 25, 79, 30, -1,
-     -1, 57, 40, -1, 14, 80, 78, -1, 77, -1,
-     58, 61, 59, -1, -1, 61, 40, -1, 61, 66,
-     -1, 61, 54, -1, 3, 76, 78, -1, 4, 76,
-     30, -1, 63, 73, -1, 77, -1, 64, 67, 65,
-     -1, -1, 67, 40, -1, 67, 66, -1, 67, 54,
-     -1, 6, 76, 30, -1, 9, 76, 30, -1, 69,
-     73, -1, 12, 30, -1, 71, 13, -1, -1, 73,
-     74, -1, 73, 30, -1, 73, 41, -1, 16, 24,
-     80, 30, -1, -1, 76, 79, -1, 25, -1, 26,
-     -1, 5, 30, -1, 8, 30, -1, 15, 30, -1,
-     30, -1, 78, 30, -1, -1, 14, 80, -1, 81,
-     -1, 81, 33, 81, -1, 81, 27, 81, -1, 29,
-     80, 28, -1, 34, 80, -1, 80, 31, 80, -1,
-     80, 32, 80, -1, 25, -1, 26, -1, -1, 25,
-     -1
+     37, 0, -1, 81, 38, -1, 38, -1, 63, 39,
+     -1, 39, -1, -1, 39, 41, -1, 39, 55, -1,
+     39, 67, -1, 39, 80, -1, 39, 26, 1, 31,
+     -1, 39, 40, 1, 31, -1, 39, 1, 31, -1,
+     16, -1, 18, -1, 19, -1, 21, -1, 17, -1,
+     22, -1, 20, -1, 23, -1, 31, -1, 61, -1,
+     71, -1, 44, -1, 46, -1, 69, -1, 26, 1,
+     31, -1, 1, 31, -1, 10, 26, 31, -1, 43,
+     47, -1, 11, 26, 31, -1, 45, 47, -1, -1,
+     47, 48, -1, 47, 49, -1, 47, 75, -1, 47,
+     73, -1, 47, 42, -1, 47, 31, -1, 19, 78,
+     31, -1, 18, 79, 82, 31, -1, 20, 83, 82,
+     31, -1, 21, 26, 82, 31, -1, 22, 84, 84,
+     82, 31, -1, 24, 50, 31, -1, -1, 50, 26,
+     51, -1, -1, 34, 79, -1, 7, 85, 31, -1,
+     52, 56, -1, 80, -1, 53, 58, 54, -1, -1,
+     56, 57, -1, 56, 75, -1, 56, 73, -1, 56,
+     31, -1, 56, 42, -1, 18, 79, 82, 31, -1,
+     19, 78, 31, -1, 17, 31, -1, 20, 26, 82,
+     31, -1, -1, 58, 41, -1, 14, 83, 81, -1,
+     80, -1, 59, 62, 60, -1, -1, 62, 41, -1,
+     62, 67, -1, 62, 55, -1, 3, 79, 81, -1,
+     4, 79, 31, -1, 64, 76, 74, -1, 80, -1,
+     65, 68, 66, -1, -1, 68, 41, -1, 68, 67,
+     -1, 68, 55, -1, 6, 79, 31, -1, 9, 79,
+     31, -1, 70, 74, -1, 12, 31, -1, 72, 13,
+     -1, -1, 74, 75, -1, 74, 31, -1, 74, 42,
+     -1, 16, 25, 83, 31, -1, -1, 76, 77, -1,
+     76, 31, -1, 23, 82, -1, -1, 79, 82, -1,
+     26, -1, 27, -1, 5, 31, -1, 8, 31, -1,
+     15, 31, -1, 31, -1, 81, 31, -1, -1, 14,
+     83, -1, 84, -1, 84, 34, 84, -1, 84, 28,
+     84, -1, 30, 83, 29, -1, 35, 83, -1, 83,
+     32, 83, -1, 83, 33, 83, -1, 26, -1, 27,
+     -1, -1, 26, -1
 };
 
 /* YYRLINE[YYN] -- source line where rule number YYN was defined. */
 static const yytype_uint16 yyrline[] =
 {
-      0, 107, 107, 107, 109, 109, 111, 113, 114, 115,
-    116, 117, 118, 122, 126, 126, 126, 126, 126, 126,
-    126, 130, 131, 132, 133, 134, 135, 139, 140, 146,
-    154, 160, 168, 178, 180, 181, 182, 183, 184, 185,
-    188, 196, 202, 212, 218, 224, 227, 229, 240, 241,
-    246, 255, 260, 268, 271, 273, 274, 275, 276, 277,
-    280, 286, 297, 303, 313, 315, 320, 328, 336, 339,
-    341, 342, 343, 348, 355, 362, 367, 375, 378, 380,
-    381, 382, 385, 393, 400, 407, 413, 420, 422, 423,
-    424, 427, 435, 437, 442, 443, 446, 447, 448, 452,
-    453, 456, 457, 460, 461, 462, 463, 464, 465, 466,
-    469, 470, 473, 474
+      0, 108, 108, 108, 110, 110, 112, 114, 115, 116,
+    117, 118, 119, 123, 127, 127, 127, 127, 127, 127,
+    127, 127, 131, 132, 133, 134, 135, 136, 140, 141,
+    147, 155, 161, 169, 179, 181, 182, 183, 184, 185,
+    186, 189, 197, 203, 213, 219, 225, 228, 230, 241,
+    242, 247, 256, 261, 269, 272, 274, 275, 276, 277,
+    278, 281, 287, 298, 304, 314, 316, 321, 329, 337,
+    340, 342, 343, 344, 349, 356, 363, 368, 376, 379,
+    381, 382, 383, 386, 394, 401, 408, 414, 421, 423,
+    424, 425, 428, 436, 438, 439, 442, 449, 451, 456,
+    457, 460, 461, 462, 466, 467, 470, 471, 474, 475,
+    476, 477, 478, 479, 480, 483, 484, 487, 488
 };
 #endif
 
@@ -556,7 +559,7 @@ static const char *const yytname[] =
   "T_SOURCE", "T_CHOICE", "T_ENDCHOICE", "T_COMMENT", "T_CONFIG",
   "T_MENUCONFIG", "T_HELP", "T_HELPTEXT", "T_IF", "T_ENDIF", "T_DEPENDS",
   "T_OPTIONAL", "T_PROMPT", "T_TYPE", "T_DEFAULT", "T_SELECT", "T_RANGE",
-  "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
+  "T_VISIBLE", "T_OPTION", "T_ON", "T_WORD", "T_WORD_QUOTE", "T_UNEQUAL",
   "T_CLOSE_PAREN", "T_OPEN_PAREN", "T_EOL", "T_OR", "T_AND", "T_EQUAL",
   "T_NOT", "$accept", "input", "start", "stmt_list", "option_name",
   "common_stmt", "option_error", "config_entry_start", "config_stmt",
@@ -567,8 +570,8 @@ static const char *const yytname[] =
   "if_entry", "if_end", "if_stmt", "if_block", "mainmenu_stmt", "menu",
   "menu_entry", "menu_end", "menu_stmt", "menu_block", "source_stmt",
   "comment", "comment_stmt", "help_start", "help", "depends_list",
-  "depends", "prompt_stmt_opt", "prompt", "end", "nl", "if_expr", "expr",
-  "symbol", "word_opt", 0
+  "depends", "visibility_list", "visible", "prompt_stmt_opt", "prompt",
+  "end", "nl", "if_expr", "expr", "symbol", "word_opt", 0
 };
 #endif
 
@@ -580,25 +583,25 @@ static const yytype_uint16 yytoknum[] =
      0, 256, 257, 258, 259, 260, 261, 262, 263, 264,
    265, 266, 267, 268, 269, 270, 271, 272, 273, 274,
    275, 276, 277, 278, 279, 280, 281, 282, 283, 284,
-   285, 286, 287, 288, 289
+   285, 286, 287, 288, 289, 290
 };
 # endif
 
 /* YYR1[YYN] -- Symbol number of symbol that rule YYN derives. */
 static const yytype_uint8 yyr1[] =
 {
-     0, 35, 36, 36, 37, 37, 38, 38, 38, 38,
-    38, 38, 38, 38, 39, 39, 39, 39, 39, 39,
-    39, 40, 40, 40, 40, 40, 40, 41, 41, 42,
-    43, 44, 45, 46, 46, 46, 46, 46, 46, 46,
-    47, 47, 47, 47, 47, 48, 49, 49, 50, 50,
-    51, 52, 53, 54, 55, 55, 55, 55, 55, 55,
-    56, 56, 56, 56, 57, 57, 58, 59, 60, 61,
-    61, 61, 61, 62, 63, 64, 65, 66, 67, 67,
-    67, 67, 68, 69, 70, 71, 72, 73, 73, 73,
-    73, 74, 75, 75, 76, 76, 77, 77, 77, 78,
-    78, 79, 79, 80, 80, 80, 80, 80, 80, 80,
-    81, 81, 82, 82
+     0, 36, 37, 37, 38, 38, 39, 39, 39, 39,
+    39, 39, 39, 39, 40, 40, 40, 40, 40, 40,
+    40, 40, 41, 41, 41, 41, 41, 41, 42, 42,
+    43, 44, 45, 46, 47, 47, 47, 47, 47, 47,
+    47, 48, 48, 48, 48, 48, 49, 50, 50, 51,
+    51, 52, 53, 54, 55, 56, 56, 56, 56, 56,
+    56, 57, 57, 57, 57, 58, 58, 59, 60, 61,
+    62, 62, 62, 62, 63, 64, 65, 66, 67, 68,
+    68, 68, 68, 69, 70, 71, 72, 73, 74, 74,
+    74, 74, 75, 76, 76, 76, 77, 78, 78, 79,
+    79, 80, 80, 80, 81, 81, 82, 82, 83, 83,
+    83, 83, 83, 83, 83, 84, 84, 85, 85
 };
 
 /* YYR2[YYN] -- Number of symbols composing right hand side of rule YYN. */
@@ -606,16 +609,16 @@ static const yytype_uint8 yyr2[] =
 {
      0, 2, 2, 1, 2, 1, 0, 2, 2, 2,
      2, 4, 4, 3, 1, 1, 1, 1, 1, 1,
-     1, 1, 1, 1, 1, 1, 1, 3, 2, 3,
-     2, 3, 2, 0, 2, 2, 2, 2, 2, 2,
-     3, 4, 4, 4, 5, 3, 0, 3, 0, 2,
-     3, 2, 1, 3, 0, 2, 2, 2, 2, 2,
-     4, 3, 2, 4, 0, 2, 3, 1, 3, 0,
-     2, 2, 2, 3, 3, 2, 1, 3, 0, 2,
-     2, 2, 3, 3, 2, 2, 2, 0, 2, 2,
-     2, 4, 0, 2, 1, 1, 2, 2, 2, 1,
-     2, 0, 2, 1, 3, 3, 3, 2, 3, 3,
-     1, 1, 0, 1
+     1, 1, 1, 1, 1, 1, 1, 1, 3, 2,
+     3, 2, 3, 2, 0, 2, 2, 2, 2, 2,
+     2, 3, 4, 4, 4, 5, 3, 0, 3, 0,
+     2, 3, 2, 1, 3, 0, 2, 2, 2, 2,
+     2, 4, 3, 2, 4, 0, 2, 3, 1, 3,
+     0, 2, 2, 2, 3, 3, 3, 1, 3, 0,
+     2, 2, 2, 3, 3, 2, 2, 2, 0, 2,
+     2, 2, 4, 0, 2, 2, 2, 0, 2, 1,
+     1, 2, 2, 2, 1, 2, 0, 2, 1, 3,
+     3, 3, 2, 3, 3, 1, 1, 0, 1
 };
 
 /* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
@@ -623,165 +626,172 @@ static const yytype_uint8 yyr2[] =
    means the default is an error. */
 static const yytype_uint8 yydefact[] =
 {
-     6, 0, 99, 0, 3, 0, 6, 6, 94, 95,
-     0, 1, 0, 0, 0, 0, 112, 0, 0, 0,
+     6, 0, 104, 0, 3, 0, 6, 6, 99, 100,
+     0, 1, 0, 0, 0, 0, 117, 0, 0, 0,
      0, 0, 0, 14, 18, 15, 16, 20, 17, 19,
-     0, 21, 0, 7, 33, 24, 33, 25, 54, 64,
-     8, 69, 22, 87, 78, 9, 26, 87, 23, 10,
-     0, 100, 2, 73, 13, 0, 96, 0, 113, 0,
-     97, 0, 0, 0, 110, 111, 0, 0, 0, 103,
-     98, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-     0, 74, 82, 50, 83, 29, 31, 0, 107, 0,
-     0, 66, 0, 0, 11, 12, 0, 0, 0, 0,
-     92, 0, 0, 0, 46, 0, 39, 38, 34, 35,
-     0, 37, 36, 0, 0, 92, 0, 58, 59, 55,
-     57, 56, 65, 53, 52, 70, 72, 68, 71, 67,
-     89, 90, 88, 79, 81, 77, 80, 76, 106, 108,
-    109, 105, 104, 28, 85, 0, 101, 0, 101, 101,
-    101, 0, 0, 0, 86, 62, 101, 0, 101, 0,
-     0, 0, 40, 93, 0, 0, 101, 48, 45, 27,
-     0, 61, 0, 91, 102, 41, 42, 43, 0, 0,
-    47, 60, 63, 44, 49
+    21, 0, 22, 0, 7, 34, 25, 34, 26, 55,
+    65, 8, 70, 23, 93, 79, 9, 27, 88, 24,
+    10, 0, 105, 2, 74, 13, 0, 101, 0, 118,
+     0, 102, 0, 0, 0, 115, 116, 0, 0, 0,
+    108, 103, 0, 0, 0, 0, 0, 0, 0, 88,
+     0, 0, 75, 82, 83, 84, 30, 32, 0, 112,
+     0, 0, 67, 0, 0, 11, 12, 0, 0, 0,
+     0, 97, 0, 0, 0, 47, 0, 40, 39, 35,
+    36, 0, 38, 37, 0, 0, 97, 0, 59, 60,
+    56, 58, 57, 66, 54, 53, 71, 73, 69, 72,
+    68, 106, 95, 0, 94, 80, 82, 78, 81, 77,
+    90, 91, 89, 111, 113, 114, 110, 109, 29, 86,
+     0, 106, 0, 106, 106, 106, 0, 0, 0, 87,
+    63, 106, 0, 106, 0, 96, 0, 0, 41, 98,
+     0, 0, 106, 49, 46, 28, 0, 62, 0, 107,
+    92, 42, 43, 44, 0, 0, 48, 61, 64, 45,
+    50
 };
 
 /* YYDEFGOTO[NTERM-NUM]. */
 static const yytype_int16 yydefgoto[] =
 {
-    -1, 3, 4, 5, 32, 33, 107, 34, 35, 36,
-    37, 73, 108, 109, 152, 180, 38, 39, 123, 40,
-    75, 119, 76, 41, 127, 42, 77, 6, 43, 44,
-    135, 45, 79, 46, 47, 48, 110, 111, 78, 112,
-    147, 148, 49, 7, 161, 68, 69, 59
+    -1, 3, 4, 5, 33, 34, 108, 35, 36, 37,
+    38, 74, 109, 110, 157, 186, 39, 40, 124, 41,
+    76, 120, 77, 42, 128, 43, 78, 6, 44, 45,
+    137, 46, 80, 47, 48, 49, 111, 112, 81, 113,
+    79, 134, 152, 153, 50, 7, 165, 69, 70, 60
 };
 
 /* YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing
    STATE-NUM. */
-#define YYPACT_NINF -89
+#define YYPACT_NINF -90
 static const yytype_int16 yypact[] =
 {
662 3, 4, -89, 20, -89, 100, -89, 7, -89, -89, 666 4, 42, -90, 96, -90, 111, -90, 15, -90, -90,
663 -8, -89, 17, 4, 28, 4, 37, 36, 4, 68, 667 75, -90, 82, 42, 104, 42, 110, 107, 42, 115,
664 87, -18, 69, -89, -89, -89, -89, -89, -89, -89, 668 125, -4, 121, -90, -90, -90, -90, -90, -90, -90,
665 128, -89, 138, -89, -89, -89, -89, -89, -89, -89, 669 -90, 162, -90, 163, -90, -90, -90, -90, -90, -90,
666 -89, -89, -89, -89, -89, -89, -89, -89, -89, -89, 670 -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
667 127, -89, -89, 110, -89, 126, -89, 136, -89, 137, 671 -90, 139, -90, -90, 138, -90, 142, -90, 143, -90,
668 -89, 147, 150, 152, -89, -89, -18, -18, 171, -14, 672 152, -90, 164, 167, 168, -90, -90, -4, -4, 77,
669 -89, 153, 157, 34, 67, 180, 233, 220, 207, 220, 673 -18, -90, 177, 185, 33, 71, 195, 247, 236, -2,
670 154, -89, -89, -89, -89, -89, -89, 0, -89, -18, 674 236, 171, -90, -90, -90, -90, -90, -90, 41, -90,
671 -18, 110, 44, 44, -89, -89, 163, 174, 182, 4, 675 -4, -4, 138, 97, 97, -90, -90, 186, 187, 194,
672 4, -18, 194, 44, -89, 219, -89, -89, -89, -89, 676 42, 42, -4, 196, 97, -90, 219, -90, -90, -90,
673 223, -89, -89, 203, 4, 4, 215, -89, -89, -89, 677 -90, 210, -90, -90, 204, 42, 42, 199, -90, -90,
674 -89, -89, -89, -89, -89, -89, -89, -89, -89, -89, 678 -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
675 -89, -89, -89, -89, -89, -89, -89, -89, -89, 213, 679 -90, 222, -90, 223, -90, -90, -90, -90, -90, -90,
676 -89, -89, -89, -89, -89, -18, 232, 227, 232, -5, 680 -90, -90, -90, -90, 215, -90, -90, -90, -90, -90,
677 232, 44, 35, 234, -89, -89, 232, 235, 232, 224, 681 -4, 222, 228, 222, -5, 222, 97, 35, 229, -90,
678 -18, 236, -89, -89, 237, 238, 232, 216, -89, -89, 682 -90, 222, 232, 222, -4, -90, 135, 233, -90, -90,
679 240, -89, 241, -89, 71, -89, -89, -89, 242, 4, 683 234, 235, 222, 240, -90, -90, 237, -90, 239, -13,
680 -89, -89, -89, -89, -89 684 -90, -90, -90, -90, 244, 42, -90, -90, -90, -90,
685 -90
681}; 686};
682 687
683/* YYPGOTO[NTERM-NUM]. */ 688/* YYPGOTO[NTERM-NUM]. */
684static const yytype_int16 yypgoto[] = 689static const yytype_int16 yypgoto[] =
685{ 690{
686 -89, -89, 255, 267, -89, 47, -57, -89, -89, -89, 691 -90, -90, 269, 271, -90, 23, -70, -90, -90, -90,
687 -89, 239, -89, -89, -89, -89, -89, -89, -89, 130, 692 -90, 243, -90, -90, -90, -90, -90, -90, -90, -48,
688 -89, -89, -89, -89, -89, -89, -89, -89, -89, -89, 693 -90, -90, -90, -90, -90, -90, -90, -90, -90, -90,
689 -89, 181, -89, -89, -89, -89, -89, 199, 229, 16, 694 -90, -20, -90, -90, -90, -90, -90, 206, 205, -68,
690 162, -1, 74, -7, 103, -65, -88, -89 695 -90, -90, 169, -1, 27, -7, 118, -66, -89, -90
691}; 696};
692 697
693/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If 698/* YYTABLE[YYPACT[STATE-NUM]]. What to do in state STATE-NUM. If
694 positive, shift that token. If negative, reduce the rule which 699 positive, shift that token. If negative, reduce the rule which
695 number is the opposite. If zero, do what YYDEFACT says. 700 number is the opposite. If zero, do what YYDEFACT says.
696 If YYTABLE_NINF, syntax error. */ 701 If YYTABLE_NINF, syntax error. */
697#define YYTABLE_NINF -85 702#define YYTABLE_NINF -86
698static const yytype_int16 yytable[] = 703static const yytype_int16 yytable[] =
699{ 704{
700 10, 87, 88, 53, 141, 142, 1, 64, 65, 160, 705 10, 88, 89, 54, 146, 147, 119, 1, 122, 164,
701 1, 66, 55, 92, 57, 151, 67, 61, 118, 93, 706 93, 141, 56, 142, 58, 156, 94, 62, 1, 90,
702 11, 131, 2, 131, 139, 140, 89, 90, 138, 8, 707 91, 131, 65, 66, 144, 145, 67, 90, 91, 132,
703 9, 89, 90, 2, -30, 96, 149, 51, -30, -30, 708 127, 68, 136, -31, 97, 2, 154, -31, -31, -31,
704 -30, -30, -30, -30, -30, -30, 97, 54, -30, -30, 709 -31, -31, -31, -31, -31, 98, 52, -31, -31, 99,
705 98, -30, 99, 100, 101, 102, 103, 104, 56, 105, 710 -31, 100, 101, 102, 103, 104, -31, 105, 129, 106,
706 167, 91, 58, 166, 106, 168, 60, -32, 96, 64, 711 138, 173, 92, 141, 107, 142, 174, 172, 8, 9,
707 65, -32, -32, -32, -32, -32, -32, -32, -32, 97, 712 143, -33, 97, 90, 91, -33, -33, -33, -33, -33,
708 159, -32, -32, 98, -32, 99, 100, 101, 102, 103, 713 -33, -33, -33, 98, 166, -33, -33, 99, -33, 100,
709 104, 121, 105, 62, 132, 174, 132, 106, 146, 70, 714 101, 102, 103, 104, -33, 105, 11, 106, 179, 151,
710 -5, 12, 89, 90, 13, 14, 15, 16, 17, 18, 715 123, 126, 107, 135, 125, 130, 2, 139, 2, 90,
711 19, 20, 63, 156, 21, 22, 23, 24, 25, 26, 716 91, -5, 12, 55, 161, 13, 14, 15, 16, 17,
712 27, 28, 29, 122, 125, 30, 133, -4, 12, 71, 717 18, 19, 20, 65, 66, 21, 22, 23, 24, 25,
713 31, 13, 14, 15, 16, 17, 18, 19, 20, 72, 718 26, 27, 28, 29, 30, 57, 59, 31, 61, -4,
714 51, 21, 22, 23, 24, 25, 26, 27, 28, 29, 719 12, 63, 32, 13, 14, 15, 16, 17, 18, 19,
715 124, 129, 30, 137, -84, 96, 81, 31, -84, -84, 720 20, 64, 71, 21, 22, 23, 24, 25, 26, 27,
716 -84, -84, -84, -84, -84, -84, 82, 83, -84, -84, 721 28, 29, 30, 72, 73, 31, 180, 90, 91, 52,
717 98, -84, -84, -84, -84, -84, -84, 84, 184, 105, 722 32, -85, 97, 82, 83, -85, -85, -85, -85, -85,
718 85, 96, 86, 94, 130, -51, -51, 95, -51, -51, 723 -85, -85, -85, 84, 190, -85, -85, 99, -85, -85,
719 -51, -51, 97, 143, -51, -51, 98, 113, 114, 115, 724 -85, -85, -85, -85, -85, 85, 97, 106, 86, 87,
720 116, 2, 89, 90, 144, 105, 145, 126, 96, 134, 725 -52, -52, 140, -52, -52, -52, -52, 98, 95, -52,
721 117, -75, -75, -75, -75, -75, -75, -75, -75, 150, 726 -52, 99, 114, 115, 116, 117, 96, 148, 149, 150,
722 153, -75, -75, 98, 13, 14, 15, 16, 17, 18, 727 158, 106, 155, 159, 97, 163, 118, -76, -76, -76,
723 19, 20, 105, 155, 21, 22, 154, 130, 14, 15, 728 -76, -76, -76, -76, -76, 160, 164, -76, -76, 99,
724 158, 17, 18, 19, 20, 90, 160, 21, 22, 179, 729 13, 14, 15, 16, 17, 18, 19, 20, 91, 106,
725 31, 163, 164, 165, 173, 89, 90, 162, 128, 170, 730 21, 22, 14, 15, 140, 17, 18, 19, 20, 168,
726 136, 172, 52, 31, 169, 171, 175, 176, 177, 178, 731 175, 21, 22, 177, 181, 182, 183, 32, 187, 167,
727 181, 182, 183, 50, 120, 74, 80, 157 732 188, 169, 170, 171, 185, 189, 53, 51, 32, 176,
733 75, 178, 121, 0, 133, 162, 0, 0, 0, 0,
734 184
728}; 735};
729 736
730static const yytype_uint8 yycheck[] = 737static const yytype_int16 yycheck[] =
731{ 738{
732 1, 66, 67, 10, 92, 93, 3, 25, 26, 14, 739 1, 67, 68, 10, 93, 94, 76, 3, 76, 14,
733 3, 29, 13, 27, 15, 103, 34, 18, 75, 33, 740 28, 81, 13, 81, 15, 104, 34, 18, 3, 32,
734 0, 78, 30, 80, 89, 90, 31, 32, 28, 25, 741 33, 23, 26, 27, 90, 91, 30, 32, 33, 31,
735 26, 31, 32, 30, 0, 1, 101, 30, 4, 5, 742 78, 35, 80, 0, 1, 31, 102, 4, 5, 6,
736 6, 7, 8, 9, 10, 11, 12, 30, 14, 15, 743 7, 8, 9, 10, 11, 12, 31, 14, 15, 16,
737 16, 17, 18, 19, 20, 21, 22, 23, 30, 25, 744 17, 18, 19, 20, 21, 22, 23, 24, 78, 26,
738 25, 68, 25, 151, 30, 30, 30, 0, 1, 25, 745 80, 26, 69, 133, 31, 133, 31, 156, 26, 27,
739 26, 4, 5, 6, 7, 8, 9, 10, 11, 12, 746 29, 0, 1, 32, 33, 4, 5, 6, 7, 8,
740 145, 14, 15, 16, 17, 18, 19, 20, 21, 22, 747 9, 10, 11, 12, 150, 14, 15, 16, 17, 18,
741 23, 75, 25, 25, 78, 160, 80, 30, 99, 30, 748 19, 20, 21, 22, 23, 24, 0, 26, 164, 100,
742 0, 1, 31, 32, 4, 5, 6, 7, 8, 9, 749 77, 78, 31, 80, 77, 78, 31, 80, 31, 32,
743 10, 11, 25, 114, 14, 15, 16, 17, 18, 19, 750 33, 0, 1, 31, 115, 4, 5, 6, 7, 8,
744 20, 21, 22, 76, 77, 25, 79, 0, 1, 1, 751 9, 10, 11, 26, 27, 14, 15, 16, 17, 18,
745 30, 4, 5, 6, 7, 8, 9, 10, 11, 1, 752 19, 20, 21, 22, 23, 31, 26, 26, 31, 0,
746 30, 14, 15, 16, 17, 18, 19, 20, 21, 22, 753 1, 26, 31, 4, 5, 6, 7, 8, 9, 10,
747 76, 77, 25, 79, 0, 1, 30, 30, 4, 5, 754 11, 26, 31, 14, 15, 16, 17, 18, 19, 20,
748 6, 7, 8, 9, 10, 11, 30, 30, 14, 15, 755 21, 22, 23, 1, 1, 26, 31, 32, 33, 31,
749 16, 17, 18, 19, 20, 21, 22, 30, 179, 25, 756 31, 0, 1, 31, 31, 4, 5, 6, 7, 8,
750 30, 1, 30, 30, 30, 5, 6, 30, 8, 9, 757 9, 10, 11, 31, 185, 14, 15, 16, 17, 18,
751 10, 11, 12, 30, 14, 15, 16, 17, 18, 19, 758 19, 20, 21, 22, 23, 31, 1, 26, 31, 31,
752 20, 30, 31, 32, 30, 25, 24, 77, 1, 79, 759 5, 6, 31, 8, 9, 10, 11, 12, 31, 14,
753 30, 4, 5, 6, 7, 8, 9, 10, 11, 25, 760 15, 16, 17, 18, 19, 20, 31, 31, 31, 25,
754 1, 14, 15, 16, 4, 5, 6, 7, 8, 9, 761 1, 26, 26, 13, 1, 26, 31, 4, 5, 6,
755 10, 11, 25, 30, 14, 15, 13, 30, 5, 6, 762 7, 8, 9, 10, 11, 31, 14, 14, 15, 16,
756 25, 8, 9, 10, 11, 32, 14, 14, 15, 33, 763 4, 5, 6, 7, 8, 9, 10, 11, 33, 26,
757 30, 148, 149, 150, 30, 31, 32, 30, 77, 156, 764 14, 15, 5, 6, 31, 8, 9, 10, 11, 31,
758 79, 158, 7, 30, 30, 30, 30, 30, 30, 166, 765 31, 14, 15, 31, 31, 31, 31, 31, 31, 151,
759 30, 30, 30, 6, 75, 36, 47, 115 766 31, 153, 154, 155, 34, 31, 7, 6, 31, 161,
767 37, 163, 76, -1, 79, 116, -1, -1, -1, -1,
768 172
760}; 769};
761 770
762/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing 771/* YYSTOS[STATE-NUM] -- The (internal number of the) accessing
763 symbol of state STATE-NUM. */ 772 symbol of state STATE-NUM. */
764static const yytype_uint8 yystos[] = 773static const yytype_uint8 yystos[] =
765{ 774{
766 0, 3, 30, 36, 37, 38, 62, 78, 25, 26, 775 0, 3, 31, 37, 38, 39, 63, 81, 26, 27,
767 76, 0, 1, 4, 5, 6, 7, 8, 9, 10, 776 79, 0, 1, 4, 5, 6, 7, 8, 9, 10,
768 11, 14, 15, 16, 17, 18, 19, 20, 21, 22, 777 11, 14, 15, 16, 17, 18, 19, 20, 21, 22,
769 25, 30, 39, 40, 42, 43, 44, 45, 51, 52, 778 23, 26, 31, 40, 41, 43, 44, 45, 46, 52,
770 54, 58, 60, 63, 64, 66, 68, 69, 70, 77, 779 53, 55, 59, 61, 64, 65, 67, 69, 70, 71,
771 38, 30, 37, 78, 30, 76, 30, 76, 25, 82, 780 80, 39, 31, 38, 81, 31, 79, 31, 79, 26,
772 30, 76, 25, 25, 25, 26, 29, 34, 80, 81, 781 85, 31, 79, 26, 26, 26, 27, 30, 35, 83,
773 30, 1, 1, 46, 46, 55, 57, 61, 73, 67, 782 84, 31, 1, 1, 47, 47, 56, 58, 62, 76,
774 73, 30, 30, 30, 30, 30, 30, 80, 80, 31, 783 68, 74, 31, 31, 31, 31, 31, 31, 83, 83,
775 32, 78, 27, 33, 30, 30, 1, 12, 16, 18, 784 32, 33, 81, 28, 34, 31, 31, 1, 12, 16,
776 19, 20, 21, 22, 23, 25, 30, 41, 47, 48, 785 18, 19, 20, 21, 22, 24, 26, 31, 42, 48,
777 71, 72, 74, 17, 18, 19, 20, 30, 41, 56, 786 49, 72, 73, 75, 17, 18, 19, 20, 31, 42,
778 72, 74, 40, 53, 77, 40, 54, 59, 66, 77, 787 57, 73, 75, 41, 54, 80, 41, 55, 60, 67,
779 30, 41, 74, 40, 54, 65, 66, 77, 28, 80, 788 80, 23, 31, 74, 77, 41, 55, 66, 67, 80,
780 80, 81, 81, 30, 30, 24, 76, 75, 76, 80, 789 31, 42, 75, 29, 83, 83, 84, 84, 31, 31,
781 25, 81, 49, 1, 13, 30, 76, 75, 25, 80, 790 25, 79, 78, 79, 83, 26, 84, 50, 1, 13,
782 14, 79, 30, 79, 79, 79, 81, 25, 30, 30, 791 31, 79, 78, 26, 14, 82, 83, 82, 31, 82,
783 79, 30, 79, 30, 80, 30, 30, 30, 79, 33, 792 82, 82, 84, 26, 31, 31, 82, 31, 82, 83,
784 50, 30, 30, 30, 76 793 31, 31, 31, 31, 82, 34, 51, 31, 31, 31,
794 79
785}; 795};
786 796
787#define yyerrok (yyerrstatus = 0) 797#define yyerrok (yyerrstatus = 0)
@@ -1292,7 +1302,7 @@ yydestruct (yymsg, yytype, yyvaluep)
1292 1302
1293 switch (yytype) 1303 switch (yytype)
1294 { 1304 {
1295 case 52: /* "choice_entry" */ 1305 case 53: /* "choice_entry" */
1296 1306
1297 { 1307 {
1298 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 1308 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1302,7 +1312,7 @@ yydestruct (yymsg, yytype, yyvaluep)
1302}; 1312};
1303 1313
1304 break; 1314 break;
1305 case 58: /* "if_entry" */ 1315 case 59: /* "if_entry" */
1306 1316
1307 { 1317 {
1308 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 1318 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1312,7 +1322,7 @@ yydestruct (yymsg, yytype, yyvaluep)
1312}; 1322};
1313 1323
1314 break; 1324 break;
1315 case 64: /* "menu_entry" */ 1325 case 65: /* "menu_entry" */
1316 1326
1317 { 1327 {
1318 fprintf(stderr, "%s:%d: missing end statement for this entry\n", 1328 fprintf(stderr, "%s:%d: missing end statement for this entry\n",
@@ -1644,17 +1654,17 @@ yyreduce:
1644 { zconf_error("invalid statement"); ;} 1654 { zconf_error("invalid statement"); ;}
1645 break; 1655 break;
1646 1656
1647 case 27: 1657 case 28:
1648 1658
1649 { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); ;} 1659 { zconf_error("unknown option \"%s\"", (yyvsp[(1) - (3)].string)); ;}
1650 break; 1660 break;
1651 1661
1652 case 28: 1662 case 29:
1653 1663
1654 { zconf_error("invalid option"); ;} 1664 { zconf_error("invalid option"); ;}
1655 break; 1665 break;
1656 1666
1657 case 29: 1667 case 30:
1658 1668
1659 { 1669 {
1660 struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0); 1670 struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0);
@@ -1664,7 +1674,7 @@ yyreduce:
1664;} 1674;}
1665 break; 1675 break;
1666 1676
1667 case 30: 1677 case 31:
1668 1678
1669 { 1679 {
1670 menu_end_entry(); 1680 menu_end_entry();
@@ -1672,7 +1682,7 @@ yyreduce:
1672;} 1682;}
1673 break; 1683 break;
1674 1684
1675 case 31: 1685 case 32:
1676 1686
1677 { 1687 {
1678 struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0); 1688 struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), 0);
@@ -1682,7 +1692,7 @@ yyreduce:
1682;} 1692;}
1683 break; 1693 break;
1684 1694
1685 case 32: 1695 case 33:
1686 1696
1687 { 1697 {
1688 if (current_entry->prompt) 1698 if (current_entry->prompt)
@@ -1694,7 +1704,7 @@ yyreduce:
1694;} 1704;}
1695 break; 1705 break;
1696 1706
1697 case 40: 1707 case 41:
1698 1708
1699 { 1709 {
1700 menu_set_type((yyvsp[(1) - (3)].id)->stype); 1710 menu_set_type((yyvsp[(1) - (3)].id)->stype);
@@ -1704,7 +1714,7 @@ yyreduce:
1704;} 1714;}
1705 break; 1715 break;
1706 1716
1707 case 41: 1717 case 42:
1708 1718
1709 { 1719 {
1710 menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr)); 1720 menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
@@ -1712,7 +1722,7 @@ yyreduce:
1712;} 1722;}
1713 break; 1723 break;
1714 1724
1715 case 42: 1725 case 43:
1716 1726
1717 { 1727 {
1718 menu_add_expr(P_DEFAULT, (yyvsp[(2) - (4)].expr), (yyvsp[(3) - (4)].expr)); 1728 menu_add_expr(P_DEFAULT, (yyvsp[(2) - (4)].expr), (yyvsp[(3) - (4)].expr));
@@ -1724,7 +1734,7 @@ yyreduce:
1724;} 1734;}
1725 break; 1735 break;
1726 1736
1727 case 43: 1737 case 44:
1728 1738
1729 { 1739 {
1730 menu_add_symbol(P_SELECT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr)); 1740 menu_add_symbol(P_SELECT, sym_lookup((yyvsp[(2) - (4)].string), 0), (yyvsp[(3) - (4)].expr));
@@ -1732,7 +1742,7 @@ yyreduce:
1732;} 1742;}
1733 break; 1743 break;
1734 1744
1735 case 44: 1745 case 45:
1736 1746
1737 { 1747 {
1738 menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[(2) - (5)].symbol), (yyvsp[(3) - (5)].symbol)), (yyvsp[(4) - (5)].expr)); 1748 menu_add_expr(P_RANGE, expr_alloc_comp(E_RANGE,(yyvsp[(2) - (5)].symbol), (yyvsp[(3) - (5)].symbol)), (yyvsp[(4) - (5)].expr));
@@ -1740,7 +1750,7 @@ yyreduce:
1740;} 1750;}
1741 break; 1751 break;
1742 1752
1743 case 47: 1753 case 48:
1744 1754
1745 { 1755 {
1746 struct kconf_id *id = kconf_id_lookup((yyvsp[(2) - (3)].string), strlen((yyvsp[(2) - (3)].string))); 1756 struct kconf_id *id = kconf_id_lookup((yyvsp[(2) - (3)].string), strlen((yyvsp[(2) - (3)].string)));
@@ -1752,17 +1762,17 @@ yyreduce:
1752;} 1762;}
1753 break; 1763 break;
1754 1764
1755 case 48: 1765 case 49:
1756 1766
1757 { (yyval.string) = NULL; ;} 1767 { (yyval.string) = NULL; ;}
1758 break; 1768 break;
1759 1769
1760 case 49: 1770 case 50:
1761 1771
1762 { (yyval.string) = (yyvsp[(2) - (2)].string); ;} 1772 { (yyval.string) = (yyvsp[(2) - (2)].string); ;}
1763 break; 1773 break;
1764 1774
1765 case 50: 1775 case 51:
1766 1776
1767 { 1777 {
1768 struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), SYMBOL_CHOICE); 1778 struct symbol *sym = sym_lookup((yyvsp[(2) - (3)].string), SYMBOL_CHOICE);
@@ -1773,14 +1783,14 @@ yyreduce:
1773;} 1783;}
1774 break; 1784 break;
1775 1785
1776 case 51: 1786 case 52:
1777 1787
1778 { 1788 {
1779 (yyval.menu) = menu_add_menu(); 1789 (yyval.menu) = menu_add_menu();
1780;} 1790;}
1781 break; 1791 break;
1782 1792
1783 case 52: 1793 case 53:
1784 1794
1785 { 1795 {
1786 if (zconf_endtoken((yyvsp[(1) - (1)].id), T_CHOICE, T_ENDCHOICE)) { 1796 if (zconf_endtoken((yyvsp[(1) - (1)].id), T_CHOICE, T_ENDCHOICE)) {
@@ -1790,7 +1800,7 @@ yyreduce:
1790;} 1800;}
1791 break; 1801 break;
1792 1802
1793 case 60: 1803 case 61:
1794 1804
1795 { 1805 {
1796 menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr)); 1806 menu_add_prompt(P_PROMPT, (yyvsp[(2) - (4)].string), (yyvsp[(3) - (4)].expr));
@@ -1798,7 +1808,7 @@ yyreduce:
1798;} 1808;}
1799 break; 1809 break;
1800 1810
1801 case 61: 1811 case 62:
1802 1812
1803 { 1813 {
1804 if ((yyvsp[(1) - (3)].id)->stype == S_BOOLEAN || (yyvsp[(1) - (3)].id)->stype == S_TRISTATE) { 1814 if ((yyvsp[(1) - (3)].id)->stype == S_BOOLEAN || (yyvsp[(1) - (3)].id)->stype == S_TRISTATE) {
@@ -1811,7 +1821,7 @@ yyreduce:
1811;} 1821;}
1812 break; 1822 break;
1813 1823
1814 case 62: 1824 case 63:
1815 1825
1816 { 1826 {
1817 current_entry->sym->flags |= SYMBOL_OPTIONAL; 1827 current_entry->sym->flags |= SYMBOL_OPTIONAL;
@@ -1819,7 +1829,7 @@ yyreduce:
1819;} 1829;}
1820 break; 1830 break;
1821 1831
1822 case 63: 1832 case 64:
1823 1833
1824 { 1834 {
1825 if ((yyvsp[(1) - (4)].id)->stype == S_UNKNOWN) { 1835 if ((yyvsp[(1) - (4)].id)->stype == S_UNKNOWN) {
@@ -1831,7 +1841,7 @@ yyreduce:
1831;} 1841;}
1832 break; 1842 break;
1833 1843
1834 case 66: 1844 case 67:
1835 1845
1836 { 1846 {
1837 printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno()); 1847 printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
@@ -1841,7 +1851,7 @@ yyreduce:
1841;} 1851;}
1842 break; 1852 break;
1843 1853
1844 case 67: 1854 case 68:
1845 1855
1846 { 1856 {
1847 if (zconf_endtoken((yyvsp[(1) - (1)].id), T_IF, T_ENDIF)) { 1857 if (zconf_endtoken((yyvsp[(1) - (1)].id), T_IF, T_ENDIF)) {
@@ -1851,14 +1861,14 @@ yyreduce:
1851;} 1861;}
1852 break; 1862 break;
1853 1863
1854 case 73: 1864 case 74:
1855 1865
1856 { 1866 {
1857 menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL); 1867 menu_add_prompt(P_MENU, (yyvsp[(2) - (3)].string), NULL);
1858;} 1868;}
1859 break; 1869 break;
1860 1870
1861 case 74: 1871 case 75:
1862 1872
1863 { 1873 {
1864 menu_add_entry(NULL); 1874 menu_add_entry(NULL);
@@ -1867,14 +1877,14 @@ yyreduce:
1867;} 1877;}
1868 break; 1878 break;
1869 1879
1870 case 75: 1880 case 76:
1871 1881
1872 { 1882 {
1873 (yyval.menu) = menu_add_menu(); 1883 (yyval.menu) = menu_add_menu();
1874;} 1884;}
1875 break; 1885 break;
1876 1886
1877 case 76: 1887 case 77:
1878 1888
1879 { 1889 {
1880 if (zconf_endtoken((yyvsp[(1) - (1)].id), T_MENU, T_ENDMENU)) { 1890 if (zconf_endtoken((yyvsp[(1) - (1)].id), T_MENU, T_ENDMENU)) {
@@ -1884,7 +1894,7 @@ yyreduce:
1884;} 1894;}
1885 break; 1895 break;
1886 1896
1887 case 82: 1897 case 83:
1888 1898
1889 { 1899 {
1890 printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string)); 1900 printd(DEBUG_PARSE, "%s:%d:source %s\n", zconf_curname(), zconf_lineno(), (yyvsp[(2) - (3)].string));
@@ -1892,7 +1902,7 @@ yyreduce:
1892;} 1902;}
1893 break; 1903 break;
1894 1904
1895 case 83: 1905 case 84:
1896 1906
1897 { 1907 {
1898 menu_add_entry(NULL); 1908 menu_add_entry(NULL);
@@ -1901,14 +1911,14 @@ yyreduce:
1901;} 1911;}
1902 break; 1912 break;
1903 1913
1904 case 84: 1914 case 85:
1905 1915
1906 { 1916 {
1907 menu_end_entry(); 1917 menu_end_entry();
1908;} 1918;}
1909 break; 1919 break;
1910 1920
1911 case 85: 1921 case 86:
1912 1922
1913 { 1923 {
1914 printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno()); 1924 printd(DEBUG_PARSE, "%s:%d:help\n", zconf_curname(), zconf_lineno());
@@ -1916,14 +1926,14 @@ yyreduce:
1916;} 1926;}
1917 break; 1927 break;
1918 1928
1919 case 86: 1929 case 87:
1920 1930
1921 { 1931 {
1922 current_entry->help = (yyvsp[(2) - (2)].string); 1932 current_entry->help = (yyvsp[(2) - (2)].string);
1923;} 1933;}
1924 break; 1934 break;
1925 1935
1926 case 91: 1936 case 92:
1927 1937
1928 { 1938 {
1929 menu_add_dep((yyvsp[(3) - (4)].expr)); 1939 menu_add_dep((yyvsp[(3) - (4)].expr));
@@ -1931,84 +1941,91 @@ yyreduce:
1931;} 1941;}
1932 break; 1942 break;
1933 1943
1934 case 93: 1944 case 96:
1945
1946 {
1947 menu_add_visibility((yyvsp[(2) - (2)].expr));
1948;}
1949 break;
1950
1951 case 98:
1935 1952
1936 { 1953 {
1937 menu_add_prompt(P_PROMPT, (yyvsp[(1) - (2)].string), (yyvsp[(2) - (2)].expr)); 1954 menu_add_prompt(P_PROMPT, (yyvsp[(1) - (2)].string), (yyvsp[(2) - (2)].expr));
1938;} 1955;}
1939 break; 1956 break;
1940 1957
1941 case 96: 1958 case 101:
1942 1959
1943 { (yyval.id) = (yyvsp[(1) - (2)].id); ;} 1960 { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
1944 break; 1961 break;
1945 1962
1946 case 97: 1963 case 102:
1947 1964
1948 { (yyval.id) = (yyvsp[(1) - (2)].id); ;} 1965 { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
1949 break; 1966 break;
1950 1967
1951 case 98: 1968 case 103:
1952 1969
1953 { (yyval.id) = (yyvsp[(1) - (2)].id); ;} 1970 { (yyval.id) = (yyvsp[(1) - (2)].id); ;}
1954 break; 1971 break;
1955 1972
1956 case 101: 1973 case 106:
1957 1974
1958 { (yyval.expr) = NULL; ;} 1975 { (yyval.expr) = NULL; ;}
1959 break; 1976 break;
1960 1977
1961 case 102: 1978 case 107:
1962 1979
1963 { (yyval.expr) = (yyvsp[(2) - (2)].expr); ;} 1980 { (yyval.expr) = (yyvsp[(2) - (2)].expr); ;}
1964 break; 1981 break;
1965 1982
1966 case 103: 1983 case 108:
1967 1984
1968 { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); ;} 1985 { (yyval.expr) = expr_alloc_symbol((yyvsp[(1) - (1)].symbol)); ;}
1969 break; 1986 break;
1970 1987
1971 case 104: 1988 case 109:
1972 1989
1973 { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;} 1990 { (yyval.expr) = expr_alloc_comp(E_EQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;}
1974 break; 1991 break;
1975 1992
1976 case 105: 1993 case 110:
1977 1994
1978 { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;} 1995 { (yyval.expr) = expr_alloc_comp(E_UNEQUAL, (yyvsp[(1) - (3)].symbol), (yyvsp[(3) - (3)].symbol)); ;}
1979 break; 1996 break;
1980 1997
1981 case 106: 1998 case 111:
1982 1999
1983 { (yyval.expr) = (yyvsp[(2) - (3)].expr); ;} 2000 { (yyval.expr) = (yyvsp[(2) - (3)].expr); ;}
1984 break; 2001 break;
1985 2002
1986 case 107: 2003 case 112:
1987 2004
1988 { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); ;} 2005 { (yyval.expr) = expr_alloc_one(E_NOT, (yyvsp[(2) - (2)].expr)); ;}
1989 break; 2006 break;
1990 2007
1991 case 108: 2008 case 113:
1992 2009
1993 { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;} 2010 { (yyval.expr) = expr_alloc_two(E_OR, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;}
1994 break; 2011 break;
1995 2012
1996 case 109: 2013 case 114:
1997 2014
1998 { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;} 2015 { (yyval.expr) = expr_alloc_two(E_AND, (yyvsp[(1) - (3)].expr), (yyvsp[(3) - (3)].expr)); ;}
1999 break; 2016 break;
2000 2017
2001 case 110: 2018 case 115:
2002 2019
2003 { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); ;} 2020 { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), 0); free((yyvsp[(1) - (1)].string)); ;}
2004 break; 2021 break;
2005 2022
2006 case 111: 2023 case 116:
2007 2024
2008 { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); ;} 2025 { (yyval.symbol) = sym_lookup((yyvsp[(1) - (1)].string), SYMBOL_CONST); free((yyvsp[(1) - (1)].string)); ;}
2009 break; 2026 break;
2010 2027
2011 case 112: 2028 case 117:
2012 2029
2013 { (yyval.string) = NULL; ;} 2030 { (yyval.string) = NULL; ;}
2014 break; 2031 break;
@@ -2278,6 +2295,7 @@ static const char *zconf_tokenname(int token)
2278 case T_IF: return "if"; 2295 case T_IF: return "if";
2279 case T_ENDIF: return "endif"; 2296 case T_ENDIF: return "endif";
2280 case T_DEPENDS: return "depends"; 2297 case T_DEPENDS: return "depends";
2298 case T_VISIBLE: return "visible";
2281 } 2299 }
2282 return "<token>"; 2300 return "<token>";
2283} 2301}
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 2abd3c7ff15d..49fb4ab664c3 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -36,7 +36,7 @@ static struct menu *current_menu, *current_entry;
36#define YYERROR_VERBOSE 36#define YYERROR_VERBOSE
37#endif 37#endif
38%} 38%}
39%expect 28 39%expect 30
40 40
41%union 41%union
42{ 42{
@@ -68,6 +68,7 @@ static struct menu *current_menu, *current_entry;
68%token <id>T_DEFAULT 68%token <id>T_DEFAULT
69%token <id>T_SELECT 69%token <id>T_SELECT
70%token <id>T_RANGE 70%token <id>T_RANGE
71%token <id>T_VISIBLE
71%token <id>T_OPTION 72%token <id>T_OPTION
72%token <id>T_ON 73%token <id>T_ON
73%token <string> T_WORD 74%token <string> T_WORD
@@ -123,7 +124,7 @@ stmt_list:
123; 124;
124 125
125option_name: 126option_name:
126 T_DEPENDS | T_PROMPT | T_TYPE | T_SELECT | T_OPTIONAL | T_RANGE | T_DEFAULT 127 T_DEPENDS | T_PROMPT | T_TYPE | T_SELECT | T_OPTIONAL | T_RANGE | T_DEFAULT | T_VISIBLE
127; 128;
128 129
129common_stmt: 130common_stmt:
@@ -359,7 +360,7 @@ menu: T_MENU prompt T_EOL
359 printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno()); 360 printd(DEBUG_PARSE, "%s:%d:menu\n", zconf_curname(), zconf_lineno());
360}; 361};
361 362
362menu_entry: menu depends_list 363menu_entry: menu visibility_list depends_list
363{ 364{
364 $$ = menu_add_menu(); 365 $$ = menu_add_menu();
365}; 366};
@@ -430,6 +431,19 @@ depends: T_DEPENDS T_ON expr T_EOL
430 printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno()); 431 printd(DEBUG_PARSE, "%s:%d:depends on\n", zconf_curname(), zconf_lineno());
431}; 432};
432 433
434/* visibility option */
435
436visibility_list:
437 /* empty */
438 | visibility_list visible
439 | visibility_list T_EOL
440;
441
442visible: T_VISIBLE if_expr
443{
444 menu_add_visibility($2);
445};
446
433/* prompt statement */ 447/* prompt statement */
434 448
435prompt_stmt_opt: 449prompt_stmt_opt:
@@ -526,6 +540,7 @@ static const char *zconf_tokenname(int token)
526 case T_IF: return "if"; 540 case T_IF: return "if";
527 case T_ENDIF: return "endif"; 541 case T_ENDIF: return "endif";
528 case T_DEPENDS: return "depends"; 542 case T_DEPENDS: return "depends";
543 case T_VISIBLE: return "visible";
529 } 544 }
530 return "<token>"; 545 return "<token>";
531} 546}
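
The regenerated zconf.tab.c tables above and the zconf.y hunks they come from add a single feature: a T_VISIBLE token and a visibility_list production attached to menu_entry, so a menu block can carry "visible if <expr>" conditions (the bump from %expect 28 to 30 presumably accounts for the extra shift/reduce conflicts the optional list introduces). Each clause is handed to menu_add_visibility(), which lives in scripts/kconfig/menu.c and is not part of this diff; a plausible sketch consistent with the call sites, assuming a visibility member is added to struct menu alongside this change:

	/* Sketch only; the real body lives in scripts/kconfig/menu.c.
	 * current_entry is the parser's cursor (declared in zconf.y);
	 * ANDing lets several "visible if" clauses accumulate. */
	void menu_add_visibility(struct expr *expr)
	{
		current_entry->visibility = expr_alloc_and(current_entry->visibility,
							   expr);
	}
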
diff --git a/sound/atmel/abdac.c b/sound/atmel/abdac.c
index f2f41c854221..6e2409181895 100644
--- a/sound/atmel/abdac.c
+++ b/sound/atmel/abdac.c
@@ -420,9 +420,9 @@ static int __devinit atmel_abdac_probe(struct platform_device *pdev)
420 return PTR_ERR(pclk); 420 return PTR_ERR(pclk);
421 } 421 }
422 sample_clk = clk_get(&pdev->dev, "sample_clk"); 422 sample_clk = clk_get(&pdev->dev, "sample_clk");
423 if (IS_ERR(pclk)) { 423 if (IS_ERR(sample_clk)) {
424 dev_dbg(&pdev->dev, "no sample clock\n"); 424 dev_dbg(&pdev->dev, "no sample clock\n");
425 retval = PTR_ERR(pclk); 425 retval = PTR_ERR(sample_clk);
426 goto out_put_pclk; 426 goto out_put_pclk;
427 } 427 }
428 clk_enable(pclk); 428 clk_enable(pclk);
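
The abdac hunk above is a classic copy-paste fix: the second clk_get() result was validated, and its error code extracted, through the first clock's pointer, so a failing "sample_clk" lookup went unnoticed while a stale pclk value was re-checked. A minimal sketch of the corrected pattern; the function name is hypothetical, and only clk_get()/clk_put()/IS_ERR()/PTR_ERR() are taken from the kernel API:

	#include <linux/clk.h>
	#include <linux/err.h>

	/* Each clock is checked and unwound through its own pointer. */
	static int example_get_clocks(struct device *dev,
				      struct clk **pclk, struct clk **sample_clk)
	{
		*pclk = clk_get(dev, "pclk");
		if (IS_ERR(*pclk))
			return PTR_ERR(*pclk);

		*sample_clk = clk_get(dev, "sample_clk");
		if (IS_ERR(*sample_clk)) {	/* not IS_ERR(*pclk) */
			clk_put(*pclk);		/* undo the first lookup */
			return PTR_ERR(*sample_clk);
		}
		return 0;
	}
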
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index 5c8c7dff8ede..b753ec661fcf 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1510,16 +1510,19 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
1510static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file) 1510static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
1511{ 1511{
1512 struct snd_pcm_substream *substream; 1512 struct snd_pcm_substream *substream;
1513 struct snd_pcm_runtime *runtime;
1514 int i;
1513 1515
1514 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_PLAYBACK]; 1516 for (i = 0; i < 2; i++) {
1515 if (substream != NULL) { 1517 substream = pcm_oss_file->streams[i];
1516 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 1518 if (!substream)
1517 substream->runtime->oss.prepare = 1; 1519 continue;
1518 } 1520 runtime = substream->runtime;
1519 substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
1520 if (substream != NULL) {
1521 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL); 1521 snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
1522 substream->runtime->oss.prepare = 1; 1522 runtime->oss.prepare = 1;
1523 runtime->oss.buffer_used = 0;
1524 runtime->oss.prev_hw_ptr_period = 0;
1525 runtime->oss.period_ptr = 0;
1523 } 1526 }
1524 return 0; 1527 return 0;
1525} 1528}
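
The snd_pcm_oss_reset() rewrite above does two things: it replaces the duplicated playback/capture blocks with a loop (streams[] is indexed by stream direction, and SNDRV_PCM_STREAM_PLAYBACK/SNDRV_PCM_STREAM_CAPTURE are 0 and 1, which is what makes "for (i = 0; i < 2; i++)" valid), and it now also clears the OSS bookkeeping fields on reset instead of only setting the prepare flag. The same loop spelled out with the symbolic indices:

	/* Equivalent to the hunk above, using the named stream indices. */
	for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
		substream = pcm_oss_file->streams[i];
		if (!substream)
			continue;
		runtime = substream->runtime;
		snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
		runtime->oss.prepare = 1;
		runtime->oss.buffer_used = 0;	/* reset bookkeeping too */
		runtime->oss.prev_hw_ptr_period = 0;
		runtime->oss.period_ptr = 0;
	}
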
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index a1707cca9c66..b75db8e9cc0f 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -223,7 +223,7 @@ static void xrun_log(struct snd_pcm_substream *substream,
223 entry->jiffies = jiffies; 223 entry->jiffies = jiffies;
224 entry->pos = pos; 224 entry->pos = pos;
225 entry->period_size = runtime->period_size; 225 entry->period_size = runtime->period_size;
226 entry->buffer_size = runtime->buffer_size;; 226 entry->buffer_size = runtime->buffer_size;
227 entry->old_hw_ptr = runtime->status->hw_ptr; 227 entry->old_hw_ptr = runtime->status->hw_ptr;
228 entry->hw_ptr_base = runtime->hw_ptr_base; 228 entry->hw_ptr_base = runtime->hw_ptr_base;
229 log->idx = (log->idx + 1) % XRUN_LOG_CNT; 229 log->idx = (log->idx + 1) % XRUN_LOG_CNT;
diff --git a/sound/oss/dev_table.c b/sound/oss/dev_table.c
index 727bdb9ba2dc..d8cf3e58dc76 100644
--- a/sound/oss/dev_table.c
+++ b/sound/oss/dev_table.c
@@ -71,7 +71,7 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
71 if (sound_nblocks >= MAX_MEM_BLOCKS) 71 if (sound_nblocks >= MAX_MEM_BLOCKS)
72 sound_nblocks = MAX_MEM_BLOCKS - 1; 72 sound_nblocks = MAX_MEM_BLOCKS - 1;
73 73
74 op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct audio_operations))); 74 op = (struct audio_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct audio_operations)));
75 sound_nblocks++; 75 sound_nblocks++;
76 if (sound_nblocks >= MAX_MEM_BLOCKS) 76 if (sound_nblocks >= MAX_MEM_BLOCKS)
77 sound_nblocks = MAX_MEM_BLOCKS - 1; 77 sound_nblocks = MAX_MEM_BLOCKS - 1;
@@ -81,7 +81,6 @@ int sound_install_audiodrv(int vers, char *name, struct audio_driver *driver,
81 sound_unload_audiodev(num); 81 sound_unload_audiodev(num);
82 return -(ENOMEM); 82 return -(ENOMEM);
83 } 83 }
84 memset((char *) op, 0, sizeof(struct audio_operations));
85 init_waitqueue_head(&op->in_sleeper); 84 init_waitqueue_head(&op->in_sleeper);
86 init_waitqueue_head(&op->out_sleeper); 85 init_waitqueue_head(&op->out_sleeper);
87 init_waitqueue_head(&op->poll_sleeper); 86 init_waitqueue_head(&op->poll_sleeper);
@@ -128,7 +127,7 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
128 /* FIXME: This leaks a mixer_operations struct every time it's called 127 /* FIXME: This leaks a mixer_operations struct every time it's called
129 until you unload sound! */ 128 until you unload sound! */
130 129
131 op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vmalloc(sizeof(struct mixer_operations))); 130 op = (struct mixer_operations *) (sound_mem_blocks[sound_nblocks] = vzalloc(sizeof(struct mixer_operations)));
132 sound_nblocks++; 131 sound_nblocks++;
133 if (sound_nblocks >= MAX_MEM_BLOCKS) 132 if (sound_nblocks >= MAX_MEM_BLOCKS)
134 sound_nblocks = MAX_MEM_BLOCKS - 1; 133 sound_nblocks = MAX_MEM_BLOCKS - 1;
@@ -137,7 +136,6 @@ int sound_install_mixer(int vers, char *name, struct mixer_operations *driver,
137 printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name); 136 printk(KERN_ERR "Sound: Can't allocate mixer driver for (%s)\n", name);
138 return -ENOMEM; 137 return -ENOMEM;
139 } 138 }
140 memset((char *) op, 0, sizeof(struct mixer_operations));
141 memcpy((char *) op, (char *) driver, driver_size); 139 memcpy((char *) op, (char *) driver, driver_size);
142 140
143 strlcpy(op->name, name, sizeof(op->name)); 141 strlcpy(op->name, name, sizeof(op->name));
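
The dev_table.c hunks above, and the midibuf/pss/sequencer hunks that follow, are a mechanical cleanup: vmalloc()+memset() pairs collapse into vzalloc() (newly available at the time of this merge), and redundant casts on vmalloc()'s void * return are dropped, since void * converts implicitly in C. The transformation in isolation:

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/* Old form: allocate, then zero by hand. */
	static void *zalloc_old(size_t size)
	{
		void *p = vmalloc(size);

		if (p)
			memset(p, 0, size);
		return p;
	}

	/* New form: vzalloc() allocates and zeroes in one call. */
	static void *zalloc_new(size_t size)
	{
		return vzalloc(size);
	}
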
diff --git a/sound/oss/midibuf.c b/sound/oss/midibuf.c
index 782b3b84dac6..ceedb1eff203 100644
--- a/sound/oss/midibuf.c
+++ b/sound/oss/midibuf.c
@@ -178,7 +178,7 @@ int MIDIbuf_open(int dev, struct file *file)
178 return err; 178 return err;
179 179
180 parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT; 180 parms[dev].prech_timeout = MAX_SCHEDULE_TIMEOUT;
181 midi_in_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); 181 midi_in_buf[dev] = vmalloc(sizeof(struct midi_buf));
182 182
183 if (midi_in_buf[dev] == NULL) 183 if (midi_in_buf[dev] == NULL)
184 { 184 {
@@ -188,7 +188,7 @@ int MIDIbuf_open(int dev, struct file *file)
188 } 188 }
189 midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; 189 midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0;
190 190
191 midi_out_buf[dev] = (struct midi_buf *) vmalloc(sizeof(struct midi_buf)); 191 midi_out_buf[dev] = vmalloc(sizeof(struct midi_buf));
192 192
193 if (midi_out_buf[dev] == NULL) 193 if (midi_out_buf[dev] == NULL)
194 { 194 {
diff --git a/sound/oss/pss.c b/sound/oss/pss.c
index e19dd5dcc2de..9b800ce5100e 100644
--- a/sound/oss/pss.c
+++ b/sound/oss/pss.c
@@ -859,7 +859,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg,
859 return 0; 859 return 0;
860 860
861 case SNDCTL_COPR_LOAD: 861 case SNDCTL_COPR_LOAD:
862 buf = (copr_buffer *) vmalloc(sizeof(copr_buffer)); 862 buf = vmalloc(sizeof(copr_buffer));
863 if (buf == NULL) 863 if (buf == NULL)
864 return -ENOSPC; 864 return -ENOSPC;
865 if (copy_from_user(buf, arg, sizeof(copr_buffer))) { 865 if (copy_from_user(buf, arg, sizeof(copr_buffer))) {
@@ -871,7 +871,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg,
871 return err; 871 return err;
872 872
873 case SNDCTL_COPR_SENDMSG: 873 case SNDCTL_COPR_SENDMSG:
874 mbuf = (copr_msg *)vmalloc(sizeof(copr_msg)); 874 mbuf = vmalloc(sizeof(copr_msg));
875 if (mbuf == NULL) 875 if (mbuf == NULL)
876 return -ENOSPC; 876 return -ENOSPC;
877 if (copy_from_user(mbuf, arg, sizeof(copr_msg))) { 877 if (copy_from_user(mbuf, arg, sizeof(copr_msg))) {
@@ -895,7 +895,7 @@ static int pss_coproc_ioctl(void *dev_info, unsigned int cmd, void __user *arg,
895 895
896 case SNDCTL_COPR_RCVMSG: 896 case SNDCTL_COPR_RCVMSG:
897 err = 0; 897 err = 0;
898 mbuf = (copr_msg *)vmalloc(sizeof(copr_msg)); 898 mbuf = vmalloc(sizeof(copr_msg));
899 if (mbuf == NULL) 899 if (mbuf == NULL)
900 return -ENOSPC; 900 return -ENOSPC;
901 data = (unsigned short *)mbuf->data; 901 data = (unsigned short *)mbuf->data;
diff --git a/sound/oss/sequencer.c b/sound/oss/sequencer.c
index e85789e53816..5ea1098ac427 100644
--- a/sound/oss/sequencer.c
+++ b/sound/oss/sequencer.c
@@ -1646,13 +1646,13 @@ void sequencer_init(void)
1646{ 1646{
1647 if (sequencer_ok) 1647 if (sequencer_ok)
1648 return; 1648 return;
1649 queue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * EV_SZ); 1649 queue = vmalloc(SEQ_MAX_QUEUE * EV_SZ);
1650 if (queue == NULL) 1650 if (queue == NULL)
1651 { 1651 {
1652 printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n"); 1652 printk(KERN_ERR "sequencer: Can't allocate memory for sequencer output queue\n");
1653 return; 1653 return;
1654 } 1654 }
1655 iqueue = (unsigned char *)vmalloc(SEQ_MAX_QUEUE * IEV_SZ); 1655 iqueue = vmalloc(SEQ_MAX_QUEUE * IEV_SZ);
1656 if (iqueue == NULL) 1656 if (iqueue == NULL)
1657 { 1657 {
1658 printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n"); 1658 printk(KERN_ERR "sequencer: Can't allocate memory for sequencer input queue\n");
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index 62895a719fcb..22dbd91811a4 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -435,7 +435,7 @@ void __devexit asihpi_adapter_remove(struct pci_dev *pci_dev)
435 struct hpi_message hm; 435 struct hpi_message hm;
436 struct hpi_response hr; 436 struct hpi_response hr;
437 struct hpi_adapter *pa; 437 struct hpi_adapter *pa;
438 pa = (struct hpi_adapter *)pci_get_drvdata(pci_dev); 438 pa = pci_get_drvdata(pci_dev);
439 439
440 hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM, 440 hpi_init_message_response(&hm, &hr, HPI_OBJ_SUBSYSTEM,
441 HPI_SUBSYS_DELETE_ADAPTER); 441 HPI_SUBSYS_DELETE_ADAPTER);
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index 4679ed83a43b..2f3cacbd5528 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1129,10 +1129,11 @@ snd_azf3328_codec_setdmaa(struct snd_azf3328 *chip,
1129 1129
1130 count_areas = size/2; 1130 count_areas = size/2;
1131 addr_area2 = addr+count_areas; 1131 addr_area2 = addr+count_areas;
1132 count_areas--; /* max. index */
1133 snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n", 1132 snd_azf3328_dbgcodec("setdma: buffers %08lx[%u] / %08lx[%u]\n",
1134 addr, count_areas, addr_area2, count_areas); 1133 addr, count_areas, addr_area2, count_areas);
1135 1134
1135 count_areas--; /* max. index */
1136
1136 /* build combined I/O buffer length word */ 1137 /* build combined I/O buffer length word */
1137 lengths = (count_areas << 16) | (count_areas); 1138 lengths = (count_areas << 16) | (count_areas);
1138 spin_lock_irqsave(&chip->reg_lock, flags); 1139 spin_lock_irqsave(&chip->reg_lock, flags);
@@ -1740,11 +1741,15 @@ static const struct snd_pcm_hardware snd_azf3328_hardware =
1740 .rate_max = AZF_FREQ_66200, 1741 .rate_max = AZF_FREQ_66200,
1741 .channels_min = 1, 1742 .channels_min = 1,
1742 .channels_max = 2, 1743 .channels_max = 2,
1743 .buffer_bytes_max = 65536, 1744 .buffer_bytes_max = (64*1024),
1744 .period_bytes_min = 64, 1745 .period_bytes_min = 1024,
1745 .period_bytes_max = 65536, 1746 .period_bytes_max = (32*1024),
1746 .periods_min = 1, 1747 /* We simply have two DMA areas (instead of a list of descriptors
1747 .periods_max = 1024, 1748 such as other cards); I believe that this is a fixed hardware
1749 attribute and there isn't much driver magic to be done to expand it.
1750 Thus indicate that we have at least and at most 2 periods. */
1751 .periods_min = 2,
1752 .periods_max = 2,
1748 /* FIXME: maybe that card actually has a FIFO? 1753 /* FIXME: maybe that card actually has a FIFO?
1749 * Hmm, it seems newer revisions do have one, but we still don't know 1754 * Hmm, it seems newer revisions do have one, but we still don't know
1750 * its size... */ 1755 * its size... */
@@ -1980,8 +1985,13 @@ snd_azf3328_timer_stop(struct snd_timer *timer)
1980 chip = snd_timer_chip(timer); 1985 chip = snd_timer_chip(timer);
1981 spin_lock_irqsave(&chip->reg_lock, flags); 1986 spin_lock_irqsave(&chip->reg_lock, flags);
1982 /* disable timer countdown and interrupt */ 1987 /* disable timer countdown and interrupt */
1983 /* FIXME: should we write TIMER_IRQ_ACK here? */ 1988 /* Hmm, should we write TIMER_IRQ_ACK here?
1984 snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0); 1989 YES indeed, otherwise a rogue timer operation - which prompts
1990 ALSA(?) to call repeated stop() in vain, but NOT start() -
1991 will never end (value 0x03 is kept shown in control byte).
1992 Simply manually poking 0x04 _once_ immediately successfully stops
1993 the hardware/ALSA interrupt activity. */
1994 snd_azf3328_ctrl_outb(chip, IDX_IO_TIMER_VALUE + 3, 0x04);
1985 spin_unlock_irqrestore(&chip->reg_lock, flags); 1995 spin_unlock_irqrestore(&chip->reg_lock, flags);
1986 snd_azf3328_dbgcallleave(); 1996 snd_azf3328_dbgcallleave();
1987 return 0; 1997 return 0;
diff --git a/sound/pci/ctxfi/ctpcm.c b/sound/pci/ctxfi/ctpcm.c
index 85ab43e89212..457d21189b0d 100644
--- a/sound/pci/ctxfi/ctpcm.c
+++ b/sound/pci/ctxfi/ctpcm.c
@@ -129,8 +129,6 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream)
129 129
130 apcm->substream = substream; 130 apcm->substream = substream;
131 apcm->interrupt = ct_atc_pcm_interrupt; 131 apcm->interrupt = ct_atc_pcm_interrupt;
132 runtime->private_data = apcm;
133 runtime->private_free = ct_atc_pcm_free_substream;
134 if (IEC958 == substream->pcm->device) { 132 if (IEC958 == substream->pcm->device) {
135 runtime->hw = ct_spdif_passthru_playback_hw; 133 runtime->hw = ct_spdif_passthru_playback_hw;
136 atc->spdif_out_passthru(atc, 1); 134 atc->spdif_out_passthru(atc, 1);
@@ -155,8 +153,12 @@ static int ct_pcm_playback_open(struct snd_pcm_substream *substream)
155 } 153 }
156 154
157 apcm->timer = ct_timer_instance_new(atc->timer, apcm); 155 apcm->timer = ct_timer_instance_new(atc->timer, apcm);
158 if (!apcm->timer) 156 if (!apcm->timer) {
157 kfree(apcm);
159 return -ENOMEM; 158 return -ENOMEM;
159 }
160 runtime->private_data = apcm;
161 runtime->private_free = ct_atc_pcm_free_substream;
160 162
161 return 0; 163 return 0;
162} 164}
@@ -278,8 +280,6 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream)
278 apcm->started = 0; 280 apcm->started = 0;
279 apcm->substream = substream; 281 apcm->substream = substream;
280 apcm->interrupt = ct_atc_pcm_interrupt; 282 apcm->interrupt = ct_atc_pcm_interrupt;
281 runtime->private_data = apcm;
282 runtime->private_free = ct_atc_pcm_free_substream;
283 runtime->hw = ct_pcm_capture_hw; 283 runtime->hw = ct_pcm_capture_hw;
284 runtime->hw.rate_max = atc->rsr * atc->msr; 284 runtime->hw.rate_max = atc->rsr * atc->msr;
285 285
@@ -298,8 +298,12 @@ static int ct_pcm_capture_open(struct snd_pcm_substream *substream)
298 } 298 }
299 299
300 apcm->timer = ct_timer_instance_new(atc->timer, apcm); 300 apcm->timer = ct_timer_instance_new(atc->timer, apcm);
301 if (!apcm->timer) 301 if (!apcm->timer) {
302 kfree(apcm);
302 return -ENOMEM; 303 return -ENOMEM;
304 }
305 runtime->private_data = apcm;
306 runtime->private_free = ct_atc_pcm_free_substream;
303 307
304 return 0; 308 return 0;
305} 309}
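
The ctpcm.c hunks above fix an ownership-ordering bug in both open paths: apcm was published through runtime->private_data/private_free before the timer allocation, so a failed ct_timer_instance_new() left the runtime holding a destructor for an object the error path had no safe way to free. Condensed to the corrected ordering (identifiers as in the hunk; an excerpt, not a standalone function):

	apcm->timer = ct_timer_instance_new(atc->timer, apcm);
	if (!apcm->timer) {
		kfree(apcm);	/* the runtime never saw apcm, so plain kfree is safe */
		return -ENOMEM;
	}
	/* Only now hand ownership to the runtime. */
	runtime->private_data = apcm;
	runtime->private_free = ct_atc_pcm_free_substream;
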
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 6361f752b5f3..846d1ead47fd 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3100,6 +3100,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3100 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), 3100 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
3101 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3101 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3102 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), 3102 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
3103 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_HP_LAPTOP),
3103 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD), 3104 SND_PCI_QUIRK(0x1179, 0xff1e, "Toshiba Satellite C650D", CXT5066_IDEAPAD),
3104 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5), 3105 SND_PCI_QUIRK(0x1179, 0xff50, "Toshiba Satellite P500-PSPGSC-01800T", CXT5066_OLPC_XO_1_5),
3105 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5), 3106 SND_PCI_QUIRK(0x1179, 0xffe0, "Toshiba Satellite Pro T130-15F", CXT5066_OLPC_XO_1_5),
@@ -3110,6 +3111,7 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3110 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD), 3111 SND_PCI_QUIRK(0x17aa, 0x21b2, "Thinkpad X100e", CXT5066_IDEAPAD),
3111 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD), 3112 SND_PCI_QUIRK(0x17aa, 0x21b3, "Thinkpad Edge 13 (197)", CXT5066_IDEAPAD),
3112 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD), 3113 SND_PCI_QUIRK(0x17aa, 0x21b4, "Thinkpad Edge", CXT5066_IDEAPAD),
3114 SND_PCI_QUIRK(0x17aa, 0x21c8, "Thinkpad Edge 11", CXT5066_IDEAPAD),
3113 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), 3115 SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
3114 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD), 3116 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G series", CXT5066_IDEAPAD),
3115 SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD), 3117 SND_PCI_QUIRK(0x17aa, 0x390a, "Lenovo S10-3t", CXT5066_IDEAPAD),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 5f00589cb791..8fddc9d08726 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1614,6 +1614,7 @@ do_sku:
1614 spec->init_amp = ALC_INIT_GPIO3; 1614 spec->init_amp = ALC_INIT_GPIO3;
1615 break; 1615 break;
1616 case 5: 1616 case 5:
1617 default:
1617 spec->init_amp = ALC_INIT_DEFAULT; 1618 spec->init_amp = ALC_INIT_DEFAULT;
1618 break; 1619 break;
1619 } 1620 }
@@ -2014,6 +2015,36 @@ static struct hda_verb alc888_acer_aspire_6530g_verbs[] = {
2014}; 2015};
2015 2016
2016/* 2017/*
2018 *ALC888 Acer Aspire 7730G model
2019 */
2020
2021static struct hda_verb alc888_acer_aspire_7730G_verbs[] = {
2022/* Bias voltage on for external mic port */
2023 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN | PIN_VREF80},
2024/* Front Mic: set to PIN_IN (empty by default) */
2025 {0x12, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_IN},
2026/* Unselect Front Mic by default in input mixer 3 */
2027 {0x22, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0xb)},
2028/* Enable unsolicited event for HP jack */
2029 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
2030/* Enable speaker output */
2031 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2032 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2033 {0x14, AC_VERB_SET_EAPD_BTLENABLE, 2},
2034/* Enable headphone output */
2035 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT | PIN_HP},
2036 {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2037 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
2038 {0x15, AC_VERB_SET_EAPD_BTLENABLE, 2},
2039/*Enable internal subwoofer */
2040 {0x17, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
2041 {0x17, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
2042 {0x17, AC_VERB_SET_CONNECT_SEL, 0x02},
2043 {0x17, AC_VERB_SET_EAPD_BTLENABLE, 2},
2044 { }
2045};
2046
2047/*
2017 * ALC889 Acer Aspire 8930G model 2048 * ALC889 Acer Aspire 8930G model
2018 */ 2049 */
2019 2050
@@ -2200,6 +2231,16 @@ static void alc888_acer_aspire_6530g_setup(struct hda_codec *codec)
2200 spec->autocfg.speaker_pins[2] = 0x17; 2231 spec->autocfg.speaker_pins[2] = 0x17;
2201} 2232}
2202 2233
2234static void alc888_acer_aspire_7730g_setup(struct hda_codec *codec)
2235{
2236 struct alc_spec *spec = codec->spec;
2237
2238 spec->autocfg.hp_pins[0] = 0x15;
2239 spec->autocfg.speaker_pins[0] = 0x14;
2240 spec->autocfg.speaker_pins[1] = 0x16;
2241 spec->autocfg.speaker_pins[2] = 0x17;
2242}
2243
2203static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec) 2244static void alc889_acer_aspire_8930g_setup(struct hda_codec *codec)
2204{ 2245{
2205 struct alc_spec *spec = codec->spec; 2246 struct alc_spec *spec = codec->spec;
@@ -9524,13 +9565,6 @@ static struct hda_verb alc883_acer_eapd_verbs[] = {
9524 { } 9565 { }
9525}; 9566};
9526 9567
9527static struct hda_verb alc888_acer_aspire_7730G_verbs[] = {
9528 {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
9529 {0x17, AC_VERB_SET_CONNECT_SEL, 0x02},
9530 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
9531 { } /* end */
9532};
9533
9534static void alc888_6st_dell_setup(struct hda_codec *codec) 9568static void alc888_6st_dell_setup(struct hda_codec *codec)
9535{ 9569{
9536 struct alc_spec *spec = codec->spec; 9570 struct alc_spec *spec = codec->spec;
@@ -9831,7 +9865,6 @@ static struct snd_pci_quirk alc882_cfg_tbl[] = {
9831 SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763), 9865 SND_PCI_QUIRK(0x17aa, 0x3bfc, "Lenovo NB0763", ALC883_LENOVO_NB0763),
9832 SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763), 9866 SND_PCI_QUIRK(0x17aa, 0x3bfd, "Lenovo NB0763", ALC883_LENOVO_NB0763),
9833 SND_PCI_QUIRK(0x17aa, 0x101d, "Lenovo Sky", ALC888_LENOVO_SKY), 9867 SND_PCI_QUIRK(0x17aa, 0x101d, "Lenovo Sky", ALC888_LENOVO_SKY),
9834 SND_PCI_QUIRK(0x17c0, 0x4071, "MEDION MD2", ALC883_MEDION_MD2),
9835 SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG), 9868 SND_PCI_QUIRK(0x17c0, 0x4085, "MEDION MD96630", ALC888_LENOVO_MS7195_DIG),
9836 SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG), 9869 SND_PCI_QUIRK(0x17f2, 0x5000, "Albatron KI690-AM2", ALC883_6ST_DIG),
9837 SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66), 9870 SND_PCI_QUIRK(0x1991, 0x5625, "Haier W66", ALC883_HAIER_W66),
@@ -10328,7 +10361,7 @@ static struct alc_config_preset alc882_presets[] = {
10328 .const_channel_count = 6, 10361 .const_channel_count = 6,
10329 .input_mux = &alc883_capture_source, 10362 .input_mux = &alc883_capture_source,
10330 .unsol_event = alc_automute_amp_unsol_event, 10363 .unsol_event = alc_automute_amp_unsol_event,
10331 .setup = alc888_acer_aspire_6530g_setup, 10364 .setup = alc888_acer_aspire_7730g_setup,
10332 .init_hook = alc_automute_amp, 10365 .init_hook = alc_automute_amp,
10333 }, 10366 },
10334 [ALC883_MEDION] = { 10367 [ALC883_MEDION] = {
@@ -14623,7 +14656,10 @@ static int alc275_setup_dual_adc(struct hda_codec *codec)
14623/* different alc269-variants */ 14656/* different alc269-variants */
14624enum { 14657enum {
14625 ALC269_TYPE_NORMAL, 14658 ALC269_TYPE_NORMAL,
14659 ALC269_TYPE_ALC258,
14626 ALC269_TYPE_ALC259, 14660 ALC269_TYPE_ALC259,
14661 ALC269_TYPE_ALC269VB,
14662 ALC269_TYPE_ALC270,
14627 ALC269_TYPE_ALC271X, 14663 ALC269_TYPE_ALC271X,
14628}; 14664};
14629 14665
@@ -15023,7 +15059,7 @@ static int alc269_fill_coef(struct hda_codec *codec)
15023static int patch_alc269(struct hda_codec *codec) 15059static int patch_alc269(struct hda_codec *codec)
15024{ 15060{
15025 struct alc_spec *spec; 15061 struct alc_spec *spec;
15026 int board_config; 15062 int board_config, coef;
15027 int err; 15063 int err;
15028 15064
15029 spec = kzalloc(sizeof(*spec), GFP_KERNEL); 15065 spec = kzalloc(sizeof(*spec), GFP_KERNEL);
@@ -15034,14 +15070,23 @@ static int patch_alc269(struct hda_codec *codec)
15034 15070
15035 alc_auto_parse_customize_define(codec); 15071 alc_auto_parse_customize_define(codec);
15036 15072
15037 if ((alc_read_coef_idx(codec, 0) & 0x00f0) == 0x0010){ 15073 coef = alc_read_coef_idx(codec, 0);
15074 if ((coef & 0x00f0) == 0x0010) {
15038 if (codec->bus->pci->subsystem_vendor == 0x1025 && 15075 if (codec->bus->pci->subsystem_vendor == 0x1025 &&
15039 spec->cdefine.platform_type == 1) { 15076 spec->cdefine.platform_type == 1) {
15040 alc_codec_rename(codec, "ALC271X"); 15077 alc_codec_rename(codec, "ALC271X");
15041 spec->codec_variant = ALC269_TYPE_ALC271X; 15078 spec->codec_variant = ALC269_TYPE_ALC271X;
15042 } else { 15079 } else if ((coef & 0xf000) == 0x1000) {
15080 spec->codec_variant = ALC269_TYPE_ALC270;
15081 } else if ((coef & 0xf000) == 0x2000) {
15043 alc_codec_rename(codec, "ALC259"); 15082 alc_codec_rename(codec, "ALC259");
15044 spec->codec_variant = ALC269_TYPE_ALC259; 15083 spec->codec_variant = ALC269_TYPE_ALC259;
15084 } else if ((coef & 0xf000) == 0x3000) {
15085 alc_codec_rename(codec, "ALC258");
15086 spec->codec_variant = ALC269_TYPE_ALC258;
15087 } else {
15088 alc_codec_rename(codec, "ALC269VB");
15089 spec->codec_variant = ALC269_TYPE_ALC269VB;
15045 } 15090 }
15046 } else 15091 } else
15047 alc_fix_pll_init(codec, 0x20, 0x04, 15); 15092 alc_fix_pll_init(codec, 0x20, 0x04, 15);
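
The patch_alc269() hunk above caches alc_read_coef_idx(codec, 0) once and, within the existing (coef & 0x00f0) == 0x0010 gate, distinguishes the new variants on bits 15:12 of that value; the 0x1025/platform_type special case still takes precedence as ALC271X. The coef-keyed branches restated as a switch, a hypothetical restructuring rather than what the patch ships:

	switch (coef & 0xf000) {
	case 0x1000:
		spec->codec_variant = ALC269_TYPE_ALC270;	/* no rename */
		break;
	case 0x2000:
		alc_codec_rename(codec, "ALC259");
		spec->codec_variant = ALC269_TYPE_ALC259;
		break;
	case 0x3000:
		alc_codec_rename(codec, "ALC258");
		spec->codec_variant = ALC269_TYPE_ALC258;
		break;
	default:
		alc_codec_rename(codec, "ALC269VB");
		spec->codec_variant = ALC269_TYPE_ALC269VB;
		break;
	}
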
@@ -15104,7 +15149,7 @@ static int patch_alc269(struct hda_codec *codec)
15104 spec->stream_digital_capture = &alc269_pcm_digital_capture; 15149 spec->stream_digital_capture = &alc269_pcm_digital_capture;
15105 15150
15106 if (!spec->adc_nids) { /* wasn't filled automatically? use default */ 15151 if (!spec->adc_nids) { /* wasn't filled automatically? use default */
15107 if (spec->codec_variant != ALC269_TYPE_NORMAL) { 15152 if (spec->codec_variant == ALC269_TYPE_NORMAL) {
15108 spec->adc_nids = alc269_adc_nids; 15153 spec->adc_nids = alc269_adc_nids;
 	spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids);
 	spec->capsrc_nids = alc269_capsrc_nids;
@@ -16898,7 +16943,7 @@ static struct alc_config_preset alc861vd_presets[] = {
 static int alc861vd_auto_create_input_ctls(struct hda_codec *codec,
 						const struct auto_pin_cfg *cfg)
 {
-	return alc_auto_create_input_ctls(codec, cfg, 0x15, 0x09, 0);
+	return alc_auto_create_input_ctls(codec, cfg, 0x0b, 0x22, 0);
 }
 
 
@@ -18952,6 +18997,8 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
 		return 0x02;
 	else if (nid >= 0x0c && nid <= 0x0e)
 		return nid - 0x0c + 0x02;
+	else if (nid == 0x26) /* ALC887-VD has this DAC too */
+		return 0x25;
 	else
 		return 0;
 }
@@ -18960,7 +19007,7 @@ static inline hda_nid_t alc662_mix_to_dac(hda_nid_t nid)
 static hda_nid_t alc662_dac_to_mix(struct hda_codec *codec, hda_nid_t pin,
 				   hda_nid_t dac)
 {
-	hda_nid_t mix[4];
+	hda_nid_t mix[5];
 	int i, num;
 
 	num = snd_hda_get_connections(codec, pin, mix, ARRAY_SIZE(mix));
@@ -19298,6 +19345,7 @@ static const struct alc_fixup alc662_fixups[] = {
 
 static struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
+	SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
 	SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
 	{}
@@ -19419,7 +19467,10 @@ static int patch_alc888(struct hda_codec *codec)
 {
 	if ((alc_read_coef_idx(codec, 0) & 0x00f0)==0x0030){
 		kfree(codec->chip_name);
-		codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
+		if (codec->vendor_id == 0x10ec0887)
+			codec->chip_name = kstrdup("ALC887-VD", GFP_KERNEL);
+		else
+			codec->chip_name = kstrdup("ALC888-VD", GFP_KERNEL);
 		if (!codec->chip_name) {
 			alc_free(codec);
 			return -ENOMEM;
@@ -19909,7 +19960,7 @@ static struct hda_codec_preset snd_hda_preset_realtek[] = {
 	{ .id = 0x10ec0885, .rev = 0x100103, .name = "ALC889A",
 	  .patch = patch_alc882 },
 	{ .id = 0x10ec0885, .name = "ALC885", .patch = patch_alc882 },
-	{ .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc882 },
+	{ .id = 0x10ec0887, .name = "ALC887", .patch = patch_alc888 },
 	{ .id = 0x10ec0888, .rev = 0x100101, .name = "ALC1200",
 	  .patch = patch_alc882 },
 	{ .id = 0x10ec0888, .name = "ALC888", .patch = patch_alc888 },
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 93fa59cc60ef..efa4225f5fd6 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -389,6 +389,11 @@ static hda_nid_t stac92hd83xxx_dmic_nids[STAC92HD83XXX_NUM_DMICS + 1] = {
 	0x11, 0x20, 0
 };
 
+#define STAC92HD87B_NUM_DMICS	1
+static hda_nid_t stac92hd87b_dmic_nids[STAC92HD87B_NUM_DMICS + 1] = {
+	0x11, 0
+};
+
 #define STAC92HD83XXX_NUM_CAPS	2
 static unsigned long stac92hd83xxx_capvols[] = {
 	HDA_COMPOSE_AMP_VAL(0x17, 3, 0, HDA_OUTPUT),
@@ -1622,6 +1627,8 @@ static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
 static struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
 	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02a1,
 		      "Alienware M17x", STAC_ALIENWARE_M17X),
+	SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
+		      "Alienware M17x", STAC_ALIENWARE_M17X),
 	{} /* terminator */
 };
 
@@ -3486,10 +3493,8 @@ static int stac92xx_auto_create_dmic_input_ctls(struct hda_codec *codec,
 			return err;
 		}
 
-		if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1) {
+		if (snd_hda_get_bool_hint(codec, "separate_dmux") != 1)
 			snd_hda_add_imux_item(imux, label, index, NULL);
-			spec->num_analog_muxes++;
-		}
 	}
 
 	return 0;
@@ -5452,12 +5457,17 @@ again:
 				stac92hd83xxx_brd_tbl[spec->board_config]);
 
 	switch (codec->vendor_id) {
+	case 0x111d76d1:
+	case 0x111d76d9:
+		spec->dmic_nids = stac92hd87b_dmic_nids;
+		spec->num_dmics = stac92xx_connected_ports(codec,
+				stac92hd87b_dmic_nids,
+				STAC92HD87B_NUM_DMICS);
+		/* Fall through */
 	case 0x111d7666:
 	case 0x111d7667:
 	case 0x111d7668:
 	case 0x111d7669:
-	case 0x111d76d1:
-	case 0x111d76d9:
 		spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids);
 		spec->pin_nids = stac92hd88xxx_pin_nids;
 		spec->mono_nid = 0;
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 400f9ebd243e..629a5494347a 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -1866,6 +1866,12 @@ static struct ac97_quirk ac97_quirks[] __devinitdata = {
 	},
 	{
 		.subvendor = 0x1028,
+		.subdevice = 0x0182,
+		.name = "Dell Latitude D610",	/* STAC9750/51 */
+		.type = AC97_TUNE_HP_ONLY
+	},
+	{
+		.subvendor = 0x1028,
 		.subdevice = 0x0186,
 		.name = "Dell Latitude D810", /* cf. Malone #41015 */
 		.type = AC97_TUNE_HP_MUTE_LED
diff --git a/sound/pci/mixart/mixart_hwdep.h b/sound/pci/mixart/mixart_hwdep.h
index a46f5083db99..812e288ef2e7 100644
--- a/sound/pci/mixart/mixart_hwdep.h
+++ b/sound/pci/mixart/mixart_hwdep.h
@@ -25,11 +25,21 @@
 
 #include <sound/hwdep.h>
 
+#ifndef readl_be
 #define readl_be(x) be32_to_cpu(__raw_readl(x))
+#endif
+
+#ifndef writel_be
 #define writel_be(data,addr) __raw_writel(cpu_to_be32(data),addr)
+#endif
 
+#ifndef readl_le
 #define readl_le(x) le32_to_cpu(__raw_readl(x))
+#endif
+
+#ifndef writel_le
 #define writel_le(data,addr) __raw_writel(cpu_to_le32(data),addr)
+#endif
 
 #define MIXART_MEM(mgr,x)	((mgr)->mem[0].virt + (x))
 #define MIXART_REG(mgr,x)	((mgr)->mem[1].virt + (x))
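The #ifndef guards added above let a platform's own <asm/io.h> supply byte-swapping accessors while keeping these driver-local definitions as fallbacks. A minimal, hedged sketch of the idiom (not taken from the patch itself):

	#include <asm/io.h>
	#include <asm/byteorder.h>

	/* If the platform already defines readl_be(), this fallback is
	 * skipped; otherwise the open-coded swap below is compiled in. */
	#ifndef readl_be
	#define readl_be(addr) be32_to_cpu(__raw_readl(addr))
	#endif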
diff --git a/sound/ppc/pmac.c b/sound/ppc/pmac.c
index 85081172403f..b47cfd45b3b9 100644
--- a/sound/ppc/pmac.c
+++ b/sound/ppc/pmac.c
@@ -1228,10 +1228,8 @@ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
 					    chip->rsrc[i].start + 1,
 					    rnames[i]) == NULL) {
 			printk(KERN_ERR "snd: can't request rsrc "
-			       " %d (%s: 0x%016llx:%016llx)\n",
-			       i, rnames[i],
-			       (unsigned long long)chip->rsrc[i].start,
-			       (unsigned long long)chip->rsrc[i].end);
+			       " %d (%s: %pR)\n",
+			       i, rnames[i], &chip->rsrc[i]);
 			err = -ENODEV;
 			goto __error;
 		}
@@ -1256,10 +1254,8 @@ int __devinit snd_pmac_new(struct snd_card *card, struct snd_pmac **chip_return)
 					    chip->rsrc[i].start + 1,
 					    rnames[i]) == NULL) {
 			printk(KERN_ERR "snd: can't request rsrc "
-			       " %d (%s: 0x%016llx:%016llx)\n",
-			       i, rnames[i],
-			       (unsigned long long)chip->rsrc[i].start,
-			       (unsigned long long)chip->rsrc[i].end);
+			       " %d (%s: %pR)\n",
+			       i, rnames[i], &chip->rsrc[i]);
 			err = -ENODEV;
 			goto __error;
 		}
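Both hunks switch to %pR, the kernel's printk pointer extension for struct resource, which prints the range and flags without hand-casting start/end. A hedged sketch of the usage:

	#include <linux/ioport.h>
	#include <linux/kernel.h>

	static void report_resource(struct resource *res)
	{
		/* %pR renders e.g. "[mem 0x80000000-0x80000fff]" */
		printk(KERN_INFO "claimed %pR\n", res);
	}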
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index e720d5e6f04c..bee3c94f58b0 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -16,7 +16,8 @@ config SND_ATMEL_SOC_SSC
 
 config SND_AT91_SOC_SAM9G20_WM8731
 	tristate "SoC Audio support for WM8731-based At91sam9g20 evaluation board"
-	depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC
+	depends on ATMEL_SSC && ARCH_AT91SAM9G20 && SND_ATMEL_SOC && \
+		   AT91_PROGRAMMABLE_CLOCKS
 	select SND_ATMEL_SOC_SSC
 	select SND_SOC_WM8731
 	help
@@ -25,7 +26,7 @@ config SND_AT91_SOC_SAM9G20_WM8731
 
 config SND_AT32_SOC_PLAYPAQ
 	tristate "SoC Audio support for PlayPaq with WM8510"
-	depends on SND_ATMEL_SOC && BOARD_PLAYPAQ
+	depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
 	select SND_ATMEL_SOC_SSC
 	select SND_SOC_WM8510
 	help
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c
index 293569dfd0ed..e521ada80542 100644
--- a/sound/soc/atmel/sam9g20_wm8731.c
+++ b/sound/soc/atmel/sam9g20_wm8731.c
@@ -222,9 +222,9 @@ static int __init at91sam9g20ek_init(void)
 	}
 
 	pllb = clk_get(NULL, "pllb");
-	if (IS_ERR(mclk)) {
+	if (IS_ERR(pllb)) {
 		printk(KERN_ERR "ASoC: Failed to get PLLB\n");
-		ret = PTR_ERR(mclk);
+		ret = PTR_ERR(pllb);
 		goto err_mclk;
 	}
 	ret = clk_set_parent(mclk, pllb);
@@ -240,6 +240,7 @@ static int __init at91sam9g20ek_init(void)
 	if (!at91sam9g20ek_snd_device) {
 		printk(KERN_ERR "ASoC: Platform device allocation failed\n");
 		ret = -ENOMEM;
+		goto err_mclk;
 	}
 
 	platform_set_drvdata(at91sam9g20ek_snd_device,
@@ -248,11 +249,13 @@ static int __init at91sam9g20ek_init(void)
 	ret = platform_device_add(at91sam9g20ek_snd_device);
 	if (ret) {
 		printk(KERN_ERR "ASoC: Platform device allocation failed\n");
-		platform_device_put(at91sam9g20ek_snd_device);
+		goto err_device_add;
 	}
 
 	return ret;
 
+err_device_add:
+	platform_device_put(at91sam9g20ek_snd_device);
 err_mclk:
 	clk_put(mclk);
 	mclk = NULL;
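The new err_device_add label completes the usual kernel unwind ladder: each failure jumps to the label that releases exactly what has been acquired so far, in reverse order. A condensed, hedged sketch of the pattern under hypothetical names (my_clk, my_pdev):

	#include <linux/platform_device.h>
	#include <linux/clk.h>
	#include <linux/err.h>

	static int __init example_init(void)
	{
		struct clk *my_clk;
		struct platform_device *my_pdev;
		int ret;

		my_clk = clk_get(NULL, "mclk");
		if (IS_ERR(my_clk))
			return PTR_ERR(my_clk);

		my_pdev = platform_device_alloc("soc-audio", -1);
		if (!my_pdev) {
			ret = -ENOMEM;
			goto err_clk;
		}

		ret = platform_device_add(my_pdev);
		if (ret)
			goto err_device_add;

		return 0;

	err_device_add:
		platform_device_put(my_pdev);	/* allocated but never added */
	err_clk:
		clk_put(my_clk);
		return ret;
	}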
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
index e3d283561c19..86e0f8586dc3 100644
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ b/sound/soc/atmel/snd-soc-afeb9260.c
@@ -167,7 +167,6 @@ static int __init afeb9260_soc_init(void)
 
 	return 0;
 err1:
-	platform_device_del(afeb9260_snd_device);
 	platform_device_put(afeb9260_snd_device);
 	return err;
 }
diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c
index bc22ee93a75d..d63e28773eb1 100644
--- a/sound/soc/codecs/max98088.c
+++ b/sound/soc/codecs/max98088.c
@@ -28,6 +28,11 @@
 #include <sound/max98088.h>
 #include "max98088.h"
 
+enum max98088_type {
+	MAX98088,
+	MAX98089,
+};
+
 struct max98088_cdata {
 	unsigned int rate;
 	unsigned int fmt;
@@ -36,6 +41,7 @@ struct max98088_cdata {
 
 struct max98088_priv {
 	u8 reg_cache[M98088_REG_CNT];
+	enum max98088_type devtype;
 	void *control_data;
 	struct max98088_pdata *pdata;
 	unsigned int sysclk;
@@ -2013,7 +2019,10 @@ err_access:
 
 static int max98088_remove(struct snd_soc_codec *codec)
 {
+	struct max98088_priv *max98088 = snd_soc_codec_get_drvdata(codec);
+
 	max98088_set_bias_level(codec, SND_SOC_BIAS_OFF);
+	kfree(max98088->eq_texts);
 
 	return 0;
 }
@@ -2040,6 +2049,8 @@ static int max98088_i2c_probe(struct i2c_client *i2c,
 	if (max98088 == NULL)
 		return -ENOMEM;
 
+	max98088->devtype = id->driver_data;
+
 	i2c_set_clientdata(i2c, max98088);
 	max98088->control_data = i2c;
 	max98088->pdata = i2c->dev.platform_data;
@@ -2059,7 +2070,8 @@ static int __devexit max98088_i2c_remove(struct i2c_client *client)
 }
 
 static const struct i2c_device_id max98088_i2c_id[] = {
-	{ "max98088", 0 },
+	{ "max98088", MAX98088 },
+	{ "max98089", MAX98089 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, max98088_i2c_id);
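The MAX98089 is supported by reusing the MAX98088 driver: the variant tag lives in the .driver_data field of the i2c_device_id table and is copied into the private struct at probe time. A hedged sketch of the mechanism with hypothetical names:

	#include <linux/i2c.h>

	enum chip_type { CHIP_A, CHIP_B };

	static const struct i2c_device_id my_ids[] = {
		{ "chip-a", CHIP_A },
		{ "chip-b", CHIP_B },
		{ }
	};

	static int my_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
	{
		/* id points at the table entry that matched this device */
		enum chip_type type = id->driver_data;

		dev_info(&i2c->dev, "variant %d\n", type);
		return 0;
	}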
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 00d67cc8e206..061f9e5a497b 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -383,6 +383,7 @@ static struct snd_soc_codec_driver soc_codec_dev_stac9766 = {
 	.reg_cache_size = sizeof(stac9766_reg),
 	.reg_word_size = sizeof(u16),
 	.reg_cache_step = 2,
+	.reg_cache_default = stac9766_reg,
 };
 
 static __devinit int stac9766_probe(struct platform_device *pdev)
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index fc687790188b..77b8f9ae29be 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1176,7 +1176,7 @@ EXPORT_SYMBOL_GPL(aic3x_set_gpio);
 int aic3x_get_gpio(struct snd_soc_codec *codec, int gpio)
 {
 	u8 reg = gpio ? AIC3X_GPIO2_REG : AIC3X_GPIO1_REG;
-	u8 val, bit = gpio ? 2: 1;
+	u8 val = 0, bit = gpio ? 2 : 1;
 
 	aic3x_read(codec, reg, &val);
 	return (val >> bit) & 1;
@@ -1204,7 +1204,7 @@ EXPORT_SYMBOL_GPL(aic3x_set_headset_detection);
 
 int aic3x_headset_detected(struct snd_soc_codec *codec)
 {
-	u8 val;
+	u8 val = 0;
 	aic3x_read(codec, AIC3X_HEADSET_DETECT_CTRL_B, &val);
 	return (val >> 4) & 1;
 }
@@ -1212,7 +1212,7 @@ EXPORT_SYMBOL_GPL(aic3x_headset_detected);
 
 int aic3x_button_pressed(struct snd_soc_codec *codec)
 {
-	u8 val;
+	u8 val = 0;
 	aic3x_read(codec, AIC3X_HEADSET_DETECT_CTRL_B, &val);
 	return (val >> 5) & 1;
 }
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index ee4fb201de60..d2c243095673 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -78,8 +78,10 @@ static int tpa6130a2_i2c_write(int reg, u8 value)
 
 	if (data->power_state) {
 		val = i2c_smbus_write_byte_data(tpa6130a2_client, reg, value);
-		if (val < 0)
+		if (val < 0) {
 			dev_err(&tpa6130a2_client->dev, "Write failed\n");
+			return val;
+		}
 	}
 
 	/* Either powered on or off, we save the context */
diff --git a/sound/soc/codecs/uda134x.c b/sound/soc/codecs/uda134x.c
index 7540a509a6f5..464f0cfa4c7a 100644
--- a/sound/soc/codecs/uda134x.c
+++ b/sound/soc/codecs/uda134x.c
@@ -597,6 +597,7 @@ static struct snd_soc_codec_driver soc_codec_dev_uda134x = {
 	.resume = uda134x_soc_resume,
 	.reg_cache_size = sizeof(uda134x_reg),
 	.reg_word_size = sizeof(u8),
+	.reg_cache_default = uda134x_reg,
 	.reg_cache_step = 1,
 	.read = uda134x_read_reg_cache,
 	.write = uda134x_write,
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index f4f1fba38eb9..7611add7f8c3 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -831,7 +831,7 @@ static int wm8350_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 	}
 
 	/* MCLK direction */
-	if (dir == WM8350_MCLK_DIR_OUT)
+	if (dir == SND_SOC_CLOCK_OUT)
 		wm8350_set_bits(wm8350, WM8350_CLOCK_CONTROL_2,
 				WM8350_MCLK_DIR);
 	else
@@ -1586,6 +1586,13 @@ static int wm8350_codec_probe(struct snd_soc_codec *codec)
 	wm8350_set_bits(wm8350, WM8350_ROUT2_VOLUME,
 			WM8350_OUT2_VU | WM8350_OUT2R_MUTE);
 
+	/* Make sure AIF tristating is disabled by default */
+	wm8350_clear_bits(wm8350, WM8350_AI_FORMATING, WM8350_AIF_TRI);
+
+	/* Make sure we've got a sane companding setup too */
+	wm8350_clear_bits(wm8350, WM8350_ADC_DAC_COMP,
+			  WM8350_DAC_COMP | WM8350_LOOPBACK);
+
 	/* Make sure jack detect is disabled to start off with */
 	wm8350_clear_bits(wm8350, WM8350_JACK_DETECT,
 			  WM8350_JDL_ENA | WM8350_JDR_ENA);
diff --git a/sound/soc/codecs/wm8523.c b/sound/soc/codecs/wm8523.c
index 712ef7c76f90..9a433a5396cb 100644
--- a/sound/soc/codecs/wm8523.c
+++ b/sound/soc/codecs/wm8523.c
@@ -146,7 +146,6 @@ static int wm8523_startup(struct snd_pcm_substream *substream,
 		return -EINVAL;
 	}
 
-	return 0;
 	snd_pcm_hw_constraint_list(substream->runtime, 0,
 				   SNDRV_PCM_HW_PARAM_RATE,
 				   &wm8523->rate_constraint);
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index 631385802eb4..e725c09a3e79 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -526,7 +526,7 @@ static int wm8731_probe(struct snd_soc_codec *codec)
 	snd_soc_update_bits(codec, WM8731_RINVOL, 0x100, 0);
 
 	/* Disable bypass path by default */
-	snd_soc_update_bits(codec, WM8731_APANA, 0x4, 0);
+	snd_soc_update_bits(codec, WM8731_APANA, 0x8, 0);
 
 	snd_soc_add_controls(codec, wm8731_snd_controls,
 			     ARRAY_SIZE(wm8731_snd_controls));
diff --git a/sound/soc/codecs/wm8776.c b/sound/soc/codecs/wm8776.c
index 04182c464e35..0132a27140ae 100644
--- a/sound/soc/codecs/wm8776.c
+++ b/sound/soc/codecs/wm8776.c
@@ -34,7 +34,6 @@
 /* codec private data */
 struct wm8776_priv {
 	enum snd_soc_control_type control_type;
-	u16 reg_cache[WM8776_CACHEREGNUM];
 	int sysclk[2];
 };
 
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 33be84e506ea..fca60a0b57b8 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -2498,6 +2498,8 @@ static int wm8904_remove(struct snd_soc_codec *codec)
 
 	wm8904_set_bias_level(codec, SND_SOC_BIAS_OFF);
 	regulator_bulk_free(ARRAY_SIZE(wm8904->supplies), wm8904->supplies);
+	kfree(wm8904->retune_mobile_texts);
+	kfree(wm8904->drc_texts);
 
 	return 0;
 }
diff --git a/sound/soc/codecs/wm8961.c b/sound/soc/codecs/wm8961.c
index 4f326f604104..8340485c9851 100644
--- a/sound/soc/codecs/wm8961.c
+++ b/sound/soc/codecs/wm8961.c
@@ -711,7 +711,7 @@ static int wm8961_hw_params(struct snd_pcm_substream *substream,
 	if (fs <= 24000)
 		reg |= WM8961_DACSLOPE;
 	else
-		reg &= WM8961_DACSLOPE;
+		reg &= ~WM8961_DACSLOPE;
 	snd_soc_write(codec, WM8961_ADC_DAC_CONTROL_2, reg);
 
 	return 0;
@@ -736,7 +736,7 @@ static int wm8961_set_sysclk(struct snd_soc_dai *dai, int clk_id,
 		freq /= 2;
 	} else {
 		dev_dbg(codec->dev, "Using MCLK/1 for %dHz MCLK\n", freq);
-		reg &= WM8961_MCLKDIV;
+		reg &= ~WM8961_MCLKDIV;
 	}
 
 	snd_soc_write(codec, WM8961_CLOCKING1, reg);
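Both hunks fix the same slip: `reg &= MASK` keeps only the mask bit and zeroes the rest of the register, while the intended `reg &= ~MASK` clears just that bit. A standalone plain-C illustration (not driver code; MCLKDIV value is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	#define MCLKDIV 0x0800	/* hypothetical single-bit field */

	int main(void)
	{
		uint16_t reg = 0x1234;

		/* buggy: prints 0x0000, every other bit is lost */
		printf("reg & MASK  = 0x%04x\n", (unsigned)(reg & MCLKDIV));
		/* fixed: prints 0x1234, only the MCLKDIV bit is cleared */
		printf("reg & ~MASK = 0x%04x\n", (unsigned)(reg & ~MCLKDIV));
		return 0;
	}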
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 894d0cd3aa9b..e8092745a207 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -3500,8 +3500,11 @@ static ssize_t wm8962_beep_set(struct device *dev,
 {
 	struct wm8962_priv *wm8962 = dev_get_drvdata(dev);
 	long int time;
+	int ret;
 
-	strict_strtol(buf, 10, &time);
+	ret = strict_strtol(buf, 10, &time);
+	if (ret != 0)
+		return ret;
 
 	input_event(wm8962->beep, EV_SND, SND_TONE, time);
 
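The fix checks the parse result so a malformed sysfs write returns an error instead of passing whatever happened to be in `time` to input_event(). A hedged sketch of the pattern (strict_strtol() is the era-appropriate API; it was later renamed kstrtol(); my_store is hypothetical):

	#include <linux/kernel.h>

	static ssize_t my_store(const char *buf, size_t count)
	{
		long value;
		int ret;

		ret = strict_strtol(buf, 10, &value);
		if (ret != 0)
			return ret;	/* reject input rather than use junk */

		/* ... act on 'value' ... */
		return count;
	}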
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 0db59c3aa5d4..4d3e6f1ac584 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -3903,6 +3903,8 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
 		return -ENOMEM;
 	snd_soc_codec_set_drvdata(codec, wm8994);
 
+	codec->reg_cache = &wm8994->reg_cache;
+
 	wm8994->pdata = dev_get_platdata(codec->dev->parent);
 	wm8994->codec = codec;
 
@@ -4059,6 +4061,8 @@ static int wm8994_codec_remove(struct snd_soc_codec *codec)
 	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC2_DET, wm8994);
 	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_SHRT, wm8994);
 	wm8994_free_irq(codec->control_data, WM8994_IRQ_MIC1_DET, wm8994);
+	kfree(wm8994->retune_mobile_texts);
+	kfree(wm8994->drc_texts);
 	kfree(wm8994);
 
 	return 0;
@@ -4071,6 +4075,8 @@ static struct snd_soc_codec_driver soc_codec_dev_wm8994 = {
 	.resume =	wm8994_resume,
 	.read =		wm8994_read,
 	.write =	wm8994_write,
+	.readable_register = wm8994_readable,
+	.volatile_register = wm8994_volatile,
 	.set_bias_level = wm8994_set_bias_level,
 };
 
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 2b07b17a6b2d..bc9e6b0b3f6f 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -157,12 +157,23 @@ static int evm_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 }
 
 /* davinci-evm digital audio interface glue - connects codec <--> CPU */
-static struct snd_soc_dai_link evm_dai = {
+static struct snd_soc_dai_link dm6446_evm_dai = {
 	.name = "TLV320AIC3X",
 	.stream_name = "AIC3X",
-	.cpu_dai_name = "davinci-mcasp.0",
+	.cpu_dai_name = "davinci-mcbsp",
 	.codec_dai_name = "tlv320aic3x-hifi",
-	.codec_name = "tlv320aic3x-codec.0-001a",
+	.codec_name = "tlv320aic3x-codec.1-001b",
+	.platform_name = "davinci-pcm-audio",
+	.init = evm_aic3x_init,
+	.ops = &evm_ops,
+};
+
+static struct snd_soc_dai_link dm355_evm_dai = {
+	.name = "TLV320AIC3X",
+	.stream_name = "AIC3X",
+	.cpu_dai_name = "davinci-mcbsp.1",
+	.codec_dai_name = "tlv320aic3x-hifi",
+	.codec_name = "tlv320aic3x-codec.1-001b",
 	.platform_name = "davinci-pcm-audio",
 	.init = evm_aic3x_init,
 	.ops = &evm_ops,
@@ -172,10 +183,10 @@ static struct snd_soc_dai_link dm365_evm_dai = {
 #ifdef CONFIG_SND_DM365_AIC3X_CODEC
 	.name = "TLV320AIC3X",
 	.stream_name = "AIC3X",
-	.cpu_dai_name = "davinci-i2s",
+	.cpu_dai_name = "davinci-mcbsp",
 	.codec_dai_name = "tlv320aic3x-hifi",
 	.init = evm_aic3x_init,
-	.codec_name = "tlv320aic3x-codec.0-001a",
+	.codec_name = "tlv320aic3x-codec.1-0018",
 	.ops = &evm_ops,
 #elif defined(CONFIG_SND_DM365_VOICE_CODEC)
 	.name = "Voice Codec - CQ93VC",
@@ -219,10 +230,17 @@ static struct snd_soc_dai_link da8xx_evm_dai = {
 	.ops = &evm_ops,
 };
 
-/* davinci dm6446, dm355 evm audio machine driver */
-static struct snd_soc_card snd_soc_card_evm = {
-	.name = "DaVinci EVM",
-	.dai_link = &evm_dai,
+/* davinci dm6446 evm audio machine driver */
+static struct snd_soc_card dm6446_snd_soc_card_evm = {
+	.name = "DaVinci DM6446 EVM",
+	.dai_link = &dm6446_evm_dai,
+	.num_links = 1,
+};
+
+/* davinci dm355 evm audio machine driver */
+static struct snd_soc_card dm355_snd_soc_card_evm = {
+	.name = "DaVinci DM355 EVM",
+	.dai_link = &dm355_evm_dai,
 	.num_links = 1,
 };
 
@@ -261,10 +279,10 @@ static int __init evm_init(void)
 	int ret;
 
 	if (machine_is_davinci_evm()) {
-		evm_snd_dev_data = &snd_soc_card_evm;
+		evm_snd_dev_data = &dm6446_snd_soc_card_evm;
 		index = 0;
 	} else if (machine_is_davinci_dm355_evm()) {
-		evm_snd_dev_data = &snd_soc_card_evm;
+		evm_snd_dev_data = &dm355_snd_soc_card_evm;
 		index = 1;
 	} else if (machine_is_davinci_dm365_evm()) {
 		evm_snd_dev_data = &dm365_snd_soc_card_evm;
diff --git a/sound/soc/davinci/davinci-i2s.c b/sound/soc/davinci/davinci-i2s.c
index d46b545d41f4..9e0e565e6ed9 100644
--- a/sound/soc/davinci/davinci-i2s.c
+++ b/sound/soc/davinci/davinci-i2s.c
@@ -426,9 +426,6 @@ static int davinci_i2s_hw_params(struct snd_pcm_substream *substream,
 	snd_pcm_format_t fmt;
 	unsigned element_cnt = 1;
 
-	dai->capture_dma_data = dev->dma_params;
-	dai->playback_dma_data = dev->dma_params;
-
 	/* general line settings */
 	spcr = davinci_mcbsp_read_reg(dev, DAVINCI_MCBSP_SPCR_REG);
 	if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
@@ -601,6 +598,15 @@ static int davinci_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
 	return ret;
 }
 
+static int davinci_i2s_startup(struct snd_pcm_substream *substream,
+			       struct snd_soc_dai *dai)
+{
+	struct davinci_mcbsp_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+	snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
+	return 0;
+}
+
 static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
 		struct snd_soc_dai *dai)
 {
@@ -612,6 +618,7 @@ static void davinci_i2s_shutdown(struct snd_pcm_substream *substream,
 #define DAVINCI_I2S_RATES	SNDRV_PCM_RATE_8000_96000
 
 static struct snd_soc_dai_ops davinci_i2s_dai_ops = {
+	.startup	= davinci_i2s_startup,
 	.shutdown	= davinci_i2s_shutdown,
 	.prepare	= davinci_i2s_prepare,
 	.trigger	= davinci_i2s_trigger,
@@ -749,7 +756,7 @@ static struct platform_driver davinci_mcbsp_driver = {
 	.probe = davinci_i2s_probe,
 	.remove = davinci_i2s_remove,
 	.driver = {
-		.name = "davinci-i2s",
+		.name = "davinci-mcbsp",
 		.owner = THIS_MODULE,
 	},
 };
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 86918ee12419..fb55d2c5d704 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -715,9 +715,6 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
 	int word_length;
 	u8 fifo_level;
 
-	cpu_dai->capture_dma_data = dev->dma_params;
-	cpu_dai->playback_dma_data = dev->dma_params;
-
 	davinci_hw_common_param(dev, substream->stream);
 	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
 		fifo_level = dev->txnumevt;
@@ -799,7 +796,17 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
 	return ret;
 }
 
+static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
+				 struct snd_soc_dai *dai)
+{
+	struct davinci_audio_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+	snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
+	return 0;
+}
+
 static struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
+	.startup	= davinci_mcasp_startup,
 	.trigger	= davinci_mcasp_trigger,
 	.hw_params	= davinci_mcasp_hw_params,
 	.set_fmt	= davinci_mcasp_set_dai_fmt,
diff --git a/sound/soc/davinci/davinci-sffsdr.c b/sound/soc/davinci/davinci-sffsdr.c
index 009b6521a1bf..6c6666a1f942 100644
--- a/sound/soc/davinci/davinci-sffsdr.c
+++ b/sound/soc/davinci/davinci-sffsdr.c
@@ -84,7 +84,7 @@ static struct snd_soc_ops sffsdr_ops = {
 static struct snd_soc_dai_link sffsdr_dai = {
 	.name = "PCM3008", /* Codec name */
 	.stream_name = "PCM3008 HiFi",
-	.cpu_dai_name = "davinci-asp.0",
+	.cpu_dai_name = "davinci-mcbsp",
 	.codec_dai_name = "pcm3008-hifi",
 	.codec_name = "pcm3008-codec",
 	.platform_name = "davinci-pcm-audio",
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c
index ea232f6a2c21..9d2afccc3a2d 100644
--- a/sound/soc/davinci/davinci-vcif.c
+++ b/sound/soc/davinci/davinci-vcif.c
@@ -97,9 +97,6 @@ static int davinci_vcif_hw_params(struct snd_pcm_substream *substream,
 			&davinci_vcif_dev->dma_params[substream->stream];
 	u32 w;
 
-	dai->capture_dma_data = davinci_vcif_dev->dma_params;
-	dai->playback_dma_data = davinci_vcif_dev->dma_params;
-
 	/* Restart the codec before setup */
 	davinci_vcif_stop(substream);
 	davinci_vcif_start(substream);
@@ -174,9 +171,19 @@ static int davinci_vcif_trigger(struct snd_pcm_substream *substream, int cmd,
 	return ret;
 }
 
+static int davinci_vcif_startup(struct snd_pcm_substream *substream,
+				struct snd_soc_dai *dai)
+{
+	struct davinci_vcif_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+	snd_soc_dai_set_dma_data(dai, substream, dev->dma_params);
+	return 0;
+}
+
 #define DAVINCI_VCIF_RATES	SNDRV_PCM_RATE_8000_48000
 
 static struct snd_soc_dai_ops davinci_vcif_dai_ops = {
+	.startup	= davinci_vcif_startup,
 	.trigger	= davinci_vcif_trigger,
 	.hw_params	= davinci_vcif_hw_params,
 };
@@ -240,7 +247,10 @@ fail:
 
 static int davinci_vcif_remove(struct platform_device *pdev)
 {
+	struct davinci_vcif_dev *davinci_vcif_dev = dev_get_drvdata(&pdev->dev);
+
 	snd_soc_unregister_dai(&pdev->dev);
+	kfree(davinci_vcif_dev);
 
 	return 0;
 }
diff --git a/sound/soc/ep93xx/simone.c b/sound/soc/ep93xx/simone.c
index 4b0d19913728..286817946c56 100644
--- a/sound/soc/ep93xx/simone.c
+++ b/sound/soc/ep93xx/simone.c
@@ -54,24 +54,26 @@ static int __init simone_init(void)
 
 	ret = platform_device_add(simone_snd_ac97_device);
 	if (ret)
-		goto fail;
+		goto fail1;
 
 	simone_snd_device = platform_device_alloc("soc-audio", -1);
 	if (!simone_snd_device) {
 		ret = -ENOMEM;
-		goto fail;
+		goto fail2;
 	}
 
 	platform_set_drvdata(simone_snd_device, &snd_soc_simone);
 	ret = platform_device_add(simone_snd_device);
-	if (ret) {
-		platform_device_put(simone_snd_device);
-		goto fail;
-	}
+	if (ret)
+		goto fail3;
 
-	return ret;
+	return 0;
 
-fail:
+fail3:
+	platform_device_put(simone_snd_device);
+fail2:
+	platform_device_del(simone_snd_ac97_device);
+fail1:
 	platform_device_put(simone_snd_ac97_device);
 	return ret;
 }
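The relabelled error path encodes how far initialization got: a device that was only allocated needs platform_device_put(), while one that was also added must be deleted first (platform_device_unregister() combines the two). A hedged sketch with a hypothetical helper:

	#include <linux/platform_device.h>

	/* Tear down a platform device according to how far setup got. */
	static void my_teardown(struct platform_device *pdev, bool was_added)
	{
		if (was_added)
			platform_device_del(pdev);	/* undo _add() */
		platform_device_put(pdev);		/* undo _alloc() */
	}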
diff --git a/sound/soc/fsl/efika-audio-fabric.c b/sound/soc/fsl/efika-audio-fabric.c
index 53251e6b5bd5..108b5d8bd0e9 100644
--- a/sound/soc/fsl/efika-audio-fabric.c
+++ b/sound/soc/fsl/efika-audio-fabric.c
@@ -76,6 +76,7 @@ static __init int efika_fabric_init(void)
 	rc = platform_device_add(pdev);
 	if (rc) {
 		pr_err("efika_fabric_init: platform_device_add() failed\n");
+		platform_device_put(pdev);
 		return -ENODEV;
 	}
 	return 0;
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index dce6b551cd78..f92dca07cd35 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -9,7 +9,6 @@
 #include <linux/module.h>
 #include <linux/of_device.h>
 #include <linux/slab.h>
-#include <linux/of_device.h>
 #include <linux/of_platform.h>
 
 #include <sound/soc.h>
diff --git a/sound/soc/fsl/mpc5200_psc_i2s.c b/sound/soc/fsl/mpc5200_psc_i2s.c
index 74ffed41340f..9018fa5bf0db 100644
--- a/sound/soc/fsl/mpc5200_psc_i2s.c
+++ b/sound/soc/fsl/mpc5200_psc_i2s.c
@@ -160,7 +160,7 @@ static int __devinit psc_i2s_of_probe(struct platform_device *op,
 	rc = snd_soc_register_dais(&op->dev, psc_i2s_dai, ARRAY_SIZE(psc_i2s_dai));
 	if (rc != 0) {
 		pr_err("Failed to register DAI\n");
-		return 0;
+		return rc;
 	}
 
 	psc_dma = dev_get_drvdata(&op->dev);
diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c
index 0d7dcf1e4863..7d7847a1e66b 100644
--- a/sound/soc/fsl/mpc8610_hpcd.c
+++ b/sound/soc/fsl/mpc8610_hpcd.c
@@ -498,6 +498,7 @@ static int mpc8610_hpcd_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "platform device add failed\n");
 		goto error;
 	}
+	dev_set_drvdata(&pdev->dev, sound_device);
 
 	of_node_put(codec_np);
 
diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c
index 63b9eaa1ebc2..026b756961e0 100644
--- a/sound/soc/fsl/p1022_ds.c
+++ b/sound/soc/fsl/p1022_ds.c
@@ -498,6 +498,7 @@ static int p1022_ds_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "platform device add failed\n");
 		goto error;
 	}
+	dev_set_drvdata(&pdev->dev, sound_device);
 
 	of_node_put(codec_np);
 
diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c
index 25f27ec1dd6e..ba4d85e317ed 100644
--- a/sound/soc/fsl/pcm030-audio-fabric.c
+++ b/sound/soc/fsl/pcm030-audio-fabric.c
@@ -76,6 +76,7 @@ static __init int pcm030_fabric_init(void)
 	rc = platform_device_add(pdev);
 	if (rc) {
 		pr_err("pcm030_fabric_init: platform_device_add() failed\n");
+		platform_device_put(pdev);
 		return -ENODEV;
 	}
 	return 0;
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index b59675257ce5..dd4fffdbd177 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -34,8 +34,8 @@ static int eukrea_tlv320_hw_params(struct snd_pcm_substream *substream,
 			struct snd_pcm_hw_params *params)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_dai *codec_dai = rtd->dai->codec_dai;
-	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
+	struct snd_soc_dai *codec_dai = rtd->codec_dai;
+	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
 	int ret;
 
 	ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_I2S |
@@ -79,10 +79,10 @@ static struct snd_soc_ops eukrea_tlv320_snd_ops = {
 static struct snd_soc_dai_link eukrea_tlv320_dai = {
 	.name		= "tlv320aic23",
 	.stream_name	= "TLV320AIC23",
-	.codec_dai	= "tlv320aic23-hifi",
+	.codec_dai_name	= "tlv320aic23-hifi",
 	.platform_name	= "imx-pcm-audio.0",
 	.codec_name	= "tlv320aic23-codec.0-001a",
-	.cpu_dai	= "imx-ssi.0",
+	.cpu_dai_name	= "imx-ssi.0",
 	.ops		= &eukrea_tlv320_snd_ops,
 };
 
diff --git a/sound/soc/imx/imx-pcm-dma-mx2.c b/sound/soc/imx/imx-pcm-dma-mx2.c
index fd493ee1428e..671ef8dd524c 100644
--- a/sound/soc/imx/imx-pcm-dma-mx2.c
+++ b/sound/soc/imx/imx-pcm-dma-mx2.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/dmaengine.h>
 
 #include <sound/core.h>
 #include <sound/initval.h>
@@ -27,165 +28,146 @@
 #include <sound/pcm_params.h>
 #include <sound/soc.h>
 
-#include <mach/dma-mx1-mx2.h>
+#include <mach/dma.h>
 
 #include "imx-ssi.h"
 
 struct imx_pcm_runtime_data {
-	int sg_count;
-	struct scatterlist *sg_list;
-	int period;
+	int period_bytes;
 	int periods;
-	unsigned long dma_addr;
 	int dma;
-	struct snd_pcm_substream *substream;
 	unsigned long offset;
 	unsigned long size;
-	unsigned long period_cnt;
 	void *buf;
 	int period_time;
+	struct dma_async_tx_descriptor *desc;
+	struct dma_chan *dma_chan;
+	struct imx_dma_data dma_data;
 };
 
-/* Called by the DMA framework when a period has elapsed */
-static void imx_ssi_dma_progression(int channel, void *data,
-					struct scatterlist *sg)
+static void audio_dma_irq(void *data)
 {
-	struct snd_pcm_substream *substream = data;
+	struct snd_pcm_substream *substream = (struct snd_pcm_substream *)data;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
 
-	if (!sg)
-		return;
-
-	runtime = iprtd->substream->runtime;
+	iprtd->offset += iprtd->period_bytes;
+	iprtd->offset %= iprtd->period_bytes * iprtd->periods;
 
-	iprtd->offset = sg->dma_address - runtime->dma_addr;
-
-	snd_pcm_period_elapsed(iprtd->substream);
+	snd_pcm_period_elapsed(substream);
 }
 
-static void imx_ssi_dma_callback(int channel, void *data)
+static bool filter(struct dma_chan *chan, void *param)
 {
-	pr_err("%s shouldn't be called\n", __func__);
-}
+	struct imx_pcm_runtime_data *iprtd = param;
 
-static void snd_imx_dma_err_callback(int channel, void *data, int err)
-{
-	struct snd_pcm_substream *substream = data;
-	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct imx_pcm_dma_params *dma_params =
-		snd_soc_dai_get_dma_data(rtd->dai->cpu_dai, substream);
-	struct snd_pcm_runtime *runtime = substream->runtime;
-	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
-	int ret;
+	if (!imx_dma_is_general_purpose(chan))
+		return false;
 
-	pr_err("DMA timeout on channel %d -%s%s%s%s\n",
-		 channel,
-		 err & IMX_DMA_ERR_BURST ?    " burst" : "",
-		 err & IMX_DMA_ERR_REQUEST ?  " request" : "",
-		 err & IMX_DMA_ERR_TRANSFER ? " transfer" : "",
-		 err & IMX_DMA_ERR_BUFFER ?   " buffer" : "");
+	chan->private = &iprtd->dma_data;
 
-	imx_dma_disable(iprtd->dma);
-	ret = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
-			IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
-			substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-			DMA_MODE_WRITE : DMA_MODE_READ);
-	if (!ret)
-		imx_dma_enable(iprtd->dma);
+	return true;
 }
 
-static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream)
+static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
+				struct snd_pcm_hw_params *params)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct imx_pcm_dma_params *dma_params;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
+	struct dma_slave_config slave_config;
+	dma_cap_mask_t mask;
+	enum dma_slave_buswidth buswidth;
 	int ret;
 
 	dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
 
-	iprtd->dma = imx_dma_request_by_prio(DRV_NAME, DMA_PRIO_HIGH);
-	if (iprtd->dma < 0) {
-		pr_err("Failed to claim the audio DMA\n");
-		return -ENODEV;
-	}
+	iprtd->dma_data.peripheral_type = IMX_DMATYPE_SSI;
+	iprtd->dma_data.priority = DMA_PRIO_HIGH;
+	iprtd->dma_data.dma_request = dma_params->dma;
 
-	ret = imx_dma_setup_handlers(iprtd->dma,
-				imx_ssi_dma_callback,
-				snd_imx_dma_err_callback, substream);
-	if (ret)
-		goto out;
+	/* Try to grab a DMA channel */
+	dma_cap_zero(mask);
+	dma_cap_set(DMA_SLAVE, mask);
+	iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
+	if (!iprtd->dma_chan)
+		return -EINVAL;
 
-	ret = imx_dma_setup_progression_handler(iprtd->dma,
-			imx_ssi_dma_progression);
-	if (ret) {
-		pr_err("Failed to setup the DMA handler\n");
-		goto out;
+	switch (params_format(params)) {
+	case SNDRV_PCM_FORMAT_S16_LE:
+		buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+		break;
+	case SNDRV_PCM_FORMAT_S20_3LE:
+	case SNDRV_PCM_FORMAT_S24_LE:
+		buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES;
+		break;
+	default:
+		return 0;
 	}
 
-	ret = imx_dma_config_channel(iprtd->dma,
-			IMX_DMA_MEMSIZE_16 | IMX_DMA_TYPE_FIFO,
-			IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
-			dma_params->dma, 1);
-	if (ret < 0) {
-		pr_err("Cannot configure DMA channel: %d\n", ret);
-		goto out;
+	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		slave_config.direction = DMA_TO_DEVICE;
+		slave_config.dst_addr = dma_params->dma_addr;
+		slave_config.dst_addr_width = buswidth;
+		slave_config.dst_maxburst = dma_params->burstsize;
+	} else {
+		slave_config.direction = DMA_FROM_DEVICE;
+		slave_config.src_addr = dma_params->dma_addr;
+		slave_config.src_addr_width = buswidth;
+		slave_config.src_maxburst = dma_params->burstsize;
 	}
 
-	imx_dma_config_burstlen(iprtd->dma, dma_params->burstsize * 2);
+	ret = dmaengine_slave_config(iprtd->dma_chan, &slave_config);
+	if (ret)
+		return ret;
 
 	return 0;
-out:
-	imx_dma_free(iprtd->dma);
-	return ret;
 }
 
 static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
 				struct snd_pcm_hw_params *params)
 {
+	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
-	int i;
 	unsigned long dma_addr;
+	struct dma_chan *chan;
+	struct imx_pcm_dma_params *dma_params;
+	int ret;
 
-	imx_ssi_dma_alloc(substream);
+	dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
+	ret = imx_ssi_dma_alloc(substream, params);
+	if (ret)
+		return ret;
+	chan = iprtd->dma_chan;
 
 	iprtd->size = params_buffer_bytes(params);
 	iprtd->periods = params_periods(params);
-	iprtd->period = params_period_bytes(params);
+	iprtd->period_bytes = params_period_bytes(params);
 	iprtd->offset = 0;
 	iprtd->period_time = HZ / (params_rate(params) /
 			params_period_size(params));
 
 	snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer);
 
-	if (iprtd->sg_count != iprtd->periods) {
-		kfree(iprtd->sg_list);
-
-		iprtd->sg_list = kcalloc(iprtd->periods + 1,
-				sizeof(struct scatterlist), GFP_KERNEL);
-		if (!iprtd->sg_list)
-			return -ENOMEM;
-		iprtd->sg_count = iprtd->periods + 1;
-	}
-
-	sg_init_table(iprtd->sg_list, iprtd->sg_count);
 	dma_addr = runtime->dma_addr;
 
-	for (i = 0; i < iprtd->periods; i++) {
-		iprtd->sg_list[i].page_link = 0;
-		iprtd->sg_list[i].offset = 0;
-		iprtd->sg_list[i].dma_address = dma_addr;
-		iprtd->sg_list[i].length = iprtd->period;
-		dma_addr += iprtd->period;
+	iprtd->buf = (unsigned int *)substream->dma_buffer.area;
+
+	iprtd->desc = chan->device->device_prep_dma_cyclic(chan, dma_addr,
+			iprtd->period_bytes * iprtd->periods,
+			iprtd->period_bytes,
+			substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
+			DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	if (!iprtd->desc) {
+		dev_err(&chan->dev->device, "cannot prepare slave dma\n");
+		return -EINVAL;
 	}
 
-	/* close the loop */
-	iprtd->sg_list[iprtd->sg_count - 1].offset = 0;
-	iprtd->sg_list[iprtd->sg_count - 1].length = 0;
-	iprtd->sg_list[iprtd->sg_count - 1].page_link =
-			((unsigned long) iprtd->sg_list | 0x01) & ~0x02;
+	iprtd->desc->callback = audio_dma_irq;
+	iprtd->desc->callback_param = substream;
+
 	return 0;
 }
 
@@ -194,41 +176,21 @@ static int snd_imx_pcm_hw_free(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
 
-	if (iprtd->dma >= 0) {
-		imx_dma_free(iprtd->dma);
-		iprtd->dma = -EINVAL;
+	if (iprtd->dma_chan) {
+		dma_release_channel(iprtd->dma_chan);
+		iprtd->dma_chan = NULL;
 	}
 
-	kfree(iprtd->sg_list);
-	iprtd->sg_list = NULL;
-
 	return 0;
 }
 
 static int snd_imx_pcm_prepare(struct snd_pcm_substream *substream)
 {
-	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct imx_pcm_dma_params *dma_params;
-	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
-	int err;
 
 	dma_params = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
 
-	iprtd->substream = substream;
-	iprtd->buf = (unsigned int *)substream->dma_buffer.area;
-	iprtd->period_cnt = 0;
-
-	pr_debug("%s: buf: %p period: %d periods: %d\n",
-			__func__, iprtd->buf, iprtd->period, iprtd->periods);
-
-	err = imx_dma_setup_sg(iprtd->dma, iprtd->sg_list, iprtd->sg_count,
-			IMX_DMA_LENGTH_LOOP, dma_params->dma_addr,
-			substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-			DMA_MODE_WRITE : DMA_MODE_READ);
-	if (err)
-		return err;
-
 	return 0;
 }
 
@@ -241,14 +203,14 @@ static int snd_imx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-		imx_dma_enable(iprtd->dma);
+		dmaengine_submit(iprtd->desc);
 
 		break;
 
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-		imx_dma_disable(iprtd->dma);
+		dmaengine_terminate_all(iprtd->dma_chan);
 
 		break;
 	default:
@@ -263,6 +225,9 @@ static snd_pcm_uframes_t snd_imx_pcm_pointer(struct snd_pcm_substream *substream
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
 
+	pr_debug("%s: %ld %ld\n", __func__, iprtd->offset,
+			bytes_to_frames(substream->runtime, iprtd->offset));
+
 	return bytes_to_frames(substream->runtime, iprtd->offset);
 }
 
@@ -279,7 +244,7 @@ static struct snd_pcm_hardware snd_imx_hardware = {
 	.channels_max = 2,
 	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
 	.period_bytes_min = 128,
-	.period_bytes_max = 16 * 1024,
+	.period_bytes_max = 65535, /* Limited by SDMA engine */
 	.periods_min = 2,
 	.periods_max = 255,
 	.fifo_size = 0,
@@ -304,11 +269,23 @@ static int snd_imx_open(struct snd_pcm_substream *substream)
 	}
 
 	snd_soc_set_runtime_hwparams(substream, &snd_imx_hardware);
+
+	return 0;
+}
+
+static int snd_imx_close(struct snd_pcm_substream *substream)
+{
+	struct snd_pcm_runtime *runtime = substream->runtime;
+	struct imx_pcm_runtime_data *iprtd = runtime->private_data;
+
+	kfree(iprtd);
 
 	return 0;
 }
 
 static struct snd_pcm_ops imx_pcm_ops = {
 	.open		= snd_imx_open,
+	.close		= snd_imx_close,
 	.ioctl		= snd_pcm_lib_ioctl,
 	.hw_params	= snd_imx_pcm_hw_params,
 	.hw_free	= snd_imx_pcm_hw_free,
@@ -340,7 +317,6 @@ static struct platform_driver imx_pcm_driver = {
 		.name = "imx-pcm-audio",
 		.owner = THIS_MODULE,
 	},
-
 	.probe = imx_soc_platform_probe,
 	.remove = __devexit_p(imx_soc_platform_remove),
 };
@@ -356,4 +332,3 @@ static void __exit snd_imx_pcm_exit(void)
 	platform_driver_unregister(&imx_pcm_driver);
 }
 module_exit(snd_imx_pcm_exit);
-
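The rewrite above replaces the i.MX-private scatter-gather loop with the generic dmaengine API: request a channel through a filter callback, program the FIFO address and width with dmaengine_slave_config(), prepare one cyclic descriptor spanning all periods, and let its per-period callback drive snd_pcm_period_elapsed(). A condensed, hedged sketch of that call sequence (error handling trimmed; device_prep_dma_cyclic() has the signature used in this tree):

	#include <linux/dmaengine.h>

	static int start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t period, int periods,
				struct dma_slave_config *cfg,
				dma_async_tx_callback cb, void *cb_arg)
	{
		struct dma_async_tx_descriptor *desc;

		if (dmaengine_slave_config(chan, cfg))
			return -EINVAL;	/* FIFO address/width/burst rejected */

		/* One descriptor loops over the whole buffer and raises an
		 * interrupt at each period boundary. */
		desc = chan->device->device_prep_dma_cyclic(chan, buf,
				period * periods, period, DMA_TO_DEVICE);
		if (!desc)
			return -EINVAL;

		desc->callback = cb;		/* e.g. audio_dma_irq() above */
		desc->callback_param = cb_arg;
		dmaengine_submit(desc);		/* matches TRIGGER_START */
		return 0;
	}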
diff --git a/sound/soc/imx/imx-ssi.c b/sound/soc/imx/imx-ssi.c
index d4bd345b0a8d..390b6ffc2658 100644
--- a/sound/soc/imx/imx-ssi.c
+++ b/sound/soc/imx/imx-ssi.c
@@ -439,7 +439,22 @@ void imx_pcm_free(struct snd_pcm *pcm)
439} 439}
440EXPORT_SYMBOL_GPL(imx_pcm_free); 440EXPORT_SYMBOL_GPL(imx_pcm_free);
441 441
442static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
443{
444 struct imx_ssi *ssi = dev_get_drvdata(dai->dev);
445 uint32_t val;
446
447 snd_soc_dai_set_drvdata(dai, ssi);
448
449 val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) |
450 SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize);
451 writel(val, ssi->base + SSI_SFCSR);
452
453 return 0;
454}
455
442static struct snd_soc_dai_driver imx_ssi_dai = { 456static struct snd_soc_dai_driver imx_ssi_dai = {
457 .probe = imx_ssi_dai_probe,
443 .playback = { 458 .playback = {
444 .channels_min = 2, 459 .channels_min = 2,
445 .channels_max = 2, 460 .channels_max = 2,
@@ -455,20 +470,6 @@ static struct snd_soc_dai_driver imx_ssi_dai = {
455 .ops = &imx_ssi_pcm_dai_ops, 470 .ops = &imx_ssi_pcm_dai_ops,
456}; 471};
457 472
458static int imx_ssi_dai_probe(struct snd_soc_dai *dai)
459{
460 struct imx_ssi *ssi = dev_get_drvdata(dai->dev);
461 uint32_t val;
462
463 snd_soc_dai_set_drvdata(dai, ssi);
464
465 val = SSI_SFCSR_TFWM0(ssi->dma_params_tx.burstsize) |
466 SSI_SFCSR_RFWM0(ssi->dma_params_rx.burstsize);
467 writel(val, ssi->base + SSI_SFCSR);
468
469 return 0;
470}
471
472static struct snd_soc_dai_driver imx_ac97_dai = { 473static struct snd_soc_dai_driver imx_ac97_dai = {
473 .probe = imx_ssi_dai_probe, 474 .probe = imx_ssi_dai_probe,
474 .ac97_control = 1, 475 .ac97_control = 1,
@@ -677,9 +678,25 @@ static int imx_ssi_probe(struct platform_device *pdev)
 		goto failed_register;
 	}
 
-	ssi->soc_platform_pdev = platform_device_alloc("imx-fiq-pcm-audio", pdev->id);
-	if (!ssi->soc_platform_pdev)
+	ssi->soc_platform_pdev_fiq = platform_device_alloc("imx-fiq-pcm-audio", pdev->id);
+	if (!ssi->soc_platform_pdev_fiq) {
+		ret = -ENOMEM;
+		goto failed_pdev_fiq_alloc;
+	}
+
+	platform_set_drvdata(ssi->soc_platform_pdev_fiq, ssi);
+	ret = platform_device_add(ssi->soc_platform_pdev_fiq);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to add platform device\n");
+		goto failed_pdev_fiq_add;
+	}
+
+	ssi->soc_platform_pdev = platform_device_alloc("imx-pcm-audio", pdev->id);
+	if (!ssi->soc_platform_pdev) {
+		ret = -ENOMEM;
 		goto failed_pdev_alloc;
+	}
+
 	platform_set_drvdata(ssi->soc_platform_pdev, ssi);
 	ret = platform_device_add(ssi->soc_platform_pdev);
 	if (ret) {
@@ -692,6 +709,10 @@ static int imx_ssi_probe(struct platform_device *pdev)
 failed_pdev_add:
 	platform_device_put(ssi->soc_platform_pdev);
 failed_pdev_alloc:
+	platform_device_del(ssi->soc_platform_pdev_fiq);
+failed_pdev_fiq_add:
+	platform_device_put(ssi->soc_platform_pdev_fiq);
+failed_pdev_fiq_alloc:
 	snd_soc_unregister_dai(&pdev->dev);
 failed_register:
 failed_ac97:
@@ -712,8 +733,8 @@ static int __devexit imx_ssi_remove(struct platform_device *pdev)
 	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	struct imx_ssi *ssi = platform_get_drvdata(pdev);
 
-	platform_device_del(ssi->soc_platform_pdev);
-	platform_device_put(ssi->soc_platform_pdev);
+	platform_device_unregister(ssi->soc_platform_pdev);
+	platform_device_unregister(ssi->soc_platform_pdev_fiq);
 
 	snd_soc_unregister_dai(&pdev->dev);
 
diff --git a/sound/soc/imx/imx-ssi.h b/sound/soc/imx/imx-ssi.h
index 53b780d9b2b0..a4406a134892 100644
--- a/sound/soc/imx/imx-ssi.h
+++ b/sound/soc/imx/imx-ssi.h
@@ -185,6 +185,9 @@
 
 #define DRV_NAME "imx-ssi"
 
+#include <linux/dmaengine.h>
+#include <mach/dma.h>
+
 struct imx_pcm_dma_params {
 	int dma;
 	unsigned long dma_addr;
@@ -212,6 +215,7 @@ struct imx_ssi {
 	int enabled;
 
 	struct platform_device *soc_platform_pdev;
+	struct platform_device *soc_platform_pdev_fiq;
 };
 
 struct snd_soc_platform *imx_ssi_fiq_init(struct platform_device *pdev,
diff --git a/sound/soc/imx/phycore-ac97.c b/sound/soc/imx/phycore-ac97.c
index 6a65dd705519..9eabc28667e6 100644
--- a/sound/soc/imx/phycore-ac97.c
+++ b/sound/soc/imx/phycore-ac97.c
@@ -20,9 +20,6 @@
 #include <sound/soc-dapm.h>
 #include <asm/mach-types.h>
 
-#include "../codecs/wm9712.h"
-#include "imx-ssi.h"
-
 static struct snd_soc_card imx_phycore;
 
 static struct snd_soc_ops imx_phycore_hifi_ops = {
@@ -41,11 +38,12 @@ static struct snd_soc_dai_link imx_phycore_dai_ac97[] = {
 };
 
 static struct snd_soc_card imx_phycore = {
-	.name = "PhyCORE-audio",
+	.name = "PhyCORE-ac97-audio",
 	.dai_link = imx_phycore_dai_ac97,
 	.num_links = ARRAY_SIZE(imx_phycore_dai_ac97),
 };
 
+static struct platform_device *imx_phycore_snd_ac97_device;
 static struct platform_device *imx_phycore_snd_device;
 
 static int __init imx_phycore_init(void)
@@ -56,29 +54,42 @@ static int __init imx_phycore_init(void)
 		/* return happy. We might run on a totally different machine */
 		return 0;
 
-	imx_phycore_snd_device = platform_device_alloc("soc-audio", -1);
-	if (!imx_phycore_snd_device)
+	imx_phycore_snd_ac97_device = platform_device_alloc("soc-audio", -1);
+	if (!imx_phycore_snd_ac97_device)
 		return -ENOMEM;
 
-	platform_set_drvdata(imx_phycore_snd_device, &imx_phycore);
-	ret = platform_device_add(imx_phycore_snd_device);
+	platform_set_drvdata(imx_phycore_snd_ac97_device, &imx_phycore);
+	ret = platform_device_add(imx_phycore_snd_ac97_device);
+	if (ret)
+		goto fail1;
 
 	imx_phycore_snd_device = platform_device_alloc("wm9712-codec", -1);
-	if (!imx_phycore_snd_device)
-		return -ENOMEM;
+	if (!imx_phycore_snd_device) {
+		ret = -ENOMEM;
+		goto fail2;
+	}
 	ret = platform_device_add(imx_phycore_snd_device);
 
 	if (ret) {
 		printk(KERN_ERR "ASoC: Platform device allocation failed\n");
-		platform_device_put(imx_phycore_snd_device);
+		goto fail3;
 	}
 
+	return 0;
+
+fail3:
+	platform_device_put(imx_phycore_snd_device);
+fail2:
+	platform_device_del(imx_phycore_snd_ac97_device);
+fail1:
+	platform_device_put(imx_phycore_snd_ac97_device);
 	return ret;
 }
 
 static void __exit imx_phycore_exit(void)
 {
 	platform_device_unregister(imx_phycore_snd_device);
+	platform_device_unregister(imx_phycore_snd_ac97_device);
 }
 
 late_initcall(imx_phycore_init);
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index 293dc748797c..dac6732da969 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -49,7 +49,7 @@ static unsigned short nuc900_ac97_read(struct snd_ac97 *ac97,
 	mutex_lock(&ac97_mutex);
 
 	val = nuc900_checkready();
-	if (!!val) {
+	if (val) {
 		dev_err(nuc900_audio->dev, "AC97 codec is not ready\n");
 		goto out;
 	}
@@ -102,7 +102,7 @@ static void nuc900_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
 	mutex_lock(&ac97_mutex);
 
 	tmp = nuc900_checkready();
-	if (!!tmp)
+	if (tmp)
 		dev_err(nuc900_audio->dev, "AC97 codec is not ready\n");
 
 	/* clear the R_WB bit and write register index */
@@ -149,7 +149,7 @@ static void nuc900_ac97_warm_reset(struct snd_ac97 *ac97)
 	udelay(100);
 
 	val = nuc900_checkready();
-	if (!!val)
+	if (val)
 		dev_err(nuc900_audio->dev, "AC97 codec is not ready\n");
 
 	mutex_unlock(&ac97_mutex);
@@ -263,8 +263,7 @@ static int nuc900_ac97_trigger(struct snd_pcm_substream *substream,
 	return ret;
 }
 
-static int nuc900_ac97_probe(struct platform_device *pdev,
-	struct snd_soc_dai *dai)
+static int nuc900_ac97_probe(struct snd_soc_dai *dai)
 {
 	struct nuc900_audio *nuc900_audio = nuc900_ac97_data;
 	unsigned long val;
@@ -284,12 +283,12 @@ static int nuc900_ac97_probe(struct platform_device *pdev,
 	return 0;
 }
 
-static void nuc900_ac97_remove(struct platform_device *pdev,
-	struct snd_soc_dai *dai)
+static int nuc900_ac97_remove(struct snd_soc_dai *dai)
 {
 	struct nuc900_audio *nuc900_audio = nuc900_ac97_data;
 
 	clk_disable(nuc900_audio->clk);
+	return 0;
 }
 
 static struct snd_soc_dai_ops nuc900_ac97_dai_ops = {
@@ -313,7 +312,7 @@ static struct snd_soc_dai_driver nuc900_ac97_dai = {
 		.channels_max = 2,
 	},
 	.ops = &nuc900_ac97_dai_ops,
-}
+};
 
 static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
 {
@@ -384,7 +383,6 @@ out0:
 
 static int __devexit nuc900_ac97_drvremove(struct platform_device *pdev)
 {
-
 	snd_soc_unregister_dai(&pdev->dev);
 
 	clk_put(nuc900_ac97_data->clk);
@@ -392,6 +390,7 @@ static int __devexit nuc900_ac97_drvremove(struct platform_device *pdev)
 	release_mem_region(nuc900_ac97_data->res->start,
 			resource_size(nuc900_ac97_data->res));
 
+	kfree(nuc900_ac97_data);
 	nuc900_ac97_data = NULL;
 
 	return 0;
diff --git a/sound/soc/nuc900/nuc900-audio.h b/sound/soc/nuc900/nuc900-audio.h
index aeed8ead2b2b..59f7e8ed1a68 100644
--- a/sound/soc/nuc900/nuc900-audio.h
+++ b/sound/soc/nuc900/nuc900-audio.h
@@ -110,4 +110,6 @@ struct nuc900_audio {
 
 };
 
+extern struct nuc900_audio *nuc900_ac97_data;
+
 #endif /*end _NUC900_AUDIO_H */
diff --git a/sound/soc/nuc900/nuc900-pcm.c b/sound/soc/nuc900/nuc900-pcm.c
index 195d1ac94771..8263f56dc665 100644
--- a/sound/soc/nuc900/nuc900-pcm.c
+++ b/sound/soc/nuc900/nuc900-pcm.c
@@ -50,12 +50,12 @@ static int nuc900_dma_hw_params(struct snd_pcm_substream *substream,
 	unsigned long flags;
 	int ret = 0;
 
-	spin_lock_irqsave(&nuc900_audio->lock, flags);
-
 	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(params));
 	if (ret < 0)
 		return ret;
 
+	spin_lock_irqsave(&nuc900_audio->lock, flags);
+
 	nuc900_audio->substream = substream;
 	nuc900_audio->dma_addr[substream->stream] = runtime->dma_addr;
 	nuc900_audio->buffersize[substream->stream] =
@@ -169,6 +169,7 @@ static int nuc900_dma_prepare(struct snd_pcm_substream *substream)
 	struct snd_pcm_runtime *runtime = substream->runtime;
 	struct nuc900_audio *nuc900_audio = runtime->private_data;
 	unsigned long flags, val;
+	int ret = 0;
 
 	spin_lock_irqsave(&nuc900_audio->lock, flags);
 
@@ -197,10 +198,10 @@ static int nuc900_dma_prepare(struct snd_pcm_substream *substream)
 		AUDIO_WRITE(nuc900_audio->mmio + ACTL_RESET, val);
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
 	}
 	spin_unlock_irqrestore(&nuc900_audio->lock, flags);
-	return 0;
+	return ret;
 }
 
 static int nuc900_dma_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -332,7 +333,7 @@ static struct snd_soc_platform_driver nuc900_soc_platform = {
 	.ops = &nuc900_dma_ops,
 	.pcm_new = nuc900_dma_new,
 	.pcm_free = nuc900_dma_free_dma_buffers,
-}
+};
 
 static int __devinit nuc900_soc_platform_probe(struct platform_device *pdev)
 {
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index d542ea2ff6be..a088db6d5091 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -12,8 +12,8 @@ config SND_OMAP_SOC_MCPDM
 config SND_OMAP_SOC_N810
 	tristate "SoC Audio support for Nokia N810"
 	depends on SND_OMAP_SOC && MACH_NOKIA_N810 && I2C
+	depends on OMAP_MUX
 	select SND_OMAP_SOC_MCBSP
-	select OMAP_MUX
 	select SND_SOC_TLV320AIC3X
 	help
 	  Say Y if you want to add support for SoC audio on Nokia N810.
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index d211c9fa5a91..7e84f24b9a88 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -644,15 +644,23 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
 
 
 	case OMAP_MCBSP_CLKR_SRC_CLKR:
+		if (cpu_class_is_omap1())
+			break;
 		omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKR);
 		break;
 	case OMAP_MCBSP_CLKR_SRC_CLKX:
+		if (cpu_class_is_omap1())
+			break;
 		omap2_mcbsp1_mux_clkr_src(CLKR_SRC_CLKX);
 		break;
 	case OMAP_MCBSP_FSR_SRC_FSR:
+		if (cpu_class_is_omap1())
+			break;
 		omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSR);
 		break;
 	case OMAP_MCBSP_FSR_SRC_FSX:
+		if (cpu_class_is_omap1())
+			break;
 		omap2_mcbsp1_mux_fsr_src(FSR_SRC_FSX);
 		break;
 	default:
diff --git a/sound/soc/omap/omap3pandora.c b/sound/soc/omap/omap3pandora.c
index dbd9d96b5f92..4ee33ce2cb98 100644
--- a/sound/soc/omap/omap3pandora.c
+++ b/sound/soc/omap/omap3pandora.c
@@ -306,6 +306,7 @@ static int __init omap3pandora_soc_init(void)
 		pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n",
 			dev_name(&omap3pandora_snd_device->dev),
 			PTR_ERR(omap3pandora_dac_reg));
+		ret = PTR_ERR(omap3pandora_dac_reg);
 		goto fail3;
 	}
 
diff --git a/sound/soc/omap/osk5912.c b/sound/soc/omap/osk5912.c
index f0e662556428..65ae00e976ef 100644
--- a/sound/soc/omap/osk5912.c
+++ b/sound/soc/omap/osk5912.c
@@ -177,7 +177,8 @@ static int __init osk_soc_init(void)
 	tlv320aic23_mclk = clk_get(dev, "mclk");
 	if (IS_ERR(tlv320aic23_mclk)) {
 		printk(KERN_ERR "Could not get mclk clock\n");
-		return -ENODEV;
+		err = PTR_ERR(tlv320aic23_mclk);
+		goto err2;
 	}
 
 	/*
@@ -188,7 +189,7 @@ static int __init osk_soc_init(void)
 		if (clk_set_rate(tlv320aic23_mclk, CODEC_CLOCK)) {
 			printk(KERN_ERR "Cannot set MCLK for AIC23 CODEC\n");
 			err = -ECANCELED;
-			goto err1;
+			goto err3;
 		}
 	}
 
@@ -196,9 +197,12 @@ static int __init osk_soc_init(void)
 		(uint) clk_get_rate(tlv320aic23_mclk), CODEC_CLOCK);
 
 	return 0;
-err1:
+
+err3:
 	clk_put(tlv320aic23_mclk);
+err2:
 	platform_device_del(osk_snd_device);
+err1:
 	platform_device_put(osk_snd_device);
 
 	return err;
@@ -207,6 +211,7 @@ err1:
 
 static void __exit osk_soc_exit(void)
 {
+	clk_put(tlv320aic23_mclk);
 	platform_device_unregister(osk_snd_device);
 }
 
diff --git a/sound/soc/pxa/Kconfig b/sound/soc/pxa/Kconfig
index 37f191bbfdd9..580f48571303 100644
--- a/sound/soc/pxa/Kconfig
+++ b/sound/soc/pxa/Kconfig
@@ -1,6 +1,7 @@
 config SND_PXA2XX_SOC
 	tristate "SoC Audio for the Intel PXA2xx chip"
 	depends on ARCH_PXA
+	select SND_ARM
 	select SND_PXA2XX_LIB
 	help
 	  Say Y or M if you want to add support for codecs attached to
diff --git a/sound/soc/pxa/corgi.c b/sound/soc/pxa/corgi.c
index 97e9423615c9..f451acd4935b 100644
--- a/sound/soc/pxa/corgi.c
+++ b/sound/soc/pxa/corgi.c
@@ -100,8 +100,13 @@ static int corgi_startup(struct snd_pcm_substream *substream)
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 
+	mutex_lock(&codec->mutex);
+
 	/* check the jack status at stream startup */
 	corgi_ext_control(codec);
+
+	mutex_unlock(&codec->mutex);
+
 	return 0;
 }
 
diff --git a/sound/soc/pxa/magician.c b/sound/soc/pxa/magician.c
index b8207ced4072..5ef0526924b9 100644
--- a/sound/soc/pxa/magician.c
+++ b/sound/soc/pxa/magician.c
@@ -72,9 +72,13 @@ static int magician_startup(struct snd_pcm_substream *substream)
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 
+	mutex_lock(&codec->mutex);
+
 	/* check the jack status at stream startup */
 	magician_ext_control(codec);
 
+	mutex_unlock(&codec->mutex);
+
 	return 0;
 }
 
diff --git a/sound/soc/pxa/poodle.c b/sound/soc/pxa/poodle.c
index af84ee9c5e11..84edd0385a21 100644
--- a/sound/soc/pxa/poodle.c
+++ b/sound/soc/pxa/poodle.c
@@ -77,8 +77,13 @@ static int poodle_startup(struct snd_pcm_substream *substream)
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 
+	mutex_lock(&codec->mutex);
+
 	/* check the jack status at stream startup */
 	poodle_ext_control(codec);
+
+	mutex_unlock(&codec->mutex);
+
 	return 0;
 }
 
diff --git a/sound/soc/pxa/spitz.c b/sound/soc/pxa/spitz.c
index f470f360f4dd..0b30d7de24ec 100644
--- a/sound/soc/pxa/spitz.c
+++ b/sound/soc/pxa/spitz.c
@@ -108,8 +108,13 @@ static int spitz_startup(struct snd_pcm_substream *substream)
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 
+	mutex_lock(&codec->mutex);
+
 	/* check the jack status at stream startup */
 	spitz_ext_control(codec);
+
+	mutex_unlock(&codec->mutex);
+
 	return 0;
 }
 
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index 73d0edd8ded9..7b983f935454 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -81,8 +81,13 @@ static int tosa_startup(struct snd_pcm_substream *substream)
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
 	struct snd_soc_codec *codec = rtd->codec;
 
+	mutex_lock(&codec->mutex);
+
 	/* check the jack status at stream startup */
 	tosa_ext_control(codec);
+
+	mutex_unlock(&codec->mutex);
+
 	return 0;
 }
 
diff --git a/sound/soc/s3c24xx/Kconfig b/sound/soc/s3c24xx/Kconfig
index 8a6b53ccd203..d85bf8a0abb2 100644
--- a/sound/soc/s3c24xx/Kconfig
+++ b/sound/soc/s3c24xx/Kconfig
@@ -2,6 +2,7 @@ config SND_S3C24XX_SOC
 	tristate "SoC Audio for the Samsung S3CXXXX chips"
 	depends on ARCH_S3C2410 || ARCH_S3C64XX || ARCH_S5PC100 || ARCH_S5PV210
 	select S3C64XX_DMA if ARCH_S3C64XX
+	select S3C2410_DMA if ARCH_S3C2410
 	help
 	  Say Y or M if you want to add support for codecs attached to
 	  the S3C24XX AC97 or I2S interfaces. You will also need to
diff --git a/sound/soc/s3c24xx/rx1950_uda1380.c b/sound/soc/s3c24xx/rx1950_uda1380.c
index ffd5cf2fb0a9..468cc11fdf47 100644
--- a/sound/soc/s3c24xx/rx1950_uda1380.c
+++ b/sound/soc/s3c24xx/rx1950_uda1380.c
@@ -50,7 +50,6 @@ static unsigned int rates[] = {
 	16000,
 	44100,
 	48000,
-	88200,
 };
 
 static struct snd_pcm_hw_constraint_list hw_rates = {
@@ -130,7 +129,6 @@ static const struct snd_soc_dapm_route audio_map[] = {
 };
 
 static struct platform_device *s3c24xx_snd_device;
-static struct clk *xtal;
 
 static int rx1950_startup(struct snd_pcm_substream *substream)
 {
@@ -179,10 +177,8 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream,
 	case 44100:
 	case 88200:
 		clk_source = S3C24XX_CLKSRC_MPLL;
-		fs_mode = S3C2410_IISMOD_256FS;
-		div = clk_get_rate(xtal) / (256 * rate);
-		if (clk_get_rate(xtal) % (256 * rate) > (128 * rate))
-			div++;
+		fs_mode = S3C2410_IISMOD_384FS;
+		div = 1;
 		break;
 	default:
 		printk(KERN_ERR "%s: rate %d is not supported\n",
@@ -210,7 +206,7 @@ static int rx1950_hw_params(struct snd_pcm_substream *substream,
 
 	/* set MCLK division for sample rate */
 	ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
-		S3C2410_IISMOD_384FS);
+		fs_mode);
 	if (ret < 0)
 		return ret;
 
@@ -295,17 +291,8 @@ static int __init rx1950_init(void)
 		goto err_plat_add;
 	}
 
-	xtal = clk_get(&s3c24xx_snd_device->dev, "xtal");
-
-	if (IS_ERR(xtal)) {
-		ret = PTR_ERR(xtal);
-		platform_device_unregister(s3c24xx_snd_device);
-		goto err_clk;
-	}
-
 	return 0;
 
-err_clk:
 err_plat_add:
 err_plat_alloc:
 err_gpio_conf:
@@ -320,7 +307,6 @@ static void __exit rx1950_exit(void)
 	platform_device_unregister(s3c24xx_snd_device);
 	snd_soc_jack_free_gpios(&hp_jack, ARRAY_SIZE(hp_jack_gpios),
 		hp_jack_gpios);
-	clk_put(xtal);
 	gpio_free(S3C2410_GPA(1));
 }
 
diff --git a/sound/soc/s3c24xx/smdk_spdif.c b/sound/soc/s3c24xx/smdk_spdif.c
index f31d22ad7c88..c8bd90488a87 100644
--- a/sound/soc/s3c24xx/smdk_spdif.c
+++ b/sound/soc/s3c24xx/smdk_spdif.c
@@ -38,7 +38,7 @@ static int set_audio_clock_heirachy(struct platform_device *pdev)
 	}
 
 	mout_epll = clk_get(NULL, "mout_epll");
-	if (IS_ERR(fout_epll)) {
+	if (IS_ERR(mout_epll)) {
 		printk(KERN_WARNING "%s: Cannot find mout_epll.\n",
 				__func__);
 		ret = -EINVAL;
@@ -54,7 +54,7 @@ static int set_audio_clock_heirachy(struct platform_device *pdev)
 	}
 
 	sclk_spdif = clk_get(NULL, "sclk_spdif");
-	if (IS_ERR(fout_epll)) {
+	if (IS_ERR(sclk_spdif)) {
 		printk(KERN_WARNING "%s: Cannot find sclk_spdif.\n",
 				__func__);
 		ret = -EINVAL;
diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c
index 8778faa174a6..3052f64b2403 100644
--- a/sound/soc/s6000/s6000-i2s.c
+++ b/sound/soc/s6000/s6000-i2s.c
@@ -434,7 +434,7 @@ static struct snd_soc_dai_driver s6000_i2s_dai = {
 		.rate_max = 1562500,
 	},
 	.ops = &s6000_i2s_dai_ops,
-}
+};
 
 static int __devinit s6000_i2s_probe(struct platform_device *pdev)
 {
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
index 271fd222bf19..ab3ccaec72d2 100644
--- a/sound/soc/s6000/s6000-pcm.c
+++ b/sound/soc/s6000/s6000-pcm.c
@@ -473,7 +473,7 @@ static int s6000_pcm_new(struct snd_card *card,
 	}
 
 	res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED,
-			  s6000_soc_platform.name, pcm);
+			  "s6000-audio", pcm);
 	if (res) {
 		printk(KERN_ERR "s6000-pcm couldn't get IRQ\n");
 		return res;
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
index 96c05e137538..c1244c5bc730 100644
--- a/sound/soc/s6000/s6105-ipcam.c
+++ b/sound/soc/s6000/s6105-ipcam.c
@@ -167,7 +167,7 @@ static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
 
 	snd_soc_dapm_sync(codec);
 
-	snd_ctl_add(codec->snd_card, snd_ctl_new1(&audio_out_mux, codec));
+	snd_ctl_add(codec->card->snd_card, snd_ctl_new1(&audio_out_mux, codec));
 
 	return 0;
 }
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 507e709f2807..4c2404b1b862 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -132,6 +132,8 @@ struct fsi_priv {
 	struct fsi_stream playback;
 	struct fsi_stream capture;
 
+	long rate;
+
 	u32 mst_ctrl;
 };
 
@@ -854,10 +856,17 @@ static void fsi_dai_shutdown(struct snd_pcm_substream *substream,
 {
 	struct fsi_priv *fsi = fsi_get_priv(substream);
 	int is_play = fsi_is_play(substream);
+	struct fsi_master *master = fsi_get_master(fsi);
+	int (*set_rate)(struct device *dev, int is_porta, int rate, int enable);
 
 	fsi_irq_disable(fsi, is_play);
 	fsi_clk_ctrl(fsi, 0);
 
+	set_rate = master->info->set_rate;
+	if (set_rate && fsi->rate)
+		set_rate(dai->dev, fsi_is_port_a(fsi), fsi->rate, 0);
+	fsi->rate = 0;
+
 	pm_runtime_put_sync(dai->dev);
 }
 
@@ -891,20 +900,20 @@ static int fsi_dai_hw_params(struct snd_pcm_substream *substream,
 {
 	struct fsi_priv *fsi = fsi_get_priv(substream);
 	struct fsi_master *master = fsi_get_master(fsi);
-	int (*set_rate)(int is_porta, int rate) = master->info->set_rate;
+	int (*set_rate)(struct device *dev, int is_porta, int rate, int enable);
 	int fsi_ver = master->core->ver;
-	int is_play = fsi_is_play(substream);
+	long rate = params_rate(params);
 	int ret;
 
-	/* if slave mode, set_rate is not needed */
-	if (!fsi_is_master_mode(fsi, is_play))
+	set_rate = master->info->set_rate;
+	if (!set_rate)
 		return 0;
 
-	/* it is error if no set_rate */
-	if (!set_rate)
-		return -EIO;
+	ret = set_rate(dai->dev, fsi_is_port_a(fsi), rate, 1);
+	if (ret < 0) /* error */
+		return ret;
 
-	ret = set_rate(fsi_is_port_a(fsi), params_rate(params));
+	fsi->rate = rate;
 	if (ret > 0) {
 		u32 data = 0;
 
diff --git a/sound/soc/sh/ssi.c b/sound/soc/sh/ssi.c
index 40bbdf1591dc..05192d97b377 100644
--- a/sound/soc/sh/ssi.c
+++ b/sound/soc/sh/ssi.c
@@ -387,7 +387,7 @@ static int __devinit sh4_soc_dai_probe(struct platform_device *pdev)
 
 static int __devexit sh4_soc_dai_remove(struct platform_device *pdev)
 {
-	snd_soc_unregister_dai(&pdev->dev, ARRAY_SIZE(sh4_ssi_dai));
+	snd_soc_unregister_dais(&pdev->dev, ARRAY_SIZE(sh4_ssi_dai));
 	return 0;
 }
 
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 614a8b30d87b..441285ade024 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3043,8 +3043,10 @@ int snd_soc_register_dais(struct device *dev,
 	for (i = 0; i < count; i++) {
 
 		dai = kzalloc(sizeof(struct snd_soc_dai), GFP_KERNEL);
-		if (dai == NULL)
-			return -ENOMEM;
+		if (dai == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
 
 		/* create DAI component name */
 		dai->name = fmt_multiple_name(dev, &dai_drv[i]);
@@ -3263,9 +3265,6 @@ int snd_soc_register_codec(struct device *dev,
 	return 0;
 
 error:
-	for (i--; i >= 0; i--)
-		snd_soc_unregister_dai(dev);
-
 	if (codec->reg_cache)
 		kfree(codec->reg_cache);
 	kfree(codec->name);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 7d85c6496afa..75ed6491222d 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -683,12 +683,12 @@ static int dapm_seq_compare(struct snd_soc_dapm_widget *a,
 			    struct snd_soc_dapm_widget *b,
 			    int sort[])
 {
-	if (a->codec != b->codec)
-		return (unsigned long)a - (unsigned long)b;
 	if (sort[a->id] != sort[b->id])
 		return sort[a->id] - sort[b->id];
 	if (a->reg != b->reg)
 		return a->reg - b->reg;
+	if (a->codec != b->codec)
+		return (unsigned long)a->codec - (unsigned long)b->codec;
 
 	return 0;
 }
diff --git a/sound/spi/at73c213.c b/sound/spi/at73c213.c
index 1bc56b2b94e2..337a00241a1f 100644
--- a/sound/spi/at73c213.c
+++ b/sound/spi/at73c213.c
@@ -155,7 +155,7 @@ static int snd_at73c213_set_bitrate(struct snd_at73c213 *chip)
 	if (max_tries < 1)
 		max_tries = 1;
 
-	/* ssc_div must be a power of 2. */
+	/* ssc_div must be even. */
 	ssc_div = (ssc_div + 1) & ~1UL;
 
 	if ((ssc_rate / (ssc_div * 2 * 16)) < BITRATE_MIN) {
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 93bd2ff001fb..564491fa18b2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -197,7 +197,7 @@ static void sig_atexit(void)
 	if (child_pid > 0)
 		kill(child_pid, SIGTERM);
 
-	if (signr == -1)
+	if (signr == -1 || signr == SIGUSR1)
 		return;
 
 	signal(signr, SIG_DFL);
@@ -515,6 +515,7 @@ static int __cmd_record(int argc, const char **argv)
 	atexit(sig_atexit);
 	signal(SIGCHLD, sig_handler);
 	signal(SIGINT, sig_handler);
+	signal(SIGUSR1, sig_handler);
 
 	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
 		perror("failed to create pipes");
@@ -606,6 +607,7 @@ static int __cmd_record(int argc, const char **argv)
 		execvp(argv[0], (char **)argv);
 
 		perror(argv[0]);
+		kill(getppid(), SIGUSR1);
 		exit(-1);
 	}
 
@@ -697,17 +699,18 @@ static int __cmd_record(int argc, const char **argv)
 	if (err < 0)
 		err = event__synthesize_kernel_mmap(process_synthesized_event,
 						    session, machine, "_stext");
-	if (err < 0) {
-		pr_err("Couldn't record kernel reference relocation symbol.\n");
-		return err;
-	}
+	if (err < 0)
+		pr_err("Couldn't record kernel reference relocation symbol\n"
+		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+		       "Check /proc/kallsyms permission or run as root.\n");
 
 	err = event__synthesize_modules(process_synthesized_event,
 					session, machine);
-	if (err < 0) {
-		pr_err("Couldn't record kernel reference relocation symbol.\n");
-		return err;
-	}
+	if (err < 0)
+		pr_err("Couldn't record kernel module information.\n"
+		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
+		       "Check /proc/modules permission or run as root.\n");
+
 	if (perf_guest)
 		perf_session__process_machines(session, event__synthesize_guest_os);
 
@@ -761,7 +764,7 @@ static int __cmd_record(int argc, const char **argv)
 		}
 	}
 
-	if (quiet)
+	if (quiet || signr == SIGUSR1)
 		return 0;
 
 	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index d7e67b167ea3..64a85bafde63 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -946,11 +946,16 @@ perf_header__find_attr(u64 id, struct perf_header *header)
 
 	/*
 	 * We set id to -1 if the data file doesn't contain sample
-	 * ids. Check for this and avoid walking through the entire
-	 * list of ids which may be large.
+	 * ids. This can happen when the data file contains one type
+	 * of event and in that case, the header can still store the
+	 * event attribute information. Check for this and avoid
+	 * walking through the entire list of ids which may be large.
 	 */
-	if (id == -1ULL)
+	if (id == -1ULL) {
+		if (header->attrs > 0)
+			return &header->attr[0]->attr;
 		return NULL;
+	}
 
 	for (i = 0; i < header->attrs; i++) {
 		struct perf_header_attr *attr = header->attr[i];
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index b39f499e575a..d628c8d1cf5e 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -295,7 +295,9 @@ static void symbols__insert_by_name(struct rb_root *self, struct symbol *sym)
 {
 	struct rb_node **p = &self->rb_node;
 	struct rb_node *parent = NULL;
-	struct symbol_name_rb_node *symn = ((void *)sym) - sizeof(*parent), *s;
+	struct symbol_name_rb_node *symn, *s;
+
+	symn = container_of(sym, struct symbol_name_rb_node, sym);
 
 	while (*p != NULL) {
 		parent = *p;
@@ -530,7 +532,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
 	struct machine *machine = kmaps->machine;
 	struct map *curr_map = map;
 	struct symbol *pos;
-	int count = 0;
+	int count = 0, moved = 0;
 	struct rb_root *root = &self->symbols[map->type];
 	struct rb_node *next = rb_first(root);
 	int kernel_range = 0;
@@ -588,6 +590,11 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
 			char dso_name[PATH_MAX];
 			struct dso *dso;
 
+			if (count == 0) {
+				curr_map = map;
+				goto filter_symbol;
+			}
+
 			if (self->kernel == DSO_TYPE_GUEST_KERNEL)
 				snprintf(dso_name, sizeof(dso_name),
 					"[guest.kernel].%d",
@@ -613,7 +620,7 @@ static int dso__split_kallsyms(struct dso *self, struct map *map,
 			map_groups__insert(kmaps, curr_map);
 			++kernel_range;
 		}
-
+filter_symbol:
 		if (filter && filter(curr_map, pos)) {
 discard_symbol:		rb_erase(&pos->rb_node, root);
 			symbol__delete(pos);
@@ -621,8 +628,9 @@ discard_symbol: rb_erase(&pos->rb_node, root);
 			if (curr_map != map) {
 				rb_erase(&pos->rb_node, root);
 				symbols__insert(&curr_map->dso->symbols[curr_map->type], pos);
-			}
-			count++;
+				++moved;
+			} else
+				++count;
 		}
 	}
 
@@ -632,7 +640,7 @@ discard_symbol: rb_erase(&pos->rb_node, root);
 		dso__set_loaded(curr_map->dso, curr_map->type);
 	}
 
-	return count;
+	return count + moved;
 }
 
 int dso__load_kallsyms(struct dso *self, const char *filename,
@@ -2123,14 +2131,55 @@ static struct dso *machine__create_kernel(struct machine *self)
 	return kernel;
 }
 
+struct process_args {
+	u64 start;
+};
+
+static int symbol__in_kernel(void *arg, const char *name,
+			     char type __used, u64 start)
+{
+	struct process_args *args = arg;
+
+	if (strchr(name, '['))
+		return 0;
+
+	args->start = start;
+	return 1;
+}
+
+/* Figure out the start address of kernel map from /proc/kallsyms */
+static u64 machine__get_kernel_start_addr(struct machine *machine)
+{
+	const char *filename;
+	char path[PATH_MAX];
+	struct process_args args;
+
+	if (machine__is_host(machine)) {
+		filename = "/proc/kallsyms";
+	} else {
+		if (machine__is_default_guest(machine))
+			filename = (char *)symbol_conf.default_guest_kallsyms;
+		else {
+			sprintf(path, "%s/proc/kallsyms", machine->root_dir);
+			filename = path;
+		}
+	}
+
+	if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
+		return 0;
+
+	return args.start;
+}
+
 int __machine__create_kernel_maps(struct machine *self, struct dso *kernel)
 {
 	enum map_type type;
+	u64 start = machine__get_kernel_start_addr(self);
 
 	for (type = 0; type < MAP__NR_TYPES; ++type) {
 		struct kmap *kmap;
 
-		self->vmlinux_maps[type] = map__new2(0, kernel, type);
+		self->vmlinux_maps[type] = map__new2(start, kernel, type);
 		if (self->vmlinux_maps[type] == NULL)
 			return -1;
 
diff --git a/usr/initramfs_data.S b/usr/initramfs_data.S
index 792a750d9441..c14322d1c0cf 100644
--- a/usr/initramfs_data.S
+++ b/usr/initramfs_data.S
@@ -22,14 +22,15 @@
 */
 
 #include <linux/stringify.h>
+#include <asm-generic/vmlinux.lds.h>
 
 .section .init.ramfs,"a"
 __irf_start:
 .incbin __stringify(INITRAMFS_IMAGE)
 __irf_end:
 .section .init.ramfs.info,"a"
-.globl __initramfs_size
-__initramfs_size:
+.globl VMLINUX_SYMBOL(__initramfs_size)
+VMLINUX_SYMBOL(__initramfs_size):
 #ifdef CONFIG_64BIT
 	.quad __irf_end - __irf_start
 #else