author	David S. Miller <davem@davemloft.net>	2011-06-21 01:29:08 -0400
committer	David S. Miller <davem@davemloft.net>	2011-06-21 01:29:08 -0400
commit	9f6ec8d697c08963d83880ccd35c13c5ace716ea (patch)
tree	ad8d93cf6fcdd09b86ade09f5fcbbc66cdb1cca2
parent	4aa3a715551c93eda32d79bd52042ce500bd5383 (diff)
parent	56299378726d5f2ba8d3c8cbbd13cb280ba45e4f (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
Conflicts:
	drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
	drivers/net/wireless/rtlwifi/pci.c
	net/netfilter/ipvs/ip_vs_core.c
-rw-r--r--CREDITS8
-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-adp887056
-rw-r--r--Documentation/accounting/cgroupstats.txt4
-rw-r--r--Documentation/cgroups/blkio-controller.txt31
-rw-r--r--Documentation/cgroups/cgroups.txt60
-rw-r--r--Documentation/cgroups/cpuacct.txt21
-rw-r--r--Documentation/cgroups/cpusets.txt28
-rw-r--r--Documentation/cgroups/devices.txt6
-rw-r--r--Documentation/cgroups/freezer-subsystem.txt20
-rw-r--r--Documentation/cgroups/memory.txt58
-rw-r--r--Documentation/feature-removal-schedule.txt17
-rw-r--r--Documentation/filesystems/proc.txt1
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/kmemleak.txt4
-rw-r--r--Documentation/md.txt2
-rw-r--r--Documentation/printk-formats.txt119
-rw-r--r--Documentation/scheduler/sched-design-CFS.txt7
-rw-r--r--Documentation/scheduler/sched-rt-group.txt7
-rw-r--r--Documentation/vm/hwpoison.txt6
-rw-r--r--MAINTAINERS50
-rw-r--r--Makefile19
-rw-r--r--arch/alpha/kernel/osf_sys.c11
-rw-r--r--arch/arm/boot/compressed/head.S6
-rw-r--r--arch/arm/configs/davinci_all_defconfig2
-rw-r--r--arch/arm/configs/netx_defconfig2
-rw-r--r--arch/arm/configs/viper_defconfig2
-rw-r--r--arch/arm/configs/xcep_defconfig2
-rw-r--r--arch/arm/configs/zeus_defconfig2
-rw-r--r--arch/arm/kernel/devtree.c3
-rw-r--r--arch/arm/kernel/entry-armv.S6
-rw-r--r--arch/arm/kernel/entry-common.S2
-rw-r--r--arch/arm/kernel/traps.c4
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c2
-rw-r--r--arch/arm/mach-davinci/devices.c2
-rw-r--r--arch/arm/mach-davinci/gpio.c7
-rw-r--r--arch/arm/mach-ep93xx/core.c6
-rw-r--r--arch/arm/mach-exynos4/Kconfig6
-rw-r--r--arch/arm/mach-exynos4/Makefile2
-rw-r--r--arch/arm/mach-exynos4/cpu.c2
-rw-r--r--arch/arm/mach-exynos4/include/mach/regs-usb-phy.h2
-rw-r--r--arch/arm/mach-exynos4/setup-usb-phy.c (renamed from arch/arm/mach-exynos4/usb-phy.c)0
-rw-r--r--arch/arm/mach-exynos4/time.c2
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c1
-rw-r--r--arch/arm/mach-footbridge/include/mach/debug-macro.S5
-rw-r--r--arch/arm/mach-msm/timer.c14
-rw-r--r--arch/arm/mach-mxs/ocotp.c2
-rw-r--r--arch/arm/mach-omap1/Makefile4
-rw-r--r--arch/arm/mach-omap1/dma.c11
-rw-r--r--arch/arm/mach-omap1/pm_bus.c8
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c2
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c6
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c11
-rw-r--r--arch/arm/mach-omap2/board-apollon.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c5
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c8
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c7
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c2
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c8
-rw-r--r--arch/arm/mach-omap2/board-overo.c4
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c3
-rw-r--r--arch/arm/mach-omap2/board-zoom-display.c2
-rw-r--r--arch/arm/mach-omap2/common-board-devices.c20
-rw-r--r--arch/arm/mach-omap2/common-board-devices.h2
-rw-r--r--arch/arm/mach-omap2/devices.c2
-rw-r--r--arch/arm/mach-omap2/hsmmc.c16
-rw-r--r--arch/arm/mach-omap2/hsmmc.h1
-rw-r--r--arch/arm/mach-omap2/mux.c5
-rw-r--r--arch/arm/mach-omap2/mux.h6
-rw-r--r--arch/arm/mach-omap2/mux44xx.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c2
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c4
-rw-r--r--arch/arm/mach-omap2/pm-debug.c4
-rw-r--r--arch/arm/mach-pxa/spitz_pm.c1
-rw-r--r--arch/arm/mach-s3c2410/Makefile1
-rw-r--r--arch/arm/mach-s3c2410/irq.c34
-rw-r--r--arch/arm/mach-s5pv210/cpufreq.c8
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c4
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c208
-rw-r--r--arch/arm/mach-shmobile/intc-sh73a0.c6
-rw-r--r--arch/arm/mach-shmobile/setup-sh7367.c14
-rw-r--r--arch/arm/mach-tegra/board-harmony-power.c4
-rw-r--r--arch/arm/mach-tegra/board-harmony.h3
-rw-r--r--arch/arm/mach-u300/clock.h2
-rw-r--r--arch/arm/mach-u300/include/mach/u300-regs.h22
-rw-r--r--arch/arm/mach-u300/timer.c3
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c3
-rw-r--r--arch/arm/mach-vexpress/v2m.c15
-rw-r--r--arch/arm/mm/context.c17
-rw-r--r--arch/arm/mm/init.c12
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S2
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S2
-rw-r--r--arch/arm/mm/proc-v7.S10
-rw-r--r--arch/arm/plat-mxc/devices/platform-imx-dma.c6
-rw-r--r--arch/arm/plat-nomadik/include/plat/gpio.h1
-rw-r--r--arch/arm/plat-omap/include/plat/flash.h1
-rw-r--r--arch/arm/plat-omap/include/plat/iovmm.h3
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h3
-rw-r--r--arch/arm/plat-omap/iovmm.c4
-rw-r--r--arch/arm/plat-omap/omap_device.c19
-rw-r--r--arch/arm/plat-omap/sram.c12
-rw-r--r--arch/arm/plat-s3c24xx/dma.c2
-rw-r--r--arch/arm/plat-s3c24xx/irq.c6
-rw-r--r--arch/arm/plat-s5p/dev-onenand.c12
-rw-r--r--arch/arm/plat-s5p/include/plat/map-s5p.h2
-rw-r--r--arch/arm/plat-samsung/dev-onenand.c12
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h6
-rw-r--r--arch/avr32/configs/atngw100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig3
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atstk1002_defconfig1
-rw-r--r--arch/avr32/configs/atstk1003_defconfig1
-rw-r--r--arch/avr32/configs/atstk1004_defconfig1
-rw-r--r--arch/avr32/configs/atstk1006_defconfig1
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/hammerhead_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/configs/mimc200_defconfig1
-rw-r--r--arch/avr32/include/asm/processor.h1
-rw-r--r--arch/avr32/mach-at32ap/at32ap700x.c3
-rw-r--r--arch/avr32/mach-at32ap/include/mach/cpu.h12
-rw-r--r--arch/avr32/mach-at32ap/intc.c4
-rw-r--r--arch/blackfin/configs/CM-BF548_defconfig2
-rw-r--r--arch/m68k/Kconfig.nommu52
-rw-r--r--arch/m68k/kernel/m68k_ksyms.c3
-rw-r--r--arch/m68k/kernel/vmlinux.lds_no.S20
-rw-r--r--arch/m68k/lib/memcpy.c9
-rw-r--r--arch/m68k/lib/memset.c9
-rw-r--r--arch/m68k/lib/muldi3.c21
-rw-r--r--arch/mips/configs/mtx1_defconfig2
-rw-r--r--arch/mn10300/kernel/traps.c4
-rw-r--r--arch/mn10300/kernel/vmlinux.lds.S1
-rw-r--r--arch/mn10300/mm/cache-dbg-flush-by-reg.S4
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/dtc-src/.gitignore3
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig2
-rw-r--r--arch/powerpc/configs/ps3_defconfig2
-rw-r--r--arch/powerpc/include/asm/rio.h2
-rw-r--r--arch/powerpc/kernel/cputable.c2
-rw-r--r--arch/powerpc/kernel/prom.c27
-rw-r--r--arch/powerpc/mm/init_32.c15
-rw-r--r--arch/powerpc/mm/init_64.c14
-rw-r--r--arch/powerpc/mm/mem.c19
-rw-r--r--arch/powerpc/sysdev/fsl_lbc.c6
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/pgalloc.h8
-rw-r--r--arch/s390/include/asm/pgtable.h39
-rw-r--r--arch/s390/include/asm/qdio.h119
-rw-r--r--arch/s390/include/asm/tlb.h94
-rw-r--r--arch/s390/kvm/kvm-s390.c1
-rw-r--r--arch/s390/kvm/sie64a.S2
-rw-r--r--arch/s390/mm/pgtable.c292
-rw-r--r--arch/sh/boards/mach-ecovec24/setup.c48
-rw-r--r--arch/sh/boot/compressed/Makefile22
-rw-r--r--arch/sh/configs/titan_defconfig2
-rw-r--r--arch/sh/include/asm/cmpxchg-grb.h21
-rw-r--r--arch/sh/include/asm/processor_64.h1
-rw-r--r--arch/sh/include/cpu-sh4/cpu/sh7724.h8
-rw-r--r--arch/sh/kernel/cpu/sh4a/setup-sh7724.c40
-rw-r--r--arch/sh/kernel/process_32.c2
-rw-r--r--arch/sh/mm/cache-debugfs.c25
-rw-r--r--arch/sparc/Kconfig18
-rw-r--r--arch/sparc/include/asm/floppy_32.h8
-rw-r--r--arch/sparc/include/asm/floppy_64.h4
-rw-r--r--arch/sparc/include/asm/leon.h3
-rw-r--r--arch/sparc/include/asm/leon_pci.h21
-rw-r--r--arch/sparc/include/asm/pci_32.h24
-rw-r--r--arch/sparc/include/asm/pcic.h2
-rw-r--r--arch/sparc/include/asm/system_32.h2
-rw-r--r--arch/sparc/include/asm/system_64.h2
-rw-r--r--arch/sparc/kernel/Makefile4
-rw-r--r--arch/sparc/kernel/apc.c2
-rw-r--r--arch/sparc/kernel/auxio_32.c2
-rw-r--r--arch/sparc/kernel/chmc.c2
-rw-r--r--arch/sparc/kernel/entry.S8
-rw-r--r--arch/sparc/kernel/leon_kernel.c31
-rw-r--r--arch/sparc/kernel/leon_pci.c253
-rw-r--r--arch/sparc/kernel/leon_pci_grpci2.c897
-rw-r--r--arch/sparc/kernel/module.c2
-rw-r--r--arch/sparc/kernel/pci_common.c4
-rw-r--r--arch/sparc/kernel/pci_schizo.c6
-rw-r--r--arch/sparc/kernel/prom_irqtrans.c2
-rw-r--r--arch/sparc/kernel/psycho_common.c2
-rw-r--r--arch/sparc/kernel/sbus.c4
-rw-r--r--arch/sparc/kernel/setup_32.c2
-rw-r--r--arch/sparc/kernel/setup_64.c2
-rw-r--r--arch/sparc/kernel/smp_32.c6
-rw-r--r--arch/sparc/kernel/sun4d_irq.c126
-rw-r--r--arch/sparc/kernel/sys_sparc32.c2
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c6
-rw-r--r--arch/sparc/kernel/time_64.c2
-rw-r--r--arch/sparc/kernel/traps_64.c2
-rw-r--r--arch/sparc/kernel/unaligned_64.c6
-rw-r--r--arch/sparc/kernel/us2e_cpufreq.c6
-rw-r--r--arch/sparc/kernel/us3_cpufreq.c4
-rw-r--r--arch/sparc/kernel/viohs.c2
-rw-r--r--arch/sparc/kernel/visemul.c14
-rw-r--r--arch/sparc/mm/fault_32.c2
-rw-r--r--arch/sparc/mm/init_32.c2
-rw-r--r--arch/sparc/mm/init_64.c6
-rw-r--r--arch/sparc/mm/srmmu.c4
-rw-r--r--arch/sparc/mm/sun4c.c8
-rw-r--r--arch/sparc/mm/tsb.c6
-rw-r--r--arch/sparc/prom/console_32.c2
-rw-r--r--arch/sparc/prom/init_32.c2
-rw-r--r--arch/sparc/prom/mp.c2
-rw-r--r--arch/unicore32/Kconfig4
-rw-r--r--arch/unicore32/Makefile38
-rw-r--r--arch/unicore32/boot/compressed/Makefile2
-rw-r--r--arch/unicore32/configs/unicore32_defconfig (renamed from arch/unicore32/configs/debug_defconfig)8
-rw-r--r--arch/unicore32/include/asm/Kbuild59
-rw-r--r--arch/unicore32/kernel/Makefile1
-rw-r--r--arch/unicore32/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/include/asm/memblock.h2
-rw-r--r--arch/x86/include/asm/pvclock.h9
-rw-r--r--arch/x86/kernel/amd_iommu.c48
-rw-r--r--arch/x86/kernel/amd_iommu_init.c8
-rw-r--r--arch/x86/kernel/apic/apic.c3
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c8
-rw-r--r--arch/x86/kernel/devicetree.c11
-rw-r--r--arch/x86/kernel/process.c4
-rw-r--r--arch/x86/kernel/process_32.c1
-rw-r--r--arch/x86/kernel/process_64.c1
-rw-r--r--arch/x86/kernel/smpboot.c13
-rw-r--r--arch/x86/kvm/emulate.c82
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/vmx.c3
-rw-r--r--arch/x86/mm/memblock.c4
-rw-r--r--arch/x86/oprofile/op_model_amd.c13
-rw-r--r--arch/x86/platform/efi/efi.c29
-rw-r--r--arch/x86/xen/enlighten.c9
-rw-r--r--arch/x86/xen/mmu.c12
-rw-r--r--arch/x86/xen/multicalls.c12
-rw-r--r--arch/x86/xen/setup.c10
-rw-r--r--arch/x86/xen/smp.c7
-rw-r--r--drivers/ata/libata-eh.c12
-rw-r--r--drivers/base/power/clock_ops.c16
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c12
-rw-r--r--drivers/char/hpet.c25
-rw-r--r--drivers/cpufreq/cpufreq_stats.c9
-rw-r--r--drivers/cpufreq/powernow-k8.c6
-rw-r--r--drivers/dma/shdma.c13
-rw-r--r--drivers/firmware/iscsi_ibft_find.c2
-rw-r--r--drivers/gpio/Kconfig28
-rw-r--r--drivers/gpio/gpio-exynos4.c29
-rw-r--r--drivers/gpio/gpio-nomadik.c40
-rw-r--r--drivers/gpio/gpio-omap.c28
-rw-r--r--drivers/gpu/drm/drm_bufs.c17
-rw-r--r--drivers/gpu/drm/drm_crtc.c2
-rw-r--r--drivers/gpu/drm/drm_edid.c19
-rw-r--r--drivers/gpu/drm/drm_ioc32.c9
-rw-r--r--drivers/gpu/drm/drm_pci.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c2
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c32
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c12
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c4
-rw-r--r--drivers/gpu/drm/i915/intel_display.c89
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c15
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c16
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c3
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c8
-rw-r--r--drivers/gpu/drm/i915/intel_modes.c30
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c14
-rw-r--r--drivers/gpu/drm/mga/mga_drv.h19
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fence.c59
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_perf.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_sgdma.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c118
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vm.c1
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c17
-rw-r--r--drivers/gpu/drm/nouveau/nvreg.h2
-rw-r--r--drivers/gpu/drm/radeon/Kconfig9
-rw-r--r--drivers/gpu/drm/radeon/atombios.h1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c19
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.c326
-rw-r--r--drivers/gpu/drm/radeon/cayman_blit_shaders.h3
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c130
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c561
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h8
-rw-r--r--drivers/gpu/drm/radeon/ni.c13
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h8
-rw-r--r--drivers/gpu/drm/radeon/r600.c32
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c28
-rw-r--r--drivers/gpu/drm/radeon/r600d.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c39
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c71
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c140
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_fence.c51
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rv770.c3
-rw-r--r--drivers/gpu/drm/savage/savage_bci.c3
-rw-r--r--drivers/hid/Kconfig1
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h4
-rw-r--r--drivers/hid/hid-magicmouse.c10
-rw-r--r--drivers/hid/hid-multitouch.c62
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hid/usbhid/hiddev.c8
-rw-r--r--drivers/hwmon/asus_atk0110.c5
-rw-r--r--drivers/hwmon/coretemp.c27
-rw-r--r--drivers/hwmon/ibmaem.c2
-rw-r--r--drivers/hwmon/ibmpex.c1
-rw-r--r--drivers/hwmon/max6642.c22
-rw-r--r--drivers/hwmon/s3c-hwmon.c2
-rw-r--r--drivers/ide/ide-cd.c3
-rw-r--r--drivers/input/evdev.c3
-rw-r--r--drivers/input/input.c2
-rw-r--r--drivers/input/keyboard/omap-keypad.c1
-rw-r--r--drivers/input/keyboard/sh_keysc.c2
-rw-r--r--drivers/input/mousedev.c4
-rw-r--r--drivers/isdn/gigaset/interface.c4
-rw-r--r--drivers/isdn/hardware/mISDN/hfcsusb.c1
-rw-r--r--drivers/leds/Kconfig20
-rw-r--r--drivers/md/bitmap.c104
-rw-r--r--drivers/md/bitmap.h10
-rw-r--r--drivers/md/md.c41
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/raid1.c24
-rw-r--r--drivers/md/raid1.h2
-rw-r--r--drivers/md/raid5.c16
-rw-r--r--drivers/media/dvb/dvb-usb/anysee.c17
-rw-r--r--drivers/media/media-devnode.c4
-rw-r--r--drivers/media/video/cx23885/cx23885-cards.c2
-rw-r--r--drivers/media/video/gspca/coarse_expo_autogain.h116
-rw-r--r--drivers/media/video/gspca/ov519.c8
-rw-r--r--drivers/media/video/gspca/sonixj.c2
-rw-r--r--drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h2
-rw-r--r--drivers/media/video/ivtv/ivtv-driver.c10
-rw-r--r--drivers/media/video/ivtv/ivtv-firmware.c11
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.c129
-rw-r--r--drivers/media/video/ivtv/ivtv-ioctl.h3
-rw-r--r--drivers/media/video/ivtv/ivtv-streams.c4
-rw-r--r--drivers/media/video/ivtv/ivtv-vbi.c2
-rw-r--r--drivers/media/video/ivtv/ivtvfb.c33
-rw-r--r--drivers/media/video/omap3isp/isp.c2
-rw-r--r--drivers/media/video/soc_camera.c2
-rw-r--r--drivers/media/video/uvc/uvc_entity.c2
-rw-r--r--drivers/misc/apds990x.c2
-rw-r--r--drivers/misc/cs5535-mfgpt.c2
-rw-r--r--drivers/misc/sgi-xp/xpnet.c6
-rw-r--r--drivers/misc/spear13xx_pcie_gadget.c2
-rw-r--r--drivers/mmc/host/mmci.c12
-rw-r--r--drivers/mmc/host/omap_hsmmc.c3
-rw-r--r--drivers/net/3c503.c3
-rw-r--r--drivers/net/arm/am79c961a.c126
-rw-r--r--drivers/net/arm/ep93xx_eth.c82
-rw-r--r--drivers/net/bfin_mac.c20
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/dl2k.c2
-rw-r--r--drivers/net/fs_enet/mac-fcc.c2
-rw-r--r--drivers/net/gianfar.c29
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/gianfar_ethtool.c64
-rw-r--r--drivers/net/hp100.c4
-rw-r--r--drivers/net/hplance.c2
-rw-r--r--drivers/net/igb/igb_main.c3
-rw-r--r--drivers/net/netxen/netxen_nic_main.c4
-rw-r--r--drivers/net/phy/Kconfig1
-rw-r--r--drivers/net/phy/dp83640.c24
-rw-r--r--drivers/net/ppp_async.c4
-rw-r--r--drivers/net/pxa168_eth.c2
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c1
-rw-r--r--drivers/net/r8169.c10
-rw-r--r--drivers/net/smc91x.c6
-rw-r--r--drivers/net/tun.c24
-rw-r--r--drivers/net/usb/Kconfig10
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/kalmia.c384
-rw-r--r--drivers/net/wan/farsync.c4
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-4965.c9
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.c30
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-core.h2
-rw-r--r--drivers/net/wireless/iwlegacy/iwl-dev.h13
-rw-r--r--drivers/net/wireless/iwlegacy/iwl4965-base.c20
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c2
-rw-r--r--drivers/net/wireless/mwl8k.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c28
-rw-r--r--drivers/oprofile/buffer_sync.c21
-rw-r--r--drivers/pci/Makefile1
-rw-r--r--drivers/pci/intel-iommu.c2
-rw-r--r--drivers/pci/pci.c4
-rw-r--r--drivers/pcmcia/pxa2xx_vpac270.c1
-rw-r--r--drivers/ptp/ptp_chardev.c11
-rw-r--r--drivers/rtc/Kconfig9
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/interface.c2
-rw-r--r--drivers/rtc/rtc-dev.c3
-rw-r--r--drivers/rtc/rtc-m41t93.c2
-rw-r--r--drivers/rtc/rtc-puv3.c (renamed from arch/unicore32/kernel/rtc.c)14
-rw-r--r--drivers/s390/cio/qdio_main.c6
-rw-r--r--drivers/s390/net/qeth_core.h2
-rw-r--r--drivers/s390/net/qeth_core_main.c57
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c45
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c11
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h9
-rw-r--r--drivers/sh/clk/core.c2
-rw-r--r--drivers/spi/amba-pl022.c1
-rw-r--r--drivers/spi/omap2_mcspi.c2
-rw-r--r--drivers/spi/spi_bfin5xx.c7
-rw-r--r--drivers/staging/Kconfig18
-rw-r--r--drivers/staging/altera-stapl/altera-jtag.c2
-rw-r--r--drivers/staging/altera-stapl/altera.c2
-rw-r--r--drivers/staging/altera-stapl/altera.h (renamed from include/staging/altera.h)0
-rw-r--r--drivers/staging/ath6kl/Kconfig1
-rw-r--r--drivers/staging/ath6kl/os/linux/cfg80211.c3
-rw-r--r--drivers/staging/brcm80211/brcmfmac/wl_iw.c2
-rw-r--r--drivers/staging/gma500/psb_drv.c15
-rw-r--r--drivers/staging/gma500/psb_fb.c10
-rw-r--r--drivers/staging/gma500/psb_intel_bios.c13
-rw-r--r--drivers/staging/iio/accel/adis16201.h2
-rw-r--r--drivers/staging/iio/accel/adis16203.h2
-rw-r--r--drivers/staging/iio/dac/max517.c2
-rw-r--r--drivers/staging/iio/imu/adis16400_ring.c10
-rw-r--r--drivers/staging/iio/industrialio-trigger.c1
-rw-r--r--drivers/staging/mei/init.c4
-rw-r--r--drivers/staging/olpc_dcon/Kconfig1
-rw-r--r--drivers/staging/rts_pstor/sd.c2
-rw-r--r--drivers/staging/usbip/stub_dev.c21
-rw-r--r--drivers/staging/usbip/stub_rx.c20
-rw-r--r--drivers/tty/serial/pch_uart.c4
-rw-r--r--drivers/tty/tty_buffer.c14
-rw-r--r--drivers/usb/class/cdc-acm.c2
-rw-r--r--drivers/usb/core/hub.c6
-rw-r--r--drivers/usb/core/inode.c1
-rw-r--r--drivers/usb/gadget/Kconfig5
-rw-r--r--drivers/usb/gadget/amd5536udc.c1
-rw-r--r--drivers/usb/gadget/at91_udc.c1
-rw-r--r--drivers/usb/gadget/dummy_hcd.c1
-rw-r--r--drivers/usb/gadget/inode.c4
-rw-r--r--drivers/usb/gadget/mv_udc_core.c8
-rw-r--r--drivers/usb/gadget/net2280.c1
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c5
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c22
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c7
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c1
-rw-r--r--drivers/usb/host/ohci-pxa27x.c7
-rw-r--r--drivers/usb/host/xhci-dbg.c8
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-pci.c14
-rw-r--r--drivers/usb/host/xhci-ring.c2
-rw-r--r--drivers/usb/host/xhci.c18
-rw-r--r--drivers/usb/host/xhci.h6
-rw-r--r--drivers/usb/musb/musb_core.c1
-rw-r--r--drivers/usb/renesas_usbhs/mod_gadget.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/option.c34
-rw-r--r--drivers/usb/storage/transport.c29
-rw-r--r--drivers/usb/storage/unusual_devs.h19
-rw-r--r--drivers/usb/storage/usb.c13
-rw-r--r--drivers/usb/storage/usb.h2
-rw-r--r--drivers/video/arcfb.c5
-rw-r--r--drivers/video/aty/atyfb_base.c10
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp8870_bl.c1012
-rw-r--r--drivers/video/bf537-lq035.c1
-rw-r--r--drivers/video/broadsheetfb.c4
-rw-r--r--drivers/video/efifb.c34
-rw-r--r--drivers/video/hecubafb.c5
-rw-r--r--drivers/video/imxfb.c4
-rw-r--r--drivers/video/metronomefb.c4
-rw-r--r--drivers/video/modedb.c1
-rw-r--r--drivers/video/pxa168fb.c17
-rw-r--r--drivers/video/s3c-fb.c22
-rw-r--r--drivers/video/savage/savagefb_driver.c16
-rw-r--r--drivers/video/sh_mobile_hdmi.c18
-rw-r--r--drivers/video/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/vga16fb.c2
-rw-r--r--drivers/video/xen-fbfront.c3
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/xen/events.c20
-rw-r--r--drivers/xen/swiotlb-xen.c12
-rw-r--r--fs/afs/dir.c8
-rw-r--r--fs/afs/fsclient.c3
-rw-r--r--fs/afs/inode.c10
-rw-r--r--fs/afs/super.c74
-rw-r--r--fs/afs/write.c21
-rw-r--r--fs/bad_inode.c3
-rw-r--r--fs/btrfs/btrfs_inode.h3
-rw-r--r--fs/btrfs/ctree.c32
-rw-r--r--fs/btrfs/ctree.h37
-rw-r--r--fs/btrfs/delayed-inode.c40
-rw-r--r--fs/btrfs/delayed-inode.h5
-rw-r--r--fs/btrfs/disk-io.c53
-rw-r--r--fs/btrfs/extent-tree.c162
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/file.c10
-rw-r--r--fs/btrfs/free-space-cache.c231
-rw-r--r--fs/btrfs/inode-map.c34
-rw-r--r--fs/btrfs/inode.c266
-rw-r--r--fs/btrfs/ioctl.c51
-rw-r--r--fs/btrfs/relocation.c48
-rw-r--r--fs/btrfs/scrub.c192
-rw-r--r--fs/btrfs/super.c10
-rw-r--r--fs/btrfs/sysfs.c146
-rw-r--r--fs/btrfs/transaction.c377
-rw-r--r--fs/btrfs/transaction.h29
-rw-r--r--fs/btrfs/tree-log.c2
-rw-r--r--fs/btrfs/volumes.c10
-rw-r--r--fs/btrfs/xattr.c2
-rw-r--r--fs/buffer.c4
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/ceph/caps.c10
-rw-r--r--fs/ceph/dir.c11
-rw-r--r--fs/ceph/export.c4
-rw-r--r--fs/ceph/file.c35
-rw-r--r--fs/ceph/inode.c18
-rw-r--r--fs/ceph/ioctl.c6
-rw-r--r--fs/ceph/locks.c29
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/ceph/xattr.c6
-rw-r--r--fs/cifs/Kconfig3
-rw-r--r--fs/cifs/cache.c6
-rw-r--r--fs/cifs/cifsencrypt.c2
-rw-r--r--fs/cifs/cifsfs.c36
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/connect.c29
-rw-r--r--fs/cifs/fscache.c51
-rw-r--r--fs/coda/pioctl.c2
-rw-r--r--fs/dcookies.c3
-rw-r--r--fs/exec.c7
-rw-r--r--fs/fat/file.c2
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/glock.c9
-rw-r--r--fs/isofs/inode.c3
-rw-r--r--fs/jfs/jfs_logmgr.c2
-rw-r--r--fs/logfs/dir.c8
-rw-r--r--fs/namei.c39
-rw-r--r--fs/nfsd/Kconfig1
-rw-r--r--fs/nfsd/nfsctl.c19
-rw-r--r--fs/nfsd/vfs.c19
-rw-r--r--fs/nilfs2/btree.c39
-rw-r--r--fs/nilfs2/inode.c7
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/ocfs2/super.c2
-rw-r--r--fs/proc/base.c6
-rw-r--r--fs/proc/namespaces.c9
-rw-r--r--fs/proc/proc_sysctl.c3
-rw-r--r--fs/proc/root.c11
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/super.c2
-rw-r--r--fs/sysfs/mount.c37
-rw-r--r--fs/sysfs/sysfs.h2
-rw-r--r--fs/timerfd.c5
-rw-r--r--fs/ubifs/super.c136
-rw-r--r--fs/xfs/linux-2.6/xfs_file.c50
-rw-r--r--fs/xfs/linux-2.6/xfs_iops.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_super.c75
-rw-r--r--fs/xfs/xfs_log.c11
-rw-r--r--include/asm-generic/gpio.h10
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/drm/drm_pciids.h11
-rw-r--r--include/linux/basic_mmio_gpio.h1
-rw-r--r--include/linux/clocksource.h1
-rw-r--r--include/linux/device_cgroup.h10
-rw-r--r--include/linux/ethtool.h6
-rw-r--r--include/linux/fs.h12
-rw-r--r--include/linux/gpio.h11
-rw-r--r--include/linux/i2c/adp8870.h153
-rw-r--r--include/linux/if_packet.h2
-rw-r--r--include/linux/if_vlan.h25
-rw-r--r--include/linux/input/sh_keysc.h2
-rw-r--r--include/linux/interrupt.h1
-rw-r--r--include/linux/irqreturn.h6
-rw-r--r--include/linux/kernel.h4
-rw-r--r--include/linux/kmod.h8
-rw-r--r--include/linux/kmsg_dump.h1
-rw-r--r--include/linux/kobject_ns.h10
-rw-r--r--include/linux/memcontrol.h6
-rw-r--r--include/linux/netdevice.h2
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h3
-rw-r--r--include/linux/percpu.h3
-rw-r--r--include/linux/perf_event.h8
-rw-r--r--include/linux/sched.h1
-rw-r--r--include/linux/seqlock.h1
-rw-r--r--include/linux/skbuff.h5
-rw-r--r--include/linux/smp.h5
-rw-r--r--include/linux/sunrpc/gss_krb5_enctypes.h4
-rw-r--r--include/linux/swap.h8
-rw-r--r--include/linux/swiotlb.h1
-rw-r--r--include/linux/sysfs.h7
-rw-r--r--include/linux/topology.h2
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/linux/uts.h2
-rw-r--r--include/media/v4l2-dev.h4
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netfilter/nf_conntrack.h6
-rw-r--r--include/trace/events/irq.h3
-rw-r--r--include/trace/events/vmscan.h83
-rw-r--r--init/Kconfig10
-rw-r--r--init/calibrate.c3
-rw-r--r--init/main.c1
-rw-r--r--kernel/events/core.c22
-rw-r--r--kernel/exit.c31
-rw-r--r--kernel/gcov/Kconfig3
-rw-r--r--kernel/irq/handle.c6
-rw-r--r--kernel/irq/irqdesc.c14
-rw-r--r--kernel/irq/manage.c27
-rw-r--r--kernel/irq/spurious.c31
-rw-r--r--kernel/kmod.c16
-rw-r--r--kernel/lockdep.c2
-rw-r--r--kernel/rcutree.c398
-rw-r--r--kernel/rcutree.h12
-rw-r--r--kernel/rcutree_plugin.h419
-rw-r--r--kernel/rcutree_trace.c32
-rw-r--r--kernel/sched.c33
-rw-r--r--kernel/sched_rt.c6
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/smp.c5
-rw-r--r--kernel/softirq.c2
-rw-r--r--kernel/sysctl.c6
-rw-r--r--kernel/time/clockevents.c5
-rw-r--r--kernel/time/clocksource.c24
-rw-r--r--kernel/timer.c15
-rw-r--r--kernel/trace/ftrace.c9
-rw-r--r--kernel/trace/trace_kprobe.c8
-rw-r--r--kernel/trace/trace_printk.c5
-rw-r--r--lib/bitmap.c2
-rw-r--r--lib/kobject.c26
-rw-r--r--lib/swiotlb.c5
-rw-r--r--lib/vsprintf.c4
-rw-r--r--mm/compaction.c76
-rw-r--r--mm/filemap.c2
-rw-r--r--mm/huge_memory.c5
-rw-r--r--mm/hugetlb.c12
-rw-r--r--mm/ksm.c6
-rw-r--r--mm/memcontrol.c81
-rw-r--r--mm/memory-failure.c4
-rw-r--r--mm/memory.c8
-rw-r--r--mm/memory_hotplug.c6
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmap.c12
-rw-r--r--mm/page_cgroup.c71
-rw-r--r--mm/rmap.c106
-rw-r--r--mm/slab.c9
-rw-r--r--mm/slub.c12
-rw-r--r--mm/thrash.c105
-rw-r--r--mm/vmscan.c20
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_core.c60
-rw-r--r--net/bluetooth/hci_event.c18
-rw-r--r--net/bluetooth/l2cap_sock.c1
-rw-r--r--net/bluetooth/rfcomm/sock.c1
-rw-r--r--net/bluetooth/sco.c13
-rw-r--r--net/bridge/br_device.c1
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c6
-rw-r--r--net/caif/cfmuxl.c2
-rw-r--r--net/ceph/osd_client.c15
-rw-r--r--net/core/dev.c23
-rw-r--r--net/core/net-sysfs.c23
-rw-r--r--net/core/net_namespace.c28
-rw-r--r--net/core/netpoll.c7
-rw-r--r--net/ieee802154/nl-phy.c3
-rw-r--r--net/ipv4/af_inet.c1
-rw-r--r--net/ipv4/inet_diag.c14
-rw-r--r--net/ipv4/ip_output.c6
-rw-r--r--net/ipv4/netfilter/ip_queue.c6
-rw-r--r--net/ipv4/netfilter/ip_tables.c2
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c6
-rw-r--r--net/ipv4/netfilter/ipt_MASQUERADE.c2
-rw-r--r--net/ipv4/netfilter/ipt_ecn.c7
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_core.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_helper.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_rule.c2
-rw-r--r--net/ipv4/netfilter/nf_nat_standalone.c4
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/route.c82
-rw-r--r--net/ipv4/tcp_ipv4.c1
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/netfilter/ip6_queue.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c2
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c2
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/irda/iriap.c5
-rw-r--r--net/l2tp/l2tp_debugfs.c2
-rw-r--r--net/mac80211/ibss.c6
-rw-r--r--net/mac80211/mlme.c6
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c10
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c17
-rw-r--r--net/netfilter/nf_conntrack_core.c7
-rw-r--r--net/netfilter/nf_conntrack_ftp.c2
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c10
-rw-r--r--net/netfilter/nf_conntrack_irc.c3
-rw-r--r--net/netfilter/nf_conntrack_pptp.c3
-rw-r--r--net/netfilter/nf_conntrack_sane.c2
-rw-r--r--net/netfilter/nf_conntrack_sip.c2
-rw-r--r--net/netfilter/nfnetlink_log.c3
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/xt_socket.c4
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/sch_generic.c3
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_mech.c3
-rw-r--r--net/wireless/nl80211.c9
-rw-r--r--net/xfrm/xfrm_replay.c4
-rw-r--r--scripts/Makefile.asm-generic1
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/depmod.sh48
-rw-r--r--security/apparmor/lsm.c2
-rw-r--r--security/device_cgroup.c8
-rw-r--r--security/keys/request_key.c3
-rw-r--r--security/selinux/hooks.c25
-rw-r--r--security/selinux/selinuxfs.c37
-rw-r--r--security/selinux/ss/policydb.c3
-rw-r--r--security/tomoyo/mount.c2
-rw-r--r--sound/core/misc.c40
-rw-r--r--sound/firewire/isight.c1
-rw-r--r--sound/pci/asihpi/hpidspcd.c2
-rw-r--r--sound/pci/emu10k1/emu10k1_main.c8
-rw-r--r--sound/pci/fm801.c13
-rw-r--r--sound/pci/hda/hda_beep.h9
-rw-r--r--sound/pci/hda/patch_analog.c16
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_realtek.c18
-rw-r--r--sound/pci/hda/patch_via.c11
-rw-r--r--sound/pci/lola/lola.c2
-rw-r--r--sound/pci/rme9652/hdspm.c8
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c5
-rw-r--r--sound/soc/blackfin/bf5xx-ad1836.c4
-rw-r--r--sound/soc/codecs/ad1836.c14
-rw-r--r--sound/soc/codecs/ad1836.h6
-rw-r--r--sound/soc/codecs/wm8804.c9
-rw-r--r--sound/soc/codecs/wm8915.c3
-rw-r--r--sound/soc/codecs/wm8962.c4
-rw-r--r--sound/soc/codecs/wm_hubs.c8
-rw-r--r--sound/soc/fsl/fsl_dma.c9
-rw-r--r--sound/soc/samsung/i2s.c4
-rw-r--r--sound/soc/soc-cache.c3
-rw-r--r--sound/soc/soc-dapm.c22
-rw-r--r--sound/usb/6fire/firmware.c2
-rw-r--r--sound/usb/6fire/pcm.c4
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/perf/Makefile2
-rw-r--r--tools/perf/builtin-test.c2
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN7
-rw-r--r--tools/perf/util/event.c16
-rw-r--r--tools/perf/util/event.h2
-rw-r--r--tools/perf/util/evlist.c68
-rw-r--r--tools/perf/util/evlist.h6
-rw-r--r--tools/perf/util/evsel.c16
-rw-r--r--tools/perf/util/evsel.h7
-rw-r--r--tools/perf/util/python.c14
-rw-r--r--tools/perf/util/session.c12
-rw-r--r--tools/perf/util/trace-event-parse.c1
-rw-r--r--virt/kvm/kvm_main.c15
776 files changed, 10588 insertions, 5119 deletions
diff --git a/CREDITS b/CREDITS
index a7ea8e343836..d78359f5f64d 100644
--- a/CREDITS
+++ b/CREDITS
@@ -518,6 +518,14 @@ N: Zach Brown
 E: zab@zabbo.net
 D: maestro pci sound
 
+M: David Brownell
+D: Kernel engineer, mentor, and friend. Maintained USB EHCI and
+D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
+D: device drivers. His encouragement also helped many engineers get
+D: started working on the Linux kernel. David passed away in early
+D: 2011, and will be greatly missed.
+W: https://lkml.org/lkml/2011/4/5/36
+
 N: Gary Brubaker
 E: xavyer@ix.netcom.com
 D: USB Serial Empeg Empeg-car Mark I/II Driver
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870 b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
new file mode 100644
index 000000000000..aa11dbdd794b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-adp8870
@@ -0,0 +1,56 @@
+What:		/sys/class/backlight/<backlight>/<ambient light zone>_max
+What:		/sys/class/backlight/<backlight>/l1_daylight_max
+What:		/sys/class/backlight/<backlight>/l2_bright_max
+What:		/sys/class/backlight/<backlight>/l3_office_max
+What:		/sys/class/backlight/<backlight>/l4_indoor_max
+What:		/sys/class/backlight/<backlight>/l5_dark_max
+Date:		Mai 2011
+KernelVersion:	2.6.40
+Contact:	device-drivers-devel@blackfin.uclinux.org
+Description:
+		Control the maximum brightness for <ambient light zone>
+		on this <backlight>. Values are between 0 and 127. This file
+		will also show the brightness level stored for this
+		<ambient light zone>.
+
+What:		/sys/class/backlight/<backlight>/<ambient light zone>_dim
+What:		/sys/class/backlight/<backlight>/l2_bright_dim
+What:		/sys/class/backlight/<backlight>/l3_office_dim
+What:		/sys/class/backlight/<backlight>/l4_indoor_dim
+What:		/sys/class/backlight/<backlight>/l5_dark_dim
+Date:		Mai 2011
+KernelVersion:	2.6.40
+Contact:	device-drivers-devel@blackfin.uclinux.org
+Description:
+		Control the dim brightness for <ambient light zone>
+		on this <backlight>. Values are between 0 and 127, typically
+		set to 0. Full off when the backlight is disabled.
+		This file will also show the dim brightness level stored for
+		this <ambient light zone>.
+
+What:		/sys/class/backlight/<backlight>/ambient_light_level
+Date:		Mai 2011
+KernelVersion:	2.6.40
+Contact:	device-drivers-devel@blackfin.uclinux.org
+Description:
+		Get conversion value of the light sensor.
+		This value is updated every 80 ms (when the light sensor
+		is enabled). Returns integer between 0 (dark) and
+		8000 (max ambient brightness)
+
+What:		/sys/class/backlight/<backlight>/ambient_light_zone
+Date:		Mai 2011
+KernelVersion:	2.6.40
+Contact:	device-drivers-devel@blackfin.uclinux.org
+Description:
+		Get/Set current ambient light zone. Reading returns
+		integer between 1..5 (1 = daylight, 2 = bright, ..., 5 = dark).
+		Writing a value between 1..5 forces the backlight controller
+		to enter the corresponding ambient light zone.
+		Writing 0 returns to normal/automatic ambient light level
+		operation. The ambient light sensing feature on these devices
+		is an extension to the API documented in
+		Documentation/ABI/stable/sysfs-class-backlight.
+		It can be enabled by writing the value stored in
+		/sys/class/backlight/<backlight>/max_brightness to
+		/sys/class/backlight/<backlight>/brightness.
\ No newline at end of file
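
For orientation, the attributes documented in this new file can be exercised from a shell roughly as follows; the <backlight> placeholder stands for whatever name the adp8870 driver registers under, and the brightness value 100 is only an illustrative number:

	# cat /sys/class/backlight/<backlight>/ambient_light_level
	# echo 2 > /sys/class/backlight/<backlight>/ambient_light_zone     (force the "bright" zone)
	# echo 100 > /sys/class/backlight/<backlight>/l2_bright_max        (cap brightness for that zone, 0..127)
	# echo 0 > /sys/class/backlight/<backlight>/ambient_light_zone     (return to automatic operation)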
diff --git a/Documentation/accounting/cgroupstats.txt b/Documentation/accounting/cgroupstats.txt
index eda40fd39cad..d16a9849e60e 100644
--- a/Documentation/accounting/cgroupstats.txt
+++ b/Documentation/accounting/cgroupstats.txt
@@ -21,7 +21,7 @@ information will not be available.
 To extract cgroup statistics a utility very similar to getdelays.c
 has been developed, the sample output of the utility is shown below
 
-~/balbir/cgroupstats # ./getdelays -C "/cgroup/a"
+~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup/a"
 sleeping 1, blocked 0, running 1, stopped 0, uninterruptible 0
-~/balbir/cgroupstats # ./getdelays -C "/cgroup"
+~/balbir/cgroupstats # ./getdelays -C "/sys/fs/cgroup"
 sleeping 155, blocked 0, running 1, stopped 0, uninterruptible 2
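
The getdelays invocation itself is unchanged; only the mount point moved. A minimal sketch, assuming a cgroup hierarchy is mounted at /sys/fs/cgroup as described in Documentation/cgroups/cgroups.txt and that a child group "a" has been created:

	# mount -t cgroup -o cpuacct none /sys/fs/cgroup
	# mkdir /sys/fs/cgroup/a
	# ./getdelays -C "/sys/fs/cgroup/a"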
diff --git a/Documentation/cgroups/blkio-controller.txt b/Documentation/cgroups/blkio-controller.txt
index 465351d4cf85..cd45c8ea7463 100644
--- a/Documentation/cgroups/blkio-controller.txt
+++ b/Documentation/cgroups/blkio-controller.txt
@@ -28,16 +28,19 @@ cgroups. Here is what you can do.
 - Enable group scheduling in CFQ
 	CONFIG_CFQ_GROUP_IOSCHED=y
 
-- Compile and boot into kernel and mount IO controller (blkio).
+- Compile and boot into kernel and mount IO controller (blkio); see
+  cgroups.txt, Why are cgroups needed?.
 
-	mount -t cgroup -o blkio none /cgroup
+	mount -t tmpfs cgroup_root /sys/fs/cgroup
+	mkdir /sys/fs/cgroup/blkio
+	mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Create two cgroups
-	mkdir -p /cgroup/test1/ /cgroup/test2
+	mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
 
 - Set weights of group test1 and test2
-	echo 1000 > /cgroup/test1/blkio.weight
-	echo 500 > /cgroup/test2/blkio.weight
+	echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
+	echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
 
 - Create two same size files (say 512MB each) on same disk (file1, file2) and
   launch two dd threads in different cgroup to read those files.
@@ -46,12 +49,12 @@ cgroups. Here is what you can do.
 	echo 3 > /proc/sys/vm/drop_caches
 
 	dd if=/mnt/sdb/zerofile1 of=/dev/null &
-	echo $! > /cgroup/test1/tasks
-	cat /cgroup/test1/tasks
+	echo $! > /sys/fs/cgroup/blkio/test1/tasks
+	cat /sys/fs/cgroup/blkio/test1/tasks
 
 	dd if=/mnt/sdb/zerofile2 of=/dev/null &
-	echo $! > /cgroup/test2/tasks
-	cat /cgroup/test2/tasks
+	echo $! > /sys/fs/cgroup/blkio/test2/tasks
+	cat /sys/fs/cgroup/blkio/test2/tasks
 
 - At macro level, first dd should finish first. To get more precise data, keep
   on looking at (with the help of script), at blkio.disk_time and
@@ -68,13 +71,13 @@ Throttling/Upper Limit policy
 - Enable throttling in block layer
 	CONFIG_BLK_DEV_THROTTLING=y
 
-- Mount blkio controller
-	mount -t cgroup -o blkio none /cgroup/blkio
+- Mount blkio controller (see cgroups.txt, Why are cgroups needed?)
+	mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
 
 - Specify a bandwidth rate on particular device for root group. The format
   for policy is "<major>:<minor> <byes_per_second>".
 
-	echo "8:16 1048576" > /cgroup/blkio/blkio.read_bps_device
+	echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device
 
   Above will put a limit of 1MB/second on reads happening for root group
   on device having major/minor number 8:16.
@@ -108,7 +111,7 @@ Hierarchical Cgroups
   CFQ and throttling will practically treat all groups at same level.
 
 				pivot
-			     /  |   \  \
+			    /  /   \  \
 			root  test1 test2  test3
 
   Down the line we can implement hierarchical accounting/control support
@@ -149,7 +152,7 @@ Proportional weight policy files
 
 Following is the format.
 
-#echo dev_maj:dev_minor weight > /path/to/cgroup/blkio.weight_device
+# echo dev_maj:dev_minor weight > blkio.weight_device
 Configure weight=300 on /dev/sdb (8:16) in this cgroup
 # echo 8:16 300 > blkio.weight_device
 # cat blkio.weight_device
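
Pulled together, the proportional-weight setup the updated blkio documentation describes now reads roughly as below; the group names test1/test2 and the 8:16 device come from the documentation itself, and the rate 1048576 (1MB/s) is the same example value used there:

	# mount -t tmpfs cgroup_root /sys/fs/cgroup
	# mkdir /sys/fs/cgroup/blkio
	# mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
	# mkdir /sys/fs/cgroup/blkio/test1 /sys/fs/cgroup/blkio/test2
	# echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
	# echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
	# echo "8:16 1048576" > /sys/fs/cgroup/blkio/blkio.read_bps_device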
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 0ed99f08f1f3..cd67e90003c0 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -138,11 +138,11 @@ With the ability to classify tasks differently for different resources
 the admin can easily set up a script which receives exec notifications
 and depending on who is launching the browser he can
 
-	# echo browser_pid > /mnt/<restype>/<userclass>/tasks
+	# echo browser_pid > /sys/fs/cgroup/<restype>/<userclass>/tasks
 
 With only a single hierarchy, he now would potentially have to create
 a separate cgroup for every browser launched and associate it with
-approp network and other resource class. This may lead to
+appropriate network and other resource class. This may lead to
 proliferation of such cgroups.
 
 Also lets say that the administrator would like to give enhanced network
@@ -153,9 +153,9 @@ apps enhanced CPU power,
 With ability to write pids directly to resource classes, it's just a
 matter of :
 
-	# echo pid > /mnt/network/<new_class>/tasks
+	# echo pid > /sys/fs/cgroup/network/<new_class>/tasks
 	(after some time)
-	# echo pid > /mnt/network/<orig_class>/tasks
+	# echo pid > /sys/fs/cgroup/network/<orig_class>/tasks
 
 Without this ability, he would have to split the cgroup into
 multiple separate ones and then associate the new cgroups with the
@@ -310,21 +310,24 @@ subsystem, this is the case for the cpuset.
 To start a new job that is to be contained within a cgroup, using
 the "cpuset" cgroup subsystem, the steps are something like:
 
- 1) mkdir /dev/cgroup
- 2) mount -t cgroup -ocpuset cpuset /dev/cgroup
- 3) Create the new cgroup by doing mkdir's and write's (or echo's) in
-    the /dev/cgroup virtual file system.
- 4) Start a task that will be the "founding father" of the new job.
- 5) Attach that task to the new cgroup by writing its pid to the
-    /dev/cgroup tasks file for that cgroup.
- 6) fork, exec or clone the job tasks from this founding father task.
+ 1) mount -t tmpfs cgroup_root /sys/fs/cgroup
+ 2) mkdir /sys/fs/cgroup/cpuset
+ 3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+ 4) Create the new cgroup by doing mkdir's and write's (or echo's) in
+    the /sys/fs/cgroup virtual file system.
+ 5) Start a task that will be the "founding father" of the new job.
+ 6) Attach that task to the new cgroup by writing its pid to the
+    /sys/fs/cgroup/cpuset/tasks file for that cgroup.
+ 7) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cgroup
 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
 and then start a subshell 'sh' in that cgroup:
 
-	mount -t cgroup cpuset -ocpuset /dev/cgroup
-	cd /dev/cgroup
+	mount -t tmpfs cgroup_root /sys/fs/cgroup
+	mkdir /sys/fs/cgroup/cpuset
+	mount -t cgroup cpuset -ocpuset /sys/fs/cgroup/cpuset
+	cd /sys/fs/cgroup/cpuset
 	mkdir Charlie
 	cd Charlie
 	/bin/echo 2-3 > cpuset.cpus
@@ -345,7 +348,7 @@ Creating, modifying, using the cgroups can be done through the cgroup
 virtual filesystem.
 
 To mount a cgroup hierarchy with all available subsystems, type:
-# mount -t cgroup xxx /dev/cgroup
+# mount -t cgroup xxx /sys/fs/cgroup
 
 The "xxx" is not interpreted by the cgroup code, but will appear in
 /proc/mounts so may be any useful identifying string that you like.
@@ -354,23 +357,32 @@ Note: Some subsystems do not work without some user input first. For instance,
 if cpusets are enabled the user will have to populate the cpus and mems files
 for each new cgroup created before that group can be used.
 
+As explained in section `1.2 Why are cgroups needed?' you should create
+different hierarchies of cgroups for each single resource or group of
+resources you want to control. Therefore, you should mount a tmpfs on
+/sys/fs/cgroup and create directories for each cgroup resource or resource
+group.
+
+# mount -t tmpfs cgroup_root /sys/fs/cgroup
+# mkdir /sys/fs/cgroup/rg1
+
 To mount a cgroup hierarchy with just the cpuset and memory
 subsystems, type:
-# mount -t cgroup -o cpuset,memory hier1 /dev/cgroup
+# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
 
 To change the set of subsystems bound to a mounted hierarchy, just
 remount with different options:
-# mount -o remount,cpuset,blkio hier1 /dev/cgroup
+# mount -o remount,cpuset,blkio hier1 /sys/fs/cgroup/rg1
 
 Now memory is removed from the hierarchy and blkio is added.
 
 Note this will add blkio to the hierarchy but won't remove memory or
 cpuset, because the new options are appended to the old ones:
-# mount -o remount,blkio /dev/cgroup
+# mount -o remount,blkio /sys/fs/cgroup/rg1
 
 To Specify a hierarchy's release_agent:
 # mount -t cgroup -o cpuset,release_agent="/sbin/cpuset_release_agent" \
-  xxx /dev/cgroup
+  xxx /sys/fs/cgroup/rg1
 
 Note that specifying 'release_agent' more than once will return failure.
 
@@ -379,17 +391,17 @@ when the hierarchy consists of a single (root) cgroup. Supporting
 the ability to arbitrarily bind/unbind subsystems from an existing
 cgroup hierarchy is intended to be implemented in the future.
 
-Then under /dev/cgroup you can find a tree that corresponds to the
-tree of the cgroups in the system. For instance, /dev/cgroup
+Then under /sys/fs/cgroup/rg1 you can find a tree that corresponds to the
+tree of the cgroups in the system. For instance, /sys/fs/cgroup/rg1
 is the cgroup that holds the whole system.
 
 If you want to change the value of release_agent:
-# echo "/sbin/new_release_agent" > /dev/cgroup/release_agent
+# echo "/sbin/new_release_agent" > /sys/fs/cgroup/rg1/release_agent
 
 It can also be changed via remount.
 
-If you want to create a new cgroup under /dev/cgroup:
-# cd /dev/cgroup
+If you want to create a new cgroup under /sys/fs/cgroup/rg1:
+# cd /sys/fs/cgroup/rg1
 # mkdir my_cgroup
 
 Now you want to do something with this cgroup.
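
In short, the convention the updated cgroups.txt converges on is a single tmpfs at /sys/fs/cgroup with one directory per hierarchy. A minimal sketch, using the hierarchy name rg1 and group name my_cgroup from the documentation (the controller list is just an example):

	# mount -t tmpfs cgroup_root /sys/fs/cgroup
	# mkdir /sys/fs/cgroup/rg1
	# mount -t cgroup -o cpuset,memory hier1 /sys/fs/cgroup/rg1
	# mkdir /sys/fs/cgroup/rg1/my_cgroup
	# echo $$ > /sys/fs/cgroup/rg1/my_cgroup/tasks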
diff --git a/Documentation/cgroups/cpuacct.txt b/Documentation/cgroups/cpuacct.txt
index 8b930946c52a..9ad85df4b983 100644
--- a/Documentation/cgroups/cpuacct.txt
+++ b/Documentation/cgroups/cpuacct.txt
@@ -10,26 +10,25 @@ directly present in its group.
 
 Accounting groups can be created by first mounting the cgroup filesystem.
 
-# mkdir /cgroups
-# mount -t cgroup -ocpuacct none /cgroups
-
-With the above step, the initial or the parent accounting group
-becomes visible at /cgroups. At bootup, this group includes all the
-tasks in the system. /cgroups/tasks lists the tasks in this cgroup.
-/cgroups/cpuacct.usage gives the CPU time (in nanoseconds) obtained by
-this group which is essentially the CPU time obtained by all the tasks
+# mount -t cgroup -ocpuacct none /sys/fs/cgroup
+
+With the above step, the initial or the parent accounting group becomes
+visible at /sys/fs/cgroup. At bootup, this group includes all the tasks in
+the system. /sys/fs/cgroup/tasks lists the tasks in this cgroup.
+/sys/fs/cgroup/cpuacct.usage gives the CPU time (in nanoseconds) obtained
+by this group which is essentially the CPU time obtained by all the tasks
 in the system.
 
-New accounting groups can be created under the parent group /cgroups.
+New accounting groups can be created under the parent group /sys/fs/cgroup.
 
-# cd /cgroups
+# cd /sys/fs/cgroup
 # mkdir g1
 # echo $$ > g1
 
 The above steps create a new group g1 and move the current shell
 process (bash) into it. CPU time consumed by this bash and its children
 can be obtained from g1/cpuacct.usage and the same is accumulated in
-/cgroups/cpuacct.usage also.
+/sys/fs/cgroup/cpuacct.usage also.
 
 cpuacct.stat file lists a few statistics which further divide the
 CPU time obtained by the cgroup into user and system times. Currently
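
The updated cpuacct workflow, condensed into one pass (group name g1 as in the documentation; the pid is attached by writing it to the group's tasks file):

	# mount -t cgroup -o cpuacct none /sys/fs/cgroup
	# mkdir /sys/fs/cgroup/g1
	# echo $$ > /sys/fs/cgroup/g1/tasks
	# cat /sys/fs/cgroup/g1/cpuacct.usage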
diff --git a/Documentation/cgroups/cpusets.txt b/Documentation/cgroups/cpusets.txt
index 98a30829af7a..5b0d78e55ccc 100644
--- a/Documentation/cgroups/cpusets.txt
+++ b/Documentation/cgroups/cpusets.txt
@@ -661,21 +661,21 @@ than stress the kernel.
 
 To start a new job that is to be contained within a cpuset, the steps are:
 
- 1) mkdir /dev/cpuset
- 2) mount -t cgroup -ocpuset cpuset /dev/cpuset
+ 1) mkdir /sys/fs/cgroup/cpuset
+ 2) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
  3) Create the new cpuset by doing mkdir's and write's (or echo's) in
-    the /dev/cpuset virtual file system.
+    the /sys/fs/cgroup/cpuset virtual file system.
  4) Start a task that will be the "founding father" of the new job.
  5) Attach that task to the new cpuset by writing its pid to the
-    /dev/cpuset tasks file for that cpuset.
+    /sys/fs/cgroup/cpuset tasks file for that cpuset.
  6) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cpuset
 named "Charlie", containing just CPUs 2 and 3, and Memory Node 1,
 and then start a subshell 'sh' in that cpuset:
 
-  mount -t cgroup -ocpuset cpuset /dev/cpuset
-  cd /dev/cpuset
+  mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
+  cd /sys/fs/cgroup/cpuset
   mkdir Charlie
   cd Charlie
   /bin/echo 2-3 > cpuset.cpus
@@ -710,14 +710,14 @@ Creating, modifying, using the cpusets can be done through the cpuset
 virtual filesystem.
 
 To mount it, type:
-# mount -t cgroup -o cpuset cpuset /dev/cpuset
+# mount -t cgroup -o cpuset cpuset /sys/fs/cgroup/cpuset
 
-Then under /dev/cpuset you can find a tree that corresponds to the
-tree of the cpusets in the system. For instance, /dev/cpuset
+Then under /sys/fs/cgroup/cpuset you can find a tree that corresponds to the
+tree of the cpusets in the system. For instance, /sys/fs/cgroup/cpuset
 is the cpuset that holds the whole system.
 
-If you want to create a new cpuset under /dev/cpuset:
-# cd /dev/cpuset
+If you want to create a new cpuset under /sys/fs/cgroup/cpuset:
+# cd /sys/fs/cgroup/cpuset
 # mkdir my_cpuset
 
 Now you want to do something with this cpuset.
@@ -765,12 +765,12 @@ wrapper around the cgroup filesystem.
 
 The command
 
-mount -t cpuset X /dev/cpuset
+mount -t cpuset X /sys/fs/cgroup/cpuset
 
 is equivalent to
 
-mount -t cgroup -ocpuset,noprefix X /dev/cpuset
-echo "/sbin/cpuset_release_agent" > /dev/cpuset/release_agent
+mount -t cgroup -ocpuset,noprefix X /sys/fs/cgroup/cpuset
+echo "/sbin/cpuset_release_agent" > /sys/fs/cgroup/cpuset/release_agent
 
 2.2 Adding/removing cpus
 ------------------------
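
With the new paths, the "Charlie" example becomes, end to end (CPU and memory-node numbers as in the surrounding text, which puts the cpuset on CPUs 2-3 and Memory Node 1 and then starts a subshell):

	# mkdir /sys/fs/cgroup/cpuset
	# mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
	# cd /sys/fs/cgroup/cpuset
	# mkdir Charlie
	# cd Charlie
	# /bin/echo 2-3 > cpuset.cpus
	# /bin/echo 1 > cpuset.mems
	# /bin/echo $$ > tasks
	# sh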
diff --git a/Documentation/cgroups/devices.txt b/Documentation/cgroups/devices.txt
index 57ca4c89fe5c..16624a7f8222 100644
--- a/Documentation/cgroups/devices.txt
+++ b/Documentation/cgroups/devices.txt
@@ -22,16 +22,16 @@ removed from the child(ren).
 An entry is added using devices.allow, and removed using
 devices.deny. For instance
 
-	echo 'c 1:3 mr' > /cgroups/1/devices.allow
+	echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
 
 allows cgroup 1 to read and mknod the device usually known as
 /dev/null. Doing
 
-	echo a > /cgroups/1/devices.deny
+	echo a > /sys/fs/cgroup/1/devices.deny
 
 will remove the default 'a *:* rwm' entry. Doing
 
-	echo a > /cgroups/1/devices.allow
+	echo a > /sys/fs/cgroup/1/devices.allow
 
 will add the 'a *:* rwm' entry to the whitelist.
 
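
A quick walk-through of the whitelist commands above against the new mount point; the mount and mkdir lines are assumptions not shown in this hunk, and cgroup "1" is the name the documentation itself uses:

	# mount -t cgroup -o devices devices /sys/fs/cgroup
	# mkdir /sys/fs/cgroup/1
	# echo 'c 1:3 mr' > /sys/fs/cgroup/1/devices.allow
	# echo a > /sys/fs/cgroup/1/devices.deny
	# cat /sys/fs/cgroup/1/devices.list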
diff --git a/Documentation/cgroups/freezer-subsystem.txt b/Documentation/cgroups/freezer-subsystem.txt
index 41f37fea1276..c21d77742a07 100644
--- a/Documentation/cgroups/freezer-subsystem.txt
+++ b/Documentation/cgroups/freezer-subsystem.txt
@@ -59,28 +59,28 @@ is non-freezable.
59 59
60* Examples of usage : 60* Examples of usage :
61 61
62 # mkdir /containers 62 # mkdir /sys/fs/cgroup/freezer
63 # mount -t cgroup -ofreezer freezer /containers 63 # mount -t cgroup -ofreezer freezer /sys/fs/cgroup/freezer
64 # mkdir /containers/0 64 # mkdir /sys/fs/cgroup/freezer/0
65 # echo $some_pid > /containers/0/tasks 65 # echo $some_pid > /sys/fs/cgroup/freezer/0/tasks
66 66
67to get status of the freezer subsystem : 67to get status of the freezer subsystem :
68 68
69 # cat /containers/0/freezer.state 69 # cat /sys/fs/cgroup/freezer/0/freezer.state
70 THAWED 70 THAWED
71 71
72to freeze all tasks in the container : 72to freeze all tasks in the container :
73 73
74 # echo FROZEN > /containers/0/freezer.state 74 # echo FROZEN > /sys/fs/cgroup/freezer/0/freezer.state
75 # cat /containers/0/freezer.state 75 # cat /sys/fs/cgroup/freezer/0/freezer.state
76 FREEZING 76 FREEZING
77 # cat /containers/0/freezer.state 77 # cat /sys/fs/cgroup/freezer/0/freezer.state
78 FROZEN 78 FROZEN
79 79
80to unfreeze all tasks in the container : 80to unfreeze all tasks in the container :
81 81
82 # echo THAWED > /containers/0/freezer.state 82 # echo THAWED > /sys/fs/cgroup/freezer/0/freezer.state
83 # cat /containers/0/freezer.state 83 # cat /sys/fs/cgroup/freezer/0/freezer.state
84 THAWED 84 THAWED
85 85
86This is the basic mechanism which should do the right thing for user space task 86This is the basic mechanism which should do the right thing for user space task
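
Since freezer.state can report the transitional FREEZING value shown above, a controlling program usually writes FROZEN and then re-reads the file until the final state appears. A minimal sketch, assuming the freezer hierarchy is mounted at /sys/fs/cgroup/freezer as in the example:

    /* Sketch: freeze container "0" and wait for the transition to finish. */
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    #define STATE "/sys/fs/cgroup/freezer/0/freezer.state"

    int main(void)
    {
            char buf[16] = "";
            FILE *f;

            f = fopen(STATE, "w");
            if (!f)
                    return 1;
            fputs("FROZEN\n", f);
            fclose(f);

            do {                            /* FREEZING -> FROZEN */
                    usleep(10000);
                    f = fopen(STATE, "r");
                    if (!f)
                            return 1;
                    if (!fgets(buf, sizeof(buf), f))
                            buf[0] = '\0';
                    fclose(f);
            } while (strncmp(buf, "FROZEN", 6) != 0);

            return 0;
    }
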
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 7c163477fcd8..06eb6d957c83 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -1,8 +1,8 @@
1Memory Resource Controller 1Memory Resource Controller
2 2
3NOTE: The Memory Resource Controller has been generically been referred 3NOTE: The Memory Resource Controller has generically been referred to as the
4 to as the memory controller in this document. Do not confuse memory 4 memory controller in this document. Do not confuse memory controller
5 controller used here with the memory controller that is used in hardware. 5 used here with the memory controller that is used in hardware.
6 6
7(For editors) 7(For editors)
8In this document: 8In this document:
@@ -70,6 +70,7 @@ Brief summary of control files.
70 (See sysctl's vm.swappiness) 70 (See sysctl's vm.swappiness)
71 memory.move_charge_at_immigrate # set/show controls of moving charges 71 memory.move_charge_at_immigrate # set/show controls of moving charges
72 memory.oom_control # set/show oom controls. 72 memory.oom_control # set/show oom controls.
 73 memory.numa_stat # show memory usage per numa node
73 74
741. History 751. History
75 76
@@ -181,7 +182,7 @@ behind this approach is that a cgroup that aggressively uses a shared
181page will eventually get charged for it (once it is uncharged from 182page will eventually get charged for it (once it is uncharged from
182the cgroup that brought it in -- this will happen on memory pressure). 183the cgroup that brought it in -- this will happen on memory pressure).
183 184
184Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used.. 185Exception: If CONFIG_CGROUP_MEM_RES_CTLR_SWAP is not used.
185When you do swapoff and make swapped-out pages of shmem(tmpfs) to 186When you do swapoff and make swapped-out pages of shmem(tmpfs) to
186be backed into memory in force, charges for pages are accounted against the 187be backed into memory in force, charges for pages are accounted against the
187caller of swapoff rather than the users of shmem. 188caller of swapoff rather than the users of shmem.
@@ -213,7 +214,7 @@ affecting global LRU, memory+swap limit is better than just limiting swap from
213OS point of view. 214OS point of view.
214 215
215* What happens when a cgroup hits memory.memsw.limit_in_bytes 216* What happens when a cgroup hits memory.memsw.limit_in_bytes
216When a cgroup his memory.memsw.limit_in_bytes, it's useless to do swap-out 217When a cgroup hits memory.memsw.limit_in_bytes, it's useless to do swap-out
217in this cgroup. Then, swap-out will not be done by cgroup routine and file 218in this cgroup. Then, swap-out will not be done by cgroup routine and file
218caches are dropped. But as mentioned above, global LRU can do swapout memory 219caches are dropped. But as mentioned above, global LRU can do swapout memory
219from it for sanity of the system's memory management state. You can't forbid 220from it for sanity of the system's memory management state. You can't forbid
@@ -263,16 +264,17 @@ b. Enable CONFIG_RESOURCE_COUNTERS
263c. Enable CONFIG_CGROUP_MEM_RES_CTLR 264c. Enable CONFIG_CGROUP_MEM_RES_CTLR
264d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension) 265d. Enable CONFIG_CGROUP_MEM_RES_CTLR_SWAP (to use swap extension)
265 266
2661. Prepare the cgroups 2671. Prepare the cgroups (see cgroups.txt, Why are cgroups needed?)
267# mkdir -p /cgroups 268# mount -t tmpfs none /sys/fs/cgroup
268# mount -t cgroup none /cgroups -o memory 269# mkdir /sys/fs/cgroup/memory
270# mount -t cgroup none /sys/fs/cgroup/memory -o memory
269 271
2702. Make the new group and move bash into it 2722. Make the new group and move bash into it
271# mkdir /cgroups/0 273# mkdir /sys/fs/cgroup/memory/0
272# echo $$ > /cgroups/0/tasks 274# echo $$ > /sys/fs/cgroup/memory/0/tasks
273 275
274Since now we're in the 0 cgroup, we can alter the memory limit: 276Since now we're in the 0 cgroup, we can alter the memory limit:
275# echo 4M > /cgroups/0/memory.limit_in_bytes 277# echo 4M > /sys/fs/cgroup/memory/0/memory.limit_in_bytes
276 278
277NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo, 279NOTE: We can use a suffix (k, K, m, M, g or G) to indicate values in kilo,
278mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.) 280mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
@@ -280,11 +282,11 @@ mega or gigabytes. (Here, Kilo, Mega, Giga are Kibibytes, Mebibytes, Gibibytes.)
280NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited). 282NOTE: We can write "-1" to reset the *.limit_in_bytes(unlimited).
281NOTE: We cannot set limits on the root cgroup any more. 283NOTE: We cannot set limits on the root cgroup any more.
282 284
283# cat /cgroups/0/memory.limit_in_bytes 285# cat /sys/fs/cgroup/memory/0/memory.limit_in_bytes
2844194304 2864194304
285 287
286We can check the usage: 288We can check the usage:
287# cat /cgroups/0/memory.usage_in_bytes 289# cat /sys/fs/cgroup/memory/0/memory.usage_in_bytes
2881216512 2901216512
289 291
290A successful write to this file does not guarantee a successful set of 292A successful write to this file does not guarantee a successful set of
@@ -464,6 +466,24 @@ value for efficient access. (Of course, when necessary, it's synchronized.)
464If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP) 466If you want to know more exact memory usage, you should use RSS+CACHE(+SWAP)
465value in memory.stat(see 5.2). 467value in memory.stat(see 5.2).
466 468
4695.6 numa_stat
470
471This is similar to numa_maps but operates on a per-memcg basis. This is
472useful for providing visibility into the numa locality information within
473a memcg since the pages are allowed to be allocated from any physical
474node. One of the use cases is evaluating application performance by
475combining this information with the application's cpu allocation.
476
477We export "total", "file", "anon" and "unevictable" pages per-node for
478each memcg. The output format of memory.numa_stat is:
479
480total=<total pages> N0=<node 0 pages> N1=<node 1 pages> ...
481file=<total file pages> N0=<node 0 pages> N1=<node 1 pages> ...
482anon=<total anon pages> N0=<node 0 pages> N1=<node 1 pages> ...
483unevictable=<total unevictable pages> N0=<node 0 pages> N1=<node 1 pages> ...
484
485And we have total = file + anon + unevictable.
486
4676. Hierarchy support 4876. Hierarchy support
468 488
469The memory controller supports a deep hierarchy and hierarchical accounting. 489The memory controller supports a deep hierarchy and hierarchical accounting.
@@ -471,13 +491,13 @@ The hierarchy is created by creating the appropriate cgroups in the
471cgroup filesystem. Consider for example, the following cgroup filesystem 491cgroup filesystem. Consider for example, the following cgroup filesystem
472hierarchy 492hierarchy
473 493
474 root 494 root
475 / | \ 495 / | \
476 / | \ 496 / | \
477 a b c 497 a b c
478 | \ 498 | \
479 | \ 499 | \
480 d e 500 d e
481 501
482In the diagram above, with hierarchical accounting enabled, all memory 502In the diagram above, with hierarchical accounting enabled, all memory
483usage of e, is accounted to its ancestors up until the root (i.e, c and root), 503usage of e, is accounted to its ancestors up until the root (i.e, c and root),
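
The memory.numa_stat layout added above ("total=... N0=... N1=...") is plain text, so a small reader is enough to pull out the per-node counts. A sketch, assuming the memory controller is mounted and the cgroup "0" from the earlier example exists:

    /* Sketch: print the "total=" line of memory.numa_stat for cgroup 0. */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[1024];
            FILE *f = fopen("/sys/fs/cgroup/memory/0/memory.numa_stat", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f)) {
                    /* lines look like: total=<pages> N0=<pages> N1=<pages> ... */
                    if (!strncmp(line, "total=", 6))
                            fputs(line, stdout);
            }
            fclose(f);
            return 0;
    }
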
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 1a9446b59153..72e238465b0b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -481,23 +481,6 @@ Who: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
481 481
482---------------------------- 482----------------------------
483 483
484What: namespace cgroup (ns_cgroup)
485When: 2.6.38
486Why: The ns_cgroup leads to some problems:
487 * cgroup creation is out-of-control
488 * cgroup name can conflict when pids are looping
489 * it is not possible to have a single process handling
490 a lot of namespaces without falling in a exponential creation time
491 * we may want to create a namespace without creating a cgroup
492
493 The ns_cgroup is replaced by a compatibility flag 'clone_children',
494 where a newly created cgroup will copy the parent cgroup values.
495 The userspace has to manually create a cgroup and add a task to
496 the 'tasks' file.
497Who: Daniel Lezcano <daniel.lezcano@free.fr>
498
499----------------------------
500
501What: iwlwifi disable_hw_scan module parameters 484What: iwlwifi disable_hw_scan module parameters
502When: 2.6.40 485When: 2.6.40
503Why: Hardware scan is the preferred method for iwlwifi devices for 486Why: Hardware scan is the preferred method for iwlwifi devices for
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index f48178024067..db3b1aba32a3 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -843,6 +843,7 @@ Provides counts of softirq handlers serviced since boot time, for each cpu.
843 TASKLET: 0 0 0 290 843 TASKLET: 0 0 0 290
844 SCHED: 27035 26983 26971 26746 844 SCHED: 27035 26983 26971 26746
845 HRTIMER: 0 0 0 0 845 HRTIMER: 0 0 0 0
846 RCU: 1678 1769 2178 2250
846 847
847 848
8481.3 IDE devices in /proc/ide 8491.3 IDE devices in /proc/ide
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index d9a203b058f1..fd248a318211 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2598,6 +2598,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2598 unlock ejectable media); 2598 unlock ejectable media);
2599 m = MAX_SECTORS_64 (don't transfer more 2599 m = MAX_SECTORS_64 (don't transfer more
2600 than 64 sectors = 32 KB at a time); 2600 than 64 sectors = 32 KB at a time);
2601 n = INITIAL_READ10 (force a retry of the
2602 initial READ(10) command);
2601 o = CAPACITY_OK (accept the capacity 2603 o = CAPACITY_OK (accept the capacity
2602 reported by the device); 2604 reported by the device);
2603 r = IGNORE_RESIDUE (the device reports 2605 r = IGNORE_RESIDUE (the device reports
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index 090e6ee04536..51063e681ca4 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -11,7 +11,9 @@ with the difference that the orphan objects are not freed but only
11reported via /sys/kernel/debug/kmemleak. A similar method is used by the 11reported via /sys/kernel/debug/kmemleak. A similar method is used by the
12Valgrind tool (memcheck --leak-check) to detect the memory leaks in 12Valgrind tool (memcheck --leak-check) to detect the memory leaks in
13user-space applications. 13user-space applications.
14Kmemleak is supported on x86, arm, powerpc, sparc, sh, microblaze and tile. 14
15Please check DEBUG_KMEMLEAK dependencies in lib/Kconfig.debug for supported
16architectures.
15 17
16Usage 18Usage
17----- 19-----
diff --git a/Documentation/md.txt b/Documentation/md.txt
index 2366b1c8cf19..f0eee83ff78a 100644
--- a/Documentation/md.txt
+++ b/Documentation/md.txt
@@ -555,7 +555,7 @@ also have
555 sync_min 555 sync_min
556 sync_max 556 sync_max
557 The two values, given as numbers of sectors, indicate a range 557 The two values, given as numbers of sectors, indicate a range
558 withing the array where 'check'/'repair' will operate. Must be 558 within the array where 'check'/'repair' will operate. Must be
559 a multiple of chunk_size. When it reaches "sync_max" it will 559 a multiple of chunk_size. When it reaches "sync_max" it will
560 pause, rather than complete. 560 pause, rather than complete.
561 You can use 'select' or 'poll' on "sync_completed" to wait for 561 You can use 'select' or 'poll' on "sync_completed" to wait for
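
The 'select'/'poll' hint above relies on the usual sysfs notification pattern: read the attribute, poll for POLLPRI, then re-read from offset zero. A sketch under the assumption that the array is md0; the device name and error handling are illustrative only.

    /* Sketch: wait for md "sync_completed" updates with poll(2). */
    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[64];
            struct pollfd pfd;
            int fd = open("/sys/block/md0/md/sync_completed", O_RDONLY);

            if (fd < 0)
                    return 1;
            pfd.fd = fd;
            pfd.events = POLLPRI;

            for (;;) {
                    ssize_t n = pread(fd, buf, sizeof(buf) - 1, 0);

                    if (n <= 0)
                            break;
                    buf[n] = '\0';
                    printf("sync_completed: %s", buf);
                    if (poll(&pfd, 1, -1) < 0)  /* sysfs signals POLLPRI|POLLERR */
                            break;
            }
            close(fd);
            return 0;
    }
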
diff --git a/Documentation/printk-formats.txt b/Documentation/printk-formats.txt
index 1b5a5ddbc3ef..5df176ed59b8 100644
--- a/Documentation/printk-formats.txt
+++ b/Documentation/printk-formats.txt
@@ -9,7 +9,121 @@ If variable is of Type, use printk format specifier:
9 size_t %zu or %zx 9 size_t %zu or %zx
10 ssize_t %zd or %zx 10 ssize_t %zd or %zx
11 11
12Raw pointer value SHOULD be printed with %p. 12Raw pointer value SHOULD be printed with %p. The kernel supports
13the following extended format specifiers for pointer types:
14
15Symbols/Function Pointers:
16
17 %pF versatile_init+0x0/0x110
18 %pf versatile_init
19 %pS versatile_init+0x0/0x110
20 %ps versatile_init
21 %pB prev_fn_of_versatile_init+0x88/0x88
22
23 For printing symbols and function pointers. The 'S' and 's' specifiers
24 result in the symbol name with ('S') or without ('s') offsets. Where
25 this is used on a kernel without KALLSYMS - the symbol address is
26 printed instead.
27
28 The 'B' specifier results in the symbol name with offsets and should be
29 used when printing stack backtraces. The specifier takes into
30 consideration the effect of compiler optimisations which may occur
 31 when tail-calls are used and marked with the noreturn GCC attribute.
32
33 On ia64, ppc64 and parisc64 architectures function pointers are
34 actually function descriptors which must first be resolved. The 'F' and
35 'f' specifiers perform this resolution and then provide the same
36 functionality as the 'S' and 's' specifiers.
37
38Kernel Pointers:
39
40 %pK 0x01234567 or 0x0123456789abcdef
41
42 For printing kernel pointers which should be hidden from unprivileged
43 users. The behaviour of %pK depends on the kptr_restrict sysctl - see
44 Documentation/sysctl/kernel.txt for more details.
45
46Struct Resources:
47
48 %pr [mem 0x60000000-0x6fffffff flags 0x2200] or
49 [mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
50 %pR [mem 0x60000000-0x6fffffff pref] or
51 [mem 0x0000000060000000-0x000000006fffffff pref]
52
53 For printing struct resources. The 'R' and 'r' specifiers result in a
54 printed resource with ('R') or without ('r') a decoded flags member.
55
56MAC/FDDI addresses:
57
58 %pM 00:01:02:03:04:05
59 %pMF 00-01-02-03-04-05
60 %pm 000102030405
61
62 For printing 6-byte MAC/FDDI addresses in hex notation. The 'M' and 'm'
63 specifiers result in a printed address with ('M') or without ('m') byte
64 separators. The default byte separator is the colon (':').
65
66 Where FDDI addresses are concerned the 'F' specifier can be used after
67 the 'M' specifier to use dash ('-') separators instead of the default
68 separator.
69
70IPv4 addresses:
71
72 %pI4 1.2.3.4
73 %pi4 001.002.003.004
74 %p[Ii][hnbl]
75
76 For printing IPv4 dot-separated decimal addresses. The 'I4' and 'i4'
77 specifiers result in a printed address with ('i4') or without ('I4')
78 leading zeros.
79
80 The additional 'h', 'n', 'b', and 'l' specifiers are used to specify
81 host, network, big or little endian order addresses respectively. Where
82 no specifier is provided the default network/big endian order is used.
83
84IPv6 addresses:
85
86 %pI6 0001:0002:0003:0004:0005:0006:0007:0008
87 %pi6 00010002000300040005000600070008
88 %pI6c 1:2:3:4:5:6:7:8
89
90 For printing IPv6 network-order 16-bit hex addresses. The 'I6' and 'i6'
91 specifiers result in a printed address with ('I6') or without ('i6')
92 colon-separators. Leading zeros are always used.
93
94 The additional 'c' specifier can be used with the 'I' specifier to
95 print a compressed IPv6 address as described by
96 http://tools.ietf.org/html/rfc5952
97
98UUID/GUID addresses:
99
100 %pUb 00010203-0405-0607-0809-0a0b0c0d0e0f
101 %pUB 00010203-0405-0607-0809-0A0B0C0D0E0F
 102 %pUl 03020100-0504-0706-0809-0a0b0c0d0e0f
 103 %pUL 03020100-0504-0706-0809-0A0B0C0D0E0F
104
 105 For printing 16-byte UUID/GUID addresses. The additional 'l', 'L',
106 'b' and 'B' specifiers are used to specify a little endian order in
107 lower ('l') or upper case ('L') hex characters - and big endian order
108 in lower ('b') or upper case ('B') hex characters.
109
110 Where no additional specifiers are used the default little endian
111 order with lower case hex characters will be printed.
112
113struct va_format:
114
115 %pV
116
117 For printing struct va_format structures. These contain a format string
118 and va_list as follows:
119
120 struct va_format {
121 const char *fmt;
122 va_list *va;
123 };
124
125 Do not use this feature without some mechanism to verify the
126 correctness of the format string and va_list arguments.
13 127
14u64 SHOULD be printed with %llu/%llx, (unsigned long long): 128u64 SHOULD be printed with %llu/%llx, (unsigned long long):
15 129
@@ -32,4 +146,5 @@ Reminder: sizeof() result is of type size_t.
32Thank you for your cooperation and attention. 146Thank you for your cooperation and attention.
33 147
34 148
35By Randy Dunlap <rdunlap@xenotime.net> 149By Randy Dunlap <rdunlap@xenotime.net> and
150Andrew Murray <amurray@mpc-data.co.uk>
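
A few of the extended specifiers documented above, as they might appear in kernel code. This is a sketch with made-up values, not code from this patch:

    /* Sketch of in-kernel use of the extended %p specifiers listed above. */
    #include <linux/ioport.h>
    #include <linux/kernel.h>

    static void __maybe_unused printk_format_examples(void)
    {
            u8 mac[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
            u8 ip[4] = { 1, 2, 3, 4 };

            pr_info("entry point:  %pS\n", printk_format_examples); /* symbol+offset */
            pr_info("station:      %pM\n", mac);                    /* 00:01:02:03:04:05 */
            pr_info("peer address: %pI4\n", ip);                    /* 1.2.3.4 */
            pr_info("iomem root:   %pR\n", &iomem_resource);        /* decoded flags */
    }
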
diff --git a/Documentation/scheduler/sched-design-CFS.txt b/Documentation/scheduler/sched-design-CFS.txt
index 99961993257a..91ecff07cede 100644
--- a/Documentation/scheduler/sched-design-CFS.txt
+++ b/Documentation/scheduler/sched-design-CFS.txt
@@ -223,9 +223,10 @@ When CONFIG_FAIR_GROUP_SCHED is defined, a "cpu.shares" file is created for each
223group created using the pseudo filesystem. See example steps below to create 223group created using the pseudo filesystem. See example steps below to create
224task groups and modify their CPU share using the "cgroups" pseudo filesystem. 224task groups and modify their CPU share using the "cgroups" pseudo filesystem.
225 225
226 # mkdir /dev/cpuctl 226 # mount -t tmpfs cgroup_root /sys/fs/cgroup
227 # mount -t cgroup -ocpu none /dev/cpuctl 227 # mkdir /sys/fs/cgroup/cpu
228 # cd /dev/cpuctl 228 # mount -t cgroup -ocpu none /sys/fs/cgroup/cpu
229 # cd /sys/fs/cgroup/cpu
229 230
230 # mkdir multimedia # create "multimedia" group of tasks 231 # mkdir multimedia # create "multimedia" group of tasks
231 # mkdir browser # create "browser" group of tasks 232 # mkdir browser # create "browser" group of tasks
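
Continuing the steps above, the relative weight of each group is set through its cpu.shares file; 2048 against 1024 gives the multimedia group roughly twice the CPU of the browser group under contention. A sketch, assuming the groups were created as shown:

    /* Sketch: set relative CPU weights for the example groups. */
    #include <stdio.h>

    static void set_shares(const char *grp, const char *val)
    {
            char path[128];
            FILE *f;

            snprintf(path, sizeof(path), "/sys/fs/cgroup/cpu/%s/cpu.shares", grp);
            f = fopen(path, "w");
            if (f) {
                    fputs(val, f);
                    fclose(f);
            }
    }

    int main(void)
    {
            set_shares("multimedia", "2048");   /* ~2:1 ratio under contention */
            set_shares("browser", "1024");
            return 0;
    }
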
diff --git a/Documentation/scheduler/sched-rt-group.txt b/Documentation/scheduler/sched-rt-group.txt
index 605b0d40329d..71b54d549987 100644
--- a/Documentation/scheduler/sched-rt-group.txt
+++ b/Documentation/scheduler/sched-rt-group.txt
@@ -129,9 +129,8 @@ priority!
129Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real 129Enabling CONFIG_RT_GROUP_SCHED lets you explicitly allocate real
130CPU bandwidth to task groups. 130CPU bandwidth to task groups.
131 131
132This uses the /cgroup virtual file system and 132This uses the cgroup virtual file system and "<cgroup>/cpu.rt_runtime_us"
133"/cgroup/<cgroup>/cpu.rt_runtime_us" to control the CPU time reserved for each 133to control the CPU time reserved for each control group.
134control group.
135 134
136For more information on working with control groups, you should read 135For more information on working with control groups, you should read
137Documentation/cgroups/cgroups.txt as well. 136Documentation/cgroups/cgroups.txt as well.
@@ -150,7 +149,7 @@ For now, this can be simplified to just the following (but see Future plans):
150=============== 149===============
151 150
152There is work in progress to make the scheduling period for each group 151There is work in progress to make the scheduling period for each group
153("/cgroup/<cgroup>/cpu.rt_period_us") configurable as well. 152("<cgroup>/cpu.rt_period_us") configurable as well.
154 153
155The constraint on the period is that a subgroup must have a smaller or 154The constraint on the period is that a subgroup must have a smaller or
156equal period to its parent. But realistically it's not very useful _yet_ 155equal period to its parent. But realistically it's not very useful _yet_
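
As a worked illustration of the two knobs: leaving <cgroup>/cpu.rt_period_us at its 1000000 us default and writing 500000 to <cgroup>/cpu.rt_runtime_us caps the group's realtime tasks at 0.5 s of CPU time in every 1 s period, i.e. a 50% bandwidth budget, and a child group must then pick a period of at most 1000000 us. (Figures chosen for illustration; newly created groups start with a runtime of 0 and must be given budget before they accept realtime tasks.)
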
diff --git a/Documentation/vm/hwpoison.txt b/Documentation/vm/hwpoison.txt
index 12f9ba20ccb7..550068466605 100644
--- a/Documentation/vm/hwpoison.txt
+++ b/Documentation/vm/hwpoison.txt
@@ -129,12 +129,12 @@ Limit injection to pages owned by memgroup. Specified by inode number
129of the memcg. 129of the memcg.
130 130
131Example: 131Example:
132 mkdir /cgroup/hwpoison 132 mkdir /sys/fs/cgroup/mem/hwpoison
133 133
134 usemem -m 100 -s 1000 & 134 usemem -m 100 -s 1000 &
135 echo `jobs -p` > /cgroup/hwpoison/tasks 135 echo `jobs -p` > /sys/fs/cgroup/mem/hwpoison/tasks
136 136
137 memcg_ino=$(ls -id /cgroup/hwpoison | cut -f1 -d' ') 137 memcg_ino=$(ls -id /sys/fs/cgroup/mem/hwpoison | cut -f1 -d' ')
138 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg 138 echo $memcg_ino > /debug/hwpoison/corrupt-filter-memcg
139 139
140 page-types -p `pidof init` --hwpoison # shall do nothing 140 page-types -p `pidof init` --hwpoison # shall do nothing
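
The inode number fetched with "ls -id" above is just st_ino of the cgroup directory, so the same value can be obtained with stat(2). Sketch, with the path copied from the example:

    /* Sketch: print the memcg inode used by the hwpoison filter above. */
    #include <stdio.h>
    #include <sys/stat.h>

    int main(void)
    {
            struct stat st;

            if (stat("/sys/fs/cgroup/mem/hwpoison", &st))
                    return 1;
            printf("%lu\n", (unsigned long)st.st_ino); /* feed to corrupt-filter-memcg */
            return 0;
    }
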
diff --git a/MAINTAINERS b/MAINTAINERS
index 29801f760b6f..f0358cd91de3 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1739,7 +1739,7 @@ S: Supported
1739F: drivers/net/enic/ 1739F: drivers/net/enic/
1740 1740
1741CIRRUS LOGIC EP93XX ETHERNET DRIVER 1741CIRRUS LOGIC EP93XX ETHERNET DRIVER
1742M: Lennert Buytenhek <kernel@wantstofly.org> 1742M: Hartley Sweeten <hsweeten@visionengravers.com>
1743L: netdev@vger.kernel.org 1743L: netdev@vger.kernel.org
1744S: Maintained 1744S: Maintained
1745F: drivers/net/arm/ep93xx_eth.c 1745F: drivers/net/arm/ep93xx_eth.c
@@ -1889,7 +1889,6 @@ L: cpufreq@vger.kernel.org
1889W: http://www.codemonkey.org.uk/projects/cpufreq/ 1889W: http://www.codemonkey.org.uk/projects/cpufreq/
1890T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git 1890T: git git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq.git
1891S: Maintained 1891S: Maintained
1892F: arch/x86/kernel/cpu/cpufreq/
1893F: drivers/cpufreq/ 1892F: drivers/cpufreq/
1894F: include/linux/cpufreq.h 1893F: include/linux/cpufreq.h
1895 1894
@@ -2292,8 +2291,7 @@ F: drivers/scsi/eata_pio.*
2292 2291
2293EBTABLES 2292EBTABLES
2294M: Bart De Schuymer <bart.de.schuymer@pandora.be> 2293M: Bart De Schuymer <bart.de.schuymer@pandora.be>
2295L: ebtables-user@lists.sourceforge.net 2294L: netfilter-devel@vger.kernel.org
2296L: ebtables-devel@lists.sourceforge.net
2297W: http://ebtables.sourceforge.net/ 2295W: http://ebtables.sourceforge.net/
2298S: Maintained 2296S: Maintained
2299F: include/linux/netfilter_bridge/ebt_*.h 2297F: include/linux/netfilter_bridge/ebt_*.h
@@ -3820,6 +3818,12 @@ S: Maintained
3820F: drivers/leds/ 3818F: drivers/leds/
3821F: include/linux/leds.h 3819F: include/linux/leds.h
3822 3820
3821LEGACY EEPROM DRIVER
3822M: Jean Delvare <khali@linux-fr.org>
3823S: Maintained
3824F: Documentation/misc-devices/eeprom
3825F: drivers/misc/eeprom/eeprom.c
3826
3823LEGO USB Tower driver 3827LEGO USB Tower driver
3824M: Juergen Stuber <starblue@users.sourceforge.net> 3828M: Juergen Stuber <starblue@users.sourceforge.net>
3825L: legousb-devel@lists.sourceforge.net 3829L: legousb-devel@lists.sourceforge.net
@@ -4145,7 +4149,7 @@ F: include/linux/mm.h
4145F: mm/ 4149F: mm/
4146 4150
4147MEMORY RESOURCE CONTROLLER 4151MEMORY RESOURCE CONTROLLER
4148M: Balbir Singh <balbir@linux.vnet.ibm.com> 4152M: Balbir Singh <bsingharora@gmail.com>
4149M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> 4153M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
4150M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> 4154M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
4151L: linux-mm@kvack.org 4155L: linux-mm@kvack.org
@@ -4252,8 +4256,7 @@ F: drivers/mmc/
4252F: include/linux/mmc/ 4256F: include/linux/mmc/
4253 4257
4254MULTIMEDIA CARD (MMC) ETC. OVER SPI 4258MULTIMEDIA CARD (MMC) ETC. OVER SPI
4255M: David Brownell <dbrownell@users.sourceforge.net> 4259S: Orphan
4256S: Odd Fixes
4257F: drivers/mmc/host/mmc_spi.c 4260F: drivers/mmc/host/mmc_spi.c
4258F: include/linux/spi/mmc_spi.h 4261F: include/linux/spi/mmc_spi.h
4259 4262
@@ -4603,7 +4606,6 @@ F: drivers/media/video/omap3isp/*
4603 4606
4604OMAP USB SUPPORT 4607OMAP USB SUPPORT
4605M: Felipe Balbi <balbi@ti.com> 4608M: Felipe Balbi <balbi@ti.com>
4606M: David Brownell <dbrownell@users.sourceforge.net>
4607L: linux-usb@vger.kernel.org 4609L: linux-usb@vger.kernel.org
4608L: linux-omap@vger.kernel.org 4610L: linux-omap@vger.kernel.org
4609T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git 4611T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -4892,7 +4894,7 @@ F: mm/percpu*.c
4892F: arch/*/include/asm/percpu.h 4894F: arch/*/include/asm/percpu.h
4893 4895
4894PER-TASK DELAY ACCOUNTING 4896PER-TASK DELAY ACCOUNTING
4895M: Balbir Singh <balbir@linux.vnet.ibm.com> 4897M: Balbir Singh <bsingharora@gmail.com>
4896S: Maintained 4898S: Maintained
4897F: include/linux/delayacct.h 4899F: include/linux/delayacct.h
4898F: kernel/delayacct.c 4900F: kernel/delayacct.c
@@ -4947,6 +4949,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.gi
4947F: drivers/input/serio/i8042-unicore32io.h 4949F: drivers/input/serio/i8042-unicore32io.h
4948F: drivers/i2c/busses/i2c-puv3.c 4950F: drivers/i2c/busses/i2c-puv3.c
4949F: drivers/video/fb-puv3.c 4951F: drivers/video/fb-puv3.c
4952F: drivers/rtc/rtc-puv3.c
4950 4953
4951PMC SIERRA MaxRAID DRIVER 4954PMC SIERRA MaxRAID DRIVER
4952M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com> 4955M: Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
@@ -5984,7 +5987,6 @@ F: Documentation/serial/specialix.txt
5984F: drivers/staging/tty/specialix* 5987F: drivers/staging/tty/specialix*
5985 5988
5986SPI SUBSYSTEM 5989SPI SUBSYSTEM
5987M: David Brownell <dbrownell@users.sourceforge.net>
5988M: Grant Likely <grant.likely@secretlab.ca> 5990M: Grant Likely <grant.likely@secretlab.ca>
5989L: spi-devel-general@lists.sourceforge.net 5991L: spi-devel-general@lists.sourceforge.net
5990Q: http://patchwork.kernel.org/project/spi-devel-general/list/ 5992Q: http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -6100,7 +6102,7 @@ F: include/target/
6100F: Documentation/target/ 6102F: Documentation/target/
6101 6103
6102TASKSTATS STATISTICS INTERFACE 6104TASKSTATS STATISTICS INTERFACE
6103M: Balbir Singh <balbir@linux.vnet.ibm.com> 6105M: Balbir Singh <bsingharora@gmail.com>
6104S: Maintained 6106S: Maintained
6105F: Documentation/accounting/taskstats* 6107F: Documentation/accounting/taskstats*
6106F: include/linux/taskstats* 6108F: include/linux/taskstats*
@@ -6432,9 +6434,8 @@ S: Maintained
6432F: drivers/usb/misc/rio500* 6434F: drivers/usb/misc/rio500*
6433 6435
6434USB EHCI DRIVER 6436USB EHCI DRIVER
6435M: David Brownell <dbrownell@users.sourceforge.net>
6436L: linux-usb@vger.kernel.org 6437L: linux-usb@vger.kernel.org
6437S: Odd Fixes 6438S: Orphan
6438F: Documentation/usb/ehci.txt 6439F: Documentation/usb/ehci.txt
6439F: drivers/usb/host/ehci* 6440F: drivers/usb/host/ehci*
6440 6441
@@ -6448,9 +6449,10 @@ S: Maintained
6448F: drivers/media/video/et61x251/ 6449F: drivers/media/video/et61x251/
6449 6450
6450USB GADGET/PERIPHERAL SUBSYSTEM 6451USB GADGET/PERIPHERAL SUBSYSTEM
6451M: David Brownell <dbrownell@users.sourceforge.net> 6452M: Felipe Balbi <balbi@ti.com>
6452L: linux-usb@vger.kernel.org 6453L: linux-usb@vger.kernel.org
6453W: http://www.linux-usb.org/gadget 6454W: http://www.linux-usb.org/gadget
6455T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
6454S: Maintained 6456S: Maintained
6455F: drivers/usb/gadget/ 6457F: drivers/usb/gadget/
6456F: include/linux/usb/gadget* 6458F: include/linux/usb/gadget*
@@ -6460,7 +6462,7 @@ M: Jiri Kosina <jkosina@suse.cz>
6460L: linux-usb@vger.kernel.org 6462L: linux-usb@vger.kernel.org
6461T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git 6463T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/hid.git
6462S: Maintained 6464S: Maintained
6463F: Documentation/usb/hiddev.txt 6465F: Documentation/hid/hiddev.txt
6464F: drivers/hid/usbhid/ 6466F: drivers/hid/usbhid/
6465 6467
6466USB ISP116X DRIVER 6468USB ISP116X DRIVER
@@ -6492,9 +6494,8 @@ S: Maintained
6492F: sound/usb/midi.* 6494F: sound/usb/midi.*
6493 6495
6494USB OHCI DRIVER 6496USB OHCI DRIVER
6495M: David Brownell <dbrownell@users.sourceforge.net>
6496L: linux-usb@vger.kernel.org 6497L: linux-usb@vger.kernel.org
6497S: Odd Fixes 6498S: Orphan
6498F: Documentation/usb/ohci.txt 6499F: Documentation/usb/ohci.txt
6499F: drivers/usb/host/ohci* 6500F: drivers/usb/host/ohci*
6500 6501
@@ -6720,6 +6721,14 @@ S: Maintained
6720F: Documentation/filesystems/vfat.txt 6721F: Documentation/filesystems/vfat.txt
6721F: fs/fat/ 6722F: fs/fat/
6722 6723
6724VIDEOBUF2 FRAMEWORK
6725M: Pawel Osciak <pawel@osciak.com>
6726M: Marek Szyprowski <m.szyprowski@samsung.com>
6727L: linux-media@vger.kernel.org
6728S: Maintained
6729F: drivers/media/video/videobuf2-*
6730F: include/media/videobuf2-*
6731
6723VIRTIO CONSOLE DRIVER 6732VIRTIO CONSOLE DRIVER
6724M: Amit Shah <amit.shah@redhat.com> 6733M: Amit Shah <amit.shah@redhat.com>
6725L: virtualization@lists.linux-foundation.org 6734L: virtualization@lists.linux-foundation.org
@@ -6997,6 +7006,13 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.
6997S: Maintained 7006S: Maintained
6998F: drivers/platform/x86 7007F: drivers/platform/x86
6999 7008
7009X86 MCE INFRASTRUCTURE
7010M: Tony Luck <tony.luck@intel.com>
7011M: Borislav Petkov <bp@amd64.org>
7012L: linux-edac@vger.kernel.org
7013S: Maintained
7014F: arch/x86/kernel/cpu/mcheck/*
7015
7000XEN HYPERVISOR INTERFACE 7016XEN HYPERVISOR INTERFACE
7001M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> 7017M: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
7002M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> 7018M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
diff --git a/Makefile b/Makefile
index afb8e0d26f2c..41330a06e4ec 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 0 2PATCHLEVEL = 0
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc4
5NAME = Sneaky Weasel 5NAME = Sneaky Weasel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -378,7 +378,7 @@ KBUILD_LDFLAGS_MODULE := -T $(srctree)/scripts/module-common.lds
378 378
379# Read KERNELRELEASE from include/config/kernel.release (if it exists) 379# Read KERNELRELEASE from include/config/kernel.release (if it exists)
380KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null) 380KERNELRELEASE = $(shell cat include/config/kernel.release 2> /dev/null)
381KERNELVERSION = $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION) 381KERNELVERSION = $(VERSION)$(if $(PATCHLEVEL),.$(PATCHLEVEL)$(if $(SUBLEVEL),.$(SUBLEVEL)))$(EXTRAVERSION)
382 382
383export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION 383export VERSION PATCHLEVEL SUBLEVEL KERNELRELEASE KERNELVERSION
384export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC 384export ARCH SRCARCH CONFIG_SHELL HOSTCC HOSTCFLAGS CROSS_COMPILE AS LD CC
@@ -1005,7 +1005,7 @@ endef
1005 1005
1006define filechk_version.h 1006define filechk_version.h
1007 (echo \#define LINUX_VERSION_CODE $(shell \ 1007 (echo \#define LINUX_VERSION_CODE $(shell \
1008 expr $(VERSION) \* 65536 + $(PATCHLEVEL) \* 256 + $(SUBLEVEL)); \ 1008 expr $(VERSION) \* 65536 + 0$(PATCHLEVEL) \* 256 + 0$(SUBLEVEL)); \
1009 echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';) 1009 echo '#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))';)
1010endef 1010endef
1011 1011
@@ -1110,11 +1110,6 @@ modules_install: _modinst_ _modinst_post
1110 1110
1111PHONY += _modinst_ 1111PHONY += _modinst_
1112_modinst_: 1112_modinst_:
1113 @if [ -z "`$(DEPMOD) -V 2>/dev/null | grep module-init-tools`" ]; then \
1114 echo "Warning: you may need to install module-init-tools"; \
1115 echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt";\
1116 sleep 1; \
1117 fi
1118 @rm -rf $(MODLIB)/kernel 1113 @rm -rf $(MODLIB)/kernel
1119 @rm -f $(MODLIB)/source 1114 @rm -f $(MODLIB)/source
1120 @mkdir -p $(MODLIB)/kernel 1115 @mkdir -p $(MODLIB)/kernel
@@ -1531,12 +1526,8 @@ quiet_cmd_rmfiles = $(if $(wildcard $(rm-files)),CLEAN $(wildcard $(rm-files))
1531 1526
1532# Run depmod only if we have System.map and depmod is executable 1527# Run depmod only if we have System.map and depmod is executable
1533quiet_cmd_depmod = DEPMOD $(KERNELRELEASE) 1528quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
1534 cmd_depmod = \ 1529 cmd_depmod = $(CONFIG_SHELL) $(srctree)/scripts/depmod.sh $(DEPMOD) \
1535 if [ -r System.map -a -x $(DEPMOD) ]; then \ 1530 $(KERNELRELEASE)
1536 $(DEPMOD) -ae -F System.map \
1537 $(if $(strip $(INSTALL_MOD_PATH)), -b $(INSTALL_MOD_PATH) ) \
1538 $(KERNELRELEASE); \
1539 fi
1540 1531
1541# Create temporary dir for module support files 1532# Create temporary dir for module support files
1542# clean it up only when building all modules 1533# clean it up only when building all modules
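
The version.h rule touched above packs the version triple into a single integer; prefixing PATCHLEVEL and SUBLEVEL with 0 keeps the expr arithmetic valid when either is empty (e.g. a plain "3.0" release with no SUBLEVEL). The encoding itself can be checked with a trivial userspace program:

    /* Sketch: the value filechk_version.h emits as LINUX_VERSION_CODE.
     * For VERSION=3, PATCHLEVEL=0, SUBLEVEL=0 (or empty, thanks to the
     * leading zeros) this is 3*65536 + 0*256 + 0 = 196608 = 0x030000. */
    #include <stdio.h>

    #define KERNEL_VERSION(a, b, c) (((a) << 16) + ((b) << 8) + (c))

    int main(void)
    {
            printf("0x%06x\n", KERNEL_VERSION(3, 0, 0));  /* prints 0x030000 */
            return 0;
    }
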
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 376f22130791..326f0a2d56e5 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -409,7 +409,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char __user *, name, int, namelen)
409 return -EFAULT; 409 return -EFAULT;
410 410
411 len = namelen; 411 len = namelen;
412 if (namelen > 32) 412 if (len > 32)
413 len = 32; 413 len = 32;
414 414
415 down_read(&uts_sem); 415 down_read(&uts_sem);
@@ -594,7 +594,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, command, char __user *, buf, long, count)
594 down_read(&uts_sem); 594 down_read(&uts_sem);
595 res = sysinfo_table[offset]; 595 res = sysinfo_table[offset];
596 len = strlen(res)+1; 596 len = strlen(res)+1;
597 if (len > count) 597 if ((unsigned long)len > (unsigned long)count)
598 len = count; 598 len = count;
599 if (copy_to_user(buf, res, len)) 599 if (copy_to_user(buf, res, len))
600 err = -EFAULT; 600 err = -EFAULT;
@@ -649,7 +649,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned long, op, void __user *, buffer,
649 return 1; 649 return 1;
650 650
651 case GSI_GET_HWRPB: 651 case GSI_GET_HWRPB:
652 if (nbytes < sizeof(*hwrpb)) 652 if (nbytes > sizeof(*hwrpb))
653 return -EINVAL; 653 return -EINVAL;
654 if (copy_to_user(buffer, hwrpb, nbytes) != 0) 654 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
655 return -EFAULT; 655 return -EFAULT;
@@ -1008,6 +1008,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
1008{ 1008{
1009 struct rusage r; 1009 struct rusage r;
1010 long ret, err; 1010 long ret, err;
1011 unsigned int status = 0;
1011 mm_segment_t old_fs; 1012 mm_segment_t old_fs;
1012 1013
1013 if (!ur) 1014 if (!ur)
@@ -1016,13 +1017,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
1016 old_fs = get_fs(); 1017 old_fs = get_fs();
1017 1018
1018 set_fs (KERNEL_DS); 1019 set_fs (KERNEL_DS);
1019 ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r); 1020 ret = sys_wait4(pid, (unsigned int __user *) &status, options,
1021 (struct rusage __user *) &r);
1020 set_fs (old_fs); 1022 set_fs (old_fs);
1021 1023
1022 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur))) 1024 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
1023 return -EFAULT; 1025 return -EFAULT;
1024 1026
1025 err = 0; 1027 err = 0;
1028 err |= put_user(status, ustatus);
1026 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec); 1029 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
1027 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec); 1030 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
1028 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec); 1031 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index f9da41921c52..942fad97e447 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -691,9 +691,9 @@ proc_types:
691 691
692 .word 0x41069260 @ ARM926EJ-S (v5TEJ) 692 .word 0x41069260 @ ARM926EJ-S (v5TEJ)
693 .word 0xff0ffff0 693 .word 0xff0ffff0
694 b __arm926ejs_mmu_cache_on 694 W(b) __arm926ejs_mmu_cache_on
695 b __armv4_mmu_cache_off 695 W(b) __armv4_mmu_cache_off
696 b __armv5tej_mmu_cache_flush 696 W(b) __armv5tej_mmu_cache_flush
697 697
698 .word 0x00007000 @ ARM7 IDs 698 .word 0x00007000 @ ARM7 IDs
699 .word 0x0000f000 699 .word 0x0000f000
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 889922ad229c..67b5abb6f857 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -157,7 +157,7 @@ CONFIG_LEDS_GPIO=m
157CONFIG_LEDS_TRIGGERS=y 157CONFIG_LEDS_TRIGGERS=y
158CONFIG_LEDS_TRIGGER_TIMER=m 158CONFIG_LEDS_TRIGGER_TIMER=m
159CONFIG_LEDS_TRIGGER_HEARTBEAT=m 159CONFIG_LEDS_TRIGGER_HEARTBEAT=m
160CONFIG_RTC_CLASS=m 160CONFIG_RTC_CLASS=y
161CONFIG_EXT2_FS=y 161CONFIG_EXT2_FS=y
162CONFIG_EXT3_FS=y 162CONFIG_EXT3_FS=y
163CONFIG_XFS_FS=m 163CONFIG_XFS_FS=m
diff --git a/arch/arm/configs/netx_defconfig b/arch/arm/configs/netx_defconfig
index 316af5479d90..9c0ad7993986 100644
--- a/arch/arm/configs/netx_defconfig
+++ b/arch/arm/configs/netx_defconfig
@@ -60,7 +60,7 @@ CONFIG_FB_ARMCLCD=y
60# CONFIG_VGA_CONSOLE is not set 60# CONFIG_VGA_CONSOLE is not set
61CONFIG_FRAMEBUFFER_CONSOLE=y 61CONFIG_FRAMEBUFFER_CONSOLE=y
62CONFIG_LOGO=y 62CONFIG_LOGO=y
63CONFIG_RTC_CLASS=m 63CONFIG_RTC_CLASS=y
64CONFIG_INOTIFY=y 64CONFIG_INOTIFY=y
65CONFIG_TMPFS=y 65CONFIG_TMPFS=y
66CONFIG_JFFS2_FS=y 66CONFIG_JFFS2_FS=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 8b0c717378fa..1d01ddd33122 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -142,7 +142,7 @@ CONFIG_USB_GADGETFS=m
142CONFIG_USB_FILE_STORAGE=m 142CONFIG_USB_FILE_STORAGE=m
143CONFIG_USB_G_SERIAL=m 143CONFIG_USB_G_SERIAL=m
144CONFIG_USB_G_PRINTER=m 144CONFIG_USB_G_PRINTER=m
145CONFIG_RTC_CLASS=m 145CONFIG_RTC_CLASS=y
146CONFIG_RTC_DRV_DS1307=m 146CONFIG_RTC_DRV_DS1307=m
147CONFIG_RTC_DRV_SA1100=m 147CONFIG_RTC_DRV_SA1100=m
148CONFIG_EXT2_FS=m 148CONFIG_EXT2_FS=m
diff --git a/arch/arm/configs/xcep_defconfig b/arch/arm/configs/xcep_defconfig
index 5b5504143647..721832ffe2d7 100644
--- a/arch/arm/configs/xcep_defconfig
+++ b/arch/arm/configs/xcep_defconfig
@@ -73,7 +73,7 @@ CONFIG_SENSORS_MAX6650=m
73# CONFIG_VGA_CONSOLE is not set 73# CONFIG_VGA_CONSOLE is not set
74# CONFIG_HID_SUPPORT is not set 74# CONFIG_HID_SUPPORT is not set
75# CONFIG_USB_SUPPORT is not set 75# CONFIG_USB_SUPPORT is not set
76CONFIG_RTC_CLASS=m 76CONFIG_RTC_CLASS=y
77CONFIG_RTC_DRV_SA1100=m 77CONFIG_RTC_DRV_SA1100=m
78CONFIG_DMADEVICES=y 78CONFIG_DMADEVICES=y
79# CONFIG_DNOTIFY is not set 79# CONFIG_DNOTIFY is not set
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index 960f65514d88..59577ad3f4ef 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -158,7 +158,7 @@ CONFIG_LEDS_TRIGGER_HEARTBEAT=m
158CONFIG_LEDS_TRIGGER_BACKLIGHT=m 158CONFIG_LEDS_TRIGGER_BACKLIGHT=m
159CONFIG_LEDS_TRIGGER_GPIO=m 159CONFIG_LEDS_TRIGGER_GPIO=m
160CONFIG_LEDS_TRIGGER_DEFAULT_ON=m 160CONFIG_LEDS_TRIGGER_DEFAULT_ON=m
161CONFIG_RTC_CLASS=m 161CONFIG_RTC_CLASS=y
162CONFIG_RTC_DRV_ISL1208=m 162CONFIG_RTC_DRV_ISL1208=m
163CONFIG_RTC_DRV_PXA=m 163CONFIG_RTC_DRV_PXA=m
164CONFIG_EXT2_FS=y 164CONFIG_EXT2_FS=y
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index a701e4226a6c..0cdd7b456cb2 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -76,6 +76,9 @@ struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
76 unsigned long dt_root; 76 unsigned long dt_root;
77 const char *model; 77 const char *model;
78 78
79 if (!dt_phys)
80 return NULL;
81
79 devtree = phys_to_virt(dt_phys); 82 devtree = phys_to_virt(dt_phys);
80 83
81 /* check device tree validity */ 84 /* check device tree validity */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index e8d885676807..90c62cd51ca9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -435,6 +435,10 @@ __irq_usr:
435 usr_entry 435 usr_entry
436 kuser_cmpxchg_check 436 kuser_cmpxchg_check
437 437
438#ifdef CONFIG_IRQSOFF_TRACER
439 bl trace_hardirqs_off
440#endif
441
438 get_thread_info tsk 442 get_thread_info tsk
439#ifdef CONFIG_PREEMPT 443#ifdef CONFIG_PREEMPT
440 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count 444 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
@@ -453,7 +457,7 @@ __irq_usr:
453#endif 457#endif
454 458
455 mov why, #0 459 mov why, #0
456 b ret_to_user 460 b ret_to_user_from_irq
457 UNWIND(.fnend ) 461 UNWIND(.fnend )
458ENDPROC(__irq_usr) 462ENDPROC(__irq_usr)
459 463
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 1e7b04a40a31..b2a27b6b0046 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -64,6 +64,7 @@ work_resched:
64ENTRY(ret_to_user) 64ENTRY(ret_to_user)
65ret_slow_syscall: 65ret_slow_syscall:
66 disable_irq @ disable interrupts 66 disable_irq @ disable interrupts
67ENTRY(ret_to_user_from_irq)
67 ldr r1, [tsk, #TI_FLAGS] 68 ldr r1, [tsk, #TI_FLAGS]
68 tst r1, #_TIF_WORK_MASK 69 tst r1, #_TIF_WORK_MASK
69 bne work_pending 70 bne work_pending
@@ -75,6 +76,7 @@ no_work_pending:
75 arch_ret_to_user r1, lr 76 arch_ret_to_user r1, lr
76 77
77 restore_user_regs fast = 0, offset = 0 78 restore_user_regs fast = 0, offset = 0
79ENDPROC(ret_to_user_from_irq)
78ENDPROC(ret_to_user) 80ENDPROC(ret_to_user)
79 81
80/* 82/*
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index d52eec268b47..6807cb1e76dd 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -139,7 +139,7 @@ static void dump_instr(const char *lvl, struct pt_regs *regs)
139 fs = get_fs(); 139 fs = get_fs();
140 set_fs(KERNEL_DS); 140 set_fs(KERNEL_DS);
141 141
142 for (i = -4; i < 1; i++) { 142 for (i = -4; i < 1 + !!thumb; i++) {
143 unsigned int val, bad; 143 unsigned int val, bad;
144 144
145 if (thumb) 145 if (thumb)
@@ -563,7 +563,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
563 if (!pmd_present(*pmd)) 563 if (!pmd_present(*pmd))
564 goto bad_access; 564 goto bad_access;
565 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 565 pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
566 if (!pte_present(*pte) || !pte_dirty(*pte)) { 566 if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
567 pte_unmap_unlock(pte, ptl); 567 pte_unmap_unlock(pte, ptl);
568 goto bad_access; 568 goto bad_access;
569 } 569 }
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 4e66881c7aee..fc4e98ea7543 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -494,7 +494,7 @@ static struct platform_device da850_mcasp_device = {
494 .resource = da850_mcasp_resources, 494 .resource = da850_mcasp_resources,
495}; 495};
496 496
497struct platform_device davinci_pcm_device = { 497static struct platform_device davinci_pcm_device = {
498 .name = "davinci-pcm-audio", 498 .name = "davinci-pcm-audio",
499 .id = -1, 499 .id = -1,
500}; 500};
diff --git a/arch/arm/mach-davinci/devices.c b/arch/arm/mach-davinci/devices.c
index 8f4f736aa267..806a2f02b980 100644
--- a/arch/arm/mach-davinci/devices.c
+++ b/arch/arm/mach-davinci/devices.c
@@ -298,7 +298,7 @@ static void davinci_init_wdt(void)
298 298
299/*-------------------------------------------------------------------------*/ 299/*-------------------------------------------------------------------------*/
300 300
301struct platform_device davinci_pcm_device = { 301static struct platform_device davinci_pcm_device = {
302 .name = "davinci-pcm-audio", 302 .name = "davinci-pcm-audio",
303 .id = -1, 303 .id = -1,
304}; 304};
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index a0b838894ac9..e7221398e5af 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -252,9 +252,11 @@ static struct irq_chip gpio_irqchip = {
252static void 252static void
253gpio_irq_handler(unsigned irq, struct irq_desc *desc) 253gpio_irq_handler(unsigned irq, struct irq_desc *desc)
254{ 254{
255 struct davinci_gpio_regs __iomem *g = irq2regs(irq); 255 struct davinci_gpio_regs __iomem *g;
256 u32 mask = 0xffff; 256 u32 mask = 0xffff;
257 257
258 g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc);
259
258 /* we only care about one bank */ 260 /* we only care about one bank */
259 if (irq & 1) 261 if (irq & 1)
260 mask <<= 16; 262 mask <<= 16;
@@ -422,8 +424,7 @@ static int __init davinci_gpio_irq_setup(void)
422 424
423 /* set up all irqs in this bank */ 425 /* set up all irqs in this bank */
424 irq_set_chained_handler(bank_irq, gpio_irq_handler); 426 irq_set_chained_handler(bank_irq, gpio_irq_handler);
425 irq_set_chip_data(bank_irq, (__force void *)g); 427 irq_set_handler_data(bank_irq, (__force void *)g);
426 irq_set_handler_data(bank_irq, (void *)irq);
427 428
428 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { 429 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
429 irq_set_chip(irq, &gpio_irqchip); 430 irq_set_chip(irq, &gpio_irqchip);
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 82079545adc4..1d4b65fd673e 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -402,11 +402,15 @@ static struct resource ep93xx_eth_resource[] = {
402 } 402 }
403}; 403};
404 404
405static u64 ep93xx_eth_dma_mask = DMA_BIT_MASK(32);
406
405static struct platform_device ep93xx_eth_device = { 407static struct platform_device ep93xx_eth_device = {
406 .name = "ep93xx-eth", 408 .name = "ep93xx-eth",
407 .id = -1, 409 .id = -1,
408 .dev = { 410 .dev = {
409 .platform_data = &ep93xx_eth_data, 411 .platform_data = &ep93xx_eth_data,
412 .coherent_dma_mask = DMA_BIT_MASK(32),
413 .dma_mask = &ep93xx_eth_dma_mask,
410 }, 414 },
411 .num_resources = ARRAY_SIZE(ep93xx_eth_resource), 415 .num_resources = ARRAY_SIZE(ep93xx_eth_resource),
412 .resource = ep93xx_eth_resource, 416 .resource = ep93xx_eth_resource,
diff --git a/arch/arm/mach-exynos4/Kconfig b/arch/arm/mach-exynos4/Kconfig
index b92c1e557145..1435fc31c4b2 100644
--- a/arch/arm/mach-exynos4/Kconfig
+++ b/arch/arm/mach-exynos4/Kconfig
@@ -91,6 +91,11 @@ config EXYNOS4_SETUP_FIMC
91 help 91 help
92 Common setup code for the camera interfaces. 92 Common setup code for the camera interfaces.
93 93
94config EXYNOS4_SETUP_USB_PHY
95 bool
96 help
97 Common setup code for USB PHY controller
98
94# machine support 99# machine support
95 100
96menu "EXYNOS4 Machines" 101menu "EXYNOS4 Machines"
@@ -176,6 +181,7 @@ config MACH_NURI
176 select EXYNOS4_SETUP_I2C3 181 select EXYNOS4_SETUP_I2C3
177 select EXYNOS4_SETUP_I2C5 182 select EXYNOS4_SETUP_I2C5
178 select EXYNOS4_SETUP_SDHCI 183 select EXYNOS4_SETUP_SDHCI
184 select EXYNOS4_SETUP_USB_PHY
179 select SAMSUNG_DEV_PWM 185 select SAMSUNG_DEV_PWM
180 help 186 help
181 Machine support for Samsung Mobile NURI Board. 187 Machine support for Samsung Mobile NURI Board.
diff --git a/arch/arm/mach-exynos4/Makefile b/arch/arm/mach-exynos4/Makefile
index a9bb94fabaa7..60fe5ecf3599 100644
--- a/arch/arm/mach-exynos4/Makefile
+++ b/arch/arm/mach-exynos4/Makefile
@@ -56,4 +56,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
56obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o 56obj-$(CONFIG_EXYNOS4_SETUP_SDHCI) += setup-sdhci.o
57obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o 57obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
58 58
59obj-$(CONFIG_USB_SUPPORT) += usb-phy.o 59obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 08813a6f66b1..9babe4473e88 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -98,7 +98,7 @@ static struct map_desc exynos4_iodesc[] __initdata = {
98 .length = SZ_4K, 98 .length = SZ_4K,
99 .type = MT_DEVICE, 99 .type = MT_DEVICE,
100 }, { 100 }, {
101 .virtual = (unsigned long)S5P_VA_USB_HSPHY, 101 .virtual = (unsigned long)S3C_VA_USB_HSPHY,
102 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY), 102 .pfn = __phys_to_pfn(EXYNOS4_PA_HSPHY),
103 .length = SZ_4K, 103 .length = SZ_4K,
104 .type = MT_DEVICE, 104 .type = MT_DEVICE,
diff --git a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
index 703118d5173c..c337cf3a71bf 100644
--- a/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
+++ b/arch/arm/mach-exynos4/include/mach/regs-usb-phy.h
@@ -11,7 +11,7 @@
11#ifndef __PLAT_S5P_REGS_USB_PHY_H 11#ifndef __PLAT_S5P_REGS_USB_PHY_H
12#define __PLAT_S5P_REGS_USB_PHY_H 12#define __PLAT_S5P_REGS_USB_PHY_H
13 13
14#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S5P_VA_USB_HSPHY) 14#define EXYNOS4_HSOTG_PHYREG(x) ((x) + S3C_VA_USB_HSPHY)
15 15
16#define EXYNOS4_PHYPWR EXYNOS4_HSOTG_PHYREG(0x00) 16#define EXYNOS4_PHYPWR EXYNOS4_HSOTG_PHYREG(0x00)
17#define PHY1_HSIC_NORMAL_MASK (0xf << 9) 17#define PHY1_HSIC_NORMAL_MASK (0xf << 9)
diff --git a/arch/arm/mach-exynos4/usb-phy.c b/arch/arm/mach-exynos4/setup-usb-phy.c
index 0883c1b824b9..0883c1b824b9 100644
--- a/arch/arm/mach-exynos4/usb-phy.c
+++ b/arch/arm/mach-exynos4/setup-usb-phy.c
diff --git a/arch/arm/mach-exynos4/time.c b/arch/arm/mach-exynos4/time.c
index 86b9fa0d3639..ebb8f38d5405 100644
--- a/arch/arm/mach-exynos4/time.c
+++ b/arch/arm/mach-exynos4/time.c
@@ -206,6 +206,7 @@ static cycle_t exynos4_pwm4_read(struct clocksource *cs)
206 return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40)); 206 return (cycle_t) ~__raw_readl(S3C_TIMERREG(0x40));
207} 207}
208 208
209#ifdef CONFIG_PM
209static void exynos4_pwm4_resume(struct clocksource *cs) 210static void exynos4_pwm4_resume(struct clocksource *cs)
210{ 211{
211 unsigned long pclk; 212 unsigned long pclk;
@@ -218,6 +219,7 @@ static void exynos4_pwm4_resume(struct clocksource *cs)
218 exynos4_pwm_init(4, ~0); 219 exynos4_pwm_init(4, ~0);
219 exynos4_pwm_start(4, 1); 220 exynos4_pwm_start(4, 1);
220} 221}
222#endif
221 223
222struct clocksource pwm_clocksource = { 224struct clocksource pwm_clocksource = {
223 .name = "pwm_timer4", 225 .name = "pwm_timer4",
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 5f1f9867fc70..121ad1d4fa39 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -103,6 +103,7 @@ static void __init footbridge_timer_init(void)
103 clockevents_calc_mult_shift(ce, mem_fclk_21285, 5); 103 clockevents_calc_mult_shift(ce, mem_fclk_21285, 5);
104 ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce); 104 ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce);
105 ce->min_delta_ns = clockevent_delta2ns(0x000004, ce); 105 ce->min_delta_ns = clockevent_delta2ns(0x000004, ce);
106 ce->cpumask = cpumask_of(smp_processor_id());
106 107
107 clockevents_register_device(ce); 108 clockevents_register_device(ce);
108} 109}
diff --git a/arch/arm/mach-footbridge/include/mach/debug-macro.S b/arch/arm/mach-footbridge/include/mach/debug-macro.S
index 30b971d65815..1be2eeb7a0a0 100644
--- a/arch/arm/mach-footbridge/include/mach/debug-macro.S
+++ b/arch/arm/mach-footbridge/include/mach/debug-macro.S
@@ -26,6 +26,7 @@
26#include <asm/hardware/debug-8250.S> 26#include <asm/hardware/debug-8250.S>
27 27
28#else 28#else
29#include <mach/hardware.h>
29 /* For EBSA285 debugging */ 30 /* For EBSA285 debugging */
30 .equ dc21285_high, ARMCSR_BASE & 0xff000000 31 .equ dc21285_high, ARMCSR_BASE & 0xff000000
31 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff 32 .equ dc21285_low, ARMCSR_BASE & 0x00ffffff
@@ -36,8 +37,8 @@
36 .else 37 .else
37 mov \rp, #0 38 mov \rp, #0
38 .endif 39 .endif
39 orr \rv, \rp, #0x42000000 40 orr \rv, \rp, #dc21285_high
40 orr \rp, \rp, #dc21285_high 41 orr \rp, \rp, #0x42000000
41 .endm 42 .endm
42 43
43 .macro senduart,rd,rx 44 .macro senduart,rd,rx
diff --git a/arch/arm/mach-msm/timer.c b/arch/arm/mach-msm/timer.c
index 38b95e949d13..63621f152c98 100644
--- a/arch/arm/mach-msm/timer.c
+++ b/arch/arm/mach-msm/timer.c
@@ -23,6 +23,8 @@
23#include <linux/io.h> 23#include <linux/io.h>
24 24
25#include <asm/mach/time.h> 25#include <asm/mach/time.h>
26#include <asm/hardware/gic.h>
27
26#include <mach/msm_iomap.h> 28#include <mach/msm_iomap.h>
27#include <mach/cpu.h> 29#include <mach/cpu.h>
28 30
@@ -55,10 +57,12 @@ enum timer_location {
55#if defined(CONFIG_ARCH_QSD8X50) 57#if defined(CONFIG_ARCH_QSD8X50)
56#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */ 58#define DGT_HZ (19200000 / 4) /* 19.2 MHz / 4 by default */
57#define MSM_DGT_SHIFT (0) 59#define MSM_DGT_SHIFT (0)
58#elif defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60) || \ 60#elif defined(CONFIG_ARCH_MSM7X30)
59 defined(CONFIG_ARCH_MSM8960)
60#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */ 61#define DGT_HZ (24576000 / 4) /* 24.576 MHz (LPXO) / 4 by default */
61#define MSM_DGT_SHIFT (0) 62#define MSM_DGT_SHIFT (0)
63#elif defined(CONFIG_ARCH_MSM8X60) || defined(CONFIG_ARCH_MSM8960)
64#define DGT_HZ (27000000 / 4) /* 27 MHz (PXO) / 4 by default */
65#define MSM_DGT_SHIFT (0)
62#else 66#else
63#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */ 67#define DGT_HZ 19200000 /* 19.2 MHz or 600 KHz after shift */
64#define MSM_DGT_SHIFT (5) 68#define MSM_DGT_SHIFT (5)
@@ -100,7 +104,11 @@ static cycle_t msm_read_timer_count(struct clocksource *cs)
100{ 104{
101 struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource); 105 struct msm_clock *clk = container_of(cs, struct msm_clock, clocksource);
102 106
103 return readl(clk->global_counter); 107 /*
108 * Shift timer count down by a constant due to unreliable lower bits
109 * on some targets.
110 */
111 return readl(clk->global_counter) >> clk->shift;
104} 112}
105 113
106static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt) 114static struct msm_clock *clockevent_to_clock(struct clock_event_device *evt)
diff --git a/arch/arm/mach-mxs/ocotp.c b/arch/arm/mach-mxs/ocotp.c
index 65157a35dbba..54add60f94c9 100644
--- a/arch/arm/mach-mxs/ocotp.c
+++ b/arch/arm/mach-mxs/ocotp.c
@@ -16,6 +16,8 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/mutex.h> 17#include <linux/mutex.h>
18 18
19#include <asm/processor.h> /* for cpu_relax() */
20
19#include <mach/mxs.h> 21#include <mach/mxs.h>
20 22
21#define OCOTP_WORD_OFFSET 0x20 23#define OCOTP_WORD_OFFSET 0x20
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index af98117043d2..5b114d1558c8 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -4,14 +4,14 @@
4 4
5# Common support 5# Common support
6obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o 6obj-y := io.o id.o sram.o time.o irq.o mux.o flash.o serial.o devices.o dma.o
7obj-y += clock.o clock_data.o opp_data.o reset.o 7obj-y += clock.o clock_data.o opp_data.o reset.o pm_bus.o
8 8
9obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o 9obj-$(CONFIG_OMAP_MCBSP) += mcbsp.o
10 10
11obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o 11obj-$(CONFIG_OMAP_32K_TIMER) += timer32k.o
12 12
13# Power Management 13# Power Management
14obj-$(CONFIG_PM) += pm.o sleep.o pm_bus.o 14obj-$(CONFIG_PM) += pm.o sleep.o
15 15
16# DSP 16# DSP
17obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o 17obj-$(CONFIG_OMAP_MBOX_FWK) += mailbox_mach.o
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index d8559344c6e2..f5a52204b89f 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -284,14 +284,15 @@ static int __init omap1_system_dma_init(void)
284 dma_base = ioremap(res[0].start, resource_size(&res[0])); 284 dma_base = ioremap(res[0].start, resource_size(&res[0]));
285 if (!dma_base) { 285 if (!dma_base) {
286 pr_err("%s: Unable to ioremap\n", __func__); 286 pr_err("%s: Unable to ioremap\n", __func__);
287 return -ENODEV; 287 ret = -ENODEV;
288 goto exit_device_put;
288 } 289 }
289 290
290 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res)); 291 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
291 if (ret) { 292 if (ret) {
292 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n", 293 dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
293 __func__, pdev->name, pdev->id); 294 __func__, pdev->name, pdev->id);
294 goto exit_device_del; 295 goto exit_device_put;
295 } 296 }
296 297
297 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); 298 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
@@ -299,7 +300,7 @@ static int __init omap1_system_dma_init(void)
299 dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n", 300 dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
300 __func__, pdev->name); 301 __func__, pdev->name);
301 ret = -ENOMEM; 302 ret = -ENOMEM;
302 goto exit_device_put; 303 goto exit_device_del;
303 } 304 }
304 305
305 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL); 306 d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
@@ -380,10 +381,10 @@ exit_release_d:
380 kfree(d); 381 kfree(d);
381exit_release_p: 382exit_release_p:
382 kfree(p); 383 kfree(p);
383exit_device_put:
384 platform_device_put(pdev);
385exit_device_del: 384exit_device_del:
386 platform_device_del(pdev); 385 platform_device_del(pdev);
386exit_device_put:
387 platform_device_put(pdev);
387 388
388 return ret; 389 return ret;
389} 390}
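The error-path rework above does two things: an ioremap() failure now flows through the cleanup labels instead of returning directly (which previously leaked the allocated platform_device), and the labels are reordered so that platform_device_del() runs before the final platform_device_put(), i.e. the unwind mirrors the reverse order of acquisition. A minimal sketch of that goto-ladder convention, with assumed addresses and names:

    #include <linux/init.h>
    #include <linux/platform_device.h>
    #include <linux/io.h>

    #define EXAMPLE_PHYS 0x40000000     /* assumed */
    #define EXAMPLE_SIZE 0x1000         /* assumed */

    static struct platform_device *example_pdev;
    static void __iomem *example_base;

    static int __init example_init(void)
    {
        int ret;

        example_pdev = platform_device_alloc("example", 0);
        if (!example_pdev)
            return -ENOMEM;

        example_base = ioremap(EXAMPLE_PHYS, EXAMPLE_SIZE);
        if (!example_base) {
            ret = -ENODEV;
            goto exit_device_put;       /* only the allocation needs undoing */
        }

        ret = platform_device_add(example_pdev);
        if (ret)
            goto exit_unmap;

        return 0;

    exit_unmap:
        iounmap(example_base);
    exit_device_put:
        platform_device_put(example_pdev); /* put, not del: never added */
        return ret;
    }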
diff --git a/arch/arm/mach-omap1/pm_bus.c b/arch/arm/mach-omap1/pm_bus.c
index fe31d933f0ed..334fb8871bc3 100644
--- a/arch/arm/mach-omap1/pm_bus.c
+++ b/arch/arm/mach-omap1/pm_bus.c
@@ -56,9 +56,13 @@ static struct dev_power_domain default_power_domain = {
56 USE_PLATFORM_PM_SLEEP_OPS 56 USE_PLATFORM_PM_SLEEP_OPS
57 }, 57 },
58}; 58};
59#define OMAP1_PWR_DOMAIN (&default_power_domain)
60#else
61#define OMAP1_PWR_DOMAIN NULL
62#endif /* CONFIG_PM_RUNTIME */
59 63
60static struct pm_clk_notifier_block platform_bus_notifier = { 64static struct pm_clk_notifier_block platform_bus_notifier = {
61 .pwr_domain = &default_power_domain, 65 .pwr_domain = OMAP1_PWR_DOMAIN,
62 .con_ids = { "ick", "fck", NULL, }, 66 .con_ids = { "ick", "fck", NULL, },
63}; 67};
64 68
@@ -72,4 +76,4 @@ static int __init omap1_pm_runtime_init(void)
72 return 0; 76 return 0;
73} 77}
74core_initcall(omap1_pm_runtime_init); 78core_initcall(omap1_pm_runtime_init);
75#endif /* CONFIG_PM_RUNTIME */ 79
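The pm_bus.c hunk keeps the #ifdef at the definition of OMAP1_PWR_DOMAIN so that platform_bus_notifier can be initialized unconditionally, with NULL standing in when runtime PM is disabled. A sketch of that pattern with made-up names (struct and config symbol are assumptions, not kernel API):

    struct foo_ops { int (*start)(void); };       /* assumed */
    struct foo_consumer { struct foo_ops *ops; }; /* assumed */

    #ifdef CONFIG_FOO_RUNTIME                     /* assumed config symbol */
    static struct foo_ops real_ops = {
        .start = NULL,
    };
    #define FOO_OPS (&real_ops)
    #else
    #define FOO_OPS NULL
    #endif

    /* The user of the symbol is written once, with no #ifdef of its own. */
    static struct foo_consumer consumer = {
        .ops = FOO_OPS,
    };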
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index d54969be0a54..5de6eac0a725 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -26,13 +26,13 @@
26#include <linux/err.h> 26#include <linux/err.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/gpio.h>
29 30
30#include <mach/hardware.h> 31#include <mach/hardware.h>
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 33#include <asm/mach/arch.h>
33#include <asm/mach/map.h> 34#include <asm/mach/map.h>
34 35
35#include <mach/gpio.h>
36#include <plat/board.h> 36#include <plat/board.h>
37#include <plat/common.h> 37#include <plat/common.h>
38#include <plat/gpmc.h> 38#include <plat/gpmc.h>
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index ae2963a98041..5dac974be625 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -622,19 +622,19 @@ static struct omap_device_pad serial3_pads[] __initdata = {
622 OMAP_MUX_MODE0), 622 OMAP_MUX_MODE0),
623}; 623};
624 624
625static struct omap_board_data serial1_data = { 625static struct omap_board_data serial1_data __initdata = {
626 .id = 0, 626 .id = 0,
627 .pads = serial1_pads, 627 .pads = serial1_pads,
628 .pads_cnt = ARRAY_SIZE(serial1_pads), 628 .pads_cnt = ARRAY_SIZE(serial1_pads),
629}; 629};
630 630
631static struct omap_board_data serial2_data = { 631static struct omap_board_data serial2_data __initdata = {
632 .id = 1, 632 .id = 1,
633 .pads = serial2_pads, 633 .pads = serial2_pads,
634 .pads_cnt = ARRAY_SIZE(serial2_pads), 634 .pads_cnt = ARRAY_SIZE(serial2_pads),
635}; 635};
636 636
637static struct omap_board_data serial3_data = { 637static struct omap_board_data serial3_data __initdata = {
638 .id = 2, 638 .id = 2,
639 .pads = serial3_pads, 639 .pads = serial3_pads,
640 .pads_cnt = ARRAY_SIZE(serial3_pads), 640 .pads_cnt = ARRAY_SIZE(serial3_pads),
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 73fa90bb6953..63de2d396e2d 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -258,7 +258,7 @@ static struct gpio sdp4430_eth_gpios[] __initdata = {
258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" }, 258 { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" },
259}; 259};
260 260
261static int omap_ethernet_init(void) 261static int __init omap_ethernet_init(void)
262{ 262{
263 int status; 263 int status;
264 264
@@ -322,6 +322,7 @@ static struct omap2_hsmmc_info mmc[] = {
322 .gpio_wp = -EINVAL, 322 .gpio_wp = -EINVAL,
323 .nonremovable = true, 323 .nonremovable = true,
324 .ocr_mask = MMC_VDD_29_30, 324 .ocr_mask = MMC_VDD_29_30,
325 .no_off_init = true,
325 }, 326 },
326 { 327 {
327 .mmc = 1, 328 .mmc = 1,
@@ -681,19 +682,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
681 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 682 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
682}; 683};
683 684
684static struct omap_board_data serial2_data = { 685static struct omap_board_data serial2_data __initdata = {
685 .id = 1, 686 .id = 1,
686 .pads = serial2_pads, 687 .pads = serial2_pads,
687 .pads_cnt = ARRAY_SIZE(serial2_pads), 688 .pads_cnt = ARRAY_SIZE(serial2_pads),
688}; 689};
689 690
690static struct omap_board_data serial3_data = { 691static struct omap_board_data serial3_data __initdata = {
691 .id = 2, 692 .id = 2,
692 .pads = serial3_pads, 693 .pads = serial3_pads,
693 .pads_cnt = ARRAY_SIZE(serial3_pads), 694 .pads_cnt = ARRAY_SIZE(serial3_pads),
694}; 695};
695 696
696static struct omap_board_data serial4_data = { 697static struct omap_board_data serial4_data __initdata = {
697 .id = 3, 698 .id = 3,
698 .pads = serial4_pads, 699 .pads = serial4_pads,
699 .pads_cnt = ARRAY_SIZE(serial4_pads), 700 .pads_cnt = ARRAY_SIZE(serial4_pads),
@@ -729,7 +730,7 @@ static void __init omap_4430sdp_init(void)
729 730
730 if (omap_rev() == OMAP4430_REV_ES1_0) 731 if (omap_rev() == OMAP4430_REV_ES1_0)
731 package = OMAP_PACKAGE_CBL; 732 package = OMAP_PACKAGE_CBL;
732 omap4_mux_init(board_mux, package); 733 omap4_mux_init(board_mux, NULL, package);
733 734
734 omap_board_config = sdp4430_config; 735 omap_board_config = sdp4430_config;
735 omap_board_config_size = ARRAY_SIZE(sdp4430_config); 736 omap_board_config_size = ARRAY_SIZE(sdp4430_config);
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index f3beb8eeef77..b124bdfb4239 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -27,13 +27,13 @@
27#include <linux/err.h> 27#include <linux/err.h>
28#include <linux/clk.h> 28#include <linux/clk.h>
29#include <linux/smc91x.h> 29#include <linux/smc91x.h>
30#include <linux/gpio.h>
30 31
31#include <mach/hardware.h> 32#include <mach/hardware.h>
32#include <asm/mach-types.h> 33#include <asm/mach-types.h>
33#include <asm/mach/arch.h> 34#include <asm/mach/arch.h>
34#include <asm/mach/flash.h> 35#include <asm/mach/flash.h>
35 36
36#include <mach/gpio.h>
37#include <plat/led.h> 37#include <plat/led.h>
38#include <plat/usb.h> 38#include <plat/usb.h>
39#include <plat/board.h> 39#include <plat/board.h>
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c63115bc1536..77456dec93ea 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -63,8 +63,6 @@
63#define SB_T35_SMSC911X_CS 4 63#define SB_T35_SMSC911X_CS 4
64#define SB_T35_SMSC911X_GPIO 65 64#define SB_T35_SMSC911X_GPIO 65
65 65
66#define NAND_BLOCK_SIZE SZ_128K
67
68#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) 66#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
69#include <linux/smsc911x.h> 67#include <linux/smsc911x.h>
70#include <plat/gpmc-smsc911x.h> 68#include <plat/gpmc-smsc911x.h>
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 08f08e812492..c3a9fd35034a 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -48,6 +48,7 @@
48 48
49#include "mux.h" 49#include "mux.h"
50#include "control.h" 50#include "control.h"
51#include "common-board-devices.h"
51 52
52#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) 53#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
53static struct gpio_led cm_t3517_leds[] = { 54static struct gpio_led cm_t3517_leds[] = {
@@ -177,7 +178,7 @@ static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = {
177 .reset_gpio_port[2] = -EINVAL, 178 .reset_gpio_port[2] = -EINVAL,
178}; 179};
179 180
180static int cm_t3517_init_usbh(void) 181static int __init cm_t3517_init_usbh(void)
181{ 182{
182 int err; 183 int err;
183 184
@@ -203,8 +204,6 @@ static inline int cm_t3517_init_usbh(void)
203#endif 204#endif
204 205
205#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) 206#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
206#define NAND_BLOCK_SIZE SZ_128K
207
208static struct mtd_partition cm_t3517_nand_partitions[] = { 207static struct mtd_partition cm_t3517_nand_partitions[] = {
209 { 208 {
210 .name = "xloader", 209 .name = "xloader",
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index cf520d7dd614..34956ec83296 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -61,8 +61,6 @@
61#include "timer-gp.h" 61#include "timer-gp.h"
62#include "common-board-devices.h" 62#include "common-board-devices.h"
63 63
64#define NAND_BLOCK_SIZE SZ_128K
65
66#define OMAP_DM9000_GPIO_IRQ 25 64#define OMAP_DM9000_GPIO_IRQ 25
67#define OMAP3_DEVKIT_TS_GPIO 27 65#define OMAP3_DEVKIT_TS_GPIO 27
68 66
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index be71426359f2..7f21d24bd437 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -54,8 +54,6 @@
54#include "pm.h" 54#include "pm.h"
55#include "common-board-devices.h" 55#include "common-board-devices.h"
56 56
57#define NAND_BLOCK_SIZE SZ_128K
58
59/* 57/*
60 * OMAP3 Beagle revision 58 * OMAP3 Beagle revision
61 * Run time detection of Beagle revision is done by reading GPIO. 59 * Run time detection of Beagle revision is done by reading GPIO.
@@ -106,6 +104,9 @@ static void __init omap3_beagle_init_rev(void)
106 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) 104 beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
107 | (gpio_get_value(173) << 2); 105 | (gpio_get_value(173) << 2);
108 106
107 gpio_free_array(omap3_beagle_rev_gpios,
108 ARRAY_SIZE(omap3_beagle_rev_gpios));
109
109 switch (beagle_rev) { 110 switch (beagle_rev) {
110 case 7: 111 case 7:
111 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); 112 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
@@ -579,6 +580,9 @@ static void __init omap3_beagle_init(void)
579 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, 580 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
580 ARRAY_SIZE(omap3beagle_nand_partitions)); 581 ARRAY_SIZE(omap3beagle_nand_partitions));
581 582
583 /* Ensure msecure is mux'd to be able to set the RTC. */
584 omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);
585
582 /* Ensure SDRC pins are mux'd for self-refresh */ 586 /* Ensure SDRC pins are mux'd for self-refresh */
583 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); 587 omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
584 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); 588 omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 1d10736c6d3c..23f71d40883e 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -30,6 +30,7 @@
30#include <linux/leds.h> 30#include <linux/leds.h>
31#include <linux/input.h> 31#include <linux/input.h>
32#include <linux/input/matrix_keypad.h> 32#include <linux/input/matrix_keypad.h>
33#include <linux/gpio.h>
33#include <linux/gpio_keys.h> 34#include <linux/gpio_keys.h>
34#include <linux/mmc/host.h> 35#include <linux/mmc/host.h>
35#include <linux/mmc/card.h> 36#include <linux/mmc/card.h>
@@ -41,7 +42,6 @@
41 42
42#include <plat/board.h> 43#include <plat/board.h>
43#include <plat/common.h> 44#include <plat/common.h>
44#include <mach/gpio.h>
45#include <mach/hardware.h> 45#include <mach/hardware.h>
46#include <plat/mcspi.h> 46#include <plat/mcspi.h>
47#include <plat/usb.h> 47#include <plat/usb.h>
@@ -57,8 +57,6 @@
57#define PANDORA_WIFI_NRESET_GPIO 23 57#define PANDORA_WIFI_NRESET_GPIO 23
58#define OMAP3_PANDORA_TS_GPIO 94 58#define OMAP3_PANDORA_TS_GPIO 94
59 59
60#define NAND_BLOCK_SIZE SZ_128K
61
62static struct mtd_partition omap3pandora_nand_partitions[] = { 60static struct mtd_partition omap3pandora_nand_partitions[] = {
63 { 61 {
64 .name = "xloader", 62 .name = "xloader",
@@ -86,7 +84,8 @@ static struct mtd_partition omap3pandora_nand_partitions[] = {
86 84
87static struct omap_nand_platform_data pandora_nand_data = { 85static struct omap_nand_platform_data pandora_nand_data = {
88 .cs = 0, 86 .cs = 0,
89 .devsize = 1, /* '0' for 8-bit, '1' for 16-bit device */ 87 .devsize = NAND_BUSWIDTH_16,
88 .xfer_type = NAND_OMAP_PREFETCH_DMA,
90 .parts = omap3pandora_nand_partitions, 89 .parts = omap3pandora_nand_partitions,
91 .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions), 90 .nr_parts = ARRAY_SIZE(omap3pandora_nand_partitions),
92}; 91};
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index 82872d7d313b..5f649faf7377 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -56,8 +56,6 @@
56 56
57#include <asm/setup.h> 57#include <asm/setup.h>
58 58
59#define NAND_BLOCK_SIZE SZ_128K
60
61#define OMAP3_AC_GPIO 136 59#define OMAP3_AC_GPIO 136
62#define OMAP3_TS_GPIO 162 60#define OMAP3_TS_GPIO 162
63#define TB_BL_PWM_TIMER 9 61#define TB_BL_PWM_TIMER 9
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 90485fced973..0cfe2005cb50 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -526,19 +526,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
526 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), 526 OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
527}; 527};
528 528
529static struct omap_board_data serial2_data = { 529static struct omap_board_data serial2_data __initdata = {
530 .id = 1, 530 .id = 1,
531 .pads = serial2_pads, 531 .pads = serial2_pads,
532 .pads_cnt = ARRAY_SIZE(serial2_pads), 532 .pads_cnt = ARRAY_SIZE(serial2_pads),
533}; 533};
534 534
535static struct omap_board_data serial3_data = { 535static struct omap_board_data serial3_data __initdata = {
536 .id = 2, 536 .id = 2,
537 .pads = serial3_pads, 537 .pads = serial3_pads,
538 .pads_cnt = ARRAY_SIZE(serial3_pads), 538 .pads_cnt = ARRAY_SIZE(serial3_pads),
539}; 539};
540 540
541static struct omap_board_data serial4_data = { 541static struct omap_board_data serial4_data __initdata = {
542 .id = 3, 542 .id = 3,
543 .pads = serial4_pads, 543 .pads = serial4_pads,
544 .pads_cnt = ARRAY_SIZE(serial4_pads), 544 .pads_cnt = ARRAY_SIZE(serial4_pads),
@@ -687,7 +687,7 @@ static void __init omap4_panda_init(void)
687 687
688 if (omap_rev() == OMAP4430_REV_ES1_0) 688 if (omap_rev() == OMAP4430_REV_ES1_0)
689 package = OMAP_PACKAGE_CBL; 689 package = OMAP_PACKAGE_CBL;
690 omap4_mux_init(board_mux, package); 690 omap4_mux_init(board_mux, NULL, package);
691 691
692 if (wl12xx_set_platform_data(&omap_panda_wlan_data)) 692 if (wl12xx_set_platform_data(&omap_panda_wlan_data))
693 pr_err("error setting wl12xx data\n"); 693 pr_err("error setting wl12xx data\n");
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 1555918e3ffa..175e1ab2b04d 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/init.h> 25#include <linux/init.h>
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/gpio.h>
27#include <linux/kernel.h> 28#include <linux/kernel.h>
28#include <linux/platform_device.h> 29#include <linux/platform_device.h>
29#include <linux/i2c/twl.h> 30#include <linux/i2c/twl.h>
@@ -45,7 +46,6 @@
45#include <plat/common.h> 46#include <plat/common.h>
46#include <video/omapdss.h> 47#include <video/omapdss.h>
47#include <video/omap-panel-generic-dpi.h> 48#include <video/omap-panel-generic-dpi.h>
48#include <mach/gpio.h>
49#include <plat/gpmc.h> 49#include <plat/gpmc.h>
50#include <mach/hardware.h> 50#include <mach/hardware.h>
51#include <plat/nand.h> 51#include <plat/nand.h>
@@ -65,8 +65,6 @@
65#define OVERO_GPIO_USBH_CPEN 168 65#define OVERO_GPIO_USBH_CPEN 168
66#define OVERO_GPIO_USBH_NRESET 183 66#define OVERO_GPIO_USBH_NRESET 183
67 67
68#define NAND_BLOCK_SIZE SZ_128K
69
70#define OVERO_SMSC911X_CS 5 68#define OVERO_SMSC911X_CS 5
71#define OVERO_SMSC911X_GPIO 176 69#define OVERO_SMSC911X_GPIO 176
72#define OVERO_SMSC911X2_CS 4 70#define OVERO_SMSC911X2_CS 4
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index f6247e71a194..990366726c58 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -488,6 +488,7 @@ static struct regulator_init_data rx51_vmmc2 = {
488 .name = "V28_A", 488 .name = "V28_A",
489 .min_uV = 2800000, 489 .min_uV = 2800000,
490 .max_uV = 3000000, 490 .max_uV = 3000000,
491 .always_on = true, /* due VIO leak to AIC34 VDDs */
491 .apply_uV = true, 492 .apply_uV = true,
492 .valid_modes_mask = REGULATOR_MODE_NORMAL 493 .valid_modes_mask = REGULATOR_MODE_NORMAL
493 | REGULATOR_MODE_STANDBY, 494 | REGULATOR_MODE_STANDBY,
@@ -582,7 +583,7 @@ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
582{ 583{
583 /* FIXME this gpio setup is just a placeholder for now */ 584 /* FIXME this gpio setup is just a placeholder for now */
584 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm"); 585 gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
585 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en"); 586 gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en");
586 587
587 return 0; 588 return 0;
588} 589}
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c
index c7c6beb1ec24..d4683ba5f721 100644
--- a/arch/arm/mach-omap2/board-zoom-display.c
+++ b/arch/arm/mach-omap2/board-zoom-display.c
@@ -26,7 +26,7 @@ static struct gpio zoom_lcd_gpios[] __initdata = {
26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" }, 26 { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" },
27}; 27};
28 28
29static void zoom_lcd_panel_init(void) 29static void __init zoom_lcd_panel_init(void)
30{ 30{
31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? 31 zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
32 LCD_PANEL_RESET_GPIO_PROD : 32 LCD_PANEL_RESET_GPIO_PROD :
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c
index e94903b2c65b..94ccf464677b 100644
--- a/arch/arm/mach-omap2/common-board-devices.c
+++ b/arch/arm/mach-omap2/common-board-devices.c
@@ -85,17 +85,17 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
85 struct spi_board_info *spi_bi = &ads7846_spi_board_info; 85 struct spi_board_info *spi_bi = &ads7846_spi_board_info;
86 int err; 86 int err;
87 87
88 err = gpio_request(gpio_pendown, "TS PenDown"); 88 if (board_pdata && board_pdata->get_pendown_state) {
89 if (err) { 89 err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
90 pr_err("Could not obtain gpio for TS PenDown: %d\n", err); 90 if (err) {
91 return; 91 pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
92 } 92 return;
93 93 }
94 gpio_direction_input(gpio_pendown); 94 gpio_export(gpio_pendown, 0);
95 gpio_export(gpio_pendown, 0);
96 95
97 if (gpio_debounce) 96 if (gpio_debounce)
98 gpio_set_debounce(gpio_pendown, gpio_debounce); 97 gpio_set_debounce(gpio_pendown, gpio_debounce);
98 }
99 99
100 ads7846_config.gpio_pendown = gpio_pendown; 100 ads7846_config.gpio_pendown = gpio_pendown;
101 101
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h
index eb80b3b0ef47..679719051df5 100644
--- a/arch/arm/mach-omap2/common-board-devices.h
+++ b/arch/arm/mach-omap2/common-board-devices.h
@@ -1,6 +1,8 @@
1#ifndef __OMAP_COMMON_BOARD_DEVICES__ 1#ifndef __OMAP_COMMON_BOARD_DEVICES__
2#define __OMAP_COMMON_BOARD_DEVICES__ 2#define __OMAP_COMMON_BOARD_DEVICES__
3 3
4#define NAND_BLOCK_SIZE SZ_128K
5
4struct twl4030_platform_data; 6struct twl4030_platform_data;
5struct mtd_partition; 7struct mtd_partition;
6 8
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 7b8558564591..5b8ca680ed93 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -97,7 +97,7 @@ static int __init omap4_l3_init(void)
97 97
98 WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name); 98 WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name);
99 99
100 return PTR_ERR(od); 100 return IS_ERR(od) ? PTR_ERR(od) : 0;
101} 101}
102postcore_initcall(omap4_l3_init); 102postcore_initcall(omap4_l3_init);
103 103
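The one-liner above matters because the builder called in omap4_l3_init() returns an ERR_PTR()-encoded pointer: returning PTR_ERR(od) unconditionally turned a perfectly valid pointer into a bogus nonzero "error" from the initcall. A short sketch of the ERR_PTR convention the fix relies on, with an assumed constructor:

    #include <linux/err.h>

    struct thing;                          /* opaque, assumed */
    struct thing *thing_create(void);      /* assumed: returns ERR_PTR() on failure */

    static int thing_init(void)
    {
        struct thing *t = thing_create();

        if (IS_ERR(t))
            return PTR_ERR(t);             /* negative errno encoded in the pointer */

        return 0;                          /* a valid pointer is not an error code */
    }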
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b2f30bed5a20..66868c5d5a29 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -145,6 +145,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
145 int power_on, int vdd) 145 int power_on, int vdd)
146{ 146{
147 u32 reg; 147 u32 reg;
148 unsigned long timeout;
148 149
149 if (power_on) { 150 if (power_on) {
150 reg = omap4_ctrl_pad_readl(control_pbias_offset); 151 reg = omap4_ctrl_pad_readl(control_pbias_offset);
@@ -157,9 +158,15 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
157 OMAP4_MMC1_PWRDNZ_MASK | 158 OMAP4_MMC1_PWRDNZ_MASK |
158 OMAP4_USBC1_ICUSB_PWRDNZ_MASK); 159 OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
159 omap4_ctrl_pad_writel(reg, control_pbias_offset); 160 omap4_ctrl_pad_writel(reg, control_pbias_offset);
160 /* 4 microsec delay for comparator to generate an error*/ 161
161 udelay(4); 162 timeout = jiffies + msecs_to_jiffies(5);
162 reg = omap4_ctrl_pad_readl(control_pbias_offset); 163 do {
164 reg = omap4_ctrl_pad_readl(control_pbias_offset);
165 if (!(reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK))
166 break;
167 usleep_range(100, 200);
168 } while (!time_after(jiffies, timeout));
169
163 if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) { 170 if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
164 pr_err("Pbias Voltage is not same as LDO\n"); 171 pr_err("Pbias Voltage is not same as LDO\n");
165 /* Caution : On VMODE_ERROR Power Down MMC IO */ 172 /* Caution : On VMODE_ERROR Power Down MMC IO */
@@ -331,6 +338,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
331 if (c->no_off) 338 if (c->no_off)
332 mmc->slots[0].no_off = 1; 339 mmc->slots[0].no_off = 1;
333 340
341 if (c->no_off_init)
342 mmc->slots[0].no_regulator_off_init = c->no_off_init;
343
334 if (c->vcc_aux_disable_is_sleep) 344 if (c->vcc_aux_disable_is_sleep)
335 mmc->slots[0].vcc_aux_disable_is_sleep = 1; 345 mmc->slots[0].vcc_aux_disable_is_sleep = 1;
336 346
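The hsmmc.c hunk replaces a fixed 4 µs delay with a bounded poll: the PBIAS error bit is re-read under a 5 ms jiffies deadline, sleeping with usleep_range() between reads, and the existing error handling then acts on the final value. A self-contained sketch of that shape, with an assumed register bit:

    #include <linux/types.h>
    #include <linux/jiffies.h>
    #include <linux/delay.h>
    #include <linux/io.h>

    #define ERR_FLAG (1 << 23)             /* assumed error bit */

    /* Poll until ERR_FLAG clears or ~5 ms elapse, sleeping between reads. */
    static bool error_cleared(void __iomem *reg)
    {
        unsigned long timeout = jiffies + msecs_to_jiffies(5);

        do {
            if (!(readl(reg) & ERR_FLAG))
                return true;
            usleep_range(100, 200);
        } while (!time_after(jiffies, timeout));

        return !(readl(reg) & ERR_FLAG);   /* one final check after the deadline */
    }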
diff --git a/arch/arm/mach-omap2/hsmmc.h b/arch/arm/mach-omap2/hsmmc.h
index f119348827d4..f757e78d4d4f 100644
--- a/arch/arm/mach-omap2/hsmmc.h
+++ b/arch/arm/mach-omap2/hsmmc.h
@@ -18,6 +18,7 @@ struct omap2_hsmmc_info {
18 bool nonremovable; /* Nonremovable e.g. eMMC */ 18 bool nonremovable; /* Nonremovable e.g. eMMC */
19 bool power_saving; /* Try to sleep or power off when possible */ 19 bool power_saving; /* Try to sleep or power off when possible */
20 bool no_off; /* power_saving and power is not to go off */ 20 bool no_off; /* power_saving and power is not to go off */
21 bool no_off_init; /* no power off when not in MMC sleep state */
21 bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */ 22 bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */
22 int gpio_cd; /* or -EINVAL */ 23 int gpio_cd; /* or -EINVAL */
23 int gpio_wp; /* or -EINVAL */ 24 int gpio_wp; /* or -EINVAL */
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index a4ab1e364313..c7fb22abc219 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -83,6 +83,9 @@ void omap_mux_write(struct omap_mux_partition *partition, u16 val,
83void omap_mux_write_array(struct omap_mux_partition *partition, 83void omap_mux_write_array(struct omap_mux_partition *partition,
84 struct omap_board_mux *board_mux) 84 struct omap_board_mux *board_mux)
85{ 85{
86 if (!board_mux)
87 return;
88
86 while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) { 89 while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
87 omap_mux_write(partition, board_mux->value, 90 omap_mux_write(partition, board_mux->value,
88 board_mux->reg_offset); 91 board_mux->reg_offset);
@@ -906,7 +909,7 @@ static struct omap_mux *omap_mux_get_by_gpio(
906u16 omap_mux_get_gpio(int gpio) 909u16 omap_mux_get_gpio(int gpio)
907{ 910{
908 struct omap_mux_partition *partition; 911 struct omap_mux_partition *partition;
909 struct omap_mux *m; 912 struct omap_mux *m = NULL;
910 913
911 list_for_each_entry(partition, &mux_partitions, node) { 914 list_for_each_entry(partition, &mux_partitions, node) {
912 m = omap_mux_get_by_gpio(partition, gpio); 915 m = omap_mux_get_by_gpio(partition, gpio);
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 137f321c029f..2132308ad1e4 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -323,10 +323,12 @@ int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
323 323
324/** 324/**
325 * omap4_mux_init() - initialize mux system with board specific set 325 * omap4_mux_init() - initialize mux system with board specific set
326 * @board_mux: Board specific mux table 326 * @board_subset: Board specific mux table
327 * @board_wkup_subset: Board specific mux table for wakeup instance
327 * @flags: OMAP package type used for the board 328 * @flags: OMAP package type used for the board
328 */ 329 */
329int omap4_mux_init(struct omap_board_mux *board_mux, int flags); 330int omap4_mux_init(struct omap_board_mux *board_subset,
331 struct omap_board_mux *board_wkup_subset, int flags);
330 332
331/** 333/**
332 * omap_mux_init - private mux init function, do not call 334 * omap_mux_init - private mux init function, do not call
diff --git a/arch/arm/mach-omap2/mux44xx.c b/arch/arm/mach-omap2/mux44xx.c
index 9a66445112ae..f5a74daab2ff 100644
--- a/arch/arm/mach-omap2/mux44xx.c
+++ b/arch/arm/mach-omap2/mux44xx.c
@@ -1309,7 +1309,8 @@ static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
1309#define omap4_wkup_cbl_cbs_ball NULL 1309#define omap4_wkup_cbl_cbs_ball NULL
1310#endif 1310#endif
1311 1311
1312int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags) 1312int __init omap4_mux_init(struct omap_board_mux *board_subset,
1313 struct omap_board_mux *board_wkup_subset, int flags)
1313{ 1314{
1314 struct omap_ball *package_balls_core; 1315 struct omap_ball *package_balls_core;
1315 struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball; 1316 struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball;
@@ -1347,7 +1348,7 @@ int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
1347 OMAP_MUX_GPIO_IN_MODE3, 1348 OMAP_MUX_GPIO_IN_MODE3,
1348 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE, 1349 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE,
1349 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE, 1350 OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE,
1350 omap4_wkup_muxmodes, NULL, board_subset, 1351 omap4_wkup_muxmodes, NULL, board_wkup_subset,
1351 package_balls_wkup); 1352 package_balls_wkup);
1352 1353
1353 return ret; 1354 return ret;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e03429453ce7..293fa6cd50e1 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1628,7 +1628,7 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
1628 void *data) 1628 void *data)
1629{ 1629{
1630 struct omap_hwmod *temp_oh; 1630 struct omap_hwmod *temp_oh;
1631 int ret; 1631 int ret = 0;
1632 1632
1633 if (!fn) 1633 if (!fn)
1634 return -EINVAL; 1634 return -EINVAL;
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index abc548a0c98d..e1c69ffe0f69 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -5109,7 +5109,7 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
5109 &omap44xx_iva_seq1_hwmod, 5109 &omap44xx_iva_seq1_hwmod,
5110 5110
5111 /* kbd class */ 5111 /* kbd class */
5112/* &omap44xx_kbd_hwmod, */ 5112 &omap44xx_kbd_hwmod,
5113 5113
5114 /* mailbox class */ 5114 /* mailbox class */
5115 &omap44xx_mailbox_hwmod, 5115 &omap44xx_mailbox_hwmod,
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index f47813edd951..58775e3c8476 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -56,8 +56,10 @@ int omap4430_phy_init(struct device *dev)
56 /* Power down the phy */ 56 /* Power down the phy */
57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); 57 __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
58 58
59 if (!dev) 59 if (!dev) {
60 iounmap(ctrl_base);
60 return 0; 61 return 0;
62 }
61 63
62 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); 64 phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
63 if (IS_ERR(phyclk)) { 65 if (IS_ERR(phyclk)) {
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index a5a83b358ddd..e01da45c0537 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -189,7 +189,7 @@ static struct dentry *pm_dbg_dir;
189 189
190static int pm_dbg_init_done; 190static int pm_dbg_init_done;
191 191
192static int __init pm_dbg_init(void); 192static int pm_dbg_init(void);
193 193
194enum { 194enum {
195 DEBUG_FILE_COUNTERS = 0, 195 DEBUG_FILE_COUNTERS = 0,
@@ -595,7 +595,7 @@ static int option_set(void *data, u64 val)
595 595
596DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n"); 596DEFINE_SIMPLE_ATTRIBUTE(pm_dbg_option_fops, option_get, option_set, "%llu\n");
597 597
598static int __init pm_dbg_init(void) 598static int pm_dbg_init(void)
599{ 599{
600 int i; 600 int i;
601 struct dentry *d; 601 struct dentry *d;
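The pm-debug.c hunk drops __init from pm_dbg_init(); that annotation is only safe for code that can never be reached once the init sections are freed, and removing it is the usual fix when a function gains (or turns out to have) a post-boot caller. A minimal sketch of the rule, with assumed names:

    #include <linux/init.h>

    static int create_debug_entries(void); /* no __init: may also run after boot */

    static int __init early_setup(void)
    {
        return create_debug_entries();     /* init code calling non-init is fine */
    }
    core_initcall(early_setup);

    static int create_debug_entries(void)
    {
        /* debugfs work elided in this sketch */
        return 0;
    }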
diff --git a/arch/arm/mach-pxa/spitz_pm.c b/arch/arm/mach-pxa/spitz_pm.c
index 7fe74067d85f..094279aefe9c 100644
--- a/arch/arm/mach-pxa/spitz_pm.c
+++ b/arch/arm/mach-pxa/spitz_pm.c
@@ -14,6 +14,7 @@
14#include <linux/init.h> 14#include <linux/init.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/delay.h> 16#include <linux/delay.h>
17#include <linux/gpio.h>
17#include <linux/interrupt.h> 18#include <linux/interrupt.h>
18#include <linux/platform_device.h> 19#include <linux/platform_device.h>
19#include <linux/apm-emulation.h> 20#include <linux/apm-emulation.h>
diff --git a/arch/arm/mach-s3c2410/Makefile b/arch/arm/mach-s3c2410/Makefile
index 0d468e96e83e..81695353d8f4 100644
--- a/arch/arm/mach-s3c2410/Makefile
+++ b/arch/arm/mach-s3c2410/Makefile
@@ -10,7 +10,6 @@ obj-n :=
10obj- := 10obj- :=
11 11
12obj-$(CONFIG_CPU_S3C2410) += s3c2410.o 12obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
13obj-$(CONFIG_CPU_S3C2410) += irq.o
14obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o 13obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o
15obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o 14obj-$(CONFIG_CPU_S3C2410_DMA) += dma.o
16obj-$(CONFIG_S3C2410_PM) += pm.o sleep.o 15obj-$(CONFIG_S3C2410_PM) += pm.o sleep.o
diff --git a/arch/arm/mach-s3c2410/irq.c b/arch/arm/mach-s3c2410/irq.c
deleted file mode 100644
index 2854129f8cc7..000000000000
--- a/arch/arm/mach-s3c2410/irq.c
+++ /dev/null
@@ -1,34 +0,0 @@
1/* linux/arch/arm/mach-s3c2410/irq.c
2 *
3 * Copyright (c) 2006 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20*/
21
22#include <linux/init.h>
23#include <linux/module.h>
24#include <linux/interrupt.h>
25#include <linux/ioport.h>
26#include <linux/syscore_ops.h>
27
28#include <plat/cpu.h>
29#include <plat/pm.h>
30
31struct syscore_ops s3c24xx_irq_syscore_ops = {
32 .suspend = s3c24xx_irq_suspend,
33 .resume = s3c24xx_irq_resume,
34};
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index 22046e2f53c2..153af8b359ec 100644
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -101,12 +101,14 @@ static void s5pv210_set_refresh(enum s5pv210_dmc_port ch, unsigned long freq)
101 unsigned long tmp, tmp1; 101 unsigned long tmp, tmp1;
102 void __iomem *reg = NULL; 102 void __iomem *reg = NULL;
103 103
104 if (ch == DMC0) 104 if (ch == DMC0) {
105 reg = (S5P_VA_DMC0 + 0x30); 105 reg = (S5P_VA_DMC0 + 0x30);
106 else if (ch == DMC1) 106 } else if (ch == DMC1) {
107 reg = (S5P_VA_DMC1 + 0x30); 107 reg = (S5P_VA_DMC1 + 0x30);
108 else 108 } else {
109 printk(KERN_ERR "Cannot find DMC port\n"); 109 printk(KERN_ERR "Cannot find DMC port\n");
110 return;
111 }
110 112
111 /* Find current DRAM frequency */ 113 /* Find current DRAM frequency */
112 tmp = s5pv210_dram_conf[ch].freq; 114 tmp = s5pv210_dram_conf[ch].freq;
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index c95258c274c1..1e2aba23e0d6 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -382,10 +382,8 @@ void ag5evm_sdhi1_set_pwr(struct platform_device *pdev, int state)
382} 382}
383 383
384static struct sh_mobile_sdhi_info sh_sdhi1_platdata = { 384static struct sh_mobile_sdhi_info sh_sdhi1_platdata = {
385 .dma_slave_tx = SHDMA_SLAVE_SDHI1_TX,
386 .dma_slave_rx = SHDMA_SLAVE_SDHI1_RX,
387 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE, 385 .tmio_flags = TMIO_MMC_WRPROTECT_DISABLE,
388 .tmio_caps = MMC_CAP_NONREMOVABLE, 386 .tmio_caps = MMC_CAP_NONREMOVABLE | MMC_CAP_SDIO_IRQ,
389 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34, 387 .tmio_ocr_mask = MMC_VDD_32_33 | MMC_VDD_33_34,
390 .set_pwr = ag5evm_sdhi1_set_pwr, 388 .set_pwr = ag5evm_sdhi1_set_pwr,
391}; 389};
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 776f20560e72..7e1d37584321 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -126,7 +126,7 @@
126 * ------+--------------------+--------------------+------- 126 * ------+--------------------+--------------------+-------
127 * IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low 127 * IRQ0 | ICR1A.IRQ0SA=0010 | SDHI2 card detect | Low
128 * IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High 128 * IRQ6 | ICR1A.IRQ6SA=0011 | Ether(LAN9220) | High
129 * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Tuch Panel | Low 129 * IRQ7 | ICR1A.IRQ7SA=0010 | LCD Touch Panel | Low
130 * IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low 130 * IRQ8 | ICR2A.IRQ8SA=0010 | MMC/SD card detect | Low
131 * IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low 131 * IRQ9 | ICR2A.IRQ9SA=0010 | KEY(TCA6408) | Low
132 * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High 132 * IRQ21 | ICR4A.IRQ21SA=0011 | Sensor(ADXL345) | High
@@ -165,10 +165,10 @@
165 * USB1 can become Host by r8a66597, and become Function by renesas_usbhs. 165 * USB1 can become Host by r8a66597, and become Function by renesas_usbhs.
166 * But don't select both drivers in same time. 166 * But don't select both drivers in same time.
167 * These uses same IRQ number for request_irq(), and aren't supporting 167 * These uses same IRQ number for request_irq(), and aren't supporting
168 * IRQF_SHARD / IORESOURCE_IRQ_SHAREABLE. 168 * IRQF_SHARED / IORESOURCE_IRQ_SHAREABLE.
169 * 169 *
170 * Actually these are old/new version of USB driver. 170 * Actually these are old/new version of USB driver.
171 * This mean its register will be broken if it supports SHARD IRQ, 171 * This mean its register will be broken if it supports shared IRQ,
172 */ 172 */
173 173
174/* 174/*
@@ -562,7 +562,121 @@ out:
562 clk_put(hdmi_ick); 562 clk_put(hdmi_ick);
563} 563}
564 564
565/* USB1 (Host) */ 565/* USBHS0 is connected to CN22 which takes a USB Mini-B plug
566 *
567 * The sh7372 SoC has IRQ7 set aside for USBHS0 hotplug,
568 * but on this particular board IRQ7 is already used by
569 * the touch screen. This leaves us with software polling.
570 */
571#define USBHS0_POLL_INTERVAL (HZ * 5)
572
573struct usbhs_private {
574 unsigned int usbphyaddr;
575 unsigned int usbcrcaddr;
576 struct renesas_usbhs_platform_info info;
577 struct delayed_work work;
578 struct platform_device *pdev;
579};
580
581#define usbhs_get_priv(pdev) \
582 container_of(renesas_usbhs_get_info(pdev), \
583 struct usbhs_private, info)
584
585#define usbhs_is_connected(priv) \
586 (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
587
588static int usbhs_get_vbus(struct platform_device *pdev)
589{
590 return usbhs_is_connected(usbhs_get_priv(pdev));
591}
592
593static void usbhs_phy_reset(struct platform_device *pdev)
594{
595 struct usbhs_private *priv = usbhs_get_priv(pdev);
596
597 /* init phy */
598 __raw_writew(0x8a0a, priv->usbcrcaddr);
599}
600
601static int usbhs0_get_id(struct platform_device *pdev)
602{
603 return USBHS_GADGET;
604}
605
606static void usbhs0_work_function(struct work_struct *work)
607{
608 struct usbhs_private *priv = container_of(work, struct usbhs_private,
609 work.work);
610
611 renesas_usbhs_call_notify_hotplug(priv->pdev);
612 schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
613}
614
615static int usbhs0_hardware_init(struct platform_device *pdev)
616{
617 struct usbhs_private *priv = usbhs_get_priv(pdev);
618
619 priv->pdev = pdev;
620 INIT_DELAYED_WORK(&priv->work, usbhs0_work_function);
621 schedule_delayed_work(&priv->work, USBHS0_POLL_INTERVAL);
622 return 0;
623}
624
625static void usbhs0_hardware_exit(struct platform_device *pdev)
626{
627 struct usbhs_private *priv = usbhs_get_priv(pdev);
628
629 cancel_delayed_work_sync(&priv->work);
630}
631
632static struct usbhs_private usbhs0_private = {
633 .usbcrcaddr = 0xe605810c, /* USBCR2 */
634 .info = {
635 .platform_callback = {
636 .hardware_init = usbhs0_hardware_init,
637 .hardware_exit = usbhs0_hardware_exit,
638 .phy_reset = usbhs_phy_reset,
639 .get_id = usbhs0_get_id,
640 .get_vbus = usbhs_get_vbus,
641 },
642 .driver_param = {
643 .buswait_bwait = 4,
644 },
645 },
646};
647
648static struct resource usbhs0_resources[] = {
649 [0] = {
650 .name = "USBHS0",
651 .start = 0xe6890000,
652 .end = 0xe68900e6 - 1,
653 .flags = IORESOURCE_MEM,
654 },
655 [1] = {
656 .start = evt2irq(0x1ca0) /* USB0_USB0I0 */,
657 .flags = IORESOURCE_IRQ,
658 },
659};
660
661static struct platform_device usbhs0_device = {
662 .name = "renesas_usbhs",
663 .id = 0,
664 .dev = {
665 .platform_data = &usbhs0_private.info,
666 },
667 .num_resources = ARRAY_SIZE(usbhs0_resources),
668 .resource = usbhs0_resources,
669};
670
671/* USBHS1 is connected to CN31 which takes a USB Mini-AB plug
672 *
673 * Use J30 to select between Host and Function. This setting
674 * can however not be detected by software. Hotplug of USBHS1
675 * is provided via IRQ8.
676 */
677#define IRQ8 evt2irq(0x0300)
678
679/* USBHS1 USB Host support via r8a66597_hcd */
566static void usb1_host_port_power(int port, int power) 680static void usb1_host_port_power(int port, int power)
567{ 681{
568 if (!power) /* only power-on is supported for now */ 682 if (!power) /* only power-on is supported for now */
@@ -579,9 +693,9 @@ static struct r8a66597_platdata usb1_host_data = {
579 693
580static struct resource usb1_host_resources[] = { 694static struct resource usb1_host_resources[] = {
581 [0] = { 695 [0] = {
582 .name = "USBHS", 696 .name = "USBHS1",
583 .start = 0xE68B0000, 697 .start = 0xe68b0000,
584 .end = 0xE68B00E6 - 1, 698 .end = 0xe68b00e6 - 1,
585 .flags = IORESOURCE_MEM, 699 .flags = IORESOURCE_MEM,
586 }, 700 },
587 [1] = { 701 [1] = {
@@ -602,37 +716,14 @@ static struct platform_device usb1_host_device = {
602 .resource = usb1_host_resources, 716 .resource = usb1_host_resources,
603}; 717};
604 718
605/* USB1 (Function) */ 719/* USBHS1 USB Function support via renesas_usbhs */
720
606#define USB_PHY_MODE (1 << 4) 721#define USB_PHY_MODE (1 << 4)
607#define USB_PHY_INT_EN ((1 << 3) | (1 << 2)) 722#define USB_PHY_INT_EN ((1 << 3) | (1 << 2))
608#define USB_PHY_ON (1 << 1) 723#define USB_PHY_ON (1 << 1)
609#define USB_PHY_OFF (1 << 0) 724#define USB_PHY_OFF (1 << 0)
610#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF) 725#define USB_PHY_INT_CLR (USB_PHY_ON | USB_PHY_OFF)
611 726
612struct usbhs_private {
613 unsigned int irq;
614 unsigned int usbphyaddr;
615 unsigned int usbcrcaddr;
616 struct renesas_usbhs_platform_info info;
617};
618
619#define usbhs_get_priv(pdev) \
620 container_of(renesas_usbhs_get_info(pdev), \
621 struct usbhs_private, info)
622
623#define usbhs_is_connected(priv) \
624 (!((1 << 7) & __raw_readw(priv->usbcrcaddr)))
625
626static int usbhs1_get_id(struct platform_device *pdev)
627{
628 return USBHS_GADGET;
629}
630
631static int usbhs1_get_vbus(struct platform_device *pdev)
632{
633 return usbhs_is_connected(usbhs_get_priv(pdev));
634}
635
636static irqreturn_t usbhs1_interrupt(int irq, void *data) 727static irqreturn_t usbhs1_interrupt(int irq, void *data)
637{ 728{
638 struct platform_device *pdev = data; 729 struct platform_device *pdev = data;
@@ -654,12 +745,10 @@ static int usbhs1_hardware_init(struct platform_device *pdev)
654 struct usbhs_private *priv = usbhs_get_priv(pdev); 745 struct usbhs_private *priv = usbhs_get_priv(pdev);
655 int ret; 746 int ret;
656 747
657 irq_set_irq_type(priv->irq, IRQ_TYPE_LEVEL_HIGH);
658
659 /* clear interrupt status */ 748 /* clear interrupt status */
660 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); 749 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
661 750
662 ret = request_irq(priv->irq, usbhs1_interrupt, 0, 751 ret = request_irq(IRQ8, usbhs1_interrupt, IRQF_TRIGGER_HIGH,
663 dev_name(&pdev->dev), pdev); 752 dev_name(&pdev->dev), pdev);
664 if (ret) { 753 if (ret) {
665 dev_err(&pdev->dev, "request_irq err\n"); 754 dev_err(&pdev->dev, "request_irq err\n");
@@ -679,15 +768,12 @@ static void usbhs1_hardware_exit(struct platform_device *pdev)
679 /* clear interrupt status */ 768 /* clear interrupt status */
680 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr); 769 __raw_writew(USB_PHY_MODE | USB_PHY_INT_CLR, priv->usbphyaddr);
681 770
682 free_irq(priv->irq, pdev); 771 free_irq(IRQ8, pdev);
683} 772}
684 773
685static void usbhs1_phy_reset(struct platform_device *pdev) 774static int usbhs1_get_id(struct platform_device *pdev)
686{ 775{
687 struct usbhs_private *priv = usbhs_get_priv(pdev); 776 return USBHS_GADGET;
688
689 /* init phy */
690 __raw_writew(0x8a0a, priv->usbcrcaddr);
691} 777}
692 778
693static u32 usbhs1_pipe_cfg[] = { 779static u32 usbhs1_pipe_cfg[] = {
@@ -710,16 +796,15 @@ static u32 usbhs1_pipe_cfg[] = {
710}; 796};
711 797
712static struct usbhs_private usbhs1_private = { 798static struct usbhs_private usbhs1_private = {
713 .irq = evt2irq(0x0300), /* IRQ8 */ 799 .usbphyaddr = 0xe60581e2, /* USBPHY1INTAP */
714 .usbphyaddr = 0xE60581E2, /* USBPHY1INTAP */ 800 .usbcrcaddr = 0xe6058130, /* USBCR4 */
715 .usbcrcaddr = 0xE6058130, /* USBCR4 */
716 .info = { 801 .info = {
717 .platform_callback = { 802 .platform_callback = {
718 .hardware_init = usbhs1_hardware_init, 803 .hardware_init = usbhs1_hardware_init,
719 .hardware_exit = usbhs1_hardware_exit, 804 .hardware_exit = usbhs1_hardware_exit,
720 .phy_reset = usbhs1_phy_reset,
721 .get_id = usbhs1_get_id, 805 .get_id = usbhs1_get_id,
722 .get_vbus = usbhs1_get_vbus, 806 .phy_reset = usbhs_phy_reset,
807 .get_vbus = usbhs_get_vbus,
723 }, 808 },
724 .driver_param = { 809 .driver_param = {
725 .buswait_bwait = 4, 810 .buswait_bwait = 4,
@@ -731,9 +816,9 @@ static struct usbhs_private usbhs1_private = {
731 816
732static struct resource usbhs1_resources[] = { 817static struct resource usbhs1_resources[] = {
733 [0] = { 818 [0] = {
734 .name = "USBHS", 819 .name = "USBHS1",
735 .start = 0xE68B0000, 820 .start = 0xe68b0000,
736 .end = 0xE68B00E6 - 1, 821 .end = 0xe68b00e6 - 1,
737 .flags = IORESOURCE_MEM, 822 .flags = IORESOURCE_MEM,
738 }, 823 },
739 [1] = { 824 [1] = {
@@ -752,7 +837,6 @@ static struct platform_device usbhs1_device = {
752 .resource = usbhs1_resources, 837 .resource = usbhs1_resources,
753}; 838};
754 839
755
756/* LED */ 840/* LED */
757static struct gpio_led mackerel_leds[] = { 841static struct gpio_led mackerel_leds[] = {
758 { 842 {
@@ -1203,6 +1287,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
1203 &nor_flash_device, 1287 &nor_flash_device,
1204 &smc911x_device, 1288 &smc911x_device,
1205 &lcdc_device, 1289 &lcdc_device,
1290 &usbhs0_device,
1206 &usb1_host_device, 1291 &usb1_host_device,
1207 &usbhs1_device, 1292 &usbhs1_device,
1208 &leds_device, 1293 &leds_device,
@@ -1301,6 +1386,7 @@ static void __init mackerel_map_io(void)
1301 1386
1302#define GPIO_PORT9CR 0xE6051009 1387#define GPIO_PORT9CR 0xE6051009
1303#define GPIO_PORT10CR 0xE605100A 1388#define GPIO_PORT10CR 0xE605100A
1389#define GPIO_PORT167CR 0xE60520A7
1304#define GPIO_PORT168CR 0xE60520A8 1390#define GPIO_PORT168CR 0xE60520A8
1305#define SRCR4 0xe61580bc 1391#define SRCR4 0xe61580bc
1306#define USCCR1 0xE6058144 1392#define USCCR1 0xE6058144
@@ -1354,17 +1440,17 @@ static void __init mackerel_init(void)
1354 gpio_request(GPIO_PORT151, NULL); /* LCDDON */ 1440 gpio_request(GPIO_PORT151, NULL); /* LCDDON */
1355 gpio_direction_output(GPIO_PORT151, 1); 1441 gpio_direction_output(GPIO_PORT151, 1);
1356 1442
1357 /* USB enable */ 1443 /* USBHS0 */
1358 gpio_request(GPIO_FN_VBUS0_1, NULL); 1444 gpio_request(GPIO_FN_VBUS0_0, NULL);
1359 gpio_request(GPIO_FN_IDIN_1_18, NULL); 1445 gpio_pull_down(GPIO_PORT168CR); /* VBUS0_0 pull down */
1360 gpio_request(GPIO_FN_PWEN_1_115, NULL); 1446
1361 gpio_request(GPIO_FN_OVCN_1_114, NULL); 1447 /* USBHS1 */
1362 gpio_request(GPIO_FN_EXTLP_1, NULL); 1448 gpio_request(GPIO_FN_VBUS0_1, NULL);
1363 gpio_request(GPIO_FN_OVCN2_1, NULL); 1449 gpio_pull_down(GPIO_PORT167CR); /* VBUS0_1 pull down */
1364 gpio_pull_down(GPIO_PORT168CR); 1450 gpio_request(GPIO_FN_IDIN_1_113, NULL);
1365 1451
1366 /* setup USB phy */ 1452 /* USB phy tweak to make the r8a66597_hcd host driver work */
1367 __raw_writew(0x8a0a, 0xE6058130); /* USBCR4 */ 1453 __raw_writew(0x8a0a, 0xe6058130); /* USBCR4 */
1368 1454
1369 /* enable FSI2 port A (ak4643) */ 1455 /* enable FSI2 port A (ak4643) */
1370 gpio_request(GPIO_FN_FSIAIBT, NULL); 1456 gpio_request(GPIO_FN_FSIAIBT, NULL);
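The large board-mackerel.c addition above works around IRQ7 being taken by the touch panel: USBHS0 cable hotplug is detected by a delayed work item that re-arms itself every five seconds and notifies the controller driver. The core of that polling pattern, reduced to assumed names:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    #define POLL_INTERVAL (HZ * 5)

    static struct delayed_work poll_work;

    static void check_cable(void)
    {
        /* assumed: read the connect status and notify the driver */
    }

    static void poll_fn(struct work_struct *work)
    {
        check_cable();
        schedule_delayed_work(&poll_work, POLL_INTERVAL); /* re-arm */
    }

    static void poll_start(void)
    {
        INIT_DELAYED_WORK(&poll_work, poll_fn);
        schedule_delayed_work(&poll_work, POLL_INTERVAL);
    }

    static void poll_stop(void)
    {
        cancel_delayed_work_sync(&poll_work); /* also stops the re-arm chain */
    }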
diff --git a/arch/arm/mach-shmobile/intc-sh73a0.c b/arch/arm/mach-shmobile/intc-sh73a0.c
index 5d0e1503ece6..a911a60e7719 100644
--- a/arch/arm/mach-shmobile/intc-sh73a0.c
+++ b/arch/arm/mach-shmobile/intc-sh73a0.c
@@ -250,6 +250,11 @@ static irqreturn_t sh73a0_intcs_demux(int irq, void *dev_id)
250 return IRQ_HANDLED; 250 return IRQ_HANDLED;
251} 251}
252 252
253static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
254{
255 return 0; /* always allow wakeup */
256}
257
253void __init sh73a0_init_irq(void) 258void __init sh73a0_init_irq(void)
254{ 259{
255 void __iomem *gic_dist_base = __io(0xf0001000); 260 void __iomem *gic_dist_base = __io(0xf0001000);
@@ -257,6 +262,7 @@ void __init sh73a0_init_irq(void)
257 void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE); 262 void __iomem *intevtsa = ioremap_nocache(0xffd20100, PAGE_SIZE);
258 263
259 gic_init(0, 29, gic_dist_base, gic_cpu_base); 264 gic_init(0, 29, gic_dist_base, gic_cpu_base);
265 gic_arch_extn.irq_set_wake = sh73a0_set_wake;
260 266
261 register_intc_controller(&intcs_desc); 267 register_intc_controller(&intcs_desc);
262 268
diff --git a/arch/arm/mach-shmobile/setup-sh7367.c b/arch/arm/mach-shmobile/setup-sh7367.c
index 2c10190dbb55..e546017f15de 100644
--- a/arch/arm/mach-shmobile/setup-sh7367.c
+++ b/arch/arm/mach-shmobile/setup-sh7367.c
@@ -38,7 +38,7 @@ static struct plat_sci_port scif0_platform_data = {
38 .flags = UPF_BOOT_AUTOCONF, 38 .flags = UPF_BOOT_AUTOCONF,
39 .scscr = SCSCR_RE | SCSCR_TE, 39 .scscr = SCSCR_RE | SCSCR_TE,
40 .scbrr_algo_id = SCBRR_ALGO_4, 40 .scbrr_algo_id = SCBRR_ALGO_4,
41 .type = PORT_SCIF, 41 .type = PORT_SCIFA,
42 .irqs = { evt2irq(0xc00), evt2irq(0xc00), 42 .irqs = { evt2irq(0xc00), evt2irq(0xc00),
43 evt2irq(0xc00), evt2irq(0xc00) }, 43 evt2irq(0xc00), evt2irq(0xc00) },
44}; 44};
@@ -57,7 +57,7 @@ static struct plat_sci_port scif1_platform_data = {
57 .flags = UPF_BOOT_AUTOCONF, 57 .flags = UPF_BOOT_AUTOCONF,
58 .scscr = SCSCR_RE | SCSCR_TE, 58 .scscr = SCSCR_RE | SCSCR_TE,
59 .scbrr_algo_id = SCBRR_ALGO_4, 59 .scbrr_algo_id = SCBRR_ALGO_4,
60 .type = PORT_SCIF, 60 .type = PORT_SCIFA,
61 .irqs = { evt2irq(0xc20), evt2irq(0xc20), 61 .irqs = { evt2irq(0xc20), evt2irq(0xc20),
62 evt2irq(0xc20), evt2irq(0xc20) }, 62 evt2irq(0xc20), evt2irq(0xc20) },
63}; 63};
@@ -76,7 +76,7 @@ static struct plat_sci_port scif2_platform_data = {
76 .flags = UPF_BOOT_AUTOCONF, 76 .flags = UPF_BOOT_AUTOCONF,
77 .scscr = SCSCR_RE | SCSCR_TE, 77 .scscr = SCSCR_RE | SCSCR_TE,
78 .scbrr_algo_id = SCBRR_ALGO_4, 78 .scbrr_algo_id = SCBRR_ALGO_4,
79 .type = PORT_SCIF, 79 .type = PORT_SCIFA,
80 .irqs = { evt2irq(0xc40), evt2irq(0xc40), 80 .irqs = { evt2irq(0xc40), evt2irq(0xc40),
81 evt2irq(0xc40), evt2irq(0xc40) }, 81 evt2irq(0xc40), evt2irq(0xc40) },
82}; 82};
@@ -95,7 +95,7 @@ static struct plat_sci_port scif3_platform_data = {
95 .flags = UPF_BOOT_AUTOCONF, 95 .flags = UPF_BOOT_AUTOCONF,
96 .scscr = SCSCR_RE | SCSCR_TE, 96 .scscr = SCSCR_RE | SCSCR_TE,
97 .scbrr_algo_id = SCBRR_ALGO_4, 97 .scbrr_algo_id = SCBRR_ALGO_4,
98 .type = PORT_SCIF, 98 .type = PORT_SCIFA,
99 .irqs = { evt2irq(0xc60), evt2irq(0xc60), 99 .irqs = { evt2irq(0xc60), evt2irq(0xc60),
100 evt2irq(0xc60), evt2irq(0xc60) }, 100 evt2irq(0xc60), evt2irq(0xc60) },
101}; 101};
@@ -114,7 +114,7 @@ static struct plat_sci_port scif4_platform_data = {
114 .flags = UPF_BOOT_AUTOCONF, 114 .flags = UPF_BOOT_AUTOCONF,
115 .scscr = SCSCR_RE | SCSCR_TE, 115 .scscr = SCSCR_RE | SCSCR_TE,
116 .scbrr_algo_id = SCBRR_ALGO_4, 116 .scbrr_algo_id = SCBRR_ALGO_4,
117 .type = PORT_SCIF, 117 .type = PORT_SCIFA,
118 .irqs = { evt2irq(0xd20), evt2irq(0xd20), 118 .irqs = { evt2irq(0xd20), evt2irq(0xd20),
119 evt2irq(0xd20), evt2irq(0xd20) }, 119 evt2irq(0xd20), evt2irq(0xd20) },
120}; 120};
@@ -133,7 +133,7 @@ static struct plat_sci_port scif5_platform_data = {
133 .flags = UPF_BOOT_AUTOCONF, 133 .flags = UPF_BOOT_AUTOCONF,
134 .scscr = SCSCR_RE | SCSCR_TE, 134 .scscr = SCSCR_RE | SCSCR_TE,
135 .scbrr_algo_id = SCBRR_ALGO_4, 135 .scbrr_algo_id = SCBRR_ALGO_4,
136 .type = PORT_SCIF, 136 .type = PORT_SCIFA,
137 .irqs = { evt2irq(0xd40), evt2irq(0xd40), 137 .irqs = { evt2irq(0xd40), evt2irq(0xd40),
138 evt2irq(0xd40), evt2irq(0xd40) }, 138 evt2irq(0xd40), evt2irq(0xd40) },
139}; 139};
@@ -152,7 +152,7 @@ static struct plat_sci_port scif6_platform_data = {
152 .flags = UPF_BOOT_AUTOCONF, 152 .flags = UPF_BOOT_AUTOCONF,
153 .scscr = SCSCR_RE | SCSCR_TE, 153 .scscr = SCSCR_RE | SCSCR_TE,
154 .scbrr_algo_id = SCBRR_ALGO_4, 154 .scbrr_algo_id = SCBRR_ALGO_4,
155 .type = PORT_SCIF, 155 .type = PORT_SCIFB,
156 .irqs = { evt2irq(0xd60), evt2irq(0xd60), 156 .irqs = { evt2irq(0xd60), evt2irq(0xd60),
157 evt2irq(0xd60), evt2irq(0xd60) }, 157 evt2irq(0xd60), evt2irq(0xd60) },
158}; 158};
diff --git a/arch/arm/mach-tegra/board-harmony-power.c b/arch/arm/mach-tegra/board-harmony-power.c
index c84442cabe07..5ad8b2f94f8d 100644
--- a/arch/arm/mach-tegra/board-harmony-power.c
+++ b/arch/arm/mach-tegra/board-harmony-power.c
@@ -24,6 +24,8 @@
24 24
25#include <mach/irqs.h> 25#include <mach/irqs.h>
26 26
27#include "board-harmony.h"
28
27#define PMC_CTRL 0x0 29#define PMC_CTRL 0x0
28#define PMC_CTRL_INTR_LOW (1 << 17) 30#define PMC_CTRL_INTR_LOW (1 << 17)
29 31
@@ -98,7 +100,7 @@ static struct tps6586x_platform_data tps_platform = {
98 .irq_base = TEGRA_NR_IRQS, 100 .irq_base = TEGRA_NR_IRQS,
99 .num_subdevs = ARRAY_SIZE(tps_devs), 101 .num_subdevs = ARRAY_SIZE(tps_devs),
100 .subdevs = tps_devs, 102 .subdevs = tps_devs,
101 .gpio_base = TEGRA_NR_GPIOS, 103 .gpio_base = HARMONY_GPIO_TPS6586X(0),
102}; 104};
103 105
104static struct i2c_board_info __initdata harmony_regulators[] = { 106static struct i2c_board_info __initdata harmony_regulators[] = {
diff --git a/arch/arm/mach-tegra/board-harmony.h b/arch/arm/mach-tegra/board-harmony.h
index 1e57b071f52d..d85142edaf6b 100644
--- a/arch/arm/mach-tegra/board-harmony.h
+++ b/arch/arm/mach-tegra/board-harmony.h
@@ -17,7 +17,8 @@
17#ifndef _MACH_TEGRA_BOARD_HARMONY_H 17#ifndef _MACH_TEGRA_BOARD_HARMONY_H
18#define _MACH_TEGRA_BOARD_HARMONY_H 18#define _MACH_TEGRA_BOARD_HARMONY_H
19 19
20#define HARMONY_GPIO_WM8903(_x_) (TEGRA_NR_GPIOS + (_x_)) 20#define HARMONY_GPIO_TPS6586X(_x_) (TEGRA_NR_GPIOS + (_x_))
21#define HARMONY_GPIO_WM8903(_x_) (HARMONY_GPIO_TPS6586X(4) + (_x_))
21 22
22#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5 23#define TEGRA_GPIO_SD2_CD TEGRA_GPIO_PI5
23#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1 24#define TEGRA_GPIO_SD2_WP TEGRA_GPIO_PH1
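The Harmony header change stacks a third GPIO range on top of the SoC's: the TPS6586X PMIC takes the numbers directly above TEGRA_NR_GPIOS and the WM8903 codec now starts after the PMIC's four lines, so every expander gets a non-overlapping global GPIO base. Illustrative numbers only:

    #define SOC_NR_GPIOS  224                        /* assumed on-SoC count */
    #define PMIC_GPIO(x)  (SOC_NR_GPIOS + (x))       /* 4 lines: 224..227 */
    #define CODEC_GPIO(x) (PMIC_GPIO(4) + (x))       /* next free number: 228 */

Each expander's platform data then just sets .gpio_base to the first number in its range, as the tps_platform change above does.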
diff --git a/arch/arm/mach-u300/clock.h b/arch/arm/mach-u300/clock.h
index c34f3ea3017c..4f50ca8f901e 100644
--- a/arch/arm/mach-u300/clock.h
+++ b/arch/arm/mach-u300/clock.h
@@ -31,7 +31,7 @@ struct clk {
31 bool reset; 31 bool reset;
32 __u16 clk_val; 32 __u16 clk_val;
33 __s8 usecount; 33 __s8 usecount;
34 __u32 res_reg; 34 void __iomem * res_reg;
35 __u16 res_mask; 35 __u16 res_mask;
36 36
37 bool hw_ctrld; 37 bool hw_ctrld;
diff --git a/arch/arm/mach-u300/include/mach/u300-regs.h b/arch/arm/mach-u300/include/mach/u300-regs.h
index 8b85df4c8d8f..035fdc9dbdb0 100644
--- a/arch/arm/mach-u300/include/mach/u300-regs.h
+++ b/arch/arm/mach-u300/include/mach/u300-regs.h
@@ -18,6 +18,12 @@
18 * the defines are used for setting up the I/O memory mapping. 18 * the defines are used for setting up the I/O memory mapping.
19 */ 19 */
20 20
21#ifdef __ASSEMBLER__
22#define IOMEM(a) (a)
23#else
24#define IOMEM(a) (void __iomem *) a
25#endif
26
21/* NAND Flash CS0 */ 27/* NAND Flash CS0 */
22#define U300_NAND_CS0_PHYS_BASE 0x80000000 28#define U300_NAND_CS0_PHYS_BASE 0x80000000
23 29
@@ -48,13 +54,6 @@
48#endif 54#endif
49 55
50/* 56/*
51 * All the following peripherals are specified at their PHYSICAL address,
52 * so if you need to access them (in the kernel), you MUST use the macros
53 * defined in <asm/io.h> to map to the IO_ADDRESS_AHB() IO_ADDRESS_FAST()
54 * etc.
55 */
56
57/*
58 * AHB peripherals 57 * AHB peripherals
59 */ 58 */
60 59
@@ -63,11 +62,11 @@
63 62
64/* Vectored Interrupt Controller 0, servicing 32 interrupts */ 63/* Vectored Interrupt Controller 0, servicing 32 interrupts */
65#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000) 64#define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000)
66#define U300_INTCON0_VBASE (U300_AHB_PER_VIRT_BASE+0x1000) 65#define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000)
67 66
68/* Vectored Interrupt Controller 1, servicing 32 interrupts */ 67/* Vectored Interrupt Controller 1, servicing 32 interrupts */
69#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000) 68#define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000)
70#define U300_INTCON1_VBASE (U300_AHB_PER_VIRT_BASE+0x2000) 69#define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000)
71 70
72/* Memory Stick Pro (MSPRO) controller */ 71/* Memory Stick Pro (MSPRO) controller */
73#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000) 72#define U300_MSPRO_BASE (U300_AHB_PER_PHYS_BASE+0x3000)
@@ -115,7 +114,7 @@
115 114
116/* SYSCON */ 115/* SYSCON */
117#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000) 116#define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000)
118#define U300_SYSCON_VBASE (U300_SLOW_PER_VIRT_BASE+0x1000) 117#define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000)
119 118
120/* Watchdog */ 119/* Watchdog */
121#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000) 120#define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000)
@@ -125,7 +124,7 @@
125 124
126/* APP side special timer */ 125/* APP side special timer */
127#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000) 126#define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000)
128#define U300_TIMER_APP_VBASE (U300_SLOW_PER_VIRT_BASE+0x4000) 127#define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000)
129 128
130/* Keypad */ 129/* Keypad */
131#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000) 130#define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000)
@@ -181,5 +180,4 @@
181 * Virtual accessor macros for static devices 180 * Virtual accessor macros for static devices
182 */ 181 */
183 182
184
185#endif 183#endif
diff --git a/arch/arm/mach-u300/timer.c b/arch/arm/mach-u300/timer.c
index 891cf44591e0..18d7fa0603c2 100644
--- a/arch/arm/mach-u300/timer.c
+++ b/arch/arm/mach-u300/timer.c
@@ -411,8 +411,7 @@ static void __init u300_timer_init(void)
411 /* Use general purpose timer 2 as clock source */ 411 /* Use general purpose timer 2 as clock source */
412 if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC, 412 if (clocksource_mmio_init(U300_TIMER_APP_VBASE + U300_TIMER_APP_GPT2CC,
413 "GPT2", rate, 300, 32, clocksource_mmio_readl_up)) 413 "GPT2", rate, 300, 32, clocksource_mmio_readl_up))
414 printk(KERN_ERR "timer: failed to initialize clock " 414 pr_err("timer: failed to initialize U300 clock source\n");
415 "source %s\n", clocksource_u300_1mhz.name);
416 415
417 clockevents_calc_mult_shift(&clockevent_u300_1mhz, 416 clockevents_calc_mult_shift(&clockevent_u300_1mhz,
418 rate, APPTIMER_MIN_RANGE); 417 rate, APPTIMER_MIN_RANGE);
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index c3c417656bd9..4598b06c8c55 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -159,6 +159,9 @@ static void __init db8500_add_gpios(void)
159 /* No custom data yet */ 159 /* No custom data yet */
160 }; 160 };
161 161
162 if (cpu_is_u8500v2())
163 pdata.supports_sleepmode = true;
164
162 dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base), 165 dbx500_add_gpios(ARRAY_AND_SIZE(db8500_gpio_base),
163 IRQ_DB8500_GPIO0, &pdata); 166 IRQ_DB8500_GPIO0, &pdata);
164} 167}
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 285edcd2da2a..9e6b93b1a043 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -46,12 +46,6 @@ static struct map_desc v2m_io_desc[] __initdata = {
46 }, 46 },
47}; 47};
48 48
49static void __init v2m_init_early(void)
50{
51 ct_desc->init_early();
52 versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
53}
54
55static void __init v2m_timer_init(void) 49static void __init v2m_timer_init(void)
56{ 50{
57 u32 scctrl; 51 u32 scctrl;
@@ -365,6 +359,13 @@ static struct clk_lookup v2m_lookups[] = {
365 }, 359 },
366}; 360};
367 361
362static void __init v2m_init_early(void)
363{
364 ct_desc->init_early();
365 clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
366 versatile_sched_clock_init(MMIO_P2V(V2M_SYS_24MHZ), 24000000);
367}
368
368static void v2m_power_off(void) 369static void v2m_power_off(void)
369{ 370{
370 if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0)) 371 if (v2m_cfg_write(SYS_CFG_SHUTDOWN | SYS_CFG_SITE_MB, 0))
@@ -418,8 +419,6 @@ static void __init v2m_init(void)
418{ 419{
419 int i; 420 int i;
420 421
421 clkdev_add_table(v2m_lookups, ARRAY_SIZE(v2m_lookups));
422
423 platform_device_register(&v2m_pcie_i2c_device); 422 platform_device_register(&v2m_pcie_i2c_device);
424 platform_device_register(&v2m_ddc_i2c_device); 423 platform_device_register(&v2m_ddc_i2c_device);
425 platform_device_register(&v2m_flash_device); 424 platform_device_register(&v2m_flash_device);
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 8bfae964b133..b0ee9ba3cfab 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -24,7 +24,9 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
24 24
25/* 25/*
26 * We fork()ed a process, and we need a new context for the child 26 * We fork()ed a process, and we need a new context for the child
27 * to run in. 27 * to run in. We reserve version 0 for initial tasks so we will
28 * always allocate an ASID. The ASID 0 is reserved for the TTBR
29 * register changing sequence.
28 */ 30 */
29void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 31void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
30{ 32{
@@ -34,11 +36,8 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
34 36
35static void flush_context(void) 37static void flush_context(void)
36{ 38{
37 u32 ttb; 39 /* set the reserved ASID before flushing the TLB */
38 /* Copy TTBR1 into TTBR0 */ 40 asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
39 asm volatile("mrc p15, 0, %0, c2, c0, 1\n"
40 "mcr p15, 0, %0, c2, c0, 0"
41 : "=r" (ttb));
42 isb(); 41 isb();
43 local_flush_tlb_all(); 42 local_flush_tlb_all();
44 if (icache_is_vivt_asid_tagged()) { 43 if (icache_is_vivt_asid_tagged()) {
@@ -94,7 +93,7 @@ static void reset_context(void *info)
94 return; 93 return;
95 94
96 smp_rmb(); 95 smp_rmb();
97 asid = cpu_last_asid + cpu; 96 asid = cpu_last_asid + cpu + 1;
98 97
99 flush_context(); 98 flush_context();
100 set_mm_context(mm, asid); 99 set_mm_context(mm, asid);
@@ -144,13 +143,13 @@ void __new_context(struct mm_struct *mm)
144 * to start a new version and flush the TLB. 143 * to start a new version and flush the TLB.
145 */ 144 */
146 if (unlikely((asid & ~ASID_MASK) == 0)) { 145 if (unlikely((asid & ~ASID_MASK) == 0)) {
147 asid = cpu_last_asid + smp_processor_id(); 146 asid = cpu_last_asid + smp_processor_id() + 1;
148 flush_context(); 147 flush_context();
149#ifdef CONFIG_SMP 148#ifdef CONFIG_SMP
150 smp_wmb(); 149 smp_wmb();
151 smp_call_function(reset_context, NULL, 1); 150 smp_call_function(reset_context, NULL, 1);
152#endif 151#endif
153 cpu_last_asid += NR_CPUS - 1; 152 cpu_last_asid += NR_CPUS;
154 } 153 }
155 154
156 set_mm_context(mm, asid); 155 set_mm_context(mm, asid);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 2c2cce9cd8c8..c19571c40a21 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -331,6 +331,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
331#endif 331#endif
332#ifdef CONFIG_BLK_DEV_INITRD 332#ifdef CONFIG_BLK_DEV_INITRD
333 if (phys_initrd_size && 333 if (phys_initrd_size &&
334 !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
335 pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
336 phys_initrd_start, phys_initrd_size);
337 phys_initrd_start = phys_initrd_size = 0;
338 }
339 if (phys_initrd_size &&
334 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) { 340 memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
335 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n", 341 pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
336 phys_initrd_start, phys_initrd_size); 342 phys_initrd_start, phys_initrd_size);
@@ -635,7 +641,8 @@ void __init mem_init(void)
635 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 641 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
636 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 642 " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
637 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 643 " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
638 " .data : 0x%p" " - 0x%p" " (%4d kB)\n", 644 " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
645 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
639 646
640 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 647 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
641 (PAGE_SIZE)), 648 (PAGE_SIZE)),
@@ -657,7 +664,8 @@ void __init mem_init(void)
657 664
658 MLK_ROUNDUP(__init_begin, __init_end), 665 MLK_ROUNDUP(__init_begin, __init_end),
659 MLK_ROUNDUP(_text, _etext), 666 MLK_ROUNDUP(_text, _etext),
660 MLK_ROUNDUP(_sdata, _edata)); 667 MLK_ROUNDUP(_sdata, _edata),
668 MLK_ROUNDUP(__bss_start, __bss_stop));
661 669
662#undef MLK 670#undef MLK
663#undef MLM 671#undef MLM
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
index e4c165ca6696..537ffcb0646d 100644
--- a/arch/arm/mm/proc-arm7tdmi.S
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -146,7 +146,7 @@ __arm7tdmi_proc_info:
146 .long 0 146 .long 0
147 .long 0 147 .long 0
148 .long v4_cache_fns 148 .long v4_cache_fns
149 .size __arm7tdmi_proc_info, . - __arm7dmi_proc_info 149 .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
150 150
151 .type __triscenda7_proc_info, #object 151 .type __triscenda7_proc_info, #object
152__triscenda7_proc_info: 152__triscenda7_proc_info:
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
index 7b7ebd4d096d..546b54da1005 100644
--- a/arch/arm/mm/proc-arm9tdmi.S
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -116,7 +116,7 @@ __arm9tdmi_proc_info:
116 .long 0 116 .long 0
117 .long 0 117 .long 0
118 .long v4_cache_fns 118 .long v4_cache_fns
119 .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info 119 .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
120 120
121 .type __p2001_proc_info, #object 121 .type __p2001_proc_info, #object
122__p2001_proc_info: 122__p2001_proc_info:
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3b566ec83d3..3c3867850a30 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -108,16 +108,18 @@ ENTRY(cpu_v7_switch_mm)
108#ifdef CONFIG_ARM_ERRATA_430973 108#ifdef CONFIG_ARM_ERRATA_430973
109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB 109 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
110#endif 110#endif
111 mrc p15, 0, r2, c2, c0, 1 @ load TTB 1 111#ifdef CONFIG_ARM_ERRATA_754322
112 mcr p15, 0, r2, c2, c0, 0 @ into TTB 0 112 dsb
113#endif
114 mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
115 isb
1161: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
113 isb 117 isb
114#ifdef CONFIG_ARM_ERRATA_754322 118#ifdef CONFIG_ARM_ERRATA_754322
115 dsb 119 dsb
116#endif 120#endif
117 mcr p15, 0, r1, c13, c0, 1 @ set context ID 121 mcr p15, 0, r1, c13, c0, 1 @ set context ID
118 isb 122 isb
119 mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
120 isb
121#endif 123#endif
122 mov pc, lr 124 mov pc, lr
123ENDPROC(cpu_v7_switch_mm) 125ENDPROC(cpu_v7_switch_mm)
diff --git a/arch/arm/plat-mxc/devices/platform-imx-dma.c b/arch/arm/plat-mxc/devices/platform-imx-dma.c
index 3538b85ede91..b130f60ca6b7 100644
--- a/arch/arm/plat-mxc/devices/platform-imx-dma.c
+++ b/arch/arm/plat-mxc/devices/platform-imx-dma.c
@@ -139,7 +139,7 @@ static struct sdma_script_start_addrs addr_imx35_to2 = {
139#endif 139#endif
140 140
141#ifdef CONFIG_SOC_IMX51 141#ifdef CONFIG_SOC_IMX51
142static struct sdma_script_start_addrs addr_imx51_to1 = { 142static struct sdma_script_start_addrs addr_imx51 = {
143 .ap_2_ap_addr = 642, 143 .ap_2_ap_addr = 642,
144 .uart_2_mcu_addr = 817, 144 .uart_2_mcu_addr = 817,
145 .mcu_2_app_addr = 747, 145 .mcu_2_app_addr = 747,
@@ -196,7 +196,9 @@ static int __init imxXX_add_imx_dma(void)
196 196
197#if defined(CONFIG_SOC_IMX51) 197#if defined(CONFIG_SOC_IMX51)
198 if (cpu_is_mx51()) { 198 if (cpu_is_mx51()) {
199 imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51_to1; 199 int to_version = mx51_revision() >> 4;
200 imx51_imx_sdma_data.pdata.to_version = to_version;
201 imx51_imx_sdma_data.pdata.script_addrs = &addr_imx51;
200 ret = imx_add_imx_sdma(&imx51_imx_sdma_data); 202 ret = imx_add_imx_sdma(&imx51_imx_sdma_data);
201 } else 203 } else
202#endif 204#endif
diff --git a/arch/arm/plat-nomadik/include/plat/gpio.h b/arch/arm/plat-nomadik/include/plat/gpio.h
index ea19a5b2f227..d5d7e651269c 100644
--- a/arch/arm/plat-nomadik/include/plat/gpio.h
+++ b/arch/arm/plat-nomadik/include/plat/gpio.h
@@ -90,6 +90,7 @@ struct nmk_gpio_platform_data {
90 int num_gpio; 90 int num_gpio;
91 u32 (*get_secondary_status)(unsigned int bank); 91 u32 (*get_secondary_status)(unsigned int bank);
92 void (*set_ioforce)(bool enable); 92 void (*set_ioforce)(bool enable);
93 bool supports_sleepmode;
93}; 94};
94 95
95#endif /* __ASM_PLAT_GPIO_H */ 96#endif /* __ASM_PLAT_GPIO_H */
diff --git a/arch/arm/plat-omap/include/plat/flash.h b/arch/arm/plat-omap/include/plat/flash.h
index 3083195123ea..0d88499b79e9 100644
--- a/arch/arm/plat-omap/include/plat/flash.h
+++ b/arch/arm/plat-omap/include/plat/flash.h
@@ -11,6 +11,7 @@
11 11
12#include <linux/mtd/map.h> 12#include <linux/mtd/map.h>
13 13
14struct platform_device;
14extern void omap1_set_vpp(struct platform_device *pdev, int enable); 15extern void omap1_set_vpp(struct platform_device *pdev, int enable);
15 16
16#endif 17#endif
diff --git a/arch/arm/plat-omap/include/plat/iovmm.h b/arch/arm/plat-omap/include/plat/iovmm.h
index 32a2f6c4d39e..e992b9655fbc 100644
--- a/arch/arm/plat-omap/include/plat/iovmm.h
+++ b/arch/arm/plat-omap/include/plat/iovmm.h
@@ -29,9 +29,6 @@ struct iovm_struct {
29 * lower 16 bit is used for h/w and upper 16 bit is for s/w. 29 * lower 16 bit is used for h/w and upper 16 bit is for s/w.
30 */ 30 */
31#define IOVMF_SW_SHIFT 16 31#define IOVMF_SW_SHIFT 16
32#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT)
33#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1)
34#define IOVMF_SW_MASK (~IOVMF_HW_MASK)UL
35 32
36/* 33/*
37 * iovma: h/w flags derived from cam and ram attribute 34 * iovma: h/w flags derived from cam and ram attribute
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index f38fef9f1310..c7b874186c27 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -101,6 +101,9 @@ struct omap_mmc_platform_data {
101 /* If using power_saving and the MMC power is not to go off */ 101 /* If using power_saving and the MMC power is not to go off */
102 unsigned no_off:1; 102 unsigned no_off:1;
103 103
104 /* eMMC does not handle power off when not in sleep state */
105 unsigned no_regulator_off_init:1;
106
104 /* Regulator off remapped to sleep */ 107 /* Regulator off remapped to sleep */
105 unsigned vcc_aux_disable_is_sleep:1; 108 unsigned vcc_aux_disable_is_sleep:1;
106 109
diff --git a/arch/arm/plat-omap/iovmm.c b/arch/arm/plat-omap/iovmm.c
index 51ef43e8def6..83a37c54342f 100644
--- a/arch/arm/plat-omap/iovmm.c
+++ b/arch/arm/plat-omap/iovmm.c
@@ -648,7 +648,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
648 return PTR_ERR(va); 648 return PTR_ERR(va);
649 } 649 }
650 650
651 flags &= IOVMF_HW_MASK;
652 flags |= IOVMF_DISCONT; 651 flags |= IOVMF_DISCONT;
653 flags |= IOVMF_MMIO; 652 flags |= IOVMF_MMIO;
654 653
@@ -706,7 +705,6 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
706 if (!va) 705 if (!va)
707 return -ENOMEM; 706 return -ENOMEM;
708 707
709 flags &= IOVMF_HW_MASK;
710 flags |= IOVMF_DISCONT; 708 flags |= IOVMF_DISCONT;
711 flags |= IOVMF_ALLOC; 709 flags |= IOVMF_ALLOC;
712 710
@@ -795,7 +793,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
795 if (!va) 793 if (!va)
796 return -ENOMEM; 794 return -ENOMEM;
797 795
798 flags &= IOVMF_HW_MASK;
799 flags |= IOVMF_LINEAR; 796 flags |= IOVMF_LINEAR;
800 flags |= IOVMF_MMIO; 797 flags |= IOVMF_MMIO;
801 798
@@ -853,7 +850,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
853 return -ENOMEM; 850 return -ENOMEM;
854 pa = virt_to_phys(va); 851 pa = virt_to_phys(va);
855 852
856 flags &= IOVMF_HW_MASK;
857 flags |= IOVMF_LINEAR; 853 flags |= IOVMF_LINEAR;
858 flags |= IOVMF_ALLOC; 854 flags |= IOVMF_ALLOC;
859 855
diff --git a/arch/arm/plat-omap/omap_device.c b/arch/arm/plat-omap/omap_device.c
index a37b8eb65b76..49fc0df0c21f 100644
--- a/arch/arm/plat-omap/omap_device.c
+++ b/arch/arm/plat-omap/omap_device.c
@@ -84,6 +84,7 @@
84#include <linux/io.h> 84#include <linux/io.h>
85#include <linux/clk.h> 85#include <linux/clk.h>
86#include <linux/clkdev.h> 86#include <linux/clkdev.h>
87#include <linux/pm_runtime.h>
87 88
88#include <plat/omap_device.h> 89#include <plat/omap_device.h>
89#include <plat/omap_hwmod.h> 90#include <plat/omap_hwmod.h>
@@ -539,20 +540,34 @@ int omap_early_device_register(struct omap_device *od)
539static int _od_runtime_suspend(struct device *dev) 540static int _od_runtime_suspend(struct device *dev)
540{ 541{
541 struct platform_device *pdev = to_platform_device(dev); 542 struct platform_device *pdev = to_platform_device(dev);
543 int ret;
544
545 ret = pm_generic_runtime_suspend(dev);
546
547 if (!ret)
548 omap_device_idle(pdev);
549
550 return ret;
551}
542 552
543 return omap_device_idle(pdev); 553static int _od_runtime_idle(struct device *dev)
554{
555 return pm_generic_runtime_idle(dev);
544} 556}
545 557
546static int _od_runtime_resume(struct device *dev) 558static int _od_runtime_resume(struct device *dev)
547{ 559{
548 struct platform_device *pdev = to_platform_device(dev); 560 struct platform_device *pdev = to_platform_device(dev);
549 561
550 return omap_device_enable(pdev); 562 omap_device_enable(pdev);
563
564 return pm_generic_runtime_resume(dev);
551} 565}
552 566
553static struct dev_power_domain omap_device_power_domain = { 567static struct dev_power_domain omap_device_power_domain = {
554 .ops = { 568 .ops = {
555 .runtime_suspend = _od_runtime_suspend, 569 .runtime_suspend = _od_runtime_suspend,
570 .runtime_idle = _od_runtime_idle,
556 .runtime_resume = _od_runtime_resume, 571 .runtime_resume = _od_runtime_resume,
557 USE_PLATFORM_PM_SLEEP_OPS 572 USE_PLATFORM_PM_SLEEP_OPS
558 } 573 }
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a3f50b34a90d..6af3d0b1f8d0 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -166,7 +166,7 @@ static void __init omap_detect_sram(void)
166 else if (cpu_is_omap1611()) 166 else if (cpu_is_omap1611())
167 omap_sram_size = SZ_256K; 167 omap_sram_size = SZ_256K;
168 else { 168 else {
169 printk(KERN_ERR "Could not detect SRAM size\n"); 169 pr_err("Could not detect SRAM size\n");
170 omap_sram_size = 0x4000; 170 omap_sram_size = 0x4000;
171 } 171 }
172 } 172 }
@@ -221,10 +221,10 @@ static void __init omap_map_sram(void)
221 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE); 221 omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
222 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc)); 222 iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
223 223
224 printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n", 224 pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
225 __pfn_to_phys(omap_sram_io_desc[0].pfn), 225 (long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),
226 omap_sram_io_desc[0].virtual, 226 omap_sram_io_desc[0].virtual,
227 omap_sram_io_desc[0].length); 227 omap_sram_io_desc[0].length);
228 228
229 /* 229 /*
230 * Normally devicemaps_init() would flush caches and tlb after 230 * Normally devicemaps_init() would flush caches and tlb after
@@ -252,7 +252,7 @@ static void __init omap_map_sram(void)
252void *omap_sram_push_address(unsigned long size) 252void *omap_sram_push_address(unsigned long size)
253{ 253{
254 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) { 254 if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
255 printk(KERN_ERR "Not enough space in SRAM\n"); 255 pr_err("Not enough space in SRAM\n");
256 return NULL; 256 return NULL;
257 } 257 }
258 258
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index c10d10c56e2e..2abf9660bc6c 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -1199,7 +1199,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
1199 1199
1200#ifdef CONFIG_PM 1200#ifdef CONFIG_PM
1201 1201
1202static void s3c2410_dma_suspend_chan(s3c2410_dma_chan *cp) 1202static void s3c2410_dma_suspend_chan(struct s3c2410_dma_chan *cp)
1203{ 1203{
1204 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number); 1204 printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);
1205 1205
diff --git a/arch/arm/plat-s3c24xx/irq.c b/arch/arm/plat-s3c24xx/irq.c
index 9aee7e1668b1..fc8c5f89954d 100644
--- a/arch/arm/plat-s3c24xx/irq.c
+++ b/arch/arm/plat-s3c24xx/irq.c
@@ -23,6 +23,7 @@
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/ioport.h> 24#include <linux/ioport.h>
25#include <linux/sysdev.h> 25#include <linux/sysdev.h>
26#include <linux/syscore_ops.h>
26 27
27#include <asm/irq.h> 28#include <asm/irq.h>
28#include <asm/mach/irq.h> 29#include <asm/mach/irq.h>
@@ -668,3 +669,8 @@ void __init s3c24xx_init_irq(void)
668 669
669 irqdbf("s3c2410: registered interrupt handlers\n"); 670 irqdbf("s3c2410: registered interrupt handlers\n");
670} 671}
672
673struct syscore_ops s3c24xx_irq_syscore_ops = {
674 .suspend = s3c24xx_irq_suspend,
675 .resume = s3c24xx_irq_resume,
676};
diff --git a/arch/arm/plat-s5p/dev-onenand.c b/arch/arm/plat-s5p/dev-onenand.c
index 6db926202caa..20336c8f2479 100644
--- a/arch/arm/plat-s5p/dev-onenand.c
+++ b/arch/arm/plat-s5p/dev-onenand.c
@@ -15,8 +15,6 @@
15 15
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18#include <linux/mtd/mtd.h>
19#include <linux/mtd/onenand.h>
20 18
21#include <mach/irqs.h> 19#include <mach/irqs.h>
22#include <mach/map.h> 20#include <mach/map.h>
@@ -45,13 +43,3 @@ struct platform_device s5p_device_onenand = {
45 .num_resources = ARRAY_SIZE(s5p_onenand_resources), 43 .num_resources = ARRAY_SIZE(s5p_onenand_resources),
46 .resource = s5p_onenand_resources, 44 .resource = s5p_onenand_resources,
47}; 45};
48
49void s5p_onenand_set_platdata(struct onenand_platform_data *pdata)
50{
51 struct onenand_platform_data *pd;
52
53 pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
54 if (!pd)
55 printk(KERN_ERR "%s: no memory for platform data\n", __func__);
56 s5p_device_onenand.dev.platform_data = pd;
57}
diff --git a/arch/arm/plat-s5p/include/plat/map-s5p.h b/arch/arm/plat-s5p/include/plat/map-s5p.h
index a6c3d327ce72..d973d39666a3 100644
--- a/arch/arm/plat-s5p/include/plat/map-s5p.h
+++ b/arch/arm/plat-s5p/include/plat/map-s5p.h
@@ -39,7 +39,7 @@
39#define S5P_VA_TWD S5P_VA_COREPERI(0x600) 39#define S5P_VA_TWD S5P_VA_COREPERI(0x600)
40#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000) 40#define S5P_VA_GIC_DIST S5P_VA_COREPERI(0x1000)
41 41
42#define S5P_VA_USB_HSPHY S3C_ADDR(0x02900000) 42#define S3C_VA_USB_HSPHY S3C_ADDR(0x02900000)
43 43
44#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000)) 44#define VA_VIC(x) (S3C_VA_IRQ + ((x) * 0x10000))
45#define VA_VIC0 VA_VIC(0) 45#define VA_VIC0 VA_VIC(0)
diff --git a/arch/arm/plat-samsung/dev-onenand.c b/arch/arm/plat-samsung/dev-onenand.c
index 45ec73287d8c..f54ae71f0cd2 100644
--- a/arch/arm/plat-samsung/dev-onenand.c
+++ b/arch/arm/plat-samsung/dev-onenand.c
@@ -13,8 +13,6 @@
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/mtd/mtd.h>
17#include <linux/mtd/onenand.h>
18 16
19#include <mach/irqs.h> 17#include <mach/irqs.h>
20#include <mach/map.h> 18#include <mach/map.h>
@@ -43,13 +41,3 @@ struct platform_device s3c_device_onenand = {
43 .num_resources = ARRAY_SIZE(s3c_onenand_resources), 41 .num_resources = ARRAY_SIZE(s3c_onenand_resources),
44 .resource = s3c_onenand_resources, 42 .resource = s3c_onenand_resources,
45}; 43};
46
47void s3c_onenand_set_platdata(struct onenand_platform_data *pdata)
48{
49 struct onenand_platform_data *pd;
50
51 pd = kmemdup(pdata, sizeof(struct onenand_platform_data), GFP_KERNEL);
52 if (!pd)
53 printk(KERN_ERR "%s: no memory for platform data\n", __func__);
54 s3c_device_onenand.dev.platform_data = pd;
55}
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index b61b8ee7cc52..4af108ff4112 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -75,10 +75,8 @@ extern struct platform_device s5pc100_device_spi1;
75extern struct platform_device s5pc100_device_spi2; 75extern struct platform_device s5pc100_device_spi2;
76extern struct platform_device s5pv210_device_spi0; 76extern struct platform_device s5pv210_device_spi0;
77extern struct platform_device s5pv210_device_spi1; 77extern struct platform_device s5pv210_device_spi1;
78extern struct platform_device s5p6440_device_spi0; 78extern struct platform_device s5p64x0_device_spi0;
79extern struct platform_device s5p6440_device_spi1; 79extern struct platform_device s5p64x0_device_spi1;
80extern struct platform_device s5p6450_device_spi0;
81extern struct platform_device s5p6450_device_spi1;
82 80
83extern struct platform_device s3c_device_hwmon; 81extern struct platform_device s3c_device_hwmon;
84 82
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index 6f9ca56de1f6..a06bfccc2840 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 7eece0af34c9..d8f1fe80d210 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 387eb9d6e423..d4c5b19ec950 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 19f6ceeeff7b..77ca4f905d2c 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12# CONFIG_SLUB_DEBUG is not set 13# CONFIG_SLUB_DEBUG is not set
@@ -109,7 +110,7 @@ CONFIG_LEDS_GPIO=y
109CONFIG_LEDS_TRIGGERS=y 110CONFIG_LEDS_TRIGGERS=y
110CONFIG_LEDS_TRIGGER_TIMER=y 111CONFIG_LEDS_TRIGGER_TIMER=y
111CONFIG_LEDS_TRIGGER_HEARTBEAT=y 112CONFIG_LEDS_TRIGGER_HEARTBEAT=y
112CONFIG_RTC_CLASS=m 113CONFIG_RTC_CLASS=y
113CONFIG_RTC_DRV_S35390A=m 114CONFIG_RTC_DRV_S35390A=m
114CONFIG_RTC_DRV_AT32AP700X=m 115CONFIG_RTC_DRV_AT32AP700X=m
115CONFIG_DMADEVICES=y 116CONFIG_DMADEVICES=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index f0fe237133a9..6e0dca4d3131 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index e4a7c1dc8380..7f2a344a5fa8 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index 6f37f70c2c37..085eeba88f67 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index 4fb01f5ab42f..d1a887e64055 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index 9faaf9b900f2..956f2819ad45 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 3d2a5d85f970..40c69f38c61a 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 1ed8f22d4fe2..511eb8af356d 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -5,6 +5,7 @@ CONFIG_POSIX_MQUEUE=y
5CONFIG_LOG_BUF_SHIFT=14 5CONFIG_LOG_BUF_SHIFT=14
6CONFIG_RELAY=y 6CONFIG_RELAY=y
7CONFIG_BLK_DEV_INITRD=y 7CONFIG_BLK_DEV_INITRD=y
8CONFIG_CC_OPTIMIZE_FOR_SIZE=y
8# CONFIG_SYSCTL_SYSCALL is not set 9# CONFIG_SYSCTL_SYSCALL is not set
9# CONFIG_BASE_FULL is not set 10# CONFIG_BASE_FULL is not set
10# CONFIG_COMPAT_BRK is not set 11# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index aeadc955db32..19973b06170c 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -6,6 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14
6CONFIG_SYSFS_DEPRECATED_V2=y 6CONFIG_SYSFS_DEPRECATED_V2=y
7CONFIG_RELAY=y 7CONFIG_RELAY=y
8CONFIG_BLK_DEV_INITRD=y 8CONFIG_BLK_DEV_INITRD=y
9CONFIG_CC_OPTIMIZE_FOR_SIZE=y
9# CONFIG_SYSCTL_SYSCALL is not set 10# CONFIG_SYSCTL_SYSCALL is not set
10# CONFIG_BASE_FULL is not set 11# CONFIG_BASE_FULL is not set
11# CONFIG_COMPAT_BRK is not set 12# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index 1692beeb7ed3..6f45681196d1 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12# CONFIG_COMPAT_BRK is not set 13# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 8b670a6530bf..3befab966827 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12CONFIG_MODULES=y 13CONFIG_MODULES=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 5a51f2e7ffb9..1bee51f22154 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -7,6 +7,7 @@ CONFIG_BSD_PROCESS_ACCT_V3=y
7CONFIG_LOG_BUF_SHIFT=14 7CONFIG_LOG_BUF_SHIFT=14
8CONFIG_SYSFS_DEPRECATED_V2=y 8CONFIG_SYSFS_DEPRECATED_V2=y
9CONFIG_BLK_DEV_INITRD=y 9CONFIG_BLK_DEV_INITRD=y
10CONFIG_CC_OPTIMIZE_FOR_SIZE=y
10# CONFIG_SYSCTL_SYSCALL is not set 11# CONFIG_SYSCTL_SYSCALL is not set
11# CONFIG_BASE_FULL is not set 12# CONFIG_BASE_FULL is not set
12# CONFIG_COMPAT_BRK is not set 13# CONFIG_COMPAT_BRK is not set
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h
index 49a88f5a9d2f..108502bc6770 100644
--- a/arch/avr32/include/asm/processor.h
+++ b/arch/avr32/include/asm/processor.h
@@ -131,7 +131,6 @@ struct thread_struct {
131 */ 131 */
132#define start_thread(regs, new_pc, new_sp) \ 132#define start_thread(regs, new_pc, new_sp) \
133 do { \ 133 do { \
134 set_fs(USER_DS); \
135 memset(regs, 0, sizeof(*regs)); \ 134 memset(regs, 0, sizeof(*regs)); \
136 regs->sr = MODE_USER; \ 135 regs->sr = MODE_USER; \
137 regs->pc = new_pc & ~1; \ 136 regs->pc = new_pc & ~1; \
diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c
index aa677e2a3823..7fbf0dcb9afe 100644
--- a/arch/avr32/mach-at32ap/at32ap700x.c
+++ b/arch/avr32/mach-at32ap/at32ap700x.c
@@ -1043,8 +1043,9 @@ void __init at32_map_usart(unsigned int hw_id, unsigned int line, int flags)
1043 data->regs = (void __iomem *)pdev->resource[0].start; 1043 data->regs = (void __iomem *)pdev->resource[0].start;
1044 } 1044 }
1045 1045
1046 pdev->id = line;
1046 pdata = pdev->dev.platform_data; 1047 pdata = pdev->dev.platform_data;
1047 pdata->num = portnr; 1048 pdata->num = line;
1048 at32_usarts[line] = pdev; 1049 at32_usarts[line] = pdev;
1049} 1050}
1050 1051
diff --git a/arch/avr32/mach-at32ap/include/mach/cpu.h b/arch/avr32/mach-at32ap/include/mach/cpu.h
index 9c96a130f3a8..8181293115e4 100644
--- a/arch/avr32/mach-at32ap/include/mach/cpu.h
+++ b/arch/avr32/mach-at32ap/include/mach/cpu.h
@@ -31,8 +31,20 @@
31#define cpu_is_at91sam9263() (0) 31#define cpu_is_at91sam9263() (0)
32#define cpu_is_at91sam9rl() (0) 32#define cpu_is_at91sam9rl() (0)
33#define cpu_is_at91cap9() (0) 33#define cpu_is_at91cap9() (0)
34#define cpu_is_at91cap9_revB() (0)
35#define cpu_is_at91cap9_revC() (0)
34#define cpu_is_at91sam9g10() (0) 36#define cpu_is_at91sam9g10() (0)
37#define cpu_is_at91sam9g20() (0)
35#define cpu_is_at91sam9g45() (0) 38#define cpu_is_at91sam9g45() (0)
36#define cpu_is_at91sam9g45es() (0) 39#define cpu_is_at91sam9g45es() (0)
40#define cpu_is_at91sam9m10() (0)
41#define cpu_is_at91sam9g46() (0)
42#define cpu_is_at91sam9m11() (0)
43#define cpu_is_at91sam9x5() (0)
44#define cpu_is_at91sam9g15() (0)
45#define cpu_is_at91sam9g35() (0)
46#define cpu_is_at91sam9x35() (0)
47#define cpu_is_at91sam9g25() (0)
48#define cpu_is_at91sam9x25() (0)
37 49
38#endif /* __ASM_ARCH_CPU_H */ 50#endif /* __ASM_ARCH_CPU_H */
diff --git a/arch/avr32/mach-at32ap/intc.c b/arch/avr32/mach-at32ap/intc.c
index 3e3646186c9f..c9ac2f8e8f64 100644
--- a/arch/avr32/mach-at32ap/intc.c
+++ b/arch/avr32/mach-at32ap/intc.c
@@ -167,14 +167,12 @@ static int intc_suspend(void)
167 return 0; 167 return 0;
168} 168}
169 169
170static int intc_resume(void) 170static void intc_resume(void)
171{ 171{
172 int i; 172 int i;
173 173
174 for (i = 0; i < 64; i++) 174 for (i = 0; i < 64; i++)
175 intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]); 175 intc_writel(&intc0, INTPR0 + 4 * i, intc0.saved_ipr[i]);
176
177 return 0;
178} 176}
179#else 177#else
180#define intc_suspend NULL 178#define intc_suspend NULL
diff --git a/arch/blackfin/configs/CM-BF548_defconfig b/arch/blackfin/configs/CM-BF548_defconfig
index 31d954216c05..9f1d08401fca 100644
--- a/arch/blackfin/configs/CM-BF548_defconfig
+++ b/arch/blackfin/configs/CM-BF548_defconfig
@@ -112,7 +112,7 @@ CONFIG_USB_G_SERIAL=m
112CONFIG_USB_G_PRINTER=m 112CONFIG_USB_G_PRINTER=m
113CONFIG_MMC=m 113CONFIG_MMC=m
114CONFIG_SDH_BFIN=m 114CONFIG_SDH_BFIN=m
115CONFIG_RTC_CLASS=m 115CONFIG_RTC_CLASS=y
116CONFIG_RTC_DRV_BFIN=m 116CONFIG_RTC_DRV_BFIN=m
117CONFIG_EXT2_FS=m 117CONFIG_EXT2_FS=m
118# CONFIG_DNOTIFY is not set 118# CONFIG_DNOTIFY is not set
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu
index fc98f9b9d4d2..b004dc1b1710 100644
--- a/arch/m68k/Kconfig.nommu
+++ b/arch/m68k/Kconfig.nommu
@@ -14,6 +14,33 @@ config GENERIC_CLOCKEVENTS
14 bool 14 bool
15 default n 15 default n
16 16
17config M68000
18 bool
19 help
20 The Freescale (was Motorola) 68000 CPU is the first generation of
21 the well known M68K family of processors. The CPU core as well as
22 being available as a stand alone CPU was also used in many
23 System-On-Chip devices (eg 68328, 68302, etc). It does not contain
24 a paging MMU.
25
26config MCPU32
27 bool
28 help
29 The Freescale (was then Motorola) CPU32 is a CPU core that is
30 based on the 68020 processor. For the most part it is used in
31 System-On-Chip parts, and does not contain a paging MMU.
32
33config COLDFIRE
34 bool
35 select GENERIC_GPIO
36 select ARCH_REQUIRE_GPIOLIB
37 help
 38	  The Freescale ColdFire family of processors is a modern derivative
39 of the 68000 processor family. They are mainly targeted at embedded
40 applications, and are all System-On-Chip (SOC) devices, as opposed
41 to stand alone CPUs. They implement a subset of the original 68000
42 processor instruction set.
43
17config COLDFIRE_SW_A7 44config COLDFIRE_SW_A7
18 bool 45 bool
19 default n 46 default n
@@ -36,26 +63,31 @@ choice
36 63
37config M68328 64config M68328
38 bool "MC68328" 65 bool "MC68328"
66 select M68000
39 help 67 help
40 Motorola 68328 processor support. 68 Motorola 68328 processor support.
41 69
42config M68EZ328 70config M68EZ328
43 bool "MC68EZ328" 71 bool "MC68EZ328"
72 select M68000
44 help 73 help
 45	  Motorola 68EZ328 processor support. 74	  Motorola 68EZ328 processor support.
46 75
47config M68VZ328 76config M68VZ328
48 bool "MC68VZ328" 77 bool "MC68VZ328"
78 select M68000
49 help 79 help
50 Motorola 68VZ328 processor support. 80 Motorola 68VZ328 processor support.
51 81
52config M68360 82config M68360
53 bool "MC68360" 83 bool "MC68360"
84 select MCPU32
54 help 85 help
55 Motorola 68360 processor support. 86 Motorola 68360 processor support.
56 87
57config M5206 88config M5206
58 bool "MCF5206" 89 bool "MCF5206"
90 select COLDFIRE
59 select COLDFIRE_SW_A7 91 select COLDFIRE_SW_A7
60 select HAVE_MBAR 92 select HAVE_MBAR
61 help 93 help
@@ -63,6 +95,7 @@ config M5206
63 95
64config M5206e 96config M5206e
65 bool "MCF5206e" 97 bool "MCF5206e"
98 select COLDFIRE
66 select COLDFIRE_SW_A7 99 select COLDFIRE_SW_A7
67 select HAVE_MBAR 100 select HAVE_MBAR
68 help 101 help
@@ -70,6 +103,7 @@ config M5206e
70 103
71config M520x 104config M520x
72 bool "MCF520x" 105 bool "MCF520x"
106 select COLDFIRE
73 select GENERIC_CLOCKEVENTS 107 select GENERIC_CLOCKEVENTS
74 select HAVE_CACHE_SPLIT 108 select HAVE_CACHE_SPLIT
75 help 109 help
@@ -77,6 +111,7 @@ config M520x
77 111
78config M523x 112config M523x
79 bool "MCF523x" 113 bool "MCF523x"
114 select COLDFIRE
80 select GENERIC_CLOCKEVENTS 115 select GENERIC_CLOCKEVENTS
81 select HAVE_CACHE_SPLIT 116 select HAVE_CACHE_SPLIT
82 select HAVE_IPSBAR 117 select HAVE_IPSBAR
@@ -85,6 +120,7 @@ config M523x
85 120
86config M5249 121config M5249
87 bool "MCF5249" 122 bool "MCF5249"
123 select COLDFIRE
88 select COLDFIRE_SW_A7 124 select COLDFIRE_SW_A7
89 select HAVE_MBAR 125 select HAVE_MBAR
90 help 126 help
@@ -92,6 +128,7 @@ config M5249
92 128
93config M5271 129config M5271
94 bool "MCF5271" 130 bool "MCF5271"
131 select COLDFIRE
95 select HAVE_CACHE_SPLIT 132 select HAVE_CACHE_SPLIT
96 select HAVE_IPSBAR 133 select HAVE_IPSBAR
97 help 134 help
@@ -99,6 +136,7 @@ config M5271
99 136
100config M5272 137config M5272
101 bool "MCF5272" 138 bool "MCF5272"
139 select COLDFIRE
102 select COLDFIRE_SW_A7 140 select COLDFIRE_SW_A7
103 select HAVE_MBAR 141 select HAVE_MBAR
104 help 142 help
@@ -106,6 +144,7 @@ config M5272
106 144
107config M5275 145config M5275
108 bool "MCF5275" 146 bool "MCF5275"
147 select COLDFIRE
109 select HAVE_CACHE_SPLIT 148 select HAVE_CACHE_SPLIT
110 select HAVE_IPSBAR 149 select HAVE_IPSBAR
111 help 150 help
@@ -113,6 +152,7 @@ config M5275
113 152
114config M528x 153config M528x
115 bool "MCF528x" 154 bool "MCF528x"
155 select COLDFIRE
116 select GENERIC_CLOCKEVENTS 156 select GENERIC_CLOCKEVENTS
117 select HAVE_CACHE_SPLIT 157 select HAVE_CACHE_SPLIT
118 select HAVE_IPSBAR 158 select HAVE_IPSBAR
@@ -121,6 +161,7 @@ config M528x
121 161
122config M5307 162config M5307
123 bool "MCF5307" 163 bool "MCF5307"
164 select COLDFIRE
124 select COLDFIRE_SW_A7 165 select COLDFIRE_SW_A7
125 select HAVE_CACHE_CB 166 select HAVE_CACHE_CB
126 select HAVE_MBAR 167 select HAVE_MBAR
@@ -129,12 +170,14 @@ config M5307
129 170
130config M532x 171config M532x
131 bool "MCF532x" 172 bool "MCF532x"
173 select COLDFIRE
132 select HAVE_CACHE_CB 174 select HAVE_CACHE_CB
133 help 175 help
134 Freescale (Motorola) ColdFire 532x processor support. 176 Freescale (Motorola) ColdFire 532x processor support.
135 177
136config M5407 178config M5407
137 bool "MCF5407" 179 bool "MCF5407"
180 select COLDFIRE
138 select COLDFIRE_SW_A7 181 select COLDFIRE_SW_A7
139 select HAVE_CACHE_CB 182 select HAVE_CACHE_CB
140 select HAVE_MBAR 183 select HAVE_MBAR
@@ -143,6 +186,7 @@ config M5407
143 186
144config M547x 187config M547x
145 bool "MCF547x" 188 bool "MCF547x"
189 select COLDFIRE
146 select HAVE_CACHE_CB 190 select HAVE_CACHE_CB
147 select HAVE_MBAR 191 select HAVE_MBAR
148 help 192 help
@@ -150,6 +194,7 @@ config M547x
150 194
151config M548x 195config M548x
152 bool "MCF548x" 196 bool "MCF548x"
197 select COLDFIRE
153 select HAVE_CACHE_CB 198 select HAVE_CACHE_CB
154 select HAVE_MBAR 199 select HAVE_MBAR
155 help 200 help
@@ -168,13 +213,6 @@ config M54xx
168 depends on (M548x || M547x) 213 depends on (M548x || M547x)
169 default y 214 default y
170 215
171config COLDFIRE
172 bool
173 depends on (M5206 || M5206e || M520x || M523x || M5249 || M527x || M5272 || M528x || M5307 || M532x || M5407 || M54xx)
174 select GENERIC_GPIO
175 select ARCH_REQUIRE_GPIOLIB
176 default y
177
178config CLOCK_SET 216config CLOCK_SET
179 bool "Enable setting the CPU clock frequency" 217 bool "Enable setting the CPU clock frequency"
180 default n 218 default n
diff --git a/arch/m68k/kernel/m68k_ksyms.c b/arch/m68k/kernel/m68k_ksyms.c
index 33f82769547c..1b7a14d1a000 100644
--- a/arch/m68k/kernel/m68k_ksyms.c
+++ b/arch/m68k/kernel/m68k_ksyms.c
@@ -14,8 +14,7 @@ EXPORT_SYMBOL(__ashrdi3);
14EXPORT_SYMBOL(__lshrdi3); 14EXPORT_SYMBOL(__lshrdi3);
15EXPORT_SYMBOL(__muldi3); 15EXPORT_SYMBOL(__muldi3);
16 16
17#if !defined(__mc68020__) && !defined(__mc68030__) && \ 17#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
18 !defined(__mc68040__) && !defined(__mc68060__) && !defined(__mcpu32__)
19/* 18/*
20 * Simpler 68k and ColdFire parts also need a few other gcc functions. 19 * Simpler 68k and ColdFire parts also need a few other gcc functions.
21 */ 20 */
diff --git a/arch/m68k/kernel/vmlinux.lds_no.S b/arch/m68k/kernel/vmlinux.lds_no.S
index f4d715cdca0e..7dc4087a9545 100644
--- a/arch/m68k/kernel/vmlinux.lds_no.S
+++ b/arch/m68k/kernel/vmlinux.lds_no.S
@@ -84,52 +84,52 @@ SECTIONS {
84 /* Kernel symbol table: Normal symbols */ 84 /* Kernel symbol table: Normal symbols */
85 . = ALIGN(4); 85 . = ALIGN(4);
86 __start___ksymtab = .; 86 __start___ksymtab = .;
87 *(__ksymtab) 87 *(SORT(___ksymtab+*))
88 __stop___ksymtab = .; 88 __stop___ksymtab = .;
89 89
90 /* Kernel symbol table: GPL-only symbols */ 90 /* Kernel symbol table: GPL-only symbols */
91 __start___ksymtab_gpl = .; 91 __start___ksymtab_gpl = .;
92 *(__ksymtab_gpl) 92 *(SORT(___ksymtab_gpl+*))
93 __stop___ksymtab_gpl = .; 93 __stop___ksymtab_gpl = .;
94 94
95 /* Kernel symbol table: Normal unused symbols */ 95 /* Kernel symbol table: Normal unused symbols */
96 __start___ksymtab_unused = .; 96 __start___ksymtab_unused = .;
97 *(__ksymtab_unused) 97 *(SORT(___ksymtab_unused+*))
98 __stop___ksymtab_unused = .; 98 __stop___ksymtab_unused = .;
99 99
100 /* Kernel symbol table: GPL-only unused symbols */ 100 /* Kernel symbol table: GPL-only unused symbols */
101 __start___ksymtab_unused_gpl = .; 101 __start___ksymtab_unused_gpl = .;
102 *(__ksymtab_unused_gpl) 102 *(SORT(___ksymtab_unused_gpl+*))
103 __stop___ksymtab_unused_gpl = .; 103 __stop___ksymtab_unused_gpl = .;
104 104
105 /* Kernel symbol table: GPL-future symbols */ 105 /* Kernel symbol table: GPL-future symbols */
106 __start___ksymtab_gpl_future = .; 106 __start___ksymtab_gpl_future = .;
107 *(__ksymtab_gpl_future) 107 *(SORT(___ksymtab_gpl_future+*))
108 __stop___ksymtab_gpl_future = .; 108 __stop___ksymtab_gpl_future = .;
109 109
110 /* Kernel symbol table: Normal symbols */ 110 /* Kernel symbol table: Normal symbols */
111 __start___kcrctab = .; 111 __start___kcrctab = .;
112 *(__kcrctab) 112 *(SORT(___kcrctab+*))
113 __stop___kcrctab = .; 113 __stop___kcrctab = .;
114 114
115 /* Kernel symbol table: GPL-only symbols */ 115 /* Kernel symbol table: GPL-only symbols */
116 __start___kcrctab_gpl = .; 116 __start___kcrctab_gpl = .;
117 *(__kcrctab_gpl) 117 *(SORT(___kcrctab_gpl+*))
118 __stop___kcrctab_gpl = .; 118 __stop___kcrctab_gpl = .;
119 119
120 /* Kernel symbol table: Normal unused symbols */ 120 /* Kernel symbol table: Normal unused symbols */
121 __start___kcrctab_unused = .; 121 __start___kcrctab_unused = .;
122 *(__kcrctab_unused) 122 *(SORT(___kcrctab_unused+*))
123 __stop___kcrctab_unused = .; 123 __stop___kcrctab_unused = .;
124 124
125 /* Kernel symbol table: GPL-only unused symbols */ 125 /* Kernel symbol table: GPL-only unused symbols */
126 __start___kcrctab_unused_gpl = .; 126 __start___kcrctab_unused_gpl = .;
127 *(__kcrctab_unused_gpl) 127 *(SORT(___kcrctab_unused_gpl+*))
128 __stop___kcrctab_unused_gpl = .; 128 __stop___kcrctab_unused_gpl = .;
129 129
130 /* Kernel symbol table: GPL-future symbols */ 130 /* Kernel symbol table: GPL-future symbols */
131 __start___kcrctab_gpl_future = .; 131 __start___kcrctab_gpl_future = .;
132 *(__kcrctab_gpl_future) 132 *(SORT(___kcrctab_gpl_future+*))
133 __stop___kcrctab_gpl_future = .; 133 __stop___kcrctab_gpl_future = .;
134 134
135 /* Kernel symbol table: strings */ 135 /* Kernel symbol table: strings */
diff --git a/arch/m68k/lib/memcpy.c b/arch/m68k/lib/memcpy.c
index 62182c81e91c..064889316974 100644
--- a/arch/m68k/lib/memcpy.c
+++ b/arch/m68k/lib/memcpy.c
@@ -34,8 +34,10 @@ void *memcpy(void *to, const void *from, size_t n)
34 if (temp) { 34 if (temp) {
35 long *lto = to; 35 long *lto = to;
36 const long *lfrom = from; 36 const long *lfrom = from;
37#if defined(__mc68020__) || defined(__mc68030__) || \ 37#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
38 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 38 for (; temp; temp--)
39 *lto++ = *lfrom++;
40#else
39 asm volatile ( 41 asm volatile (
40 " movel %2,%3\n" 42 " movel %2,%3\n"
41 " andw #7,%3\n" 43 " andw #7,%3\n"
@@ -56,9 +58,6 @@ void *memcpy(void *to, const void *from, size_t n)
56 " jpl 4b" 58 " jpl 4b"
57 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1) 59 : "=a" (lfrom), "=a" (lto), "=d" (temp), "=&d" (temp1)
58 : "0" (lfrom), "1" (lto), "2" (temp)); 60 : "0" (lfrom), "1" (lto), "2" (temp));
59#else
60 for (; temp; temp--)
61 *lto++ = *lfrom++;
62#endif 61#endif
63 to = lto; 62 to = lto;
64 from = lfrom; 63 from = lfrom;
diff --git a/arch/m68k/lib/memset.c b/arch/m68k/lib/memset.c
index f649e6a2e644..8a7639f0a2fe 100644
--- a/arch/m68k/lib/memset.c
+++ b/arch/m68k/lib/memset.c
@@ -32,8 +32,10 @@ void *memset(void *s, int c, size_t count)
32 temp = count >> 2; 32 temp = count >> 2;
33 if (temp) { 33 if (temp) {
34 long *ls = s; 34 long *ls = s;
35#if defined(__mc68020__) || defined(__mc68030__) || \ 35#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
36 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__) 36 for (; temp; temp--)
37 *ls++ = c;
38#else
37 size_t temp1; 39 size_t temp1;
38 asm volatile ( 40 asm volatile (
39 " movel %1,%2\n" 41 " movel %1,%2\n"
@@ -55,9 +57,6 @@ void *memset(void *s, int c, size_t count)
55 " jpl 1b" 57 " jpl 1b"
56 : "=a" (ls), "=d" (temp), "=&d" (temp1) 58 : "=a" (ls), "=d" (temp), "=&d" (temp1)
57 : "d" (c), "0" (ls), "1" (temp)); 59 : "d" (c), "0" (ls), "1" (temp));
58#else
59 for (; temp; temp--)
60 *ls++ = c;
61#endif 60#endif
62 s = ls; 61 s = ls;
63 } 62 }
diff --git a/arch/m68k/lib/muldi3.c b/arch/m68k/lib/muldi3.c
index 079bafca073e..79e928a525d0 100644
--- a/arch/m68k/lib/muldi3.c
+++ b/arch/m68k/lib/muldi3.c
@@ -19,17 +19,7 @@ along with GNU CC; see the file COPYING. If not, write to
19the Free Software Foundation, 59 Temple Place - Suite 330, 19the Free Software Foundation, 59 Temple Place - Suite 330,
20Boston, MA 02111-1307, USA. */ 20Boston, MA 02111-1307, USA. */
21 21
22#if defined(__mc68020__) || defined(__mc68030__) || \ 22#if defined(CONFIG_M68000) || defined(CONFIG_COLDFIRE)
23 defined(__mc68040__) || defined(__mc68060__) || defined(__mcpu32__)
24
25#define umul_ppmm(w1, w0, u, v) \
26 __asm__ ("mulu%.l %3,%1:%0" \
27 : "=d" ((USItype)(w0)), \
28 "=d" ((USItype)(w1)) \
29 : "%0" ((USItype)(u)), \
30 "dmi" ((USItype)(v)))
31
32#else
33 23
34#define SI_TYPE_SIZE 32 24#define SI_TYPE_SIZE 32
35#define __BITS4 (SI_TYPE_SIZE / 4) 25#define __BITS4 (SI_TYPE_SIZE / 4)
@@ -61,6 +51,15 @@ Boston, MA 02111-1307, USA. */
61 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \ 51 (w0) = __ll_lowpart (__x1) * __ll_B + __ll_lowpart (__x0); \
62 } while (0) 52 } while (0)
63 53
54#else
55
56#define umul_ppmm(w1, w0, u, v) \
57 __asm__ ("mulu%.l %3,%1:%0" \
58 : "=d" ((USItype)(w0)), \
59 "=d" ((USItype)(w1)) \
60 : "%0" ((USItype)(u)), \
61 "dmi" ((USItype)(v)))
62
64#endif 63#endif
65 64
66#define __umulsidi3(u, v) \ 65#define __umulsidi3(u, v) \
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 37862b2ce363..807c97eed8a8 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -678,7 +678,7 @@ CONFIG_LEDS_TRIGGERS=y
678CONFIG_LEDS_TRIGGER_TIMER=y 678CONFIG_LEDS_TRIGGER_TIMER=y
679CONFIG_LEDS_TRIGGER_HEARTBEAT=y 679CONFIG_LEDS_TRIGGER_HEARTBEAT=y
680CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 680CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
681CONFIG_RTC_CLASS=m 681CONFIG_RTC_CLASS=y
682CONFIG_RTC_INTF_DEV_UIE_EMUL=y 682CONFIG_RTC_INTF_DEV_UIE_EMUL=y
683CONFIG_RTC_DRV_TEST=m 683CONFIG_RTC_DRV_TEST=m
684CONFIG_RTC_DRV_DS1307=m 684CONFIG_RTC_DRV_DS1307=m
diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c
index f03cb278828f..bd3e5e73826e 100644
--- a/arch/mn10300/kernel/traps.c
+++ b/arch/mn10300/kernel/traps.c
@@ -28,7 +28,7 @@
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/system.h> 30#include <asm/system.h>
31#include <asm/uaccess.h> 31#include <linux/uaccess.h>
32#include <asm/io.h> 32#include <asm/io.h>
33#include <asm/atomic.h> 33#include <asm/atomic.h>
34#include <asm/smp.h> 34#include <asm/smp.h>
@@ -156,7 +156,7 @@ int die_if_no_fixup(const char *str, struct pt_regs *regs,
156 156
157 case EXCEP_TRAP: 157 case EXCEP_TRAP:
158 case EXCEP_UNIMPINS: 158 case EXCEP_UNIMPINS:
159 if (get_user(opcode, (uint8_t __user *)regs->pc) != 0) 159 if (probe_kernel_read(&opcode, (u8 *)regs->pc, 1) < 0)
160 break; 160 break;
161 if (opcode == 0xff) { 161 if (opcode == 0xff) {
162 if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0)) 162 if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
diff --git a/arch/mn10300/kernel/vmlinux.lds.S b/arch/mn10300/kernel/vmlinux.lds.S
index 6f702a6ab395..13c4814c29f8 100644
--- a/arch/mn10300/kernel/vmlinux.lds.S
+++ b/arch/mn10300/kernel/vmlinux.lds.S
@@ -44,6 +44,7 @@ SECTIONS
44 RO_DATA(PAGE_SIZE) 44 RO_DATA(PAGE_SIZE)
45 45
46 /* writeable */ 46 /* writeable */
47 _sdata = .; /* Start of rw data section */
47 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE) 48 RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
48 _edata = .; 49 _edata = .;
49 50
diff --git a/arch/mn10300/mm/cache-dbg-flush-by-reg.S b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
index 665919f2ab62..a775ea5d7cee 100644
--- a/arch/mn10300/mm/cache-dbg-flush-by-reg.S
+++ b/arch/mn10300/mm/cache-dbg-flush-by-reg.S
@@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one:
120 # conditionally purge this line in all ways 120 # conditionally purge this line in all ways
121 mov d1,(L1_CACHE_WAYDISP*0,a0) 121 mov d1,(L1_CACHE_WAYDISP*0,a0)
122 122
123debugger_local_cache_flushinv_no_dcache: 123debugger_local_cache_flushinv_one_no_dcache:
124 # 124 #
125 # now try to flush the icache 125 # now try to flush the icache
126 # 126 #
127 mov CHCTR,a0 127 mov CHCTR,a0
128 movhu (a0),d0 128 movhu (a0),d0
129 btst CHCTR_ICEN,d0 129 btst CHCTR_ICEN,d0
130 beq mn10300_local_icache_inv_range_reg_end 130 beq debugger_local_cache_flushinv_one_end
131 131
132 LOCAL_CLI_SAVE(d1) 132 LOCAL_CLI_SAVE(d1)
133 133
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 3d80c3e9cf60..12da77ec0228 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,5 +1,4 @@
1addnote 1addnote
2dtc
3empty.c 2empty.c
4hack-coff 3hack-coff
5infblock.c 4infblock.c
diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore
deleted file mode 100644
index a7c3f94e5e75..000000000000
--- a/arch/powerpc/boot/dtc-src/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
1dtc-lexer.lex.c
2dtc-parser.tab.c
3dtc-parser.tab.h
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 7f7e4a878602..22e719575c60 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
 # CONFIG_USB_OHCI_HCD_PCI is not set
 CONFIG_USB_STORAGE=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PCF8563=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index 6472322bf13b..185c292b0f1c 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y
 # CONFIG_USB_EHCI_HCD_PPC_OF is not set
 CONFIG_USB_OHCI_HCD=m
 CONFIG_USB_STORAGE=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_PS3=m
 CONFIG_EXT2_FS=m
 CONFIG_EXT3_FS=m
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h
index d902abd33995..b1d2deceeedb 100644
--- a/arch/powerpc/include/asm/rio.h
+++ b/arch/powerpc/include/asm/rio.h
@@ -14,7 +14,7 @@
 #define ASM_PPC_RIO_H
 
 extern void platform_rio_init(void);
-#ifdef CONFIG_RAPIDIO
+#ifdef CONFIG_FSL_RIO
 extern int fsl_rio_mcheck_exception(struct pt_regs *);
 #else
 static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) {return 0; }
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 34d2722b9451..9fb933248ab6 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1979,7 +1979,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.pvr_value		= 0x80240000,
 		.cpu_name		= "e5500",
 		.cpu_features		= CPU_FTRS_E5500,
-		.cpu_user_features	= COMMON_USER_BOOKE,
+		.cpu_user_features	= COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
 		.mmu_features		= MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
 			MMU_FTR_USE_TLBILX,
 		.icache_bsize		= 64,
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f2c906b1d8d3..8c3112a57cf2 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -82,11 +82,29 @@ static int __init early_parse_mem(char *p)
 }
 early_param("mem", early_parse_mem);
 
+/*
+ * overlaps_initrd - check for overlap with page aligned extension of
+ * initrd.
+ */
+static inline int overlaps_initrd(unsigned long start, unsigned long size)
+{
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (!initrd_start)
+		return 0;
+
+	return	(start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
+			start <= _ALIGN_UP(initrd_end, PAGE_SIZE);
+#else
+	return 0;
+#endif
+}
+
 /**
  * move_device_tree - move tree to an unused area, if needed.
  *
  * The device tree may be allocated beyond our memory limit, or inside the
- * crash kernel region for kdump. If so, move it out of the way.
+ * crash kernel region for kdump, or within the page aligned range of initrd.
+ * If so, move it out of the way.
  */
 static void __init move_device_tree(void)
 {
@@ -99,7 +117,8 @@ static void __init move_device_tree(void)
 	size = be32_to_cpu(initial_boot_params->totalsize);
 
 	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
-			overlaps_crashkernel(start, size)) {
+			overlaps_crashkernel(start, size) ||
+			overlaps_initrd(start, size)) {
 		p = __va(memblock_alloc(size, PAGE_SIZE));
 		memcpy(p, initial_boot_params, size);
 		initial_boot_params = (struct boot_param_header *)p;
@@ -555,7 +574,9 @@ static void __init early_reserve_mem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 	/* then reserve the initrd, if any */
 	if (initrd_start && (initrd_end > initrd_start))
-		memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
+		memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
+			_ALIGN_UP(initrd_end, PAGE_SIZE) -
+			_ALIGN_DOWN(initrd_start, PAGE_SIZE));
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index d65b591e5556..5de0f254dbb5 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -223,21 +223,6 @@ void free_initmem(void)
 #undef FREESEC
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (start < end)
-		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
-}
-#endif
-
-
 #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 6374b2196a17..f6dbb4c20e64 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -99,20 +99,6 @@ void free_initmem(void)
 		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
 }
 
-#ifdef CONFIG_BLK_DEV_INITRD
-void free_initrd_mem(unsigned long start, unsigned long end)
-{
-	if (start < end)
-		printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
-	for (; start < end; start += PAGE_SIZE) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		totalram_pages++;
-	}
-}
-#endif
-
 static void pgd_ctor(void *addr)
 {
 	memset(addr, 0, PGD_TABLE_SIZE);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 57e545b84bf1..29d4dde65c45 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -382,6 +382,25 @@ void __init mem_init(void)
 	mem_init_done = 1;
 }
 
+#ifdef CONFIG_BLK_DEV_INITRD
+void __init free_initrd_mem(unsigned long start, unsigned long end)
+{
+	if (start >= end)
+		return;
+
+	start = _ALIGN_DOWN(start, PAGE_SIZE);
+	end = _ALIGN_UP(end, PAGE_SIZE);
+	pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+
+	for (; start < end; start += PAGE_SIZE) {
+		ClearPageReserved(virt_to_page(start));
+		init_page_count(virt_to_page(start));
+		free_page(start);
+		totalram_pages++;
+	}
+}
+#endif
+
 /*
  * This is called when a page has been modified by the kernel.
  * It just marks the page as not i-cache clean. We do the i-cache
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c
index 0608b1657da4..d917573cf1a8 100644
--- a/arch/powerpc/sysdev/fsl_lbc.c
+++ b/arch/powerpc/sysdev/fsl_lbc.c
@@ -196,9 +196,6 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl,
 	out_be32(&lbc->lteccr, LTECCR_CLEAR);
 	out_be32(&lbc->ltedr, LTEDR_ENABLE);
 
-	/* Enable interrupts for any detected events */
-	out_be32(&lbc->lteir, LTEIR_ENABLE);
-
 	/* Set the monitor timeout value to the maximum for erratum A001 */
 	if (of_device_is_compatible(node, "fsl,elbc"))
 		clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS);
@@ -322,6 +319,9 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev)
 		goto err;
 	}
 
+	/* Enable interrupts for any detected events */
+	out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE);
+
 	return 0;
 
 err:
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 9fab2aa9c2c8..90d77bd078f5 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -89,6 +89,7 @@ config S390
 	select HAVE_GET_USER_PAGES_FAST
 	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
+	select HAVE_RCU_TABLE_FREE if SMP
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index f6314af3b354..38e71ebcd3c2 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -17,15 +17,15 @@
 #include <linux/gfp.h>
 #include <linux/mm.h>
 
-#define check_pgt_cache()	do {} while (0)
-
 unsigned long *crst_table_alloc(struct mm_struct *);
 void crst_table_free(struct mm_struct *, unsigned long *);
-void crst_table_free_rcu(struct mm_struct *, unsigned long *);
 
 unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
-void page_table_free_rcu(struct mm_struct *, unsigned long *);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+void page_table_free_rcu(struct mmu_gather *, unsigned long *);
+void __tlb_remove_table(void *_table);
+#endif
 
 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
 {
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index e4efacfe1b63..801fbe1d837d 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START;
  * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
  */
 
-/* Page status table bits for virtualization */
-#define RCP_ACC_BITS	0xf000000000000000UL
-#define RCP_FP_BIT	0x0800000000000000UL
-#define RCP_PCL_BIT	0x0080000000000000UL
-#define RCP_HR_BIT	0x0040000000000000UL
-#define RCP_HC_BIT	0x0020000000000000UL
-#define RCP_GR_BIT	0x0004000000000000UL
-#define RCP_GC_BIT	0x0002000000000000UL
-
-/* User dirty / referenced bit for KVM's migration feature */
-#define KVM_UR_BIT	0x0000800000000000UL
-#define KVM_UC_BIT	0x0000400000000000UL
-
 #ifndef __s390x__
 
 /* Bits in the segment table address-space-control-element */
@@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START;
 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)
 
+/* Page status table bits for virtualization */
+#define RCP_ACC_BITS	0xf0000000UL
+#define RCP_FP_BIT	0x08000000UL
+#define RCP_PCL_BIT	0x00800000UL
+#define RCP_HR_BIT	0x00400000UL
+#define RCP_HC_BIT	0x00200000UL
+#define RCP_GR_BIT	0x00040000UL
+#define RCP_GC_BIT	0x00020000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT	0x00008000UL
+#define KVM_UC_BIT	0x00004000UL
+
 #else /* __s390x__ */
 
 /* Bits in the segment/region table address-space-control-element */
@@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START;
 #define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
 #define _SEGMENT_ENTRY_CO	0x100	/* change-recording override   */
 
+/* Page status table bits for virtualization */
+#define RCP_ACC_BITS	0xf000000000000000UL
+#define RCP_FP_BIT	0x0800000000000000UL
+#define RCP_PCL_BIT	0x0080000000000000UL
+#define RCP_HR_BIT	0x0040000000000000UL
+#define RCP_HC_BIT	0x0020000000000000UL
+#define RCP_GR_BIT	0x0004000000000000UL
+#define RCP_GC_BIT	0x0002000000000000UL
+
+/* User dirty / referenced bit for KVM's migration feature */
+#define KVM_UR_BIT	0x0000800000000000UL
+#define KVM_UC_BIT	0x0000400000000000UL
+
 #endif /* __s390x__ */
 
 /*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 350e7ee5952d..15c97625df8d 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -139,110 +139,47 @@ struct slib {
 	struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
 } __attribute__ ((packed, aligned(2048)));
 
-/**
- * struct sbal_flags - storage block address list flags
- * @last: last entry
- * @cont: contiguous storage
- * @frag: fragmentation
- */
-struct sbal_flags {
-	u8	: 1;
-	u8	last : 1;
-	u8	cont : 1;
-	u8	: 1;
-	u8	frag : 2;
-	u8	: 2;
-} __attribute__ ((packed));
-
-#define SBAL_FLAGS_FIRST_FRAG		0x04000000UL
-#define SBAL_FLAGS_MIDDLE_FRAG		0x08000000UL
-#define SBAL_FLAGS_LAST_FRAG		0x0c000000UL
-#define SBAL_FLAGS_LAST_ENTRY		0x40000000UL
-#define SBAL_FLAGS_CONTIGUOUS		0x20000000UL
+#define SBAL_EFLAGS_LAST_ENTRY		0x40
+#define SBAL_EFLAGS_CONTIGUOUS		0x20
+#define SBAL_EFLAGS_FIRST_FRAG		0x04
+#define SBAL_EFLAGS_MIDDLE_FRAG		0x08
+#define SBAL_EFLAGS_LAST_FRAG		0x0c
+#define SBAL_EFLAGS_MASK		0x6f
 
-#define SBAL_FLAGS0_DATA_CONTINUATION	0x20UL
+#define SBAL_SFLAGS0_PCI_REQ		0x40
+#define SBAL_SFLAGS0_DATA_CONTINUATION	0x20
 
 /* Awesome OpenFCP extensions */
-#define SBAL_FLAGS0_TYPE_STATUS		0x00UL
-#define SBAL_FLAGS0_TYPE_WRITE		0x08UL
-#define SBAL_FLAGS0_TYPE_READ		0x10UL
-#define SBAL_FLAGS0_TYPE_WRITE_READ	0x18UL
-#define SBAL_FLAGS0_MORE_SBALS		0x04UL
-#define SBAL_FLAGS0_COMMAND		0x02UL
-#define SBAL_FLAGS0_LAST_SBAL		0x00UL
-#define SBAL_FLAGS0_ONLY_SBAL		SBAL_FLAGS0_COMMAND
-#define SBAL_FLAGS0_MIDDLE_SBAL		SBAL_FLAGS0_MORE_SBALS
-#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
-#define SBAL_FLAGS0_PCI		0x40
-
-/**
- * struct sbal_sbalf_0 - sbal flags for sbale 0
- * @pci: PCI indicator
- * @cont: data continuation
- * @sbtype: storage-block type (FCP)
- */
-struct sbal_sbalf_0 {
-	u8	: 1;
-	u8	pci : 1;
-	u8	cont : 1;
-	u8	sbtype : 2;
-	u8	: 3;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_1 - sbal flags for sbale 1
- * @key: storage key
- */
-struct sbal_sbalf_1 {
-	u8	: 4;
-	u8	key : 4;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_14 - sbal flags for sbale 14
- * @erridx: error index
- */
-struct sbal_sbalf_14 {
-	u8	: 4;
-	u8	erridx : 4;
-} __attribute__ ((packed));
-
-/**
- * struct sbal_sbalf_15 - sbal flags for sbale 15
- * @reason: reason for error state
- */
-struct sbal_sbalf_15 {
-	u8	reason;
-} __attribute__ ((packed));
-
-/**
- * union sbal_sbalf - storage block address list flags
- * @i0: sbalf0
- * @i1: sbalf1
- * @i14: sbalf14
- * @i15: sblaf15
- * @value: raw value
- */
-union sbal_sbalf {
-	struct sbal_sbalf_0 i0;
-	struct sbal_sbalf_1 i1;
-	struct sbal_sbalf_14 i14;
-	struct sbal_sbalf_15 i15;
-	u8 value;
-};
+#define SBAL_SFLAGS0_TYPE_STATUS	0x00
+#define SBAL_SFLAGS0_TYPE_WRITE		0x08
+#define SBAL_SFLAGS0_TYPE_READ		0x10
+#define SBAL_SFLAGS0_TYPE_WRITE_READ	0x18
+#define SBAL_SFLAGS0_MORE_SBALS		0x04
+#define SBAL_SFLAGS0_COMMAND		0x02
+#define SBAL_SFLAGS0_LAST_SBAL		0x00
+#define SBAL_SFLAGS0_ONLY_SBAL		SBAL_SFLAGS0_COMMAND
+#define SBAL_SFLAGS0_MIDDLE_SBAL	SBAL_SFLAGS0_MORE_SBALS
+#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
 
 /**
  * struct qdio_buffer_element - SBAL entry
- * @flags: flags
+ * @eflags: SBAL entry flags
+ * @scount: SBAL count
+ * @sflags: whole SBAL flags
  * @length: length
  * @addr: address
 */
 struct qdio_buffer_element {
-	u32 flags;
+	u8 eflags;
+	/* private: */
+	u8 res1;
+	/* public: */
+	u8 scount;
+	u8 sflags;
 	u32 length;
 #ifdef CONFIG_32BIT
 	/* private: */
-	void *reserved;
+	void *res2;
 	/* public: */
 #endif
 	void *addr;
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 77eee5477a52..c687a2c83462 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -26,67 +26,60 @@
 #include <linux/swap.h>
 #include <asm/processor.h>
 #include <asm/pgalloc.h>
-#include <asm/smp.h>
 #include <asm/tlbflush.h>
 
 struct mmu_gather {
 	struct mm_struct *mm;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	struct mmu_table_batch *batch;
+#endif
 	unsigned int fullmm;
-	unsigned int nr_ptes;
-	unsigned int nr_pxds;
-	unsigned int max;
-	void **array;
-	void *local[8];
+	unsigned int need_flush;
 };
 
-static inline void __tlb_alloc_page(struct mmu_gather *tlb)
-{
-	unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+struct mmu_table_batch {
+	struct rcu_head		rcu;
+	unsigned int		nr;
+	void			*tables[0];
+};
 
-	if (addr) {
-		tlb->array = (void *) addr;
-		tlb->max = PAGE_SIZE / sizeof(void *);
-	}
-}
+#define MAX_TABLE_BATCH		\
+	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
+
+extern void tlb_table_flush(struct mmu_gather *tlb);
+extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#endif
 
 static inline void tlb_gather_mmu(struct mmu_gather *tlb,
 				  struct mm_struct *mm,
 				  unsigned int full_mm_flush)
 {
 	tlb->mm = mm;
-	tlb->max = ARRAY_SIZE(tlb->local);
-	tlb->array = tlb->local;
 	tlb->fullmm = full_mm_flush;
+	tlb->need_flush = 0;
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb->batch = NULL;
+#endif
 	if (tlb->fullmm)
 		__tlb_flush_mm(mm);
-	else
-		__tlb_alloc_page(tlb);
-	tlb->nr_ptes = 0;
-	tlb->nr_pxds = tlb->max;
 }
 
 static inline void tlb_flush_mmu(struct mmu_gather *tlb)
 {
-	if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
-		__tlb_flush_mm(tlb->mm);
-	while (tlb->nr_ptes > 0)
-		page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
-	while (tlb->nr_pxds < tlb->max)
-		crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
+	if (!tlb->need_flush)
+		return;
+	tlb->need_flush = 0;
+	__tlb_flush_mm(tlb->mm);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	tlb_table_flush(tlb);
+#endif
 }
 
 static inline void tlb_finish_mmu(struct mmu_gather *tlb,
 				  unsigned long start, unsigned long end)
 {
 	tlb_flush_mmu(tlb);
-
-	rcu_table_freelist_finish();
-
-	/* keep the page table cache within bounds */
-	check_pgt_cache();
-
-	if (tlb->array != tlb->local)
-		free_pages((unsigned long) tlb->array, 0);
 }
 
 /*
@@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 				unsigned long address)
 {
-	if (!tlb->fullmm) {
-		tlb->array[tlb->nr_ptes++] = pte;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		page_table_free(tlb->mm, (unsigned long *) pte);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return page_table_free_rcu(tlb, (unsigned long *) pte);
+#endif
+	page_table_free(tlb->mm, (unsigned long *) pte);
 }
 
 /*
@@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
-	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pxds] = pmd;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		crst_table_free(tlb->mm, (unsigned long *) pmd);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return tlb_remove_table(tlb, pmd);
+#endif
+	crst_table_free(tlb->mm, (unsigned long *) pmd);
 #endif
 }
 
@@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
 #ifdef __s390x__
 	if (tlb->mm->context.asce_limit <= (1UL << 42))
 		return;
-	if (!tlb->fullmm) {
-		tlb->array[--tlb->nr_pxds] = pud;
-		if (tlb->nr_ptes >= tlb->nr_pxds)
-			tlb_flush_mmu(tlb);
-	} else
-		crst_table_free(tlb->mm, (unsigned long *) pud);
+#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+	if (!tlb->fullmm)
+		return tlb_remove_table(tlb, pud);
+#endif
+	crst_table_free(tlb->mm, (unsigned long *) pud);
 #endif
 }
 
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 30ca85cce314..67345ae7ce8d 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -731,6 +731,7 @@ static int __init kvm_s390_init(void)
 	}
 	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
 	facilities[0] &= 0xff00fff3f47c0000ULL;
+	facilities[1] &= 0x201c000000000000ULL;
 	return 0;
 }
 
diff --git a/arch/s390/kvm/sie64a.S b/arch/s390/kvm/sie64a.S
index ab0e041ac54c..5faa1b1b23fa 100644
--- a/arch/s390/kvm/sie64a.S
+++ b/arch/s390/kvm/sie64a.S
@@ -93,4 +93,6 @@ sie_err:
 
 	.section __ex_table,"a"
 	.quad	sie_inst,sie_err
+	.quad	sie_exit,sie_err
+	.quad	sie_reenter,sie_err
 	.previous
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index b09763fe5da1..37a23c223705 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -24,94 +24,12 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 
-struct rcu_table_freelist {
-	struct rcu_head rcu;
-	struct mm_struct *mm;
-	unsigned int pgt_index;
-	unsigned int crst_index;
-	unsigned long *table[0];
-};
-
-#define RCU_FREELIST_SIZE \
-	((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
-	  / sizeof(unsigned long))
-
-static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
-
-static void __page_table_free(struct mm_struct *mm, unsigned long *table);
-
-static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
-{
-	struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
-	struct rcu_table_freelist *batch = *batchp;
-
-	if (batch)
-		return batch;
-	batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
-	if (batch) {
-		batch->mm = mm;
-		batch->pgt_index = 0;
-		batch->crst_index = RCU_FREELIST_SIZE;
-		*batchp = batch;
-	}
-	return batch;
-}
-
-static void rcu_table_freelist_callback(struct rcu_head *head)
-{
-	struct rcu_table_freelist *batch =
-		container_of(head, struct rcu_table_freelist, rcu);
-
-	while (batch->pgt_index > 0)
-		__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
-	while (batch->crst_index < RCU_FREELIST_SIZE)
-		crst_table_free(batch->mm, batch->table[batch->crst_index++]);
-	free_page((unsigned long) batch);
-}
-
-void rcu_table_freelist_finish(void)
-{
-	struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
-	struct rcu_table_freelist *batch = *batchp;
-
-	if (!batch)
-		goto out;
-	call_rcu(&batch->rcu, rcu_table_freelist_callback);
-	*batchp = NULL;
-out:
-	put_cpu_var(rcu_table_freelist);
-}
-
-static void smp_sync(void *arg)
-{
-}
-
 #ifndef CONFIG_64BIT
 #define ALLOC_ORDER	1
-#define TABLES_PER_PAGE	4
-#define FRAG_MASK	15UL
-#define SECOND_HALVES	10UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-	memset(table + 256, 0, PAGE_SIZE/4);
-	clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
-	memset(table + 768, 0, PAGE_SIZE/4);
-}
-
+#define FRAG_MASK	0x0f
 #else
 #define ALLOC_ORDER	2
-#define TABLES_PER_PAGE	2
-#define FRAG_MASK	3UL
-#define SECOND_HALVES	2UL
-
-void clear_table_pgstes(unsigned long *table)
-{
-	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
-	memset(table + 256, 0, PAGE_SIZE/2);
-}
-
+#define FRAG_MASK	0x03
 #endif
 
 unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
@@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
 	free_pages((unsigned long) table, ALLOC_ORDER);
 }
 
-void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
-{
-	struct rcu_table_freelist *batch;
-
-	preempt_disable();
-	if (atomic_read(&mm->mm_users) < 2 &&
-	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
-		crst_table_free(mm, table);
-		goto out;
-	}
-	batch = rcu_table_freelist_get(mm);
-	if (!batch) {
-		smp_call_function(smp_sync, NULL, 1);
-		crst_table_free(mm, table);
-		goto out;
-	}
-	batch->table[--batch->crst_index] = table;
-	if (batch->pgt_index >= batch->crst_index)
-		rcu_table_freelist_finish();
-out:
-	preempt_enable();
-}
-
 #ifdef CONFIG_64BIT
 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 {
@@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
238} 133}
239#endif 134#endif
240 135
136static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
137{
138 unsigned int old, new;
139
140 do {
141 old = atomic_read(v);
142 new = old ^ bits;
143 } while (atomic_cmpxchg(v, old, new) != old);
144 return new;
145}
146
241/* 147/*
242 * page table entry allocation/free routines. 148 * page table entry allocation/free routines.
243 */ 149 */
150#ifdef CONFIG_PGSTE
151static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
152{
153 struct page *page;
154 unsigned long *table;
155
156 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
157 if (!page)
158 return NULL;
159 pgtable_page_ctor(page);
160 atomic_set(&page->_mapcount, 3);
161 table = (unsigned long *) page_to_phys(page);
162 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
163 clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
164 return table;
165}
166
167static inline void page_table_free_pgste(unsigned long *table)
168{
169 struct page *page;
170
171 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
172 pgtable_page_ctor(page);
173 atomic_set(&page->_mapcount, -1);
174 __free_page(page);
175}
176#endif
177
244unsigned long *page_table_alloc(struct mm_struct *mm) 178unsigned long *page_table_alloc(struct mm_struct *mm)
245{ 179{
246 struct page *page; 180 struct page *page;
247 unsigned long *table; 181 unsigned long *table;
248 unsigned long bits; 182 unsigned int mask, bit;
249 183
250 bits = (mm->context.has_pgste) ? 3UL : 1UL; 184#ifdef CONFIG_PGSTE
185 if (mm_has_pgste(mm))
186 return page_table_alloc_pgste(mm);
187#endif
188 /* Allocate fragments of a 4K page as 1K/2K page table */
251 spin_lock_bh(&mm->context.list_lock); 189 spin_lock_bh(&mm->context.list_lock);
252 page = NULL; 190 mask = FRAG_MASK;
253 if (!list_empty(&mm->context.pgtable_list)) { 191 if (!list_empty(&mm->context.pgtable_list)) {
254 page = list_first_entry(&mm->context.pgtable_list, 192 page = list_first_entry(&mm->context.pgtable_list,
255 struct page, lru); 193 struct page, lru);
256 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1)) 194 table = (unsigned long *) page_to_phys(page);
257 page = NULL; 195 mask = atomic_read(&page->_mapcount);
196 mask = mask | (mask >> 4);
258 } 197 }
259 if (!page) { 198 if ((mask & FRAG_MASK) == FRAG_MASK) {
260 spin_unlock_bh(&mm->context.list_lock); 199 spin_unlock_bh(&mm->context.list_lock);
261 page = alloc_page(GFP_KERNEL|__GFP_REPEAT); 200 page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
262 if (!page) 201 if (!page)
263 return NULL; 202 return NULL;
264 pgtable_page_ctor(page); 203 pgtable_page_ctor(page);
265 page->flags &= ~FRAG_MASK; 204 atomic_set(&page->_mapcount, 1);
266 table = (unsigned long *) page_to_phys(page); 205 table = (unsigned long *) page_to_phys(page);
267 if (mm->context.has_pgste) 206 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
268 clear_table_pgstes(table);
269 else
270 clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
271 spin_lock_bh(&mm->context.list_lock); 207 spin_lock_bh(&mm->context.list_lock);
272 list_add(&page->lru, &mm->context.pgtable_list); 208 list_add(&page->lru, &mm->context.pgtable_list);
209 } else {
210 for (bit = 1; mask & bit; bit <<= 1)
211 table += PTRS_PER_PTE;
212 mask = atomic_xor_bits(&page->_mapcount, bit);
213 if ((mask & FRAG_MASK) == FRAG_MASK)
214 list_del(&page->lru);
273 } 215 }
274 table = (unsigned long *) page_to_phys(page);
275 while (page->flags & bits) {
276 table += 256;
277 bits <<= 1;
278 }
279 page->flags |= bits;
280 if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
281 list_move_tail(&page->lru, &mm->context.pgtable_list);
282 spin_unlock_bh(&mm->context.list_lock); 216 spin_unlock_bh(&mm->context.list_lock);
283 return table; 217 return table;
284} 218}
285 219
286static void __page_table_free(struct mm_struct *mm, unsigned long *table) 220void page_table_free(struct mm_struct *mm, unsigned long *table)
287{ 221{
288 struct page *page; 222 struct page *page;
289 unsigned long bits; 223 unsigned int bit, mask;
290 224
291 bits = ((unsigned long) table) & 15; 225#ifdef CONFIG_PGSTE
292 table = (unsigned long *)(((unsigned long) table) ^ bits); 226 if (mm_has_pgste(mm))
227 return page_table_free_pgste(table);
228#endif
229 /* Free 1K/2K page table fragment of a 4K page */
293 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 230 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
294 page->flags ^= bits; 231 bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
295 if (!(page->flags & FRAG_MASK)) { 232 spin_lock_bh(&mm->context.list_lock);
233 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
234 list_del(&page->lru);
235 mask = atomic_xor_bits(&page->_mapcount, bit);
236 if (mask & FRAG_MASK)
237 list_add(&page->lru, &mm->context.pgtable_list);
238 spin_unlock_bh(&mm->context.list_lock);
239 if (mask == 0) {
296 pgtable_page_dtor(page); 240 pgtable_page_dtor(page);
241 atomic_set(&page->_mapcount, -1);
297 __free_page(page); 242 __free_page(page);
298 } 243 }
299} 244}
300 245
301void page_table_free(struct mm_struct *mm, unsigned long *table) 246#ifdef CONFIG_HAVE_RCU_TABLE_FREE
247
248static void __page_table_free_rcu(void *table, unsigned bit)
302{ 249{
303 struct page *page; 250 struct page *page;
304 unsigned long bits;
305 251
306 bits = (mm->context.has_pgste) ? 3UL : 1UL; 252#ifdef CONFIG_PGSTE
307 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 253 if (bit == FRAG_MASK)
254 return page_table_free_pgste(table);
255#endif
256 /* Free 1K/2K page table fragment of a 4K page */
308 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 257 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
309 spin_lock_bh(&mm->context.list_lock); 258 if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
310 page->flags ^= bits;
311 if (page->flags & FRAG_MASK) {
312 /* Page now has some free pgtable fragments. */
313 if (!list_empty(&page->lru))
314 list_move(&page->lru, &mm->context.pgtable_list);
315 page = NULL;
316 } else
317 /* All fragments of the 4K page have been freed. */
318 list_del(&page->lru);
319 spin_unlock_bh(&mm->context.list_lock);
320 if (page) {
321 pgtable_page_dtor(page); 259 pgtable_page_dtor(page);
260 atomic_set(&page->_mapcount, -1);
322 __free_page(page); 261 __free_page(page);
323 } 262 }
324} 263}
325 264
326void page_table_free_rcu(struct mm_struct *mm, unsigned long *table) 265void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
327{ 266{
328 struct rcu_table_freelist *batch; 267 struct mm_struct *mm;
329 struct page *page; 268 struct page *page;
330 unsigned long bits; 269 unsigned int bit, mask;
331 270
332 preempt_disable(); 271 mm = tlb->mm;
333 if (atomic_read(&mm->mm_users) < 2 && 272#ifdef CONFIG_PGSTE
334 cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) { 273 if (mm_has_pgste(mm)) {
335 page_table_free(mm, table); 274 table = (unsigned long *) (__pa(table) | FRAG_MASK);
336 goto out; 275 tlb_remove_table(tlb, table);
337 } 276 return;
338 batch = rcu_table_freelist_get(mm);
339 if (!batch) {
340 smp_call_function(smp_sync, NULL, 1);
341 page_table_free(mm, table);
342 goto out;
343 } 277 }
344 bits = (mm->context.has_pgste) ? 3UL : 1UL; 278#endif
345 bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long); 279 bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
346 page = pfn_to_page(__pa(table) >> PAGE_SHIFT); 280 page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
347 spin_lock_bh(&mm->context.list_lock); 281 spin_lock_bh(&mm->context.list_lock);
348 /* Delayed freeing with rcu prevents reuse of pgtable fragments */ 282 if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
349 list_del_init(&page->lru); 283 list_del(&page->lru);
284 mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
285 if (mask & FRAG_MASK)
286 list_add_tail(&page->lru, &mm->context.pgtable_list);
350 spin_unlock_bh(&mm->context.list_lock); 287 spin_unlock_bh(&mm->context.list_lock);
351 table = (unsigned long *)(((unsigned long) table) | bits); 288 table = (unsigned long *) (__pa(table) | (bit << 4));
352 batch->table[batch->pgt_index++] = table; 289 tlb_remove_table(tlb, table);
353 if (batch->pgt_index >= batch->crst_index)
354 rcu_table_freelist_finish();
355out:
356 preempt_enable();
357} 290}
358 291
292void __tlb_remove_table(void *_table)
293{
294 void *table = (void *)((unsigned long) _table & PAGE_MASK);
295 unsigned type = (unsigned long) _table & ~PAGE_MASK;
296
297 if (type)
298 __page_table_free_rcu(table, type);
299 else
300 free_pages((unsigned long) table, ALLOC_ORDER);
301}
302
303#endif
304
359/* 305/*
360 * switch on pgstes for its userspace process (for kvm) 306 * switch on pgstes for its userspace process (for kvm)
361 */ 307 */
@@ -369,7 +315,7 @@ int s390_enable_sie(void)
 		return -EINVAL;
 
 	/* Do we have pgstes? if yes, we are done */
-	if (tsk->mm->context.has_pgste)
+	if (mm_has_pgste(tsk->mm))
 		return 0;
 
 	/* lets check if we are allowed to replace the mm */
diff --git a/arch/sh/boards/mach-ecovec24/setup.c b/arch/sh/boards/mach-ecovec24/setup.c
index 3a32741cc0ac..513cb1a2e6c8 100644
--- a/arch/sh/boards/mach-ecovec24/setup.c
+++ b/arch/sh/boards/mach-ecovec24/setup.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/delay.h>
 #include <linux/usb/r8a66597.h>
+#include <linux/usb/renesas_usbhs.h>
 #include <linux/i2c.h>
 #include <linux/i2c/tsc2007.h>
 #include <linux/spi/spi.h>
@@ -232,6 +233,52 @@ static struct platform_device usb1_common_device = {
 	.resource	= usb1_common_resources,
 };
 
+/*
+ * USBHS
+ */
+static int usbhs_get_id(struct platform_device *pdev)
+{
+	return gpio_get_value(GPIO_PTB3);
+}
+
+static struct renesas_usbhs_platform_info usbhs_info = {
+	.platform_callback = {
+		.get_id = usbhs_get_id,
+	},
+	.driver_param = {
+		.buswait_bwait = 4,
+		.detection_delay = 5,
+	},
+};
+
+static struct resource usbhs_resources[] = {
+	[0] = {
+		.start	= 0xa4d90000,
+		.end	= 0xa4d90124 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= 66,
+		.end	= 66,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device usbhs_device = {
+	.name	= "renesas_usbhs",
+	.id	= 1,
+	.dev = {
+		.dma_mask		= NULL,		/*  not use dma */
+		.coherent_dma_mask	= 0xffffffff,
+		.platform_data		= &usbhs_info,
+	},
+	.num_resources	= ARRAY_SIZE(usbhs_resources),
+	.resource	= usbhs_resources,
+	.archdata = {
+		.hwblk_id	= HWBLK_USB1,
+	},
+};
+
 /* LCDC */
 const static struct fb_videomode ecovec_lcd_modes[] = {
 	{
@@ -897,6 +944,7 @@ static struct platform_device *ecovec_devices[] __initdata = {
 	&sh_eth_device,
 	&usb0_host_device,
 	&usb1_common_device,
+	&usbhs_device,
 	&lcdc_device,
 	&ceu0_device,
 	&ceu1_device,
diff --git a/arch/sh/boot/compressed/Makefile b/arch/sh/boot/compressed/Makefile
index 780e083e4d17..23bc849d9c64 100644
--- a/arch/sh/boot/compressed/Makefile
+++ b/arch/sh/boot/compressed/Makefile
@@ -27,8 +27,6 @@ IMAGE_OFFSET := $(shell /bin/bash -c 'printf "0x%08x" \
 			 $(CONFIG_BOOT_LINK_OFFSET)]')
 endif
 
-LIBGCC := $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
-
 ifeq ($(CONFIG_MCOUNT),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
@@ -37,7 +35,25 @@ endif
 LDFLAGS_vmlinux := --oformat $(ld-bfd) -Ttext $(IMAGE_OFFSET) -e startup \
 		   -T $(obj)/../../kernel/vmlinux.lds
 
-$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(LIBGCC) FORCE
+#
+# Pull in the necessary libgcc bits from the in-kernel implementation.
+#
+lib1funcs-$(CONFIG_SUPERH32)	:= ashiftrt.S ashldi3.c ashrsi3.S ashlsi3.S \
+				   lshrsi3.S
+lib1funcs-obj			:= \
+	$(addsuffix .o, $(basename $(addprefix $(obj)/, $(lib1funcs-y))))
+
+lib1funcs-dir		:= $(srctree)/arch/$(SRCARCH)/lib
+ifeq ($(BITS),64)
+	lib1funcs-dir	:= $(addsuffix $(BITS), $(lib1funcs-dir))
+endif
+
+KBUILD_CFLAGS += -I$(lib1funcs-dir)
+
+$(addprefix $(obj)/,$(lib1funcs-y)): $(obj)/%: $(lib1funcs-dir)/% FORCE
+	$(call cmd,shipped)
+
+$(obj)/vmlinux: $(OBJECTS) $(obj)/piggy.o $(lib1funcs-obj) FORCE
 	$(call if_changed,ld)
 	@:
 
diff --git a/arch/sh/configs/titan_defconfig b/arch/sh/configs/titan_defconfig
index 0f558914e760..e2cbd92d520b 100644
--- a/arch/sh/configs/titan_defconfig
+++ b/arch/sh/configs/titan_defconfig
@@ -227,7 +227,7 @@ CONFIG_USB_SERIAL=m
 CONFIG_USB_SERIAL_GENERIC=y
 CONFIG_USB_SERIAL_ARK3116=m
 CONFIG_USB_SERIAL_PL2303=m
-CONFIG_RTC_CLASS=m
+CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_SH=m
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
diff --git a/arch/sh/include/asm/cmpxchg-grb.h b/arch/sh/include/asm/cmpxchg-grb.h
index 4676bf57693a..f848dec9e483 100644
--- a/arch/sh/include/asm/cmpxchg-grb.h
+++ b/arch/sh/include/asm/cmpxchg-grb.h
@@ -15,8 +15,9 @@ static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
15 " mov.l %2, @%1 \n\t" /* store new value */ 15 " mov.l %2, @%1 \n\t" /* store new value */
16 "1: mov r1, r15 \n\t" /* LOGOUT */ 16 "1: mov r1, r15 \n\t" /* LOGOUT */
17 : "=&r" (retval), 17 : "=&r" (retval),
18 "+r" (m) 18 "+r" (m),
19 : "r" (val) 19 "+r" (val) /* inhibit r15 overloading */
20 :
20 : "memory", "r0", "r1"); 21 : "memory", "r0", "r1");
21 22
22 return retval; 23 return retval;
@@ -36,8 +37,9 @@ static inline unsigned long xchg_u8(volatile u8 *m, unsigned long val)
36 " mov.b %2, @%1 \n\t" /* store new value */ 37 " mov.b %2, @%1 \n\t" /* store new value */
37 "1: mov r1, r15 \n\t" /* LOGOUT */ 38 "1: mov r1, r15 \n\t" /* LOGOUT */
38 : "=&r" (retval), 39 : "=&r" (retval),
39 "+r" (m) 40 "+r" (m),
40 : "r" (val) 41 "+r" (val) /* inhibit r15 overloading */
42 :
41 : "memory" , "r0", "r1"); 43 : "memory" , "r0", "r1");
42 44
43 return retval; 45 return retval;
@@ -54,13 +56,14 @@ static inline unsigned long __cmpxchg_u32(volatile int *m, unsigned long old,
54 " nop \n\t" 56 " nop \n\t"
55 " mov r15, r1 \n\t" /* r1 = saved sp */ 57 " mov r15, r1 \n\t" /* r1 = saved sp */
56 " mov #-8, r15 \n\t" /* LOGIN */ 58 " mov #-8, r15 \n\t" /* LOGIN */
57 " mov.l @%1, %0 \n\t" /* load old value */ 59 " mov.l @%3, %0 \n\t" /* load old value */
58 " cmp/eq %0, %2 \n\t" 60 " cmp/eq %0, %1 \n\t"
59 " bf 1f \n\t" /* if not equal */ 61 " bf 1f \n\t" /* if not equal */
60 " mov.l %3, @%1 \n\t" /* store new value */ 62 " mov.l %2, @%3 \n\t" /* store new value */
61 "1: mov r1, r15 \n\t" /* LOGOUT */ 63 "1: mov r1, r15 \n\t" /* LOGOUT */
62 : "=&r" (retval) 64 : "=&r" (retval),
63 : "r" (m), "r" (old), "r" (new) 65 "+r" (old), "+r" (new) /* old or new can be r15 */
66 : "r" (m)
64 : "memory" , "r0", "r1", "t"); 67 : "memory" , "r0", "r1", "t");
65 68
66 return retval; 69 return retval;
diff --git a/arch/sh/include/asm/processor_64.h b/arch/sh/include/asm/processor_64.h
index 2a541ddb5a1b..e25c4c7d6b63 100644
--- a/arch/sh/include/asm/processor_64.h
+++ b/arch/sh/include/asm/processor_64.h
@@ -150,7 +150,6 @@ struct thread_struct {
 #define SR_USER (SR_MMU | SR_FD)
 
 #define start_thread(_regs, new_pc, new_sp)			\
-	set_fs(USER_DS);					\
 	_regs->sr = SR_USER;	/* User mode. */		\
 	_regs->pc = new_pc - 4;	/* Compensate syscall exit */	\
 	_regs->pc |= 1;		/* Set SHmedia ! */		\
diff --git a/arch/sh/include/cpu-sh4/cpu/sh7724.h b/arch/sh/include/cpu-sh4/cpu/sh7724.h
index 3daef8ecbc63..cbc47e6bcab5 100644
--- a/arch/sh/include/cpu-sh4/cpu/sh7724.h
+++ b/arch/sh/include/cpu-sh4/cpu/sh7724.h
@@ -298,6 +298,14 @@ enum {
 	SHDMA_SLAVE_SCIF4_RX,
 	SHDMA_SLAVE_SCIF5_TX,
 	SHDMA_SLAVE_SCIF5_RX,
+	SHDMA_SLAVE_USB0D0_TX,
+	SHDMA_SLAVE_USB0D0_RX,
+	SHDMA_SLAVE_USB0D1_TX,
+	SHDMA_SLAVE_USB0D1_RX,
+	SHDMA_SLAVE_USB1D0_TX,
+	SHDMA_SLAVE_USB1D0_RX,
+	SHDMA_SLAVE_USB1D1_TX,
+	SHDMA_SLAVE_USB1D1_RX,
 	SHDMA_SLAVE_SDHI0_TX,
 	SHDMA_SLAVE_SDHI0_RX,
 	SHDMA_SLAVE_SDHI1_TX,
diff --git a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
index 0333fe9e3881..134a397b1918 100644
--- a/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/setup-sh7724.c
@@ -93,6 +93,46 @@ static const struct sh_dmae_slave_config sh7724_dmae_slaves[] = {
 		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT),
 		.mid_rid	= 0x36,
 	}, {
+		.slave_id	= SHDMA_SLAVE_USB0D0_TX,
+		.addr		= 0xA4D80100,
+		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0x73,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB0D0_RX,
+		.addr		= 0xA4D80100,
+		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0x73,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB0D1_TX,
+		.addr		= 0xA4D80120,
+		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0x77,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB0D1_RX,
+		.addr		= 0xA4D80120,
+		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0x77,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB1D0_TX,
+		.addr		= 0xA4D90100,
+		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0xab,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB1D0_RX,
+		.addr		= 0xA4D90100,
+		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0xab,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB1D1_TX,
+		.addr		= 0xA4D90120,
+		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0xaf,
+	}, {
+		.slave_id	= SHDMA_SLAVE_USB1D1_RX,
+		.addr		= 0xA4D90120,
+		.chcr		= DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT),
+		.mid_rid	= 0xaf,
+	}, {
 		.slave_id	= SHDMA_SLAVE_SDHI0_TX,
 		.addr		= 0x04ce0030,
 		.chcr		= DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_16BIT),
diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index b473f0c06fbc..aaf6d59c2012 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -102,8 +102,6 @@ EXPORT_SYMBOL(kernel_thread);
 void start_thread(struct pt_regs *regs, unsigned long new_pc,
 		  unsigned long new_sp)
 {
-	set_fs(USER_DS);
-
 	regs->pr = 0;
 	regs->sr = SR_FD;
 	regs->pc = new_pc;
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 52411462c409..115725198038 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -26,9 +26,9 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 {
 	unsigned int cache_type = (unsigned int)file->private;
 	struct cache_info *cache;
-	unsigned int waysize, way, cache_size;
-	unsigned long ccr, base;
-	static unsigned long addrstart = 0;
+	unsigned int waysize, way;
+	unsigned long ccr;
+	unsigned long addrstart = 0;
 
 	/*
 	 * Go uncached immediately so we don't skew the results any
@@ -45,28 +45,13 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	}
 
 	if (cache_type == CACHE_TYPE_DCACHE) {
-		base = CACHE_OC_ADDRESS_ARRAY;
+		addrstart = CACHE_OC_ADDRESS_ARRAY;
 		cache = &current_cpu_data.dcache;
 	} else {
-		base = CACHE_IC_ADDRESS_ARRAY;
+		addrstart = CACHE_IC_ADDRESS_ARRAY;
 		cache = &current_cpu_data.icache;
 	}
 
-	/*
-	 * Due to the amount of data written out (depending on the cache size),
-	 * we may be iterated over multiple times. In this case, keep track of
-	 * the entry position in addrstart, and rewind it when we've hit the
-	 * end of the cache.
-	 *
-	 * Likewise, the same code is used for multiple caches, so care must
-	 * be taken for bouncing addrstart back and forth so the appropriate
-	 * cache is hit.
-	 */
-	cache_size = cache->ways * cache->sets * cache->linesz;
-	if (((addrstart & 0xff000000) != base) ||
-	    (addrstart & 0x00ffffff) > cache_size)
-		addrstart = base;
-
 	waysize = cache->sets;
 
 	/*
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index af32e17fa170..253986bd6bb6 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -26,7 +26,6 @@ config SPARC
 	select HAVE_DMA_API_DEBUG
 	select HAVE_ARCH_JUMP_LABEL
 	select HAVE_GENERIC_HARDIRQS
-	select GENERIC_HARDIRQS_NO_DEPRECATED
 	select GENERIC_IRQ_SHOW
 	select USE_GENERIC_SMP_HELPERS if SMP
 
@@ -528,6 +527,23 @@ config PCI_DOMAINS
 config PCI_SYSCALL
 	def_bool PCI
 
+config PCIC_PCI
+	bool
+	depends on PCI && SPARC32 && !SPARC_LEON
+	default y
+
+config LEON_PCI
+	bool
+	depends on PCI && SPARC_LEON
+	default y
+
+config GRPCI2
+	bool "GRPCI2 Host Bridge Support"
+	depends on LEON_PCI
+	default y
+	help
+	  Say Y here to include the GRPCI2 Host Bridge Driver.
+
 source "drivers/pci/Kconfig"
 
 source "drivers/pcmcia/Kconfig"
diff --git a/arch/sparc/include/asm/floppy_32.h b/arch/sparc/include/asm/floppy_32.h
index 482c79e2a416..7440915e86d8 100644
--- a/arch/sparc/include/asm/floppy_32.h
+++ b/arch/sparc/include/asm/floppy_32.h
@@ -138,7 +138,7 @@ static unsigned char sun_82072_fd_inb(int port)
 		return sun_fdc->data_82072;
 	case 7: /* FD_DIR */
 		return sun_read_dir();
-	};
+	}
 	panic("sun_82072_fd_inb: How did I get here?");
 }
 
@@ -161,7 +161,7 @@ static void sun_82072_fd_outb(unsigned char value, int port)
 	case 4: /* FD_STATUS */
 		sun_fdc->status_82072 = value;
 		break;
-	};
+	}
 	return;
 }
 
@@ -186,7 +186,7 @@ static unsigned char sun_82077_fd_inb(int port)
 		return sun_fdc->data_82077;
 	case 7: /* FD_DIR */
 		return sun_read_dir();
-	};
+	}
 	panic("sun_82077_fd_inb: How did I get here?");
 }
 
@@ -212,7 +212,7 @@ static void sun_82077_fd_outb(unsigned char value, int port)
 	case 3: /* FD_TDR */
 		sun_fdc->tapectl_82077 = value;
 		break;
-	};
+	}
 	return;
 }
 
diff --git a/arch/sparc/include/asm/floppy_64.h b/arch/sparc/include/asm/floppy_64.h
index 6597ce874d78..bcef1f5a2a6d 100644
--- a/arch/sparc/include/asm/floppy_64.h
+++ b/arch/sparc/include/asm/floppy_64.h
@@ -111,7 +111,7 @@ static unsigned char sun_82077_fd_inb(unsigned long port)
 	case 7: /* FD_DIR */
 		/* XXX: Is DCL on 0x80 in sun4m? */
 		return sbus_readb(&sun_fdc->dir_82077);
-	};
+	}
 	panic("sun_82072_fd_inb: How did I get here?");
 }
 
@@ -135,7 +135,7 @@ static void sun_82077_fd_outb(unsigned char value, unsigned long port)
 	case 4: /* FD_STATUS */
 		sbus_writeb(value, &sun_fdc->status_82077);
 		break;
-	};
+	}
 	return;
 }
 
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 6bdaf1e43d2a..a4e457f003ed 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -318,6 +318,9 @@ struct device_node;
 extern unsigned int leon_build_device_irq(unsigned int real_irq,
 					  irq_flow_handler_t flow_handler,
 					  const char *name, int do_ack);
+extern void leon_update_virq_handling(unsigned int virq,
+				      irq_flow_handler_t flow_handler,
+				      const char *name, int do_ack);
 extern void leon_clear_clock_irq(void);
 extern void leon_load_profile_irq(int cpu, unsigned int limit);
 extern void leon_init_timers(irq_handler_t counter_fn);
diff --git a/arch/sparc/include/asm/leon_pci.h b/arch/sparc/include/asm/leon_pci.h
new file mode 100644
index 000000000000..42b4b31a82fe
--- /dev/null
+++ b/arch/sparc/include/asm/leon_pci.h
@@ -0,0 +1,21 @@
+/*
+ * asm/leon_pci.h
+ *
+ * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
+ */
+
+#ifndef _ASM_LEON_PCI_H_
+#define _ASM_LEON_PCI_H_
+
+/* PCI related definitions */
+struct leon_pci_info {
+	struct pci_ops *ops;
+	struct resource	io_space;
+	struct resource	mem_space;
+	int (*map_irq)(struct pci_dev *dev, u8 slot, u8 pin);
+};
+
+extern void leon_pci_init(struct platform_device *ofdev,
+				struct leon_pci_info *info);
+
+#endif /* _ASM_LEON_PCI_H_ */
diff --git a/arch/sparc/include/asm/pci_32.h b/arch/sparc/include/asm/pci_32.h
index 332ac9ab36bc..862e3ce92b15 100644
--- a/arch/sparc/include/asm/pci_32.h
+++ b/arch/sparc/include/asm/pci_32.h
@@ -47,7 +47,31 @@ extern struct device_node *pci_device_to_OF_node(struct pci_dev *pdev);
 
 #endif /* __KERNEL__ */
 
+#ifndef CONFIG_LEON_PCI
 /* generic pci stuff */
 #include <asm-generic/pci.h>
+#else
+/*
+ * On LEON PCI Memory space is mapped 1:1 with physical address space.
+ *
+ * I/O space is located at low 64Kbytes in PCI I/O space. The I/O addresses
+ * are converted into CPU addresses to virtual addresses that are mapped with
+ * MMU to the PCI Host PCI I/O space window which are translated to the low
+ * 64Kbytes by the Host controller.
+ */
+
+extern void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+			struct resource *res);
+
+extern void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+			struct pci_bus_region *region);
+
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	return PCI_IRQ_NONE;
+}
+#endif
 
 #endif /* __SPARC_PCI_H */
diff --git a/arch/sparc/include/asm/pcic.h b/arch/sparc/include/asm/pcic.h
index 7eb5d78f5211..6676cbcc8b6a 100644
--- a/arch/sparc/include/asm/pcic.h
+++ b/arch/sparc/include/asm/pcic.h
@@ -29,7 +29,7 @@ struct linux_pcic {
29 int pcic_imdim; 29 int pcic_imdim;
30}; 30};
31 31
32#ifdef CONFIG_PCI 32#ifdef CONFIG_PCIC_PCI
33extern int pcic_present(void); 33extern int pcic_present(void);
34extern int pcic_probe(void); 34extern int pcic_probe(void);
35extern void pci_time_init(void); 35extern void pci_time_init(void);
diff --git a/arch/sparc/include/asm/system_32.h b/arch/sparc/include/asm/system_32.h
index 47a7e862474e..aba16092a81b 100644
--- a/arch/sparc/include/asm/system_32.h
+++ b/arch/sparc/include/asm/system_32.h
@@ -220,7 +220,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
220 switch (size) { 220 switch (size) {
221 case 4: 221 case 4:
222 return xchg_u32(ptr, x); 222 return xchg_u32(ptr, x);
223 }; 223 }
224 __xchg_called_with_bad_pointer(); 224 __xchg_called_with_bad_pointer();
225 return x; 225 return x;
226} 226}
diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h
index 3c96d3bb9f15..10bcabce97b2 100644
--- a/arch/sparc/include/asm/system_64.h
+++ b/arch/sparc/include/asm/system_64.h
@@ -234,7 +234,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
234 return xchg32(ptr, x); 234 return xchg32(ptr, x);
235 case 8: 235 case 8:
236 return xchg64(ptr, x); 236 return xchg64(ptr, x);
237 }; 237 }
238 __xchg_called_with_bad_pointer(); 238 __xchg_called_with_bad_pointer();
239 return x; 239 return x;
240} 240}
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 9cff2709a96d..b90b4a1d070a 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -73,7 +73,9 @@ obj-$(CONFIG_SPARC64_SMP) += cpumap.o
73 73
74obj-y += dma.o 74obj-y += dma.o
75 75
76obj-$(CONFIG_SPARC32_PCI) += pcic.o 76obj-$(CONFIG_PCIC_PCI) += pcic.o
77obj-$(CONFIG_LEON_PCI) += leon_pci.o
78obj-$(CONFIG_GRPCI2) += leon_pci_grpci2.o
77 79
78obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o 80obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
79obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o 81obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index 1e34f29e58bb..caef9deb5866 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -123,7 +123,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg)
123 123
124 default: 124 default:
125 return -EINVAL; 125 return -EINVAL;
126 }; 126 }
127 127
128 return 0; 128 return 0;
129} 129}
diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c
index 8505e0ac78ba..acf5151f3c1d 100644
--- a/arch/sparc/kernel/auxio_32.c
+++ b/arch/sparc/kernel/auxio_32.c
@@ -101,7 +101,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off)
101 break; 101 break;
102 default: 102 default:
103 panic("Can't set AUXIO register on this machine."); 103 panic("Can't set AUXIO register on this machine.");
104 }; 104 }
105 spin_unlock_irqrestore(&auxio_lock, flags); 105 spin_unlock_irqrestore(&auxio_lock, flags);
106} 106}
107EXPORT_SYMBOL(set_auxio); 107EXPORT_SYMBOL(set_auxio);
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 668c7be5d365..5f450260981d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -664,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va
664 case 0x0: 664 case 0x0:
665 bp->interleave = 16; 665 bp->interleave = 16;
666 break; 666 break;
667 }; 667 }
668 668
669 /* UK[10] is reserved, and UK[11] is not set for the SDRAM 669 /* UK[10] is reserved, and UK[11] is not set for the SDRAM
670 * bank size definition. 670 * bank size definition.
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 8341963f4c84..9fe08a1ea6c6 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -229,7 +229,7 @@ real_irq_entry:
229#ifdef CONFIG_SMP 229#ifdef CONFIG_SMP
230 .globl patchme_maybe_smp_msg 230 .globl patchme_maybe_smp_msg
231 231
232 cmp %l7, 12 232 cmp %l7, 11
233patchme_maybe_smp_msg: 233patchme_maybe_smp_msg:
234 bgu maybe_smp4m_msg 234 bgu maybe_smp4m_msg
235 nop 235 nop
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
293 WRITE_PAUSE 293 WRITE_PAUSE
294 wr %l4, PSR_ET, %psr 294 wr %l4, PSR_ET, %psr
295 WRITE_PAUSE 295 WRITE_PAUSE
296 sll %o2, 28, %o2 ! shift for simpler checks below 296 sll %o3, 28, %o2 ! shift for simpler checks below
297maybe_smp4m_msg_check_single: 297maybe_smp4m_msg_check_single:
298 andcc %o2, 0x1, %g0 298 andcc %o2, 0x1, %g0
299 beq,a maybe_smp4m_msg_check_mask 299 beq,a maybe_smp4m_msg_check_mask
@@ -1604,7 +1604,7 @@ restore_current:
1604 retl 1604 retl
1605 nop 1605 nop
1606 1606
1607#ifdef CONFIG_PCI 1607#ifdef CONFIG_PCIC_PCI
1608#include <asm/pcic.h> 1608#include <asm/pcic.h>
1609 1609
1610 .align 4 1610 .align 4
@@ -1650,7 +1650,7 @@ pcic_nmi_trap_patch:
1650 rd %psr, %l0 1650 rd %psr, %l0
1651 .word 0 1651 .word 0
1652 1652
1653#endif /* CONFIG_PCI */ 1653#endif /* CONFIG_PCIC_PCI */
1654 1654
1655 .globl flushw_all 1655 .globl flushw_all
1656flushw_all: 1656flushw_all:
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 2f538ac2e139..d17255a2bbac 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -236,6 +236,21 @@ static unsigned int _leon_build_device_irq(struct platform_device *op,
236 return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0); 236 return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
237} 237}
238 238
239void leon_update_virq_handling(unsigned int virq,
240 irq_flow_handler_t flow_handler,
241 const char *name, int do_ack)
242{
243 unsigned long mask = (unsigned long)irq_get_chip_data(virq);
244
245 mask &= ~LEON_DO_ACK_HW;
246 if (do_ack)
247 mask |= LEON_DO_ACK_HW;
248
249 irq_set_chip_and_handler_name(virq, &leon_irq,
250 flow_handler, name);
251 irq_set_chip_data(virq, (void *)mask);
252}
253
239void __init leon_init_timers(irq_handler_t counter_fn) 254void __init leon_init_timers(irq_handler_t counter_fn)
240{ 255{
241 int irq, eirq; 256 int irq, eirq;
@@ -361,6 +376,22 @@ void __init leon_init_timers(irq_handler_t counter_fn)
361 prom_halt(); 376 prom_halt();
362 } 377 }
363 378
379#ifdef CONFIG_SMP
380 {
381 unsigned long flags;
382
383 /*
 384 * In SMP, sun4m adds an IPI handler to the IRQ trap handler that
 385 * LEON must never take; sun4d and LEON overwrite the branch
 386 * with a NOP.
387 */
388 local_irq_save(flags);
389 patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */
390 local_flush_cache_all();
391 local_irq_restore(flags);
392 }
393#endif
394
364 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 395 LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
365 LEON3_GPTIMER_EN | 396 LEON3_GPTIMER_EN |
366 LEON3_GPTIMER_RL | 397 LEON3_GPTIMER_RL |
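A short usage note on the leon_update_virq_handling() helper added above, mirroring how the GRPCI2 driver later in this patch calls it: the virq must already exist (for example from leon_build_device_irq() or the platform IRQ array); the helper only swaps the flow handler and the hardware-ACK flag kept in the chip data. A minimal sketch:

	/* Assume 'virq' was set up earlier, e.g. taken from ofdev->archdata.irqs[].
	 * Switch it to fasteoi handling with hardware ACK enabled, as the GRPCI2
	 * driver does for its per-pin PCI interrupts.
	 */
	unsigned int virq = ofdev->archdata.irqs[0];

	leon_update_virq_handling(virq, handle_fasteoi_irq, "pcilvl", 1);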
diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c
new file mode 100644
index 000000000000..a8a9a275037d
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci.c
@@ -0,0 +1,253 @@
1/*
2 * leon_pci.c: LEON Host PCI support
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 *
6 * Code is partially derived from pcic.c
7 */
8
9#include <linux/of_device.h>
10#include <linux/kernel.h>
11#include <linux/pci.h>
12#include <asm/leon.h>
13#include <asm/leon_pci.h>
14
 15/* The LEON architecture does not rely on a BIOS or bootloader to set up
 16 * PCI for us. The Linux generic routines are used to set up resources;
 17 * the reset values of configuration-space register settings are preserved.
18 */
19void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info)
20{
21 struct pci_bus *root_bus;
22
23 root_bus = pci_scan_bus_parented(&ofdev->dev, 0, info->ops, info);
24 if (root_bus) {
25 root_bus->resource[0] = &info->io_space;
26 root_bus->resource[1] = &info->mem_space;
27 root_bus->resource[2] = NULL;
28
29 /* Init all PCI devices into PCI tree */
30 pci_bus_add_devices(root_bus);
31
32 /* Setup IRQs of all devices using custom routines */
33 pci_fixup_irqs(pci_common_swizzle, info->map_irq);
34
35 /* Assign devices with resources */
36 pci_assign_unassigned_resources();
37 }
38}
39
 40/* PCI Memory and Prefetchable Memory are direct-mapped. However, I/O Space is
 41 * accessed through a Window which is translated to the low 64KB in PCI space;
 42 * the first 4KB is not used, so 60KB is available.
43 *
44 * This function is used by generic code to translate resource addresses into
45 * PCI addresses.
46 */
47void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
48 struct resource *res)
49{
50 struct leon_pci_info *info = dev->bus->sysdata;
51
52 region->start = res->start;
53 region->end = res->end;
54
55 if (res->flags & IORESOURCE_IO) {
56 region->start -= (info->io_space.start - 0x1000);
57 region->end -= (info->io_space.start - 0x1000);
58 }
59}
60EXPORT_SYMBOL(pcibios_resource_to_bus);
61
62/* see pcibios_resource_to_bus() comment */
63void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
64 struct pci_bus_region *region)
65{
66 struct leon_pci_info *info = dev->bus->sysdata;
67
68 res->start = region->start;
69 res->end = region->end;
70
71 if (res->flags & IORESOURCE_IO) {
72 res->start += (info->io_space.start - 0x1000);
73 res->end += (info->io_space.start - 0x1000);
74 }
75}
76EXPORT_SYMBOL(pcibios_bus_to_resource);
77
78void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
79{
80 struct leon_pci_info *info = pbus->sysdata;
81 struct pci_dev *dev;
82 int i, has_io, has_mem;
83 u16 cmd;
84
85 /* Generic PCI bus probing sets these to point at
 86 * &io{port,mem}_resource which is wrong for us.
87 */
88 if (pbus->self == NULL) {
89 pbus->resource[0] = &info->io_space;
90 pbus->resource[1] = &info->mem_space;
91 pbus->resource[2] = NULL;
92 }
93
94 list_for_each_entry(dev, &pbus->devices, bus_list) {
95 /*
 96 * We cannot rely on the bootloader having enabled I/O
 97 * or memory access to PCI devices. Instead we enable it here
 98 * if the device has BARs of the respective type.
99 */
100 has_io = has_mem = 0;
101 for (i = 0; i < PCI_ROM_RESOURCE; i++) {
102 unsigned long f = dev->resource[i].flags;
103 if (f & IORESOURCE_IO)
104 has_io = 1;
105 else if (f & IORESOURCE_MEM)
106 has_mem = 1;
107 }
108 /* ROM BARs are mapped into 32-bit memory space */
109 if (dev->resource[PCI_ROM_RESOURCE].end != 0) {
110 dev->resource[PCI_ROM_RESOURCE].flags |=
111 IORESOURCE_ROM_ENABLE;
112 has_mem = 1;
113 }
114 pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd);
115 if (has_io && !(cmd & PCI_COMMAND_IO)) {
116#ifdef CONFIG_PCI_DEBUG
117 printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n",
118 pci_name(dev));
119#endif
120 cmd |= PCI_COMMAND_IO;
121 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
122 cmd);
123 }
124 if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) {
125#ifdef CONFIG_PCI_DEBUG
126 printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev"
127 "%s\n", pci_name(dev));
128#endif
129 cmd |= PCI_COMMAND_MEMORY;
130 pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND,
131 cmd);
132 }
133 }
134}
135
136/*
137 * Other archs parse arguments here.
138 */
139char * __devinit pcibios_setup(char *str)
140{
141 return str;
142}
143
144resource_size_t pcibios_align_resource(void *data, const struct resource *res,
145 resource_size_t size, resource_size_t align)
146{
147 return res->start;
148}
149
150int pcibios_enable_device(struct pci_dev *dev, int mask)
151{
152 return pci_enable_resources(dev, mask);
153}
154
155struct device_node *pci_device_to_OF_node(struct pci_dev *pdev)
156{
157 /*
158 * Currently the OpenBoot nodes are not connected with the PCI device,
159 * this is because the LEON PROM does not create PCI nodes. Eventually
160 * this will change and the same approach as pcic.c can be used to
161 * match PROM nodes with pci devices.
162 */
163 return NULL;
164}
165EXPORT_SYMBOL(pci_device_to_OF_node);
166
167void __devinit pcibios_update_irq(struct pci_dev *dev, int irq)
168{
169#ifdef CONFIG_PCI_DEBUG
170 printk(KERN_DEBUG "LEONPCI: Assigning IRQ %02d to %s\n", irq,
171 pci_name(dev));
172#endif
173 pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
174}
175
176/* in/out routines taken from pcic.c
177 *
178 * This probably belongs here rather than ioport.c because
179 * we do not want this crud linked into SBus kernels.
 180 * Also, think for a moment about the likes of floppy.c that
181 * include architecture specific parts. They may want to redefine ins/outs.
182 *
183 * We do not use horrible macros here because we want to
184 * advance pointer by sizeof(size).
185 */
186void outsb(unsigned long addr, const void *src, unsigned long count)
187{
188 while (count) {
189 count -= 1;
190 outb(*(const char *)src, addr);
191 src += 1;
192 /* addr += 1; */
193 }
194}
195EXPORT_SYMBOL(outsb);
196
197void outsw(unsigned long addr, const void *src, unsigned long count)
198{
199 while (count) {
200 count -= 2;
201 outw(*(const short *)src, addr);
202 src += 2;
203 /* addr += 2; */
204 }
205}
206EXPORT_SYMBOL(outsw);
207
208void outsl(unsigned long addr, const void *src, unsigned long count)
209{
210 while (count) {
211 count -= 4;
212 outl(*(const long *)src, addr);
213 src += 4;
214 /* addr += 4; */
215 }
216}
217EXPORT_SYMBOL(outsl);
218
219void insb(unsigned long addr, void *dst, unsigned long count)
220{
221 while (count) {
222 count -= 1;
223 *(unsigned char *)dst = inb(addr);
224 dst += 1;
225 /* addr += 1; */
226 }
227}
228EXPORT_SYMBOL(insb);
229
230void insw(unsigned long addr, void *dst, unsigned long count)
231{
232 while (count) {
233 count -= 2;
234 *(unsigned short *)dst = inw(addr);
235 dst += 2;
236 /* addr += 2; */
237 }
238}
239EXPORT_SYMBOL(insw);
240
241void insl(unsigned long addr, void *dst, unsigned long count)
242{
243 while (count) {
244 count -= 4;
245 /*
246 * XXX I am sure we are in for an unaligned trap here.
247 */
248 *(unsigned long *)dst = inl(addr);
249 dst += 4;
250 /* addr += 4; */
251 }
252}
253EXPORT_SYMBOL(insl);
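To make the IORESOURCE_IO arithmetic in pcibios_resource_to_bus()/pcibios_bus_to_resource() above concrete: the CPU-side resource starts at io_space.start, which corresponds to PCI I/O address 0x1000 (the first 4KB of the 64KB window is skipped), so the conversion is a fixed-offset subtraction or addition. A stand-alone sketch with made-up addresses:

#include <assert.h>

int main(void)
{
	unsigned long io_space_start = 0xfff01000;	/* example resource start (virtual) */
	unsigned long res_start      = 0xfff01400;	/* an I/O BAR inside the window */

	/* resource -> bus, as in pcibios_resource_to_bus() */
	unsigned long bus_start = res_start - (io_space_start - 0x1000);
	/* bus -> resource, as in pcibios_bus_to_resource() */
	unsigned long back      = bus_start + (io_space_start - 0x1000);

	assert(bus_start == 0x1400);	/* the device decodes its I/O BAR at PCI 0x1400 */
	assert(back == res_start);
	return 0;
}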
diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c
new file mode 100644
index 000000000000..44dc093ee33a
--- /dev/null
+++ b/arch/sparc/kernel/leon_pci_grpci2.c
@@ -0,0 +1,897 @@
1/*
2 * leon_pci_grpci2.c: GRPCI2 Host PCI driver
3 *
4 * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom
5 *
6 */
7
8#include <linux/of_device.h>
9#include <linux/kernel.h>
10#include <linux/pci.h>
11#include <linux/delay.h>
12#include <linux/module.h>
13#include <asm/io.h>
14#include <asm/leon.h>
15#include <asm/vaddrs.h>
16#include <asm/sections.h>
17#include <asm/leon_pci.h>
18
19#include "irq.h"
20
21struct grpci2_barcfg {
22 unsigned long pciadr; /* PCI Space Address */
23 unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */
24};
25
26/* Device Node Configuration options:
27 * - barcfgs : Custom Configuration of Host's 6 target BARs
28 * - irq_mask : Limit which PCI interrupts are enabled
29 * - do_reset : Force PCI Reset on startup
30 *
31 * barcfgs
32 * =======
33 *
34 * Optional custom Target BAR configuration (see struct grpci2_barcfg). All
35 * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes)
36 *
37 * -1 means not configured (let host driver do default setup).
38 *
39 * [i*2+0] = PCI Address of BAR[i] on target interface
40 * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address
41 *
42 *
43 * irq_mask
44 * ========
45 *
46 * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default
47 * all are enabled. Use this when PCI interrupt pins are floating on PCB.
48 * int, len=4.
49 * bit0 = PCI INTA#
50 * bit1 = PCI INTB#
51 * bit2 = PCI INTC#
52 * bit3 = PCI INTD#
53 *
54 *
55 * reset
56 * =====
57 *
58 * Force PCI reset on startup. int, len=4
59 */
60
61/* Enable Debugging Configuration Space Access */
62#undef GRPCI2_DEBUG_CFGACCESS
63
64/*
65 * GRPCI2 APB Register MAP
66 */
67struct grpci2_regs {
68 unsigned int ctrl; /* 0x00 Control */
69 unsigned int sts_cap; /* 0x04 Status / Capabilities */
70 int res1; /* 0x08 */
71 unsigned int io_map; /* 0x0C I/O Map address */
72 unsigned int dma_ctrl; /* 0x10 DMA */
73 unsigned int dma_bdbase; /* 0x14 DMA */
74 int res2[2]; /* 0x18 */
75 unsigned int bars[6]; /* 0x20 read-only PCI BARs */
76 int res3[2]; /* 0x38 */
77 unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */
78
79 /* PCI Trace Buffer Registers (OPTIONAL) */
80 unsigned int t_ctrl; /* 0x80 */
81 unsigned int t_cnt; /* 0x84 */
82 unsigned int t_adpat; /* 0x88 */
83 unsigned int t_admask; /* 0x8C */
84 unsigned int t_sigpat; /* 0x90 */
85 unsigned int t_sigmask; /* 0x94 */
86 unsigned int t_adstate; /* 0x98 */
87 unsigned int t_sigstate; /* 0x9C */
88};
89
90#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
91#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
92
93#define CTRL_BUS_BIT 16
94
95#define CTRL_RESET (1<<31)
96#define CTRL_SI (1<<27)
97#define CTRL_PE (1<<26)
98#define CTRL_EI (1<<25)
99#define CTRL_ER (1<<24)
100#define CTRL_BUS (0xff<<CTRL_BUS_BIT)
101#define CTRL_HOSTINT 0xf
102
103#define STS_HOST_BIT 31
104#define STS_MST_BIT 30
105#define STS_TAR_BIT 29
106#define STS_DMA_BIT 28
107#define STS_DI_BIT 27
108#define STS_HI_BIT 26
109#define STS_IRQMODE_BIT 24
110#define STS_TRACE_BIT 23
111#define STS_CFGERRVALID_BIT 20
112#define STS_CFGERR_BIT 19
113#define STS_INTTYPE_BIT 12
114#define STS_INTSTS_BIT 8
115#define STS_FDEPTH_BIT 2
116#define STS_FNUM_BIT 0
117
118#define STS_HOST (1<<STS_HOST_BIT)
119#define STS_MST (1<<STS_MST_BIT)
120#define STS_TAR (1<<STS_TAR_BIT)
121#define STS_DMA (1<<STS_DMA_BIT)
122#define STS_DI (1<<STS_DI_BIT)
123#define STS_HI (1<<STS_HI_BIT)
124#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT)
125#define STS_TRACE (1<<STS_TRACE_BIT)
126#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT)
127#define STS_CFGERR (1<<STS_CFGERR_BIT)
128#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT)
129#define STS_INTSTS (0xf<<STS_INTSTS_BIT)
130#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT)
131#define STS_FNUM (0x3<<STS_FNUM_BIT)
132
133#define STS_ISYSERR (1<<17)
134#define STS_IDMA (1<<16)
135#define STS_IDMAERR (1<<15)
136#define STS_IMSTABRT (1<<14)
137#define STS_ITGTABRT (1<<13)
138#define STS_IPARERR (1<<12)
139
140#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR)
141
142struct grpci2_bd_chan {
143 unsigned int ctrl; /* 0x00 DMA Control */
144 unsigned int nchan; /* 0x04 Next DMA Channel Address */
145 unsigned int nbd; /* 0x08 Next Data Descriptor in chan */
146 unsigned int res; /* 0x0C Reserved */
147};
148
149#define BD_CHAN_EN 0x80000000
150#define BD_CHAN_TYPE 0x00300000
151#define BD_CHAN_BDCNT 0x0000ffff
152#define BD_CHAN_EN_BIT 31
153#define BD_CHAN_TYPE_BIT 20
154#define BD_CHAN_BDCNT_BIT 0
155
156struct grpci2_bd_data {
157 unsigned int ctrl; /* 0x00 DMA Data Control */
158 unsigned int pci_adr; /* 0x04 PCI Start Address */
159 unsigned int ahb_adr; /* 0x08 AHB Start address */
160 unsigned int next; /* 0x0C Next Data Descriptor in chan */
161};
162
163#define BD_DATA_EN 0x80000000
164#define BD_DATA_IE 0x40000000
165#define BD_DATA_DR 0x20000000
166#define BD_DATA_TYPE 0x00300000
167#define BD_DATA_ER 0x00080000
168#define BD_DATA_LEN 0x0000ffff
169#define BD_DATA_EN_BIT 31
170#define BD_DATA_IE_BIT 30
171#define BD_DATA_DR_BIT 29
172#define BD_DATA_TYPE_BIT 20
173#define BD_DATA_ER_BIT 19
174#define BD_DATA_LEN_BIT 0
175
176/* GRPCI2 Capability */
177struct grpci2_cap_first {
178 unsigned int ctrl;
179 unsigned int pci2ahb_map[6];
180 unsigned int ext2ahb_map;
181 unsigned int io_map;
182 unsigned int pcibar_size[6];
183};
184#define CAP9_CTRL_OFS 0
185#define CAP9_BAR_OFS 0x4
186#define CAP9_IOMAP_OFS 0x20
187#define CAP9_BARSIZE_OFS 0x24
188
189struct grpci2_priv {
190 struct leon_pci_info info; /* must be on top of this structure */
191 struct grpci2_regs *regs;
192 char irq;
193 char irq_mode; /* IRQ Mode from CAPSTS REG */
194 char bt_enabled;
195 char do_reset;
196 char irq_mask;
197 u32 pciid; /* PCI ID of Host */
198 unsigned char irq_map[4];
199
200 /* Virtual IRQ numbers */
201 unsigned int virq_err;
202 unsigned int virq_dma;
203
204 /* AHB PCI Windows */
205 unsigned long pci_area; /* MEMORY */
206 unsigned long pci_area_end;
207 unsigned long pci_io; /* I/O */
208 unsigned long pci_conf; /* CONFIGURATION */
209 unsigned long pci_conf_end;
210 unsigned long pci_io_va;
211
212 struct grpci2_barcfg tgtbars[6];
213};
214
215DEFINE_SPINLOCK(grpci2_dev_lock);
216struct grpci2_priv *grpci2priv;
217
218int grpci2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
219{
220 struct grpci2_priv *priv = dev->bus->sysdata;
221 int irq_group;
222
 223 /* Use default IRQ decoding on PCI BUS0 according to slot numbering */
224 irq_group = slot & 0x3;
225 pin = ((pin - 1) + irq_group) & 0x3;
226
227 return priv->irq_map[pin];
228}
229
230static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
231 unsigned int devfn, int where, u32 *val)
232{
233 unsigned int *pci_conf;
234 unsigned long flags;
235 u32 tmp;
236
237 if (where & 0x3)
238 return -EINVAL;
239
240 if (bus == 0 && PCI_SLOT(devfn) != 0)
241 devfn += (0x8 * 6);
242
243 /* Select bus */
244 spin_lock_irqsave(&grpci2_dev_lock, flags);
245 REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
246 (bus << 16));
247 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
248
249 /* clear old status */
250 REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
251
252 pci_conf = (unsigned int *) (priv->pci_conf |
253 (devfn << 8) | (where & 0xfc));
254 tmp = LEON3_BYPASS_LOAD_PA(pci_conf);
255
 256 /* Wait until GRPCI2 signals that the CFG access is done; it should be
 257 * done instantaneously unless a DMA operation is ongoing...
258 */
259 while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
260 ;
261
262 if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) {
263 *val = 0xffffffff;
264 } else {
265 /* Bus always little endian (unaffected by byte-swapping) */
266 *val = flip_dword(tmp);
267 }
268
269 return 0;
270}
271
272static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus,
273 unsigned int devfn, int where, u32 *val)
274{
275 u32 v;
276 int ret;
277
278 if (where & 0x1)
279 return -EINVAL;
280 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
281 *val = 0xffff & (v >> (8 * (where & 0x3)));
282 return ret;
283}
284
285static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus,
286 unsigned int devfn, int where, u32 *val)
287{
288 u32 v;
289 int ret;
290
291 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
292 *val = 0xff & (v >> (8 * (where & 3)));
293
294 return ret;
295}
296
297static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
298 unsigned int devfn, int where, u32 val)
299{
300 unsigned int *pci_conf;
301 unsigned long flags;
302
303 if (where & 0x3)
304 return -EINVAL;
305
306 if (bus == 0 && PCI_SLOT(devfn) != 0)
307 devfn += (0x8 * 6);
308
309 /* Select bus */
310 spin_lock_irqsave(&grpci2_dev_lock, flags);
311 REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) |
312 (bus << 16));
313 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
314
315 /* clear old status */
316 REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID));
317
318 pci_conf = (unsigned int *) (priv->pci_conf |
319 (devfn << 8) | (where & 0xfc));
320 LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
321
 322 /* Wait until GRPCI2 signals that the CFG access is done; it should be
 323 * done instantaneously unless a DMA operation is ongoing...
324 */
325 while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0)
326 ;
327
328 return 0;
329}
330
331static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus,
332 unsigned int devfn, int where, u32 val)
333{
334 int ret;
335 u32 v;
336
337 if (where & 0x1)
338 return -EINVAL;
339 ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v);
340 if (ret)
341 return ret;
342 v = (v & ~(0xffff << (8 * (where & 0x3)))) |
343 ((0xffff & val) << (8 * (where & 0x3)));
344 return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
345}
346
347static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus,
348 unsigned int devfn, int where, u32 val)
349{
350 int ret;
351 u32 v;
352
353 ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v);
354 if (ret != 0)
355 return ret;
356 v = (v & ~(0xff << (8 * (where & 0x3)))) |
357 ((0xff & val) << (8 * (where & 0x3)));
358 return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v);
359}
360
361/* Read from Configuration Space. When entering here the PCI layer has taken
362 * the pci_lock spinlock and IRQ is off.
363 */
364static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn,
365 int where, int size, u32 *val)
366{
367 struct grpci2_priv *priv = grpci2priv;
368 unsigned int busno = bus->number;
369 int ret;
370
371 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0)) {
372 *val = ~0;
373 return 0;
374 }
375
376 switch (size) {
377 case 1:
378 ret = grpci2_cfg_r8(priv, busno, devfn, where, val);
379 break;
380 case 2:
381 ret = grpci2_cfg_r16(priv, busno, devfn, where, val);
382 break;
383 case 4:
384 ret = grpci2_cfg_r32(priv, busno, devfn, where, val);
385 break;
386 default:
387 ret = -EINVAL;
388 break;
389 }
390
391#ifdef GRPCI2_DEBUG_CFGACCESS
392 printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x "
393 "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where,
394 *val, size);
395#endif
396
397 return ret;
398}
399
400/* Write to Configuration Space. When entering here the PCI layer has taken
401 * the pci_lock spinlock and IRQ is off.
402 */
403static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn,
404 int where, int size, u32 val)
405{
406 struct grpci2_priv *priv = grpci2priv;
407 unsigned int busno = bus->number;
408
409 if (PCI_SLOT(devfn) > 15 || (PCI_SLOT(devfn) == 0 && busno == 0))
410 return 0;
411
412#ifdef GRPCI2_DEBUG_CFGACCESS
413 printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d "
414 "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn),
415 where, size, val);
416#endif
417
418 switch (size) {
419 default:
420 return -EINVAL;
421 case 1:
422 return grpci2_cfg_w8(priv, busno, devfn, where, val);
423 case 2:
424 return grpci2_cfg_w16(priv, busno, devfn, where, val);
425 case 4:
426 return grpci2_cfg_w32(priv, busno, devfn, where, val);
427 }
428}
429
430static struct pci_ops grpci2_ops = {
431 .read = grpci2_read_config,
432 .write = grpci2_write_config,
433};
434
435/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration
 436 * 3, where all PCI interrupts have a separate IRQ on the system IRQ controller,
437 * this is not needed and the standard IRQ controller can be used.
438 */
439
440static void grpci2_mask_irq(struct irq_data *data)
441{
442 unsigned long flags;
443 unsigned int irqidx;
444 struct grpci2_priv *priv = grpci2priv;
445
446 irqidx = (unsigned int)data->chip_data - 1;
447 if (irqidx > 3) /* only mask PCI interrupts here */
448 return;
449
450 spin_lock_irqsave(&grpci2_dev_lock, flags);
451 REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx));
452 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
453}
454
455static void grpci2_unmask_irq(struct irq_data *data)
456{
457 unsigned long flags;
458 unsigned int irqidx;
459 struct grpci2_priv *priv = grpci2priv;
460
461 irqidx = (unsigned int)data->chip_data - 1;
462 if (irqidx > 3) /* only unmask PCI interrupts here */
463 return;
464
465 spin_lock_irqsave(&grpci2_dev_lock, flags);
466 REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx));
467 spin_unlock_irqrestore(&grpci2_dev_lock, flags);
468}
469
470static unsigned int grpci2_startup_irq(struct irq_data *data)
471{
472 grpci2_unmask_irq(data);
473 return 0;
474}
475
476static void grpci2_shutdown_irq(struct irq_data *data)
477{
478 grpci2_mask_irq(data);
479}
480
481static struct irq_chip grpci2_irq = {
482 .name = "grpci2",
483 .irq_startup = grpci2_startup_irq,
484 .irq_shutdown = grpci2_shutdown_irq,
485 .irq_mask = grpci2_mask_irq,
486 .irq_unmask = grpci2_unmask_irq,
487};
488
489/* Handle one or multiple IRQs from the PCI core */
490static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc)
491{
492 struct grpci2_priv *priv = grpci2priv;
493 int i, ack = 0;
494 unsigned int ctrl, sts_cap, pci_ints;
495
496 ctrl = REGLOAD(priv->regs->ctrl);
497 sts_cap = REGLOAD(priv->regs->sts_cap);
498
499 /* Error Interrupt? */
500 if (sts_cap & STS_ERR_IRQ) {
501 generic_handle_irq(priv->virq_err);
502 ack = 1;
503 }
504
505 /* PCI Interrupt? */
506 pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT;
507 if (pci_ints) {
508 /* Call respective PCI Interrupt handler */
509 for (i = 0; i < 4; i++) {
510 if (pci_ints & (1 << i))
511 generic_handle_irq(priv->irq_map[i]);
512 }
513 ack = 1;
514 }
515
516 /*
 517 * Decode the DMA interrupt only when it is shared with Err and PCI INTX#;
 518 * when the DMA has a unique IRQ, the DMA interrupts don't end up here,
 519 * they go directly to the DMA ISR.
520 */
521 if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) {
522 generic_handle_irq(priv->virq_dma);
523 ack = 1;
524 }
525
526 /*
527 * Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ
528 * Controller, this must be done after IRQ sources have been handled to
529 * avoid double IRQ generation
530 */
531 if (ack)
532 desc->irq_data.chip->irq_eoi(&desc->irq_data);
533}
534
535/* Create a virtual IRQ */
536static unsigned int grpci2_build_device_irq(unsigned int irq)
537{
538 unsigned int virq = 0, pil;
539
540 pil = 1 << 8;
541 virq = irq_alloc(irq, pil);
542 if (virq == 0)
543 goto out;
544
545 irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq,
546 "pcilvl");
547 irq_set_chip_data(virq, (void *)irq);
548
549out:
550 return virq;
551}
552
553void grpci2_hw_init(struct grpci2_priv *priv)
554{
555 u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
556 struct grpci2_regs *regs = priv->regs;
557 int i;
558 struct grpci2_barcfg *barcfg = priv->tgtbars;
559
560 /* Reset any earlier setup */
561 if (priv->do_reset) {
562 printk(KERN_INFO "GRPCI2: Resetting PCI bus\n");
563 REGSTORE(regs->ctrl, CTRL_RESET);
564 ssleep(1); /* Wait for boards to settle */
565 }
566 REGSTORE(regs->ctrl, 0);
567 REGSTORE(regs->sts_cap, ~0); /* Clear Status */
568 REGSTORE(regs->dma_ctrl, 0);
569 REGSTORE(regs->dma_bdbase, 0);
570
571 /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */
572 REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff);
573
 574 /* Set up a 1:1 mapping between AHB -> PCI memory space, for all Masters.
 575 * Each AHB master has its own mapping registers. Max 16 AHB masters.
576 */
577 for (i = 0; i < 16; i++)
578 REGSTORE(regs->ahbmst_map[i], priv->pci_area);
579
580 /* Get the GRPCI2 Host PCI ID */
581 grpci2_cfg_r32(priv, 0, 0, PCI_VENDOR_ID, &priv->pciid);
582
583 /* Get address to first (always defined) capability structure */
584 grpci2_cfg_r8(priv, 0, 0, PCI_CAPABILITY_LIST, &capptr);
585
586 /* Enable/Disable Byte twisting */
587 grpci2_cfg_r32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, &io_map);
588 io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0);
589 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_IOMAP_OFS, io_map);
590
 591 /* Set up the Host's PCI Target BARs so that other peripherals can access
 592 * and do DMA to the host's memory. The target BARs can be sized and
 593 * enabled individually.
 594 *
 595 * The user may set custom target BARs, but the default is:
 596 * the first BAR is used to map low kernel main memory (DMA is part of the
 597 * normal region on sparc, which is SRMMU_MAXMEM big) 1:1 to the PCI bus;
 598 * the other BARs are disabled. We assume that the first BAR
 599 * is always available.
600 */
601 for (i = 0; i < 6; i++) {
602 if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) {
603 /* Target BARs must have the proper alignment */
604 ahbadr = barcfg[i].ahbadr;
605 pciadr = barcfg[i].pciadr;
606 bar_sz = ((pciadr - 1) & ~pciadr) + 1;
607 } else {
608 if (i == 0) {
609 /* Map main memory */
610 bar_sz = 0xf0000008; /* 256MB prefetchable */
611 ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN(
612 (unsigned long) &_end));
613 pciadr = ahbadr;
614 } else {
615 bar_sz = 0;
616 ahbadr = 0;
617 pciadr = 0;
618 }
619 }
620 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BARSIZE_OFS+i*4, bar_sz);
621 grpci2_cfg_w32(priv, 0, 0, PCI_BASE_ADDRESS_0+i*4, pciadr);
622 grpci2_cfg_w32(priv, 0, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr);
623 printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n",
624 i, pciadr, ahbadr);
625 }
626
627 /* set as bus master and enable pci memory responses */
628 grpci2_cfg_r32(priv, 0, 0, PCI_COMMAND, &data);
629 data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
630 grpci2_cfg_w32(priv, 0, 0, PCI_COMMAND, data);
631
 632 /* Enable Error response (CPU-TRAP) on illegal memory access. */
633 REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE);
634}
635
636static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
637{
638 printk(KERN_ERR "GRPCI2: Jump IRQ happened\n");
639 return IRQ_NONE;
640}
641
642/* Handle GRPCI2 Error Interrupt */
643static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
644{
645 struct grpci2_priv *priv = arg;
646 struct grpci2_regs *regs = priv->regs;
647 unsigned int status;
648
649 status = REGLOAD(regs->sts_cap);
650 if ((status & STS_ERR_IRQ) == 0)
651 return IRQ_NONE;
652
653 if (status & STS_IPARERR)
654 printk(KERN_ERR "GRPCI2: Parity Error\n");
655
656 if (status & STS_ITGTABRT)
657 printk(KERN_ERR "GRPCI2: Target Abort\n");
658
659 if (status & STS_IMSTABRT)
660 printk(KERN_ERR "GRPCI2: Master Abort\n");
661
662 if (status & STS_ISYSERR)
663 printk(KERN_ERR "GRPCI2: System Error\n");
664
665 /* Clear handled INT TYPE IRQs */
666 REGSTORE(regs->sts_cap, status & STS_ERR_IRQ);
667
668 return IRQ_HANDLED;
669}
670
671static int __devinit grpci2_of_probe(struct platform_device *ofdev)
672{
673 struct grpci2_regs *regs;
674 struct grpci2_priv *priv;
675 int err, i, len;
676 const int *tmp;
677 unsigned int capability;
678
679 if (grpci2priv) {
680 printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n");
681 return -ENODEV;
682 }
683
684 if (ofdev->num_resources < 3) {
685 printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n");
686 return -EIO;
687 }
688
689 /* Find Device Address */
690 regs = of_ioremap(&ofdev->resource[0], 0,
691 resource_size(&ofdev->resource[0]),
692 "grlib-grpci2 regs");
693 if (regs == NULL) {
694 printk(KERN_ERR "GRPCI2: ioremap failed\n");
695 return -EIO;
696 }
697
698 /*
699 * Check that we're in Host Slot and that we can act as a Host Bridge
 700 * and not only as a target.
701 */
702 capability = REGLOAD(regs->sts_cap);
703 if ((capability & STS_HOST) || !(capability & STS_MST)) {
704 printk(KERN_INFO "GRPCI2: not in host system slot\n");
705 err = -EIO;
706 goto err1;
707 }
708
709 priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL);
710 if (grpci2priv == NULL) {
711 err = -ENOMEM;
712 goto err1;
713 }
714 memset(grpci2priv, 0, sizeof(*grpci2priv));
715 priv->regs = regs;
716 priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
717 priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
718
719 printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq);
720
721 /* Byte twisting should be made configurable from kernel command line */
722 priv->bt_enabled = 1;
723
724 /* Let user do custom Target BAR assignment */
725 tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len);
726 if (tmp && (len == 2*4*6))
727 memcpy(priv->tgtbars, tmp, 2*4*6);
728 else
729 memset(priv->tgtbars, -1, 2*4*6);
730
731 /* Limit IRQ unmasking in irq_mode 2 and 3 */
732 tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len);
733 if (tmp && (len == 4))
 734 priv->irq_mask = *tmp;
735 else
736 priv->irq_mask = 0xf;
737
738 /* Optional PCI reset. Force PCI reset on startup */
739 tmp = of_get_property(ofdev->dev.of_node, "reset", &len);
740 if (tmp && (len == 4))
741 priv->do_reset = *tmp;
742 else
743 priv->do_reset = 0;
744
745 /* Find PCI Memory, I/O and Configuration Space Windows */
746 priv->pci_area = ofdev->resource[1].start;
747 priv->pci_area_end = ofdev->resource[1].end+1;
748 priv->pci_io = ofdev->resource[2].start;
749 priv->pci_conf = ofdev->resource[2].start + 0x10000;
750 priv->pci_conf_end = priv->pci_conf + 0x10000;
751 priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000);
752 if (!priv->pci_io_va) {
753 err = -EIO;
754 goto err2;
755 }
756
757 printk(KERN_INFO
758 "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n"
759 " I/O SPACE [0x%08lx - 0x%08lx]\n"
760 " CONFIG SPACE [0x%08lx - 0x%08lx]\n",
761 priv->pci_area, priv->pci_area_end-1,
762 priv->pci_io, priv->pci_conf-1,
763 priv->pci_conf, priv->pci_conf_end-1);
764
765 /*
 766 * I/O space resources in the I/O window are mapped into virtual address space.
 767 * We never use the low 4KB because some devices seem to have problems using
 768 * address 0.
769 */
770 memset(&priv->info.io_space, 0, sizeof(struct resource));
771 priv->info.io_space.name = "GRPCI2 PCI I/O Space";
772 priv->info.io_space.start = priv->pci_io_va + 0x1000;
773 priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1;
774 priv->info.io_space.flags = IORESOURCE_IO;
775
776 /*
777 * GRPCI2 has no prefetchable memory, map everything as
778 * non-prefetchable memory
779 */
780 memset(&priv->info.mem_space, 0, sizeof(struct resource));
781 priv->info.mem_space.name = "GRPCI2 PCI MEM Space";
782 priv->info.mem_space.start = priv->pci_area;
783 priv->info.mem_space.end = priv->pci_area_end - 1;
784 priv->info.mem_space.flags = IORESOURCE_MEM;
785
786 if (request_resource(&iomem_resource, &priv->info.mem_space) < 0)
787 goto err3;
788 if (request_resource(&ioport_resource, &priv->info.io_space) < 0)
789 goto err4;
790
791 grpci2_hw_init(priv);
792
793 /*
794 * Get PCI Interrupt to System IRQ mapping and setup IRQ handling
795 * Error IRQ always on PCI INTA.
796 */
797 if (priv->irq_mode < 2) {
798 /* All PCI interrupts are shared using the same system IRQ */
799 leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq,
800 "pcilvl", 0);
801
802 priv->irq_map[0] = grpci2_build_device_irq(1);
803 priv->irq_map[1] = grpci2_build_device_irq(2);
804 priv->irq_map[2] = grpci2_build_device_irq(3);
805 priv->irq_map[3] = grpci2_build_device_irq(4);
806
807 priv->virq_err = grpci2_build_device_irq(5);
808 if (priv->irq_mode & 1)
809 priv->virq_dma = ofdev->archdata.irqs[1];
810 else
811 priv->virq_dma = grpci2_build_device_irq(6);
812
813 /* Enable IRQs on LEON IRQ controller */
814 err = request_irq(priv->irq, grpci2_jump_interrupt, 0,
815 "GRPCI2_JUMP", priv);
816 if (err)
817 printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n");
818 } else {
 819 /* All PCI interrupts have a unique system IRQ */
820 for (i = 0; i < 4; i++) {
821 /* Make LEON IRQ layer handle level IRQ by acking */
822 leon_update_virq_handling(ofdev->archdata.irqs[i],
823 handle_fasteoi_irq, "pcilvl",
824 1);
825 priv->irq_map[i] = ofdev->archdata.irqs[i];
826 }
827 priv->virq_err = priv->irq_map[0];
828 if (priv->irq_mode & 1)
829 priv->virq_dma = ofdev->archdata.irqs[4];
830 else
831 priv->virq_dma = priv->irq_map[0];
832
833 /* Unmask all PCI interrupts, request_irq will not do that */
834 REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf));
835 }
836
837 /* Setup IRQ handler for non-configuration space access errors */
838 err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED,
839 "GRPCI2_ERR", priv);
840 if (err) {
841 printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err);
842 goto err5;
843 }
844
845 /*
846 * Enable Error Interrupts. PCI interrupts are unmasked once request_irq
847 * is called by the PCI Device drivers
848 */
849 REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI);
850
851 /* Init common layer and scan buses */
852 priv->info.ops = &grpci2_ops;
853 priv->info.map_irq = grpci2_map_irq;
854 leon_pci_init(ofdev, &priv->info);
855
856 return 0;
857
858err5:
859 release_resource(&priv->info.io_space);
860err4:
861 release_resource(&priv->info.mem_space);
862err3:
863 err = -ENOMEM;
864 iounmap((void *)priv->pci_io_va);
865err2:
866 kfree(priv);
867err1:
868 of_iounmap(&ofdev->resource[0], regs,
869 resource_size(&ofdev->resource[0]));
870 return err;
871}
872
873static struct of_device_id grpci2_of_match[] = {
874 {
875 .name = "GAISLER_GRPCI2",
876 },
877 {
878 .name = "01_07c",
879 },
880 {},
881};
882
883static struct platform_driver grpci2_of_driver = {
884 .driver = {
885 .name = "grpci2",
886 .owner = THIS_MODULE,
887 .of_match_table = grpci2_of_match,
888 },
889 .probe = grpci2_of_probe,
890};
891
892static int __init grpci2_init(void)
893{
894 return platform_driver_register(&grpci2_of_driver);
895}
896
897subsys_initcall(grpci2_init);
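As a worked illustration of the routing in grpci2_map_irq(): interrupt pins are rotated by the slot number (the usual PCI swizzle), so slot 0/INTA# uses the host's INTA# input while slot 1/INTA# lands on the host's INTB# input, and so on modulo four. Below is a stand-alone rendition of the same arithmetic; the irq_map contents are made up for the example.

#include <stdio.h>

static const unsigned int irq_map[4] = { 5, 6, 7, 8 };	/* host INTA#..INTD# lines (made up) */

static unsigned int map_irq(unsigned int slot, unsigned int pin /* 1..4 */)
{
	unsigned int irq_group = slot & 0x3;

	pin = ((pin - 1) + irq_group) & 0x3;
	return irq_map[pin];
}

int main(void)
{
	/* slot 0/INTA# -> 5, slot 1/INTA# -> 6, slot 2/INTA# -> 7, slot 1/INTD# -> 5 */
	printf("%u %u %u %u\n",
	       map_irq(0, 1), map_irq(1, 1), map_irq(2, 1), map_irq(1, 4));
	return 0;
}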
diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c
index 8d348c474a2f..99ba5baa9497 100644
--- a/arch/sparc/kernel/module.c
+++ b/arch/sparc/kernel/module.c
@@ -214,7 +214,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
214 me->name, 214 me->name,
215 (int) (ELF_R_TYPE(rel[i].r_info) & 0xff)); 215 (int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
216 return -ENOEXEC; 216 return -ENOEXEC;
217 }; 217 }
218 } 218 }
219 return 0; 219 return 0;
220} 220}
diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c
index 6e3874b64488..a6895987fb70 100644
--- a/arch/sparc/kernel/pci_common.c
+++ b/arch/sparc/kernel/pci_common.c
@@ -281,7 +281,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
281 case 4: 281 case 4:
282 *value = ret & 0xffffffff; 282 *value = ret & 0xffffffff;
283 break; 283 break;
284 }; 284 }
285 285
286 286
287 return PCIBIOS_SUCCESSFUL; 287 return PCIBIOS_SUCCESSFUL;
@@ -456,7 +456,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
456 456
457 default: 457 default:
458 break; 458 break;
459 }; 459 }
460 } 460 }
461 461
462 if (!saw_io || !saw_mem) { 462 if (!saw_io || !saw_mem) {
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 283fbc329a43..f030b02edddd 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -264,7 +264,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
264 default: 264 default:
265 type_string = "ECC Error"; 265 type_string = "ECC Error";
266 break; 266 break;
267 }; 267 }
268 printk("%s: IOMMU Error, type[%s]\n", 268 printk("%s: IOMMU Error, type[%s]\n",
269 pbm->name, type_string); 269 pbm->name, type_string);
270 270
@@ -319,7 +319,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm,
319 default: 319 default:
320 type_string = "ECC Error"; 320 type_string = "ECC Error";
321 break; 321 break;
322 }; 322 }
323 printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " 323 printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) "
324 "sz(%dK) vpg(%08lx)]\n", 324 "sz(%dK) vpg(%08lx)]\n",
325 pbm->name, i, type_string, 325 pbm->name, i, type_string,
@@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm,
1328 default: 1328 default:
1329 chipset_name = "SCHIZO"; 1329 chipset_name = "SCHIZO";
1330 break; 1330 break;
1331 }; 1331 }
1332 1332
1333 /* For SCHIZO, three OBP regs: 1333 /* For SCHIZO, three OBP regs:
1334 * 1) PBM controller regs 1334 * 1) PBM controller regs
diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c
index 570b98f6e897..40e4936bd479 100644
--- a/arch/sparc/kernel/prom_irqtrans.c
+++ b/arch/sparc/kernel/prom_irqtrans.c
@@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp,
694 case 3: 694 case 3:
695 iclr = reg_base + SYSIO_ICLR_SLOT3; 695 iclr = reg_base + SYSIO_ICLR_SLOT3;
696 break; 696 break;
697 }; 697 }
698 698
699 iclr += ((unsigned long)sbus_level - 1UL) * 8UL; 699 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
700 } 700 }
diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c
index fe2af66bb198..8db48e808ed4 100644
--- a/arch/sparc/kernel/psycho_common.c
+++ b/arch/sparc/kernel/psycho_common.c
@@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm,
228 default: 228 default:
229 type_str = "ECC Error"; 229 type_str = "ECC Error";
230 break; 230 break;
231 }; 231 }
232 printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", 232 printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
233 pbm->name, type_str); 233 pbm->name, type_str);
234 234
diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c
index 2ca32d13abcf..a161b9c77f05 100644
--- a/arch/sparc/kernel/sbus.c
+++ b/arch/sparc/kernel/sbus.c
@@ -97,7 +97,7 @@ void sbus_set_sbus64(struct device *dev, int bursts)
97 97
98 default: 98 default:
99 return; 99 return;
100 }; 100 }
101 101
102 val = upa_readq(cfg_reg); 102 val = upa_readq(cfg_reg);
103 if (val & (1UL << 14UL)) { 103 if (val & (1UL << 14UL)) {
@@ -244,7 +244,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino)
244 case 3: 244 case 3:
245 iclr = reg_base + SYSIO_ICLR_SLOT3; 245 iclr = reg_base + SYSIO_ICLR_SLOT3;
246 break; 246 break;
247 }; 247 }
248 248
249 iclr += ((unsigned long)sbus_level - 1UL) * 8UL; 249 iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
250 } 250 }
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 3249d3f3234d..d26e1f6c717a 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -267,7 +267,7 @@ void __init setup_arch(char **cmdline_p)
267 default: 267 default:
268 printk("UNKNOWN!\n"); 268 printk("UNKNOWN!\n");
269 break; 269 break;
270 }; 270 }
271 271
272#ifdef CONFIG_DUMMY_CONSOLE 272#ifdef CONFIG_DUMMY_CONSOLE
273 conswitchp = &dummy_con; 273 conswitchp = &dummy_con;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index f3b6850cc8db..c4dd0999da86 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -209,7 +209,7 @@ void __init per_cpu_patch(void)
209 default: 209 default:
210 prom_printf("Unknown cpu type, halting.\n"); 210 prom_printf("Unknown cpu type, halting.\n");
211 prom_halt(); 211 prom_halt();
212 }; 212 }
213 213
214 *(unsigned int *) (addr + 0) = insns[0]; 214 *(unsigned int *) (addr + 0) = insns[0];
215 wmb(); 215 wmb();
diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c
index d5b3958be0b4..21b125341bf7 100644
--- a/arch/sparc/kernel/smp_32.c
+++ b/arch/sparc/kernel/smp_32.c
@@ -114,7 +114,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
114 printk("UNKNOWN!\n"); 114 printk("UNKNOWN!\n");
115 BUG(); 115 BUG();
116 break; 116 break;
117 }; 117 }
118} 118}
119 119
120void cpu_panic(void) 120void cpu_panic(void)
@@ -374,7 +374,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
374 printk("UNKNOWN!\n"); 374 printk("UNKNOWN!\n");
375 BUG(); 375 BUG();
376 break; 376 break;
377 }; 377 }
378} 378}
379 379
380/* Set this up early so that things like the scheduler can init 380/* Set this up early so that things like the scheduler can init
@@ -447,7 +447,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
447 printk("UNKNOWN!\n"); 447 printk("UNKNOWN!\n");
448 BUG(); 448 BUG();
449 break; 449 break;
450 }; 450 }
451 451
452 if (!ret) { 452 if (!ret) {
453 cpumask_set_cpu(cpu, &smp_commenced_mask); 453 cpumask_set_cpu(cpu, &smp_commenced_mask);
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index a9ea60eb2c10..1d13c5bda0b1 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -103,10 +103,9 @@ static void sun4d_sbus_handler_irq(int sbusl)
103 103
104 sbil = (sbusl << 2); 104 sbil = (sbusl << 2);
105 /* Loop for each pending SBI */ 105 /* Loop for each pending SBI */
106 for (sbino = 0; bus_mask; sbino++) { 106 for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) {
107 unsigned int idx, mask; 107 unsigned int idx, mask;
108 108
109 bus_mask >>= 1;
110 if (!(bus_mask & 1)) 109 if (!(bus_mask & 1))
111 continue; 110 continue;
112 /* XXX This seems to ACK the irq twice. acquire_sbi() 111 /* XXX This seems to ACK the irq twice. acquire_sbi()
@@ -118,19 +117,16 @@ static void sun4d_sbus_handler_irq(int sbusl)
118 mask &= (0xf << sbil); 117 mask &= (0xf << sbil);
119 118
120 /* Loop for each pending SBI slot */ 119 /* Loop for each pending SBI slot */
121 idx = 0;
122 slot = (1 << sbil); 120 slot = (1 << sbil);
123 while (mask != 0) { 121 for (idx = 0; mask != 0; idx++, slot <<= 1) {
124 unsigned int pil; 122 unsigned int pil;
125 struct irq_bucket *p; 123 struct irq_bucket *p;
126 124
127 idx++;
128 slot <<= 1;
129 if (!(mask & slot)) 125 if (!(mask & slot))
130 continue; 126 continue;
131 127
132 mask &= ~slot; 128 mask &= ~slot;
133 pil = sun4d_encode_irq(sbino, sbil, idx); 129 pil = sun4d_encode_irq(sbino, sbusl, idx);
134 130
135 p = irq_map[pil]; 131 p = irq_map[pil];
136 while (p) { 132 while (p) {
@@ -218,10 +214,10 @@ static void sun4d_unmask_irq(struct irq_data *data)
218 214
219#ifdef CONFIG_SMP 215#ifdef CONFIG_SMP
220 spin_lock_irqsave(&sun4d_imsk_lock, flags); 216 spin_lock_irqsave(&sun4d_imsk_lock, flags);
221 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | ~(1 << real_irq)); 217 cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq));
222 spin_unlock_irqrestore(&sun4d_imsk_lock, flags); 218 spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
223#else 219#else
224 cc_set_imsk(cc_get_imsk() | ~(1 << real_irq)); 220 cc_set_imsk(cc_get_imsk() & ~(1 << real_irq));
225#endif 221#endif
226} 222}
227 223
@@ -299,26 +295,68 @@ static void __init sun4d_load_profile_irqs(void)
299 } 295 }
300} 296}
301 297
298unsigned int _sun4d_build_device_irq(unsigned int real_irq,
299 unsigned int pil,
300 unsigned int board)
301{
302 struct sun4d_handler_data *handler_data;
303 unsigned int irq;
304
305 irq = irq_alloc(real_irq, pil);
306 if (irq == 0) {
307 prom_printf("IRQ: allocate for %d %d %d failed\n",
308 real_irq, pil, board);
309 goto err_out;
310 }
311
312 handler_data = irq_get_handler_data(irq);
313 if (unlikely(handler_data))
314 goto err_out;
315
316 handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
317 if (unlikely(!handler_data)) {
318 prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
319 prom_halt();
320 }
321 handler_data->cpuid = board_to_cpu[board];
322 handler_data->real_irq = real_irq;
323 irq_set_chip_and_handler_name(irq, &sun4d_irq,
324 handle_level_irq, "level");
325 irq_set_handler_data(irq, handler_data);
326
327err_out:
328 return irq;
329}
330
331
332
302unsigned int sun4d_build_device_irq(struct platform_device *op, 333unsigned int sun4d_build_device_irq(struct platform_device *op,
303 unsigned int real_irq) 334 unsigned int real_irq)
304{ 335{
305 struct device_node *dp = op->dev.of_node; 336 struct device_node *dp = op->dev.of_node;
306 struct device_node *io_unit, *sbi = dp->parent; 337 struct device_node *board_parent, *bus = dp->parent;
338 char *bus_connection;
307 const struct linux_prom_registers *regs; 339 const struct linux_prom_registers *regs;
308 struct sun4d_handler_data *handler_data;
309 unsigned int pil; 340 unsigned int pil;
310 unsigned int irq; 341 unsigned int irq;
311 int board, slot; 342 int board, slot;
312 int sbusl; 343 int sbusl;
313 344
314 irq = 0; 345 irq = real_irq;
315 while (sbi) { 346 while (bus) {
316 if (!strcmp(sbi->name, "sbi")) 347 if (!strcmp(bus->name, "sbi")) {
348 bus_connection = "io-unit";
349 break;
350 }
351
352 if (!strcmp(bus->name, "bootbus")) {
353 bus_connection = "cpu-unit";
317 break; 354 break;
355 }
318 356
319 sbi = sbi->parent; 357 bus = bus->parent;
320 } 358 }
321 if (!sbi) 359 if (!bus)
322 goto err_out; 360 goto err_out;
323 361
324 regs = of_get_property(dp, "reg", NULL); 362 regs = of_get_property(dp, "reg", NULL);
@@ -328,17 +366,19 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
328 slot = regs->which_io; 366 slot = regs->which_io;
329 367
330 /* 368 /*
331 * If SBI's parent is not io-unit or the io-unit lacks 369 * If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit
332 * a "board#" property, something is very wrong. 370 * lacks a "board#" property, something is very wrong.
333 */ 371 */
334 if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) { 372 if (!bus->parent || strcmp(bus->parent->name, bus_connection)) {
335 printk("%s: Error, parent is not io-unit.\n", sbi->full_name); 373 printk(KERN_ERR "%s: Error, parent is not %s.\n",
374 bus->full_name, bus_connection);
336 goto err_out; 375 goto err_out;
337 } 376 }
338 io_unit = sbi->parent; 377 board_parent = bus->parent;
339 board = of_getintprop_default(io_unit, "board#", -1); 378 board = of_getintprop_default(board_parent, "board#", -1);
340 if (board == -1) { 379 if (board == -1) {
341 printk("%s: Error, lacks board# property.\n", io_unit->full_name); 380 printk(KERN_ERR "%s: Error, lacks board# property.\n",
381 board_parent->full_name);
342 goto err_out; 382 goto err_out;
343 } 383 }
344 384
@@ -348,29 +388,17 @@ unsigned int sun4d_build_device_irq(struct platform_device *op,
348 else 388 else
349 pil = real_irq; 389 pil = real_irq;
350 390
351 irq = irq_alloc(real_irq, pil); 391 irq = _sun4d_build_device_irq(real_irq, pil, board);
352 if (irq == 0)
353 goto err_out;
354
355 handler_data = irq_get_handler_data(irq);
356 if (unlikely(handler_data))
357 goto err_out;
358
359 handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC);
360 if (unlikely(!handler_data)) {
361 prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n");
362 prom_halt();
363 }
364 handler_data->cpuid = board_to_cpu[board];
365 handler_data->real_irq = real_irq;
366 irq_set_chip_and_handler_name(irq, &sun4d_irq,
367 handle_level_irq, "level");
368 irq_set_handler_data(irq, handler_data);
369
370err_out: 392err_out:
371 return real_irq; 393 return irq;
372} 394}
373 395
396unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
397{
398 return _sun4d_build_device_irq(real_irq, real_irq, board);
399}
400
401
374static void __init sun4d_fixup_trap_table(void) 402static void __init sun4d_fixup_trap_table(void)
375{ 403{
376#ifdef CONFIG_SMP 404#ifdef CONFIG_SMP
@@ -402,6 +430,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
402 unsigned int irq; 430 unsigned int irq;
403 const u32 *reg; 431 const u32 *reg;
404 int err; 432 int err;
433 int board;
405 434
406 dp = of_find_node_by_name(NULL, "cpu-unit"); 435 dp = of_find_node_by_name(NULL, "cpu-unit");
407 if (!dp) { 436 if (!dp) {
@@ -414,12 +443,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
414 * bootbus. 443 * bootbus.
415 */ 444 */
416 reg = of_get_property(dp, "reg", NULL); 445 reg = of_get_property(dp, "reg", NULL);
417 of_node_put(dp);
418 if (!reg) { 446 if (!reg) {
419 prom_printf("sun4d_init_timers: No reg property\n"); 447 prom_printf("sun4d_init_timers: No reg property\n");
420 prom_halt(); 448 prom_halt();
421 } 449 }
422 450
451 board = of_getintprop_default(dp, "board#", -1);
452 if (board == -1) {
453 prom_printf("sun4d_init_timers: No board# property on cpu-unit\n");
454 prom_halt();
455 }
456
457 of_node_put(dp);
458
423 res.start = reg[1]; 459 res.start = reg[1];
424 res.end = reg[2] - 1; 460 res.end = reg[2] - 1;
425 res.flags = reg[0] & 0xff; 461 res.flags = reg[0] & 0xff;
@@ -434,7 +470,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
434 470
435 master_l10_counter = &sun4d_timers->l10_cur_count; 471 master_l10_counter = &sun4d_timers->l10_cur_count;
436 472
437 irq = sun4d_build_device_irq(NULL, SUN4D_TIMER_IRQ); 473 irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ);
438 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL); 474 err = request_irq(irq, counter_fn, IRQF_TIMER, "timer", NULL);
439 if (err) { 475 if (err) {
440 prom_printf("sun4d_init_timers: request_irq() failed with %d\n", 476 prom_printf("sun4d_init_timers: request_irq() failed with %d\n",
diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c
index 6db18c6927fb..170cd8e8eb2a 100644
--- a/arch/sparc/kernel/sys_sparc32.c
+++ b/arch/sparc/kernel/sys_sparc32.c
@@ -109,7 +109,7 @@ asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compa
109 109
110 default: 110 default:
111 return -ENOSYS; 111 return -ENOSYS;
112 }; 112 }
113 113
114 return -ENOSYS; 114 return -ENOSYS;
115} 115}
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 96082d30def0..908b47a5ee24 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -460,7 +460,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
460 default: 460 default:
461 err = -ENOSYS; 461 err = -ENOSYS;
462 goto out; 462 goto out;
463 }; 463 }
464 } 464 }
465 if (call <= MSGCTL) { 465 if (call <= MSGCTL) {
466 switch (call) { 466 switch (call) {
@@ -481,7 +481,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
481 default: 481 default:
482 err = -ENOSYS; 482 err = -ENOSYS;
483 goto out; 483 goto out;
484 }; 484 }
485 } 485 }
486 if (call <= SHMCTL) { 486 if (call <= SHMCTL) {
487 switch (call) { 487 switch (call) {
@@ -507,7 +507,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second
507 default: 507 default:
508 err = -ENOSYS; 508 err = -ENOSYS;
509 goto out; 509 goto out;
510 }; 510 }
511 } else { 511 } else {
512 err = -ENOSYS; 512 err = -ENOSYS;
513 } 513 }
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 2b8d54b2d850..1db6b18964d2 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -708,7 +708,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode,
708 case CLOCK_EVT_MODE_UNUSED: 708 case CLOCK_EVT_MODE_UNUSED:
709 WARN_ON(1); 709 WARN_ON(1);
710 break; 710 break;
711 }; 711 }
712} 712}
713 713
714static struct clock_event_device sparc64_clockevent = { 714static struct clock_event_device sparc64_clockevent = {
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 1ed547bd850f..0cbdaa41cd1e 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -1804,7 +1804,7 @@ static const char *sun4v_err_type_to_str(u32 type)
1804 return "warning resumable"; 1804 return "warning resumable";
1805 default: 1805 default:
1806 return "unknown"; 1806 return "unknown";
1807 }; 1807 }
1808} 1808}
1809 1809
1810static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) 1810static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index c752c4c479bd..b2b019ea8caa 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -211,7 +211,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
211 default: 211 default:
212 BUG(); 212 BUG();
213 break; 213 break;
214 }; 214 }
215 } 215 }
216 return __do_int_store(dst_addr, size, src_val, asi); 216 return __do_int_store(dst_addr, size, src_val, asi);
217} 217}
@@ -328,7 +328,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
328 case ASI_SNFL: 328 case ASI_SNFL:
329 asi &= ~0x08; 329 asi &= ~0x08;
330 break; 330 break;
331 }; 331 }
332 switch (dir) { 332 switch (dir) {
333 case load: 333 case load:
334 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); 334 reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
@@ -351,7 +351,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
351 default: 351 default:
352 BUG(); 352 BUG();
353 break; 353 break;
354 }; 354 }
355 *reg_addr = val_in; 355 *reg_addr = val_in;
356 } 356 }
357 break; 357 break;
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 531d54fc9829..489fc15f3194 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -176,7 +176,7 @@ static unsigned long index_to_estar_mode(unsigned int index)
176 176
177 default: 177 default:
178 BUG(); 178 BUG();
179 }; 179 }
180} 180}
181 181
182static unsigned long index_to_divisor(unsigned int index) 182static unsigned long index_to_divisor(unsigned int index)
@@ -199,7 +199,7 @@ static unsigned long index_to_divisor(unsigned int index)
199 199
200 default: 200 default:
201 BUG(); 201 BUG();
202 }; 202 }
203} 203}
204 204
205static unsigned long estar_to_divisor(unsigned long estar) 205static unsigned long estar_to_divisor(unsigned long estar)
@@ -224,7 +224,7 @@ static unsigned long estar_to_divisor(unsigned long estar)
224 break; 224 break;
225 default: 225 default:
226 BUG(); 226 BUG();
227 }; 227 }
228 228
229 return ret; 229 return ret;
230} 230}
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index 9a8ceb700833..eb1624b931d9 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -71,7 +71,7 @@ static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg
71 break; 71 break;
72 default: 72 default:
73 BUG(); 73 BUG();
74 }; 74 }
75 75
76 return ret; 76 return ret;
77} 77}
@@ -125,7 +125,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
125 125
126 default: 126 default:
127 BUG(); 127 BUG();
128 }; 128 }
129 129
130 reg = read_safari_cfg(); 130 reg = read_safari_cfg();
131 131
diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c
index aa6ac70d4fd5..29348ea139c3 100644
--- a/arch/sparc/kernel/viohs.c
+++ b/arch/sparc/kernel/viohs.c
@@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt)
363 363
364 default: 364 default:
365 return handshake_failure(vio); 365 return handshake_failure(vio);
366 }; 366 }
367} 367}
368 368
369static int process_attr(struct vio_driver_state *vio, void *pkt) 369static int process_attr(struct vio_driver_state *vio, void *pkt)
diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c
index 9dfd2ebcb157..36357717d691 100644
--- a/arch/sparc/kernel/visemul.c
+++ b/arch/sparc/kernel/visemul.c
@@ -334,7 +334,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
334 left = edge32_tab_l[(rs1 >> 2) & 0x1].left; 334 left = edge32_tab_l[(rs1 >> 2) & 0x1].left;
335 right = edge32_tab_l[(rs2 >> 2) & 0x1].right; 335 right = edge32_tab_l[(rs2 >> 2) & 0x1].right;
336 break; 336 break;
337 }; 337 }
338 338
339 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) 339 if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL))
340 rd_val = right & left; 340 rd_val = right & left;
@@ -360,7 +360,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf)
360 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); 360 tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC);
361 regs->tstate = tstate | (ccr << 32UL); 361 regs->tstate = tstate | (ccr << 32UL);
362 } 362 }
363 }; 363 }
364} 364}
365 365
366static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) 366static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -392,7 +392,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf)
392 392
393 case ARRAY32_OPF: 393 case ARRAY32_OPF:
394 rd_val <<= 2; 394 rd_val <<= 2;
395 }; 395 }
396 396
397 store_reg(regs, rd_val, RD(insn)); 397 store_reg(regs, rd_val, RD(insn));
398} 398}
@@ -577,7 +577,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf)
577 *fpd_regaddr(f, RD(insn)) = rd_val; 577 *fpd_regaddr(f, RD(insn)) = rd_val;
578 break; 578 break;
579 } 579 }
580 }; 580 }
581} 581}
582 582
583static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) 583static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -693,7 +693,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf)
693 *fpd_regaddr(f, RD(insn)) = rd_val; 693 *fpd_regaddr(f, RD(insn)) = rd_val;
694 break; 694 break;
695 } 695 }
696 }; 696 }
697} 697}
698 698
699static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) 699static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
@@ -786,7 +786,7 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf)
786 rd_val |= 1 << i; 786 rd_val |= 1 << i;
787 } 787 }
788 break; 788 break;
789 }; 789 }
790 790
791 maybe_flush_windows(0, 0, RD(insn), 0); 791 maybe_flush_windows(0, 0, RD(insn), 0);
792 store_reg(regs, rd_val, RD(insn)); 792 store_reg(regs, rd_val, RD(insn));
@@ -885,7 +885,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn)
885 case BSHUFFLE_OPF: 885 case BSHUFFLE_OPF:
886 bshuffle(regs, insn); 886 bshuffle(regs, insn);
887 break; 887 break;
888 }; 888 }
889 889
890 regs->tpc = regs->tnpc; 890 regs->tpc = regs->tnpc;
891 regs->tnpc += 4; 891 regs->tnpc += 4;
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index b10ac4d62378..7543ddbdadb2 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -135,7 +135,7 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
135 135
136 default: 136 default:
137 break; 137 break;
138 }; 138 }
139 139
140 memset(&regs, 0, sizeof (regs)); 140 memset(&regs, 0, sizeof (regs));
141 regs.pc = pc; 141 regs.pc = pc;
diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
index ca217327e8d2..7b00de61c5f1 100644
--- a/arch/sparc/mm/init_32.c
+++ b/arch/sparc/mm/init_32.c
@@ -340,7 +340,7 @@ void __init paging_init(void)
340 prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model); 340 prom_printf("paging_init: sparc_cpu_model = %d\n", sparc_cpu_model);
341 prom_printf("paging_init: Halting...\n"); 341 prom_printf("paging_init: Halting...\n");
342 prom_halt(); 342 prom_halt();
343 }; 343 }
344 344
345 /* Initialize the protection map with non-constant, MMU dependent values. */ 345 /* Initialize the protection map with non-constant, MMU dependent values. */
346 protection_map[0] = PAGE_NONE; 346 protection_map[0] = PAGE_NONE;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index e10cd03fab80..3fd8e18bed80 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1625,7 +1625,7 @@ static void __init sun4v_ktsb_init(void)
1625 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB; 1625 ktsb_descr[0].pgsz_idx = HV_PGSZ_IDX_4MB;
1626 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB; 1626 ktsb_descr[0].pgsz_mask = HV_PGSZ_MASK_4MB;
1627 break; 1627 break;
1628 }; 1628 }
1629 1629
1630 ktsb_descr[0].assoc = 1; 1630 ktsb_descr[0].assoc = 1;
1631 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES; 1631 ktsb_descr[0].num_ttes = KERNEL_TSB_NENTRIES;
@@ -2266,7 +2266,7 @@ unsigned long pte_sz_bits(unsigned long sz)
2266 return _PAGE_SZ512K_4V; 2266 return _PAGE_SZ512K_4V;
2267 case 4 * 1024 * 1024: 2267 case 4 * 1024 * 1024:
2268 return _PAGE_SZ4MB_4V; 2268 return _PAGE_SZ4MB_4V;
2269 }; 2269 }
2270 } else { 2270 } else {
2271 switch (sz) { 2271 switch (sz) {
2272 case 8 * 1024: 2272 case 8 * 1024:
@@ -2278,7 +2278,7 @@ unsigned long pte_sz_bits(unsigned long sz)
2278 return _PAGE_SZ512K_4U; 2278 return _PAGE_SZ512K_4U;
2279 case 4 * 1024 * 1024: 2279 case 4 * 1024 * 1024:
2280 return _PAGE_SZ4MB_4U; 2280 return _PAGE_SZ4MB_4U;
2281 }; 2281 }
2282 } 2282 }
2283} 2283}
2284 2284
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index fe09fd8be695..cbef74e793b8 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1665,7 +1665,7 @@ static void __init init_swift(void)
1665 default: 1665 default:
1666 srmmu_modtype = Swift_ok; 1666 srmmu_modtype = Swift_ok;
1667 break; 1667 break;
1668 }; 1668 }
1669 1669
1670 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); 1670 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
1671 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM); 1671 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
@@ -2069,7 +2069,7 @@ static void __init get_srmmu_type(void)
2069 /* Some other Cypress revision, assume a 605. */ 2069 /* Some other Cypress revision, assume a 605. */
2070 init_cypress_605(mod_rev); 2070 init_cypress_605(mod_rev);
2071 break; 2071 break;
2072 }; 2072 }
2073 return; 2073 return;
2074 } 2074 }
2075 2075
diff --git a/arch/sparc/mm/sun4c.c b/arch/sparc/mm/sun4c.c
index a2350b5e68aa..1cf4f198709a 100644
--- a/arch/sparc/mm/sun4c.c
+++ b/arch/sparc/mm/sun4c.c
@@ -318,7 +318,7 @@ void __init sun4c_probe_vac(void)
318 prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n", 318 prom_printf("probe_vac: Didn't expect vac-linesize of %d, halting\n",
319 sun4c_vacinfo.linesize); 319 sun4c_vacinfo.linesize);
320 prom_halt(); 320 prom_halt();
321 }; 321 }
322 322
323 sun4c_flush_all(); 323 sun4c_flush_all();
324 sun4c_enable_vac(); 324 sun4c_enable_vac();
@@ -364,7 +364,7 @@ static void __init patch_kernel_fault_handler(void)
364 prom_printf("Unhandled number of segmaps: %d\n", 364 prom_printf("Unhandled number of segmaps: %d\n",
365 num_segmaps); 365 num_segmaps);
366 prom_halt(); 366 prom_halt();
367 }; 367 }
368 switch (num_contexts) { 368 switch (num_contexts) {
369 case 8: 369 case 8:
370 /* Default, nothing to do. */ 370 /* Default, nothing to do. */
@@ -377,7 +377,7 @@ static void __init patch_kernel_fault_handler(void)
377 prom_printf("Unhandled number of contexts: %d\n", 377 prom_printf("Unhandled number of contexts: %d\n",
378 num_contexts); 378 num_contexts);
379 prom_halt(); 379 prom_halt();
380 }; 380 }
381 381
382 if (sun4c_vacinfo.do_hwflushes != 0) { 382 if (sun4c_vacinfo.do_hwflushes != 0) {
383 PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1); 383 PATCH_INSN(vac_hwflush_patch1_on, vac_hwflush_patch1);
@@ -394,7 +394,7 @@ static void __init patch_kernel_fault_handler(void)
394 prom_printf("Impossible VAC linesize %d, halting...\n", 394 prom_printf("Impossible VAC linesize %d, halting...\n",
395 sun4c_vacinfo.linesize); 395 sun4c_vacinfo.linesize);
396 prom_halt(); 396 prom_halt();
397 }; 397 }
398 } 398 }
399} 399}
400 400
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 948461513499..a5f51b22fcbe 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -180,7 +180,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
180 printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n", 180 printk(KERN_ERR "TSB[%s:%d]: Impossible TSB size %lu, killing process.\n",
181 current->comm, current->pid, tsb_bytes); 181 current->comm, current->pid, tsb_bytes);
182 do_exit(SIGSEGV); 182 do_exit(SIGSEGV);
183 }; 183 }
184 tte |= pte_sz_bits(page_sz); 184 tte |= pte_sz_bits(page_sz);
185 185
186 if (tlb_type == cheetah_plus || tlb_type == hypervisor) { 186 if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
@@ -215,7 +215,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
215#endif 215#endif
216 default: 216 default:
217 BUG(); 217 BUG();
218 }; 218 }
219 hp->assoc = 1; 219 hp->assoc = 1;
220 hp->num_ttes = tsb_bytes / 16; 220 hp->num_ttes = tsb_bytes / 16;
221 hp->ctx_idx = 0; 221 hp->ctx_idx = 0;
@@ -230,7 +230,7 @@ static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsign
230#endif 230#endif
231 default: 231 default:
232 BUG(); 232 BUG();
233 }; 233 }
234 hp->tsb_base = tsb_paddr; 234 hp->tsb_base = tsb_paddr;
235 hp->resv = 0; 235 hp->resv = 0;
236 } 236 }
diff --git a/arch/sparc/prom/console_32.c b/arch/sparc/prom/console_32.c
index b05e3db5fa63..a00f47b16c10 100644
--- a/arch/sparc/prom/console_32.c
+++ b/arch/sparc/prom/console_32.c
@@ -38,7 +38,7 @@ static int prom_nbputchar(const char *buf)
38 break; 38 break;
39 default: 39 default:
40 break; 40 break;
41 }; 41 }
42 restore_current(); 42 restore_current();
43 spin_unlock_irqrestore(&prom_lock, flags); 43 spin_unlock_irqrestore(&prom_lock, flags);
44 return i; /* Ugh, we could spin forever on unsupported proms ;( */ 44 return i; /* Ugh, we could spin forever on unsupported proms ;( */
diff --git a/arch/sparc/prom/init_32.c b/arch/sparc/prom/init_32.c
index 0a601b300639..26c64cea3c9c 100644
--- a/arch/sparc/prom/init_32.c
+++ b/arch/sparc/prom/init_32.c
@@ -53,7 +53,7 @@ void __init prom_init(struct linux_romvec *rp)
53 romvec->pv_romvers); 53 romvec->pv_romvers);
54 prom_halt(); 54 prom_halt();
55 break; 55 break;
56 }; 56 }
57 57
58 prom_rev = romvec->pv_plugin_revision; 58 prom_rev = romvec->pv_plugin_revision;
59 prom_prev = romvec->pv_printrev; 59 prom_prev = romvec->pv_printrev;
diff --git a/arch/sparc/prom/mp.c b/arch/sparc/prom/mp.c
index 97c44c9ddbc8..0da8256cf76f 100644
--- a/arch/sparc/prom/mp.c
+++ b/arch/sparc/prom/mp.c
@@ -35,7 +35,7 @@ prom_startcpu(int cpunode, struct linux_prom_registers *ctable_reg, int ctx, cha
35 case PROM_V3: 35 case PROM_V3:
36 ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc); 36 ret = (*(romvec->v3_cpustart))(cpunode, (int) ctable_reg, ctx, pc);
37 break; 37 break;
38 }; 38 }
39 restore_current(); 39 restore_current();
40 spin_unlock_irqrestore(&prom_lock, flags); 40 spin_unlock_irqrestore(&prom_lock, flags);
41 41
diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig
index d3a303246c9f..e57dcce9bfda 100644
--- a/arch/unicore32/Kconfig
+++ b/arch/unicore32/Kconfig
@@ -231,10 +231,6 @@ config PUV3_PWM
231 help 231 help
232 Enable support for NB0916 PWM controllers 232 Enable support for NB0916 PWM controllers
233 233
234config PUV3_RTC
235 tristate "PKUnity v3 RTC Support"
236 depends on !ARCH_FPGA
237
238if PUV3_NB0916 234if PUV3_NB0916
239 235
240menu "PKUnity NetBook-0916 Features" 236menu "PKUnity NetBook-0916 Features"
diff --git a/arch/unicore32/Makefile b/arch/unicore32/Makefile
index 76a8beec7d03..6af4bc415f2b 100644
--- a/arch/unicore32/Makefile
+++ b/arch/unicore32/Makefile
@@ -40,42 +40,10 @@ core-y += arch/unicore32/mm/
40 40
41libs-y += arch/unicore32/lib/ 41libs-y += arch/unicore32/lib/
42 42
43ASM_GENERATED_DIR := $(srctree)/arch/unicore32/include/generated
44LINUXINCLUDE += -I$(ASM_GENERATED_DIR)
45
46ASM_GENERIC_HEADERS := atomic.h auxvec.h
47ASM_GENERIC_HEADERS += bitsperlong.h bug.h bugs.h
48ASM_GENERIC_HEADERS += cputime.h current.h
49ASM_GENERIC_HEADERS += device.h div64.h
50ASM_GENERIC_HEADERS += emergency-restart.h errno.h
51ASM_GENERIC_HEADERS += fb.h fcntl.h ftrace.h futex.h
52ASM_GENERIC_HEADERS += hardirq.h hw_irq.h
53ASM_GENERIC_HEADERS += ioctl.h ioctls.h ipcbuf.h irq_regs.h
54ASM_GENERIC_HEADERS += kdebug.h kmap_types.h
55ASM_GENERIC_HEADERS += local.h
56ASM_GENERIC_HEADERS += mman.h module.h msgbuf.h
57ASM_GENERIC_HEADERS += param.h parport.h percpu.h poll.h posix_types.h
58ASM_GENERIC_HEADERS += resource.h
59ASM_GENERIC_HEADERS += scatterlist.h sections.h segment.h sembuf.h serial.h
60ASM_GENERIC_HEADERS += setup.h shmbuf.h shmparam.h
61ASM_GENERIC_HEADERS += siginfo.h signal.h sizes.h
62ASM_GENERIC_HEADERS += socket.h sockios.h stat.h statfs.h swab.h syscalls.h
63ASM_GENERIC_HEADERS += termbits.h termios.h topology.h types.h
64ASM_GENERIC_HEADERS += ucontext.h unaligned.h user.h
65ASM_GENERIC_HEADERS += vga.h
66ASM_GENERIC_HEADERS += xor.h
67
68archprepare:
69ifneq ($(ASM_GENERATED_DIR), $(wildcard $(ASM_GENERATED_DIR)))
70 $(Q)mkdir -p $(ASM_GENERATED_DIR)/asm
71 $(Q)$(foreach a, $(ASM_GENERIC_HEADERS), \
72 echo '#include <asm-generic/$a>' \
73 > $(ASM_GENERATED_DIR)/asm/$a; )
74endif
75
76boot := arch/unicore32/boot 43boot := arch/unicore32/boot
77 44
78# Default target when executing plain make 45# Default defconfig and target when executing plain make
46KBUILD_DEFCONFIG := $(ARCH)_defconfig
79KBUILD_IMAGE := zImage 47KBUILD_IMAGE := zImage
80 48
81all: $(KBUILD_IMAGE) 49all: $(KBUILD_IMAGE)
@@ -83,8 +51,6 @@ all: $(KBUILD_IMAGE)
83zImage Image uImage: vmlinux 51zImage Image uImage: vmlinux
84 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ 52 $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
85 53
86MRPROPER_DIRS += $(ASM_GENERATED_DIR)
87
88archclean: 54archclean:
89 $(Q)$(MAKE) $(clean)=$(boot) 55 $(Q)$(MAKE) $(clean)=$(boot)
90 56
diff --git a/arch/unicore32/boot/compressed/Makefile b/arch/unicore32/boot/compressed/Makefile
index 95373428cb3d..b0954a2d23cf 100644
--- a/arch/unicore32/boot/compressed/Makefile
+++ b/arch/unicore32/boot/compressed/Makefile
@@ -59,7 +59,7 @@ $(obj)/vmlinux: $(obj)/vmlinux.lds $(obj)/head.o $(obj)/piggy.o \
59# We now have a PIC decompressor implementation. Decompressors running 59# We now have a PIC decompressor implementation. Decompressors running
60# from RAM should not define ZTEXTADDR. Decompressors running directly 60# from RAM should not define ZTEXTADDR. Decompressors running directly
61# from ROM or Flash must define ZTEXTADDR (preferably via the config) 61# from ROM or Flash must define ZTEXTADDR (preferably via the config)
62ZTEXTADDR := 0 62ZTEXTADDR := 0x03000000
63ZBSSADDR := ALIGN(4) 63ZBSSADDR := ALIGN(4)
64 64
65SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/ 65SEDFLAGS_lds = s/TEXT_START/$(ZTEXTADDR)/;s/BSS_START/$(ZBSSADDR)/
diff --git a/arch/unicore32/configs/debug_defconfig b/arch/unicore32/configs/unicore32_defconfig
index b5fbde9f1cb2..c9dd3198b6f7 100644
--- a/arch/unicore32/configs/debug_defconfig
+++ b/arch/unicore32/configs/unicore32_defconfig
@@ -1,6 +1,6 @@
1### General setup 1### General setup
2CONFIG_EXPERIMENTAL=y 2CONFIG_EXPERIMENTAL=y
3CONFIG_LOCALVERSION="-debug" 3CONFIG_LOCALVERSION="-unicore32"
4CONFIG_SWAP=y 4CONFIG_SWAP=y
5CONFIG_SYSVIPC=y 5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y 6CONFIG_POSIX_MQUEUE=y
@@ -64,7 +64,6 @@ CONFIG_I2C_BATTERY_BQ27200=n
64CONFIG_I2C_EEPROM_AT24=n 64CONFIG_I2C_EEPROM_AT24=n
65CONFIG_LCD_BACKLIGHT=n 65CONFIG_LCD_BACKLIGHT=n
66 66
67CONFIG_PUV3_RTC=y
68CONFIG_PUV3_UMAL=y 67CONFIG_PUV3_UMAL=y
69CONFIG_PUV3_MUSB=n 68CONFIG_PUV3_MUSB=n
70CONFIG_PUV3_AC97=n 69CONFIG_PUV3_AC97=n
@@ -167,8 +166,9 @@ CONFIG_LEDS_TRIGGER_IDE_DISK=y
167CONFIG_LEDS_TRIGGER_HEARTBEAT=y 166CONFIG_LEDS_TRIGGER_HEARTBEAT=y
168 167
169# Real Time Clock 168# Real Time Clock
170CONFIG_RTC_LIB=m 169CONFIG_RTC_LIB=y
171CONFIG_RTC_CLASS=m 170CONFIG_RTC_CLASS=y
171CONFIG_RTC_DRV_PUV3=y
172 172
173### File systems 173### File systems
174CONFIG_EXT2_FS=m 174CONFIG_EXT2_FS=m
diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild
index b200fdaca44d..ca113d6999c5 100644
--- a/arch/unicore32/include/asm/Kbuild
+++ b/arch/unicore32/include/asm/Kbuild
@@ -1,2 +1,61 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3generic-y += atomic.h
4generic-y += auxvec.h
5generic-y += bitsperlong.h
6generic-y += bug.h
7generic-y += bugs.h
8generic-y += cputime.h
9generic-y += current.h
10generic-y += device.h
11generic-y += div64.h
12generic-y += emergency-restart.h
13generic-y += errno.h
14generic-y += fb.h
15generic-y += fcntl.h
16generic-y += ftrace.h
17generic-y += futex.h
18generic-y += hardirq.h
19generic-y += hw_irq.h
20generic-y += ioctl.h
21generic-y += ioctls.h
22generic-y += ipcbuf.h
23generic-y += irq_regs.h
24generic-y += kdebug.h
25generic-y += kmap_types.h
26generic-y += local.h
27generic-y += mman.h
28generic-y += module.h
29generic-y += msgbuf.h
30generic-y += param.h
31generic-y += parport.h
32generic-y += percpu.h
33generic-y += poll.h
34generic-y += posix_types.h
35generic-y += resource.h
36generic-y += scatterlist.h
37generic-y += sections.h
38generic-y += segment.h
39generic-y += sembuf.h
40generic-y += serial.h
41generic-y += setup.h
42generic-y += shmbuf.h
43generic-y += shmparam.h
44generic-y += siginfo.h
45generic-y += signal.h
46generic-y += sizes.h
47generic-y += socket.h
48generic-y += sockios.h
49generic-y += stat.h
50generic-y += statfs.h
51generic-y += swab.h
52generic-y += syscalls.h
53generic-y += termbits.h
54generic-y += termios.h
55generic-y += topology.h
56generic-y += types.h
57generic-y += ucontext.h
58generic-y += unaligned.h
59generic-y += user.h
60generic-y += vga.h
61generic-y += xor.h
diff --git a/arch/unicore32/kernel/Makefile b/arch/unicore32/kernel/Makefile
index ec23a2fb2f50..aeb0f181568e 100644
--- a/arch/unicore32/kernel/Makefile
+++ b/arch/unicore32/kernel/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_UNICORE_FPU_F64) += fpu-ucf64.o
16obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o 16obj-$(CONFIG_ARCH_PUV3) += clock.o irq.o time.o
17 17
18obj-$(CONFIG_PUV3_GPIO) += gpio.o 18obj-$(CONFIG_PUV3_GPIO) += gpio.o
19obj-$(CONFIG_PUV3_RTC) += rtc.o
20obj-$(CONFIG_PUV3_PWM) += pwm.o 19obj-$(CONFIG_PUV3_PWM) += pwm.o
21obj-$(CONFIG_PUV3_PM) += pm.o sleep.o 20obj-$(CONFIG_PUV3_PM) += pm.o sleep.o
22obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o 21obj-$(CONFIG_HIBERNATION) += hibernate.o hibernate_asm.o
diff --git a/arch/unicore32/kernel/vmlinux.lds.S b/arch/unicore32/kernel/vmlinux.lds.S
index 9bf7f7af52c5..77e407e49a63 100644
--- a/arch/unicore32/kernel/vmlinux.lds.S
+++ b/arch/unicore32/kernel/vmlinux.lds.S
@@ -30,7 +30,7 @@ SECTIONS
30 HEAD_TEXT_SECTION 30 HEAD_TEXT_SECTION
31 INIT_TEXT_SECTION(PAGE_SIZE) 31 INIT_TEXT_SECTION(PAGE_SIZE)
32 INIT_DATA_SECTION(16) 32 INIT_DATA_SECTION(16)
33 PERCPU(L1_CACHE_BYTES, PAGE_SIZE) 33 PERCPU_SECTION(L1_CACHE_BYTES)
34 __init_end = .; 34 __init_end = .;
35 35
36 _stext = .; 36 _stext = .;
diff --git a/arch/x86/include/asm/memblock.h b/arch/x86/include/asm/memblock.h
index 19ae14ba6978..0cd3800f33b9 100644
--- a/arch/x86/include/asm/memblock.h
+++ b/arch/x86/include/asm/memblock.h
@@ -4,7 +4,6 @@
4#define ARCH_DISCARD_MEMBLOCK 4#define ARCH_DISCARD_MEMBLOCK
5 5
6u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align); 6u64 memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align);
7void memblock_x86_to_bootmem(u64 start, u64 end);
8 7
9void memblock_x86_reserve_range(u64 start, u64 end, char *name); 8void memblock_x86_reserve_range(u64 start, u64 end, char *name);
10void memblock_x86_free_range(u64 start, u64 end); 9void memblock_x86_free_range(u64 start, u64 end);
@@ -19,5 +18,6 @@ u64 memblock_x86_hole_size(u64 start, u64 end);
19u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align); 18u64 memblock_x86_find_in_range_node(int nid, u64 start, u64 end, u64 size, u64 align);
20u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit); 19u64 memblock_x86_free_memory_in_range(u64 addr, u64 limit);
21u64 memblock_x86_memory_in_range(u64 addr, u64 limit); 20u64 memblock_x86_memory_in_range(u64 addr, u64 limit);
21bool memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align);
22 22
23#endif 23#endif
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 31d84acc1512..a518c0a45044 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -22,6 +22,8 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
22 u64 product; 22 u64 product;
23#ifdef __i386__ 23#ifdef __i386__
24 u32 tmp1, tmp2; 24 u32 tmp1, tmp2;
25#else
26 ulong tmp;
25#endif 27#endif
26 28
27 if (shift < 0) 29 if (shift < 0)
@@ -42,8 +44,11 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
42 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) ); 44 : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
43#elif defined(__x86_64__) 45#elif defined(__x86_64__)
44 __asm__ ( 46 __asm__ (
45 "mul %%rdx ; shrd $32,%%rdx,%%rax" 47 "mul %[mul_frac] ; shrd $32, %[hi], %[lo]"
46 : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) ); 48 : [lo]"=a"(product),
49 [hi]"=d"(tmp)
50 : "0"(delta),
51 [mul_frac]"rm"((u64)mul_frac));
47#else 52#else
48#error implement me! 53#error implement me!
49#endif 54#endif
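For reference on the pvclock hunk above: both the old and the new x86-64 inline assembly compute (delta * mul_frac) >> 32 over a 128-bit intermediate ("mul" widens the product into %rdx:%rax, "shrd $32" extracts bits 32..95); the rewrite additionally exposes the clobbered %rdx through the tmp output operand. A minimal C sketch of the same scaling, assuming GCC's __uint128_t and the delta pre-shift visible earlier in the function (illustrative only, not kernel code):

        #include <stdint.h>

        /*
         * Sketch of the scaling performed by the x86-64 inline asm:
         * shift delta first, then take bits 32..95 of the 128-bit
         * product delta * mul_frac, i.e. (delta * mul_frac) >> 32.
         */
        static inline uint64_t scale_delta_sketch(uint64_t delta,
                                                  uint32_t mul_frac, int shift)
        {
                if (shift < 0)
                        delta >>= -shift;
                else
                        delta <<= shift;

                return (uint64_t)(((__uint128_t)delta * mul_frac) >> 32);
        }

As a quick sanity check, scale_delta_sketch(1000000, 0x80000000, 0) yields 500000, since 0x80000000 is the 32.32 fixed-point representation of 0.5.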
diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
index cd8cbeb5fa34..7c3a95e54ec5 100644
--- a/arch/x86/kernel/amd_iommu.c
+++ b/arch/x86/kernel/amd_iommu.c
@@ -30,6 +30,7 @@
30#include <asm/proto.h> 30#include <asm/proto.h>
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/gart.h> 32#include <asm/gart.h>
33#include <asm/dma.h>
33#include <asm/amd_iommu_proto.h> 34#include <asm/amd_iommu_proto.h>
34#include <asm/amd_iommu_types.h> 35#include <asm/amd_iommu_types.h>
35#include <asm/amd_iommu.h> 36#include <asm/amd_iommu.h>
@@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
154 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff); 155 pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
155 if (pdev) 156 if (pdev)
156 dev_data->alias = &pdev->dev; 157 dev_data->alias = &pdev->dev;
158 else {
159 kfree(dev_data);
160 return -ENOTSUPP;
161 }
157 162
158 atomic_set(&dev_data->bind, 0); 163 atomic_set(&dev_data->bind, 0);
159 164
@@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
163 return 0; 168 return 0;
164} 169}
165 170
171static void iommu_ignore_device(struct device *dev)
172{
173 u16 devid, alias;
174
175 devid = get_device_id(dev);
176 alias = amd_iommu_alias_table[devid];
177
178 memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
179 memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
180
181 amd_iommu_rlookup_table[devid] = NULL;
182 amd_iommu_rlookup_table[alias] = NULL;
183}
184
166static void iommu_uninit_device(struct device *dev) 185static void iommu_uninit_device(struct device *dev)
167{ 186{
168 kfree(dev->archdata.iommu); 187 kfree(dev->archdata.iommu);
@@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
192 continue; 211 continue;
193 212
194 ret = iommu_init_device(&pdev->dev); 213 ret = iommu_init_device(&pdev->dev);
195 if (ret) 214 if (ret == -ENOTSUPP)
215 iommu_ignore_device(&pdev->dev);
216 else if (ret)
196 goto out_free; 217 goto out_free;
197 } 218 }
198 219
@@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
2383 .dma_supported = amd_iommu_dma_supported, 2404 .dma_supported = amd_iommu_dma_supported,
2384}; 2405};
2385 2406
2407static unsigned device_dma_ops_init(void)
2408{
2409 struct pci_dev *pdev = NULL;
2410 unsigned unhandled = 0;
2411
2412 for_each_pci_dev(pdev) {
2413 if (!check_device(&pdev->dev)) {
2414 unhandled += 1;
2415 continue;
2416 }
2417
2418 pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
2419 }
2420
2421 return unhandled;
2422}
2423
2386/* 2424/*
2387 * The function which clues the AMD IOMMU driver into dma_ops. 2425 * The function which clues the AMD IOMMU driver into dma_ops.
2388 */ 2426 */
@@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
2395int __init amd_iommu_init_dma_ops(void) 2433int __init amd_iommu_init_dma_ops(void)
2396{ 2434{
2397 struct amd_iommu *iommu; 2435 struct amd_iommu *iommu;
2398 int ret; 2436 int ret, unhandled;
2399 2437
2400 /* 2438 /*
2401 * first allocate a default protection domain for every IOMMU we 2439 * first allocate a default protection domain for every IOMMU we
@@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
2421 swiotlb = 0; 2459 swiotlb = 0;
2422 2460
2423 /* Make the driver finally visible to the drivers */ 2461 /* Make the driver finally visible to the drivers */
2424 dma_ops = &amd_iommu_dma_ops; 2462 unhandled = device_dma_ops_init();
2463 if (unhandled && max_pfn > MAX_DMA32_PFN) {
2464 /* There are unhandled devices - initialize swiotlb for them */
2465 swiotlb = 1;
2466 }
2425 2467
2426 amd_iommu_stats_init(); 2468 amd_iommu_stats_init();
2427 2469
diff --git a/arch/x86/kernel/amd_iommu_init.c b/arch/x86/kernel/amd_iommu_init.c
index 9179c21120a8..bfc8453bd98d 100644
--- a/arch/x86/kernel/amd_iommu_init.c
+++ b/arch/x86/kernel/amd_iommu_init.c
@@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
731{ 731{
732 u8 *p = (u8 *)h; 732 u8 *p = (u8 *)h;
733 u8 *end = p, flags = 0; 733 u8 *end = p, flags = 0;
734 u16 dev_i, devid = 0, devid_start = 0, devid_to = 0; 734 u16 devid = 0, devid_start = 0, devid_to = 0;
735 u32 ext_flags = 0; 735 u32 dev_i, ext_flags = 0;
736 bool alias = false; 736 bool alias = false;
737 struct ivhd_entry *e; 737 struct ivhd_entry *e;
738 738
@@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
887/* Initializes the device->iommu mapping for the driver */ 887/* Initializes the device->iommu mapping for the driver */
888static int __init init_iommu_devices(struct amd_iommu *iommu) 888static int __init init_iommu_devices(struct amd_iommu *iommu)
889{ 889{
890 u16 i; 890 u32 i;
891 891
892 for (i = iommu->first_device; i <= iommu->last_device; ++i) 892 for (i = iommu->first_device; i <= iommu->last_device; ++i)
893 set_iommu_for_device(iommu, i); 893 set_iommu_for_device(iommu, i);
@@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
1177 */ 1177 */
1178static void init_device_table(void) 1178static void init_device_table(void)
1179{ 1179{
1180 u16 devid; 1180 u32 devid;
1181 1181
1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { 1182 for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID); 1183 set_dev_entry_bit(devid, DEV_ENTRY_VALID);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index b961af86bfea..b9338b8cf420 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -390,7 +390,8 @@ static unsigned int reserve_eilvt_offset(int offset, unsigned int new)
390 390
391/* 391/*
392 * If mask=1, the LVT entry does not generate interrupts while mask=0 392 * If mask=1, the LVT entry does not generate interrupts while mask=0
393 * enables the vector. See also the BKDGs. 393 * enables the vector. See also the BKDGs. Must be called with
394 * preemption disabled.
394 */ 395 */
395 396
396int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask) 397int setup_APIC_eilvt(u8 offset, u8 vector, u8 msg_type, u8 mask)
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index b511a011b7d0..adc66c3a1fef 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -632,14 +632,14 @@ late_initcall(uv_init_heartbeat);
632 632
633/* Direct Legacy VGA I/O traffic to designated IOH */ 633/* Direct Legacy VGA I/O traffic to designated IOH */
634int uv_set_vga_state(struct pci_dev *pdev, bool decode, 634int uv_set_vga_state(struct pci_dev *pdev, bool decode,
635 unsigned int command_bits, bool change_bridge) 635 unsigned int command_bits, u32 flags)
636{ 636{
637 int domain, bus, rc; 637 int domain, bus, rc;
638 638
639 PR_DEVEL("devfn %x decode %d cmd %x chg_brdg %d\n", 639 PR_DEVEL("devfn %x decode %d cmd %x flags %d\n",
640 pdev->devfn, decode, command_bits, change_bridge); 640 pdev->devfn, decode, command_bits, flags);
641 641
642 if (!change_bridge) 642 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
643 return 0; 643 return 0;
644 644
645 if ((command_bits & PCI_COMMAND_IO) == 0) 645 if ((command_bits & PCI_COMMAND_IO) == 0)
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 690bc8461835..9aeb78a23de4 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -13,6 +13,7 @@
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15#include <linux/of_pci.h> 15#include <linux/of_pci.h>
16#include <linux/initrd.h>
16 17
17#include <asm/hpet.h> 18#include <asm/hpet.h>
18#include <asm/irq_controller.h> 19#include <asm/irq_controller.h>
@@ -98,6 +99,16 @@ void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
98 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS)); 99 return __alloc_bootmem(size, align, __pa(MAX_DMA_ADDRESS));
99} 100}
100 101
102#ifdef CONFIG_BLK_DEV_INITRD
103void __init early_init_dt_setup_initrd_arch(unsigned long start,
104 unsigned long end)
105{
106 initrd_start = (unsigned long)__va(start);
107 initrd_end = (unsigned long)__va(end);
108 initrd_below_start_ok = 1;
109}
110#endif
111
101void __init add_dtb(u64 data) 112void __init add_dtb(u64 data)
102{ 113{
103 initial_dtb = data + offsetof(struct setup_data, data); 114 initial_dtb = data + offsetof(struct setup_data, data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 2e4928d45a2d..e1ba8cb24e4e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -337,7 +337,7 @@ EXPORT_SYMBOL(boot_option_idle_override);
337 * Powermanagement idle function, if any.. 337 * Powermanagement idle function, if any..
338 */ 338 */
339void (*pm_idle)(void); 339void (*pm_idle)(void);
340#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) 340#ifdef CONFIG_APM_MODULE
341EXPORT_SYMBOL(pm_idle); 341EXPORT_SYMBOL(pm_idle);
342#endif 342#endif
343 343
@@ -399,7 +399,7 @@ void default_idle(void)
399 cpu_relax(); 399 cpu_relax();
400 } 400 }
401} 401}
402#if defined(CONFIG_APM_MODULE) && defined(CONFIG_APM_CPU_IDLE) 402#ifdef CONFIG_APM_MODULE
403EXPORT_SYMBOL(default_idle); 403EXPORT_SYMBOL(default_idle);
404#endif 404#endif
405 405
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 8d128783af47..a3d0dc59067b 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -245,7 +245,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
245{ 245{
246 set_user_gs(regs, 0); 246 set_user_gs(regs, 0);
247 regs->fs = 0; 247 regs->fs = 0;
248 set_fs(USER_DS);
249 regs->ds = __USER_DS; 248 regs->ds = __USER_DS;
250 regs->es = __USER_DS; 249 regs->es = __USER_DS;
251 regs->ss = __USER_DS; 250 regs->ss = __USER_DS;
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 6c9dd922ac0d..ca6f7ab8df33 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -338,7 +338,6 @@ start_thread_common(struct pt_regs *regs, unsigned long new_ip,
338 regs->cs = _cs; 338 regs->cs = _cs;
339 regs->ss = _ss; 339 regs->ss = _ss;
340 regs->flags = X86_EFLAGS_IF; 340 regs->flags = X86_EFLAGS_IF;
341 set_fs(USER_DS);
342 /* 341 /*
343 * Free the old FP and other extended state 342 * Free the old FP and other extended state
344 */ 343 */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 33a0c11797de..9fd3137230d4 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -285,6 +285,19 @@ notrace static void __cpuinit start_secondary(void *unused)
285 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; 285 per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
286 x86_platform.nmi_init(); 286 x86_platform.nmi_init();
287 287
288 /*
289 * Wait until the cpu which brought this one up marked it
290 * online before enabling interrupts. If we don't do that then
291 * we can end up waking up the softirq thread before this cpu
292 * reached the active state, which makes the scheduler unhappy
293 * and schedule the softirq thread on the wrong cpu. This is
294 * only observable with forced threaded interrupts, but in
295 * theory it could also happen w/o them. It's just way harder
296 * to achieve.
297 */
298 while (!cpumask_test_cpu(smp_processor_id(), cpu_active_mask))
299 cpu_relax();
300
288 /* enable local interrupts */ 301 /* enable local interrupts */
289 local_irq_enable(); 302 local_irq_enable();
290 303
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d6e2477feb18..6df88c7885c0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -47,38 +47,40 @@
47#define DstDI (5<<1) /* Destination is in ES:(E)DI */ 47#define DstDI (5<<1) /* Destination is in ES:(E)DI */
48#define DstMem64 (6<<1) /* 64bit memory operand */ 48#define DstMem64 (6<<1) /* 64bit memory operand */
49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */ 49#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
50#define DstMask (7<<1) 50#define DstDX (8<<1) /* Destination is in DX register */
51#define DstMask (0xf<<1)
51/* Source operand type. */ 52/* Source operand type. */
52#define SrcNone (0<<4) /* No source operand. */ 53#define SrcNone (0<<5) /* No source operand. */
53#define SrcReg (1<<4) /* Register operand. */ 54#define SrcReg (1<<5) /* Register operand. */
54#define SrcMem (2<<4) /* Memory operand. */ 55#define SrcMem (2<<5) /* Memory operand. */
55#define SrcMem16 (3<<4) /* Memory operand (16-bit). */ 56#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
56#define SrcMem32 (4<<4) /* Memory operand (32-bit). */ 57#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
57#define SrcImm (5<<4) /* Immediate operand. */ 58#define SrcImm (5<<5) /* Immediate operand. */
58#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ 59#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
59#define SrcOne (7<<4) /* Implied '1' */ 60#define SrcOne (7<<5) /* Implied '1' */
60#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */ 61#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
61#define SrcImmU (9<<4) /* Immediate operand, unsigned */ 62#define SrcImmU (9<<5) /* Immediate operand, unsigned */
62#define SrcSI (0xa<<4) /* Source is in the DS:RSI */ 63#define SrcSI (0xa<<5) /* Source is in the DS:RSI */
63#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */ 64#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
64#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */ 65#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
65#define SrcAcc (0xd<<4) /* Source Accumulator */ 66#define SrcAcc (0xd<<5) /* Source Accumulator */
66#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */ 67#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
67#define SrcMask (0xf<<4) 68#define SrcDX (0xf<<5) /* Source is in DX register */
69#define SrcMask (0xf<<5)
68/* Generic ModRM decode. */ 70/* Generic ModRM decode. */
69#define ModRM (1<<8) 71#define ModRM (1<<9)
70/* Destination is only written; never read. */ 72/* Destination is only written; never read. */
71#define Mov (1<<9) 73#define Mov (1<<10)
72#define BitOp (1<<10) 74#define BitOp (1<<11)
73#define MemAbs (1<<11) /* Memory operand is absolute displacement */ 75#define MemAbs (1<<12) /* Memory operand is absolute displacement */
74#define String (1<<12) /* String instruction (rep capable) */ 76#define String (1<<13) /* String instruction (rep capable) */
75#define Stack (1<<13) /* Stack instruction (push/pop) */ 77#define Stack (1<<14) /* Stack instruction (push/pop) */
76#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */ 78#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
77#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ 79#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
78#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */ 80#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
79#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */ 81#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
80#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */ 82#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
81#define Sse (1<<17) /* SSE Vector instruction */ 83#define Sse (1<<18) /* SSE Vector instruction */
82/* Misc flags */ 84/* Misc flags */
83#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ 85#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
84#define VendorSpecific (1<<22) /* Vendor specific instruction */ 86#define VendorSpecific (1<<22) /* Vendor specific instruction */
@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
3154 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 3156 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3155 I(SrcImmByte | Mov | Stack, em_push), 3157 I(SrcImmByte | Mov | Stack, em_push),
3156 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 3158 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3157 D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */ 3159 D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
3158 D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */ 3160 D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
3159 /* 0x70 - 0x7F */ 3161 /* 0x70 - 0x7F */
3160 X16(D(SrcImmByte)), 3162 X16(D(SrcImmByte)),
3161 /* 0x80 - 0x87 */ 3163 /* 0x80 - 0x87 */
@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
3212 /* 0xE8 - 0xEF */ 3214 /* 0xE8 - 0xEF */
3213 D(SrcImm | Stack), D(SrcImm | ImplicitOps), 3215 D(SrcImm | Stack), D(SrcImm | ImplicitOps),
3214 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), 3216 D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
3215 D2bvIP(SrcNone | DstAcc, in, check_perm_in), 3217 D2bvIP(SrcDX | DstAcc, in, check_perm_in),
3216 D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out), 3218 D2bvIP(SrcAcc | DstDX, out, check_perm_out),
3217 /* 0xF0 - 0xF7 */ 3219 /* 0xF0 - 0xF7 */
3218 N, DI(ImplicitOps, icebp), N, N, 3220 N, DI(ImplicitOps, icebp), N, N,
3219 DI(ImplicitOps | Priv, hlt), D(ImplicitOps), 3221 DI(ImplicitOps | Priv, hlt), D(ImplicitOps),
@@ -3613,6 +3615,12 @@ done_prefixes:
3613 memop.bytes = c->op_bytes + 2; 3615 memop.bytes = c->op_bytes + 2;
3614 goto srcmem_common; 3616 goto srcmem_common;
3615 break; 3617 break;
3618 case SrcDX:
3619 c->src.type = OP_REG;
3620 c->src.bytes = 2;
3621 c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
3622 fetch_register_operand(&c->src);
3623 break;
3616 } 3624 }
3617 3625
3618 if (rc != X86EMUL_CONTINUE) 3626 if (rc != X86EMUL_CONTINUE)
@@ -3682,6 +3690,12 @@ done_prefixes:
3682 c->dst.addr.mem.seg = VCPU_SREG_ES; 3690 c->dst.addr.mem.seg = VCPU_SREG_ES;
3683 c->dst.val = 0; 3691 c->dst.val = 0;
3684 break; 3692 break;
3693 case DstDX:
3694 c->dst.type = OP_REG;
3695 c->dst.bytes = 2;
3696 c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
3697 fetch_register_operand(&c->dst);
3698 break;
3685 case ImplicitOps: 3699 case ImplicitOps:
3686 /* Special instructions do their own operand decoding. */ 3700 /* Special instructions do their own operand decoding. */
3687 default: 3701 default:
@@ -4027,7 +4041,6 @@ special_insn:
4027 break; 4041 break;
4028 case 0xec: /* in al,dx */ 4042 case 0xec: /* in al,dx */
4029 case 0xed: /* in (e/r)ax,dx */ 4043 case 0xed: /* in (e/r)ax,dx */
4030 c->src.val = c->regs[VCPU_REGS_RDX];
4031 do_io_in: 4044 do_io_in:
4032 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, 4045 if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
4033 &c->dst.val)) 4046 &c->dst.val))
@@ -4035,7 +4048,6 @@ special_insn:
4035 break; 4048 break;
4036 case 0xee: /* out dx,al */ 4049 case 0xee: /* out dx,al */
4037 case 0xef: /* out dx,(e/r)ax */ 4050 case 0xef: /* out dx,(e/r)ax */
4038 c->dst.val = c->regs[VCPU_REGS_RDX];
4039 do_io_out: 4051 do_io_out:
4040 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, 4052 ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
4041 &c->src.val, 1); 4053 &c->src.val, 1);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd14bb4c8594..aee38623b768 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -565,7 +565,7 @@ gfn_to_memslot_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t gfn,
565 565
566static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn) 566static bool mapping_level_dirty_bitmap(struct kvm_vcpu *vcpu, gfn_t large_gfn)
567{ 567{
568 return gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true); 568 return !gfn_to_memslot_dirty_bitmap(vcpu, large_gfn, true);
569} 569}
570 570
571static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn) 571static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c4dc010c4cb..9d03ad4dd5ec 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -121,7 +121,7 @@ static int FNAME(walk_addr_generic)(struct guest_walker *walker,
121 gva_t addr, u32 access) 121 gva_t addr, u32 access)
122{ 122{
123 pt_element_t pte; 123 pt_element_t pte;
124 pt_element_t __user *ptep_user; 124 pt_element_t __user *uninitialized_var(ptep_user);
125 gfn_t table_gfn; 125 gfn_t table_gfn;
126 unsigned index, pt_access, uninitialized_var(pte_access); 126 unsigned index, pt_access, uninitialized_var(pte_access);
127 gpa_t pte_gpa; 127 gpa_t pte_gpa;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4c3fa0f67469..d48ec60ea421 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2047,7 +2047,8 @@ static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
2047 unsigned long cr0, 2047 unsigned long cr0,
2048 struct kvm_vcpu *vcpu) 2048 struct kvm_vcpu *vcpu)
2049{ 2049{
2050 vmx_decache_cr3(vcpu); 2050 if (!test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
2051 vmx_decache_cr3(vcpu);
2051 if (!(cr0 & X86_CR0_PG)) { 2052 if (!(cr0 & X86_CR0_PG)) {
2052 /* From paging/starting to nonpaging */ 2053 /* From paging/starting to nonpaging */
2053 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, 2054 vmcs_write32(CPU_BASED_VM_EXEC_CONTROL,
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index aa1169392b83..992da5ec5a64 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -8,7 +8,7 @@
8#include <linux/range.h> 8#include <linux/range.h>
9 9
10/* Check for already reserved areas */ 10/* Check for already reserved areas */
11static bool __init check_with_memblock_reserved_size(u64 *addrp, u64 *sizep, u64 align) 11bool __init memblock_x86_check_reserved_size(u64 *addrp, u64 *sizep, u64 align)
12{ 12{
13 struct memblock_region *r; 13 struct memblock_region *r;
14 u64 addr = *addrp, last; 14 u64 addr = *addrp, last;
@@ -59,7 +59,7 @@ u64 __init memblock_x86_find_in_range_size(u64 start, u64 *sizep, u64 align)
59 if (addr >= ei_last) 59 if (addr >= ei_last)
60 continue; 60 continue;
61 *sizep = ei_last - addr; 61 *sizep = ei_last - addr;
62 while (check_with_memblock_reserved_size(&addr, sizep, align)) 62 while (memblock_x86_check_reserved_size(&addr, sizep, align))
63 ; 63 ;
64 64
65 if (*sizep) 65 if (*sizep)
diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
index 9fd8a567fe1e..9cbb710dc94b 100644
--- a/arch/x86/oprofile/op_model_amd.c
+++ b/arch/x86/oprofile/op_model_amd.c
@@ -609,16 +609,21 @@ static int setup_ibs_ctl(int ibs_eilvt_off)
609 return 0; 609 return 0;
610} 610}
611 611
612/*
613 * This runs only on the current cpu. We try to find an LVT offset and
614 * setup the local APIC. For this we must disable preemption. On
615 * success we initialize all nodes with this offset. This updates then
616 * the offset in the IBS_CTL per-node msr. The per-core APIC setup of
617 * the IBS interrupt vector is called from op_amd_setup_ctrs()/op_-
618 * amd_cpu_shutdown() using the new offset.
619 */
612static int force_ibs_eilvt_setup(void) 620static int force_ibs_eilvt_setup(void)
613{ 621{
614 int offset; 622 int offset;
615 int ret; 623 int ret;
616 624
617 /*
618 * find the next free available EILVT entry, skip offset 0,
619 * pin search to this cpu
620 */
621 preempt_disable(); 625 preempt_disable();
626 /* find the next free available EILVT entry, skip offset 0 */
622 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) { 627 for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
623 if (get_eilvt(offset)) 628 if (get_eilvt(offset))
624 break; 629 break;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 0d3a4fa34560..474356b98ede 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -310,14 +310,31 @@ void __init efi_reserve_boot_services(void)
310 310
311 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { 311 for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
312 efi_memory_desc_t *md = p; 312 efi_memory_desc_t *md = p;
313 unsigned long long start = md->phys_addr; 313 u64 start = md->phys_addr;
314 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; 314 u64 size = md->num_pages << EFI_PAGE_SHIFT;
315 315
316 if (md->type != EFI_BOOT_SERVICES_CODE && 316 if (md->type != EFI_BOOT_SERVICES_CODE &&
317 md->type != EFI_BOOT_SERVICES_DATA) 317 md->type != EFI_BOOT_SERVICES_DATA)
318 continue; 318 continue;
319 319 /* Only reserve where possible:
320 memblock_x86_reserve_range(start, start + size, "EFI Boot"); 320 * - Not within any already allocated areas
321 * - Not over any memory area (really needed, if above?)
322 * - Not within any part of the kernel
323 * - Not the bios reserved area
324 */
325 if ((start+size >= virt_to_phys(_text)
326 && start <= virt_to_phys(_end)) ||
327 !e820_all_mapped(start, start+size, E820_RAM) ||
328 memblock_x86_check_reserved_size(&start, &size,
329 1<<EFI_PAGE_SHIFT)) {
330 /* Could not reserve, skip it */
331 md->num_pages = 0;
332 memblock_dbg(PFX "Could not reserve boot range "
333 "[0x%010llx-0x%010llx]\n",
334 start, start+size-1);
335 } else
336 memblock_x86_reserve_range(start, start+size,
337 "EFI Boot");
321 } 338 }
322} 339}
323 340
@@ -334,6 +351,10 @@ static void __init efi_free_boot_services(void)
334 md->type != EFI_BOOT_SERVICES_DATA) 351 md->type != EFI_BOOT_SERVICES_DATA)
335 continue; 352 continue;
336 353
354 /* Could not reserve boot area */
355 if (!size)
356 continue;
357
337 free_bootmem_late(start, size); 358 free_bootmem_late(start, size);
338 } 359 }
339} 360}
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index dd7b88f2ec7a..5525163a0398 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1033,6 +1033,13 @@ static void xen_machine_halt(void)
1033 xen_reboot(SHUTDOWN_poweroff); 1033 xen_reboot(SHUTDOWN_poweroff);
1034} 1034}
1035 1035
1036static void xen_machine_power_off(void)
1037{
1038 if (pm_power_off)
1039 pm_power_off();
1040 xen_reboot(SHUTDOWN_poweroff);
1041}
1042
1036static void xen_crash_shutdown(struct pt_regs *regs) 1043static void xen_crash_shutdown(struct pt_regs *regs)
1037{ 1044{
1038 xen_reboot(SHUTDOWN_crash); 1045 xen_reboot(SHUTDOWN_crash);
@@ -1058,7 +1065,7 @@ int xen_panic_handler_init(void)
1058static const struct machine_ops xen_machine_ops __initconst = { 1065static const struct machine_ops xen_machine_ops __initconst = {
1059 .restart = xen_restart, 1066 .restart = xen_restart,
1060 .halt = xen_machine_halt, 1067 .halt = xen_machine_halt,
1061 .power_off = xen_machine_halt, 1068 .power_off = xen_machine_power_off,
1062 .shutdown = xen_machine_halt, 1069 .shutdown = xen_machine_halt,
1063 .crash_shutdown = xen_crash_shutdown, 1070 .crash_shutdown = xen_crash_shutdown,
1064 .emergency_restart = xen_emergency_restart, 1071 .emergency_restart = xen_emergency_restart,
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index dc708dcc62f1..673e968df3cf 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -59,6 +59,7 @@
59#include <asm/page.h> 59#include <asm/page.h>
60#include <asm/init.h> 60#include <asm/init.h>
61#include <asm/pat.h> 61#include <asm/pat.h>
62#include <asm/smp.h>
62 63
63#include <asm/xen/hypercall.h> 64#include <asm/xen/hypercall.h>
64#include <asm/xen/hypervisor.h> 65#include <asm/xen/hypervisor.h>
@@ -1231,7 +1232,7 @@ static void xen_flush_tlb_others(const struct cpumask *cpus,
1231{ 1232{
1232 struct { 1233 struct {
1233 struct mmuext_op op; 1234 struct mmuext_op op;
1234 DECLARE_BITMAP(mask, NR_CPUS); 1235 DECLARE_BITMAP(mask, num_processors);
1235 } *args; 1236 } *args;
1236 struct multicall_space mcs; 1237 struct multicall_space mcs;
1237 1238
@@ -1599,6 +1600,11 @@ static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
1599 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { 1600 for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
1600 pte_t pte; 1601 pte_t pte;
1601 1602
1603#ifdef CONFIG_X86_32
1604 if (pfn > max_pfn_mapped)
1605 max_pfn_mapped = pfn;
1606#endif
1607
1602 if (!pte_none(pte_page[pteidx])) 1608 if (!pte_none(pte_page[pteidx]))
1603 continue; 1609 continue;
1604 1610
@@ -1766,7 +1772,9 @@ pgd_t * __init xen_setup_kernel_pagetable(pgd_t *pgd,
1766 initial_kernel_pmd = 1772 initial_kernel_pmd =
1767 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); 1773 extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
1768 1774
1769 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); 1775 max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
1776 xen_start_info->nr_pt_frames * PAGE_SIZE +
1777 512*1024);
1770 1778
1771 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); 1779 kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
1772 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD); 1780 memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c
index 8bff7e7c290b..1b2b73ff0a6e 100644
--- a/arch/x86/xen/multicalls.c
+++ b/arch/x86/xen/multicalls.c
@@ -189,10 +189,10 @@ struct multicall_space __xen_mc_entry(size_t args)
189 unsigned argidx = roundup(b->argidx, sizeof(u64)); 189 unsigned argidx = roundup(b->argidx, sizeof(u64));
190 190
191 BUG_ON(preemptible()); 191 BUG_ON(preemptible());
192 BUG_ON(b->argidx > MC_ARGS); 192 BUG_ON(b->argidx >= MC_ARGS);
193 193
194 if (b->mcidx == MC_BATCH || 194 if (b->mcidx == MC_BATCH ||
195 (argidx + args) > MC_ARGS) { 195 (argidx + args) >= MC_ARGS) {
196 mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS); 196 mc_stats_flush(b->mcidx == MC_BATCH ? FL_SLOTS : FL_ARGS);
197 xen_mc_flush(); 197 xen_mc_flush();
198 argidx = roundup(b->argidx, sizeof(u64)); 198 argidx = roundup(b->argidx, sizeof(u64));
@@ -206,7 +206,7 @@ struct multicall_space __xen_mc_entry(size_t args)
206 ret.args = &b->args[argidx]; 206 ret.args = &b->args[argidx];
207 b->argidx = argidx + args; 207 b->argidx = argidx + args;
208 208
209 BUG_ON(b->argidx > MC_ARGS); 209 BUG_ON(b->argidx >= MC_ARGS);
210 return ret; 210 return ret;
211} 211}
212 212
@@ -216,7 +216,7 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
216 struct multicall_space ret = { NULL, NULL }; 216 struct multicall_space ret = { NULL, NULL };
217 217
218 BUG_ON(preemptible()); 218 BUG_ON(preemptible());
219 BUG_ON(b->argidx > MC_ARGS); 219 BUG_ON(b->argidx >= MC_ARGS);
220 220
221 if (b->mcidx == 0) 221 if (b->mcidx == 0)
222 return ret; 222 return ret;
@@ -224,14 +224,14 @@ struct multicall_space xen_mc_extend_args(unsigned long op, size_t size)
224 if (b->entries[b->mcidx - 1].op != op) 224 if (b->entries[b->mcidx - 1].op != op)
225 return ret; 225 return ret;
226 226
227 if ((b->argidx + size) > MC_ARGS) 227 if ((b->argidx + size) >= MC_ARGS)
228 return ret; 228 return ret;
229 229
230 ret.mc = &b->entries[b->mcidx - 1]; 230 ret.mc = &b->entries[b->mcidx - 1];
231 ret.args = &b->args[b->argidx]; 231 ret.args = &b->args[b->argidx];
232 b->argidx += size; 232 b->argidx += size;
233 233
234 BUG_ON(b->argidx > MC_ARGS); 234 BUG_ON(b->argidx >= MC_ARGS);
235 return ret; 235 return ret;
236} 236}
237 237
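
The switch from '>' to '>=' matters because args[] holds MC_ARGS bytes indexed 0..MC_ARGS-1, and the trailing BUG_ON treats argidx == MC_ARGS as overflow; the reservation checks therefore have to refuse anything that would let argidx reach MC_ARGS. A small boundary sketch with a hypothetical, much smaller buffer:

#include <assert.h>
#include <stdio.h>

#define MC_ARGS 16   /* hypothetical; the real buffer is much larger */

/* Reserve n bytes out of an MC_ARGS-byte buffer, mirroring the stricter
 * '>=' check so argidx never reaches MC_ARGS itself. */
static int reserve(unsigned *argidx, unsigned n)
{
        if (*argidx + n >= MC_ARGS)
                return -1;            /* treat "exactly full" as full too */
        unsigned start = *argidx;
        *argidx += n;
        return (int)start;
}

int main(void)
{
        unsigned argidx = 0;

        while (reserve(&argidx, 4) >= 0)
                ;
        printf("stopped with argidx=%u of %d\n", argidx, MC_ARGS);
        assert(argidx < MC_ARGS);     /* the invariant the BUG_ONs enforce */
        return 0;
}
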
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index be1a464f6d66..60aeeb56948f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -227,11 +227,7 @@ char * __init xen_memory_setup(void)
227 227
228 memcpy(map_raw, map, sizeof(map)); 228 memcpy(map_raw, map, sizeof(map));
229 e820.nr_map = 0; 229 e820.nr_map = 0;
230#ifdef CONFIG_X86_32
231 xen_extra_mem_start = mem_end; 230 xen_extra_mem_start = mem_end;
232#else
233 xen_extra_mem_start = max((1ULL << 32), mem_end);
234#endif
235 for (i = 0; i < memmap.nr_entries; i++) { 231 for (i = 0; i < memmap.nr_entries; i++) {
236 unsigned long long end; 232 unsigned long long end;
237 233
@@ -266,6 +262,12 @@ char * __init xen_memory_setup(void)
266 if (map[i].size > 0) 262 if (map[i].size > 0)
267 e820_add_region(map[i].addr, map[i].size, map[i].type); 263 e820_add_region(map[i].addr, map[i].size, map[i].type);
268 } 264 }
265 /* Align the balloon area so that max_low_pfn does not get set
266 * to be at the _end_ of the PCI gap at the far end (fee01000).
267 * Note that xen_extra_mem_start gets set in the loop above to be
268 * past the last E820 region. */
269 if (xen_initial_domain() && (xen_extra_mem_start < (1ULL<<32)))
270 xen_extra_mem_start = (1ULL<<32);
269 271
270 /* 272 /*
271 * In domU, the ISA region is normal, usable memory, but we 273 * In domU, the ISA region is normal, usable memory, but we
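
For dom0 the balloon / extra-memory start is clamped up to 4 GiB so that max_low_pfn is not derived from the far end of the PCI gap. A sketch of the clamp with a made-up end-of-E820 address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical: the last E820 region ends just below 4 GiB. */
        uint64_t mem_end = 0xfee01000ULL;
        uint64_t extra_mem_start = mem_end;
        int is_initial_domain = 1;

        if (is_initial_domain && extra_mem_start < (1ULL << 32))
                extra_mem_start = 1ULL << 32;   /* the clamp the hunk adds */

        printf("balloon area starts at 0x%llx\n",
               (unsigned long long)extra_mem_start);
        return 0;
}
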
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 41038c01de40..b4533a86d7e4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -205,11 +205,18 @@ static void __init xen_smp_prepare_boot_cpu(void)
205static void __init xen_smp_prepare_cpus(unsigned int max_cpus) 205static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
206{ 206{
207 unsigned cpu; 207 unsigned cpu;
208 unsigned int i;
208 209
209 xen_init_lock_cpu(0); 210 xen_init_lock_cpu(0);
210 211
211 smp_store_cpu_info(0); 212 smp_store_cpu_info(0);
212 cpu_data(0).x86_max_cores = 1; 213 cpu_data(0).x86_max_cores = 1;
214
215 for_each_possible_cpu(i) {
216 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
217 zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
218 zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
219 }
213 set_cpu_sibling_map(0); 220 set_cpu_sibling_map(0);
214 221
215 if (xen_smp_intr_init(0)) 222 if (xen_smp_intr_init(0))
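
set_cpu_sibling_map() writes into the per-cpu sibling, core and LLC masks, and with CONFIG_CPUMASK_OFFSTACK those are bare pointers until zalloc_cpumask_var() backs them, so the hunk allocates them for every possible CPU first. A standalone sketch of that allocate-before-use shape (calloc standing in for zalloc_cpumask_var):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8   /* hypothetical */

/* Stand-in for cpumask_var_t with CONFIG_CPUMASK_OFFSTACK=y: a bare pointer. */
typedef unsigned long *cpumask_var_t;

static cpumask_var_t cpu_sibling_map[NR_CPUS];

static int zalloc_mask(cpumask_var_t *mask)
{
        *mask = calloc(1, sizeof(unsigned long));
        return *mask != NULL;
}

int main(void)
{
        /* Allocate for every possible CPU before anything dereferences it. */
        for (int i = 0; i < NR_CPUS; i++)
                if (!zalloc_mask(&cpu_sibling_map[i]))
                        return 1;

        cpu_sibling_map[0][0] |= 1UL;   /* safe now; a NULL deref otherwise */
        printf("masks allocated for %d CPUs\n", NR_CPUS);
        return 0;
}
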
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index dfb6e9d3d759..7f099d6e4e0b 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2802,10 +2802,11 @@ int ata_eh_reset(struct ata_link *link, int classify,
2802 } 2802 }
2803 2803
2804 /* 2804 /*
2805 * Some controllers can't be frozen very well and may set 2805 * Some controllers can't be frozen very well and may set spurious
2806 * spuruious error conditions during reset. Clear accumulated 2806 * error conditions during reset. Clear accumulated error
2807 * error information. As reset is the final recovery action, 2807 * information and re-thaw the port if frozen. As reset is the
2808 * nothing is lost by doing this. 2808 * final recovery action and we cross check link onlineness against
2809 * device classification later, no hotplug event is lost by this.
2809 */ 2810 */
2810 spin_lock_irqsave(link->ap->lock, flags); 2811 spin_lock_irqsave(link->ap->lock, flags);
2811 memset(&link->eh_info, 0, sizeof(link->eh_info)); 2812 memset(&link->eh_info, 0, sizeof(link->eh_info));
@@ -2814,6 +2815,9 @@ int ata_eh_reset(struct ata_link *link, int classify,
2814 ap->pflags &= ~ATA_PFLAG_EH_PENDING; 2815 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2815 spin_unlock_irqrestore(link->ap->lock, flags); 2816 spin_unlock_irqrestore(link->ap->lock, flags);
2816 2817
2818 if (ap->pflags & ATA_PFLAG_FROZEN)
2819 ata_eh_thaw_port(ap);
2820
2817 /* 2821 /*
2818 * Make sure onlineness and classification result correspond. 2822 * Make sure onlineness and classification result correspond.
2819 * Hotplug could have happened during reset and some 2823 * Hotplug could have happened during reset and some
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index c0dd09df7be8..eaa8a854af03 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -291,7 +291,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
291{ 291{
292 struct pm_clk_notifier_block *clknb; 292 struct pm_clk_notifier_block *clknb;
293 struct device *dev = data; 293 struct device *dev = data;
294 char *con_id; 294 char **con_id;
295 int error; 295 int error;
296 296
297 dev_dbg(dev, "%s() %ld\n", __func__, action); 297 dev_dbg(dev, "%s() %ld\n", __func__, action);
@@ -309,8 +309,8 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
309 309
310 dev->pwr_domain = clknb->pwr_domain; 310 dev->pwr_domain = clknb->pwr_domain;
311 if (clknb->con_ids[0]) { 311 if (clknb->con_ids[0]) {
312 for (con_id = clknb->con_ids[0]; *con_id; con_id++) 312 for (con_id = clknb->con_ids; *con_id; con_id++)
313 pm_runtime_clk_add(dev, con_id); 313 pm_runtime_clk_add(dev, *con_id);
314 } else { 314 } else {
315 pm_runtime_clk_add(dev, NULL); 315 pm_runtime_clk_add(dev, NULL);
316 } 316 }
@@ -380,7 +380,7 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
380{ 380{
381 struct pm_clk_notifier_block *clknb; 381 struct pm_clk_notifier_block *clknb;
382 struct device *dev = data; 382 struct device *dev = data;
383 char *con_id; 383 char **con_id;
384 384
385 dev_dbg(dev, "%s() %ld\n", __func__, action); 385 dev_dbg(dev, "%s() %ld\n", __func__, action);
386 386
@@ -389,16 +389,16 @@ static int pm_runtime_clk_notify(struct notifier_block *nb,
389 switch (action) { 389 switch (action) {
390 case BUS_NOTIFY_ADD_DEVICE: 390 case BUS_NOTIFY_ADD_DEVICE:
391 if (clknb->con_ids[0]) { 391 if (clknb->con_ids[0]) {
392 for (con_id = clknb->con_ids[0]; *con_id; con_id++) 392 for (con_id = clknb->con_ids; *con_id; con_id++)
393 enable_clock(dev, con_id); 393 enable_clock(dev, *con_id);
394 } else { 394 } else {
395 enable_clock(dev, NULL); 395 enable_clock(dev, NULL);
396 } 396 }
397 break; 397 break;
398 case BUS_NOTIFY_DEL_DEVICE: 398 case BUS_NOTIFY_DEL_DEVICE:
399 if (clknb->con_ids[0]) { 399 if (clknb->con_ids[0]) {
400 for (con_id = clknb->con_ids[0]; *con_id; con_id++) 400 for (con_id = clknb->con_ids; *con_id; con_id++)
401 disable_clock(dev, con_id); 401 disable_clock(dev, *con_id);
402 } else { 402 } else {
403 disable_clock(dev, NULL); 403 disable_clock(dev, NULL);
404 } 404 }
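
clknb->con_ids is a NULL-terminated array of strings, so the cursor has to be a char ** that is dereferenced once per element; the old char * cursor started from the first string and walked its bytes. A userspace sketch of the corrected iteration with hypothetical clock connection IDs:

#include <stdio.h>

static void add_clock(const char *con_id)
{
        printf("adding clock '%s'\n", con_id ? con_id : "(default)");
}

int main(void)
{
        /* Hypothetical connection IDs, NULL-terminated like clknb->con_ids. */
        const char *con_ids[] = { "fck", "ick", NULL };

        /* Iterate string pointers and stop at the NULL sentinel. */
        for (const char **con_id = con_ids; *con_id; con_id++)
                add_clock(*con_id);

        return 0;
}
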
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index fd6305bf953e..8ecf4c6c2874 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -64,6 +64,8 @@ static ssize_t btmrvl_hscfgcmd_write(struct file *file,
64 return -EFAULT; 64 return -EFAULT;
65 65
66 ret = strict_strtol(buf, 10, &result); 66 ret = strict_strtol(buf, 10, &result);
67 if (ret)
68 return ret;
67 69
68 priv->btmrvl_dev.hscfgcmd = result; 70 priv->btmrvl_dev.hscfgcmd = result;
69 71
@@ -108,6 +110,8 @@ static ssize_t btmrvl_psmode_write(struct file *file, const char __user *ubuf,
108 return -EFAULT; 110 return -EFAULT;
109 111
110 ret = strict_strtol(buf, 10, &result); 112 ret = strict_strtol(buf, 10, &result);
113 if (ret)
114 return ret;
111 115
112 priv->btmrvl_dev.psmode = result; 116 priv->btmrvl_dev.psmode = result;
113 117
@@ -147,6 +151,8 @@ static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf,
147 return -EFAULT; 151 return -EFAULT;
148 152
149 ret = strict_strtol(buf, 10, &result); 153 ret = strict_strtol(buf, 10, &result);
154 if (ret)
155 return ret;
150 156
151 priv->btmrvl_dev.pscmd = result; 157 priv->btmrvl_dev.pscmd = result;
152 158
@@ -191,6 +197,8 @@ static ssize_t btmrvl_gpiogap_write(struct file *file, const char __user *ubuf,
191 return -EFAULT; 197 return -EFAULT;
192 198
193 ret = strict_strtol(buf, 16, &result); 199 ret = strict_strtol(buf, 16, &result);
200 if (ret)
201 return ret;
194 202
195 priv->btmrvl_dev.gpio_gap = result; 203 priv->btmrvl_dev.gpio_gap = result;
196 204
@@ -230,6 +238,8 @@ static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf,
230 return -EFAULT; 238 return -EFAULT;
231 239
232 ret = strict_strtol(buf, 10, &result); 240 ret = strict_strtol(buf, 10, &result);
241 if (ret)
242 return ret;
233 243
234 priv->btmrvl_dev.hscmd = result; 244 priv->btmrvl_dev.hscmd = result;
235 if (priv->btmrvl_dev.hscmd) { 245 if (priv->btmrvl_dev.hscmd) {
@@ -272,6 +282,8 @@ static ssize_t btmrvl_hsmode_write(struct file *file, const char __user *ubuf,
272 return -EFAULT; 282 return -EFAULT;
273 283
274 ret = strict_strtol(buf, 10, &result); 284 ret = strict_strtol(buf, 10, &result);
285 if (ret)
286 return ret;
275 287
276 priv->btmrvl_dev.hsmode = result; 288 priv->btmrvl_dev.hsmode = result;
277 289
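
Each debugfs write handler now returns as soon as the parse fails instead of storing whatever happened to be in result. A userspace sketch of the same guard, with strtol standing in for the kernel's strict_strtol():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace stand-in for strict_strtol(): returns 0 on success. */
static int parse_long(const char *buf, int base, long *res)
{
        char *end;

        errno = 0;
        *res = strtol(buf, &end, base);
        if (errno || end == buf || *end != '\0')
                return -EINVAL;
        return 0;
}

int main(void)
{
        long result;

        if (parse_long("not-a-number", 10, &result)) {  /* the check the patch adds */
                fprintf(stderr, "rejecting bad input, result is never used\n");
                return 1;
        }
        printf("parsed %ld\n", result);
        return 0;
}
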
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 051474c65b78..34d6a1cab8de 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -163,11 +163,32 @@ static irqreturn_t hpet_interrupt(int irq, void *data)
163 * This has the effect of treating non-periodic like periodic. 163 * This has the effect of treating non-periodic like periodic.
164 */ 164 */
165 if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) { 165 if ((devp->hd_flags & (HPET_IE | HPET_PERIODIC)) == HPET_IE) {
166 unsigned long m, t; 166 unsigned long m, t, mc, base, k;
167 struct hpet __iomem *hpet = devp->hd_hpet;
168 struct hpets *hpetp = devp->hd_hpets;
167 169
168 t = devp->hd_ireqfreq; 170 t = devp->hd_ireqfreq;
169 m = read_counter(&devp->hd_timer->hpet_compare); 171 m = read_counter(&devp->hd_timer->hpet_compare);
170 write_counter(t + m, &devp->hd_timer->hpet_compare); 172 mc = read_counter(&hpet->hpet_mc);
173 /* The time for the next interrupt would logically be t + m,
174 * however, if we are very unlucky and the interrupt is delayed
175 * for longer than t then we will completely miss the next
176 * interrupt if we set t + m and an application will hang.
177 * Therefore we need to make a more complex computation assuming
178 * that there exists a k for which the following is true:
179 * k * t + base < mc + delta
180 * (k + 1) * t + base > mc + delta
181 * where t is the interval in hpet ticks for the given freq,
182 * base is the theoretical start value 0 < base < t,
183 * mc is the main counter value at the time of the interrupt,
 184 * delta is the time it takes to write a value to the
185 * comparator.
186 * k may then be computed as (mc - base + delta) / t .
187 */
188 base = mc % t;
189 k = (mc - base + hpetp->hp_delta) / t;
190 write_counter(t * (k + 1) + base,
191 &devp->hd_timer->hpet_compare);
171 } 192 }
172 193
173 if (devp->hd_flags & HPET_SHARED_IRQ) 194 if (devp->hd_flags & HPET_SHARED_IRQ)
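
A worked example of the comparator arithmetic with hypothetical tick values, showing how k pushes the next compare value past the (possibly long-delayed) current counter where the old t + m write could land in the past:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Hypothetical values: tick period t, the comparator value m we had
         * programmed, the main counter mc at interrupt time (delayed by
         * several periods), and the write latency delta. */
        uint64_t t = 1000, m = 5000, mc = 8200, delta = 10;

        uint64_t base = mc % t;                    /* 8200 % 1000 = 200   */
        uint64_t k    = (mc - base + delta) / t;   /* (8000 + 10)/1000 = 8 */
        uint64_t next = t * (k + 1) + base;        /* 9000 + 200 = 9200   */

        /* Naively writing t + m = 6000 would already be in the past. */
        printf("next comparator = %llu (main counter was %llu, naive t+m = %llu)\n",
               (unsigned long long)next, (unsigned long long)mc,
               (unsigned long long)(t + m));
        return 0;
}
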
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index b60a4c263686..faf7c5217848 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -298,11 +298,13 @@ static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
298 old_index = stat->last_index; 298 old_index = stat->last_index;
299 new_index = freq_table_get_index(stat, freq->new); 299 new_index = freq_table_get_index(stat, freq->new);
300 300
301 cpufreq_stats_update(freq->cpu); 301 /* We can't do stat->time_in_state[-1]= .. */
302 if (old_index == new_index) 302 if (old_index == -1 || new_index == -1)
303 return 0; 303 return 0;
304 304
305 if (old_index == -1 || new_index == -1) 305 cpufreq_stats_update(freq->cpu);
306
307 if (old_index == new_index)
306 return 0; 308 return 0;
307 309
308 spin_lock(&cpufreq_stats_lock); 310 spin_lock(&cpufreq_stats_lock);
@@ -387,6 +389,7 @@ static void __exit cpufreq_stats_exit(void)
387 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier); 389 unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
388 for_each_online_cpu(cpu) { 390 for_each_online_cpu(cpu) {
389 cpufreq_stats_free_table(cpu); 391 cpufreq_stats_free_table(cpu);
392 cpufreq_stats_free_sysfs(cpu);
390 } 393 }
391} 394}
392 395
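
Moving the -1 check ahead of cpufreq_stats_update() matters because the update accumulates into time_in_state[last_index]; with an unknown frequency that index is -1 and the store goes out of bounds. A small sketch of the reordered guard with a hypothetical table:

#include <stdio.h>

#define NSTATES 4

static long long time_in_state[NSTATES];
static int last_index = -1;   /* hypothetical: current freq not in the table */

static void stats_update(long long delta)
{
        time_in_state[last_index] += delta;   /* only safe if last_index >= 0 */
}

int main(void)
{
        int old_index = last_index, new_index = 2;

        /* Patch order: reject unknown indices before touching the table. */
        if (old_index == -1 || new_index == -1) {
                printf("skipping update for unknown frequency\n");
                return 0;
        }
        stats_update(10);
        return 0;
}
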
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 83479b6fb9a1..bce576d7478e 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1079,6 +1079,9 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
1079 } 1079 }
1080 1080
1081 res = transition_fid_vid(data, fid, vid); 1081 res = transition_fid_vid(data, fid, vid);
1082 if (res)
1083 return res;
1084
1082 freqs.new = find_khz_freq_from_fid(data->currfid); 1085 freqs.new = find_khz_freq_from_fid(data->currfid);
1083 1086
1084 for_each_cpu(i, data->available_cores) { 1087 for_each_cpu(i, data->available_cores) {
@@ -1101,7 +1104,8 @@ static int transition_frequency_pstate(struct powernow_k8_data *data,
1101 /* get MSR index for hardware pstate transition */ 1104 /* get MSR index for hardware pstate transition */
1102 pstate = index & HW_PSTATE_MASK; 1105 pstate = index & HW_PSTATE_MASK;
1103 if (pstate > data->max_hw_pstate) 1106 if (pstate > data->max_hw_pstate)
1104 return 0; 1107 return -EINVAL;
1108
1105 freqs.old = find_khz_freq_from_pstate(data->powernow_table, 1109 freqs.old = find_khz_freq_from_pstate(data->powernow_table,
1106 data->currpstate); 1110 data->currpstate);
1107 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate); 1111 freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 2a638f9f09a2..028330044201 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -1221,6 +1221,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1221 } else { 1221 } else {
1222 do { 1222 do {
1223 for (i = chanirq_res->start; i <= chanirq_res->end; i++) { 1223 for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
1224 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
1225 irq_cap = 1;
1226 break;
1227 }
1228
1224 if ((errirq_res->flags & IORESOURCE_BITS) == 1229 if ((errirq_res->flags & IORESOURCE_BITS) ==
1225 IORESOURCE_IRQ_SHAREABLE) 1230 IORESOURCE_IRQ_SHAREABLE)
1226 chan_flag[irq_cnt] = IRQF_SHARED; 1231 chan_flag[irq_cnt] = IRQF_SHARED;
@@ -1230,15 +1235,11 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
1230 "Found IRQ %d for channel %d\n", 1235 "Found IRQ %d for channel %d\n",
1231 i, irq_cnt); 1236 i, irq_cnt);
1232 chan_irq[irq_cnt++] = i; 1237 chan_irq[irq_cnt++] = i;
1233
1234 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1235 break;
1236 } 1238 }
1237 1239
1238 if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { 1240 if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
1239 irq_cap = 1;
1240 break; 1241 break;
1241 } 1242
1242 chanirq_res = platform_get_resource(pdev, 1243 chanirq_res = platform_get_resource(pdev,
1243 IORESOURCE_IRQ, ++irqres); 1244 IORESOURCE_IRQ, ++irqres);
1244 } while (irq_cnt < pdata->channel_num && chanirq_res); 1245 } while (irq_cnt < pdata->channel_num && chanirq_res);
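
The capacity test now runs before chan_irq[irq_cnt] is written, so the index can never reach SH_DMAC_MAX_CHANNELS; checking only after the store, as before, left one iteration that could write past the end. A sketch with hypothetical sizes:

#include <stdio.h>

#define MAX_CHANNELS 4   /* hypothetical */

int main(void)
{
        int chan_irq[MAX_CHANNELS];
        int irq_cnt = 0, irq_cap = 0;

        for (int irq = 16; irq <= 23; irq++) {      /* more IRQs than slots */
                if (irq_cnt >= MAX_CHANNELS) {      /* check BEFORE writing */
                        irq_cap = 1;
                        break;
                }
                chan_irq[irq_cnt++] = irq;
        }
        printf("used %d of %d slots%s\n", irq_cnt, MAX_CHANNELS,
               irq_cap ? " (remaining IRQs capped)" : "");
        return 0;
}
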
diff --git a/drivers/firmware/iscsi_ibft_find.c b/drivers/firmware/iscsi_ibft_find.c
index f032e446fc11..bfe723266fd8 100644
--- a/drivers/firmware/iscsi_ibft_find.c
+++ b/drivers/firmware/iscsi_ibft_find.c
@@ -108,7 +108,9 @@ done:
108 */ 108 */
109unsigned long __init find_ibft_region(unsigned long *sizep) 109unsigned long __init find_ibft_region(unsigned long *sizep)
110{ 110{
111#ifdef CONFIG_ACPI
111 int i; 112 int i;
113#endif
112 ibft_addr = NULL; 114 ibft_addr = NULL;
113 115
114#ifdef CONFIG_ACPI 116#ifdef CONFIG_ACPI
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 4a7f63143455..2967002a9f82 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -87,32 +87,20 @@ config GPIO_IT8761E
87 Say yes here to support GPIO functionality of IT8761E super I/O chip. 87 Say yes here to support GPIO functionality of IT8761E super I/O chip.
88 88
89config GPIO_EXYNOS4 89config GPIO_EXYNOS4
90 bool "Samsung Exynos4 GPIO library support" 90 def_bool y
91 default y if CPU_EXYNOS4210 91 depends on CPU_EXYNOS4210
92 depends on ARM
93 help
94 Say yes here to support Samsung Exynos4 series SoCs GPIO library
95 92
96config GPIO_PLAT_SAMSUNG 93config GPIO_PLAT_SAMSUNG
97 bool "Samsung SoCs GPIO library support" 94 def_bool y
98 default y if SAMSUNG_GPIOLIB_4BIT 95 depends on SAMSUNG_GPIOLIB_4BIT
99 depends on ARM
100 help
101 Say yes here to support Samsung SoCs GPIO library
102 96
103config GPIO_S5PC100 97config GPIO_S5PC100
104 bool "Samsung S5PC100 GPIO library support" 98 def_bool y
105 default y if CPU_S5PC100 99 depends on CPU_S5PC100
106 depends on ARM
107 help
108 Say yes here to support Samsung S5PC100 SoCs GPIO library
109 100
110config GPIO_S5PV210 101config GPIO_S5PV210
111 bool "Samsung S5PV210/S5PC110 GPIO library support" 102 def_bool y
112 default y if CPU_S5PV210 103 depends on CPU_S5PV210
113 depends on ARM
114 help
115 Say yes here to support Samsung S5PV210/S5PC110 SoCs GPIO library
116 104
117config GPIO_PL061 105config GPIO_PL061
118 bool "PrimeCell PL061 GPIO support" 106 bool "PrimeCell PL061 GPIO support"
diff --git a/drivers/gpio/gpio-exynos4.c b/drivers/gpio/gpio-exynos4.c
index d54ca6adb660..9029835112e7 100644
--- a/drivers/gpio/gpio-exynos4.c
+++ b/drivers/gpio/gpio-exynos4.c
@@ -21,16 +21,37 @@
21#include <plat/gpio-cfg.h> 21#include <plat/gpio-cfg.h>
22#include <plat/gpio-cfg-helpers.h> 22#include <plat/gpio-cfg-helpers.h>
23 23
24int s3c_gpio_setpull_exynos4(struct s3c_gpio_chip *chip,
25 unsigned int off, s3c_gpio_pull_t pull)
26{
27 if (pull == S3C_GPIO_PULL_UP)
28 pull = 3;
29
30 return s3c_gpio_setpull_updown(chip, off, pull);
31}
32
33s3c_gpio_pull_t s3c_gpio_getpull_exynos4(struct s3c_gpio_chip *chip,
34 unsigned int off)
35{
36 s3c_gpio_pull_t pull;
37
38 pull = s3c_gpio_getpull_updown(chip, off);
39 if (pull == 3)
40 pull = S3C_GPIO_PULL_UP;
41
42 return pull;
43}
44
24static struct s3c_gpio_cfg gpio_cfg = { 45static struct s3c_gpio_cfg gpio_cfg = {
25 .set_config = s3c_gpio_setcfg_s3c64xx_4bit, 46 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
26 .set_pull = s3c_gpio_setpull_updown, 47 .set_pull = s3c_gpio_setpull_exynos4,
27 .get_pull = s3c_gpio_getpull_updown, 48 .get_pull = s3c_gpio_getpull_exynos4,
28}; 49};
29 50
30static struct s3c_gpio_cfg gpio_cfg_noint = { 51static struct s3c_gpio_cfg gpio_cfg_noint = {
31 .set_config = s3c_gpio_setcfg_s3c64xx_4bit, 52 .set_config = s3c_gpio_setcfg_s3c64xx_4bit,
32 .set_pull = s3c_gpio_setpull_updown, 53 .set_pull = s3c_gpio_setpull_exynos4,
33 .get_pull = s3c_gpio_getpull_updown, 54 .get_pull = s3c_gpio_getpull_exynos4,
34}; 55};
35 56
36/* 57/*
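
Exynos4 encodes pull-up as 3 in the pull-control bits instead of the generic S3C value, so the new helpers translate in both directions around the common updown accessors. A sketch of that mapping with assumed encodings:

#include <stdio.h>

#define S3C_GPIO_PULL_UP 2   /* assumed generic encoding */
#define EXYNOS4_PULL_UP  3   /* value the Exynos4 register expects */

static unsigned to_hw(unsigned pull)
{
        return pull == S3C_GPIO_PULL_UP ? EXYNOS4_PULL_UP : pull;
}

static unsigned from_hw(unsigned reg)
{
        return reg == EXYNOS4_PULL_UP ? S3C_GPIO_PULL_UP : reg;
}

int main(void)
{
        unsigned reg = to_hw(S3C_GPIO_PULL_UP);

        printf("wrote %u to the register, read back as generic %u\n",
               reg, from_hw(reg));
        return 0;
}
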
diff --git a/drivers/gpio/gpio-nomadik.c b/drivers/gpio/gpio-nomadik.c
index 4961ef9bc153..2c212c732d76 100644
--- a/drivers/gpio/gpio-nomadik.c
+++ b/drivers/gpio/gpio-nomadik.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2008,2009 STMicroelectronics 4 * Copyright (C) 2008,2009 STMicroelectronics
5 * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it> 5 * Copyright (C) 2009 Alessandro Rubini <rubini@unipv.it>
6 * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com> 6 * Rewritten based on work by Prafulla WADASKAR <prafulla.wadaskar@st.com>
7 * Copyright (C) 2011 Linus Walleij <linus.walleij@linaro.org>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
@@ -49,6 +50,7 @@ struct nmk_gpio_chip {
49 u32 (*get_secondary_status)(unsigned int bank); 50 u32 (*get_secondary_status)(unsigned int bank);
50 void (*set_ioforce)(bool enable); 51 void (*set_ioforce)(bool enable);
51 spinlock_t lock; 52 spinlock_t lock;
53 bool sleepmode;
52 /* Keep track of configured edges */ 54 /* Keep track of configured edges */
53 u32 edge_rising; 55 u32 edge_rising;
54 u32 edge_falling; 56 u32 edge_falling;
@@ -393,14 +395,25 @@ EXPORT_SYMBOL(nmk_config_pins_sleep);
393 * @gpio: pin number 395 * @gpio: pin number
394 * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE, 396 * @mode: NMK_GPIO_SLPM_INPUT or NMK_GPIO_SLPM_NOCHANGE,
395 * 397 *
396 * Sets the sleep mode of a pin. If @mode is NMK_GPIO_SLPM_INPUT, the pin is 398 * This register is actually in the pinmux layer, not the GPIO block itself.
397 * changed to an input (with pullup/down enabled) in sleep and deep sleep. If 399 * The GPIO1B_SLPM register defines the GPIO mode when SLEEP/DEEP-SLEEP
398 * @mode is NMK_GPIO_SLPM_NOCHANGE, the pin remains in the state it was 400 * mode is entered (i.e. when signal IOFORCE is HIGH by the platform code).
399 * configured even when in sleep and deep sleep. 401 * Each GPIO can be configured to be forced into GPIO mode when IOFORCE is
402 * HIGH, overriding the normal setting defined by GPIO_AFSELx registers.
403 * When IOFORCE returns LOW (by software, after SLEEP/DEEP-SLEEP exit),
404 * the GPIOs return to the normal setting defined by GPIO_AFSELx registers.
400 * 405 *
401 * On DB8500v2 onwards, this setting loses the previous meaning and instead 406 * If @mode is NMK_GPIO_SLPM_INPUT, the corresponding GPIO is switched to GPIO
402 * indicates if wakeup detection is enabled on the pin. Note that 407 * mode when signal IOFORCE is HIGH (i.e. when SLEEP/DEEP-SLEEP mode is
403 * enable_irq_wake() will automatically enable wakeup detection. 408 * entered) regardless of the altfunction selected. Also wake-up detection is
409 * ENABLED.
410 *
411 * If @mode is NMK_GPIO_SLPM_NOCHANGE, the corresponding GPIO remains
412 * controlled by NMK_GPIO_DATC, NMK_GPIO_DATS, NMK_GPIO_DIR, NMK_GPIO_PDIS
413 * (for altfunction GPIO) or respective on-chip peripherals (for other
414 * altfuncs) when IOFORCE is HIGH. Also wake-up detection DISABLED.
415 *
416 * Note that enable_irq_wake() will automatically enable wakeup detection.
404 */ 417 */
405int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode) 418int nmk_gpio_set_slpm(int gpio, enum nmk_gpio_slpm mode)
406{ 419{
@@ -551,6 +564,12 @@ static void __nmk_gpio_irq_modify(struct nmk_gpio_chip *nmk_chip,
551static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip, 564static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
552 int gpio, bool on) 565 int gpio, bool on)
553{ 566{
567 if (nmk_chip->sleepmode) {
568 __nmk_gpio_set_slpm(nmk_chip, gpio - nmk_chip->chip.base,
569 on ? NMK_GPIO_SLPM_WAKEUP_ENABLE
570 : NMK_GPIO_SLPM_WAKEUP_DISABLE);
571 }
572
554 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on); 573 __nmk_gpio_irq_modify(nmk_chip, gpio, WAKE, on);
555} 574}
556 575
@@ -901,7 +920,7 @@ void nmk_gpio_wakeups_suspend(void)
901 writel(chip->fwimsc & chip->real_wake, 920 writel(chip->fwimsc & chip->real_wake,
902 chip->addr + NMK_GPIO_FWIMSC); 921 chip->addr + NMK_GPIO_FWIMSC);
903 922
904 if (cpu_is_u8500v2()) { 923 if (chip->sleepmode) {
905 chip->slpm = readl(chip->addr + NMK_GPIO_SLPC); 924 chip->slpm = readl(chip->addr + NMK_GPIO_SLPC);
906 925
907 /* 0 -> wakeup enable */ 926 /* 0 -> wakeup enable */
@@ -923,7 +942,7 @@ void nmk_gpio_wakeups_resume(void)
923 writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC); 942 writel(chip->rwimsc, chip->addr + NMK_GPIO_RWIMSC);
924 writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC); 943 writel(chip->fwimsc, chip->addr + NMK_GPIO_FWIMSC);
925 944
926 if (cpu_is_u8500v2()) 945 if (chip->sleepmode)
927 writel(chip->slpm, chip->addr + NMK_GPIO_SLPC); 946 writel(chip->slpm, chip->addr + NMK_GPIO_SLPC);
928 } 947 }
929} 948}
@@ -1010,6 +1029,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1010 nmk_chip->secondary_parent_irq = secondary_irq; 1029 nmk_chip->secondary_parent_irq = secondary_irq;
1011 nmk_chip->get_secondary_status = pdata->get_secondary_status; 1030 nmk_chip->get_secondary_status = pdata->get_secondary_status;
1012 nmk_chip->set_ioforce = pdata->set_ioforce; 1031 nmk_chip->set_ioforce = pdata->set_ioforce;
1032 nmk_chip->sleepmode = pdata->supports_sleepmode;
1013 spin_lock_init(&nmk_chip->lock); 1033 spin_lock_init(&nmk_chip->lock);
1014 1034
1015 chip = &nmk_chip->chip; 1035 chip = &nmk_chip->chip;
@@ -1065,5 +1085,3 @@ core_initcall(nmk_gpio_init);
1065MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini"); 1085MODULE_AUTHOR("Prafulla WADASKAR and Alessandro Rubini");
1066MODULE_DESCRIPTION("Nomadik GPIO Driver"); 1086MODULE_DESCRIPTION("Nomadik GPIO Driver");
1067MODULE_LICENSE("GPL"); 1087MODULE_LICENSE("GPL");
1068
1069
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 6c51191da567..35bebde23e83 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -432,7 +432,6 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
432{ 432{
433 void __iomem *base = bank->base; 433 void __iomem *base = bank->base;
434 u32 gpio_bit = 1 << gpio; 434 u32 gpio_bit = 1 << gpio;
435 u32 val;
436 435
437 if (cpu_is_omap44xx()) { 436 if (cpu_is_omap44xx()) {
438 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit, 437 MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,
@@ -455,15 +454,8 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
455 } 454 }
456 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 455 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
457 if (cpu_is_omap44xx()) { 456 if (cpu_is_omap44xx()) {
458 if (trigger != 0) 457 MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
459 __raw_writel(1 << gpio, bank->base+ 458 trigger != 0);
460 OMAP4_GPIO_IRQWAKEN0);
461 else {
462 val = __raw_readl(bank->base +
463 OMAP4_GPIO_IRQWAKEN0);
464 __raw_writel(val & (~(1 << gpio)), bank->base +
465 OMAP4_GPIO_IRQWAKEN0);
466 }
467 } else { 459 } else {
468 /* 460 /*
469 * GPIO wakeup request can only be generated on edge 461 * GPIO wakeup request can only be generated on edge
@@ -477,8 +469,9 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
477 + OMAP24XX_GPIO_CLEARWKUENA); 469 + OMAP24XX_GPIO_CLEARWKUENA);
478 } 470 }
479 } 471 }
480 /* This part needs to be executed always for OMAP34xx */ 472 /* This part needs to be executed always for OMAP{34xx, 44xx} */
481 if (cpu_is_omap34xx() || (bank->non_wakeup_gpios & gpio_bit)) { 473 if (cpu_is_omap34xx() || cpu_is_omap44xx() ||
474 (bank->non_wakeup_gpios & gpio_bit)) {
482 /* 475 /*
483 * Log the edge gpio and manually trigger the IRQ 476 * Log the edge gpio and manually trigger the IRQ
484 * after resume if the input level changes 477 * after resume if the input level changes
@@ -1134,8 +1127,11 @@ static void gpio_irq_shutdown(struct irq_data *d)
1134{ 1127{
1135 unsigned int gpio = d->irq - IH_GPIO_BASE; 1128 unsigned int gpio = d->irq - IH_GPIO_BASE;
1136 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1129 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1130 unsigned long flags;
1137 1131
1132 spin_lock_irqsave(&bank->lock, flags);
1138 _reset_gpio(bank, gpio); 1133 _reset_gpio(bank, gpio);
1134 spin_unlock_irqrestore(&bank->lock, flags);
1139} 1135}
1140 1136
1141static void gpio_ack_irq(struct irq_data *d) 1137static void gpio_ack_irq(struct irq_data *d)
@@ -1150,9 +1146,12 @@ static void gpio_mask_irq(struct irq_data *d)
1150{ 1146{
1151 unsigned int gpio = d->irq - IH_GPIO_BASE; 1147 unsigned int gpio = d->irq - IH_GPIO_BASE;
1152 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1148 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1149 unsigned long flags;
1153 1150
1151 spin_lock_irqsave(&bank->lock, flags);
1154 _set_gpio_irqenable(bank, gpio, 0); 1152 _set_gpio_irqenable(bank, gpio, 0);
1155 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE); 1153 _set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
1154 spin_unlock_irqrestore(&bank->lock, flags);
1156} 1155}
1157 1156
1158static void gpio_unmask_irq(struct irq_data *d) 1157static void gpio_unmask_irq(struct irq_data *d)
@@ -1161,7 +1160,9 @@ static void gpio_unmask_irq(struct irq_data *d)
1161 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 1160 struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
1162 unsigned int irq_mask = 1 << get_gpio_index(gpio); 1161 unsigned int irq_mask = 1 << get_gpio_index(gpio);
1163 u32 trigger = irqd_get_trigger_type(d); 1162 u32 trigger = irqd_get_trigger_type(d);
1163 unsigned long flags;
1164 1164
1165 spin_lock_irqsave(&bank->lock, flags);
1165 if (trigger) 1166 if (trigger)
1166 _set_gpio_triggering(bank, get_gpio_index(gpio), trigger); 1167 _set_gpio_triggering(bank, get_gpio_index(gpio), trigger);
1167 1168
@@ -1173,6 +1174,7 @@ static void gpio_unmask_irq(struct irq_data *d)
1173 } 1174 }
1174 1175
1175 _set_gpio_irqenable(bank, gpio, 1); 1176 _set_gpio_irqenable(bank, gpio, 1);
1177 spin_unlock_irqrestore(&bank->lock, flags);
1176} 1178}
1177 1179
1178static struct irq_chip gpio_irq_chip = { 1180static struct irq_chip gpio_irq_chip = {
@@ -1524,7 +1526,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
1524 } 1526 }
1525} 1527}
1526 1528
1527static void __init omap_gpio_chip_init(struct gpio_bank *bank) 1529static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
1528{ 1530{
1529 int j; 1531 int j;
1530 static int gpio; 1532 static int gpio;
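
Taking bank->lock in the mask/unmask/shutdown paths keeps the read-modify-write of the irq-enable and trigger registers from interleaving with a concurrent caller on another CPU. A minimal model of the locked update (a pthread mutex standing in for spin_lock_irqsave):

#include <pthread.h>
#include <stdio.h>

/* Minimal model: the irq-enable word of a GPIO bank is updated with a
 * read-modify-write that must not interleave across CPUs. */
struct gpio_bank {
        pthread_mutex_t lock;   /* stands in for the kernel spinlock */
        unsigned int irqenable;
};

static void gpio_mask_irq(struct gpio_bank *bank, int gpio)
{
        pthread_mutex_lock(&bank->lock);
        bank->irqenable &= ~(1u << gpio);   /* whole RMW under the lock */
        pthread_mutex_unlock(&bank->lock);
}

int main(void)
{
        struct gpio_bank bank = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .irqenable = 0xff,
        };

        gpio_mask_irq(&bank, 3);
        printf("irqenable = 0x%02x\n", bank.irqenable);
        return 0;
}
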
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 3e257a50bf56..61e1ef90d4e5 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -46,10 +46,11 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
46 list_for_each_entry(entry, &dev->maplist, head) { 46 list_for_each_entry(entry, &dev->maplist, head) {
47 /* 47 /*
48 * Because the kernel-userspace ABI is fixed at a 32-bit offset 48 * Because the kernel-userspace ABI is fixed at a 32-bit offset
49 * while PCI resources may live above that, we ignore the map 49 * while PCI resources may live above that, we only compare the
50 * offset for maps of type _DRM_FRAMEBUFFER or _DRM_REGISTERS. 50 * lower 32 bits of the map offset for maps of type
51 * It is assumed that each driver will have only one resource of 51 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
52 * each type. 52 * It is assumed that if a driver have more than one resource
53 * of each type, the lower 32 bits are different.
53 */ 54 */
54 if (!entry->map || 55 if (!entry->map ||
55 map->type != entry->map->type || 56 map->type != entry->map->type ||
@@ -59,9 +60,12 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
59 case _DRM_SHM: 60 case _DRM_SHM:
60 if (map->flags != _DRM_CONTAINS_LOCK) 61 if (map->flags != _DRM_CONTAINS_LOCK)
61 break; 62 break;
63 return entry;
62 case _DRM_REGISTERS: 64 case _DRM_REGISTERS:
63 case _DRM_FRAME_BUFFER: 65 case _DRM_FRAME_BUFFER:
64 return entry; 66 if ((entry->map->offset & 0xffffffff) ==
67 (map->offset & 0xffffffff))
68 return entry;
65 default: /* Make gcc happy */ 69 default: /* Make gcc happy */
66 ; 70 ;
67 } 71 }
@@ -183,9 +187,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
183 return -EINVAL; 187 return -EINVAL;
184 } 188 }
185#endif 189#endif
186#ifdef __alpha__
187 map->offset += dev->hose->mem_space->start;
188#endif
189 /* Some drivers preinitialize some maps, without the X Server 190 /* Some drivers preinitialize some maps, without the X Server
190 * needing to be aware of it. Therefore, we just return success 191 * needing to be aware of it. Therefore, we just return success
191 * when the server tries to create a duplicate map. 192 * when the server tries to create a duplicate map.
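
Because the legacy map ABI truncates offsets to 32 bits, the lookup now compares only the low 32 bits for register and framebuffer maps instead of ignoring the offset entirely. A sketch of the comparison with a hypothetical BAR above 4 GiB:

#include <stdio.h>
#include <stdint.h>

static int offsets_match(uint64_t a, uint64_t b)
{
        /* Compare only the low 32 bits, like the patched lookup does. */
        return (a & 0xffffffffULL) == (b & 0xffffffffULL);
}

int main(void)
{
        uint64_t resource = 0x380000000ULL;   /* hypothetical BAR above 4 GiB */
        uint64_t from_abi = 0x80000000ULL;    /* what a 32-bit offset ABI kept */

        printf("match: %d\n", offsets_match(resource, from_abi));
        return 0;
}
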
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 872747c5a544..21058e6ad2b8 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1113,7 +1113,7 @@ int drm_mode_getresources(struct drm_device *dev, void *data,
1113 if (card_res->count_fbs >= fb_count) { 1113 if (card_res->count_fbs >= fb_count) {
1114 copied = 0; 1114 copied = 0;
1115 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr; 1115 fb_id = (uint32_t __user *)(unsigned long)card_res->fb_id_ptr;
1116 list_for_each_entry(fb, &file_priv->fbs, head) { 1116 list_for_each_entry(fb, &file_priv->fbs, filp_head) {
1117 if (put_user(fb->base.id, fb_id + copied)) { 1117 if (put_user(fb->base.id, fb_id + copied)) {
1118 ret = -EFAULT; 1118 ret = -EFAULT;
1119 goto out; 1119 goto out;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a9357c66ff8..09292193dafe 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -184,9 +184,9 @@ drm_edid_block_valid(u8 *raw_edid)
184 184
185bad: 185bad:
186 if (raw_edid) { 186 if (raw_edid) {
187 DRM_ERROR("Raw EDID:\n"); 187 printk(KERN_ERR "Raw EDID:\n");
188 print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH); 188 print_hex_dump_bytes(KERN_ERR, DUMP_PREFIX_NONE, raw_edid, EDID_LENGTH);
189 printk("\n"); 189 printk(KERN_ERR "\n");
190 } 190 }
191 return 0; 191 return 0;
192} 192}
@@ -258,6 +258,17 @@ drm_do_probe_ddc_edid(struct i2c_adapter *adapter, unsigned char *buf,
258 return ret == 2 ? 0 : -1; 258 return ret == 2 ? 0 : -1;
259} 259}
260 260
261static bool drm_edid_is_zero(u8 *in_edid, int length)
262{
263 int i;
264 u32 *raw_edid = (u32 *)in_edid;
265
266 for (i = 0; i < length / 4; i++)
267 if (*(raw_edid + i) != 0)
268 return false;
269 return true;
270}
271
261static u8 * 272static u8 *
262drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) 273drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
263{ 274{
@@ -273,6 +284,10 @@ drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
273 goto out; 284 goto out;
274 if (drm_edid_block_valid(block)) 285 if (drm_edid_block_valid(block))
275 break; 286 break;
287 if (i == 0 && drm_edid_is_zero(block, EDID_LENGTH)) {
288 connector->null_edid_counter++;
289 goto carp;
290 }
276 } 291 }
277 if (i == 4) 292 if (i == 4)
278 goto carp; 293 goto carp;
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index d61d185cf040..4a058c7af6c0 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -28,6 +28,7 @@
28 * IN THE SOFTWARE. 28 * IN THE SOFTWARE.
29 */ 29 */
30#include <linux/compat.h> 30#include <linux/compat.h>
31#include <linux/ratelimit.h>
31 32
32#include "drmP.h" 33#include "drmP.h"
33#include "drm_core.h" 34#include "drm_core.h"
@@ -253,10 +254,10 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
253 return -EFAULT; 254 return -EFAULT;
254 255
255 m32.handle = (unsigned long)handle; 256 m32.handle = (unsigned long)handle;
256 if (m32.handle != (unsigned long)handle && printk_ratelimit()) 257 if (m32.handle != (unsigned long)handle)
257 printk(KERN_ERR "compat_drm_addmap truncated handle" 258 printk_ratelimited(KERN_ERR "compat_drm_addmap truncated handle"
258 " %p for type %d offset %x\n", 259 " %p for type %d offset %x\n",
259 handle, m32.type, m32.offset); 260 handle, m32.type, m32.offset);
260 261
261 if (copy_to_user(argp, &m32, sizeof(m32))) 262 if (copy_to_user(argp, &m32, sizeof(m32)))
262 return -EFAULT; 263 return -EFAULT;
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index e1aee4f6a7c6..b6a19cb07caf 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -251,7 +251,7 @@ err:
251} 251}
252 252
253 253
254int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p) 254static int drm_pci_irq_by_busid(struct drm_device *dev, struct drm_irq_busid *p)
255{ 255{
256 if ((p->busnum >> 8) != drm_get_pci_domain(dev) || 256 if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
257 (p->busnum & 0xff) != dev->pdev->bus->number || 257 (p->busnum & 0xff) != dev->pdev->bus->number ||
@@ -292,6 +292,7 @@ static struct drm_bus drm_pci_bus = {
292 .get_name = drm_pci_get_name, 292 .get_name = drm_pci_get_name,
293 .set_busid = drm_pci_set_busid, 293 .set_busid = drm_pci_set_busid,
294 .set_unique = drm_pci_set_unique, 294 .set_unique = drm_pci_set_unique,
295 .irq_by_busid = drm_pci_irq_by_busid,
295 .agp_init = drm_pci_agp_init, 296 .agp_init = drm_pci_agp_init,
296}; 297};
297 298
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 2c3fcbdfd8ff..5db96d45fc71 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -526,7 +526,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
526static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev) 526static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
527{ 527{
528#ifdef __alpha__ 528#ifdef __alpha__
529 return dev->hose->dense_mem_base - dev->hose->mem_space->start; 529 return dev->hose->dense_mem_base;
530#else 530#else
531 return 0; 531 return 0;
532#endif 532#endif
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 51c2257b11e6..4d46441cbe2d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -776,7 +776,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm); 776 seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
777 seq_printf(m, " seqno: 0x%08x\n", error->seqno); 777 seq_printf(m, " seqno: 0x%08x\n", error->seqno);
778 778
779 for (i = 0; i < 16; i++) 779 for (i = 0; i < dev_priv->num_fence_regs; i++)
780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]); 780 seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
781 781
782 if (error->active_bo) 782 if (error->active_bo)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee660355ae68..f63ee162f124 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -716,6 +716,7 @@ typedef struct drm_i915_private {
716 struct intel_fbdev *fbdev; 716 struct intel_fbdev *fbdev;
717 717
718 struct drm_property *broadcast_rgb_property; 718 struct drm_property *broadcast_rgb_property;
719 struct drm_property *force_audio_property;
719 720
720 atomic_t forcewake_count; 721 atomic_t forcewake_count;
721} drm_i915_private_t; 722} drm_i915_private_t;
@@ -909,13 +910,6 @@ struct drm_i915_file_private {
909 } mm; 910 } mm;
910}; 911};
911 912
912enum intel_chip_family {
913 CHIP_I8XX = 0x01,
914 CHIP_I9XX = 0x02,
915 CHIP_I915 = 0x04,
916 CHIP_I965 = 0x08,
917};
918
919#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info) 913#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
920 914
921#define IS_I830(dev) ((dev)->pci_device == 0x3577) 915#define IS_I830(dev) ((dev)->pci_device == 0x3577)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0b2e167d2bce..94c84d744100 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
354 * page_offset = offset within page 354 * page_offset = offset within page
355 * page_length = bytes to copy for this page 355 * page_length = bytes to copy for this page
356 */ 356 */
357 page_offset = offset & (PAGE_SIZE-1); 357 page_offset = offset_in_page(offset);
358 page_length = remain; 358 page_length = remain;
359 if ((page_offset + remain) > PAGE_SIZE) 359 if ((page_offset + remain) > PAGE_SIZE)
360 page_length = PAGE_SIZE - page_offset; 360 page_length = PAGE_SIZE - page_offset;
@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
453 * data_page_offset = offset with data_page_index page. 453 * data_page_offset = offset with data_page_index page.
454 * page_length = bytes to copy for this page 454 * page_length = bytes to copy for this page
455 */ 455 */
456 shmem_page_offset = offset & ~PAGE_MASK; 456 shmem_page_offset = offset_in_page(offset);
457 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 457 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
458 data_page_offset = data_ptr & ~PAGE_MASK; 458 data_page_offset = offset_in_page(data_ptr);
459 459
460 page_length = remain; 460 page_length = remain;
461 if ((shmem_page_offset + page_length) > PAGE_SIZE) 461 if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -465,8 +465,10 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
465 465
466 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT, 466 page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
467 GFP_HIGHUSER | __GFP_RECLAIMABLE); 467 GFP_HIGHUSER | __GFP_RECLAIMABLE);
468 if (IS_ERR(page)) 468 if (IS_ERR(page)) {
469 return PTR_ERR(page); 469 ret = PTR_ERR(page);
470 goto out;
471 }
470 472
471 if (do_bit17_swizzling) { 473 if (do_bit17_swizzling) {
472 slow_shmem_bit17_copy(page, 474 slow_shmem_bit17_copy(page,
@@ -638,8 +640,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
638 * page_offset = offset within page 640 * page_offset = offset within page
639 * page_length = bytes to copy for this page 641 * page_length = bytes to copy for this page
640 */ 642 */
641 page_base = (offset & ~(PAGE_SIZE-1)); 643 page_base = offset & PAGE_MASK;
642 page_offset = offset & (PAGE_SIZE-1); 644 page_offset = offset_in_page(offset);
643 page_length = remain; 645 page_length = remain;
644 if ((page_offset + remain) > PAGE_SIZE) 646 if ((page_offset + remain) > PAGE_SIZE)
645 page_length = PAGE_SIZE - page_offset; 647 page_length = PAGE_SIZE - page_offset;
@@ -650,7 +652,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
650 */ 652 */
651 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base, 653 if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
652 page_offset, user_data, page_length)) 654 page_offset, user_data, page_length))
653
654 return -EFAULT; 655 return -EFAULT;
655 656
656 remain -= page_length; 657 remain -= page_length;
@@ -730,9 +731,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
730 * page_length = bytes to copy for this page 731 * page_length = bytes to copy for this page
731 */ 732 */
732 gtt_page_base = offset & PAGE_MASK; 733 gtt_page_base = offset & PAGE_MASK;
733 gtt_page_offset = offset & ~PAGE_MASK; 734 gtt_page_offset = offset_in_page(offset);
734 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 735 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
735 data_page_offset = data_ptr & ~PAGE_MASK; 736 data_page_offset = offset_in_page(data_ptr);
736 737
737 page_length = remain; 738 page_length = remain;
738 if ((gtt_page_offset + page_length) > PAGE_SIZE) 739 if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -791,7 +792,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
791 * page_offset = offset within page 792 * page_offset = offset within page
792 * page_length = bytes to copy for this page 793 * page_length = bytes to copy for this page
793 */ 794 */
794 page_offset = offset & (PAGE_SIZE-1); 795 page_offset = offset_in_page(offset);
795 page_length = remain; 796 page_length = remain;
796 if ((page_offset + remain) > PAGE_SIZE) 797 if ((page_offset + remain) > PAGE_SIZE)
797 page_length = PAGE_SIZE - page_offset; 798 page_length = PAGE_SIZE - page_offset;
@@ -896,9 +897,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
896 * data_page_offset = offset with data_page_index page. 897 * data_page_offset = offset with data_page_index page.
897 * page_length = bytes to copy for this page 898 * page_length = bytes to copy for this page
898 */ 899 */
899 shmem_page_offset = offset & ~PAGE_MASK; 900 shmem_page_offset = offset_in_page(offset);
900 data_page_index = data_ptr / PAGE_SIZE - first_data_page; 901 data_page_index = data_ptr / PAGE_SIZE - first_data_page;
901 data_page_offset = data_ptr & ~PAGE_MASK; 902 data_page_offset = offset_in_page(data_ptr);
902 903
903 page_length = remain; 904 page_length = remain;
904 if ((shmem_page_offset + page_length) > PAGE_SIZE) 905 if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -1450,8 +1451,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1450 * edge of an even tile row (where tile rows are counted as if the bo is 1451 * edge of an even tile row (where tile rows are counted as if the bo is
1451 * placed in a fenced gtt region). 1452 * placed in a fenced gtt region).
1452 */ 1453 */
1453 if (IS_GEN2(dev) || 1454 if (IS_GEN2(dev))
1454 (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) 1455 tile_height = 16;
1456 else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
1455 tile_height = 32; 1457 tile_height = 32;
1456 else 1458 else
1457 tile_height = 8; 1459 tile_height = 8;
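
offset_in_page(x) is simply x & (PAGE_SIZE - 1), which is also x & ~PAGE_MASK, so the conversions in this file change spelling, not behaviour. A small check with an arbitrary offset:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE  4096UL
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define offset_in_page(p) ((unsigned long)(p) & (PAGE_SIZE - 1))

int main(void)
{
        uint64_t offset = 0x12345;   /* arbitrary example offset */

        printf("offset_in_page  = 0x%lx\n", offset_in_page(offset));
        printf("& (PAGE_SIZE-1) = 0x%llx\n",
               (unsigned long long)(offset & (PAGE_SIZE - 1)));
        printf("& ~PAGE_MASK    = 0x%llx\n",
               (unsigned long long)(offset & ~PAGE_MASK));
        printf("page base       = 0x%llx\n",
               (unsigned long long)(offset & PAGE_MASK));
        return 0;
}
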
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index b79619a7b788..9e34a1abeb61 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -517,7 +517,7 @@ irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
517 if (de_iir & DE_PIPEA_VBLANK_IVB) 517 if (de_iir & DE_PIPEA_VBLANK_IVB)
518 drm_handle_vblank(dev, 0); 518 drm_handle_vblank(dev, 0);
519 519
520 if (de_iir & DE_PIPEB_VBLANK_IVB); 520 if (de_iir & DE_PIPEB_VBLANK_IVB)
521 drm_handle_vblank(dev, 1); 521 drm_handle_vblank(dev, 1);
522 522
523 /* check event from PCH */ 523 /* check event from PCH */
@@ -1740,6 +1740,16 @@ void ironlake_irq_preinstall(struct drm_device *dev)
1740 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work); 1740 INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
1741 1741
1742 I915_WRITE(HWSTAM, 0xeffe); 1742 I915_WRITE(HWSTAM, 0xeffe);
1743 if (IS_GEN6(dev)) {
1744 /* Workaround stalls observed on Sandy Bridge GPUs by
1745 * making the blitter command streamer generate a
1746 * write to the Hardware Status Page for
1747 * MI_USER_INTERRUPT. This appears to serialize the
1748 * previous seqno write out before the interrupt
1749 * happens.
1750 */
1751 I915_WRITE(GEN6_BLITTER_HWSTAM, ~GEN6_BLITTER_USER_INTERRUPT);
1752 }
1743 1753
1744 /* XXX hotplug from PCH */ 1754 /* XXX hotplug from PCH */
1745 1755
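
The DE_PIPEB_VBLANK_IVB hunk above removes a stray semicolon: in the old code the if controlled an empty statement, so the pipe-B vblank handler ran on every interrupt. A tiny sketch of why that trailing semicolon is a bug:

#include <stdio.h>

int main(void)
{
        int status = 0x1;            /* pipe A bit set, pipe B clear */
        int handled_b = 0;

        if (status & 0x2);           /* BUG: ';' makes the body empty */
                handled_b = 1;       /* so this runs unconditionally */
        printf("buggy form handled pipe B: %d\n", handled_b);

        handled_b = 0;
        if (status & 0x2)            /* fixed form */
                handled_b = 1;
        printf("fixed form handled pipe B: %d\n", handled_b);
        return 0;
}
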
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index e93f93cc7e78..0979d8877880 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -288,6 +288,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
288 * This may be a DVI-I connector with a shared DDC 288 * This may be a DVI-I connector with a shared DDC
289 * link between analog and digital outputs, so we 289 * link between analog and digital outputs, so we
290 * have to check the EDID input spec of the attached device. 290 * have to check the EDID input spec of the attached device.
291 *
292 * On the other hand, what should we do if it is a broken EDID?
291 */ 293 */
292 if (edid != NULL) { 294 if (edid != NULL) {
293 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL; 295 is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;
@@ -298,6 +300,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
298 if (!is_digital) { 300 if (!is_digital) {
299 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n"); 301 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
300 return true; 302 return true;
303 } else {
304 DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
301 } 305 }
302 } 306 }
303 307
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f553ddfdc168..81a9059b6a94 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3983,54 +3983,6 @@ static void i830_update_wm(struct drm_device *dev)
3983#define ILK_LP0_PLANE_LATENCY 700 3983#define ILK_LP0_PLANE_LATENCY 700
3984#define ILK_LP0_CURSOR_LATENCY 1300 3984#define ILK_LP0_CURSOR_LATENCY 1300
3985 3985
3986static bool ironlake_compute_wm0(struct drm_device *dev,
3987 int pipe,
3988 const struct intel_watermark_params *display,
3989 int display_latency_ns,
3990 const struct intel_watermark_params *cursor,
3991 int cursor_latency_ns,
3992 int *plane_wm,
3993 int *cursor_wm)
3994{
3995 struct drm_crtc *crtc;
3996 int htotal, hdisplay, clock, pixel_size;
3997 int line_time_us, line_count;
3998 int entries, tlb_miss;
3999
4000 crtc = intel_get_crtc_for_pipe(dev, pipe);
4001 if (crtc->fb == NULL || !crtc->enabled)
4002 return false;
4003
4004 htotal = crtc->mode.htotal;
4005 hdisplay = crtc->mode.hdisplay;
4006 clock = crtc->mode.clock;
4007 pixel_size = crtc->fb->bits_per_pixel / 8;
4008
4009 /* Use the small buffer method to calculate plane watermark */
4010 entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
4011 tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
4012 if (tlb_miss > 0)
4013 entries += tlb_miss;
4014 entries = DIV_ROUND_UP(entries, display->cacheline_size);
4015 *plane_wm = entries + display->guard_size;
4016 if (*plane_wm > (int)display->max_wm)
4017 *plane_wm = display->max_wm;
4018
4019 /* Use the large buffer method to calculate cursor watermark */
4020 line_time_us = ((htotal * 1000) / clock);
4021 line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
4022 entries = line_count * 64 * pixel_size;
4023 tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
4024 if (tlb_miss > 0)
4025 entries += tlb_miss;
4026 entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
4027 *cursor_wm = entries + cursor->guard_size;
4028 if (*cursor_wm > (int)cursor->max_wm)
4029 *cursor_wm = (int)cursor->max_wm;
4030
4031 return true;
4032}
4033
4034/* 3986/*
4035 * Check the wm result. 3987 * Check the wm result.
4036 * 3988 *
@@ -4139,12 +4091,12 @@ static void ironlake_update_wm(struct drm_device *dev)
4139 unsigned int enabled; 4091 unsigned int enabled;
4140 4092
4141 enabled = 0; 4093 enabled = 0;
4142 if (ironlake_compute_wm0(dev, 0, 4094 if (g4x_compute_wm0(dev, 0,
4143 &ironlake_display_wm_info, 4095 &ironlake_display_wm_info,
4144 ILK_LP0_PLANE_LATENCY, 4096 ILK_LP0_PLANE_LATENCY,
4145 &ironlake_cursor_wm_info, 4097 &ironlake_cursor_wm_info,
4146 ILK_LP0_CURSOR_LATENCY, 4098 ILK_LP0_CURSOR_LATENCY,
4147 &plane_wm, &cursor_wm)) { 4099 &plane_wm, &cursor_wm)) {
4148 I915_WRITE(WM0_PIPEA_ILK, 4100 I915_WRITE(WM0_PIPEA_ILK,
4149 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4101 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4150 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4102 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4153,12 +4105,12 @@ static void ironlake_update_wm(struct drm_device *dev)
4153 enabled |= 1; 4105 enabled |= 1;
4154 } 4106 }
4155 4107
4156 if (ironlake_compute_wm0(dev, 1, 4108 if (g4x_compute_wm0(dev, 1,
4157 &ironlake_display_wm_info, 4109 &ironlake_display_wm_info,
4158 ILK_LP0_PLANE_LATENCY, 4110 ILK_LP0_PLANE_LATENCY,
4159 &ironlake_cursor_wm_info, 4111 &ironlake_cursor_wm_info,
4160 ILK_LP0_CURSOR_LATENCY, 4112 ILK_LP0_CURSOR_LATENCY,
4161 &plane_wm, &cursor_wm)) { 4113 &plane_wm, &cursor_wm)) {
4162 I915_WRITE(WM0_PIPEB_ILK, 4114 I915_WRITE(WM0_PIPEB_ILK,
4163 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4115 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4164 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4116 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -4223,10 +4175,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
4223 unsigned int enabled; 4175 unsigned int enabled;
4224 4176
4225 enabled = 0; 4177 enabled = 0;
4226 if (ironlake_compute_wm0(dev, 0, 4178 if (g4x_compute_wm0(dev, 0,
4227 &sandybridge_display_wm_info, latency, 4179 &sandybridge_display_wm_info, latency,
4228 &sandybridge_cursor_wm_info, latency, 4180 &sandybridge_cursor_wm_info, latency,
4229 &plane_wm, &cursor_wm)) { 4181 &plane_wm, &cursor_wm)) {
4230 I915_WRITE(WM0_PIPEA_ILK, 4182 I915_WRITE(WM0_PIPEA_ILK,
4231 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4183 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4232 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 4184 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -4235,10 +4187,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
4235 enabled |= 1; 4187 enabled |= 1;
4236 } 4188 }
4237 4189
4238 if (ironlake_compute_wm0(dev, 1, 4190 if (g4x_compute_wm0(dev, 1,
4239 &sandybridge_display_wm_info, latency, 4191 &sandybridge_display_wm_info, latency,
4240 &sandybridge_cursor_wm_info, latency, 4192 &sandybridge_cursor_wm_info, latency,
4241 &plane_wm, &cursor_wm)) { 4193 &plane_wm, &cursor_wm)) {
4242 I915_WRITE(WM0_PIPEB_ILK, 4194 I915_WRITE(WM0_PIPEB_ILK,
4243 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); 4195 (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
4244 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 4196 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -7675,6 +7627,7 @@ static void intel_init_display(struct drm_device *dev)
7675 dev_priv->display.update_wm = NULL; 7627 dev_priv->display.update_wm = NULL;
7676 } else 7628 } else
7677 dev_priv->display.update_wm = pineview_update_wm; 7629 dev_priv->display.update_wm = pineview_update_wm;
7630 dev_priv->display.init_clock_gating = gen3_init_clock_gating;
7678 } else if (IS_G4X(dev)) { 7631 } else if (IS_G4X(dev)) {
7679 dev_priv->display.update_wm = g4x_update_wm; 7632 dev_priv->display.update_wm = g4x_update_wm;
7680 dev_priv->display.init_clock_gating = g4x_init_clock_gating; 7633 dev_priv->display.init_clock_gating = g4x_init_clock_gating;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index a4d80314e7f8..391b55f1cc74 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,8 +59,6 @@ struct intel_dp {
59 bool is_pch_edp; 59 bool is_pch_edp;
60 uint8_t train_set[4]; 60 uint8_t train_set[4];
61 uint8_t link_status[DP_LINK_STATUS_SIZE]; 61 uint8_t link_status[DP_LINK_STATUS_SIZE];
62
63 struct drm_property *force_audio_property;
64}; 62};
65 63
66/** 64/**
@@ -1702,7 +1700,7 @@ intel_dp_set_property(struct drm_connector *connector,
1702 if (ret) 1700 if (ret)
1703 return ret; 1701 return ret;
1704 1702
1705 if (property == intel_dp->force_audio_property) { 1703 if (property == dev_priv->force_audio_property) {
1706 int i = val; 1704 int i = val;
1707 bool has_audio; 1705 bool has_audio;
1708 1706
@@ -1841,16 +1839,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
1841static void 1839static void
1842intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 1840intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
1843{ 1841{
1844 struct drm_device *dev = connector->dev; 1842 intel_attach_force_audio_property(connector);
1845
1846 intel_dp->force_audio_property =
1847 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
1848 if (intel_dp->force_audio_property) {
1849 intel_dp->force_audio_property->values[0] = -1;
1850 intel_dp->force_audio_property->values[1] = 1;
1851 drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
1852 }
1853
1854 intel_attach_broadcast_rgb_property(connector); 1843 intel_attach_broadcast_rgb_property(connector);
1855} 1844}
1856 1845
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 831d7a4a0d18..9ffa61eb4d7e 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -236,6 +236,7 @@ struct intel_unpin_work {
236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 236int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus); 237extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
238 238
239extern void intel_attach_force_audio_property(struct drm_connector *connector);
239extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 240extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
240 241
241extern void intel_crt_init(struct drm_device *dev); 242extern void intel_crt_init(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index f289b8642976..aa0a8e83142e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -45,7 +45,6 @@ struct intel_hdmi {
45 bool has_hdmi_sink; 45 bool has_hdmi_sink;
46 bool has_audio; 46 bool has_audio;
47 int force_audio; 47 int force_audio;
48 struct drm_property *force_audio_property;
49}; 48};
50 49
51static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder) 50static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
@@ -194,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
194 if (mode->clock > 165000) 193 if (mode->clock > 165000)
195 return MODE_CLOCK_HIGH; 194 return MODE_CLOCK_HIGH;
196 if (mode->clock < 20000) 195 if (mode->clock < 20000)
197 return MODE_CLOCK_HIGH; 196 return MODE_CLOCK_LOW;
198 197
199 if (mode->flags & DRM_MODE_FLAG_DBLSCAN) 198 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
200 return MODE_NO_DBLESCAN; 199 return MODE_NO_DBLESCAN;
@@ -287,7 +286,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
287 if (ret) 286 if (ret)
288 return ret; 287 return ret;
289 288
290 if (property == intel_hdmi->force_audio_property) { 289 if (property == dev_priv->force_audio_property) {
291 int i = val; 290 int i = val;
292 bool has_audio; 291 bool has_audio;
293 292
@@ -365,16 +364,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
365static void 364static void
366intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector) 365intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
367{ 366{
368 struct drm_device *dev = connector->dev; 367 intel_attach_force_audio_property(connector);
369
370 intel_hdmi->force_audio_property =
371 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
372 if (intel_hdmi->force_audio_property) {
373 intel_hdmi->force_audio_property->values[0] = -1;
374 intel_hdmi->force_audio_property->values[1] = 1;
375 drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
376 }
377
378 intel_attach_broadcast_rgb_property(connector); 368 intel_attach_broadcast_rgb_property(connector);
379} 369}
380 370
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index d3b903bce7c5..d98cee60b602 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -401,8 +401,7 @@ int intel_setup_gmbus(struct drm_device *dev)
401 bus->reg0 = i | GMBUS_RATE_100KHZ; 401 bus->reg0 = i | GMBUS_RATE_100KHZ;
402 402
403 /* XXX force bit banging until GMBUS is fully debugged */ 403 /* XXX force bit banging until GMBUS is fully debugged */
404 if (IS_GEN2(dev)) 404 bus->force_bit = intel_gpio_create(dev_priv, i);
405 bus->force_bit = intel_gpio_create(dev_priv, i);
406 } 405 }
407 406
408 intel_i2c_reset(dev_priv->dev); 407 intel_i2c_reset(dev_priv->dev);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 67cb076d271b..b28f7bd9f88a 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -727,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
727 DMI_MATCH(DMI_PRODUCT_NAME, "U800"), 727 DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
728 }, 728 },
729 }, 729 },
730 {
731 .callback = intel_no_lvds_dmi_callback,
732 .ident = "Asus EeeBox PC EB1007",
733 .matches = {
734 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
735 DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
736 },
737 },
730 738
731 { } /* terminating entry */ 739 { } /* terminating entry */
732}; 740};
diff --git a/drivers/gpu/drm/i915/intel_modes.c b/drivers/gpu/drm/i915/intel_modes.c
index 9034dd8f33c7..3b26a3ba02dd 100644
--- a/drivers/gpu/drm/i915/intel_modes.c
+++ b/drivers/gpu/drm/i915/intel_modes.c
@@ -81,6 +81,36 @@ int intel_ddc_get_modes(struct drm_connector *connector,
81 return ret; 81 return ret;
82} 82}
83 83
84static const char *force_audio_names[] = {
85 "off",
86 "auto",
87 "on",
88};
89
90void
91intel_attach_force_audio_property(struct drm_connector *connector)
92{
93 struct drm_device *dev = connector->dev;
94 struct drm_i915_private *dev_priv = dev->dev_private;
95 struct drm_property *prop;
96 int i;
97
98 prop = dev_priv->force_audio_property;
99 if (prop == NULL) {
100 prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
101 "audio",
102 ARRAY_SIZE(force_audio_names));
103 if (prop == NULL)
104 return;
105
106 for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
107 drm_property_add_enum(prop, i, i-1, force_audio_names[i]);
108
109 dev_priv->force_audio_property = prop;
110 }
111 drm_connector_attach_property(connector, prop, 0);
112}
113
84static const char *broadcast_rgb_names[] = { 114static const char *broadcast_rgb_names[] = {
85 "Full", 115 "Full",
86 "Limited 16:235", 116 "Limited 16:235",
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 754086f83941..30fe554d8936 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -148,8 +148,6 @@ struct intel_sdvo_connector {
148 int format_supported_num; 148 int format_supported_num;
149 struct drm_property *tv_format; 149 struct drm_property *tv_format;
150 150
151 struct drm_property *force_audio_property;
152
153 /* add the property for the SDVO-TV */ 151 /* add the property for the SDVO-TV */
154 struct drm_property *left; 152 struct drm_property *left;
155 struct drm_property *right; 153 struct drm_property *right;
@@ -1712,7 +1710,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
1712 if (ret) 1710 if (ret)
1713 return ret; 1711 return ret;
1714 1712
1715 if (property == intel_sdvo_connector->force_audio_property) { 1713 if (property == dev_priv->force_audio_property) {
1716 int i = val; 1714 int i = val;
1717 bool has_audio; 1715 bool has_audio;
1718 1716
@@ -2037,15 +2035,7 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
2037{ 2035{
2038 struct drm_device *dev = connector->base.base.dev; 2036 struct drm_device *dev = connector->base.base.dev;
2039 2037
2040 connector->force_audio_property = 2038 intel_attach_force_audio_property(&connector->base.base);
2041 drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
2042 if (connector->force_audio_property) {
2043 connector->force_audio_property->values[0] = -1;
2044 connector->force_audio_property->values[1] = 1;
2045 drm_connector_attach_property(&connector->base.base,
2046 connector->force_audio_property, 0);
2047 }
2048
2049 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) 2039 if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
2050 intel_attach_broadcast_rgb_property(&connector->base.base); 2040 intel_attach_broadcast_rgb_property(&connector->base.base);
2051} 2041}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 1084fa4d261b..54558a01969a 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -195,29 +195,10 @@ extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
195 195
196#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER() 196#define mga_flush_write_combine() DRM_WRITEMEMORYBARRIER()
197 197
198#if defined(__linux__) && defined(__alpha__)
199#define MGA_BASE(reg) ((unsigned long)(dev_priv->mmio->handle))
200#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
201
202#define MGA_DEREF(reg) (*(volatile u32 *)MGA_ADDR(reg))
203#define MGA_DEREF8(reg) (*(volatile u8 *)MGA_ADDR(reg))
204
205#define MGA_READ(reg) (_MGA_READ((u32 *)MGA_ADDR(reg)))
206#define MGA_READ8(reg) (_MGA_READ((u8 *)MGA_ADDR(reg)))
207#define MGA_WRITE(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF(reg) = val; } while (0)
208#define MGA_WRITE8(reg, val) do { DRM_WRITEMEMORYBARRIER(); MGA_DEREF8(reg) = val; } while (0)
209
210static inline u32 _MGA_READ(u32 *addr)
211{
212 DRM_MEMORYBARRIER();
213 return *(volatile u32 *)addr;
214}
215#else
216#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg)) 198#define MGA_READ8(reg) DRM_READ8(dev_priv->mmio, (reg))
217#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg)) 199#define MGA_READ(reg) DRM_READ32(dev_priv->mmio, (reg))
218#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val)) 200#define MGA_WRITE8(reg, val) DRM_WRITE8(dev_priv->mmio, (reg), (val))
219#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val)) 201#define MGA_WRITE(reg, val) DRM_WRITE32(dev_priv->mmio, (reg), (val))
220#endif
221 202
222#define DWGREG0 0x1c00 203#define DWGREG0 0x1c00
223#define DWGREG0_END 0x1dff 204#define DWGREG0_END 0x1dff
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index f0d459bb46e4..525744d593c1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -262,7 +262,6 @@ static bool nouveau_dsm_detect(void)
262 vga_count++; 262 vga_count++;
263 263
264 retval = nouveau_dsm_pci_probe(pdev); 264 retval = nouveau_dsm_pci_probe(pdev);
265 printk("ret val is %d\n", retval);
266 if (retval & NOUVEAU_DSM_HAS_MUX) 265 if (retval & NOUVEAU_DSM_HAS_MUX)
267 has_dsm |= 1; 266 has_dsm |= 1;
268 if (retval & NOUVEAU_DSM_HAS_OPT) 267 if (retval & NOUVEAU_DSM_HAS_OPT)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 4b9f4493c9f9..7347075ca5b8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -339,11 +339,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
339 int ret; 339 int ret;
340 340
341 if (dev_priv->chipset < 0x84) { 341 if (dev_priv->chipset < 0x84) {
342 ret = RING_SPACE(chan, 3); 342 ret = RING_SPACE(chan, 4);
343 if (ret) 343 if (ret)
344 return ret; 344 return ret;
345 345
346 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 2); 346 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 3);
347 OUT_RING (chan, NvSema);
347 OUT_RING (chan, sema->mem->start); 348 OUT_RING (chan, sema->mem->start);
348 OUT_RING (chan, 1); 349 OUT_RING (chan, 1);
349 } else 350 } else
@@ -351,10 +352,12 @@ semaphore_acquire(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
351 struct nouveau_vma *vma = &dev_priv->fence.bo->vma; 352 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
352 u64 offset = vma->offset + sema->mem->start; 353 u64 offset = vma->offset + sema->mem->start;
353 354
354 ret = RING_SPACE(chan, 5); 355 ret = RING_SPACE(chan, 7);
355 if (ret) 356 if (ret)
356 return ret; 357 return ret;
357 358
359 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
360 OUT_RING (chan, chan->vram_handle);
358 BEGIN_RING(chan, NvSubSw, 0x0010, 4); 361 BEGIN_RING(chan, NvSubSw, 0x0010, 4);
359 OUT_RING (chan, upper_32_bits(offset)); 362 OUT_RING (chan, upper_32_bits(offset));
360 OUT_RING (chan, lower_32_bits(offset)); 363 OUT_RING (chan, lower_32_bits(offset));
@@ -394,11 +397,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
394 int ret; 397 int ret;
395 398
396 if (dev_priv->chipset < 0x84) { 399 if (dev_priv->chipset < 0x84) {
397 ret = RING_SPACE(chan, 4); 400 ret = RING_SPACE(chan, 5);
398 if (ret) 401 if (ret)
399 return ret; 402 return ret;
400 403
401 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1); 404 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 2);
405 OUT_RING (chan, NvSema);
402 OUT_RING (chan, sema->mem->start); 406 OUT_RING (chan, sema->mem->start);
403 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1); 407 BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_RELEASE, 1);
404 OUT_RING (chan, 1); 408 OUT_RING (chan, 1);
@@ -407,10 +411,12 @@ semaphore_release(struct nouveau_channel *chan, struct nouveau_semaphore *sema)
407 struct nouveau_vma *vma = &dev_priv->fence.bo->vma; 411 struct nouveau_vma *vma = &dev_priv->fence.bo->vma;
408 u64 offset = vma->offset + sema->mem->start; 412 u64 offset = vma->offset + sema->mem->start;
409 413
410 ret = RING_SPACE(chan, 5); 414 ret = RING_SPACE(chan, 7);
411 if (ret) 415 if (ret)
412 return ret; 416 return ret;
413 417
418 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
419 OUT_RING (chan, chan->vram_handle);
414 BEGIN_RING(chan, NvSubSw, 0x0010, 4); 420 BEGIN_RING(chan, NvSubSw, 0x0010, 4);
415 OUT_RING (chan, upper_32_bits(offset)); 421 OUT_RING (chan, upper_32_bits(offset));
416 OUT_RING (chan, lower_32_bits(offset)); 422 OUT_RING (chan, lower_32_bits(offset));
@@ -504,22 +510,22 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
504 struct nouveau_gpuobj *obj = NULL; 510 struct nouveau_gpuobj *obj = NULL;
505 int ret; 511 int ret;
506 512
507 if (dev_priv->card_type >= NV_C0) 513 if (dev_priv->card_type < NV_C0) {
508 goto out_initialised; 514 /* Create an NV_SW object for various sync purposes */
515 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW);
516 if (ret)
517 return ret;
509 518
510 /* Create an NV_SW object for various sync purposes */ 519 ret = RING_SPACE(chan, 2);
511 ret = nouveau_gpuobj_gr_new(chan, NvSw, NV_SW); 520 if (ret)
512 if (ret) 521 return ret;
513 return ret;
514 522
515 /* we leave subchannel empty for nvc0 */ 523 BEGIN_RING(chan, NvSubSw, 0, 1);
516 ret = RING_SPACE(chan, 2); 524 OUT_RING (chan, NvSw);
517 if (ret) 525 FIRE_RING (chan);
518 return ret; 526 }
519 BEGIN_RING(chan, NvSubSw, 0, 1);
520 OUT_RING(chan, NvSw);
521 527
522 /* Create a DMA object for the shared cross-channel sync area. */ 528 /* Setup area of memory shared between all channels for x-chan sync */
523 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) { 529 if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
524 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem; 530 struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
525 531
@@ -534,23 +540,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
534 nouveau_gpuobj_ref(NULL, &obj); 540 nouveau_gpuobj_ref(NULL, &obj);
535 if (ret) 541 if (ret)
536 return ret; 542 return ret;
537
538 ret = RING_SPACE(chan, 2);
539 if (ret)
540 return ret;
541 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
542 OUT_RING(chan, NvSema);
543 } else {
544 ret = RING_SPACE(chan, 2);
545 if (ret)
546 return ret;
547 BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
548 OUT_RING (chan, chan->vram_handle); /* whole VM */
549 } 543 }
550 544
551 FIRE_RING(chan);
552
553out_initialised:
554 INIT_LIST_HEAD(&chan->fence.pending); 545 INIT_LIST_HEAD(&chan->fence.pending);
555 spin_lock_init(&chan->fence.lock); 546 spin_lock_init(&chan->fence.lock);
556 atomic_set(&chan->fence.last_sequence_irq, 0); 547 atomic_set(&chan->fence.last_sequence_irq, 0);
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index 053edf9d2f67..ba896e54b799 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -900,6 +900,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
900 } 900 }
901 /* NV11 and NV20 don't have this, they stop at 0x52. */ 901 /* NV11 and NV20 don't have this, they stop at 0x52. */
902 if (nv_gf4_disp_arch(dev)) { 902 if (nv_gf4_disp_arch(dev)) {
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
903 rd_cio_state(dev, head, regp, NV_CIO_CRE_53); 904 rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
904 rd_cio_state(dev, head, regp, NV_CIO_CRE_54); 905 rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
905 906
@@ -1003,6 +1004,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
1003 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0); 1004 nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
1004 } 1005 }
1005 1006
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
1006 wr_cio_state(dev, head, regp, NV_CIO_CRE_53); 1008 wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
1007 wr_cio_state(dev, head, regp, NV_CIO_CRE_54); 1009 wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
1008 1010
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 2960f583dc38..5ee14d216ce8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40))) 397 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
398 dma_bits = 40; 398 dma_bits = 40;
399 } else 399 } else
400 if (drm_pci_device_is_pcie(dev) && 400 if (0 && drm_pci_device_is_pcie(dev) &&
401 dev_priv->chipset > 0x40 && 401 dev_priv->chipset > 0x40 &&
402 dev_priv->chipset != 0x45) { 402 dev_priv->chipset != 0x45) {
403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39))) 403 if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
@@ -868,7 +868,9 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
868 nouveau_vm_unmap(&node->tmp_vma); 868 nouveau_vm_unmap(&node->tmp_vma);
869 nouveau_vm_put(&node->tmp_vma); 869 nouveau_vm_put(&node->tmp_vma);
870 } 870 }
871
871 mem->mm_node = NULL; 872 mem->mm_node = NULL;
873 kfree(node);
872} 874}
873 875
874static int 876static int
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 922fb6b664ed..ef9dec0e6f8b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -182,6 +182,11 @@ nouveau_perf_init(struct drm_device *dev)
182 entries = perf[2]; 182 entries = perf[2];
183 } 183 }
184 184
185 if (entries > NOUVEAU_PM_MAX_LEVEL) {
186 NV_DEBUG(dev, "perf table has too many entries - buggy vbios?\n");
187 entries = NOUVEAU_PM_MAX_LEVEL;
188 }
189
185 entry = perf + headerlen; 190 entry = perf + headerlen;
186 for (i = 0; i < entries; i++) { 191 for (i = 0; i < entries; i++) {
187 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl]; 192 struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c77111eca6ac..82fad914e648 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
458 dev_priv->gart_info.type = NOUVEAU_GART_HW; 458 dev_priv->gart_info.type = NOUVEAU_GART_HW;
459 dev_priv->gart_info.func = &nv50_sgdma_backend; 459 dev_priv->gart_info.func = &nv50_sgdma_backend;
460 } else 460 } else
461 if (drm_pci_device_is_pcie(dev) && 461 if (0 && drm_pci_device_is_pcie(dev) &&
462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) { 462 dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
463 if (nv44_graph_class(dev)) { 463 if (nv44_graph_class(dev)) {
464 dev_priv->gart_info.func = &nv44_sgdma_backend; 464 dev_priv->gart_info.func = &nv44_sgdma_backend;
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 38ea662568c1..144f79a350ae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -371,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
371 engine->vram.flags_valid = nv50_vram_flags_valid; 371 engine->vram.flags_valid = nv50_vram_flags_valid;
372 break; 372 break;
373 case 0xC0: 373 case 0xC0:
374 case 0xD0:
374 engine->instmem.init = nvc0_instmem_init; 375 engine->instmem.init = nvc0_instmem_init;
375 engine->instmem.takedown = nvc0_instmem_takedown; 376 engine->instmem.takedown = nvc0_instmem_takedown;
376 engine->instmem.suspend = nvc0_instmem_suspend; 377 engine->instmem.suspend = nvc0_instmem_suspend;
@@ -563,68 +564,68 @@ nouveau_card_init(struct drm_device *dev)
563 if (ret) 564 if (ret)
564 goto out_timer; 565 goto out_timer;
565 566
566 switch (dev_priv->card_type) { 567 if (!nouveau_noaccel) {
567 case NV_04: 568 switch (dev_priv->card_type) {
568 nv04_graph_create(dev); 569 case NV_04:
569 break; 570 nv04_graph_create(dev);
570 case NV_10: 571 break;
571 nv10_graph_create(dev); 572 case NV_10:
572 break; 573 nv10_graph_create(dev);
573 case NV_20: 574 break;
574 case NV_30: 575 case NV_20:
575 nv20_graph_create(dev); 576 case NV_30:
576 break; 577 nv20_graph_create(dev);
577 case NV_40: 578 break;
578 nv40_graph_create(dev); 579 case NV_40:
579 break; 580 nv40_graph_create(dev);
580 case NV_50: 581 break;
581 nv50_graph_create(dev); 582 case NV_50:
582 break; 583 nv50_graph_create(dev);
583 case NV_C0: 584 break;
584 nvc0_graph_create(dev); 585 case NV_C0:
585 break; 586 nvc0_graph_create(dev);
586 default: 587 break;
587 break; 588 default:
588 } 589 break;
589 590 }
590 switch (dev_priv->chipset) {
591 case 0x84:
592 case 0x86:
593 case 0x92:
594 case 0x94:
595 case 0x96:
596 case 0xa0:
597 nv84_crypt_create(dev);
598 break;
599 }
600 591
601 switch (dev_priv->card_type) {
602 case NV_50:
603 switch (dev_priv->chipset) { 592 switch (dev_priv->chipset) {
604 case 0xa3: 593 case 0x84:
605 case 0xa5: 594 case 0x86:
606 case 0xa8: 595 case 0x92:
607 case 0xaf: 596 case 0x94:
608 nva3_copy_create(dev); 597 case 0x96:
598 case 0xa0:
599 nv84_crypt_create(dev);
609 break; 600 break;
610 } 601 }
611 break;
612 case NV_C0:
613 nvc0_copy_create(dev, 0);
614 nvc0_copy_create(dev, 1);
615 break;
616 default:
617 break;
618 }
619 602
620 if (dev_priv->card_type == NV_40) 603 switch (dev_priv->card_type) {
621 nv40_mpeg_create(dev); 604 case NV_50:
622 else 605 switch (dev_priv->chipset) {
623 if (dev_priv->card_type == NV_50 && 606 case 0xa3:
624 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0)) 607 case 0xa5:
625 nv50_mpeg_create(dev); 608 case 0xa8:
609 case 0xaf:
610 nva3_copy_create(dev);
611 break;
612 }
613 break;
614 case NV_C0:
615 nvc0_copy_create(dev, 0);
616 nvc0_copy_create(dev, 1);
617 break;
618 default:
619 break;
620 }
621
622 if (dev_priv->card_type == NV_40)
623 nv40_mpeg_create(dev);
624 else
625 if (dev_priv->card_type == NV_50 &&
626 (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
627 nv50_mpeg_create(dev);
626 628
627 if (!nouveau_noaccel) {
628 for (e = 0; e < NVOBJ_ENGINE_NR; e++) { 629 for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
629 if (dev_priv->eng[e]) { 630 if (dev_priv->eng[e]) {
630 ret = dev_priv->eng[e]->init(dev, e); 631 ret = dev_priv->eng[e]->init(dev, e);
@@ -880,8 +881,8 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
880 881
881#ifdef __BIG_ENDIAN 882#ifdef __BIG_ENDIAN
882 /* Put the card in BE mode if it's not */ 883 /* Put the card in BE mode if it's not */
883 if (nv_rd32(dev, NV03_PMC_BOOT_1)) 884 if (nv_rd32(dev, NV03_PMC_BOOT_1) != 0x01000001)
884 nv_wr32(dev, NV03_PMC_BOOT_1, 0x00000001); 885 nv_wr32(dev, NV03_PMC_BOOT_1, 0x01000001);
885 886
886 DRM_MEMORYBARRIER(); 887 DRM_MEMORYBARRIER();
887#endif 888#endif
@@ -922,6 +923,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
922 dev_priv->card_type = NV_50; 923 dev_priv->card_type = NV_50;
923 break; 924 break;
924 case 0xc0: 925 case 0xc0:
926 case 0xd0:
925 dev_priv->card_type = NV_C0; 927 dev_priv->card_type = NV_C0;
926 break; 928 break;
927 default: 929 default:
diff --git a/drivers/gpu/drm/nouveau/nouveau_vm.c b/drivers/gpu/drm/nouveau/nouveau_vm.c
index 0059e6f58a8b..519a6b4bba46 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vm.c
@@ -58,6 +58,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
58 num -= len; 58 num -= len;
59 pte += len; 59 pte += len;
60 if (unlikely(end >= max)) { 60 if (unlikely(end >= max)) {
61 phys += len << (bits + 12);
61 pde++; 62 pde++;
62 pte = 0; 63 pte = 0;
63 } 64 }
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index 3c78bc81357e..f1a3ae491995 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -376,7 +376,10 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
376 */ 376 */
377 377
378 /* framebuffer can be larger than crtc scanout area. */ 378 /* framebuffer can be larger than crtc scanout area. */
379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 379 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
380 XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
381 regp->CRTC[NV_CIO_CRE_42] =
382 XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
380 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ? 383 regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
381 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00; 384 MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
382 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) | 385 regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
@@ -824,8 +827,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
824 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3; 827 regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
825 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = 828 regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
826 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8); 829 XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
830 regp->CRTC[NV_CIO_CRE_42] =
831 XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
827 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX); 832 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
828 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX); 833 crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
834 crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
829 835
830 /* Update the framebuffer location. */ 836 /* Update the framebuffer location. */
831 regp->fb_start = nv_crtc->fb.offset & ~3; 837 regp->fb_start = nv_crtc->fb.offset & ~3;
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 74a3f6872701..08da478ba544 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -409,7 +409,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
409 struct nouveau_channel *evo = dispc->sync; 409 struct nouveau_channel *evo = dispc->sync;
410 int ret; 410 int ret;
411 411
412 ret = RING_SPACE(evo, 24); 412 ret = RING_SPACE(evo, chan ? 25 : 27);
413 if (unlikely(ret)) 413 if (unlikely(ret))
414 return ret; 414 return ret;
415 415
@@ -458,8 +458,19 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb,
458 /* queue the flip on the crtc's "display sync" channel */ 458 /* queue the flip on the crtc's "display sync" channel */
459 BEGIN_RING(evo, 0, 0x0100, 1); 459 BEGIN_RING(evo, 0, 0x0100, 1);
460 OUT_RING (evo, 0xfffe0000); 460 OUT_RING (evo, 0xfffe0000);
461 BEGIN_RING(evo, 0, 0x0084, 5); 461 if (chan) {
462 OUT_RING (evo, chan ? 0x00000100 : 0x00000010); 462 BEGIN_RING(evo, 0, 0x0084, 1);
463 OUT_RING (evo, 0x00000100);
464 } else {
465 BEGIN_RING(evo, 0, 0x0084, 1);
466 OUT_RING (evo, 0x00000010);
467 /* allows gamma somehow, PDISP will bitch at you if
468 * you don't wait for vblank before changing this..
469 */
470 BEGIN_RING(evo, 0, 0x00e0, 1);
471 OUT_RING (evo, 0x40000000);
472 }
473 BEGIN_RING(evo, 0, 0x0088, 4);
463 OUT_RING (evo, dispc->sem.offset); 474 OUT_RING (evo, dispc->sem.offset);
464 OUT_RING (evo, 0xf00d0000 | dispc->sem.value); 475 OUT_RING (evo, 0xf00d0000 | dispc->sem.value);
465 OUT_RING (evo, 0x74b1e000); 476 OUT_RING (evo, 0x74b1e000);
diff --git a/drivers/gpu/drm/nouveau/nvreg.h b/drivers/gpu/drm/nouveau/nvreg.h
index fe0f253089ac..bbfb1a68fb11 100644
--- a/drivers/gpu/drm/nouveau/nvreg.h
+++ b/drivers/gpu/drm/nouveau/nvreg.h
@@ -277,6 +277,8 @@
277# define NV_CIO_CRE_EBR_VDE_11 2:2 277# define NV_CIO_CRE_EBR_VDE_11 2:2
278# define NV_CIO_CRE_EBR_VRS_11 4:4 278# define NV_CIO_CRE_EBR_VRS_11 4:4
279# define NV_CIO_CRE_EBR_VBS_11 6:6 279# define NV_CIO_CRE_EBR_VBS_11 6:6
280# define NV_CIO_CRE_42 0x42
281# define NV_CIO_CRE_42_OFFSET_11 6:6
280# define NV_CIO_CRE_43 0x43 282# define NV_CIO_CRE_43 0x43
281# define NV_CIO_CRE_44 0x44 /* head control */ 283# define NV_CIO_CRE_44 0x44 /* head control */
282# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */ 284# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
diff --git a/drivers/gpu/drm/radeon/Kconfig b/drivers/gpu/drm/radeon/Kconfig
index 9746fee59f56..ea92bbe3ed37 100644
--- a/drivers/gpu/drm/radeon/Kconfig
+++ b/drivers/gpu/drm/radeon/Kconfig
@@ -28,11 +28,4 @@ config DRM_RADEON_KMS
28 The kernel will also perform security check on command stream 28 The kernel will also perform security check on command stream
29 provided by the user, we want to catch and forbid any illegal use 29 provided by the user, we want to catch and forbid any illegal use
30 of the GPU such as DMA into random system memory or into memory 30 of the GPU such as DMA into random system memory or into memory
31 not owned by the process supplying the command stream. This part 31 not owned by the process supplying the command stream.
32 of the code is still incomplete and this why we propose that patch
33 as a staging driver addition, future security might forbid current
34 experimental userspace to run.
35
36 This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
37 (radeon up to X1950). Works is underway to provide support for R6XX,
38 R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 49611e2365d9..1b50ad8919d5 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1200,6 +1200,7 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PARAMETERS_V3
1200#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10 1200#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF 0x10
1201#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11 1201#define EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING 0x11
1202#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12 1202#define EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION 0x12
1203#define EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP 0x14
1203 1204
1204// ucConfig 1205// ucConfig
1205#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03 1206#define EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_MASK 0x03
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index ec848787d7d9..9541995e4b21 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -671,6 +671,13 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
671 DISPPLL_CONFIG_DUAL_LINK; 671 DISPPLL_CONFIG_DUAL_LINK;
672 } 672 }
673 } 673 }
674 if (radeon_encoder_is_dp_bridge(encoder)) {
675 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
676 struct radeon_encoder *ext_radeon_encoder = to_radeon_encoder(ext_encoder);
677 args.v3.sInput.ucExtTransmitterID = ext_radeon_encoder->encoder_id;
678 } else
679 args.v3.sInput.ucExtTransmitterID = 0;
680
674 atom_execute_table(rdev->mode_info.atom_context, 681 atom_execute_table(rdev->mode_info.atom_context,
675 index, (uint32_t *)&args); 682 index, (uint32_t *)&args);
676 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 683 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
@@ -1045,7 +1052,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1045 uint64_t fb_location; 1052 uint64_t fb_location;
1046 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1053 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1047 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE); 1054 u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
1048 u32 tmp; 1055 u32 tmp, viewport_w, viewport_h;
1049 int r; 1056 int r;
1050 1057
1051 /* no fb bound */ 1058 /* no fb bound */
@@ -1171,8 +1178,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1171 y &= ~1; 1178 y &= ~1;
1172 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset, 1179 WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
1173 (x << 16) | y); 1180 (x << 16) | y);
1181 viewport_w = crtc->mode.hdisplay;
1182 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1174 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1183 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1175 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1184 (viewport_w << 16) | viewport_h);
1176 1185
1177 /* pageflip setup */ 1186 /* pageflip setup */
1178 /* make sure flip is at vb rather than hb */ 1187 /* make sure flip is at vb rather than hb */
@@ -1213,7 +1222,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1213 uint64_t fb_location; 1222 uint64_t fb_location;
1214 uint32_t fb_format, fb_pitch_pixels, tiling_flags; 1223 uint32_t fb_format, fb_pitch_pixels, tiling_flags;
1215 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE; 1224 u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
1216 u32 tmp; 1225 u32 tmp, viewport_w, viewport_h;
1217 int r; 1226 int r;
1218 1227
1219 /* no fb bound */ 1228 /* no fb bound */
@@ -1338,8 +1347,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1338 y &= ~1; 1347 y &= ~1;
1339 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset, 1348 WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
1340 (x << 16) | y); 1349 (x << 16) | y);
1350 viewport_w = crtc->mode.hdisplay;
1351 viewport_h = (crtc->mode.vdisplay + 1) & ~1;
1341 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1352 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1342 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1353 (viewport_w << 16) | viewport_h);
1343 1354
1344 /* pageflip setup */ 1355 /* pageflip setup */
1345 /* make sure flip is at vb rather than hb */ 1356 /* make sure flip is at vb rather than hb */
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.c b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
index e148ab04b80b..7b4eeb7b4a8c 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.c
@@ -39,17 +39,335 @@
39 39
40const u32 cayman_default_state[] = 40const u32 cayman_default_state[] =
41{ 41{
42 /* XXX fill in additional blit state */ 42 0xc0066900,
43 0x00000000,
44 0x00000060, /* DB_RENDER_CONTROL */
45 0x00000000, /* DB_COUNT_CONTROL */
46 0x00000000, /* DB_DEPTH_VIEW */
47 0x0000002a, /* DB_RENDER_OVERRIDE */
48 0x00000000, /* DB_RENDER_OVERRIDE2 */
49 0x00000000, /* DB_HTILE_DATA_BASE */
43 50
44 0xc0026900, 51 0xc0026900,
45 0x00000316, 52 0x0000000a,
46 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */ 53 0x00000000, /* DB_STENCIL_CLEAR */
47 0x00000010, /* */ 54 0x00000000, /* DB_DEPTH_CLEAR */
55
56 0xc0036900,
57 0x0000000f,
58 0x00000000, /* DB_DEPTH_INFO */
59 0x00000000, /* DB_Z_INFO */
60 0x00000000, /* DB_STENCIL_INFO */
61
62 0xc0016900,
63 0x00000080,
64 0x00000000, /* PA_SC_WINDOW_OFFSET */
65
66 0xc00d6900,
67 0x00000083,
68 0x0000ffff, /* PA_SC_CLIPRECT_RULE */
69 0x00000000, /* PA_SC_CLIPRECT_0_TL */
70 0x20002000, /* PA_SC_CLIPRECT_0_BR */
71 0x00000000,
72 0x20002000,
73 0x00000000,
74 0x20002000,
75 0x00000000,
76 0x20002000,
77 0xaaaaaaaa, /* PA_SC_EDGERULE */
78 0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
79 0x0000000f, /* CB_TARGET_MASK */
80 0x0000000f, /* CB_SHADER_MASK */
81
82 0xc0226900,
83 0x00000094,
84 0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
85 0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
86 0x80000000,
87 0x20002000,
88 0x80000000,
89 0x20002000,
90 0x80000000,
91 0x20002000,
92 0x80000000,
93 0x20002000,
94 0x80000000,
95 0x20002000,
96 0x80000000,
97 0x20002000,
98 0x80000000,
99 0x20002000,
100 0x80000000,
101 0x20002000,
102 0x80000000,
103 0x20002000,
104 0x80000000,
105 0x20002000,
106 0x80000000,
107 0x20002000,
108 0x80000000,
109 0x20002000,
110 0x80000000,
111 0x20002000,
112 0x80000000,
113 0x20002000,
114 0x80000000,
115 0x20002000,
116 0x00000000, /* PA_SC_VPORT_ZMIN_0 */
117 0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
118
119 0xc0016900,
120 0x000000d4,
121 0x00000000, /* SX_MISC */
48 122
49 0xc0026900, 123 0xc0026900,
50 0x000000d9, 124 0x000000d9,
51 0x00000000, /* CP_RINGID */ 125 0x00000000, /* CP_RINGID */
52 0x00000000, /* CP_VMID */ 126 0x00000000, /* CP_VMID */
127
128 0xc0096900,
129 0x00000100,
130 0x00ffffff, /* VGT_MAX_VTX_INDX */
131 0x00000000, /* VGT_MIN_VTX_INDX */
132 0x00000000, /* VGT_INDX_OFFSET */
133 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
134 0x00000000, /* SX_ALPHA_TEST_CONTROL */
135 0x00000000, /* CB_BLEND_RED */
136 0x00000000, /* CB_BLEND_GREEN */
137 0x00000000, /* CB_BLEND_BLUE */
138 0x00000000, /* CB_BLEND_ALPHA */
139
140 0xc0016900,
141 0x00000187,
142 0x00000100, /* SPI_VS_OUT_ID_0 */
143
144 0xc0026900,
145 0x00000191,
146 0x00000100, /* SPI_PS_INPUT_CNTL_0 */
147 0x00000101, /* SPI_PS_INPUT_CNTL_1 */
148
149 0xc0016900,
150 0x000001b1,
151 0x00000000, /* SPI_VS_OUT_CONFIG */
152
153 0xc0106900,
154 0x000001b3,
155 0x20000001, /* SPI_PS_IN_CONTROL_0 */
156 0x00000000, /* SPI_PS_IN_CONTROL_1 */
157 0x00000000, /* SPI_INTERP_CONTROL_0 */
158 0x00000000, /* SPI_INPUT_Z */
159 0x00000000, /* SPI_FOG_CNTL */
160 0x00100000, /* SPI_BARYC_CNTL */
161 0x00000000, /* SPI_PS_IN_CONTROL_2 */
162 0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
163 0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
164 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
165 0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
166 0x00000000, /* SPI_GPR_MGMT */
167 0x00000000, /* SPI_LDS_MGMT */
168 0x00000000, /* SPI_STACK_MGMT */
169 0x00000000, /* SPI_WAVE_MGMT_1 */
170 0x00000000, /* SPI_WAVE_MGMT_2 */
171
172 0xc0016900,
173 0x000001e0,
174 0x00000000, /* CB_BLEND0_CONTROL */
175
176 0xc00e6900,
177 0x00000200,
178 0x00000000, /* DB_DEPTH_CONTROL */
179 0x00000000, /* DB_EQAA */
180 0x00cc0010, /* CB_COLOR_CONTROL */
181 0x00000210, /* DB_SHADER_CONTROL */
182 0x00010000, /* PA_CL_CLIP_CNTL */
183 0x00000004, /* PA_SU_SC_MODE_CNTL */
184 0x00000100, /* PA_CL_VTE_CNTL */
185 0x00000000, /* PA_CL_VS_OUT_CNTL */
186 0x00000000, /* PA_CL_NANINF_CNTL */
187 0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
188 0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
189 0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
190 0x00000000, /* */
191 0x00000000, /* */
192
193 0xc0026900,
194 0x00000229,
195 0x00000000, /* SQ_PGM_START_FS */
196 0x00000000,
197
198 0xc0016900,
199 0x0000023b,
200 0x00000000, /* SQ_LDS_ALLOC_PS */
201
202 0xc0066900,
203 0x00000240,
204 0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
205 0x00000000,
206 0x00000000,
207 0x00000000,
208 0x00000000,
209 0x00000000,
210
211 0xc0046900,
212 0x00000247,
213 0x00000000, /* SQ_GS_VERT_ITEMSIZE */
214 0x00000000,
215 0x00000000,
216 0x00000000,
217
218 0xc0116900,
219 0x00000280,
220 0x00000000, /* PA_SU_POINT_SIZE */
221 0x00000000, /* PA_SU_POINT_MINMAX */
222 0x00000008, /* PA_SU_LINE_CNTL */
223 0x00000000, /* PA_SC_LINE_STIPPLE */
224 0x00000000, /* VGT_OUTPUT_PATH_CNTL */
225 0x00000000, /* VGT_HOS_CNTL */
226 0x00000000,
227 0x00000000,
228 0x00000000,
229 0x00000000,
230 0x00000000,
231 0x00000000,
232 0x00000000,
233 0x00000000,
234 0x00000000,
235 0x00000000,
236 0x00000000, /* VGT_GS_MODE */
237
238 0xc0026900,
239 0x00000292,
240 0x00000000, /* PA_SC_MODE_CNTL_0 */
241 0x00000000, /* PA_SC_MODE_CNTL_1 */
242
243 0xc0016900,
244 0x000002a1,
245 0x00000000, /* VGT_PRIMITIVEID_EN */
246
247 0xc0016900,
248 0x000002a5,
249 0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
250
251 0xc0026900,
252 0x000002a8,
253 0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
254 0x00000000,
255
256 0xc0026900,
257 0x000002ad,
258 0x00000000, /* VGT_REUSE_OFF */
259 0x00000000,
260
261 0xc0016900,
262 0x000002d5,
263 0x00000000, /* VGT_SHADER_STAGES_EN */
264
265 0xc0016900,
266 0x000002dc,
267 0x0000aa00, /* DB_ALPHA_TO_MASK */
268
269 0xc0066900,
270 0x000002de,
271 0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
272 0x00000000,
273 0x00000000,
274 0x00000000,
275 0x00000000,
276 0x00000000,
277
278 0xc0026900,
279 0x000002e5,
280 0x00000000, /* VGT_STRMOUT_CONFIG */
281 0x00000000,
282
283 0xc01b6900,
284 0x000002f5,
285 0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
286 0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
287 0x00000000, /* PA_SC_LINE_CNTL */
288 0x00000000, /* PA_SC_AA_CONFIG */
289 0x00000005, /* PA_SU_VTX_CNTL */
290 0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
291 0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
292 0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
293 0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
294 0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
295 0x00000000,
296 0x00000000,
297 0x00000000,
298 0x00000000,
299 0x00000000,
300 0x00000000,
301 0x00000000,
302 0x00000000,
303 0x00000000,
304 0x00000000,
305 0x00000000,
306 0x00000000,
307 0x00000000,
308 0x00000000,
309 0x00000000,
310 0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
311 0xffffffff,
312
313 0xc0026900,
314 0x00000316,
315 0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
316 0x00000010, /* */
317};
318
319const u32 cayman_vs[] =
320{
321 0x00000004,
322 0x80400400,
323 0x0000a03c,
324 0x95000688,
325 0x00004000,
326 0x15000688,
327 0x00000000,
328 0x88000000,
329 0x04000000,
330 0x67961001,
331#ifdef __BIG_ENDIAN
332 0x00020000,
333#else
334 0x00000000,
335#endif
336 0x00000000,
337 0x04000000,
338 0x67961000,
339#ifdef __BIG_ENDIAN
340 0x00020008,
341#else
342 0x00000008,
343#endif
344 0x00000000,
345};
346
347const u32 cayman_ps[] =
348{
349 0x00000004,
350 0xa00c0000,
351 0x00000008,
352 0x80400000,
353 0x00000000,
354 0x95000688,
355 0x00000000,
356 0x88000000,
357 0x00380400,
358 0x00146b10,
359 0x00380000,
360 0x20146b10,
361 0x00380400,
362 0x40146b00,
363 0x80380000,
364 0x60146b00,
365 0x00000010,
366 0x000d1000,
367 0xb0800000,
368 0x00000000,
53}; 369};
54 370
371const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
372const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
55const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state); 373const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
diff --git a/drivers/gpu/drm/radeon/cayman_blit_shaders.h b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
index 33b75e5d0fa4..f5d0e9a60267 100644
--- a/drivers/gpu/drm/radeon/cayman_blit_shaders.h
+++ b/drivers/gpu/drm/radeon/cayman_blit_shaders.h
@@ -25,8 +25,11 @@
25#ifndef CAYMAN_BLIT_SHADERS_H 25#ifndef CAYMAN_BLIT_SHADERS_H
26#define CAYMAN_BLIT_SHADERS_H 26#define CAYMAN_BLIT_SHADERS_H
27 27
28extern const u32 cayman_ps[];
29extern const u32 cayman_vs[];
28extern const u32 cayman_default_state[]; 30extern const u32 cayman_default_state[];
29 31
32extern const u32 cayman_ps_size, cayman_vs_size;
30extern const u32 cayman_default_size; 33extern const u32 cayman_default_size;
31 34
32#endif 35#endif
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 7c37638095f7..445af7981637 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -88,21 +88,40 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
88/* get temperature in millidegrees */ 88/* get temperature in millidegrees */
89int evergreen_get_temp(struct radeon_device *rdev) 89int evergreen_get_temp(struct radeon_device *rdev)
90{ 90{
91 u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >> 91 u32 temp, toffset;
92 ASIC_T_SHIFT; 92 int actual_temp = 0;
93 u32 actual_temp = 0; 93
94 94 if (rdev->family == CHIP_JUNIPER) {
95 if (temp & 0x400) 95 toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
96 actual_temp = -256; 96 TOFFSET_SHIFT;
97 else if (temp & 0x200) 97 temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
98 actual_temp = 255; 98 TS0_ADC_DOUT_SHIFT;
99 else if (temp & 0x100) { 99
100 actual_temp = temp & 0x1ff; 100 if (toffset & 0x100)
101 actual_temp |= ~0x1ff; 101 actual_temp = temp / 2 - (0x200 - toffset);
102 } else 102 else
103 actual_temp = temp & 0xff; 103 actual_temp = temp / 2 + toffset;
104
105 actual_temp = actual_temp * 1000;
106
107 } else {
108 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
109 ASIC_T_SHIFT;
110
111 if (temp & 0x400)
112 actual_temp = -256;
113 else if (temp & 0x200)
114 actual_temp = 255;
115 else if (temp & 0x100) {
116 actual_temp = temp & 0x1ff;
117 actual_temp |= ~0x1ff;
118 } else
119 actual_temp = temp & 0xff;
104 120
105 return (actual_temp * 1000) / 2; 121 actual_temp = (actual_temp * 1000) / 2;
122 }
123
124 return actual_temp;
106} 125}
107 126
108int sumo_get_temp(struct radeon_device *rdev) 127int sumo_get_temp(struct radeon_device *rdev)
@@ -121,11 +140,17 @@ void evergreen_pm_misc(struct radeon_device *rdev)
121 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 140 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
122 141
123 if (voltage->type == VOLTAGE_SW) { 142 if (voltage->type == VOLTAGE_SW) {
 143 /* 0xff01 is a flag rather than an actual voltage */
144 if (voltage->voltage == 0xff01)
145 return;
124 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) { 146 if (voltage->voltage && (voltage->voltage != rdev->pm.current_vddc)) {
125 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 147 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
126 rdev->pm.current_vddc = voltage->voltage; 148 rdev->pm.current_vddc = voltage->voltage;
127 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage); 149 DRM_DEBUG("Setting: vddc: %d\n", voltage->voltage);
128 } 150 }
 151 /* 0xff01 is a flag rather than an actual voltage */
152 if (voltage->vddci == 0xff01)
153 return;
129 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) { 154 if (voltage->vddci && (voltage->vddci != rdev->pm.current_vddci)) {
130 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI); 155 radeon_atom_set_voltage(rdev, voltage->vddci, SET_VOLTAGE_TYPE_ASIC_VDDCI);
131 rdev->pm.current_vddci = voltage->vddci; 156 rdev->pm.current_vddci = voltage->vddci;
@@ -1415,6 +1440,8 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1415 case CHIP_CEDAR: 1440 case CHIP_CEDAR:
1416 case CHIP_REDWOOD: 1441 case CHIP_REDWOOD:
1417 case CHIP_PALM: 1442 case CHIP_PALM:
1443 case CHIP_SUMO:
1444 case CHIP_SUMO2:
1418 case CHIP_TURKS: 1445 case CHIP_TURKS:
1419 case CHIP_CAICOS: 1446 case CHIP_CAICOS:
1420 force_no_swizzle = false; 1447 force_no_swizzle = false;
@@ -1544,6 +1571,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev)
1544 case CHIP_REDWOOD: 1571 case CHIP_REDWOOD:
1545 case CHIP_CEDAR: 1572 case CHIP_CEDAR:
1546 case CHIP_PALM: 1573 case CHIP_PALM:
1574 case CHIP_SUMO:
1575 case CHIP_SUMO2:
1547 case CHIP_TURKS: 1576 case CHIP_TURKS:
1548 case CHIP_CAICOS: 1577 case CHIP_CAICOS:
1549 default: 1578 default:
@@ -1689,6 +1718,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1689 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1718 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1690 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1719 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1691 break; 1720 break;
1721 case CHIP_SUMO:
1722 rdev->config.evergreen.num_ses = 1;
1723 rdev->config.evergreen.max_pipes = 4;
1724 rdev->config.evergreen.max_tile_pipes = 2;
1725 if (rdev->pdev->device == 0x9648)
1726 rdev->config.evergreen.max_simds = 3;
1727 else if ((rdev->pdev->device == 0x9647) ||
1728 (rdev->pdev->device == 0x964a))
1729 rdev->config.evergreen.max_simds = 4;
1730 else
1731 rdev->config.evergreen.max_simds = 5;
1732 rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
1733 rdev->config.evergreen.max_gprs = 256;
1734 rdev->config.evergreen.max_threads = 248;
1735 rdev->config.evergreen.max_gs_threads = 32;
1736 rdev->config.evergreen.max_stack_entries = 256;
1737 rdev->config.evergreen.sx_num_of_sets = 4;
1738 rdev->config.evergreen.sx_max_export_size = 256;
1739 rdev->config.evergreen.sx_max_export_pos_size = 64;
1740 rdev->config.evergreen.sx_max_export_smx_size = 192;
1741 rdev->config.evergreen.max_hw_contexts = 8;
1742 rdev->config.evergreen.sq_num_cf_insts = 2;
1743
1744 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1745 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1746 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1747 break;
1748 case CHIP_SUMO2:
1749 rdev->config.evergreen.num_ses = 1;
1750 rdev->config.evergreen.max_pipes = 4;
1751 rdev->config.evergreen.max_tile_pipes = 4;
1752 rdev->config.evergreen.max_simds = 2;
1753 rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
1754 rdev->config.evergreen.max_gprs = 256;
1755 rdev->config.evergreen.max_threads = 248;
1756 rdev->config.evergreen.max_gs_threads = 32;
1757 rdev->config.evergreen.max_stack_entries = 512;
1758 rdev->config.evergreen.sx_num_of_sets = 4;
1759 rdev->config.evergreen.sx_max_export_size = 256;
1760 rdev->config.evergreen.sx_max_export_pos_size = 64;
1761 rdev->config.evergreen.sx_max_export_smx_size = 192;
1762 rdev->config.evergreen.max_hw_contexts = 8;
1763 rdev->config.evergreen.sq_num_cf_insts = 2;
1764
1765 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1766 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1767 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1768 break;
1692 case CHIP_BARTS: 1769 case CHIP_BARTS:
1693 rdev->config.evergreen.num_ses = 2; 1770 rdev->config.evergreen.num_ses = 2;
1694 rdev->config.evergreen.max_pipes = 4; 1771 rdev->config.evergreen.max_pipes = 4;
@@ -2039,6 +2116,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2039 switch (rdev->family) { 2116 switch (rdev->family) {
2040 case CHIP_CEDAR: 2117 case CHIP_CEDAR:
2041 case CHIP_PALM: 2118 case CHIP_PALM:
2119 case CHIP_SUMO:
2120 case CHIP_SUMO2:
2042 case CHIP_CAICOS: 2121 case CHIP_CAICOS:
2043 /* no vertex cache */ 2122 /* no vertex cache */
2044 sq_config &= ~VC_ENABLE; 2123 sq_config &= ~VC_ENABLE;
@@ -2060,6 +2139,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2060 switch (rdev->family) { 2139 switch (rdev->family) {
2061 case CHIP_CEDAR: 2140 case CHIP_CEDAR:
2062 case CHIP_PALM: 2141 case CHIP_PALM:
2142 case CHIP_SUMO:
2143 case CHIP_SUMO2:
2063 ps_thread_count = 96; 2144 ps_thread_count = 96;
2064 break; 2145 break;
2065 default: 2146 default:
@@ -2099,6 +2180,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2099 switch (rdev->family) { 2180 switch (rdev->family) {
2100 case CHIP_CEDAR: 2181 case CHIP_CEDAR:
2101 case CHIP_PALM: 2182 case CHIP_PALM:
2183 case CHIP_SUMO:
2184 case CHIP_SUMO2:
2102 case CHIP_CAICOS: 2185 case CHIP_CAICOS:
2103 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY); 2186 vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
2104 break; 2187 break;
@@ -2618,28 +2701,25 @@ static inline u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
2618 2701
2619int evergreen_irq_process(struct radeon_device *rdev) 2702int evergreen_irq_process(struct radeon_device *rdev)
2620{ 2703{
2621 u32 wptr = evergreen_get_ih_wptr(rdev); 2704 u32 wptr;
2622 u32 rptr = rdev->ih.rptr; 2705 u32 rptr;
2623 u32 src_id, src_data; 2706 u32 src_id, src_data;
2624 u32 ring_index; 2707 u32 ring_index;
2625 unsigned long flags; 2708 unsigned long flags;
2626 bool queue_hotplug = false; 2709 bool queue_hotplug = false;
2627 2710
2628 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 2711 if (!rdev->ih.enabled || rdev->shutdown)
2629 if (!rdev->ih.enabled)
2630 return IRQ_NONE; 2712 return IRQ_NONE;
2631 2713
2632 spin_lock_irqsave(&rdev->ih.lock, flags); 2714 wptr = evergreen_get_ih_wptr(rdev);
2715 rptr = rdev->ih.rptr;
2716 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
2633 2717
2718 spin_lock_irqsave(&rdev->ih.lock, flags);
2634 if (rptr == wptr) { 2719 if (rptr == wptr) {
2635 spin_unlock_irqrestore(&rdev->ih.lock, flags); 2720 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2636 return IRQ_NONE; 2721 return IRQ_NONE;
2637 } 2722 }
2638 if (rdev->shutdown) {
2639 spin_unlock_irqrestore(&rdev->ih.lock, flags);
2640 return IRQ_NONE;
2641 }
2642
2643restart_ih: 2723restart_ih:
2644 /* display interrupts */ 2724 /* display interrupts */
2645 evergreen_irq_ack(rdev); 2725 evergreen_irq_ack(rdev);
@@ -2868,7 +2948,7 @@ restart_ih:
2868 radeon_fence_process(rdev); 2948 radeon_fence_process(rdev);
2869 break; 2949 break;
2870 case 233: /* GUI IDLE */ 2950 case 233: /* GUI IDLE */
2871 DRM_DEBUG("IH: CP EOP\n"); 2951 DRM_DEBUG("IH: GUI idle\n");
2872 rdev->pm.gui_idle = true; 2952 rdev->pm.gui_idle = true;
2873 wake_up(&rdev->irq.idle_queue); 2953 wake_up(&rdev->irq.idle_queue);
2874 break; 2954 break;
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index ba06a69c6de8..57f3bc17b87e 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -31,6 +31,7 @@
31 31
32#include "evergreend.h" 32#include "evergreend.h"
33#include "evergreen_blit_shaders.h" 33#include "evergreen_blit_shaders.h"
34#include "cayman_blit_shaders.h"
34 35
35#define DI_PT_RECTLIST 0x11 36#define DI_PT_RECTLIST 0x11
36#define DI_INDEX_SIZE_16_BIT 0x0 37#define DI_INDEX_SIZE_16_BIT 0x0
@@ -152,6 +153,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
152 153
153 if ((rdev->family == CHIP_CEDAR) || 154 if ((rdev->family == CHIP_CEDAR) ||
154 (rdev->family == CHIP_PALM) || 155 (rdev->family == CHIP_PALM) ||
156 (rdev->family == CHIP_SUMO) ||
157 (rdev->family == CHIP_SUMO2) ||
155 (rdev->family == CHIP_CAICOS)) 158 (rdev->family == CHIP_CAICOS))
156 cp_set_surface_sync(rdev, 159 cp_set_surface_sync(rdev,
157 PACKET3_TC_ACTION_ENA, 48, gpu_addr); 160 PACKET3_TC_ACTION_ENA, 48, gpu_addr);
@@ -199,6 +202,16 @@ static void
199set_scissors(struct radeon_device *rdev, int x1, int y1, 202set_scissors(struct radeon_device *rdev, int x1, int y1,
200 int x2, int y2) 203 int x2, int y2)
201{ 204{
205 /* workaround some hw bugs */
206 if (x2 == 0)
207 x1 = 1;
208 if (y2 == 0)
209 y1 = 1;
210 if (rdev->family == CHIP_CAYMAN) {
211 if ((x2 == 1) && (y2 == 1))
212 x2 = 2;
213 }
214
202 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); 215 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
203 radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2); 216 radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
204 radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); 217 radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
@@ -255,238 +268,284 @@ set_default_state(struct radeon_device *rdev)
255 u64 gpu_addr; 268 u64 gpu_addr;
256 int dwords; 269 int dwords;
257 270
258 switch (rdev->family) {
259 case CHIP_CEDAR:
260 default:
261 num_ps_gprs = 93;
262 num_vs_gprs = 46;
263 num_temp_gprs = 4;
264 num_gs_gprs = 31;
265 num_es_gprs = 31;
266 num_hs_gprs = 23;
267 num_ls_gprs = 23;
268 num_ps_threads = 96;
269 num_vs_threads = 16;
270 num_gs_threads = 16;
271 num_es_threads = 16;
272 num_hs_threads = 16;
273 num_ls_threads = 16;
274 num_ps_stack_entries = 42;
275 num_vs_stack_entries = 42;
276 num_gs_stack_entries = 42;
277 num_es_stack_entries = 42;
278 num_hs_stack_entries = 42;
279 num_ls_stack_entries = 42;
280 break;
281 case CHIP_REDWOOD:
282 num_ps_gprs = 93;
283 num_vs_gprs = 46;
284 num_temp_gprs = 4;
285 num_gs_gprs = 31;
286 num_es_gprs = 31;
287 num_hs_gprs = 23;
288 num_ls_gprs = 23;
289 num_ps_threads = 128;
290 num_vs_threads = 20;
291 num_gs_threads = 20;
292 num_es_threads = 20;
293 num_hs_threads = 20;
294 num_ls_threads = 20;
295 num_ps_stack_entries = 42;
296 num_vs_stack_entries = 42;
297 num_gs_stack_entries = 42;
298 num_es_stack_entries = 42;
299 num_hs_stack_entries = 42;
300 num_ls_stack_entries = 42;
301 break;
302 case CHIP_JUNIPER:
303 num_ps_gprs = 93;
304 num_vs_gprs = 46;
305 num_temp_gprs = 4;
306 num_gs_gprs = 31;
307 num_es_gprs = 31;
308 num_hs_gprs = 23;
309 num_ls_gprs = 23;
310 num_ps_threads = 128;
311 num_vs_threads = 20;
312 num_gs_threads = 20;
313 num_es_threads = 20;
314 num_hs_threads = 20;
315 num_ls_threads = 20;
316 num_ps_stack_entries = 85;
317 num_vs_stack_entries = 85;
318 num_gs_stack_entries = 85;
319 num_es_stack_entries = 85;
320 num_hs_stack_entries = 85;
321 num_ls_stack_entries = 85;
322 break;
323 case CHIP_CYPRESS:
324 case CHIP_HEMLOCK:
325 num_ps_gprs = 93;
326 num_vs_gprs = 46;
327 num_temp_gprs = 4;
328 num_gs_gprs = 31;
329 num_es_gprs = 31;
330 num_hs_gprs = 23;
331 num_ls_gprs = 23;
332 num_ps_threads = 128;
333 num_vs_threads = 20;
334 num_gs_threads = 20;
335 num_es_threads = 20;
336 num_hs_threads = 20;
337 num_ls_threads = 20;
338 num_ps_stack_entries = 85;
339 num_vs_stack_entries = 85;
340 num_gs_stack_entries = 85;
341 num_es_stack_entries = 85;
342 num_hs_stack_entries = 85;
343 num_ls_stack_entries = 85;
344 break;
345 case CHIP_PALM:
346 num_ps_gprs = 93;
347 num_vs_gprs = 46;
348 num_temp_gprs = 4;
349 num_gs_gprs = 31;
350 num_es_gprs = 31;
351 num_hs_gprs = 23;
352 num_ls_gprs = 23;
353 num_ps_threads = 96;
354 num_vs_threads = 16;
355 num_gs_threads = 16;
356 num_es_threads = 16;
357 num_hs_threads = 16;
358 num_ls_threads = 16;
359 num_ps_stack_entries = 42;
360 num_vs_stack_entries = 42;
361 num_gs_stack_entries = 42;
362 num_es_stack_entries = 42;
363 num_hs_stack_entries = 42;
364 num_ls_stack_entries = 42;
365 break;
366 case CHIP_BARTS:
367 num_ps_gprs = 93;
368 num_vs_gprs = 46;
369 num_temp_gprs = 4;
370 num_gs_gprs = 31;
371 num_es_gprs = 31;
372 num_hs_gprs = 23;
373 num_ls_gprs = 23;
374 num_ps_threads = 128;
375 num_vs_threads = 20;
376 num_gs_threads = 20;
377 num_es_threads = 20;
378 num_hs_threads = 20;
379 num_ls_threads = 20;
380 num_ps_stack_entries = 85;
381 num_vs_stack_entries = 85;
382 num_gs_stack_entries = 85;
383 num_es_stack_entries = 85;
384 num_hs_stack_entries = 85;
385 num_ls_stack_entries = 85;
386 break;
387 case CHIP_TURKS:
388 num_ps_gprs = 93;
389 num_vs_gprs = 46;
390 num_temp_gprs = 4;
391 num_gs_gprs = 31;
392 num_es_gprs = 31;
393 num_hs_gprs = 23;
394 num_ls_gprs = 23;
395 num_ps_threads = 128;
396 num_vs_threads = 20;
397 num_gs_threads = 20;
398 num_es_threads = 20;
399 num_hs_threads = 20;
400 num_ls_threads = 20;
401 num_ps_stack_entries = 42;
402 num_vs_stack_entries = 42;
403 num_gs_stack_entries = 42;
404 num_es_stack_entries = 42;
405 num_hs_stack_entries = 42;
406 num_ls_stack_entries = 42;
407 break;
408 case CHIP_CAICOS:
409 num_ps_gprs = 93;
410 num_vs_gprs = 46;
411 num_temp_gprs = 4;
412 num_gs_gprs = 31;
413 num_es_gprs = 31;
414 num_hs_gprs = 23;
415 num_ls_gprs = 23;
416 num_ps_threads = 128;
417 num_vs_threads = 10;
418 num_gs_threads = 10;
419 num_es_threads = 10;
420 num_hs_threads = 10;
421 num_ls_threads = 10;
422 num_ps_stack_entries = 42;
423 num_vs_stack_entries = 42;
424 num_gs_stack_entries = 42;
425 num_es_stack_entries = 42;
426 num_hs_stack_entries = 42;
427 num_ls_stack_entries = 42;
428 break;
429 }
430
431 if ((rdev->family == CHIP_CEDAR) ||
432 (rdev->family == CHIP_PALM) ||
433 (rdev->family == CHIP_CAICOS))
434 sq_config = 0;
435 else
436 sq_config = VC_ENABLE;
437
438 sq_config |= (EXPORT_SRC_C |
439 CS_PRIO(0) |
440 LS_PRIO(0) |
441 HS_PRIO(0) |
442 PS_PRIO(0) |
443 VS_PRIO(1) |
444 GS_PRIO(2) |
445 ES_PRIO(3));
446
447 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
448 NUM_VS_GPRS(num_vs_gprs) |
449 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
450 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
451 NUM_ES_GPRS(num_es_gprs));
452 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
453 NUM_LS_GPRS(num_ls_gprs));
454 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
455 NUM_VS_THREADS(num_vs_threads) |
456 NUM_GS_THREADS(num_gs_threads) |
457 NUM_ES_THREADS(num_es_threads));
458 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
459 NUM_LS_THREADS(num_ls_threads));
460 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
461 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
462 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
463 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
464 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
465 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
466
467 /* set clear context state */ 271 /* set clear context state */
468 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0)); 272 radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
469 radeon_ring_write(rdev, 0); 273 radeon_ring_write(rdev, 0);
470 274
471 /* disable dyn gprs */ 275 if (rdev->family < CHIP_CAYMAN) {
472 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); 276 switch (rdev->family) {
473 radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2); 277 case CHIP_CEDAR:
474 radeon_ring_write(rdev, 0); 278 default:
279 num_ps_gprs = 93;
280 num_vs_gprs = 46;
281 num_temp_gprs = 4;
282 num_gs_gprs = 31;
283 num_es_gprs = 31;
284 num_hs_gprs = 23;
285 num_ls_gprs = 23;
286 num_ps_threads = 96;
287 num_vs_threads = 16;
288 num_gs_threads = 16;
289 num_es_threads = 16;
290 num_hs_threads = 16;
291 num_ls_threads = 16;
292 num_ps_stack_entries = 42;
293 num_vs_stack_entries = 42;
294 num_gs_stack_entries = 42;
295 num_es_stack_entries = 42;
296 num_hs_stack_entries = 42;
297 num_ls_stack_entries = 42;
298 break;
299 case CHIP_REDWOOD:
300 num_ps_gprs = 93;
301 num_vs_gprs = 46;
302 num_temp_gprs = 4;
303 num_gs_gprs = 31;
304 num_es_gprs = 31;
305 num_hs_gprs = 23;
306 num_ls_gprs = 23;
307 num_ps_threads = 128;
308 num_vs_threads = 20;
309 num_gs_threads = 20;
310 num_es_threads = 20;
311 num_hs_threads = 20;
312 num_ls_threads = 20;
313 num_ps_stack_entries = 42;
314 num_vs_stack_entries = 42;
315 num_gs_stack_entries = 42;
316 num_es_stack_entries = 42;
317 num_hs_stack_entries = 42;
318 num_ls_stack_entries = 42;
319 break;
320 case CHIP_JUNIPER:
321 num_ps_gprs = 93;
322 num_vs_gprs = 46;
323 num_temp_gprs = 4;
324 num_gs_gprs = 31;
325 num_es_gprs = 31;
326 num_hs_gprs = 23;
327 num_ls_gprs = 23;
328 num_ps_threads = 128;
329 num_vs_threads = 20;
330 num_gs_threads = 20;
331 num_es_threads = 20;
332 num_hs_threads = 20;
333 num_ls_threads = 20;
334 num_ps_stack_entries = 85;
335 num_vs_stack_entries = 85;
336 num_gs_stack_entries = 85;
337 num_es_stack_entries = 85;
338 num_hs_stack_entries = 85;
339 num_ls_stack_entries = 85;
340 break;
341 case CHIP_CYPRESS:
342 case CHIP_HEMLOCK:
343 num_ps_gprs = 93;
344 num_vs_gprs = 46;
345 num_temp_gprs = 4;
346 num_gs_gprs = 31;
347 num_es_gprs = 31;
348 num_hs_gprs = 23;
349 num_ls_gprs = 23;
350 num_ps_threads = 128;
351 num_vs_threads = 20;
352 num_gs_threads = 20;
353 num_es_threads = 20;
354 num_hs_threads = 20;
355 num_ls_threads = 20;
356 num_ps_stack_entries = 85;
357 num_vs_stack_entries = 85;
358 num_gs_stack_entries = 85;
359 num_es_stack_entries = 85;
360 num_hs_stack_entries = 85;
361 num_ls_stack_entries = 85;
362 break;
363 case CHIP_PALM:
364 num_ps_gprs = 93;
365 num_vs_gprs = 46;
366 num_temp_gprs = 4;
367 num_gs_gprs = 31;
368 num_es_gprs = 31;
369 num_hs_gprs = 23;
370 num_ls_gprs = 23;
371 num_ps_threads = 96;
372 num_vs_threads = 16;
373 num_gs_threads = 16;
374 num_es_threads = 16;
375 num_hs_threads = 16;
376 num_ls_threads = 16;
377 num_ps_stack_entries = 42;
378 num_vs_stack_entries = 42;
379 num_gs_stack_entries = 42;
380 num_es_stack_entries = 42;
381 num_hs_stack_entries = 42;
382 num_ls_stack_entries = 42;
383 break;
384 case CHIP_SUMO:
385 num_ps_gprs = 93;
386 num_vs_gprs = 46;
387 num_temp_gprs = 4;
388 num_gs_gprs = 31;
389 num_es_gprs = 31;
390 num_hs_gprs = 23;
391 num_ls_gprs = 23;
392 num_ps_threads = 96;
393 num_vs_threads = 25;
394 num_gs_threads = 25;
395 num_es_threads = 25;
396 num_hs_threads = 25;
397 num_ls_threads = 25;
398 num_ps_stack_entries = 42;
399 num_vs_stack_entries = 42;
400 num_gs_stack_entries = 42;
401 num_es_stack_entries = 42;
402 num_hs_stack_entries = 42;
403 num_ls_stack_entries = 42;
404 break;
405 case CHIP_SUMO2:
406 num_ps_gprs = 93;
407 num_vs_gprs = 46;
408 num_temp_gprs = 4;
409 num_gs_gprs = 31;
410 num_es_gprs = 31;
411 num_hs_gprs = 23;
412 num_ls_gprs = 23;
413 num_ps_threads = 96;
414 num_vs_threads = 25;
415 num_gs_threads = 25;
416 num_es_threads = 25;
417 num_hs_threads = 25;
418 num_ls_threads = 25;
419 num_ps_stack_entries = 85;
420 num_vs_stack_entries = 85;
421 num_gs_stack_entries = 85;
422 num_es_stack_entries = 85;
423 num_hs_stack_entries = 85;
424 num_ls_stack_entries = 85;
425 break;
426 case CHIP_BARTS:
427 num_ps_gprs = 93;
428 num_vs_gprs = 46;
429 num_temp_gprs = 4;
430 num_gs_gprs = 31;
431 num_es_gprs = 31;
432 num_hs_gprs = 23;
433 num_ls_gprs = 23;
434 num_ps_threads = 128;
435 num_vs_threads = 20;
436 num_gs_threads = 20;
437 num_es_threads = 20;
438 num_hs_threads = 20;
439 num_ls_threads = 20;
440 num_ps_stack_entries = 85;
441 num_vs_stack_entries = 85;
442 num_gs_stack_entries = 85;
443 num_es_stack_entries = 85;
444 num_hs_stack_entries = 85;
445 num_ls_stack_entries = 85;
446 break;
447 case CHIP_TURKS:
448 num_ps_gprs = 93;
449 num_vs_gprs = 46;
450 num_temp_gprs = 4;
451 num_gs_gprs = 31;
452 num_es_gprs = 31;
453 num_hs_gprs = 23;
454 num_ls_gprs = 23;
455 num_ps_threads = 128;
456 num_vs_threads = 20;
457 num_gs_threads = 20;
458 num_es_threads = 20;
459 num_hs_threads = 20;
460 num_ls_threads = 20;
461 num_ps_stack_entries = 42;
462 num_vs_stack_entries = 42;
463 num_gs_stack_entries = 42;
464 num_es_stack_entries = 42;
465 num_hs_stack_entries = 42;
466 num_ls_stack_entries = 42;
467 break;
468 case CHIP_CAICOS:
469 num_ps_gprs = 93;
470 num_vs_gprs = 46;
471 num_temp_gprs = 4;
472 num_gs_gprs = 31;
473 num_es_gprs = 31;
474 num_hs_gprs = 23;
475 num_ls_gprs = 23;
476 num_ps_threads = 128;
477 num_vs_threads = 10;
478 num_gs_threads = 10;
479 num_es_threads = 10;
480 num_hs_threads = 10;
481 num_ls_threads = 10;
482 num_ps_stack_entries = 42;
483 num_vs_stack_entries = 42;
484 num_gs_stack_entries = 42;
485 num_es_stack_entries = 42;
486 num_hs_stack_entries = 42;
487 num_ls_stack_entries = 42;
488 break;
489 }
475 490
476 /* SQ config */ 491 if ((rdev->family == CHIP_CEDAR) ||
477 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11)); 492 (rdev->family == CHIP_PALM) ||
478 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2); 493 (rdev->family == CHIP_SUMO) ||
479 radeon_ring_write(rdev, sq_config); 494 (rdev->family == CHIP_SUMO2) ||
480 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); 495 (rdev->family == CHIP_CAICOS))
481 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); 496 sq_config = 0;
482 radeon_ring_write(rdev, sq_gpr_resource_mgmt_3); 497 else
483 radeon_ring_write(rdev, 0); 498 sq_config = VC_ENABLE;
484 radeon_ring_write(rdev, 0); 499
485 radeon_ring_write(rdev, sq_thread_resource_mgmt); 500 sq_config |= (EXPORT_SRC_C |
486 radeon_ring_write(rdev, sq_thread_resource_mgmt_2); 501 CS_PRIO(0) |
487 radeon_ring_write(rdev, sq_stack_resource_mgmt_1); 502 LS_PRIO(0) |
488 radeon_ring_write(rdev, sq_stack_resource_mgmt_2); 503 HS_PRIO(0) |
489 radeon_ring_write(rdev, sq_stack_resource_mgmt_3); 504 PS_PRIO(0) |
505 VS_PRIO(1) |
506 GS_PRIO(2) |
507 ES_PRIO(3));
508
509 sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
510 NUM_VS_GPRS(num_vs_gprs) |
511 NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
512 sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
513 NUM_ES_GPRS(num_es_gprs));
514 sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
515 NUM_LS_GPRS(num_ls_gprs));
516 sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
517 NUM_VS_THREADS(num_vs_threads) |
518 NUM_GS_THREADS(num_gs_threads) |
519 NUM_ES_THREADS(num_es_threads));
520 sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
521 NUM_LS_THREADS(num_ls_threads));
522 sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
523 NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
524 sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
525 NUM_ES_STACK_ENTRIES(num_es_stack_entries));
526 sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
527 NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
528
529 /* disable dyn gprs */
530 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
531 radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
532 radeon_ring_write(rdev, 0);
533
534 /* SQ config */
535 radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
536 radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
537 radeon_ring_write(rdev, sq_config);
538 radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
539 radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
540 radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
541 radeon_ring_write(rdev, 0);
542 radeon_ring_write(rdev, 0);
543 radeon_ring_write(rdev, sq_thread_resource_mgmt);
544 radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
545 radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
546 radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
547 radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
548 }
490 549
491 /* CONTEXT_CONTROL */ 550 /* CONTEXT_CONTROL */
492 radeon_ring_write(rdev, 0xc0012800); 551 radeon_ring_write(rdev, 0xc0012800);
@@ -560,7 +619,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
560 mutex_init(&rdev->r600_blit.mutex); 619 mutex_init(&rdev->r600_blit.mutex);
561 rdev->r600_blit.state_offset = 0; 620 rdev->r600_blit.state_offset = 0;
562 621
563 rdev->r600_blit.state_len = evergreen_default_size; 622 if (rdev->family < CHIP_CAYMAN)
623 rdev->r600_blit.state_len = evergreen_default_size;
624 else
625 rdev->r600_blit.state_len = cayman_default_size;
564 626
565 dwords = rdev->r600_blit.state_len; 627 dwords = rdev->r600_blit.state_len;
566 while (dwords & 0xf) { 628 while (dwords & 0xf) {
@@ -572,11 +634,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
572 obj_size = ALIGN(obj_size, 256); 634 obj_size = ALIGN(obj_size, 256);
573 635
574 rdev->r600_blit.vs_offset = obj_size; 636 rdev->r600_blit.vs_offset = obj_size;
575 obj_size += evergreen_vs_size * 4; 637 if (rdev->family < CHIP_CAYMAN)
638 obj_size += evergreen_vs_size * 4;
639 else
640 obj_size += cayman_vs_size * 4;
576 obj_size = ALIGN(obj_size, 256); 641 obj_size = ALIGN(obj_size, 256);
577 642
578 rdev->r600_blit.ps_offset = obj_size; 643 rdev->r600_blit.ps_offset = obj_size;
579 obj_size += evergreen_ps_size * 4; 644 if (rdev->family < CHIP_CAYMAN)
645 obj_size += evergreen_ps_size * 4;
646 else
647 obj_size += cayman_ps_size * 4;
580 obj_size = ALIGN(obj_size, 256); 648 obj_size = ALIGN(obj_size, 256);
581 649
582 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, 650 r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
@@ -599,16 +667,29 @@ int evergreen_blit_init(struct radeon_device *rdev)
599 return r; 667 return r;
600 } 668 }
601 669
602 memcpy_toio(ptr + rdev->r600_blit.state_offset, 670 if (rdev->family < CHIP_CAYMAN) {
603 evergreen_default_state, rdev->r600_blit.state_len * 4); 671 memcpy_toio(ptr + rdev->r600_blit.state_offset,
604 672 evergreen_default_state, rdev->r600_blit.state_len * 4);
605 if (num_packet2s) 673
606 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 674 if (num_packet2s)
607 packet2s, num_packet2s * 4); 675 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
608 for (i = 0; i < evergreen_vs_size; i++) 676 packet2s, num_packet2s * 4);
609 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]); 677 for (i = 0; i < evergreen_vs_size; i++)
610 for (i = 0; i < evergreen_ps_size; i++) 678 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
611 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]); 679 for (i = 0; i < evergreen_ps_size; i++)
680 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
681 } else {
682 memcpy_toio(ptr + rdev->r600_blit.state_offset,
683 cayman_default_state, rdev->r600_blit.state_len * 4);
684
685 if (num_packet2s)
686 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
687 packet2s, num_packet2s * 4);
688 for (i = 0; i < cayman_vs_size; i++)
689 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
690 for (i = 0; i < cayman_ps_size; i++)
691 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
692 }
612 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 693 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
613 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 694 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
614 695
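Editor's note: taken together, the evergreen_blit_kms.c changes follow one pattern — pre-Cayman parts program the per-family SQ setup and use the evergreen default state and shader blobs, while Cayman skips the SQ programming and substitutes its own state and shaders, so only the blob sizes change in the BO layout. A reduced sketch of that layout calculation (a hypothetical helper, and it ignores the PACKET2 padding that rounds the state block to a 16-dword multiple):

/* Object layout used by the blit init path: state, then VS, then PS,
 * each rounded up to 256 bytes; only the dword counts differ per
 * generation (evergreen vs cayman tables). */
static unsigned int blit_obj_size(unsigned int state_dw,
				  unsigned int vs_dw, unsigned int ps_dw)
{
	const unsigned int align = 256;
	unsigned int size = 0;

	size += state_dw * 4;				/* default state */
	size = (size + align - 1) & ~(align - 1);
	size += vs_dw * 4;				/* vertex shader  */
	size = (size + align - 1) & ~(align - 1);
	size += ps_dw * 4;				/* pixel shader   */
	size = (size + align - 1) & ~(align - 1);
	return size;
}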
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index f37e91ee8a11..1636e3449825 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -168,10 +168,16 @@
168#define SE_DB_BUSY (1 << 30) 168#define SE_DB_BUSY (1 << 30)
169#define SE_CB_BUSY (1 << 31) 169#define SE_CB_BUSY (1 << 31)
170/* evergreen */ 170/* evergreen */
171#define CG_THERMAL_CTRL 0x72c
172#define TOFFSET_MASK 0x00003FE0
173#define TOFFSET_SHIFT 5
171#define CG_MULT_THERMAL_STATUS 0x740 174#define CG_MULT_THERMAL_STATUS 0x740
172#define ASIC_T(x) ((x) << 16) 175#define ASIC_T(x) ((x) << 16)
173#define ASIC_T_MASK 0x7FF0000 176#define ASIC_T_MASK 0x07FF0000
174#define ASIC_T_SHIFT 16 177#define ASIC_T_SHIFT 16
178#define CG_TS0_STATUS 0x760
179#define TS0_ADC_DOUT_MASK 0x000003FF
180#define TS0_ADC_DOUT_SHIFT 0
175/* APU */ 181/* APU */
176#define CG_THERMAL_STATUS 0x678 182#define CG_THERMAL_STATUS 0x678
177 183
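Editor's note: the new thermal defines are consumed by masking and shifting the status registers; for instance, the on-die temperature field is the ASIC_T bits of CG_MULT_THERMAL_STATUS. A hedged sketch of such a readout — read_reg32() is a stand-in for the driver's register accessor, and the real helper also deals with the signed/offset encoding of the field:

#define CG_MULT_THERMAL_STATUS	0x740
#define ASIC_T_MASK		0x07FF0000
#define ASIC_T_SHIFT		16

/* Extract the raw ASIC_T temperature field from the status register. */
static unsigned int asic_temp_raw(unsigned int (*read_reg32)(unsigned int))
{
	unsigned int status = read_reg32(CG_MULT_THERMAL_STATUS);

	return (status & ASIC_T_MASK) >> ASIC_T_SHIFT;
}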
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index b205ba1cdd8f..16caafeadf5e 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -1387,14 +1387,12 @@ static int cayman_startup(struct radeon_device *rdev)
1387 return r; 1387 return r;
1388 cayman_gpu_init(rdev); 1388 cayman_gpu_init(rdev);
1389 1389
1390#if 0 1390 r = evergreen_blit_init(rdev);
1391 r = cayman_blit_init(rdev);
1392 if (r) { 1391 if (r) {
1393 cayman_blit_fini(rdev); 1392 evergreen_blit_fini(rdev);
1394 rdev->asic->copy = NULL; 1393 rdev->asic->copy = NULL;
1395 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r); 1394 dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
1396 } 1395 }
1397#endif
1398 1396
1399 /* allocate wb buffer */ 1397 /* allocate wb buffer */
1400 r = radeon_wb_init(rdev); 1398 r = radeon_wb_init(rdev);
@@ -1452,7 +1450,7 @@ int cayman_resume(struct radeon_device *rdev)
1452 1450
1453int cayman_suspend(struct radeon_device *rdev) 1451int cayman_suspend(struct radeon_device *rdev)
1454{ 1452{
1455 /* int r; */ 1453 int r;
1456 1454
1457 /* FIXME: we should wait for ring to be empty */ 1455 /* FIXME: we should wait for ring to be empty */
1458 cayman_cp_enable(rdev, false); 1456 cayman_cp_enable(rdev, false);
@@ -1461,14 +1459,13 @@ int cayman_suspend(struct radeon_device *rdev)
1461 radeon_wb_disable(rdev); 1459 radeon_wb_disable(rdev);
1462 cayman_pcie_gart_disable(rdev); 1460 cayman_pcie_gart_disable(rdev);
1463 1461
1464#if 0
1465 /* unpin shaders bo */ 1462 /* unpin shaders bo */
1466 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); 1463 r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
1467 if (likely(r == 0)) { 1464 if (likely(r == 0)) {
1468 radeon_bo_unpin(rdev->r600_blit.shader_obj); 1465 radeon_bo_unpin(rdev->r600_blit.shader_obj);
1469 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 1466 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
1470 } 1467 }
1471#endif 1468
1472 return 0; 1469 return 0;
1473} 1470}
1474 1471
@@ -1580,7 +1577,7 @@ int cayman_init(struct radeon_device *rdev)
1580 1577
1581void cayman_fini(struct radeon_device *rdev) 1578void cayman_fini(struct radeon_device *rdev)
1582{ 1579{
1583 /* cayman_blit_fini(rdev); */ 1580 evergreen_blit_fini(rdev);
1584 cayman_cp_fini(rdev); 1581 cayman_cp_fini(rdev);
1585 r600_irq_fini(rdev); 1582 r600_irq_fini(rdev);
1586 radeon_wb_fini(rdev); 1583 radeon_wb_fini(rdev);
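Editor's note: the ni.c changes switch Cayman from the stubbed-out cayman_blit_* calls to the shared evergreen blit path, keeping the existing convention of falling back to CPU copies when the blitter cannot be brought up. A simplified sketch of that fallback (the struct and function pointers are illustrative, not the driver's asic table):

struct copy_ops { int (*copy)(void); };

/* If blitter init fails, clear the accelerated copy hook so callers
 * fall back to memcpy-style transfers instead of failing startup. */
static void setup_copy(struct copy_ops *ops, int (*blit_init)(void),
		       void (*blit_fini)(void), int (*blit_copy)(void))
{
	if (blit_init() == 0) {
		ops->copy = blit_copy;
	} else {
		blit_fini();
		ops->copy = 0;	/* NULL hook: CPU copy path is used */
	}
}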
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 2fef9de7f363..686f9dc5d4bd 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -63,7 +63,7 @@ struct r100_cs_track {
63 unsigned num_arrays; 63 unsigned num_arrays;
64 unsigned max_indx; 64 unsigned max_indx;
65 unsigned color_channel_mask; 65 unsigned color_channel_mask;
66 struct r100_cs_track_array arrays[11]; 66 struct r100_cs_track_array arrays[16];
67 struct r100_cs_track_cb cb[R300_MAX_CB]; 67 struct r100_cs_track_cb cb[R300_MAX_CB];
68 struct r100_cs_track_cb zb; 68 struct r100_cs_track_cb zb;
69 struct r100_cs_track_cb aa; 69 struct r100_cs_track_cb aa;
@@ -146,6 +146,12 @@ static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
146 ib = p->ib->ptr; 146 ib = p->ib->ptr;
147 track = (struct r100_cs_track *)p->track; 147 track = (struct r100_cs_track *)p->track;
148 c = radeon_get_ib_value(p, idx++) & 0x1F; 148 c = radeon_get_ib_value(p, idx++) & 0x1F;
149 if (c > 16) {
150 DRM_ERROR("Only 16 vertex buffers are allowed %d\n",
151 pkt->opcode);
152 r100_cs_dump_packet(p, pkt);
153 return -EINVAL;
154 }
149 track->num_arrays = c; 155 track->num_arrays = c;
150 for (i = 0; i < (c - 1); i+=2, idx+=3) { 156 for (i = 0; i < (c - 1); i+=2, idx+=3) {
151 r = r100_cs_packet_next_reloc(p, &reloc); 157 r = r100_cs_packet_next_reloc(p, &reloc);
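Editor's note: the r100_track.h change pairs a larger tracking array with a bounds check on the count parsed from the command stream; without the check, the 5-bit field could encode up to 31 and index past arrays[]. A standalone illustration of the pattern (the macro and helper names here are hypothetical):

#define R100_TRACK_MAX_ARRAYS 16

/* Validate a vertex-buffer count taken from an untrusted command
 * stream before it is used as an array/loop bound. */
static int validate_vb_count(unsigned int word, unsigned int *count)
{
	unsigned int c = word & 0x1F;	/* 5-bit field: 0..31 */

	if (c > R100_TRACK_MAX_ARRAYS)
		return -1;		/* reject oversized counts */
	*count = c;
	return 0;
}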
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 6f27593901c7..f79d2ccb6755 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -87,6 +87,10 @@ MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
87MODULE_FIRMWARE("radeon/PALM_pfp.bin"); 87MODULE_FIRMWARE("radeon/PALM_pfp.bin");
88MODULE_FIRMWARE("radeon/PALM_me.bin"); 88MODULE_FIRMWARE("radeon/PALM_me.bin");
89MODULE_FIRMWARE("radeon/SUMO_rlc.bin"); 89MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
90MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
91MODULE_FIRMWARE("radeon/SUMO_me.bin");
92MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
93MODULE_FIRMWARE("radeon/SUMO2_me.bin");
90 94
91int r600_debugfs_mc_info_init(struct radeon_device *rdev); 95int r600_debugfs_mc_info_init(struct radeon_device *rdev);
92 96
@@ -586,6 +590,9 @@ void r600_pm_misc(struct radeon_device *rdev)
586 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 590 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
587 591
588 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 592 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 593	/* 0xff01 is a flag rather than an actual voltage */
594 if (voltage->voltage == 0xff01)
595 return;
589 if (voltage->voltage != rdev->pm.current_vddc) { 596 if (voltage->voltage != rdev->pm.current_vddc) {
590 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 597 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
591 rdev->pm.current_vddc = voltage->voltage; 598 rdev->pm.current_vddc = voltage->voltage;
@@ -2024,6 +2031,14 @@ int r600_init_microcode(struct radeon_device *rdev)
2024 chip_name = "PALM"; 2031 chip_name = "PALM";
2025 rlc_chip_name = "SUMO"; 2032 rlc_chip_name = "SUMO";
2026 break; 2033 break;
2034 case CHIP_SUMO:
2035 chip_name = "SUMO";
2036 rlc_chip_name = "SUMO";
2037 break;
2038 case CHIP_SUMO2:
2039 chip_name = "SUMO2";
2040 rlc_chip_name = "SUMO";
2041 break;
2027 default: BUG(); 2042 default: BUG();
2028 } 2043 }
2029 2044
@@ -3282,27 +3297,26 @@ static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
3282 3297
3283int r600_irq_process(struct radeon_device *rdev) 3298int r600_irq_process(struct radeon_device *rdev)
3284{ 3299{
3285 u32 wptr = r600_get_ih_wptr(rdev); 3300 u32 wptr;
3286 u32 rptr = rdev->ih.rptr; 3301 u32 rptr;
3287 u32 src_id, src_data; 3302 u32 src_id, src_data;
3288 u32 ring_index; 3303 u32 ring_index;
3289 unsigned long flags; 3304 unsigned long flags;
3290 bool queue_hotplug = false; 3305 bool queue_hotplug = false;
3291 3306
3292 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr); 3307 if (!rdev->ih.enabled || rdev->shutdown)
3293 if (!rdev->ih.enabled)
3294 return IRQ_NONE; 3308 return IRQ_NONE;
3295 3309
3310 wptr = r600_get_ih_wptr(rdev);
3311 rptr = rdev->ih.rptr;
3312 DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
3313
3296 spin_lock_irqsave(&rdev->ih.lock, flags); 3314 spin_lock_irqsave(&rdev->ih.lock, flags);
3297 3315
3298 if (rptr == wptr) { 3316 if (rptr == wptr) {
3299 spin_unlock_irqrestore(&rdev->ih.lock, flags); 3317 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3300 return IRQ_NONE; 3318 return IRQ_NONE;
3301 } 3319 }
3302 if (rdev->shutdown) {
3303 spin_unlock_irqrestore(&rdev->ih.lock, flags);
3304 return IRQ_NONE;
3305 }
3306 3320
3307restart_ih: 3321restart_ih:
3308 /* display interrupts */ 3322 /* display interrupts */
@@ -3432,7 +3446,7 @@ restart_ih:
3432 radeon_fence_process(rdev); 3446 radeon_fence_process(rdev);
3433 break; 3447 break;
3434 case 233: /* GUI IDLE */ 3448 case 233: /* GUI IDLE */
3435 DRM_DEBUG("IH: CP EOP\n"); 3449 DRM_DEBUG("IH: GUI idle\n");
3436 rdev->pm.gui_idle = true; 3450 rdev->pm.gui_idle = true;
3437 wake_up(&rdev->irq.idle_queue); 3451 wake_up(&rdev->irq.idle_queue);
3438 break; 3452 break;
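Editor's note: the r600_irq_process() hunk reorders the entry checks so that a disabled or shutting-down interrupt controller is rejected before the write pointer is read from hardware, rather than after. A reduced sketch of the resulting control flow (the types, return codes and fetch helper are placeholders):

struct ih_ring { int enabled; int shutdown; unsigned int rptr; };

enum { IRQ_NONE_RET = 0, IRQ_HANDLED_RET = 1 };

static int process_ih(struct ih_ring *ih, unsigned int (*get_wptr)(void))
{
	unsigned int wptr, rptr;

	if (!ih->enabled || ih->shutdown)	/* bail before touching hw */
		return IRQ_NONE_RET;

	wptr = get_wptr();			/* safe: hw is known to be up */
	rptr = ih->rptr;
	if (rptr == wptr)
		return IRQ_NONE_RET;

	/* ... drain ring entries between rptr and wptr ... */
	return IRQ_HANDLED_RET;
}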
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index fd18be9871ab..909bda8dd550 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -71,20 +71,21 @@ struct r600_cs_track {
71 u64 db_bo_mc; 71 u64 db_bo_mc;
72}; 72};
73 73
74#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc } 74#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
75#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc } 75#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
76#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 } 76#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
77#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc } 77#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
78#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 } 78#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
79#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc } 79#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
80#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 } 80#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
81#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc } 81#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }
82 82
83struct gpu_formats { 83struct gpu_formats {
84 unsigned blockwidth; 84 unsigned blockwidth;
85 unsigned blockheight; 85 unsigned blockheight;
86 unsigned blocksize; 86 unsigned blocksize;
87 unsigned valid_color; 87 unsigned valid_color;
88 enum radeon_family min_family;
88}; 89};
89 90
90static const struct gpu_formats color_formats_table[] = { 91static const struct gpu_formats color_formats_table[] = {
@@ -154,7 +155,11 @@ static const struct gpu_formats color_formats_table[] = {
154 [V_038004_FMT_BC3] = { 4, 4, 16, 0 }, 155 [V_038004_FMT_BC3] = { 4, 4, 16, 0 },
155 [V_038004_FMT_BC4] = { 4, 4, 8, 0 }, 156 [V_038004_FMT_BC4] = { 4, 4, 8, 0 },
156 [V_038004_FMT_BC5] = { 4, 4, 16, 0}, 157 [V_038004_FMT_BC5] = { 4, 4, 16, 0},
158 [V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
159 [V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
157 160
161 /* The other Evergreen formats */
162 [V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
158}; 163};
159 164
160static inline bool fmt_is_valid_color(u32 format) 165static inline bool fmt_is_valid_color(u32 format)
@@ -168,11 +173,14 @@ static inline bool fmt_is_valid_color(u32 format)
168 return false; 173 return false;
169} 174}
170 175
171static inline bool fmt_is_valid_texture(u32 format) 176static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
172{ 177{
173 if (format >= ARRAY_SIZE(color_formats_table)) 178 if (format >= ARRAY_SIZE(color_formats_table))
174 return false; 179 return false;
175 180
181 if (family < color_formats_table[format].min_family)
182 return false;
183
176 if (color_formats_table[format].blockwidth > 0) 184 if (color_formats_table[format].blockwidth > 0)
177 return true; 185 return true;
178 186
@@ -1325,7 +1333,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1325 return -EINVAL; 1333 return -EINVAL;
1326 } 1334 }
1327 format = G_038004_DATA_FORMAT(word1); 1335 format = G_038004_DATA_FORMAT(word1);
1328 if (!fmt_is_valid_texture(format)) { 1336 if (!fmt_is_valid_texture(format, p->family)) {
1329 dev_warn(p->dev, "%s:%d texture invalid format %d\n", 1337 dev_warn(p->dev, "%s:%d texture invalid format %d\n",
1330 __func__, __LINE__, format); 1338 __func__, __LINE__, format);
1331 return -EINVAL; 1339 return -EINVAL;
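Editor's note: the r600_cs.c change extends each gpu_formats entry with a minimum family, so texture formats introduced on Evergreen (BC6, BC7, 32_AS_32_32_32_32) are rejected when the command stream targets an older ASIC. A cut-down model of that gate — the enum values and the two table entries here are illustrative only:

enum fam { FAM_R600 = 0, FAM_CEDAR = 1 };

struct fmt_info {
	unsigned int blockwidth;	/* 0 marks an unused table slot */
	unsigned int blocksize;
	enum fam min_family;
};

static const struct fmt_info fmt_table[] = {
	[0x35] = { 4, 16, FAM_R600 },	/* BC5: all r600-class parts  */
	[0x36] = { 4, 16, FAM_CEDAR },	/* BC6: Evergreen and newer   */
};

static int fmt_valid_texture(unsigned int f, enum fam family)
{
	if (f >= sizeof(fmt_table) / sizeof(fmt_table[0]))
		return 0;
	if (family < fmt_table[f].min_family)
		return 0;			/* too new for this ASIC */
	return fmt_table[f].blockwidth > 0;
}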
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index b2b944bcd05a..f140a0d5cb54 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -1309,6 +1309,9 @@
1309#define V_038004_FMT_BC3 0x00000033 1309#define V_038004_FMT_BC3 0x00000033
1310#define V_038004_FMT_BC4 0x00000034 1310#define V_038004_FMT_BC4 0x00000034
1311#define V_038004_FMT_BC5 0x00000035 1311#define V_038004_FMT_BC5 0x00000035
1312#define V_038004_FMT_BC6 0x00000036
1313#define V_038004_FMT_BC7 0x00000037
1314#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
1312#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010 1315#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
1313#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0) 1316#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
1314#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3) 1317#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index ba643b576054..27f45579e64b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -165,6 +165,7 @@ struct radeon_clock {
165 uint32_t default_sclk; 165 uint32_t default_sclk;
166 uint32_t default_dispclk; 166 uint32_t default_dispclk;
167 uint32_t dp_extclk; 167 uint32_t dp_extclk;
168 uint32_t max_pixel_clock;
168}; 169};
169 170
170/* 171/*
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index d948265db87e..b2449629537d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
906 .get_vblank_counter = &evergreen_get_vblank_counter, 906 .get_vblank_counter = &evergreen_get_vblank_counter,
907 .fence_ring_emit = &r600_fence_ring_emit, 907 .fence_ring_emit = &r600_fence_ring_emit,
908 .cs_parse = &evergreen_cs_parse, 908 .cs_parse = &evergreen_cs_parse,
909 .copy_blit = NULL, 909 .copy_blit = &evergreen_copy_blit,
910 .copy_dma = NULL, 910 .copy_dma = &evergreen_copy_blit,
911 .copy = NULL, 911 .copy = &evergreen_copy_blit,
912 .get_engine_clock = &radeon_atom_get_engine_clock, 912 .get_engine_clock = &radeon_atom_get_engine_clock,
913 .set_engine_clock = &radeon_atom_set_engine_clock, 913 .set_engine_clock = &radeon_atom_set_engine_clock,
914 .get_memory_clock = &radeon_atom_get_memory_clock, 914 .get_memory_clock = &radeon_atom_get_memory_clock,
@@ -938,6 +938,13 @@ static struct radeon_asic cayman_asic = {
938int radeon_asic_init(struct radeon_device *rdev) 938int radeon_asic_init(struct radeon_device *rdev)
939{ 939{
940 radeon_register_accessor_init(rdev); 940 radeon_register_accessor_init(rdev);
941
942 /* set the number of crtcs */
943 if (rdev->flags & RADEON_SINGLE_CRTC)
944 rdev->num_crtc = 1;
945 else
946 rdev->num_crtc = 2;
947
941 switch (rdev->family) { 948 switch (rdev->family) {
942 case CHIP_R100: 949 case CHIP_R100:
943 case CHIP_RV100: 950 case CHIP_RV100:
@@ -1017,18 +1024,32 @@ int radeon_asic_init(struct radeon_device *rdev)
1017 case CHIP_JUNIPER: 1024 case CHIP_JUNIPER:
1018 case CHIP_CYPRESS: 1025 case CHIP_CYPRESS:
1019 case CHIP_HEMLOCK: 1026 case CHIP_HEMLOCK:
1027 /* set num crtcs */
1028 if (rdev->family == CHIP_CEDAR)
1029 rdev->num_crtc = 4;
1030 else
1031 rdev->num_crtc = 6;
1020 rdev->asic = &evergreen_asic; 1032 rdev->asic = &evergreen_asic;
1021 break; 1033 break;
1022 case CHIP_PALM: 1034 case CHIP_PALM:
1035 case CHIP_SUMO:
1036 case CHIP_SUMO2:
1023 rdev->asic = &sumo_asic; 1037 rdev->asic = &sumo_asic;
1024 break; 1038 break;
1025 case CHIP_BARTS: 1039 case CHIP_BARTS:
1026 case CHIP_TURKS: 1040 case CHIP_TURKS:
1027 case CHIP_CAICOS: 1041 case CHIP_CAICOS:
1042 /* set num crtcs */
1043 if (rdev->family == CHIP_CAICOS)
1044 rdev->num_crtc = 4;
1045 else
1046 rdev->num_crtc = 6;
1028 rdev->asic = &btc_asic; 1047 rdev->asic = &btc_asic;
1029 break; 1048 break;
1030 case CHIP_CAYMAN: 1049 case CHIP_CAYMAN:
1031 rdev->asic = &cayman_asic; 1050 rdev->asic = &cayman_asic;
1051 /* set num crtcs */
1052 rdev->num_crtc = 6;
1032 break; 1053 break;
1033 default: 1054 default:
1034 /* FIXME: not supported yet */ 1055 /* FIXME: not supported yet */
@@ -1040,18 +1061,6 @@ int radeon_asic_init(struct radeon_device *rdev)
1040 rdev->asic->set_memory_clock = NULL; 1061 rdev->asic->set_memory_clock = NULL;
1041 } 1062 }
1042 1063
1043 /* set the number of crtcs */
1044 if (rdev->flags & RADEON_SINGLE_CRTC)
1045 rdev->num_crtc = 1;
1046 else {
1047 if (ASIC_IS_DCE41(rdev))
1048 rdev->num_crtc = 2;
1049 else if (ASIC_IS_DCE4(rdev))
1050 rdev->num_crtc = 6;
1051 else
1052 rdev->num_crtc = 2;
1053 }
1054
1055 return 0; 1064 return 0;
1056} 1065}
1057 1066
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 90dfb2b8cf03..1e725d9f767f 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1246,6 +1246,10 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1246 } 1246 }
1247 *dcpll = *p1pll; 1247 *dcpll = *p1pll;
1248 1248
1249 rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
1250 if (rdev->clock.max_pixel_clock == 0)
1251 rdev->clock.max_pixel_clock = 40000;
1252
1249 return true; 1253 return true;
1250 } 1254 }
1251 1255
@@ -2603,6 +2607,10 @@ void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 v
2603 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) 2607 if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
2604 return; 2608 return;
2605 2609
 2610	/* 0xff01 is a flag rather than an actual voltage */
2611 if (voltage_level == 0xff01)
2612 return;
2613
2606 switch (crev) { 2614 switch (crev) {
2607 case 1: 2615 case 1:
2608 args.v1.ucVoltageType = voltage_type; 2616 args.v1.ucVoltageType = voltage_type;
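Editor's note: the r600.c and radeon_atombios.c hunks add the same guard — a requested VDDC value of 0xff01 is a table sentinel, not a programmable voltage, so the set-voltage paths return early instead of handing it to the ATOM command table. A trivial sketch of that filter (the macro and helper names are made up):

#define ATOM_VDDC_FLAG_SENTINEL 0xff01	/* flag value, not a real voltage */

/* Nonzero when the value should be programmed, zero when it is only
 * a flag and must be skipped. */
static int vddc_is_programmable(unsigned short voltage_level)
{
	return voltage_level != ATOM_VDDC_FLAG_SENTINEL;
}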
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 5249af8931e6..2d48e7a1474b 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -117,7 +117,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
117 p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff; 117 p1pll->reference_div = RREG32_PLL(RADEON_PPLL_REF_DIV) & 0x3ff;
118 if (p1pll->reference_div < 2) 118 if (p1pll->reference_div < 2)
119 p1pll->reference_div = 12; 119 p1pll->reference_div = 12;
120 p2pll->reference_div = p1pll->reference_div; 120 p2pll->reference_div = p1pll->reference_div;
121 121
122 /* These aren't in the device-tree */ 122 /* These aren't in the device-tree */
123 if (rdev->family >= CHIP_R420) { 123 if (rdev->family >= CHIP_R420) {
@@ -139,6 +139,8 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
139 p2pll->pll_out_min = 12500; 139 p2pll->pll_out_min = 12500;
140 p2pll->pll_out_max = 35000; 140 p2pll->pll_out_max = 35000;
141 } 141 }
142 /* not sure what the max should be in all cases */
143 rdev->clock.max_pixel_clock = 35000;
142 144
143 spll->reference_freq = mpll->reference_freq = p1pll->reference_freq; 145 spll->reference_freq = mpll->reference_freq = p1pll->reference_freq;
144 spll->reference_div = mpll->reference_div = 146 spll->reference_div = mpll->reference_div =
@@ -151,7 +153,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
151 else 153 else
152 rdev->clock.default_sclk = 154 rdev->clock.default_sclk =
153 radeon_legacy_get_engine_clock(rdev); 155 radeon_legacy_get_engine_clock(rdev);
154 156
155 val = of_get_property(dp, "ATY,MCLK", NULL); 157 val = of_get_property(dp, "ATY,MCLK", NULL);
156 if (val && *val) 158 if (val && *val)
157 rdev->clock.default_mclk = (*val) / 10; 159 rdev->clock.default_mclk = (*val) / 10;
@@ -160,7 +162,7 @@ static bool __devinit radeon_read_clocks_OF(struct drm_device *dev)
160 radeon_legacy_get_memory_clock(rdev); 162 radeon_legacy_get_memory_clock(rdev);
161 163
162 DRM_INFO("Using device-tree clock info\n"); 164 DRM_INFO("Using device-tree clock info\n");
163 165
164 return true; 166 return true;
165} 167}
166#else 168#else
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 5b991f7c6e2a..e4594676a07c 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -866,6 +866,11 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
866 rdev->clock.default_sclk = sclk; 866 rdev->clock.default_sclk = sclk;
867 rdev->clock.default_mclk = mclk; 867 rdev->clock.default_mclk = mclk;
868 868
869 if (RBIOS32(pll_info + 0x16))
870 rdev->clock.max_pixel_clock = RBIOS32(pll_info + 0x16);
871 else
872 rdev->clock.max_pixel_clock = 35000; /* might need something asic specific */
873
869 return true; 874 return true;
870 } 875 }
871 return false; 876 return false;
@@ -1548,10 +1553,12 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1548 (rdev->pdev->subsystem_device == 0x4a48)) { 1553 (rdev->pdev->subsystem_device == 0x4a48)) {
1549 /* Mac X800 */ 1554 /* Mac X800 */
1550 rdev->mode_info.connector_table = CT_MAC_X800; 1555 rdev->mode_info.connector_table = CT_MAC_X800;
1551 } else if ((rdev->pdev->device == 0x4150) && 1556 } else if ((of_machine_is_compatible("PowerMac7,2") ||
1557 of_machine_is_compatible("PowerMac7,3")) &&
1558 (rdev->pdev->device == 0x4150) &&
1552 (rdev->pdev->subsystem_vendor == 0x1002) && 1559 (rdev->pdev->subsystem_vendor == 0x1002) &&
1553 (rdev->pdev->subsystem_device == 0x4150)) { 1560 (rdev->pdev->subsystem_device == 0x4150)) {
1554 /* Mac G5 9600 */ 1561 /* Mac G5 tower 9600 */
1555 rdev->mode_info.connector_table = CT_MAC_G5_9600; 1562 rdev->mode_info.connector_table = CT_MAC_G5_9600;
1556 } else 1563 } else
1557#endif /* CONFIG_PPC_PMAC */ 1564#endif /* CONFIG_PPC_PMAC */
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee1dccb3fec9..cbfca3a24fdf 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -44,6 +44,8 @@ extern void
44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder, 44radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
45 struct drm_connector *drm_connector); 45 struct drm_connector *drm_connector);
46 46
47bool radeon_connector_encoder_is_dp_bridge(struct drm_connector *connector);
48
47void radeon_connector_hotplug(struct drm_connector *connector) 49void radeon_connector_hotplug(struct drm_connector *connector)
48{ 50{
49 struct drm_device *dev = connector->dev; 51 struct drm_device *dev = connector->dev;
@@ -626,8 +628,14 @@ static int radeon_vga_get_modes(struct drm_connector *connector)
626static int radeon_vga_mode_valid(struct drm_connector *connector, 628static int radeon_vga_mode_valid(struct drm_connector *connector,
627 struct drm_display_mode *mode) 629 struct drm_display_mode *mode)
628{ 630{
631 struct drm_device *dev = connector->dev;
632 struct radeon_device *rdev = dev->dev_private;
633
629 /* XXX check mode bandwidth */ 634 /* XXX check mode bandwidth */
630 /* XXX verify against max DAC output frequency */ 635
636 if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
637 return MODE_CLOCK_HIGH;
638
631 return MODE_OK; 639 return MODE_OK;
632} 640}
633 641
@@ -830,6 +838,13 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
830 if (!radeon_connector->edid) { 838 if (!radeon_connector->edid) {
831 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n", 839 DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
832 drm_get_connector_name(connector)); 840 drm_get_connector_name(connector));
841 /* rs690 seems to have a problem with connectors not existing and always
842 * return a block of 0's. If we see this just stop polling on this output */
843 if ((rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) && radeon_connector->base.null_edid_counter) {
844 ret = connector_status_disconnected;
845 DRM_ERROR("%s: detected RS690 floating bus bug, stopping ddc detect\n", drm_get_connector_name(connector));
846 radeon_connector->ddc_bus = NULL;
847 }
833 } else { 848 } else {
834 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL); 849 radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
835 850
@@ -1015,6 +1030,11 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
1015 } else 1030 } else
1016 return MODE_CLOCK_HIGH; 1031 return MODE_CLOCK_HIGH;
1017 } 1032 }
1033
1034 /* check against the max pixel clock */
1035 if ((mode->clock / 10) > rdev->clock.max_pixel_clock)
1036 return MODE_CLOCK_HIGH;
1037
1018 return MODE_OK; 1038 return MODE_OK;
1019} 1039}
1020 1040
@@ -1052,10 +1072,11 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1052{ 1072{
1053 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1073 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1054 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1074 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1075 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1055 int ret; 1076 int ret;
1056 1077
1057 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1078 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
1058 struct drm_encoder *encoder; 1079 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
1059 struct drm_display_mode *mode; 1080 struct drm_display_mode *mode;
1060 1081
1061 if (!radeon_dig_connector->edp_on) 1082 if (!radeon_dig_connector->edp_on)
@@ -1067,7 +1088,6 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1067 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1088 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1068 1089
1069 if (ret > 0) { 1090 if (ret > 0) {
1070 encoder = radeon_best_single_encoder(connector);
1071 if (encoder) { 1091 if (encoder) {
1072 radeon_fixup_lvds_native_mode(encoder, connector); 1092 radeon_fixup_lvds_native_mode(encoder, connector);
1073 /* add scaled modes */ 1093 /* add scaled modes */
@@ -1091,8 +1111,14 @@ static int radeon_dp_get_modes(struct drm_connector *connector)
1091 /* add scaled modes */ 1111 /* add scaled modes */
1092 radeon_add_common_modes(encoder, connector); 1112 radeon_add_common_modes(encoder, connector);
1093 } 1113 }
1094 } else 1114 } else {
1115 /* need to setup ddc on the bridge */
1116 if (radeon_connector_encoder_is_dp_bridge(connector)) {
1117 if (encoder)
1118 radeon_atom_ext_encoder_setup_ddc(encoder);
1119 }
1095 ret = radeon_ddc_get_modes(radeon_connector); 1120 ret = radeon_ddc_get_modes(radeon_connector);
1121 }
1096 1122
1097 return ret; 1123 return ret;
1098} 1124}
@@ -1176,14 +1202,15 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1176 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 1202 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
1177 enum drm_connector_status ret = connector_status_disconnected; 1203 enum drm_connector_status ret = connector_status_disconnected;
1178 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv; 1204 struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
1205 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1179 1206
1180 if (radeon_connector->edid) { 1207 if (radeon_connector->edid) {
1181 kfree(radeon_connector->edid); 1208 kfree(radeon_connector->edid);
1182 radeon_connector->edid = NULL; 1209 radeon_connector->edid = NULL;
1183 } 1210 }
1184 1211
1185 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1212 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
1186 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1213 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
1187 if (encoder) { 1214 if (encoder) {
1188 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1215 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1189 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 1216 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
@@ -1203,6 +1230,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1203 atombios_set_edp_panel_power(connector, 1230 atombios_set_edp_panel_power(connector,
1204 ATOM_TRANSMITTER_ACTION_POWER_OFF); 1231 ATOM_TRANSMITTER_ACTION_POWER_OFF);
1205 } else { 1232 } else {
1233 /* need to setup ddc on the bridge */
1234 if (radeon_connector_encoder_is_dp_bridge(connector)) {
1235 if (encoder)
1236 radeon_atom_ext_encoder_setup_ddc(encoder);
1237 }
1206 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector); 1238 radeon_dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
1207 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) { 1239 if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
1208 ret = connector_status_connected; 1240 ret = connector_status_connected;
@@ -1217,6 +1249,16 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1217 ret = connector_status_connected; 1249 ret = connector_status_connected;
1218 } 1250 }
1219 } 1251 }
1252
1253 if ((ret == connector_status_disconnected) &&
1254 radeon_connector->dac_load_detect) {
1255 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1256 struct drm_encoder_helper_funcs *encoder_funcs;
1257 if (encoder) {
1258 encoder_funcs = encoder->helper_private;
1259 ret = encoder_funcs->detect(encoder, connector);
1260 }
1261 }
1220 } 1262 }
1221 1263
1222 radeon_connector_update_scratch_regs(connector, ret); 1264 radeon_connector_update_scratch_regs(connector, ret);
@@ -1231,7 +1273,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1231 1273
1232 /* XXX check mode bandwidth */ 1274 /* XXX check mode bandwidth */
1233 1275
1234 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) { 1276 if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
1277 (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
1235 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 1278 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
1236 1279
1237 if ((mode->hdisplay < 320) || (mode->vdisplay < 240)) 1280 if ((mode->hdisplay < 320) || (mode->vdisplay < 240))
@@ -1241,7 +1284,7 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
1241 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 1284 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
1242 struct drm_display_mode *native_mode = &radeon_encoder->native_mode; 1285 struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
1243 1286
1244 /* AVIVO hardware supports downscaling modes larger than the panel 1287 /* AVIVO hardware supports downscaling modes larger than the panel
1245 * to the panel size, but I'm not sure this is desirable. 1288 * to the panel size, but I'm not sure this is desirable.
1246 */ 1289 */
1247 if ((mode->hdisplay > native_mode->hdisplay) || 1290 if ((mode->hdisplay > native_mode->hdisplay) ||
@@ -1390,6 +1433,10 @@ radeon_add_atom_connector(struct drm_device *dev,
1390 default: 1433 default:
1391 connector->interlace_allowed = true; 1434 connector->interlace_allowed = true;
1392 connector->doublescan_allowed = true; 1435 connector->doublescan_allowed = true;
1436 radeon_connector->dac_load_detect = true;
1437 drm_connector_attach_property(&radeon_connector->base,
1438 rdev->mode_info.load_detect_property,
1439 1);
1393 break; 1440 break;
1394 case DRM_MODE_CONNECTOR_DVII: 1441 case DRM_MODE_CONNECTOR_DVII:
1395 case DRM_MODE_CONNECTOR_DVID: 1442 case DRM_MODE_CONNECTOR_DVID:
@@ -1411,6 +1458,12 @@ radeon_add_atom_connector(struct drm_device *dev,
1411 connector->doublescan_allowed = true; 1458 connector->doublescan_allowed = true;
1412 else 1459 else
1413 connector->doublescan_allowed = false; 1460 connector->doublescan_allowed = false;
1461 if (connector_type == DRM_MODE_CONNECTOR_DVII) {
1462 radeon_connector->dac_load_detect = true;
1463 drm_connector_attach_property(&radeon_connector->base,
1464 rdev->mode_info.load_detect_property,
1465 1);
1466 }
1414 break; 1467 break;
1415 case DRM_MODE_CONNECTOR_LVDS: 1468 case DRM_MODE_CONNECTOR_LVDS:
1416 case DRM_MODE_CONNECTOR_eDP: 1469 case DRM_MODE_CONNECTOR_eDP:
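Editor's note: the new mode_valid checks compare against rdev->clock.max_pixel_clock, which (as the /10 conversion implies) holds the firmware's usMaxPixelClock in 10 kHz units, while a DRM mode clock is in kHz. A small standalone sketch of the same filter, not the driver's helper:

/* Reject modes whose pixel clock exceeds the board limit.
 * mode_clock_khz: mode clock in kHz (drm_display_mode::clock).
 * max_pixel_clock_10khz: limit in 10 kHz units (firmware value). */
static int mode_clock_ok(int mode_clock_khz,
			 unsigned int max_pixel_clock_10khz)
{
	return (mode_clock_khz / 10) <= (int)max_pixel_clock_10khz;
}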
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 8c1916941871..fae00c0d75aa 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -228,6 +228,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
228 parser.filp = filp; 228 parser.filp = filp;
229 parser.rdev = rdev; 229 parser.rdev = rdev;
230 parser.dev = rdev->dev; 230 parser.dev = rdev->dev;
231 parser.family = rdev->family;
231 r = radeon_cs_parser_init(&parser, data); 232 r = radeon_cs_parser_init(&parser, data);
232 if (r) { 233 if (r) {
233 DRM_ERROR("Failed to initialize parser !\n"); 234 DRM_ERROR("Failed to initialize parser !\n");
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5b61364e31f4..7cfaa7e2f3b5 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -82,6 +82,8 @@ static const char radeon_family_name[][16] = {
82 "CYPRESS", 82 "CYPRESS",
83 "HEMLOCK", 83 "HEMLOCK",
84 "PALM", 84 "PALM",
85 "SUMO",
86 "SUMO2",
85 "BARTS", 87 "BARTS",
86 "TURKS", 88 "TURKS",
87 "CAICOS", 89 "CAICOS",
@@ -213,6 +215,8 @@ int radeon_wb_init(struct radeon_device *rdev)
213 return r; 215 return r;
214 } 216 }
215 217
218 /* clear wb memory */
219 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
216 /* disable event_write fences */ 220 /* disable event_write fences */
217 rdev->wb.use_event = false; 221 rdev->wb.use_event = false;
218 /* disabled via module param */ 222 /* disabled via module param */
@@ -752,6 +756,7 @@ int radeon_device_init(struct radeon_device *rdev,
752 dma_bits = rdev->need_dma32 ? 32 : 40; 756 dma_bits = rdev->need_dma32 ? 32 : 40;
753 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits)); 757 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
754 if (r) { 758 if (r) {
759 rdev->need_dma32 = true;
755 printk(KERN_WARNING "radeon: No suitable DMA available.\n"); 760 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
756 } 761 }
757 762
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ae247eec87c0..292f73f0ddbd 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -264,6 +264,8 @@ static void radeon_unpin_work_func(struct work_struct *__work)
264 radeon_bo_unreserve(work->old_rbo); 264 radeon_bo_unreserve(work->old_rbo);
265 } else 265 } else
266 DRM_ERROR("failed to reserve buffer after flip\n"); 266 DRM_ERROR("failed to reserve buffer after flip\n");
267
268 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
267 kfree(work); 269 kfree(work);
268} 270}
269 271
@@ -371,6 +373,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
371 new_radeon_fb = to_radeon_framebuffer(fb); 373 new_radeon_fb = to_radeon_framebuffer(fb);
372 /* schedule unpin of the old buffer */ 374 /* schedule unpin of the old buffer */
373 obj = old_radeon_fb->obj; 375 obj = old_radeon_fb->obj;
376 /* take a reference to the old object */
377 drm_gem_object_reference(obj);
374 rbo = gem_to_radeon_bo(obj); 378 rbo = gem_to_radeon_bo(obj);
375 work->old_rbo = rbo; 379 work->old_rbo = rbo;
376 INIT_WORK(&work->work, radeon_unpin_work_func); 380 INIT_WORK(&work->work, radeon_unpin_work_func);
@@ -378,12 +382,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
378 /* We borrow the event spin lock for protecting unpin_work */ 382 /* We borrow the event spin lock for protecting unpin_work */
379 spin_lock_irqsave(&dev->event_lock, flags); 383 spin_lock_irqsave(&dev->event_lock, flags);
380 if (radeon_crtc->unpin_work) { 384 if (radeon_crtc->unpin_work) {
381 spin_unlock_irqrestore(&dev->event_lock, flags);
382 kfree(work);
383 radeon_fence_unref(&fence);
384
385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); 385 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
386 return -EBUSY; 386 r = -EBUSY;
387 goto unlock_free;
387 } 388 }
388 radeon_crtc->unpin_work = work; 389 radeon_crtc->unpin_work = work;
389 radeon_crtc->deferred_flip_completion = 0; 390 radeon_crtc->deferred_flip_completion = 0;
@@ -497,6 +498,8 @@ pflip_cleanup1:
497pflip_cleanup: 498pflip_cleanup:
498 spin_lock_irqsave(&dev->event_lock, flags); 499 spin_lock_irqsave(&dev->event_lock, flags);
499 radeon_crtc->unpin_work = NULL; 500 radeon_crtc->unpin_work = NULL;
501unlock_free:
502 drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
500 spin_unlock_irqrestore(&dev->event_lock, flags); 503 spin_unlock_irqrestore(&dev->event_lock, flags);
501 radeon_fence_unref(&fence); 504 radeon_fence_unref(&fence);
502 kfree(work); 505 kfree(work);
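Editor's note: the radeon_display.c change closes a lifetime race in the page-flip path — a GEM reference is taken on the old framebuffer's object before the flip work is queued, and it is dropped either in the unpin worker once the flip completes or on every error path via the new unlock_free label. A generic sketch of the pattern, with ref/unref standing in for drm_gem_object_reference/unreference:

struct flip_work { void *old_obj; };

/* Hold a reference for the deferred work; release it exactly once,
 * either when the worker runs or when queueing fails. */
static int queue_flip(struct flip_work *w, void *old_obj,
		      void (*ref)(void *), void (*unref)(void *),
		      int (*try_queue)(struct flip_work *))
{
	ref(old_obj);			/* keep object alive until unpin */
	w->old_obj = old_obj;

	if (try_queue(w) != 0) {
		unref(old_obj);		/* error path: drop the reference */
		return -1;
	}
	return 0;			/* worker drops the reference later */
}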
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 1d330606292f..73dfbe8e5f9e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -113,7 +113,7 @@ int radeon_benchmarking = 0;
113int radeon_testing = 0; 113int radeon_testing = 0;
114int radeon_connector_table = 0; 114int radeon_connector_table = 0;
115int radeon_tv = 1; 115int radeon_tv = 1;
116int radeon_audio = 1; 116int radeon_audio = 0;
117int radeon_disp_priority = 0; 117int radeon_disp_priority = 0;
118int radeon_hw_i2c = 0; 118int radeon_hw_i2c = 0;
119int radeon_pcie_gen2 = 0; 119int radeon_pcie_gen2 = 0;
@@ -151,7 +151,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
151MODULE_PARM_DESC(tv, "TV enable (0 = disable)"); 151MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
152module_param_named(tv, radeon_tv, int, 0444); 152module_param_named(tv, radeon_tv, int, 0444);
153 153
154MODULE_PARM_DESC(audio, "Audio enable (0 = disable)"); 154MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
155module_param_named(audio, radeon_audio, int, 0444); 155module_param_named(audio, radeon_audio, int, 0444);
156 156
157MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)"); 157MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 1b557554696e..b293487e5aa3 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -367,7 +367,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
367 } 367 }
368 368
369 if (ASIC_IS_DCE3(rdev) && 369 if (ASIC_IS_DCE3(rdev) &&
370 (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) { 370 ((radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) ||
371 radeon_encoder_is_dp_bridge(encoder))) {
371 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 372 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
372 radeon_dp_set_link_config(connector, mode); 373 radeon_dp_set_link_config(connector, mode);
373 } 374 }
@@ -660,21 +661,16 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
660 if (radeon_encoder_is_dp_bridge(encoder)) 661 if (radeon_encoder_is_dp_bridge(encoder))
661 return ATOM_ENCODER_MODE_DP; 662 return ATOM_ENCODER_MODE_DP;
662 663
664 /* DVO is always DVO */
665 if (radeon_encoder->encoder_id == ATOM_ENCODER_MODE_DVO)
666 return ATOM_ENCODER_MODE_DVO;
667
663 connector = radeon_get_connector_for_encoder(encoder); 668 connector = radeon_get_connector_for_encoder(encoder);
664 if (!connector) { 669 /* if we don't have an active device yet, just use one of
665 switch (radeon_encoder->encoder_id) { 670 * the connectors tied to the encoder.
666 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: 671 */
667 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: 672 if (!connector)
668 case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2: 673 connector = radeon_get_connector_for_encoder_init(encoder);
669 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
670 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
671 return ATOM_ENCODER_MODE_DVI;
672 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
673 case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2:
674 default:
675 return ATOM_ENCODER_MODE_CRT;
676 }
677 }
678 radeon_connector = to_radeon_connector(connector); 674 radeon_connector = to_radeon_connector(connector);
679 675
680 switch (connector->connector_type) { 676 switch (connector->connector_type) {
@@ -954,10 +950,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
954 int dp_lane_count = 0; 950 int dp_lane_count = 0;
955 int connector_object_id = 0; 951 int connector_object_id = 0;
956 int igp_lane_info = 0; 952 int igp_lane_info = 0;
953 int dig_encoder = dig->dig_encoder;
957 954
958 if (action == ATOM_TRANSMITTER_ACTION_INIT) 955 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
959 connector = radeon_get_connector_for_encoder_init(encoder); 956 connector = radeon_get_connector_for_encoder_init(encoder);
960 else 957 /* just needed to avoid bailing in the encoder check. the encoder
958 * isn't used for init
959 */
960 dig_encoder = 0;
961 } else
961 connector = radeon_get_connector_for_encoder(encoder); 962 connector = radeon_get_connector_for_encoder(encoder);
962 963
963 if (connector) { 964 if (connector) {
@@ -973,7 +974,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
973 } 974 }
974 975
975 /* no dig encoder assigned */ 976 /* no dig encoder assigned */
976 if (dig->dig_encoder == -1) 977 if (dig_encoder == -1)
977 return; 978 return;
978 979
979 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) 980 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
@@ -1023,7 +1024,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1023 1024
1024 if (dig->linkb) 1025 if (dig->linkb)
1025 args.v3.acConfig.ucLinkSel = 1; 1026 args.v3.acConfig.ucLinkSel = 1;
1026 if (dig->dig_encoder & 1) 1027 if (dig_encoder & 1)
1027 args.v3.acConfig.ucEncoderSel = 1; 1028 args.v3.acConfig.ucEncoderSel = 1;
1028 1029
1029 /* Select the PLL for the PHY 1030 /* Select the PLL for the PHY
@@ -1073,7 +1074,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1073 args.v3.acConfig.fDualLinkConnector = 1; 1074 args.v3.acConfig.fDualLinkConnector = 1;
1074 } 1075 }
1075 } else if (ASIC_IS_DCE32(rdev)) { 1076 } else if (ASIC_IS_DCE32(rdev)) {
1076 args.v2.acConfig.ucEncoderSel = dig->dig_encoder; 1077 args.v2.acConfig.ucEncoderSel = dig_encoder;
1077 if (dig->linkb) 1078 if (dig->linkb)
1078 args.v2.acConfig.ucLinkSel = 1; 1079 args.v2.acConfig.ucLinkSel = 1;
1079 1080
@@ -1089,9 +1090,10 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1089 break; 1090 break;
1090 } 1091 }
1091 1092
1092 if (is_dp) 1093 if (is_dp) {
1093 args.v2.acConfig.fCoherentMode = 1; 1094 args.v2.acConfig.fCoherentMode = 1;
1094 else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) { 1095 args.v2.acConfig.fDPConnector = 1;
1096 } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
1095 if (dig->coherent_mode) 1097 if (dig->coherent_mode)
1096 args.v2.acConfig.fCoherentMode = 1; 1098 args.v2.acConfig.fCoherentMode = 1;
1097 if (radeon_encoder->pixel_clock > 165000) 1099 if (radeon_encoder->pixel_clock > 165000)
@@ -1100,7 +1102,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
1100 } else { 1102 } else {
1101 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL; 1103 args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
1102 1104
1103 if (dig->dig_encoder) 1105 if (dig_encoder)
1104 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER; 1106 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
1105 else 1107 else
1106 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER; 1108 args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
@@ -1430,7 +1432,11 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1430 if (is_dig) { 1432 if (is_dig) {
1431 switch (mode) { 1433 switch (mode) {
1432 case DRM_MODE_DPMS_ON: 1434 case DRM_MODE_DPMS_ON:
1433 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); 1435 /* some early dce3.2 boards have a bug in their transmitter control table */
1436 if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
1437 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
1438 else
1439 atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
1434 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) { 1440 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
1435 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); 1441 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
1436 1442
@@ -1521,26 +1527,29 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
1521 } 1527 }
1522 1528
1523 if (ext_encoder) { 1529 if (ext_encoder) {
1524 int action;
1525
1526 switch (mode) { 1530 switch (mode) {
1527 case DRM_MODE_DPMS_ON: 1531 case DRM_MODE_DPMS_ON:
1528 default: 1532 default:
1529 if (ASIC_IS_DCE41(rdev)) 1533 if (ASIC_IS_DCE41(rdev)) {
1530 action = EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT; 1534 atombios_external_encoder_setup(encoder, ext_encoder,
1531 else 1535 EXTERNAL_ENCODER_ACTION_V3_ENABLE_OUTPUT);
1532 action = ATOM_ENABLE; 1536 atombios_external_encoder_setup(encoder, ext_encoder,
1537 EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING_OFF);
1538 } else
1539 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_ENABLE);
1533 break; 1540 break;
1534 case DRM_MODE_DPMS_STANDBY: 1541 case DRM_MODE_DPMS_STANDBY:
1535 case DRM_MODE_DPMS_SUSPEND: 1542 case DRM_MODE_DPMS_SUSPEND:
1536 case DRM_MODE_DPMS_OFF: 1543 case DRM_MODE_DPMS_OFF:
1537 if (ASIC_IS_DCE41(rdev)) 1544 if (ASIC_IS_DCE41(rdev)) {
1538 action = EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT; 1545 atombios_external_encoder_setup(encoder, ext_encoder,
1539 else 1546 EXTERNAL_ENCODER_ACTION_V3_ENCODER_BLANKING);
1540 action = ATOM_DISABLE; 1547 atombios_external_encoder_setup(encoder, ext_encoder,
1548 EXTERNAL_ENCODER_ACTION_V3_DISABLE_OUTPUT);
1549 } else
1550 atombios_external_encoder_setup(encoder, ext_encoder, ATOM_DISABLE);
1541 break; 1551 break;
1542 } 1552 }
1543 atombios_external_encoder_setup(encoder, ext_encoder, action);
1544 } 1553 }
1545 1554
1546 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false); 1555 radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
@@ -1999,6 +2008,65 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
1999 return connector_status_disconnected; 2008 return connector_status_disconnected;
2000} 2009}
2001 2010
2011static enum drm_connector_status
2012radeon_atom_dig_detect(struct drm_encoder *encoder, struct drm_connector *connector)
2013{
2014 struct drm_device *dev = encoder->dev;
2015 struct radeon_device *rdev = dev->dev_private;
2016 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
2017 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
2018 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
2019 u32 bios_0_scratch;
2020
2021 if (!ASIC_IS_DCE4(rdev))
2022 return connector_status_unknown;
2023
2024 if (!ext_encoder)
2025 return connector_status_unknown;
2026
2027 if ((radeon_connector->devices & ATOM_DEVICE_CRT_SUPPORT) == 0)
2028 return connector_status_unknown;
2029
2030 /* load detect on the dp bridge */
2031 atombios_external_encoder_setup(encoder, ext_encoder,
2032 EXTERNAL_ENCODER_ACTION_V3_DACLOAD_DETECTION);
2033
2034 bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH);
2035
2036 DRM_DEBUG_KMS("Bios 0 scratch %x %08x\n", bios_0_scratch, radeon_encoder->devices);
2037 if (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT) {
2038 if (bios_0_scratch & ATOM_S0_CRT1_MASK)
2039 return connector_status_connected;
2040 }
2041 if (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT) {
2042 if (bios_0_scratch & ATOM_S0_CRT2_MASK)
2043 return connector_status_connected;
2044 }
2045 if (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT) {
2046 if (bios_0_scratch & (ATOM_S0_CV_MASK|ATOM_S0_CV_MASK_A))
2047 return connector_status_connected;
2048 }
2049 if (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT) {
2050 if (bios_0_scratch & (ATOM_S0_TV1_COMPOSITE | ATOM_S0_TV1_COMPOSITE_A))
2051 return connector_status_connected; /* CTV */
2052 else if (bios_0_scratch & (ATOM_S0_TV1_SVIDEO | ATOM_S0_TV1_SVIDEO_A))
2053 return connector_status_connected; /* STV */
2054 }
2055 return connector_status_disconnected;
2056}
2057
2058void
2059radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder)
2060{
2061 struct drm_encoder *ext_encoder = radeon_atom_get_external_encoder(encoder);
2062
2063 if (ext_encoder)
2064 /* ddc_setup on the dp bridge */
2065 atombios_external_encoder_setup(encoder, ext_encoder,
2066 EXTERNAL_ENCODER_ACTION_V3_DDC_SETUP);
2067
2068}
2069
2002static void radeon_atom_encoder_prepare(struct drm_encoder *encoder) 2070static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
2003{ 2071{
2004 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 2072 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
@@ -2162,7 +2230,7 @@ static const struct drm_encoder_helper_funcs radeon_atom_dig_helper_funcs = {
2162 .mode_set = radeon_atom_encoder_mode_set, 2230 .mode_set = radeon_atom_encoder_mode_set,
2163 .commit = radeon_atom_encoder_commit, 2231 .commit = radeon_atom_encoder_commit,
2164 .disable = radeon_atom_encoder_disable, 2232 .disable = radeon_atom_encoder_disable,
2165 /* no detect for TMDS/LVDS yet */ 2233 .detect = radeon_atom_dig_detect,
2166}; 2234};
2167 2235
2168static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = { 2236static const struct drm_encoder_helper_funcs radeon_atom_dac_helper_funcs = {
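Note on the radeon_encoders.c diff above: DCE4 boards that route analog output through an external DP bridge gain a .detect hook (radeon_atom_dig_detect). It asks the bridge to run DAC load detection and then tests the BIOS scratch word against the devices the connector supports. A minimal stand-alone sketch of that bitmask check follows; the masks and names are hypothetical, not the AtomBIOS scratch layout.

    /* Sketch only: the masks and names are hypothetical, not the AtomBIOS layout. */
    #include <stdint.h>
    #include <stdio.h>

    #define DEV_CRT1 (1u << 0)
    #define DEV_CRT2 (1u << 1)
    #define DEV_TV1  (1u << 2)

    /* 'status' is what the load-detect pass left behind, 'supported' is the
     * set of devices this connector can drive. */
    static int load_detect(uint32_t status, uint32_t supported)
    {
        if ((supported & DEV_CRT1) && (status & DEV_CRT1))
            return 1;
        if ((supported & DEV_CRT2) && (status & DEV_CRT2))
            return 1;
        if ((supported & DEV_TV1) && (status & DEV_TV1))
            return 1;
        return 0;                     /* nothing pulled the DAC load */
    }

    int main(void)
    {
        puts(load_detect(DEV_CRT2, DEV_CRT1 | DEV_CRT2) ? "connected"
                                                        : "disconnected");
        return 0;
    }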
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 6f1d9e563e77..ec2f1ea84f81 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -81,6 +81,8 @@ enum radeon_family {
81 CHIP_CYPRESS, 81 CHIP_CYPRESS,
82 CHIP_HEMLOCK, 82 CHIP_HEMLOCK,
83 CHIP_PALM, 83 CHIP_PALM,
84 CHIP_SUMO,
85 CHIP_SUMO2,
84 CHIP_BARTS, 86 CHIP_BARTS,
85 CHIP_TURKS, 87 CHIP_TURKS,
86 CHIP_CAICOS, 88 CHIP_CAICOS,
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 1f8229436570..021d2b6b556f 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -40,6 +40,35 @@
40#include "radeon.h" 40#include "radeon.h"
41#include "radeon_trace.h" 41#include "radeon_trace.h"
42 42
43static void radeon_fence_write(struct radeon_device *rdev, u32 seq)
44{
45 if (rdev->wb.enabled) {
46 u32 scratch_index;
47 if (rdev->wb.use_event)
48 scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
49 else
50 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
 51 rdev->wb.wb[scratch_index/4] = cpu_to_le32(seq);
52 } else
53 WREG32(rdev->fence_drv.scratch_reg, seq);
54}
55
56static u32 radeon_fence_read(struct radeon_device *rdev)
57{
58 u32 seq;
59
60 if (rdev->wb.enabled) {
61 u32 scratch_index;
62 if (rdev->wb.use_event)
63 scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
64 else
65 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
66 seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
67 } else
68 seq = RREG32(rdev->fence_drv.scratch_reg);
69 return seq;
70}
71
43int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence) 72int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
44{ 73{
45 unsigned long irq_flags; 74 unsigned long irq_flags;
@@ -50,12 +79,12 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
50 return 0; 79 return 0;
51 } 80 }
52 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq); 81 fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
53 if (!rdev->cp.ready) { 82 if (!rdev->cp.ready)
54 /* FIXME: cp is not running assume everything is done right 83 /* FIXME: cp is not running assume everything is done right
55 * away 84 * away
56 */ 85 */
57 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 86 radeon_fence_write(rdev, fence->seq);
58 } else 87 else
59 radeon_fence_ring_emit(rdev, fence); 88 radeon_fence_ring_emit(rdev, fence);
60 89
61 trace_radeon_fence_emit(rdev->ddev, fence->seq); 90 trace_radeon_fence_emit(rdev->ddev, fence->seq);
@@ -73,15 +102,7 @@ static bool radeon_fence_poll_locked(struct radeon_device *rdev)
73 bool wake = false; 102 bool wake = false;
74 unsigned long cjiffies; 103 unsigned long cjiffies;
75 104
76 if (rdev->wb.enabled) { 105 seq = radeon_fence_read(rdev);
77 u32 scratch_index;
78 if (rdev->wb.use_event)
79 scratch_index = R600_WB_EVENT_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
80 else
81 scratch_index = RADEON_WB_SCRATCH_OFFSET + rdev->fence_drv.scratch_reg - rdev->scratch.reg_base;
82 seq = le32_to_cpu(rdev->wb.wb[scratch_index/4]);
83 } else
84 seq = RREG32(rdev->fence_drv.scratch_reg);
85 if (seq != rdev->fence_drv.last_seq) { 106 if (seq != rdev->fence_drv.last_seq) {
86 rdev->fence_drv.last_seq = seq; 107 rdev->fence_drv.last_seq = seq;
87 rdev->fence_drv.last_jiffies = jiffies; 108 rdev->fence_drv.last_jiffies = jiffies;
@@ -251,7 +272,7 @@ retry:
251 r = radeon_gpu_reset(rdev); 272 r = radeon_gpu_reset(rdev);
252 if (r) 273 if (r)
253 return r; 274 return r;
254 WREG32(rdev->fence_drv.scratch_reg, fence->seq); 275 radeon_fence_write(rdev, fence->seq);
255 rdev->gpu_lockup = false; 276 rdev->gpu_lockup = false;
256 } 277 }
257 timeout = RADEON_FENCE_JIFFIES_TIMEOUT; 278 timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
@@ -351,7 +372,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
351 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags); 372 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
352 return r; 373 return r;
353 } 374 }
354 WREG32(rdev->fence_drv.scratch_reg, 0); 375 radeon_fence_write(rdev, 0);
355 atomic_set(&rdev->fence_drv.seq, 0); 376 atomic_set(&rdev->fence_drv.seq, 0);
356 INIT_LIST_HEAD(&rdev->fence_drv.created); 377 INIT_LIST_HEAD(&rdev->fence_drv.created);
357 INIT_LIST_HEAD(&rdev->fence_drv.emited); 378 INIT_LIST_HEAD(&rdev->fence_drv.emited);
@@ -391,7 +412,7 @@ static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
391 struct radeon_fence *fence; 412 struct radeon_fence *fence;
392 413
393 seq_printf(m, "Last signaled fence 0x%08X\n", 414 seq_printf(m, "Last signaled fence 0x%08X\n",
394 RREG32(rdev->fence_drv.scratch_reg)); 415 radeon_fence_read(rdev));
395 if (!list_empty(&rdev->fence_drv.emited)) { 416 if (!list_empty(&rdev->fence_drv.emited)) {
396 fence = list_entry(rdev->fence_drv.emited.prev, 417 fence = list_entry(rdev->fence_drv.emited.prev,
397 struct radeon_fence, list); 418 struct radeon_fence, list);
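Note on the radeon_fence.c diff above: every read and write of the fence sequence number now goes through radeon_fence_read()/radeon_fence_write(), which pick between the GPU writeback slot and the scratch register in one place instead of at each call site. A minimal user-space sketch of the same split follows, with hypothetical names; the kernel helpers additionally byte-swap the writeback slot with cpu_to_le32()/le32_to_cpu(), which is omitted here.

    /* Sketch only: hypothetical names, not the radeon API. */
    #include <stdint.h>
    #include <stdio.h>

    struct fence_backend {
        int use_writeback;          /* 1: CPU-visible writeback memory, 0: register */
        volatile uint32_t *wb;      /* writeback slot */
        volatile uint32_t *reg;     /* stand-in for the scratch register */
    };

    static void fence_write(struct fence_backend *b, uint32_t seq)
    {
        if (b->use_writeback)
            *b->wb = seq;
        else
            *b->reg = seq;
    }

    static uint32_t fence_read(const struct fence_backend *b)
    {
        return b->use_writeback ? *b->wb : *b->reg;
    }

    int main(void)
    {
        uint32_t wb_slot = 0, reg = 0;
        struct fence_backend b = { 1, &wb_slot, &reg };

        fence_write(&b, 42);
        printf("last signaled fence: %u\n", fence_read(&b));
        return 0;
    }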
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 977a341266b6..6df4e3cec0c2 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -483,6 +483,8 @@ extern void radeon_atom_encoder_init(struct radeon_device *rdev);
483extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder, 483extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
484 int action, uint8_t lane_num, 484 int action, uint8_t lane_num,
485 uint8_t lane_set); 485 uint8_t lane_set);
486extern void radeon_atom_ext_encoder_setup_ddc(struct drm_encoder *encoder);
487extern struct drm_encoder *radeon_atom_get_external_encoder(struct drm_encoder *encoder);
486extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, 488extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
487 u8 write_byte, u8 *read_byte); 489 u8 write_byte, u8 *read_byte);
488 490
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 86eda1ea94df..aaa19dc418a0 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -487,6 +487,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
487 case THERMAL_TYPE_RV6XX: 487 case THERMAL_TYPE_RV6XX:
488 case THERMAL_TYPE_RV770: 488 case THERMAL_TYPE_RV770:
489 case THERMAL_TYPE_EVERGREEN: 489 case THERMAL_TYPE_EVERGREEN:
490 case THERMAL_TYPE_NI:
490 case THERMAL_TYPE_SUMO: 491 case THERMAL_TYPE_SUMO:
491 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev); 492 rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
492 if (IS_ERR(rdev->pm.int_hwmon_dev)) { 493 if (IS_ERR(rdev->pm.int_hwmon_dev)) {
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 92f1900dc7ca..ea49752ee99c 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -758,6 +758,5 @@ r600 0x9400
7580x00009714 VC_ENHANCE 7580x00009714 VC_ENHANCE
7590x00009830 DB_DEBUG 7590x00009830 DB_DEBUG
7600x00009838 DB_WATERMARKS 7600x00009838 DB_WATERMARKS
7610x00028D28 DB_SRESULTS_COMPARE_STATE0
7620x00028D44 DB_ALPHA_TO_MASK 7610x00028D44 DB_ALPHA_TO_MASK
7630x00009700 VC_CNTL 7620x00009700 VC_CNTL
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index ef8a5babe9f7..6f508ffd1035 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -105,6 +105,9 @@ void rv770_pm_misc(struct radeon_device *rdev)
105 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage; 105 struct radeon_voltage *voltage = &ps->clock_info[req_cm_idx].voltage;
106 106
107 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) { 107 if ((voltage->type == VOLTAGE_SW) && voltage->voltage) {
 108 /* 0xff01 is a flag rather than an actual voltage */
109 if (voltage->voltage == 0xff01)
110 return;
108 if (voltage->voltage != rdev->pm.current_vddc) { 111 if (voltage->voltage != rdev->pm.current_vddc) {
109 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC); 112 radeon_atom_set_voltage(rdev, voltage->voltage, SET_VOLTAGE_TYPE_ASIC_VDDC);
110 rdev->pm.current_vddc = voltage->voltage; 113 rdev->pm.current_vddc = voltage->voltage;
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index bf5f83ea14fe..cb1ee4e0050a 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -647,9 +647,6 @@ int savage_driver_firstopen(struct drm_device *dev)
647 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE, 647 ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
648 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING, 648 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
649 &dev_priv->aperture); 649 &dev_priv->aperture);
650 if (ret)
651 return ret;
652
653 return ret; 650 return ret;
654} 651}
655 652
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 67d2a7585934..36ca465c00ce 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -305,6 +305,7 @@ config HID_MULTITOUCH
305 - 3M PCT touch screens 305 - 3M PCT touch screens
306 - ActionStar dual touch panels 306 - ActionStar dual touch panels
307 - Cando dual touch panels 307 - Cando dual touch panels
308 - Chunghwa panels
308 - CVTouch panels 309 - CVTouch panels
309 - Cypress TrueTouch panels 310 - Cypress TrueTouch panels
310 - Elo TouchSystems IntelliTouch Plus panels 311 - Elo TouchSystems IntelliTouch Plus panels
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c957c4b4fe70..f7440e8ce3e7 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1359,6 +1359,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1359 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, 1359 { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
1360 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, 1360 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
1361 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) }, 1361 { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
1362 { HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT, USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
1362 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, 1363 { HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
1363 { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) }, 1364 { HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, USB_DEVICE_ID_CVTOUCH_SCREEN) },
1364 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, 1365 { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0b374a6d6db0..aecb5a4b8d6d 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -173,6 +173,9 @@
173#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d 173#define USB_DEVICE_ID_CHICONY_MULTI_TOUCH 0xb19d
174#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618 174#define USB_DEVICE_ID_CHICONY_WIRELESS 0x0618
175 175
176#define USB_VENDOR_ID_CHUNGHWAT 0x2247
177#define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001
178
176#define USB_VENDOR_ID_CIDC 0x1677 179#define USB_VENDOR_ID_CIDC 0x1677
177 180
178#define USB_VENDOR_ID_CMEDIA 0x0d8c 181#define USB_VENDOR_ID_CMEDIA 0x0d8c
@@ -622,6 +625,7 @@
622#define USB_VENDOR_ID_UCLOGIC 0x5543 625#define USB_VENDOR_ID_UCLOGIC 0x5543
623#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042 626#define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209 0x0042
624#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001 627#define USB_DEVICE_ID_UCLOGIC_TABLET_KNA5 0x6001
628#define USB_DEVICE_ID_UCLOGIC_TABLET_TWA60 0x0064
625#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003 629#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U 0x0003
626#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004 630#define USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U 0x0004
627#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005 631#define USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U 0x0005
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index a5eda4c8127a..0ec91c18a421 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -501,17 +501,9 @@ static int magicmouse_probe(struct hid_device *hdev,
501 } 501 }
502 report->size = 6; 502 report->size = 6;
503 503
504 /*
505 * The device reponds with 'invalid report id' when feature
506 * report switching it into multitouch mode is sent to it.
507 *
508 * This results in -EIO from the _raw low-level transport callback,
509 * but there seems to be no other way of switching the mode.
510 * Thus the super-ugly hacky success check below.
511 */
512 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature), 504 ret = hdev->hid_output_raw_report(hdev, feature, sizeof(feature),
513 HID_FEATURE_REPORT); 505 HID_FEATURE_REPORT);
514 if (ret != -EIO) { 506 if (ret != sizeof(feature)) {
515 hid_err(hdev, "unable to request touch data (%d)\n", ret); 507 hid_err(hdev, "unable to request touch data (%d)\n", ret);
516 goto err_stop_hw; 508 goto err_stop_hw;
517 } 509 }
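Note on the hid-magicmouse.c diff above: the probe used to treat -EIO from the mode-switch feature report as the expected outcome (per the removed comment); it now simply requires that the full report length was written. A sketch of that "short write is an error" check, with a hypothetical transport function and placeholder report bytes:

    /* Sketch only: send_report() and the report bytes are hypothetical,
     * not the HID transport API or the real mode-switch report. */
    #include <errno.h>
    #include <stdio.h>

    static int send_report(const unsigned char *buf, int len)
    {
        (void)buf;
        return len;                  /* pretend the whole report went out */
    }

    static int enable_touch_reports(void)
    {
        const unsigned char feature[] = { 0x01, 0x02 };  /* placeholder bytes */
        int ret = send_report(feature, (int)sizeof(feature));

        /* Treat anything other than a full-length write as failure;
         * a short transfer means the mode switch was never requested. */
        if (ret != (int)sizeof(feature)) {
            fprintf(stderr, "unable to request touch data (%d)\n", ret);
            return ret < 0 ? ret : -EIO;
        }
        return 0;
    }

    int main(void)
    {
        return enable_touch_reports() ? 1 : 0;
    }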
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index ecd4d2db9e80..0b2dcd0ee591 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -64,6 +64,7 @@ struct mt_device {
64 struct mt_class *mtclass; /* our mt device class */ 64 struct mt_class *mtclass; /* our mt device class */
65 unsigned last_field_index; /* last field index of the report */ 65 unsigned last_field_index; /* last field index of the report */
66 unsigned last_slot_field; /* the last field of a slot */ 66 unsigned last_slot_field; /* the last field of a slot */
67 int last_mt_collection; /* last known mt-related collection */
67 __s8 inputmode; /* InputMode HID feature, -1 if non-existent */ 68 __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
68 __u8 num_received; /* how many contacts we received */ 69 __u8 num_received; /* how many contacts we received */
69 __u8 num_expected; /* expected last contact index */ 70 __u8 num_expected; /* expected last contact index */
@@ -225,8 +226,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
225 cls->sn_move); 226 cls->sn_move);
226 /* touchscreen emulation */ 227 /* touchscreen emulation */
227 set_abs(hi->input, ABS_X, field, cls->sn_move); 228 set_abs(hi->input, ABS_X, field, cls->sn_move);
228 td->last_slot_field = usage->hid; 229 if (td->last_mt_collection == usage->collection_index) {
229 td->last_field_index = field->index; 230 td->last_slot_field = usage->hid;
231 td->last_field_index = field->index;
232 }
230 return 1; 233 return 1;
231 case HID_GD_Y: 234 case HID_GD_Y:
232 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) 235 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -237,8 +240,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
237 cls->sn_move); 240 cls->sn_move);
238 /* touchscreen emulation */ 241 /* touchscreen emulation */
239 set_abs(hi->input, ABS_Y, field, cls->sn_move); 242 set_abs(hi->input, ABS_Y, field, cls->sn_move);
240 td->last_slot_field = usage->hid; 243 if (td->last_mt_collection == usage->collection_index) {
241 td->last_field_index = field->index; 244 td->last_slot_field = usage->hid;
245 td->last_field_index = field->index;
246 }
242 return 1; 247 return 1;
243 } 248 }
244 return 0; 249 return 0;
@@ -246,31 +251,40 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
246 case HID_UP_DIGITIZER: 251 case HID_UP_DIGITIZER:
247 switch (usage->hid) { 252 switch (usage->hid) {
248 case HID_DG_INRANGE: 253 case HID_DG_INRANGE:
249 td->last_slot_field = usage->hid; 254 if (td->last_mt_collection == usage->collection_index) {
250 td->last_field_index = field->index; 255 td->last_slot_field = usage->hid;
256 td->last_field_index = field->index;
257 }
251 return 1; 258 return 1;
252 case HID_DG_CONFIDENCE: 259 case HID_DG_CONFIDENCE:
253 td->last_slot_field = usage->hid; 260 if (td->last_mt_collection == usage->collection_index) {
254 td->last_field_index = field->index; 261 td->last_slot_field = usage->hid;
262 td->last_field_index = field->index;
263 }
255 return 1; 264 return 1;
256 case HID_DG_TIPSWITCH: 265 case HID_DG_TIPSWITCH:
257 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH); 266 hid_map_usage(hi, usage, bit, max, EV_KEY, BTN_TOUCH);
258 input_set_capability(hi->input, EV_KEY, BTN_TOUCH); 267 input_set_capability(hi->input, EV_KEY, BTN_TOUCH);
259 td->last_slot_field = usage->hid; 268 if (td->last_mt_collection == usage->collection_index) {
260 td->last_field_index = field->index; 269 td->last_slot_field = usage->hid;
270 td->last_field_index = field->index;
271 }
261 return 1; 272 return 1;
262 case HID_DG_CONTACTID: 273 case HID_DG_CONTACTID:
263 input_mt_init_slots(hi->input, td->maxcontacts); 274 input_mt_init_slots(hi->input, td->maxcontacts);
264 td->last_slot_field = usage->hid; 275 td->last_slot_field = usage->hid;
265 td->last_field_index = field->index; 276 td->last_field_index = field->index;
277 td->last_mt_collection = usage->collection_index;
266 return 1; 278 return 1;
267 case HID_DG_WIDTH: 279 case HID_DG_WIDTH:
268 hid_map_usage(hi, usage, bit, max, 280 hid_map_usage(hi, usage, bit, max,
269 EV_ABS, ABS_MT_TOUCH_MAJOR); 281 EV_ABS, ABS_MT_TOUCH_MAJOR);
270 set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field, 282 set_abs(hi->input, ABS_MT_TOUCH_MAJOR, field,
271 cls->sn_width); 283 cls->sn_width);
272 td->last_slot_field = usage->hid; 284 if (td->last_mt_collection == usage->collection_index) {
273 td->last_field_index = field->index; 285 td->last_slot_field = usage->hid;
286 td->last_field_index = field->index;
287 }
274 return 1; 288 return 1;
275 case HID_DG_HEIGHT: 289 case HID_DG_HEIGHT:
276 hid_map_usage(hi, usage, bit, max, 290 hid_map_usage(hi, usage, bit, max,
@@ -279,8 +293,10 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
279 cls->sn_height); 293 cls->sn_height);
280 input_set_abs_params(hi->input, 294 input_set_abs_params(hi->input,
281 ABS_MT_ORIENTATION, 0, 1, 0, 0); 295 ABS_MT_ORIENTATION, 0, 1, 0, 0);
282 td->last_slot_field = usage->hid; 296 if (td->last_mt_collection == usage->collection_index) {
283 td->last_field_index = field->index; 297 td->last_slot_field = usage->hid;
298 td->last_field_index = field->index;
299 }
284 return 1; 300 return 1;
285 case HID_DG_TIPPRESSURE: 301 case HID_DG_TIPPRESSURE:
286 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP) 302 if (quirks & MT_QUIRK_EGALAX_XYZ_FIXUP)
@@ -292,16 +308,20 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
292 /* touchscreen emulation */ 308 /* touchscreen emulation */
293 set_abs(hi->input, ABS_PRESSURE, field, 309 set_abs(hi->input, ABS_PRESSURE, field,
294 cls->sn_pressure); 310 cls->sn_pressure);
295 td->last_slot_field = usage->hid; 311 if (td->last_mt_collection == usage->collection_index) {
296 td->last_field_index = field->index; 312 td->last_slot_field = usage->hid;
313 td->last_field_index = field->index;
314 }
297 return 1; 315 return 1;
298 case HID_DG_CONTACTCOUNT: 316 case HID_DG_CONTACTCOUNT:
299 td->last_field_index = field->index; 317 if (td->last_mt_collection == usage->collection_index)
318 td->last_field_index = field->index;
300 return 1; 319 return 1;
301 case HID_DG_CONTACTMAX: 320 case HID_DG_CONTACTMAX:
302 /* we don't set td->last_slot_field as contactcount and 321 /* we don't set td->last_slot_field as contactcount and
303 * contact max are global to the report */ 322 * contact max are global to the report */
304 td->last_field_index = field->index; 323 if (td->last_mt_collection == usage->collection_index)
324 td->last_field_index = field->index;
305 return -1; 325 return -1;
306 } 326 }
307 /* let hid-input decide for the others */ 327 /* let hid-input decide for the others */
@@ -516,6 +536,7 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
516 } 536 }
517 td->mtclass = mtclass; 537 td->mtclass = mtclass;
518 td->inputmode = -1; 538 td->inputmode = -1;
539 td->last_mt_collection = -1;
519 hid_set_drvdata(hdev, td); 540 hid_set_drvdata(hdev, td);
520 541
521 ret = hid_parse(hdev); 542 ret = hid_parse(hdev);
@@ -593,6 +614,11 @@ static const struct hid_device_id mt_devices[] = {
593 HID_USB_DEVICE(USB_VENDOR_ID_CANDO, 614 HID_USB_DEVICE(USB_VENDOR_ID_CANDO,
594 USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) }, 615 USB_DEVICE_ID_CANDO_MULTI_TOUCH_15_6) },
595 616
617 /* Chunghwa Telecom touch panels */
618 { .driver_data = MT_CLS_DEFAULT,
619 HID_USB_DEVICE(USB_VENDOR_ID_CHUNGHWAT,
620 USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH) },
621
596 /* CVTouch panels */ 622 /* CVTouch panels */
597 { .driver_data = MT_CLS_DEFAULT, 623 { .driver_data = MT_CLS_DEFAULT,
598 HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH, 624 HID_USB_DEVICE(USB_VENDOR_ID_CVTOUCH,
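Note on the hid-multitouch.c diff above: the slot bookkeeping (last_slot_field/last_field_index) is now updated only for usages that live in the same HID collection as the one where HID_DG_CONTACTID was mapped, so fields from unrelated collections on combo devices no longer clobber it. A small sketch of that guard, with hypothetical structures:

    /* Sketch only: hypothetical structures, not the hid-multitouch internals. */
    #include <stdio.h>

    struct mt_state {
        int last_mt_collection;      /* collection owning the MT slots, -1 until seen */
        unsigned last_field_index;
    };

    /* Remember the field only if it belongs to the multitouch collection. */
    static void note_field(struct mt_state *td, int collection, unsigned field_index)
    {
        if (td->last_mt_collection == collection)
            td->last_field_index = field_index;
    }

    int main(void)
    {
        struct mt_state td = { .last_mt_collection = -1, .last_field_index = 0 };

        td.last_mt_collection = 2;   /* HID_DG_CONTACTID seen in collection 2 */
        note_field(&td, 2, 7);       /* recorded */
        note_field(&td, 5, 9);       /* ignored: belongs to another collection */
        printf("last field index: %u\n", td.last_field_index);
        return 0;
    }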
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 0e30b140edca..621959d5cc42 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
74 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT }, 74 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
75 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT }, 75 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
76 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT }, 76 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_KNA5, HID_QUIRK_MULTI_INPUT },
77 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWA60, HID_QUIRK_MULTI_INPUT },
77 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT }, 78 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U, HID_QUIRK_MULTI_INPUT },
78 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT }, 79 { USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP8060U, HID_QUIRK_MULTI_INPUT },
79 { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT }, 80 { USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index ff3c644888b1..7c1188b53c3e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -248,12 +248,15 @@ static int hiddev_release(struct inode * inode, struct file * file)
248 usbhid_close(list->hiddev->hid); 248 usbhid_close(list->hiddev->hid);
249 usbhid_put_power(list->hiddev->hid); 249 usbhid_put_power(list->hiddev->hid);
250 } else { 250 } else {
251 mutex_unlock(&list->hiddev->existancelock);
251 kfree(list->hiddev); 252 kfree(list->hiddev);
253 kfree(list);
254 return 0;
252 } 255 }
253 } 256 }
254 257
255 kfree(list);
256 mutex_unlock(&list->hiddev->existancelock); 258 mutex_unlock(&list->hiddev->existancelock);
259 kfree(list);
257 260
258 return 0; 261 return 0;
259} 262}
@@ -923,10 +926,11 @@ void hiddev_disconnect(struct hid_device *hid)
923 usb_deregister_dev(usbhid->intf, &hiddev_class); 926 usb_deregister_dev(usbhid->intf, &hiddev_class);
924 927
925 if (hiddev->open) { 928 if (hiddev->open) {
929 mutex_unlock(&hiddev->existancelock);
926 usbhid_close(hiddev->hid); 930 usbhid_close(hiddev->hid);
927 wake_up_interruptible(&hiddev->wait); 931 wake_up_interruptible(&hiddev->wait);
928 } else { 932 } else {
933 mutex_unlock(&hiddev->existancelock);
929 kfree(hiddev); 934 kfree(hiddev);
930 } 935 }
931 mutex_unlock(&hiddev->existancelock);
932} 936}
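Note on the hiddev.c diff above: both the release and disconnect paths now drop existancelock before kfree(), because the mutex is embedded in the object being freed and unlocking it afterwards would touch freed memory. A compact pthread sketch of the safe ordering (hypothetical names, not the usbhid code):

    /* Sketch only: illustrates the unlock-before-free ordering, not usbhid. */
    #include <pthread.h>
    #include <stdlib.h>

    struct session {
        pthread_mutex_t lock;        /* embedded in the object being freed */
        int open;
    };

    static void session_release(struct session *s)
    {
        pthread_mutex_lock(&s->lock);
        if (!s->open) {
            /* Unlock first: after free() the mutex storage is gone, so an
             * unlock done later would touch freed memory. */
            pthread_mutex_unlock(&s->lock);
            free(s);
            return;
        }
        s->open--;
        pthread_mutex_unlock(&s->lock);
    }

    int main(void)
    {
        struct session *s = calloc(1, sizeof(*s));

        if (!s)
            return 1;
        pthread_mutex_init(&s->lock, NULL);
        session_release(s);          /* open == 0, so this frees s safely */
        return 0;
    }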
diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
index b5e892017e0c..dcb78a7a8047 100644
--- a/drivers/hwmon/asus_atk0110.c
+++ b/drivers/hwmon/asus_atk0110.c
@@ -268,6 +268,7 @@ static struct device_attribute atk_name_attr =
268static void atk_init_attribute(struct device_attribute *attr, char *name, 268static void atk_init_attribute(struct device_attribute *attr, char *name,
269 sysfs_show_func show) 269 sysfs_show_func show)
270{ 270{
271 sysfs_attr_init(&attr->attr);
271 attr->attr.name = name; 272 attr->attr.name = name;
272 attr->attr.mode = 0444; 273 attr->attr.mode = 0444;
273 attr->show = show; 274 attr->show = show;
@@ -1188,19 +1189,15 @@ static int atk_create_files(struct atk_data *data)
1188 int err; 1189 int err;
1189 1190
1190 list_for_each_entry(s, &data->sensor_list, list) { 1191 list_for_each_entry(s, &data->sensor_list, list) {
1191 sysfs_attr_init(&s->input_attr.attr);
1192 err = device_create_file(data->hwmon_dev, &s->input_attr); 1192 err = device_create_file(data->hwmon_dev, &s->input_attr);
1193 if (err) 1193 if (err)
1194 return err; 1194 return err;
1195 sysfs_attr_init(&s->label_attr.attr);
1196 err = device_create_file(data->hwmon_dev, &s->label_attr); 1195 err = device_create_file(data->hwmon_dev, &s->label_attr);
1197 if (err) 1196 if (err)
1198 return err; 1197 return err;
1199 sysfs_attr_init(&s->limit1_attr.attr);
1200 err = device_create_file(data->hwmon_dev, &s->limit1_attr); 1198 err = device_create_file(data->hwmon_dev, &s->limit1_attr);
1201 if (err) 1199 if (err)
1202 return err; 1200 return err;
1203 sysfs_attr_init(&s->limit2_attr.attr);
1204 err = device_create_file(data->hwmon_dev, &s->limit2_attr); 1201 err = device_create_file(data->hwmon_dev, &s->limit2_attr);
1205 if (err) 1202 if (err)
1206 return err; 1203 return err;
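Note on the asus_atk0110.c diff above (the ibmaem, ibmpex and s3c-hwmon hunks below make the same change): sysfs_attr_init() moves out of the registration loop and into the helper that fills in the attribute, so each dynamically allocated attribute gets its lockdep class set exactly once, where it is built. A kernel-style sketch of the pattern, with hypothetical names; it is not a buildable driver on its own:

    /* Sketch of the pattern, hypothetical names; not a buildable driver. */
    #include <linux/device.h>
    #include <linux/sysfs.h>

    static void init_dynamic_attr(struct device_attribute *attr, const char *name,
                                  ssize_t (*show)(struct device *,
                                                  struct device_attribute *, char *))
    {
        sysfs_attr_init(&attr->attr);   /* lockdep class for dynamically allocated attrs */
        attr->attr.name = name;
        attr->attr.mode = 0444;
        attr->show = show;
    }

    /* Registration then only has to create the file, e.g.:
     *     err = device_create_file(hwmon_dev, &sensor->input_attr);
     */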
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index de3d2465fe24..0070d5476dd0 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -97,9 +97,7 @@ struct platform_data {
97struct pdev_entry { 97struct pdev_entry {
98 struct list_head list; 98 struct list_head list;
99 struct platform_device *pdev; 99 struct platform_device *pdev;
100 unsigned int cpu;
101 u16 phys_proc_id; 100 u16 phys_proc_id;
102 u16 cpu_core_id;
103}; 101};
104 102
105static LIST_HEAD(pdev_list); 103static LIST_HEAD(pdev_list);
@@ -296,7 +294,7 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
296 * If the TjMax is not plausible, an assumption 294 * If the TjMax is not plausible, an assumption
297 * will be used 295 * will be used
298 */ 296 */
299 if (val > 80 && val < 120) { 297 if (val) {
300 dev_info(dev, "TjMax is %d C.\n", val); 298 dev_info(dev, "TjMax is %d C.\n", val);
301 return val * 1000; 299 return val * 1000;
302 } 300 }
@@ -304,24 +302,9 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
304 302
305 /* 303 /*
306 * An assumption is made for early CPUs and unreadable MSR. 304 * An assumption is made for early CPUs and unreadable MSR.
307 * NOTE: the given value may not be correct. 305 * NOTE: the calculated value may not be correct.
308 */ 306 */
309 307 return adjust_tjmax(c, id, dev);
310 switch (c->x86_model) {
311 case 0xe:
312 case 0xf:
313 case 0x16:
314 case 0x1a:
315 dev_warn(dev, "TjMax is assumed as 100 C!\n");
316 return 100000;
317 case 0x17:
318 case 0x1c: /* Atom CPUs */
319 return adjust_tjmax(c, id, dev);
320 default:
321 dev_warn(dev, "CPU (model=0x%x) is not supported yet,"
322 " using default TjMax of 100C.\n", c->x86_model);
323 return 100000;
324 }
325} 308}
326 309
327static void __devinit get_ucode_rev_on_cpu(void *edx) 310static void __devinit get_ucode_rev_on_cpu(void *edx)
@@ -341,7 +324,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev)
341 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); 324 err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
342 if (!err) { 325 if (!err) {
343 val = (eax >> 16) & 0xff; 326 val = (eax >> 16) & 0xff;
344 if (val > 80 && val < 120) 327 if (val)
345 return val * 1000; 328 return val * 1000;
346 } 329 }
347 dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); 330 dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu);
@@ -668,9 +651,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu)
668 } 651 }
669 652
670 pdev_entry->pdev = pdev; 653 pdev_entry->pdev = pdev;
671 pdev_entry->cpu = cpu;
672 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); 654 pdev_entry->phys_proc_id = TO_PHYS_ID(cpu);
673 pdev_entry->cpu_core_id = TO_CORE_ID(cpu);
674 655
675 list_add_tail(&pdev_entry->list, &pdev_list); 656 list_add_tail(&pdev_entry->list, &pdev_list);
676 mutex_unlock(&pdev_list_mutex); 657 mutex_unlock(&pdev_list_mutex);
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
index 537409d07ee7..1a409c5bc9bc 100644
--- a/drivers/hwmon/ibmaem.c
+++ b/drivers/hwmon/ibmaem.c
@@ -947,6 +947,7 @@ static int aem_register_sensors(struct aem_data *data,
947 947
948 /* Set up read-only sensors */ 948 /* Set up read-only sensors */
949 while (ro->label) { 949 while (ro->label) {
950 sysfs_attr_init(&sensors->dev_attr.attr);
950 sensors->dev_attr.attr.name = ro->label; 951 sensors->dev_attr.attr.name = ro->label;
951 sensors->dev_attr.attr.mode = S_IRUGO; 952 sensors->dev_attr.attr.mode = S_IRUGO;
952 sensors->dev_attr.show = ro->show; 953 sensors->dev_attr.show = ro->show;
@@ -963,6 +964,7 @@ static int aem_register_sensors(struct aem_data *data,
963 964
964 /* Set up read-write sensors */ 965 /* Set up read-write sensors */
965 while (rw->label) { 966 while (rw->label) {
967 sysfs_attr_init(&sensors->dev_attr.attr);
966 sensors->dev_attr.attr.name = rw->label; 968 sensors->dev_attr.attr.name = rw->label;
967 sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR; 969 sensors->dev_attr.attr.mode = S_IRUGO | S_IWUSR;
968 sensors->dev_attr.show = rw->show; 970 sensors->dev_attr.show = rw->show;
diff --git a/drivers/hwmon/ibmpex.c b/drivers/hwmon/ibmpex.c
index 06d4eafcf76b..41dbf8161ed7 100644
--- a/drivers/hwmon/ibmpex.c
+++ b/drivers/hwmon/ibmpex.c
@@ -358,6 +358,7 @@ static int create_sensor(struct ibmpex_bmc_data *data, int type,
358 else if (type == POWER_SENSOR) 358 else if (type == POWER_SENSOR)
359 sprintf(n, power_sensor_name_templates[func], "power", counter); 359 sprintf(n, power_sensor_name_templates[func], "power", counter);
360 360
361 sysfs_attr_init(&data->sensors[sensor].attr[func].dev_attr.attr);
361 data->sensors[sensor].attr[func].dev_attr.attr.name = n; 362 data->sensors[sensor].attr[func].dev_attr.attr.name = n;
362 data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO; 363 data->sensors[sensor].attr[func].dev_attr.attr.mode = S_IRUGO;
363 data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor; 364 data->sensors[sensor].attr[func].dev_attr.show = ibmpex_show_sensor;
diff --git a/drivers/hwmon/max6642.c b/drivers/hwmon/max6642.c
index 0f9fc40379cd..e855d3b0bd1f 100644
--- a/drivers/hwmon/max6642.c
+++ b/drivers/hwmon/max6642.c
@@ -136,15 +136,29 @@ static int max6642_detect(struct i2c_client *client,
136 if (man_id != 0x4D) 136 if (man_id != 0x4D)
137 return -ENODEV; 137 return -ENODEV;
138 138
139 /* sanity check */
140 if (i2c_smbus_read_byte_data(client, 0x04) != 0x4D
141 || i2c_smbus_read_byte_data(client, 0x06) != 0x4D
142 || i2c_smbus_read_byte_data(client, 0xff) != 0x4D)
143 return -ENODEV;
144
139 /* 145 /*
140 * We read the config and status register, the 4 lower bits in the 146 * We read the config and status register, the 4 lower bits in the
141 * config register should be zero and bit 5, 3, 1 and 0 should be 147 * config register should be zero and bit 5, 3, 1 and 0 should be
142 * zero in the status register. 148 * zero in the status register.
143 */ 149 */
144 reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG); 150 reg_config = i2c_smbus_read_byte_data(client, MAX6642_REG_R_CONFIG);
151 if ((reg_config & 0x0f) != 0x00)
152 return -ENODEV;
153
154 /* in between, another round of sanity checks */
155 if (i2c_smbus_read_byte_data(client, 0x04) != reg_config
156 || i2c_smbus_read_byte_data(client, 0x06) != reg_config
157 || i2c_smbus_read_byte_data(client, 0xff) != reg_config)
158 return -ENODEV;
159
145 reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS); 160 reg_status = i2c_smbus_read_byte_data(client, MAX6642_REG_R_STATUS);
146 if (((reg_config & 0x0f) != 0x00) || 161 if ((reg_status & 0x2b) != 0x00)
147 ((reg_status & 0x2b) != 0x00))
148 return -ENODEV; 162 return -ENODEV;
149 163
150 strlcpy(info->type, "max6642", I2C_NAME_SIZE); 164 strlcpy(info->type, "max6642", I2C_NAME_SIZE);
@@ -246,7 +260,7 @@ static SENSOR_DEVICE_ATTR_2(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
246 set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH); 260 set_temp_max, 0, MAX6642_REG_W_LOCAL_HIGH);
247static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max, 261static SENSOR_DEVICE_ATTR_2(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
248 set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH); 262 set_temp_max, 1, MAX6642_REG_W_REMOTE_HIGH);
249static SENSOR_DEVICE_ATTR(temp_fault, S_IRUGO, show_alarm, NULL, 2); 263static SENSOR_DEVICE_ATTR(temp2_fault, S_IRUGO, show_alarm, NULL, 2);
250static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6); 264static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
251static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4); 265static SENSOR_DEVICE_ATTR(temp2_max_alarm, S_IRUGO, show_alarm, NULL, 4);
252 266
@@ -256,7 +270,7 @@ static struct attribute *max6642_attributes[] = {
256 &sensor_dev_attr_temp1_max.dev_attr.attr, 270 &sensor_dev_attr_temp1_max.dev_attr.attr,
257 &sensor_dev_attr_temp2_max.dev_attr.attr, 271 &sensor_dev_attr_temp2_max.dev_attr.attr,
258 272
259 &sensor_dev_attr_temp_fault.dev_attr.attr, 273 &sensor_dev_attr_temp2_fault.dev_attr.attr,
260 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr, 274 &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
261 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr, 275 &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
262 NULL 276 NULL
diff --git a/drivers/hwmon/s3c-hwmon.c b/drivers/hwmon/s3c-hwmon.c
index 92b42db43bcf..b39f52e2752a 100644
--- a/drivers/hwmon/s3c-hwmon.c
+++ b/drivers/hwmon/s3c-hwmon.c
@@ -232,6 +232,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
232 232
233 attr = &attrs->in; 233 attr = &attrs->in;
234 attr->index = channel; 234 attr->index = channel;
235 sysfs_attr_init(&attr->dev_attr.attr);
235 attr->dev_attr.attr.name = attrs->in_name; 236 attr->dev_attr.attr.name = attrs->in_name;
236 attr->dev_attr.attr.mode = S_IRUGO; 237 attr->dev_attr.attr.mode = S_IRUGO;
237 attr->dev_attr.show = s3c_hwmon_ch_show; 238 attr->dev_attr.show = s3c_hwmon_ch_show;
@@ -249,6 +250,7 @@ static int s3c_hwmon_create_attr(struct device *dev,
249 250
250 attr = &attrs->label; 251 attr = &attrs->label;
251 attr->index = channel; 252 attr->index = channel;
253 sysfs_attr_init(&attr->dev_attr.attr);
252 attr->dev_attr.attr.name = attrs->label_name; 254 attr->dev_attr.attr.name = attrs->label_name;
253 attr->dev_attr.attr.mode = S_IRUGO; 255 attr->dev_attr.attr.mode = S_IRUGO;
254 attr->dev_attr.show = s3c_hwmon_label_show; 256 attr->dev_attr.show = s3c_hwmon_label_show;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 144d27261e43..04b09564bfa9 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -778,7 +778,8 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq,
778 sector_t block) 778 sector_t block)
779{ 779{
780 struct ide_cmd cmd; 780 struct ide_cmd cmd;
781 int uptodate = 0, nsectors; 781 int uptodate = 0;
782 unsigned int nsectors;
782 783
783 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu", 784 ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x, block: %llu",
784 rq->cmd[0], (unsigned long long)block); 785 rq->cmd[0], (unsigned long long)block);
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index be0921ef6b52..4cf25347b015 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -111,7 +111,8 @@ static void evdev_event(struct input_handle *handle,
111 111
112 rcu_read_unlock(); 112 rcu_read_unlock();
113 113
114 wake_up_interruptible(&evdev->wait); 114 if (type == EV_SYN && code == SYN_REPORT)
115 wake_up_interruptible(&evdev->wait);
115} 116}
116 117
117static int evdev_fasync(int fd, struct file *file, int on) 118static int evdev_fasync(int fd, struct file *file, int on)
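Note on the evdev.c diff above: readers are now woken only when an EV_SYN/SYN_REPORT lands in the queue, i.e. once per complete packet rather than once per event. A small user-space reader that consumes events on those packet boundaries (the device node path is a placeholder):

    /* Sketch only: a reader that treats SYN_REPORT as the packet boundary. */
    #include <fcntl.h>
    #include <linux/input.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct input_event ev;
        int fd = open("/dev/input/event0", O_RDONLY);   /* placeholder node */

        if (fd < 0)
            return 1;
        while (read(fd, &ev, sizeof(ev)) == (ssize_t)sizeof(ev)) {
            if (ev.type == EV_SYN && ev.code == SYN_REPORT)
                printf("-- end of packet --\n");        /* a full packet is queued */
            else
                printf("type=%u code=%u value=%d\n",
                       (unsigned)ev.type, (unsigned)ev.code, ev.value);
        }
        close(fd);
        return 0;
    }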
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 75e11c7b70fd..da38d97a51b1 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1756,7 +1756,7 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
1756 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { 1756 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
1757 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - 1757 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
1758 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, 1758 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1,
1759 clamp(mt_slots, 2, 32); 1759 mt_slots = clamp(mt_slots, 2, 32);
1760 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 1760 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
1761 mt_slots = 2; 1761 mt_slots = 2;
1762 } else { 1762 } else {
diff --git a/drivers/input/keyboard/omap-keypad.c b/drivers/input/keyboard/omap-keypad.c
index f23a743817db..33d0bdc837c0 100644
--- a/drivers/input/keyboard/omap-keypad.c
+++ b/drivers/input/keyboard/omap-keypad.c
@@ -209,6 +209,7 @@ static void omap_kp_tasklet(unsigned long data)
209#endif 209#endif
210 } 210 }
211 } 211 }
212 input_sync(omap_kp_data->input);
212 memcpy(keypad_state, new_state, sizeof(keypad_state)); 213 memcpy(keypad_state, new_state, sizeof(keypad_state));
213 214
214 if (key_down) { 215 if (key_down) {
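Note on the omap-keypad.c diff above: the added input_sync() terminates the batch of reported keys with EV_SYN/SYN_REPORT, which is exactly the packet boundary the evdev change earlier keys on. A kernel-style sketch of the report-then-sync pattern, with hypothetical names; not a buildable driver:

    /* Sketch of the pattern, hypothetical names; not a buildable driver. */
    #include <linux/input.h>

    static void report_scan(struct input_dev *input, const int *keycodes,
                            const unsigned char *pressed, int nkeys)
    {
        int i;

        for (i = 0; i < nkeys; i++)
            input_report_key(input, keycodes[i], pressed[i]);
        input_sync(input);           /* emits EV_SYN/SYN_REPORT: one complete packet */
    }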
diff --git a/drivers/input/keyboard/sh_keysc.c b/drivers/input/keyboard/sh_keysc.c
index 834cf98e7efb..6876700a4469 100644
--- a/drivers/input/keyboard/sh_keysc.c
+++ b/drivers/input/keyboard/sh_keysc.c
@@ -32,7 +32,7 @@ static const struct {
32 [SH_KEYSC_MODE_3] = { 2, 4, 7 }, 32 [SH_KEYSC_MODE_3] = { 2, 4, 7 },
33 [SH_KEYSC_MODE_4] = { 3, 6, 6 }, 33 [SH_KEYSC_MODE_4] = { 3, 6, 6 },
34 [SH_KEYSC_MODE_5] = { 4, 6, 7 }, 34 [SH_KEYSC_MODE_5] = { 4, 6, 7 },
35 [SH_KEYSC_MODE_6] = { 5, 7, 7 }, 35 [SH_KEYSC_MODE_6] = { 5, 8, 8 },
36}; 36};
37 37
38struct sh_keysc_priv { 38struct sh_keysc_priv {
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 257e033986e4..0110b5a3a167 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -187,7 +187,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
187 if (size == 0) 187 if (size == 0)
188 size = xres ? : 1; 188 size = xres ? : 1;
189 189
190 clamp(value, min, max); 190 value = clamp(value, min, max);
191 191
192 mousedev->packet.x = ((value - min) * xres) / size; 192 mousedev->packet.x = ((value - min) * xres) / size;
193 mousedev->packet.abs_event = 1; 193 mousedev->packet.abs_event = 1;
@@ -201,7 +201,7 @@ static void mousedev_abs_event(struct input_dev *dev, struct mousedev *mousedev,
201 if (size == 0) 201 if (size == 0)
202 size = yres ? : 1; 202 size = yres ? : 1;
203 203
204 clamp(value, min, max); 204 value = clamp(value, min, max);
205 205
206 mousedev->packet.y = yres - ((value - min) * yres) / size; 206 mousedev->packet.y = yres - ((value - min) * yres) / size;
207 mousedev->packet.abs_event = 1; 207 mousedev->packet.abs_event = 1;
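Note on the mousedev.c diff above (and the input_estimate_events_per_packet hunk in input.c earlier): clamp() returns the clamped value rather than modifying its argument, so the result has to be assigned. A self-contained illustration with a simplified stand-in macro; unlike the kernel's clamp(), this one may evaluate its arguments more than once:

    /* Sketch only: a simplified stand-in for the kernel's clamp(). */
    #include <stdio.h>

    #define clamp(val, lo, hi) ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

    int main(void)
    {
        int value = 180, min = 0, max = 127;

        clamp(value, min, max);            /* wrong: the result is discarded */
        printf("discarded: %d\n", value);  /* still 180 */

        value = clamp(value, min, max);    /* right: assign the result */
        printf("assigned:  %d\n", value);  /* 127 */
        return 0;
    }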
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 59de638225fe..e35058bcd7b9 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -156,8 +156,10 @@ static int if_open(struct tty_struct *tty, struct file *filp)
156 if (!cs || !try_module_get(cs->driver->owner)) 156 if (!cs || !try_module_get(cs->driver->owner))
157 return -ENODEV; 157 return -ENODEV;
158 158
159 if (mutex_lock_interruptible(&cs->mutex)) 159 if (mutex_lock_interruptible(&cs->mutex)) {
160 module_put(cs->driver->owner);
160 return -ERESTARTSYS; 161 return -ERESTARTSYS;
162 }
161 tty->driver_data = cs; 163 tty->driver_data = cs;
162 164
163 ++cs->open_count; 165 ++cs->open_count;
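Note on the gigaset interface.c diff above: if_open() takes a module reference with try_module_get() and can still fail afterwards in mutex_lock_interruptible(), so the error path must drop that reference or the owner's refcount leaks. A user-space sketch of the acquire/undo-on-error shape, with stand-in functions (the kernel path returns -ERESTARTSYS; plain errno.h has no such value, so -EINTR stands in below):

    /* Sketch only: the helpers stand in for try_module_get()/module_put()/
     * mutex_lock_interruptible(); this is not the gigaset code. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int refcount;

    static bool get_ref(void)            { refcount++; return true; }
    static void put_ref(void)            { refcount--; }
    static int  lock_interruptible(void) { return 0; }  /* 0 means acquired */

    static int open_channel(void)
    {
        if (!get_ref())
            return -ENODEV;

        if (lock_interruptible()) {
            put_ref();               /* undo the reference on this error path too */
            return -EINTR;           /* the kernel path returns -ERESTARTSYS */
        }
        /* ... do the real open work, then unlock ... */
        return 0;
    }

    int main(void)
    {
        open_channel();
        printf("refcount after open: %d\n", refcount);
        return 0;
    }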
diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c
index 3ccbff13eaf2..71a8eb6ef71e 100644
--- a/drivers/isdn/hardware/mISDN/hfcsusb.c
+++ b/drivers/isdn/hardware/mISDN/hfcsusb.c
@@ -283,6 +283,7 @@ hfcsusb_ph_info(struct hfcsusb *hw)
283 _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY, 283 _queue_data(&dch->dev.D, MPH_INFORMATION_IND, MISDN_ID_ANY,
284 sizeof(struct ph_info_dch) + dch->dev.nrbchan * 284 sizeof(struct ph_info_dch) + dch->dev.nrbchan *
285 sizeof(struct ph_info_ch), phi, GFP_ATOMIC); 285 sizeof(struct ph_info_ch), phi, GFP_ATOMIC);
286 kfree(phi);
286} 287}
287 288
288/* 289/*
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 23f0d5e99f35..713d43b4e563 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -1,3 +1,10 @@
1config LEDS_GPIO_REGISTER
2 bool
3 help
4 This option provides the function gpio_led_register_device.
5 As this function is used by arch code it must not be compiled as a
6 module.
7
1menuconfig NEW_LEDS 8menuconfig NEW_LEDS
2 bool "LED Support" 9 bool "LED Support"
3 help 10 help
@@ -7,22 +14,14 @@ menuconfig NEW_LEDS
7 This is not related to standard keyboard LEDs which are controlled 14 This is not related to standard keyboard LEDs which are controlled
8 via the input system. 15 via the input system.
9 16
17if NEW_LEDS
18
10config LEDS_CLASS 19config LEDS_CLASS
11 bool "LED Class Support" 20 bool "LED Class Support"
12 depends on NEW_LEDS
13 help 21 help
14 This option enables the led sysfs class in /sys/class/leds. You'll 22 This option enables the led sysfs class in /sys/class/leds. You'll
15 need this to do anything useful with LEDs. If unsure, say N. 23 need this to do anything useful with LEDs. If unsure, say N.
16 24
17config LEDS_GPIO_REGISTER
18 bool
19 help
20 This option provides the function gpio_led_register_device.
21 As this function is used by arch code it must not be compiled as a
22 module.
23
24if NEW_LEDS
25
26comment "LED drivers" 25comment "LED drivers"
27 26
28config LEDS_88PM860X 27config LEDS_88PM860X
@@ -391,6 +390,7 @@ config LEDS_NETXBIG
391 390
392config LEDS_ASIC3 391config LEDS_ASIC3
393 bool "LED support for the HTC ASIC3" 392 bool "LED support for the HTC ASIC3"
393 depends on LEDS_CLASS
394 depends on MFD_ASIC3 394 depends on MFD_ASIC3
395 default y 395 default y
396 help 396 help
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 70bd738b8b99..574b09afedd3 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -534,6 +534,82 @@ void bitmap_print_sb(struct bitmap *bitmap)
534 kunmap_atomic(sb, KM_USER0); 534 kunmap_atomic(sb, KM_USER0);
535} 535}
536 536
537/*
538 * bitmap_new_disk_sb
539 * @bitmap
540 *
541 * This function is somewhat the reverse of bitmap_read_sb. bitmap_read_sb
542 * reads and verifies the on-disk bitmap superblock and populates bitmap_info.
543 * This function verifies 'bitmap_info' and populates the on-disk bitmap
544 * structure, which is to be written to disk.
545 *
546 * Returns: 0 on success, -Exxx on error
547 */
548static int bitmap_new_disk_sb(struct bitmap *bitmap)
549{
550 bitmap_super_t *sb;
551 unsigned long chunksize, daemon_sleep, write_behind;
552 int err = -EINVAL;
553
554 bitmap->sb_page = alloc_page(GFP_KERNEL);
555 if (IS_ERR(bitmap->sb_page)) {
556 err = PTR_ERR(bitmap->sb_page);
557 bitmap->sb_page = NULL;
558 return err;
559 }
560 bitmap->sb_page->index = 0;
561
562 sb = kmap_atomic(bitmap->sb_page, KM_USER0);
563
564 sb->magic = cpu_to_le32(BITMAP_MAGIC);
565 sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
566
567 chunksize = bitmap->mddev->bitmap_info.chunksize;
568 BUG_ON(!chunksize);
569 if (!is_power_of_2(chunksize)) {
570 kunmap_atomic(sb, KM_USER0);
571 printk(KERN_ERR "bitmap chunksize not a power of 2\n");
572 return -EINVAL;
573 }
574 sb->chunksize = cpu_to_le32(chunksize);
575
576 daemon_sleep = bitmap->mddev->bitmap_info.daemon_sleep;
577 if (!daemon_sleep ||
578 (daemon_sleep < 1) || (daemon_sleep > MAX_SCHEDULE_TIMEOUT)) {
579 printk(KERN_INFO "Choosing daemon_sleep default (5 sec)\n");
580 daemon_sleep = 5 * HZ;
581 }
582 sb->daemon_sleep = cpu_to_le32(daemon_sleep);
583 bitmap->mddev->bitmap_info.daemon_sleep = daemon_sleep;
584
585 /*
586 * FIXME: write_behind for RAID1. If not specified, what
587 * is a good choice? We choose COUNTER_MAX / 2 arbitrarily.
588 */
589 write_behind = bitmap->mddev->bitmap_info.max_write_behind;
590 if (write_behind > COUNTER_MAX)
591 write_behind = COUNTER_MAX / 2;
592 sb->write_behind = cpu_to_le32(write_behind);
593 bitmap->mddev->bitmap_info.max_write_behind = write_behind;
594
595 /* keep the array size field of the bitmap superblock up to date */
596 sb->sync_size = cpu_to_le64(bitmap->mddev->resync_max_sectors);
597
598 memcpy(sb->uuid, bitmap->mddev->uuid, 16);
599
600 bitmap->flags |= BITMAP_STALE;
601 sb->state |= cpu_to_le32(BITMAP_STALE);
602 bitmap->events_cleared = bitmap->mddev->events;
603 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
604
605 bitmap->flags |= BITMAP_HOSTENDIAN;
606 sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
607
608 kunmap_atomic(sb, KM_USER0);
609
610 return 0;
611}
612
537/* read the superblock from the bitmap file and initialize some bitmap fields */ 613/* read the superblock from the bitmap file and initialize some bitmap fields */
538static int bitmap_read_sb(struct bitmap *bitmap) 614static int bitmap_read_sb(struct bitmap *bitmap)
539{ 615{
@@ -575,7 +651,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
575 reason = "unrecognized superblock version"; 651 reason = "unrecognized superblock version";
576 else if (chunksize < 512) 652 else if (chunksize < 512)
577 reason = "bitmap chunksize too small"; 653 reason = "bitmap chunksize too small";
578 else if ((1 << ffz(~chunksize)) != chunksize) 654 else if (!is_power_of_2(chunksize))
579 reason = "bitmap chunksize not a power of 2"; 655 reason = "bitmap chunksize not a power of 2";
580 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT) 656 else if (daemon_sleep < 1 || daemon_sleep > MAX_SCHEDULE_TIMEOUT)
581 reason = "daemon sleep period out of range"; 657 reason = "daemon sleep period out of range";
@@ -1076,8 +1152,8 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
1076 } 1152 }
1077 1153
1078 printk(KERN_INFO "%s: bitmap initialized from disk: " 1154 printk(KERN_INFO "%s: bitmap initialized from disk: "
1079 "read %lu/%lu pages, set %lu bits\n", 1155 "read %lu/%lu pages, set %lu of %lu bits\n",
1080 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt); 1156 bmname(bitmap), bitmap->file_pages, num_pages, bit_cnt, chunks);
1081 1157
1082 return 0; 1158 return 0;
1083 1159
@@ -1332,7 +1408,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
1332 return 0; 1408 return 0;
1333 } 1409 }
1334 1410
1335 if (unlikely((*bmc & COUNTER_MAX) == COUNTER_MAX)) { 1411 if (unlikely(COUNTER(*bmc) == COUNTER_MAX)) {
1336 DEFINE_WAIT(__wait); 1412 DEFINE_WAIT(__wait);
1337 /* note that it is safe to do the prepare_to_wait 1413 /* note that it is safe to do the prepare_to_wait
1338 * after the test as long as we do it before dropping 1414 * after the test as long as we do it before dropping
@@ -1404,10 +1480,10 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
1404 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear); 1480 sysfs_notify_dirent_safe(bitmap->sysfs_can_clear);
1405 } 1481 }
1406 1482
1407 if (!success && ! (*bmc & NEEDED_MASK)) 1483 if (!success && !NEEDED(*bmc))
1408 *bmc |= NEEDED_MASK; 1484 *bmc |= NEEDED_MASK;
1409 1485
1410 if ((*bmc & COUNTER_MAX) == COUNTER_MAX) 1486 if (COUNTER(*bmc) == COUNTER_MAX)
1411 wake_up(&bitmap->overflow_wait); 1487 wake_up(&bitmap->overflow_wait);
1412 1488
1413 (*bmc)--; 1489 (*bmc)--;
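
The bitmap write counters pack flag bits and a count into one 16-bit word, and the two hunks above just switch from open-coded masking to the existing COUNTER()/NEEDED() accessors. A small model of that packing; the bit positions below (NEEDED in bit 15, RESYNC in bit 14, a 14-bit count underneath) follow bitmap.h as I read it and should be treated as illustrative:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t bitmap_counter_t;

    #define NEEDED_MASK  ((bitmap_counter_t)(1 << 15))
    #define RESYNC_MASK  ((bitmap_counter_t)(1 << 14))
    #define COUNTER_MAX  ((bitmap_counter_t)(RESYNC_MASK - 1))

    #define COUNTER(x)   ((x) & COUNTER_MAX)
    #define NEEDED(x)    (((bitmap_counter_t)(x)) & NEEDED_MASK)

    int main(void)
    {
        bitmap_counter_t bmc = NEEDED_MASK | COUNTER_MAX;  /* flagged and saturated */

        /* Comparing the raw word would be wrong; masking first is required. */
        printf("raw == COUNTER_MAX     : %d\n", bmc == COUNTER_MAX);
        printf("COUNTER(bmc) saturated : %d\n", COUNTER(bmc) == COUNTER_MAX);
        printf("NEEDED flag set        : %d\n", !!NEEDED(bmc));
        return 0;
    }
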
@@ -1728,9 +1804,16 @@ int bitmap_create(mddev_t *mddev)
1728 vfs_fsync(file, 1); 1804 vfs_fsync(file, 1);
1729 } 1805 }
1730 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */ 1806 /* read superblock from bitmap file (this sets mddev->bitmap_info.chunksize) */
1731 if (!mddev->bitmap_info.external) 1807 if (!mddev->bitmap_info.external) {
1732 err = bitmap_read_sb(bitmap); 1808 /*
1733 else { 1809 * If 'MD_ARRAY_FIRST_USE' is set, then device-mapper is
1810 * instructing us to create a new on-disk bitmap instance.
1811 */
1812 if (test_and_clear_bit(MD_ARRAY_FIRST_USE, &mddev->flags))
1813 err = bitmap_new_disk_sb(bitmap);
1814 else
1815 err = bitmap_read_sb(bitmap);
1816 } else {
1734 err = 0; 1817 err = 0;
1735 if (mddev->bitmap_info.chunksize == 0 || 1818 if (mddev->bitmap_info.chunksize == 0 ||
1736 mddev->bitmap_info.daemon_sleep == 0) 1819 mddev->bitmap_info.daemon_sleep == 0)
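
The bitmap_create() change above keys off MD_ARRAY_FIRST_USE: when the flag is set (dm-raid bringing up a brand-new array) the bitmap superblock is generated in memory rather than read from disk, and test_and_clear_bit() makes the "first use" path run only once. A minimal sketch of that latch, with a plain flags word standing in for mddev->flags and stub functions for the two superblock paths; note the real test_and_clear_bit() is atomic, this model is not:

    #include <stdio.h>

    #define MD_ARRAY_FIRST_USE 3   /* bit number, as in md.h below */

    static int test_and_clear_bit(int nr, unsigned long *flags)
    {
        unsigned long mask = 1UL << nr;
        int was_set = (*flags & mask) != 0;
        *flags &= ~mask;
        return was_set;
    }

    static int bitmap_new_disk_sb(void) { puts("creating fresh bitmap sb"); return 0; }
    static int bitmap_read_sb(void)     { puts("reading bitmap sb from disk"); return 0; }

    static int load_bitmap_sb(unsigned long *flags)
    {
        if (test_and_clear_bit(MD_ARRAY_FIRST_USE, flags))
            return bitmap_new_disk_sb();
        return bitmap_read_sb();
    }

    int main(void)
    {
        unsigned long flags = 1UL << MD_ARRAY_FIRST_USE;
        load_bitmap_sb(&flags);   /* first use: new superblock */
        load_bitmap_sb(&flags);   /* subsequent runs: read from disk */
        return 0;
    }
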
@@ -1754,9 +1837,6 @@ int bitmap_create(mddev_t *mddev)
1754 bitmap->chunks = chunks; 1837 bitmap->chunks = chunks;
1755 bitmap->pages = pages; 1838 bitmap->pages = pages;
1756 bitmap->missing_pages = pages; 1839 bitmap->missing_pages = pages;
1757 bitmap->counter_bits = COUNTER_BITS;
1758
1759 bitmap->syncchunk = ~0UL;
1760 1840
1761#ifdef INJECT_FATAL_FAULT_1 1841#ifdef INJECT_FATAL_FAULT_1
1762 bitmap->bp = NULL; 1842 bitmap->bp = NULL;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index d0aeaf46d932..b2a127e891ac 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -85,7 +85,6 @@
85typedef __u16 bitmap_counter_t; 85typedef __u16 bitmap_counter_t;
86#define COUNTER_BITS 16 86#define COUNTER_BITS 16
87#define COUNTER_BIT_SHIFT 4 87#define COUNTER_BIT_SHIFT 4
88#define COUNTER_BYTE_RATIO (COUNTER_BITS / 8)
89#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3) 88#define COUNTER_BYTE_SHIFT (COUNTER_BIT_SHIFT - 3)
90 89
91#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1))) 90#define NEEDED_MASK ((bitmap_counter_t) (1 << (COUNTER_BITS - 1)))
@@ -196,19 +195,10 @@ struct bitmap {
196 195
197 mddev_t *mddev; /* the md device that the bitmap is for */ 196 mddev_t *mddev; /* the md device that the bitmap is for */
198 197
199 int counter_bits; /* how many bits per block counter */
200
201 /* bitmap chunksize -- how much data does each bit represent? */ 198 /* bitmap chunksize -- how much data does each bit represent? */
202 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */ 199 unsigned long chunkshift; /* chunksize = 2^chunkshift (for bitops) */
203 unsigned long chunks; /* total number of data chunks for the array */ 200 unsigned long chunks; /* total number of data chunks for the array */
204 201
205 /* We hold a count on the chunk currently being synced, and drop
206 * it when the last block is started. If the resync is aborted
207 * midway, we need to be able to drop that count, so we remember
208 * the counted chunk..
209 */
210 unsigned long syncchunk;
211
212 __u64 events_cleared; 202 __u64 events_cleared;
213 int need_sync; 203 int need_sync;
214 204
diff --git a/drivers/md/md.c b/drivers/md/md.c
index aa640a85bb21..4332fc2f25d4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -351,6 +351,9 @@ void mddev_resume(mddev_t *mddev)
351 mddev->suspended = 0; 351 mddev->suspended = 0;
352 wake_up(&mddev->sb_wait); 352 wake_up(&mddev->sb_wait);
353 mddev->pers->quiesce(mddev, 0); 353 mddev->pers->quiesce(mddev, 0);
354
355 md_wakeup_thread(mddev->thread);
356 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
354} 357}
355EXPORT_SYMBOL_GPL(mddev_resume); 358EXPORT_SYMBOL_GPL(mddev_resume);
356 359
@@ -1750,6 +1753,18 @@ static struct super_type super_types[] = {
1750 }, 1753 },
1751}; 1754};
1752 1755
1756static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
1757{
1758 if (mddev->sync_super) {
1759 mddev->sync_super(mddev, rdev);
1760 return;
1761 }
1762
1763 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));
1764
1765 super_types[mddev->major_version].sync_super(mddev, rdev);
1766}
1767
1753static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1768static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
1754{ 1769{
1755 mdk_rdev_t *rdev, *rdev2; 1770 mdk_rdev_t *rdev, *rdev2;
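
The new sync_super() wrapper above gives device-mapper a hook: if mddev->sync_super is set it wins, otherwise the per-version handler from super_types[] is used, with a BUG_ON guarding the table index. The same override-then-fallback shape in a standalone sketch; the handler names here are mine, only the dispatch logic mirrors the hunk:

    #include <assert.h>
    #include <stdio.h>

    struct mddev {
        int major_version;
        void (*sync_super)(struct mddev *m);   /* optional override */
    };

    static void sync_super_v0(struct mddev *m) { puts("0.90 superblock sync"); }
    static void sync_super_v1(struct mddev *m) { puts("1.x superblock sync"); }

    static void (*const super_types[])(struct mddev *) = {
        sync_super_v0,
        sync_super_v1,
    };

    static void dm_sync_super(struct mddev *m) { puts("dm-raid supplied sync"); }

    static void sync_super(struct mddev *m)
    {
        if (m->sync_super) {          /* driver-supplied override, if any */
            m->sync_super(m);
            return;
        }
        assert(m->major_version < (int)(sizeof(super_types) / sizeof(super_types[0])));
        super_types[m->major_version](m);
    }

    int main(void)
    {
        struct mddev in_kernel = { .major_version = 1, .sync_super = NULL };
        struct mddev via_dm    = { .major_version = 1, .sync_super = dm_sync_super };
        sync_super(&in_kernel);
        sync_super(&via_dm);
        return 0;
    }
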
@@ -1781,8 +1796,8 @@ int md_integrity_register(mddev_t *mddev)
1781 1796
1782 if (list_empty(&mddev->disks)) 1797 if (list_empty(&mddev->disks))
1783 return 0; /* nothing to do */ 1798 return 0; /* nothing to do */
1784 if (blk_get_integrity(mddev->gendisk)) 1799 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
1785 return 0; /* already registered */ 1800 return 0; /* shouldn't register, or already is */
1786 list_for_each_entry(rdev, &mddev->disks, same_set) { 1801 list_for_each_entry(rdev, &mddev->disks, same_set) {
1787 /* skip spares and non-functional disks */ 1802 /* skip spares and non-functional disks */
1788 if (test_bit(Faulty, &rdev->flags)) 1803 if (test_bit(Faulty, &rdev->flags))
@@ -2168,8 +2183,7 @@ static void sync_sbs(mddev_t * mddev, int nospares)
2168 /* Don't update this superblock */ 2183 /* Don't update this superblock */
2169 rdev->sb_loaded = 2; 2184 rdev->sb_loaded = 2;
2170 } else { 2185 } else {
2171 super_types[mddev->major_version]. 2186 sync_super(mddev, rdev);
2172 sync_super(mddev, rdev);
2173 rdev->sb_loaded = 1; 2187 rdev->sb_loaded = 1;
2174 } 2188 }
2175 } 2189 }
@@ -2462,7 +2476,7 @@ slot_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2462 if (rdev->raid_disk == -1) 2476 if (rdev->raid_disk == -1)
2463 return -EEXIST; 2477 return -EEXIST;
2464 /* personality does all needed checks */ 2478 /* personality does all needed checks */
2465 if (rdev->mddev->pers->hot_add_disk == NULL) 2479 if (rdev->mddev->pers->hot_remove_disk == NULL)
2466 return -EINVAL; 2480 return -EINVAL;
2467 err = rdev->mddev->pers-> 2481 err = rdev->mddev->pers->
2468 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2482 hot_remove_disk(rdev->mddev, rdev->raid_disk);
@@ -4619,9 +4633,6 @@ int md_run(mddev_t *mddev)
4619 if (mddev->flags) 4633 if (mddev->flags)
4620 md_update_sb(mddev, 0); 4634 md_update_sb(mddev, 0);
4621 4635
4622 md_wakeup_thread(mddev->thread);
4623 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4624
4625 md_new_event(mddev); 4636 md_new_event(mddev);
4626 sysfs_notify_dirent_safe(mddev->sysfs_state); 4637 sysfs_notify_dirent_safe(mddev->sysfs_state);
4627 sysfs_notify_dirent_safe(mddev->sysfs_action); 4638 sysfs_notify_dirent_safe(mddev->sysfs_action);
@@ -4642,6 +4653,10 @@ static int do_md_run(mddev_t *mddev)
4642 bitmap_destroy(mddev); 4653 bitmap_destroy(mddev);
4643 goto out; 4654 goto out;
4644 } 4655 }
4656
4657 md_wakeup_thread(mddev->thread);
4658 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
4659
4645 set_capacity(mddev->gendisk, mddev->array_sectors); 4660 set_capacity(mddev->gendisk, mddev->array_sectors);
4646 revalidate_disk(mddev->gendisk); 4661 revalidate_disk(mddev->gendisk);
4647 mddev->changed = 1; 4662 mddev->changed = 1;
@@ -5259,6 +5274,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
5259 if (mddev->degraded) 5274 if (mddev->degraded)
5260 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5275 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
5261 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5276 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5277 if (!err)
5278 md_new_event(mddev);
5262 md_wakeup_thread(mddev->thread); 5279 md_wakeup_thread(mddev->thread);
5263 return err; 5280 return err;
5264 } 5281 }
@@ -6866,8 +6883,8 @@ void md_do_sync(mddev_t *mddev)
6866 * Tune reconstruction: 6883 * Tune reconstruction:
6867 */ 6884 */
6868 window = 32*(PAGE_SIZE/512); 6885 window = 32*(PAGE_SIZE/512);
6869 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6886 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n",
6870 window/2,(unsigned long long) max_sectors/2); 6887 window/2, (unsigned long long)max_sectors/2);
6871 6888
6872 atomic_set(&mddev->recovery_active, 0); 6889 atomic_set(&mddev->recovery_active, 0);
6873 last_check = 0; 6890 last_check = 0;
@@ -7045,7 +7062,6 @@ void md_do_sync(mddev_t *mddev)
7045} 7062}
7046EXPORT_SYMBOL_GPL(md_do_sync); 7063EXPORT_SYMBOL_GPL(md_do_sync);
7047 7064
7048
7049static int remove_and_add_spares(mddev_t *mddev) 7065static int remove_and_add_spares(mddev_t *mddev)
7050{ 7066{
7051 mdk_rdev_t *rdev; 7067 mdk_rdev_t *rdev;
@@ -7157,6 +7173,9 @@ static void reap_sync_thread(mddev_t *mddev)
7157 */ 7173 */
7158void md_check_recovery(mddev_t *mddev) 7174void md_check_recovery(mddev_t *mddev)
7159{ 7175{
7176 if (mddev->suspended)
7177 return;
7178
7160 if (mddev->bitmap) 7179 if (mddev->bitmap)
7161 bitmap_daemon_work(mddev); 7180 bitmap_daemon_work(mddev);
7162 7181
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 0b1fd3f1d85b..1c26c7a08ae6 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -124,6 +124,7 @@ struct mddev_s
124#define MD_CHANGE_DEVS 0 /* Some device status has changed */ 124#define MD_CHANGE_DEVS 0 /* Some device status has changed */
125#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */ 125#define MD_CHANGE_CLEAN 1 /* transition to or from 'clean' */
126#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */ 126#define MD_CHANGE_PENDING 2 /* switch from 'clean' to 'active' in progress */
127#define MD_ARRAY_FIRST_USE 3 /* First use of array, needs initialization */
127 128
128 int suspended; 129 int suspended;
129 atomic_t active_io; 130 atomic_t active_io;
@@ -330,6 +331,7 @@ struct mddev_s
330 atomic_t flush_pending; 331 atomic_t flush_pending;
331 struct work_struct flush_work; 332 struct work_struct flush_work;
332 struct work_struct event_work; /* used by dm to report failure event */ 333 struct work_struct event_work; /* used by dm to report failure event */
334 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
333}; 335};
334 336
335 337
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5d096096f958..f7431b6d8447 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -497,21 +497,19 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
497 return best_disk; 497 return best_disk;
498} 498}
499 499
500static int raid1_congested(void *data, int bits) 500int md_raid1_congested(mddev_t *mddev, int bits)
501{ 501{
502 mddev_t *mddev = data;
503 conf_t *conf = mddev->private; 502 conf_t *conf = mddev->private;
504 int i, ret = 0; 503 int i, ret = 0;
505 504
506 if (mddev_congested(mddev, bits))
507 return 1;
508
509 rcu_read_lock(); 505 rcu_read_lock();
510 for (i = 0; i < mddev->raid_disks; i++) { 506 for (i = 0; i < mddev->raid_disks; i++) {
511 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 507 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
512 if (rdev && !test_bit(Faulty, &rdev->flags)) { 508 if (rdev && !test_bit(Faulty, &rdev->flags)) {
513 struct request_queue *q = bdev_get_queue(rdev->bdev); 509 struct request_queue *q = bdev_get_queue(rdev->bdev);
514 510
511 BUG_ON(!q);
512
515 /* Note the '|| 1' - when read_balance prefers 513 /* Note the '|| 1' - when read_balance prefers
516 * non-congested targets, it can be removed 514 * non-congested targets, it can be removed
517 */ 515 */
@@ -524,7 +522,15 @@ static int raid1_congested(void *data, int bits)
524 rcu_read_unlock(); 522 rcu_read_unlock();
525 return ret; 523 return ret;
526} 524}
525EXPORT_SYMBOL_GPL(md_raid1_congested);
527 526
527static int raid1_congested(void *data, int bits)
528{
529 mddev_t *mddev = data;
530
531 return mddev_congested(mddev, bits) ||
532 md_raid1_congested(mddev, bits);
533}
528 534
529static void flush_pending_writes(conf_t *conf) 535static void flush_pending_writes(conf_t *conf)
530{ 536{
@@ -1972,6 +1978,8 @@ static int run(mddev_t *mddev)
1972 return PTR_ERR(conf); 1978 return PTR_ERR(conf);
1973 1979
1974 list_for_each_entry(rdev, &mddev->disks, same_set) { 1980 list_for_each_entry(rdev, &mddev->disks, same_set) {
1981 if (!mddev->gendisk)
1982 continue;
1975 disk_stack_limits(mddev->gendisk, rdev->bdev, 1983 disk_stack_limits(mddev->gendisk, rdev->bdev,
1976 rdev->data_offset << 9); 1984 rdev->data_offset << 9);
1977 /* as we don't honour merge_bvec_fn, we must never risk 1985 /* as we don't honour merge_bvec_fn, we must never risk
@@ -2013,8 +2021,10 @@ static int run(mddev_t *mddev)
2013 2021
2014 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0)); 2022 md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
2015 2023
2016 mddev->queue->backing_dev_info.congested_fn = raid1_congested; 2024 if (mddev->queue) {
2017 mddev->queue->backing_dev_info.congested_data = mddev; 2025 mddev->queue->backing_dev_info.congested_fn = raid1_congested;
2026 mddev->queue->backing_dev_info.congested_data = mddev;
2027 }
2018 return md_integrity_register(mddev); 2028 return md_integrity_register(mddev);
2019} 2029}
2020 2030
diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h
index 5fc4ca1af863..e743a64fac4f 100644
--- a/drivers/md/raid1.h
+++ b/drivers/md/raid1.h
@@ -126,4 +126,6 @@ struct r1bio_s {
126 */ 126 */
127#define R1BIO_Returned 6 127#define R1BIO_Returned 6
128 128
129extern int md_raid1_congested(mddev_t *mddev, int bits);
130
129#endif 131#endif
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 346e69bfdab3..b72edf35ec54 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -129,7 +129,7 @@ static inline int raid5_dec_bi_hw_segments(struct bio *bio)
129 129
130static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) 130static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
131{ 131{
132 bio->bi_phys_segments = raid5_bi_phys_segments(bio) || (cnt << 16); 132 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
133} 133}
134 134
135/* Find first data disk in a raid6 stripe */ 135/* Find first data disk in a raid6 stripe */
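
The one-character fix above is the classic logical-vs-bitwise slip: `a || (cnt << 16)` collapses to 0 or 1, destroying both the existing segment count and the new one, while `a | (cnt << 16)` actually packs cnt into the upper half of bi_phys_segments. A quick demonstration; raid5_bi_phys_segments() is modeled here simply as "low 16 bits", which is what the surrounding helpers in raid5.c do:

    #include <stdio.h>
    #include <stdint.h>

    static unsigned int phys_segments(uint32_t v) { return v & 0xffff; }
    static unsigned int hw_segments(uint32_t v)   { return (v >> 16) & 0xffff; }

    int main(void)
    {
        uint32_t bi_phys_segments = 7;   /* 7 physical segments already recorded */
        unsigned int cnt = 3;            /* hw segment count we want to store */

        uint32_t buggy = phys_segments(bi_phys_segments) || (cnt << 16); /* logical OR */
        uint32_t fixed = phys_segments(bi_phys_segments) |  (cnt << 16); /* bitwise OR */

        printf("buggy: phys=%u hw=%u (word=%u)\n",
               phys_segments(buggy), hw_segments(buggy), buggy);
        printf("fixed: phys=%u hw=%u (word=%u)\n",
               phys_segments(fixed), hw_segments(fixed), fixed);
        return 0;
    }
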
@@ -514,7 +514,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
514 bi = &sh->dev[i].req; 514 bi = &sh->dev[i].req;
515 515
516 bi->bi_rw = rw; 516 bi->bi_rw = rw;
517 if (rw == WRITE) 517 if (rw & WRITE)
518 bi->bi_end_io = raid5_end_write_request; 518 bi->bi_end_io = raid5_end_write_request;
519 else 519 else
520 bi->bi_end_io = raid5_end_read_request; 520 bi->bi_end_io = raid5_end_read_request;
@@ -548,13 +548,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
548 bi->bi_io_vec[0].bv_offset = 0; 548 bi->bi_io_vec[0].bv_offset = 0;
549 bi->bi_size = STRIPE_SIZE; 549 bi->bi_size = STRIPE_SIZE;
550 bi->bi_next = NULL; 550 bi->bi_next = NULL;
551 if (rw == WRITE && 551 if ((rw & WRITE) &&
552 test_bit(R5_ReWrite, &sh->dev[i].flags)) 552 test_bit(R5_ReWrite, &sh->dev[i].flags))
553 atomic_add(STRIPE_SECTORS, 553 atomic_add(STRIPE_SECTORS,
554 &rdev->corrected_errors); 554 &rdev->corrected_errors);
555 generic_make_request(bi); 555 generic_make_request(bi);
556 } else { 556 } else {
557 if (rw == WRITE) 557 if (rw & WRITE)
558 set_bit(STRIPE_DEGRADED, &sh->state); 558 set_bit(STRIPE_DEGRADED, &sh->state);
559 pr_debug("skip op %ld on disc %d for sector %llu\n", 559 pr_debug("skip op %ld on disc %d for sector %llu\n",
560 bi->bi_rw, i, (unsigned long long)sh->sector); 560 bi->bi_rw, i, (unsigned long long)sh->sector);
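
The `rw == WRITE` to `rw & WRITE` changes above matter because bi_rw is a bit field: once request flags such as REQ_SYNC or REQ_FUA are OR-ed in, an equality test against WRITE no longer matches and writes get misclassified as reads. A sketch with made-up flag values; the real REQ_* constants live in blk_types.h, only the masking idea is the point here:

    #include <stdio.h>

    #define READ       0u
    #define WRITE      1u
    #define REQ_SYNC  (1u << 4)   /* illustrative values, not the kernel's */
    #define REQ_FUA   (1u << 5)

    static const char *classify_eq(unsigned long rw)  { return rw == WRITE ? "write" : "read"; }
    static const char *classify_and(unsigned long rw) { return rw & WRITE  ? "write" : "read"; }

    int main(void)
    {
        unsigned long plain   = WRITE;
        unsigned long flagged = WRITE | REQ_SYNC | REQ_FUA;

        printf("plain write  : eq=%s  and=%s\n", classify_eq(plain), classify_and(plain));
        printf("flagged write: eq=%s  and=%s\n", classify_eq(flagged), classify_and(flagged));
        return 0;
    }
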
@@ -585,7 +585,7 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
585 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 585 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
586 586
587 bio_for_each_segment(bvl, bio, i) { 587 bio_for_each_segment(bvl, bio, i) {
588 int len = bio_iovec_idx(bio, i)->bv_len; 588 int len = bvl->bv_len;
589 int clen; 589 int clen;
590 int b_offset = 0; 590 int b_offset = 0;
591 591
@@ -601,8 +601,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
601 clen = len; 601 clen = len;
602 602
603 if (clen > 0) { 603 if (clen > 0) {
604 b_offset += bio_iovec_idx(bio, i)->bv_offset; 604 b_offset += bvl->bv_offset;
605 bio_page = bio_iovec_idx(bio, i)->bv_page; 605 bio_page = bvl->bv_page;
606 if (frombio) 606 if (frombio)
607 tx = async_memcpy(page, bio_page, page_offset, 607 tx = async_memcpy(page, bio_page, page_offset,
608 b_offset, clen, &submit); 608 b_offset, clen, &submit);
@@ -4858,7 +4858,7 @@ static raid5_conf_t *setup_conf(mddev_t *mddev)
4858 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 4858 printk(KERN_INFO "md/raid:%s: device %s operational as raid"
4859 " disk %d\n", 4859 " disk %d\n",
4860 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 4860 mdname(mddev), bdevname(rdev->bdev, b), raid_disk);
4861 } else 4861 } else if (rdev->saved_raid_disk != raid_disk)
4862 /* Cannot rely on bitmap to complete recovery */ 4862 /* Cannot rely on bitmap to complete recovery */
4863 conf->fullsync = 1; 4863 conf->fullsync = 1;
4864 } 4864 }
diff --git a/drivers/media/dvb/dvb-usb/anysee.c b/drivers/media/dvb/dvb-usb/anysee.c
index 4dc1ca333236..7c327b54308e 100644
--- a/drivers/media/dvb/dvb-usb/anysee.c
+++ b/drivers/media/dvb/dvb-usb/anysee.c
@@ -60,8 +60,6 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
60 int act_len, ret; 60 int act_len, ret;
61 u8 buf[64]; 61 u8 buf[64];
62 62
63 if (slen > sizeof(buf))
64 slen = sizeof(buf);
65 memcpy(&buf[0], sbuf, slen); 63 memcpy(&buf[0], sbuf, slen);
66 buf[60] = state->seq++; 64 buf[60] = state->seq++;
67 65
@@ -180,30 +178,37 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
180{ 178{
181 struct dvb_usb_device *d = i2c_get_adapdata(adap); 179 struct dvb_usb_device *d = i2c_get_adapdata(adap);
182 int ret = 0, inc, i = 0; 180 int ret = 0, inc, i = 0;
181 u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */
183 182
184 if (mutex_lock_interruptible(&d->i2c_mutex) < 0) 183 if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
185 return -EAGAIN; 184 return -EAGAIN;
186 185
187 while (i < num) { 186 while (i < num) {
188 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) { 187 if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
189 u8 buf[6]; 188 if (msg[i].len > 2 || msg[i+1].len > 60) {
189 ret = -EOPNOTSUPP;
190 break;
191 }
190 buf[0] = CMD_I2C_READ; 192 buf[0] = CMD_I2C_READ;
191 buf[1] = (msg[i].addr << 1) | 0x01; 193 buf[1] = (msg[i].addr << 1) | 0x01;
192 buf[2] = msg[i].buf[0]; 194 buf[2] = msg[i].buf[0];
193 buf[3] = msg[i].buf[1]; 195 buf[3] = msg[i].buf[1];
194 buf[4] = msg[i].len-1; 196 buf[4] = msg[i].len-1;
195 buf[5] = msg[i+1].len; 197 buf[5] = msg[i+1].len;
196 ret = anysee_ctrl_msg(d, buf, sizeof(buf), msg[i+1].buf, 198 ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
197 msg[i+1].len); 199 msg[i+1].len);
198 inc = 2; 200 inc = 2;
199 } else { 201 } else {
200 u8 buf[4+msg[i].len]; 202 if (msg[i].len > 48) {
203 ret = -EOPNOTSUPP;
204 break;
205 }
201 buf[0] = CMD_I2C_WRITE; 206 buf[0] = CMD_I2C_WRITE;
202 buf[1] = (msg[i].addr << 1); 207 buf[1] = (msg[i].addr << 1);
203 buf[2] = msg[i].len; 208 buf[2] = msg[i].len;
204 buf[3] = 0x01; 209 buf[3] = 0x01;
205 memcpy(&buf[4], msg[i].buf, msg[i].len); 210 memcpy(&buf[4], msg[i].buf, msg[i].len);
206 ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0); 211 ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0);
207 inc = 1; 212 inc = 1;
208 } 213 }
209 if (ret) 214 if (ret)
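
The anysee change above replaces per-message variable-length buffers with one fixed 52-byte buffer and rejects transfers that would not fit (2-byte register address for reads, 60-byte read payload, 48-byte write payload). The same guard pattern in a standalone sketch; the limits are copied from the hunk, the helper name and command byte are mine:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    #define CMD_HDR_LEN   4    /* command byte + addr + len + flag */
    #define WR_MAX        48   /* largest I2C write payload accepted */

    /* Build a write command into a caller-supplied fixed buffer. */
    static int build_i2c_write(unsigned char *buf, size_t bufsz,
                               unsigned char addr,
                               const unsigned char *payload, size_t len)
    {
        if (len > WR_MAX || CMD_HDR_LEN + len > bufsz)
            return -EOPNOTSUPP;           /* mirrors the new bound check */
        buf[0] = 0x02;                    /* stand-in for CMD_I2C_WRITE */
        buf[1] = (unsigned char)(addr << 1);
        buf[2] = (unsigned char)len;
        buf[3] = 0x01;
        memcpy(&buf[4], payload, len);
        return CMD_HDR_LEN + (int)len;    /* bytes to hand to the USB layer */
    }

    int main(void)
    {
        unsigned char buf[52];            /* 4 + 48, as in the driver */
        unsigned char small[8] = { 0 }, big[64] = { 0 };

        printf("8-byte write : %d\n",
               build_i2c_write(buf, sizeof(buf), 0x68, small, sizeof(small)));
        printf("64-byte write: %d\n",
               build_i2c_write(buf, sizeof(buf), 0x68, big, sizeof(big)));
        return 0;
    }
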
diff --git a/drivers/media/media-devnode.c b/drivers/media/media-devnode.c
index af5263c6625a..7b42ace419d9 100644
--- a/drivers/media/media-devnode.c
+++ b/drivers/media/media-devnode.c
@@ -213,14 +213,14 @@ int __must_check media_devnode_register(struct media_devnode *mdev)
213 213
214 /* Part 1: Find a free minor number */ 214 /* Part 1: Find a free minor number */
215 mutex_lock(&media_devnode_lock); 215 mutex_lock(&media_devnode_lock);
216 minor = find_next_zero_bit(media_devnode_nums, 0, MEDIA_NUM_DEVICES); 216 minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
217 if (minor == MEDIA_NUM_DEVICES) { 217 if (minor == MEDIA_NUM_DEVICES) {
218 mutex_unlock(&media_devnode_lock); 218 mutex_unlock(&media_devnode_lock);
219 printk(KERN_ERR "could not get a free minor\n"); 219 printk(KERN_ERR "could not get a free minor\n");
220 return -ENFILE; 220 return -ENFILE;
221 } 221 }
222 222
223 set_bit(mdev->minor, media_devnode_nums); 223 set_bit(minor, media_devnode_nums);
224 mutex_unlock(&media_devnode_lock); 224 mutex_unlock(&media_devnode_lock);
225 225
226 mdev->minor = minor; 226 mdev->minor = minor;
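
Two fixes in one hunk above: find_next_zero_bit() takes (addr, size, offset), so the old call with size 0 could never find a free minor, and the bit that gets reserved must be the freshly found `minor`, not the not-yet-assigned mdev->minor. A userspace model of the corrected allocation; the scan below is a plain loop standing in for the kernel's find_next_zero_bit():

    #include <stdio.h>

    #define MEDIA_NUM_DEVICES 8
    static unsigned long devnode_nums;   /* one bit per minor, 0 = free */

    /* simplified find_next_zero_bit(addr, size, offset) over a single word */
    static unsigned int find_next_zero_bit(const unsigned long *addr,
                                           unsigned int size, unsigned int offset)
    {
        for (unsigned int i = offset; i < size; i++)
            if (!(*addr & (1UL << i)))
                return i;
        return size;                      /* "nothing free" sentinel, as in the kernel */
    }

    static int alloc_minor(void)
    {
        unsigned int minor = find_next_zero_bit(&devnode_nums, MEDIA_NUM_DEVICES, 0);
        if (minor == MEDIA_NUM_DEVICES)
            return -1;                    /* would be -ENFILE in the driver */
        devnode_nums |= 1UL << minor;     /* reserve the minor we just found */
        return (int)minor;
    }

    int main(void)
    {
        for (int i = 0; i < 10; i++)
            printf("minor %d\n", alloc_minor());
        return 0;
    }
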
diff --git a/drivers/media/video/cx23885/cx23885-cards.c b/drivers/media/video/cx23885/cx23885-cards.c
index 2354336862cf..934185cca758 100644
--- a/drivers/media/video/cx23885/cx23885-cards.c
+++ b/drivers/media/video/cx23885/cx23885-cards.c
@@ -25,8 +25,8 @@
25#include <linux/delay.h> 25#include <linux/delay.h>
26#include <media/cx25840.h> 26#include <media/cx25840.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <staging/altera.h>
29 28
29#include "../../../staging/altera-stapl/altera.h"
30#include "cx23885.h" 30#include "cx23885.h"
31#include "tuner-xc2028.h" 31#include "tuner-xc2028.h"
32#include "netup-init.h" 32#include "netup-init.h"
diff --git a/drivers/media/video/gspca/coarse_expo_autogain.h b/drivers/media/video/gspca/coarse_expo_autogain.h
deleted file mode 100644
index 1cb9d941eaf6..000000000000
--- a/drivers/media/video/gspca/coarse_expo_autogain.h
+++ /dev/null
@@ -1,116 +0,0 @@
1/*
 2 * Auto gain algorithm for cameras with a coarse exposure control
3 *
4 * Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21/* Autogain + exposure algorithm for cameras with a coarse exposure control
22 (usually this means we can only control the clockdiv to change exposure)
23 As changing the clockdiv so that the fps drops from 30 to 15 fps for
24 example, will lead to a huge exposure change (it effectively doubles),
25 this algorithm normally tries to only adjust the gain (between 40 and
26 80 %) and if that does not help, only then changes exposure. This leads
 27 to a much more stable image than using the knee algorithm which at
28 certain points of the knee graph will only try to adjust exposure,
 29 which leads to oscillating as one exposure step is huge.
30
31 Note this assumes that the sd struct for the cam in question has
 32 exp_too_low_cnt and exp_too_high_cnt int members for use by this function.
33
 34 Returns 0 if no changes were made, 1 if the gain and/or exposure settings
 35 were changed. */
36static int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
37 int avg_lum, int desired_avg_lum, int deadzone)
38{
39 int i, steps, gain, orig_gain, exposure, orig_exposure;
40 int gain_low, gain_high;
41 const struct ctrl *gain_ctrl = NULL;
42 const struct ctrl *exposure_ctrl = NULL;
43 struct sd *sd = (struct sd *) gspca_dev;
44 int retval = 0;
45
46 for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
47 if (gspca_dev->ctrl_dis & (1 << i))
48 continue;
49 if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
50 gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
51 if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
52 exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
53 }
54 if (!gain_ctrl || !exposure_ctrl) {
55 PDEBUG(D_ERR, "Error: gspca_coarse_grained_expo_autogain "
56 "called on cam without gain or exposure");
57 return 0;
58 }
59
60 if (gain_ctrl->get(gspca_dev, &gain) ||
61 exposure_ctrl->get(gspca_dev, &exposure))
62 return 0;
63
64 orig_gain = gain;
65 orig_exposure = exposure;
66 gain_low =
67 (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 2;
68 gain_low += gain_ctrl->qctrl.minimum;
69 gain_high =
70 (gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 4;
71 gain_high += gain_ctrl->qctrl.minimum;
72
 73 /* If we are off by a multiple of deadzone, do multiple steps to reach the
 74 desired luminance fast (with the risk of a slight overshoot) */
75 steps = (desired_avg_lum - avg_lum) / deadzone;
76
77 PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
78 avg_lum, desired_avg_lum, steps);
79
80 if ((gain + steps) > gain_high &&
81 sd->exposure < exposure_ctrl->qctrl.maximum) {
82 gain = gain_high;
83 sd->exp_too_low_cnt++;
84 } else if ((gain + steps) < gain_low &&
85 sd->exposure > exposure_ctrl->qctrl.minimum) {
86 gain = gain_low;
87 sd->exp_too_high_cnt++;
88 } else {
89 gain += steps;
90 if (gain > gain_ctrl->qctrl.maximum)
91 gain = gain_ctrl->qctrl.maximum;
92 else if (gain < gain_ctrl->qctrl.minimum)
93 gain = gain_ctrl->qctrl.minimum;
94 sd->exp_too_high_cnt = 0;
95 sd->exp_too_low_cnt = 0;
96 }
97
98 if (sd->exp_too_high_cnt > 3) {
99 exposure--;
100 sd->exp_too_high_cnt = 0;
101 } else if (sd->exp_too_low_cnt > 3) {
102 exposure++;
103 sd->exp_too_low_cnt = 0;
104 }
105
106 if (gain != orig_gain) {
107 gain_ctrl->set(gspca_dev, gain);
108 retval = 1;
109 }
110 if (exposure != orig_exposure) {
111 exposure_ctrl->set(gspca_dev, exposure);
112 retval = 1;
113 }
114
115 return retval;
116}
diff --git a/drivers/media/video/gspca/ov519.c b/drivers/media/video/gspca/ov519.c
index 36a46fc78734..057e287b9152 100644
--- a/drivers/media/video/gspca/ov519.c
+++ b/drivers/media/video/gspca/ov519.c
@@ -609,7 +609,7 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
609 * buffers, there are some pretty strict real time constraints for 609 * buffers, there are some pretty strict real time constraints for
610 * isochronous transfer for larger frame sizes). 610 * isochronous transfer for larger frame sizes).
611 */ 611 */
612/*jfm: this value works well for 1600x1200, but not 800x600 - see isoc_init */ 612/*jfm: this value does not work for 800x600 - see isoc_init */
613#define OVFX2_BULK_SIZE (13 * 4096) 613#define OVFX2_BULK_SIZE (13 * 4096)
614 614
615/* I2C registers */ 615/* I2C registers */
@@ -3307,6 +3307,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
3307 3307
3308 gspca_dev->cam.ctrls = sd->ctrls; 3308 gspca_dev->cam.ctrls = sd->ctrls;
3309 sd->quality = QUALITY_DEF; 3309 sd->quality = QUALITY_DEF;
3310 sd->frame_rate = 15;
3310 3311
3311 return 0; 3312 return 0;
3312} 3313}
@@ -3469,7 +3470,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
3469 ARRAY_SIZE(init_519_ov7660)); 3470 ARRAY_SIZE(init_519_ov7660));
3470 write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660)); 3471 write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660));
3471 sd->gspca_dev.curr_mode = 1; /* 640x480 */ 3472 sd->gspca_dev.curr_mode = 1; /* 640x480 */
3472 sd->frame_rate = 15;
3473 ov519_set_mode(sd); 3473 ov519_set_mode(sd);
3474 ov519_set_fr(sd); 3474 ov519_set_fr(sd);
3475 sd->ctrls[COLORS].max = 4; /* 0..4 */ 3475 sd->ctrls[COLORS].max = 4; /* 0..4 */
@@ -3511,7 +3511,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)
3511 3511
3512 switch (sd->bridge) { 3512 switch (sd->bridge) {
3513 case BRIDGE_OVFX2: 3513 case BRIDGE_OVFX2:
3514 if (gspca_dev->width == 1600) 3514 if (gspca_dev->width != 800)
3515 gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE; 3515 gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
3516 else 3516 else
3517 gspca_dev->cam.bulk_size = 7 * 4096; 3517 gspca_dev->cam.bulk_size = 7 * 4096;
@@ -4478,7 +4478,7 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
4478 gspca_frame_add(gspca_dev, INTER_PACKET, data, len); 4478 gspca_frame_add(gspca_dev, INTER_PACKET, data, len);
4479 4479
4480 /* A short read signals EOF */ 4480 /* A short read signals EOF */
4481 if (len < OVFX2_BULK_SIZE) { 4481 if (len < gspca_dev->cam.bulk_size) {
4482 /* If the frame is short, and it is one of the first ones 4482 /* If the frame is short, and it is one of the first ones
4483 the sensor and bridge are still syncing, so drop it. */ 4483 the sensor and bridge are still syncing, so drop it. */
4484 if (sd->first_frame) { 4484 if (sd->first_frame) {
diff --git a/drivers/media/video/gspca/sonixj.c b/drivers/media/video/gspca/sonixj.c
index 6415aff5cbd1..81b8a600783b 100644
--- a/drivers/media/video/gspca/sonixj.c
+++ b/drivers/media/video/gspca/sonixj.c
@@ -60,7 +60,7 @@ struct sd {
60 60
61 u32 pktsz; /* (used by pkt_scan) */ 61 u32 pktsz; /* (used by pkt_scan) */
62 u16 npkt; 62 u16 npkt;
63 u8 nchg; 63 s8 nchg;
64 s8 short_mark; 64 s8 short_mark;
65 65
66 u8 quality; /* image quality */ 66 u8 quality; /* image quality */
diff --git a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
index b538dce96f78..a14a84a5079b 100644
--- a/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
+++ b/drivers/media/video/gspca/stv06xx/stv06xx_hdcs.h
@@ -125,7 +125,7 @@
125#define HDCS_SLEEP_MODE (1 << 1) 125#define HDCS_SLEEP_MODE (1 << 1)
126 126
127#define HDCS_DEFAULT_EXPOSURE 48 127#define HDCS_DEFAULT_EXPOSURE 48
128#define HDCS_DEFAULT_GAIN 128 128#define HDCS_DEFAULT_GAIN 50
129 129
130static int hdcs_probe_1x00(struct sd *sd); 130static int hdcs_probe_1x00(struct sd *sd);
131static int hdcs_probe_1020(struct sd *sd); 131static int hdcs_probe_1020(struct sd *sd);
diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
index a4e4dfdbc2f2..0fb75524484d 100644
--- a/drivers/media/video/ivtv/ivtv-driver.c
+++ b/drivers/media/video/ivtv/ivtv-driver.c
@@ -1328,6 +1328,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1328 if (!itv->has_cx23415) 1328 if (!itv->has_cx23415)
1329 write_reg_sync(0x03, IVTV_REG_DMACONTROL); 1329 write_reg_sync(0x03, IVTV_REG_DMACONTROL);
1330 1330
1331 ivtv_s_std_enc(itv, &itv->tuner_std);
1332
1331 /* Default interrupts enabled. For the PVR350 this includes the 1333 /* Default interrupts enabled. For the PVR350 this includes the
1332 decoder VSYNC interrupt, which is always on. It is not only used 1334 decoder VSYNC interrupt, which is always on. It is not only used
1333 during decoding but also by the OSD. 1335 during decoding but also by the OSD.
@@ -1336,12 +1338,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
1336 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1338 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
1337 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC); 1339 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
1338 ivtv_set_osd_alpha(itv); 1340 ivtv_set_osd_alpha(itv);
1339 } 1341 ivtv_s_std_dec(itv, &itv->tuner_std);
1340 else 1342 } else {
1341 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT); 1343 ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);
1342 1344 }
1343 /* For cards with video out, this call needs interrupts enabled */
1344 ivtv_s_std(NULL, &fh, &itv->tuner_std);
1345 1345
1346 /* Setup initial controls */ 1346 /* Setup initial controls */
1347 cx2341x_handler_setup(&itv->cxhdl); 1347 cx2341x_handler_setup(&itv->cxhdl);
diff --git a/drivers/media/video/ivtv/ivtv-firmware.c b/drivers/media/video/ivtv/ivtv-firmware.c
index 14a1cea1d70d..02c5adebf517 100644
--- a/drivers/media/video/ivtv/ivtv-firmware.c
+++ b/drivers/media/video/ivtv/ivtv-firmware.c
@@ -280,8 +280,6 @@ int ivtv_firmware_restart(struct ivtv *itv)
280{ 280{
281 int rc = 0; 281 int rc = 0;
282 v4l2_std_id std; 282 v4l2_std_id std;
283 struct ivtv_open_id fh;
284 fh.itv = itv;
285 283
286 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) 284 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
287 /* Display test image during restart */ 285 /* Display test image during restart */
@@ -301,14 +299,19 @@ int ivtv_firmware_restart(struct ivtv *itv)
301 /* Allow settings to reload */ 299 /* Allow settings to reload */
302 ivtv_mailbox_cache_invalidate(itv); 300 ivtv_mailbox_cache_invalidate(itv);
303 301
304 /* Restore video standard */ 302 /* Restore encoder video standard */
305 std = itv->std; 303 std = itv->std;
306 itv->std = 0; 304 itv->std = 0;
307 ivtv_s_std(NULL, &fh, &std); 305 ivtv_s_std_enc(itv, &std);
308 306
309 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 307 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
310 ivtv_init_mpeg_decoder(itv); 308 ivtv_init_mpeg_decoder(itv);
311 309
310 /* Restore decoder video standard */
311 std = itv->std_out;
312 itv->std_out = 0;
313 ivtv_s_std_dec(itv, &std);
314
312 /* Restore framebuffer if active */ 315 /* Restore framebuffer if active */
313 if (itv->ivtvfb_restore) 316 if (itv->ivtvfb_restore)
314 itv->ivtvfb_restore(itv); 317 itv->ivtvfb_restore(itv);
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.c b/drivers/media/video/ivtv/ivtv-ioctl.c
index 1689783cd19a..f9e347dae739 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.c
+++ b/drivers/media/video/ivtv/ivtv-ioctl.c
@@ -1071,28 +1071,8 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
1071 return 0; 1071 return 0;
1072} 1072}
1073 1073
1074int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std) 1074void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std)
1075{ 1075{
1076 DEFINE_WAIT(wait);
1077 struct ivtv *itv = fh2id(fh)->itv;
1078 struct yuv_playback_info *yi = &itv->yuv_info;
1079 int f;
1080
1081 if ((*std & V4L2_STD_ALL) == 0)
1082 return -EINVAL;
1083
1084 if (*std == itv->std)
1085 return 0;
1086
1087 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
1088 atomic_read(&itv->capturing) > 0 ||
1089 atomic_read(&itv->decoding) > 0) {
1090 /* Switching standard would turn off the radio or mess
1091 with already running streams, prevent that by
1092 returning EBUSY. */
1093 return -EBUSY;
1094 }
1095
1096 itv->std = *std; 1076 itv->std = *std;
1097 itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0; 1077 itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
1098 itv->is_50hz = !itv->is_60hz; 1078 itv->is_50hz = !itv->is_60hz;
@@ -1106,48 +1086,79 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
1106 if (itv->hw_flags & IVTV_HW_CX25840) 1086 if (itv->hw_flags & IVTV_HW_CX25840)
1107 itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284; 1087 itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;
1108 1088
1109 IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std);
1110
1111 /* Tuner */ 1089 /* Tuner */
1112 ivtv_call_all(itv, core, s_std, itv->std); 1090 ivtv_call_all(itv, core, s_std, itv->std);
1091}
1113 1092
1114 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) { 1093void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
1115 /* set display standard */ 1094{
1116 itv->std_out = *std; 1095 struct yuv_playback_info *yi = &itv->yuv_info;
1117 itv->is_out_60hz = itv->is_60hz; 1096 DEFINE_WAIT(wait);
1118 itv->is_out_50hz = itv->is_50hz; 1097 int f;
1119 ivtv_call_all(itv, video, s_std_output, itv->std_out); 1098
1120 1099 /* set display standard */
1121 /* 1100 itv->std_out = *std;
1122 * The next firmware call is time sensitive. Time it to 1101 itv->is_out_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
1123 * avoid risk of a hard lock, by trying to ensure the call 1102 itv->is_out_50hz = !itv->is_out_60hz;
1124 * happens within the first 100 lines of the top field. 1103 ivtv_call_all(itv, video, s_std_output, itv->std_out);
1125 * Make 4 attempts to sync to the decoder before giving up. 1104
1126 */ 1105 /*
1127 for (f = 0; f < 4; f++) { 1106 * The next firmware call is time sensitive. Time it to
1128 prepare_to_wait(&itv->vsync_waitq, &wait, 1107 * avoid risk of a hard lock, by trying to ensure the call
1129 TASK_UNINTERRUPTIBLE); 1108 * happens within the first 100 lines of the top field.
1130 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) 1109 * Make 4 attempts to sync to the decoder before giving up.
1131 break; 1110 */
1132 schedule_timeout(msecs_to_jiffies(25)); 1111 for (f = 0; f < 4; f++) {
1133 } 1112 prepare_to_wait(&itv->vsync_waitq, &wait,
1134 finish_wait(&itv->vsync_waitq, &wait); 1113 TASK_UNINTERRUPTIBLE);
1135 1114 if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
1136 if (f == 4) 1115 break;
1137 IVTV_WARN("Mode change failed to sync to decoder\n"); 1116 schedule_timeout(msecs_to_jiffies(25));
1138
1139 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
1140 itv->main_rect.left = itv->main_rect.top = 0;
1141 itv->main_rect.width = 720;
1142 itv->main_rect.height = itv->cxhdl.height;
1143 ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
1144 720, itv->main_rect.height, 0, 0);
1145 yi->main_rect = itv->main_rect;
1146 if (!itv->osd_info) {
1147 yi->osd_full_w = 720;
1148 yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
1149 }
1150 } 1117 }
1118 finish_wait(&itv->vsync_waitq, &wait);
1119
1120 if (f == 4)
1121 IVTV_WARN("Mode change failed to sync to decoder\n");
1122
1123 ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
1124 itv->main_rect.left = 0;
1125 itv->main_rect.top = 0;
1126 itv->main_rect.width = 720;
1127 itv->main_rect.height = itv->is_out_50hz ? 576 : 480;
1128 ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
1129 720, itv->main_rect.height, 0, 0);
1130 yi->main_rect = itv->main_rect;
1131 if (!itv->osd_info) {
1132 yi->osd_full_w = 720;
1133 yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
1134 }
1135}
1136
1137int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
1138{
1139 struct ivtv *itv = fh2id(fh)->itv;
1140
1141 if ((*std & V4L2_STD_ALL) == 0)
1142 return -EINVAL;
1143
1144 if (*std == itv->std)
1145 return 0;
1146
1147 if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
1148 atomic_read(&itv->capturing) > 0 ||
1149 atomic_read(&itv->decoding) > 0) {
1150 /* Switching standard would mess with already running
1151 streams, prevent that by returning EBUSY. */
1152 return -EBUSY;
1153 }
1154
1155 IVTV_DEBUG_INFO("Switching standard to %llx.\n",
1156 (unsigned long long)itv->std);
1157
1158 ivtv_s_std_enc(itv, std);
1159 if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
1160 ivtv_s_std_dec(itv, std);
1161
1151 return 0; 1162 return 0;
1152} 1163}
1153 1164
diff --git a/drivers/media/video/ivtv/ivtv-ioctl.h b/drivers/media/video/ivtv/ivtv-ioctl.h
index 58f003412afd..89185caeafae 100644
--- a/drivers/media/video/ivtv/ivtv-ioctl.h
+++ b/drivers/media/video/ivtv/ivtv-ioctl.h
@@ -27,7 +27,8 @@ u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
27void ivtv_set_osd_alpha(struct ivtv *itv); 27void ivtv_set_osd_alpha(struct ivtv *itv);
28int ivtv_set_speed(struct ivtv *itv, int speed); 28int ivtv_set_speed(struct ivtv *itv, int speed);
29void ivtv_set_funcs(struct video_device *vdev); 29void ivtv_set_funcs(struct video_device *vdev);
30int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std); 30void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std);
31void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std);
31int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf); 32int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
32int ivtv_s_input(struct file *file, void *fh, unsigned int inp); 33int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
33long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); 34long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
diff --git a/drivers/media/video/ivtv/ivtv-streams.c b/drivers/media/video/ivtv/ivtv-streams.c
index 942683336555..e7794dc1330e 100644
--- a/drivers/media/video/ivtv/ivtv-streams.c
+++ b/drivers/media/video/ivtv/ivtv-streams.c
@@ -589,7 +589,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
589 v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1); 589 v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
590 /* Avoid unpredictable PCI bus hang - disable video clocks */ 590 /* Avoid unpredictable PCI bus hang - disable video clocks */
591 v4l2_subdev_call(itv->sd_video, video, s_stream, 0); 591 v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
592 ivtv_msleep_timeout(300, 1); 592 ivtv_msleep_timeout(300, 0);
593 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0); 593 ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
594 v4l2_subdev_call(itv->sd_video, video, s_stream, 1); 594 v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
595 } 595 }
@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
834 } 834 }
835 835
836 /* Handle any pending interrupts */ 836 /* Handle any pending interrupts */
837 ivtv_msleep_timeout(100, 1); 837 ivtv_msleep_timeout(100, 0);
838 } 838 }
839 839
840 atomic_dec(&itv->capturing); 840 atomic_dec(&itv->capturing);
diff --git a/drivers/media/video/ivtv/ivtv-vbi.c b/drivers/media/video/ivtv/ivtv-vbi.c
index b6eb51ce7735..293db806d936 100644
--- a/drivers/media/video/ivtv/ivtv-vbi.c
+++ b/drivers/media/video/ivtv/ivtv-vbi.c
@@ -71,7 +71,7 @@ static void ivtv_set_wss(struct ivtv *itv, int enabled, int mode)
71 Turning this signal on and off can confuse certain 71 Turning this signal on and off can confuse certain
72 TVs. As far as I can tell there is no reason not to 72 TVs. As far as I can tell there is no reason not to
73 transmit this signal. */ 73 transmit this signal. */
74 if ((itv->std & V4L2_STD_625_50) && !enabled) { 74 if ((itv->std_out & V4L2_STD_625_50) && !enabled) {
75 enabled = 1; 75 enabled = 1;
76 mode = 0x08; /* 4x3 full format */ 76 mode = 0x08; /* 4x3 full format */
77 } 77 }
diff --git a/drivers/media/video/ivtv/ivtvfb.c b/drivers/media/video/ivtv/ivtvfb.c
index 17247451c693..6b7c9c823330 100644
--- a/drivers/media/video/ivtv/ivtvfb.c
+++ b/drivers/media/video/ivtv/ivtvfb.c
@@ -247,7 +247,7 @@ static int ivtvfb_set_osd_coords(struct ivtv *itv, const struct ivtv_osd_coords
247 247
248static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window) 248static int ivtvfb_set_display_window(struct ivtv *itv, struct v4l2_rect *ivtv_window)
249{ 249{
250 int osd_height_limit = itv->is_50hz ? 576 : 480; 250 int osd_height_limit = itv->is_out_50hz ? 576 : 480;
251 251
252 /* Only fail if resolution too high, otherwise fudge the start coords. */ 252 /* Only fail if resolution too high, otherwise fudge the start coords. */
253 if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH)) 253 if ((ivtv_window->height > osd_height_limit) || (ivtv_window->width > IVTV_OSD_MAX_WIDTH))
@@ -471,9 +471,9 @@ static int ivtvfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long ar
471 vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT | 471 vblank.flags = FB_VBLANK_HAVE_COUNT |FB_VBLANK_HAVE_VCOUNT |
472 FB_VBLANK_HAVE_VSYNC; 472 FB_VBLANK_HAVE_VSYNC;
473 trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16; 473 trace = read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16;
474 if (itv->is_50hz && trace > 312) 474 if (itv->is_out_50hz && trace > 312)
475 trace -= 312; 475 trace -= 312;
476 else if (itv->is_60hz && trace > 262) 476 else if (itv->is_out_60hz && trace > 262)
477 trace -= 262; 477 trace -= 262;
478 if (trace == 1) 478 if (trace == 1)
479 vblank.flags |= FB_VBLANK_VSYNCING; 479 vblank.flags |= FB_VBLANK_VSYNCING;
@@ -656,7 +656,7 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
656 IVTVFB_DEBUG_INFO("ivtvfb_check_var\n"); 656 IVTVFB_DEBUG_INFO("ivtvfb_check_var\n");
657 657
658 /* Set base references for mode calcs. */ 658 /* Set base references for mode calcs. */
659 if (itv->is_50hz) { 659 if (itv->is_out_50hz) {
660 pixclock = 84316; 660 pixclock = 84316;
661 hlimit = 776; 661 hlimit = 776;
662 vlimit = 591; 662 vlimit = 591;
@@ -784,12 +784,12 @@ static int _ivtvfb_check_var(struct fb_var_screeninfo *var, struct ivtv *itv)
784 If the margins are too large, just center the screen 784 If the margins are too large, just center the screen
785 (enforcing margins causes too many problems) */ 785 (enforcing margins causes too many problems) */
786 786
787 if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1) { 787 if (var->left_margin + var->xres > IVTV_OSD_MAX_WIDTH + 1)
788 var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2); 788 var->left_margin = 1 + ((IVTV_OSD_MAX_WIDTH - var->xres) / 2);
789 } 789
790 if (var->upper_margin + var->yres > (itv->is_50hz ? 577 : 481)) { 790 if (var->upper_margin + var->yres > (itv->is_out_50hz ? 577 : 481))
791 var->upper_margin = 1 + (((itv->is_50hz ? 576 : 480) - var->yres) / 2); 791 var->upper_margin = 1 + (((itv->is_out_50hz ? 576 : 480) -
792 } 792 var->yres) / 2);
793 793
794 /* Maintain overall 'size' for a constant refresh rate */ 794 /* Maintain overall 'size' for a constant refresh rate */
795 var->right_margin = hlimit - var->left_margin - var->xres; 795 var->right_margin = hlimit - var->left_margin - var->xres;
@@ -836,7 +836,12 @@ static int ivtvfb_pan_display(struct fb_var_screeninfo *var, struct fb_info *inf
836 u32 osd_pan_index; 836 u32 osd_pan_index;
837 struct ivtv *itv = (struct ivtv *) info->par; 837 struct ivtv *itv = (struct ivtv *) info->par;
838 838
839 osd_pan_index = (var->xoffset + (var->yoffset * var->xres_virtual))*var->bits_per_pixel/8; 839 if (var->yoffset + info->var.yres > info->var.yres_virtual ||
840 var->xoffset + info->var.xres > info->var.xres_virtual)
841 return -EINVAL;
842
843 osd_pan_index = var->yoffset * info->fix.line_length
844 + var->xoffset * info->var.bits_per_pixel / 8;
840 write_reg(osd_pan_index, 0x02A0C); 845 write_reg(osd_pan_index, 0x02A0C);
841 846
842 /* Pass this info back the yuv handler */ 847 /* Pass this info back the yuv handler */
@@ -1003,19 +1008,21 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
1003 /* Hardware coords start at 0, user coords start at 1. */ 1008 /* Hardware coords start at 0, user coords start at 1. */
1004 osd_left--; 1009 osd_left--;
1005 1010
1006 start_window.left = osd_left >= 0 ? osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2); 1011 start_window.left = osd_left >= 0 ?
1012 osd_left : ((IVTV_OSD_MAX_WIDTH - start_window.width) / 2);
1007 1013
1008 oi->display_byte_stride = 1014 oi->display_byte_stride =
1009 start_window.width * oi->bytes_per_pixel; 1015 start_window.width * oi->bytes_per_pixel;
1010 1016
1011 /* Vertical size & position */ 1017 /* Vertical size & position */
1012 1018
1013 max_height = itv->is_50hz ? 576 : 480; 1019 max_height = itv->is_out_50hz ? 576 : 480;
1014 1020
1015 if (osd_yres > max_height) 1021 if (osd_yres > max_height)
1016 osd_yres = max_height; 1022 osd_yres = max_height;
1017 1023
1018 start_window.height = osd_yres ? osd_yres : itv->is_50hz ? 480 : 400; 1024 start_window.height = osd_yres ?
1025 osd_yres : itv->is_out_50hz ? 480 : 400;
1019 1026
1020 /* Check vertical start (osd_upper). */ 1027 /* Check vertical start (osd_upper). */
1021 if (osd_upper + start_window.height > max_height + 1) { 1028 if (osd_upper + start_window.height > max_height + 1) {
diff --git a/drivers/media/video/omap3isp/isp.c b/drivers/media/video/omap3isp/isp.c
index 472a69359e60..c9fd04ee70a8 100644
--- a/drivers/media/video/omap3isp/isp.c
+++ b/drivers/media/video/omap3isp/isp.c
@@ -391,7 +391,7 @@ static inline void isp_isr_dbg(struct isp_device *isp, u32 irqstatus)
391 }; 391 };
392 int i; 392 int i;
393 393
394 dev_dbg(isp->dev, ""); 394 dev_dbg(isp->dev, "ISP IRQ: ");
395 395
396 for (i = 0; i < ARRAY_SIZE(name); i++) { 396 for (i = 0; i < ARRAY_SIZE(name); i++) {
397 if ((1 << i) & irqstatus) 397 if ((1 << i) & irqstatus)
diff --git a/drivers/media/video/soc_camera.c b/drivers/media/video/soc_camera.c
index 398864370267..4e4d4122d9a6 100644
--- a/drivers/media/video/soc_camera.c
+++ b/drivers/media/video/soc_camera.c
@@ -1512,7 +1512,7 @@ static int video_dev_create(struct soc_camera_device *icd)
1512 */ 1512 */
1513static int soc_camera_video_start(struct soc_camera_device *icd) 1513static int soc_camera_video_start(struct soc_camera_device *icd)
1514{ 1514{
1515 struct device_type *type = icd->vdev->dev.type; 1515 const struct device_type *type = icd->vdev->dev.type;
1516 int ret; 1516 int ret;
1517 1517
1518 if (!icd->dev.parent) 1518 if (!icd->dev.parent)
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c
index ede7852bb1df..c3ab0c813be2 100644
--- a/drivers/media/video/uvc/uvc_entity.c
+++ b/drivers/media/video/uvc/uvc_entity.c
@@ -30,7 +30,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain,
30 struct uvc_entity *remote; 30 struct uvc_entity *remote;
31 unsigned int i; 31 unsigned int i;
32 u8 remote_pad; 32 u8 remote_pad;
33 int ret; 33 int ret = 0;
34 34
35 for (i = 0; i < entity->num_pads; ++i) { 35 for (i = 0; i < entity->num_pads; ++i) {
36 struct media_entity *source; 36 struct media_entity *source;
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index 200311fea369..e2a52e5cf449 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -609,6 +609,7 @@ static int apds990x_detect(struct apds990x_chip *chip)
609 return ret; 609 return ret;
610} 610}
611 611
612#if defined(CONFIG_PM) || defined(CONFIG_PM_RUNTIME)
612static int apds990x_chip_on(struct apds990x_chip *chip) 613static int apds990x_chip_on(struct apds990x_chip *chip)
613{ 614{
614 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs), 615 int err = regulator_bulk_enable(ARRAY_SIZE(chip->regs),
@@ -624,6 +625,7 @@ static int apds990x_chip_on(struct apds990x_chip *chip)
624 apds990x_mode_on(chip); 625 apds990x_mode_on(chip);
625 return 0; 626 return 0;
626} 627}
628#endif
627 629
628static int apds990x_chip_off(struct apds990x_chip *chip) 630static int apds990x_chip_off(struct apds990x_chip *chip)
629{ 631{
diff --git a/drivers/misc/cs5535-mfgpt.c b/drivers/misc/cs5535-mfgpt.c
index e01e08c8c88b..bc685bfc4c33 100644
--- a/drivers/misc/cs5535-mfgpt.c
+++ b/drivers/misc/cs5535-mfgpt.c
@@ -174,7 +174,7 @@ struct cs5535_mfgpt_timer *cs5535_mfgpt_alloc_timer(int timer_nr, int domain)
174 timer_nr = t < max ? (int) t : -1; 174 timer_nr = t < max ? (int) t : -1;
175 } else { 175 } else {
176 /* check if the requested timer's available */ 176 /* check if the requested timer's available */
177 if (test_bit(timer_nr, mfgpt->avail)) 177 if (!test_bit(timer_nr, mfgpt->avail))
178 timer_nr = -1; 178 timer_nr = -1;
179 } 179 }
180 180
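
In the MFGPT allocator above a set bit in mfgpt->avail means the timer is free, so the old `if (test_bit(...))` rejected exactly the timers that were available. A small check of the corrected condition; avail is modeled as a plain bitmask and the helper names are simplified:

    #include <stdio.h>

    static int test_bit(int nr, const unsigned long *addr)
    {
        return (*addr >> nr) & 1;
    }

    /* Return timer_nr if it is still marked available, else -1. */
    static int claim_timer(int timer_nr, unsigned long *avail)
    {
        if (!test_bit(timer_nr, avail))   /* fixed: reject only unavailable timers */
            return -1;
        *avail &= ~(1UL << timer_nr);     /* mark it used */
        return timer_nr;
    }

    int main(void)
    {
        unsigned long avail = 0x5;        /* timers 0 and 2 free */
        printf("claim 0: %d\n", claim_timer(0, &avail));
        printf("claim 0: %d\n", claim_timer(0, &avail));   /* now taken */
        printf("claim 1: %d\n", claim_timer(1, &avail));   /* never available */
        return 0;
    }
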
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index ee5109a3cd98..42f067347bc7 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -495,14 +495,14 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
495 } 495 }
496 } 496 }
497 497
498 dev->stats.tx_packets++;
499 dev->stats.tx_bytes += skb->len;
500
498 if (atomic_dec_return(&queued_msg->use_count) == 0) { 501 if (atomic_dec_return(&queued_msg->use_count) == 0) {
499 dev_kfree_skb(skb); 502 dev_kfree_skb(skb);
500 kfree(queued_msg); 503 kfree(queued_msg);
501 } 504 }
502 505
503 dev->stats.tx_packets++;
504 dev->stats.tx_bytes += skb->len;
505
506 return NETDEV_TX_OK; 506 return NETDEV_TX_OK;
507} 507}
508 508
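
The xpnet reordering above is a use-after-free fix: when the queued message's use count drops to zero the skb is freed, so skb->len must be read for the byte counter before that point, not after. Sketch of the corrected ordering with a toy refcount; the structures are stand-ins, only the account-then-release ordering is the point:

    #include <stdio.h>
    #include <stdlib.h>

    struct sk_buff  { unsigned int len; };
    struct net_stats { unsigned long tx_packets, tx_bytes; };

    static void dev_kfree_skb(struct sk_buff *skb) { free(skb); }

    static void finish_xmit(struct sk_buff *skb, struct net_stats *stats, int *use_count)
    {
        /* Account first, while skb is guaranteed to be alive. */
        stats->tx_packets++;
        stats->tx_bytes += skb->len;

        if (--*use_count == 0)            /* last reference: safe to free now */
            dev_kfree_skb(skb);
    }

    int main(void)
    {
        struct sk_buff *skb = malloc(sizeof(*skb));
        struct net_stats stats = { 0, 0 };
        int use_count = 1;

        skb->len = 1500;
        finish_xmit(skb, &stats, &use_count);
        printf("tx_packets=%lu tx_bytes=%lu\n", stats.tx_packets, stats.tx_bytes);
        return 0;
    }
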
diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c
index 7aded90f9daa..cfbddbef11de 100644
--- a/drivers/misc/spear13xx_pcie_gadget.c
+++ b/drivers/misc/spear13xx_pcie_gadget.c
@@ -845,7 +845,7 @@ err_iounmap:
845err_iounmap_app: 845err_iounmap_app:
846 iounmap(config->va_app_base); 846 iounmap(config->va_app_base);
847err_kzalloc: 847err_kzalloc:
848 kfree(config); 848 kfree(target);
849err_rel_res: 849err_rel_res:
850 release_mem_region(res1->start, resource_size(res1)); 850 release_mem_region(res1->start, resource_size(res1));
851err_rel_res0: 851err_rel_res0:
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 5da5bea0f9f0..7721de942c69 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1144,9 +1144,17 @@ static int __devinit mmci_probe(struct amba_device *dev,
1144 else if (ret != -ENOSYS) 1144 else if (ret != -ENOSYS)
1145 goto err_gpio_cd; 1145 goto err_gpio_cd;
1146 1146
1147 /*
1148 * A gpio pin that will detect cards when inserted and removed
1149 * will most likely want to trigger on the edges if it is
1150 * 0 when ejected and 1 when inserted (or mutatis mutandis
1151 * for the inverted case) so we request triggers on both
1152 * edges.
1153 */
1147 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), 1154 ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
1148 mmci_cd_irq, 0, 1155 mmci_cd_irq,
1149 DRIVER_NAME " (cd)", host); 1156 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1157 DRIVER_NAME " (cd)", host);
1150 if (ret >= 0) 1158 if (ret >= 0)
1151 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1159 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
1152 } 1160 }
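
The MMCI change above requests the card-detect interrupt on both edges rather than with flags 0, since the slot needs an event on insertion and on removal whichever way the switch is wired. A toy model of why a single-edge trigger is not enough, built as edge detection over a sampled GPIO level; the IRQF_TRIGGER_* names mirror the kernel flags but the values here are mine:

    #include <stdio.h>

    #define IRQF_TRIGGER_RISING  0x1
    #define IRQF_TRIGGER_FALLING 0x2

    /* Count "interrupts" a trigger mask would have delivered for a level trace. */
    static int count_events(const int *level, int n, unsigned int flags)
    {
        int events = 0;
        for (int i = 1; i < n; i++) {
            if (level[i] > level[i - 1] && (flags & IRQF_TRIGGER_RISING))
                events++;                     /* card inserted */
            if (level[i] < level[i - 1] && (flags & IRQF_TRIGGER_FALLING))
                events++;                     /* card removed */
        }
        return events;
    }

    int main(void)
    {
        int cd_line[] = { 0, 1, 1, 0, 1, 0 }; /* insert, remove, insert, remove */

        printf("rising only : %d events\n",
               count_events(cd_line, 6, IRQF_TRIGGER_RISING));
        printf("both edges  : %d events\n",
               count_events(cd_line, 6, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING));
        return 0;
    }
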
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 259ece047afc..5b2e2155b413 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -435,6 +435,9 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
435 reg = regulator_get(host->dev, "vmmc_aux"); 435 reg = regulator_get(host->dev, "vmmc_aux");
436 host->vcc_aux = IS_ERR(reg) ? NULL : reg; 436 host->vcc_aux = IS_ERR(reg) ? NULL : reg;
437 437
438 /* For eMMC do not power off when not in sleep state */
439 if (mmc_slot(host).no_regulator_off_init)
440 return 0;
438 /* 441 /*
439 * UGLY HACK: workaround regulator framework bugs. 442 * UGLY HACK: workaround regulator framework bugs.
440 * When the bootloader leaves a supply active, it's 443 * When the bootloader leaves a supply active, it's
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 554a26c55188..84e68f1b9adf 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -413,7 +413,7 @@ el2_open(struct net_device *dev)
413 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); 413 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
414 outb_p(0x00, E33G_IDCFR); 414 outb_p(0x00, E33G_IDCFR);
415 msleep(1); 415 msleep(1);
416 free_irq(*irqp, el2_probe_interrupt); 416 free_irq(*irqp, &seen);
417 if (!seen) 417 if (!seen)
418 continue; 418 continue;
419 419
@@ -423,6 +423,7 @@ el2_open(struct net_device *dev)
423 continue; 423 continue;
424 if (retval < 0) 424 if (retval < 0)
425 goto err_disable; 425 goto err_disable;
426 break;
426 } while (*++irqp); 427 } while (*++irqp);
427 428
428 if (*irqp == 0) { 429 if (*irqp == 0) {
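
The 3c503 probe-loop fix above is about free_irq()'s dev_id: it must be the same cookie that was passed to request_irq() (here &seen, not the handler), and once a working IRQ is found the loop now breaks instead of probing the remaining candidates. A minimal model of the matching-cookie rule, with a tiny registry loosely imitating request_irq()/free_irq():

    #include <stdio.h>
    #include <stddef.h>

    struct irq_action { int irq; void *dev_id; int used; };
    static struct irq_action actions[4];

    static int request_irq(int irq, void *dev_id)
    {
        for (size_t i = 0; i < 4; i++)
            if (!actions[i].used) {
                actions[i] = (struct irq_action){ irq, dev_id, 1 };
                return 0;
            }
        return -1;
    }

    /* Frees only the registration whose cookie matches, as the kernel does. */
    static int free_irq(int irq, void *dev_id)
    {
        for (size_t i = 0; i < 4; i++)
            if (actions[i].used && actions[i].irq == irq && actions[i].dev_id == dev_id) {
                actions[i].used = 0;
                return 0;
            }
        return -1;   /* wrong cookie: nothing released */
    }

    int main(void)
    {
        int seen = 0;
        void *handler = (void *)0x1;   /* what the buggy code passed by mistake */

        request_irq(9, &seen);
        printf("free with handler cookie: %d\n", free_irq(9, handler)); /* fails */
        printf("free with &seen cookie  : %d\n", free_irq(9, &seen));   /* succeeds */
        return 0;
    }
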
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 0c9217f48b72..7b3e23f38913 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -50,7 +50,7 @@ static const char version[] =
50#ifdef __arm__ 50#ifdef __arm__
51static void write_rreg(u_long base, u_int reg, u_int val) 51static void write_rreg(u_long base, u_int reg, u_int val)
52{ 52{
53 __asm__( 53 asm volatile(
54 "str%?h %1, [%2] @ NET_RAP\n\t" 54 "str%?h %1, [%2] @ NET_RAP\n\t"
55 "str%?h %0, [%2, #-4] @ NET_RDP" 55 "str%?h %0, [%2, #-4] @ NET_RDP"
56 : 56 :
@@ -60,7 +60,7 @@ static void write_rreg(u_long base, u_int reg, u_int val)
60static inline unsigned short read_rreg(u_long base_addr, u_int reg) 60static inline unsigned short read_rreg(u_long base_addr, u_int reg)
61{ 61{
62 unsigned short v; 62 unsigned short v;
63 __asm__( 63 asm volatile(
64 "str%?h %1, [%2] @ NET_RAP\n\t" 64 "str%?h %1, [%2] @ NET_RAP\n\t"
65 "ldr%?h %0, [%2, #-4] @ NET_RDP" 65 "ldr%?h %0, [%2, #-4] @ NET_RDP"
66 : "=r" (v) 66 : "=r" (v)
@@ -70,7 +70,7 @@ static inline unsigned short read_rreg(u_long base_addr, u_int reg)
70 70
71static inline void write_ireg(u_long base, u_int reg, u_int val) 71static inline void write_ireg(u_long base, u_int reg, u_int val)
72{ 72{
73 __asm__( 73 asm volatile(
74 "str%?h %1, [%2] @ NET_RAP\n\t" 74 "str%?h %1, [%2] @ NET_RAP\n\t"
75 "str%?h %0, [%2, #8] @ NET_IDP" 75 "str%?h %0, [%2, #8] @ NET_IDP"
76 : 76 :
@@ -80,7 +80,7 @@ static inline void write_ireg(u_long base, u_int reg, u_int val)
80static inline unsigned short read_ireg(u_long base_addr, u_int reg) 80static inline unsigned short read_ireg(u_long base_addr, u_int reg)
81{ 81{
82 u_short v; 82 u_short v;
83 __asm__( 83 asm volatile(
84 "str%?h %1, [%2] @ NAT_RAP\n\t" 84 "str%?h %1, [%2] @ NAT_RAP\n\t"
85 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t" 85 "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
86 : "=r" (v) 86 : "=r" (v)
@@ -91,47 +91,48 @@ static inline unsigned short read_ireg(u_long base_addr, u_int reg)
91#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1)) 91#define am_writeword(dev,off,val) __raw_writew(val, ISAMEM_BASE + ((off) << 1))
92#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1)) 92#define am_readword(dev,off) __raw_readw(ISAMEM_BASE + ((off) << 1))
93 93
94static inline void 94static void
95am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) 95am_writebuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
96{ 96{
97 offset = ISAMEM_BASE + (offset << 1); 97 offset = ISAMEM_BASE + (offset << 1);
98 length = (length + 1) & ~1; 98 length = (length + 1) & ~1;
99 if ((int)buf & 2) { 99 if ((int)buf & 2) {
100 __asm__ __volatile__("str%?h %2, [%0], #4" 100 asm volatile("str%?h %2, [%0], #4"
101 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 101 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
102 buf += 2; 102 buf += 2;
103 length -= 2; 103 length -= 2;
104 } 104 }
105 while (length > 8) { 105 while (length > 8) {
106 unsigned int tmp, tmp2; 106 register unsigned int tmp asm("r2"), tmp2 asm("r3");
107 __asm__ __volatile__( 107 asm volatile(
108 "ldm%?ia %1!, {%2, %3}\n\t" 108 "ldm%?ia %0!, {%1, %2}"
109 : "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
110 length -= 8;
111 asm volatile(
112 "str%?h %1, [%0], #4\n\t"
113 "mov%? %1, %1, lsr #16\n\t"
114 "str%?h %1, [%0], #4\n\t"
109 "str%?h %2, [%0], #4\n\t" 115 "str%?h %2, [%0], #4\n\t"
110 "mov%? %2, %2, lsr #16\n\t" 116 "mov%? %2, %2, lsr #16\n\t"
111 "str%?h %2, [%0], #4\n\t" 117 "str%?h %2, [%0], #4"
112 "str%?h %3, [%0], #4\n\t" 118 : "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
113 "mov%? %3, %3, lsr #16\n\t"
114 "str%?h %3, [%0], #4"
115 : "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2)
116 : "0" (offset), "1" (buf));
117 length -= 8;
118 } 119 }
119 while (length > 0) { 120 while (length > 0) {
120 __asm__ __volatile__("str%?h %2, [%0], #4" 121 asm volatile("str%?h %2, [%0], #4"
121 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8))); 122 : "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
122 buf += 2; 123 buf += 2;
123 length -= 2; 124 length -= 2;
124 } 125 }
125} 126}
126 127
127static inline void 128static void
128am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length) 129am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned int length)
129{ 130{
130 offset = ISAMEM_BASE + (offset << 1); 131 offset = ISAMEM_BASE + (offset << 1);
131 length = (length + 1) & ~1; 132 length = (length + 1) & ~1;
132 if ((int)buf & 2) { 133 if ((int)buf & 2) {
133 unsigned int tmp; 134 unsigned int tmp;
134 __asm__ __volatile__( 135 asm volatile(
135 "ldr%?h %2, [%0], #4\n\t" 136 "ldr%?h %2, [%0], #4\n\t"
136 "str%?b %2, [%1], #1\n\t" 137 "str%?b %2, [%1], #1\n\t"
137 "mov%? %2, %2, lsr #8\n\t" 138 "mov%? %2, %2, lsr #8\n\t"
@@ -140,12 +141,12 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
140 length -= 2; 141 length -= 2;
141 } 142 }
142 while (length > 8) { 143 while (length > 8) {
143 unsigned int tmp, tmp2, tmp3; 144 register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
144 __asm__ __volatile__( 145 asm volatile(
145 "ldr%?h %2, [%0], #4\n\t" 146 "ldr%?h %2, [%0], #4\n\t"
147 "ldr%?h %4, [%0], #4\n\t"
146 "ldr%?h %3, [%0], #4\n\t" 148 "ldr%?h %3, [%0], #4\n\t"
147 "orr%? %2, %2, %3, lsl #16\n\t" 149 "orr%? %2, %2, %4, lsl #16\n\t"
148 "ldr%?h %3, [%0], #4\n\t"
149 "ldr%?h %4, [%0], #4\n\t" 150 "ldr%?h %4, [%0], #4\n\t"
150 "orr%? %3, %3, %4, lsl #16\n\t" 151 "orr%? %3, %3, %4, lsl #16\n\t"
151 "stm%?ia %1!, {%2, %3}" 152 "stm%?ia %1!, {%2, %3}"
@@ -155,7 +156,7 @@ am_readbuffer(struct net_device *dev, u_int offset, unsigned char *buf, unsigned
155 } 156 }
156 while (length > 0) { 157 while (length > 0) {
157 unsigned int tmp; 158 unsigned int tmp;
158 __asm__ __volatile__( 159 asm volatile(
159 "ldr%?h %2, [%0], #4\n\t" 160 "ldr%?h %2, [%0], #4\n\t"
160 "str%?b %2, [%1], #1\n\t" 161 "str%?b %2, [%1], #1\n\t"
161 "mov%? %2, %2, lsr #8\n\t" 162 "mov%? %2, %2, lsr #8\n\t"
@@ -196,6 +197,42 @@ am79c961_ramtest(struct net_device *dev, unsigned int val)
196 return errorcount; 197 return errorcount;
197} 198}
198 199
200static void am79c961_mc_hash(char *addr, u16 *hash)
201{
202 if (addr[0] & 0x01) {
203 int idx, bit;
204 u32 crc;
205
206 crc = ether_crc_le(ETH_ALEN, addr);
207
208 idx = crc >> 30;
209 bit = (crc >> 26) & 15;
210
211 hash[idx] |= 1 << bit;
212 }
213}
214
215static unsigned int am79c961_get_rx_mode(struct net_device *dev, u16 *hash)
216{
217 unsigned int mode = MODE_PORT_10BT;
218
219 if (dev->flags & IFF_PROMISC) {
220 mode |= MODE_PROMISC;
221 memset(hash, 0xff, 4 * sizeof(*hash));
222 } else if (dev->flags & IFF_ALLMULTI) {
223 memset(hash, 0xff, 4 * sizeof(*hash));
224 } else {
225 struct netdev_hw_addr *ha;
226
227 memset(hash, 0, 4 * sizeof(*hash));
228
229 netdev_for_each_mc_addr(ha, dev)
230 am79c961_mc_hash(ha->addr, hash);
231 }
232
233 return mode;
234}
235
199static void 236static void
200am79c961_init_for_open(struct net_device *dev) 237am79c961_init_for_open(struct net_device *dev)
201{ 238{
@@ -203,6 +240,7 @@ am79c961_init_for_open(struct net_device *dev)
203 unsigned long flags; 240 unsigned long flags;
204 unsigned char *p; 241 unsigned char *p;
205 u_int hdr_addr, first_free_addr; 242 u_int hdr_addr, first_free_addr;
243 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
206 int i; 244 int i;
207 245
208 /* 246 /*
@@ -218,16 +256,12 @@ am79c961_init_for_open(struct net_device *dev)
218 write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */ 256 write_ireg (dev->base_addr, 2, 0x0000); /* MODE register selects media */
219 257
220 for (i = LADRL; i <= LADRH; i++) 258 for (i = LADRL; i <= LADRH; i++)
221 write_rreg (dev->base_addr, i, 0); 259 write_rreg (dev->base_addr, i, multi_hash[i - LADRL]);
222 260
223 for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2) 261 for (i = PADRL, p = dev->dev_addr; i <= PADRH; i++, p += 2)
224 write_rreg (dev->base_addr, i, p[0] | (p[1] << 8)); 262 write_rreg (dev->base_addr, i, p[0] | (p[1] << 8));
225 263
226 i = MODE_PORT_10BT; 264 write_rreg (dev->base_addr, MODE, mode);
227 if (dev->flags & IFF_PROMISC)
228 i |= MODE_PROMISC;
229
230 write_rreg (dev->base_addr, MODE, i);
231 write_rreg (dev->base_addr, POLLINT, 0); 265 write_rreg (dev->base_addr, POLLINT, 0);
232 write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS); 266 write_rreg (dev->base_addr, SIZERXR, -RX_BUFFERS);
233 write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS); 267 write_rreg (dev->base_addr, SIZETXR, -TX_BUFFERS);
@@ -340,21 +374,6 @@ am79c961_close(struct net_device *dev)
340 return 0; 374 return 0;
341} 375}
342 376
343static void am79c961_mc_hash(char *addr, unsigned short *hash)
344{
345 if (addr[0] & 0x01) {
346 int idx, bit;
347 u32 crc;
348
349 crc = ether_crc_le(ETH_ALEN, addr);
350
351 idx = crc >> 30;
352 bit = (crc >> 26) & 15;
353
354 hash[idx] |= 1 << bit;
355 }
356}
357
358/* 377/*
359 * Set or clear promiscuous/multicast mode filter for this adapter. 378 * Set or clear promiscuous/multicast mode filter for this adapter.
360 */ 379 */
@@ -362,24 +381,9 @@ static void am79c961_setmulticastlist (struct net_device *dev)
362{ 381{
363 struct dev_priv *priv = netdev_priv(dev); 382 struct dev_priv *priv = netdev_priv(dev);
364 unsigned long flags; 383 unsigned long flags;
365 unsigned short multi_hash[4], mode; 384 u16 multi_hash[4], mode = am79c961_get_rx_mode(dev, multi_hash);
366 int i, stopped; 385 int i, stopped;
367 386
368 mode = MODE_PORT_10BT;
369
370 if (dev->flags & IFF_PROMISC) {
371 mode |= MODE_PROMISC;
372 } else if (dev->flags & IFF_ALLMULTI) {
373 memset(multi_hash, 0xff, sizeof(multi_hash));
374 } else {
375 struct netdev_hw_addr *ha;
376
377 memset(multi_hash, 0x00, sizeof(multi_hash));
378
379 netdev_for_each_mc_addr(ha, dev)
380 am79c961_mc_hash(ha->addr, multi_hash);
381 }
382
383 spin_lock_irqsave(&priv->chip_lock, flags); 387 spin_lock_irqsave(&priv->chip_lock, flags);
384 388
385 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP; 389 stopped = read_rreg(dev->base_addr, CSR0) & CSR0_STOP;
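The am79c961 hunks consolidate the multicast setup into am79c961_get_rx_mode(): the little-endian Ethernet CRC of each multicast address selects one of 64 bits across the four 16-bit LADR registers (CRC bits 31..30 pick the register, bits 29..26 the bit). The standalone userspace sketch below re-implements the kernel's ether_crc_le() for illustration and uses the IPv4 all-hosts multicast MAC as sample input; it is not driver code.

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t ether_crc_le(int length, const unsigned char *data)
    {
    	uint32_t crc = ~0u;
    	while (--length >= 0) {
    		unsigned char octet = *data++;
    		for (int bit = 8; --bit >= 0; octet >>= 1) {
    			if ((crc ^ octet) & 1)
    				crc = (crc >> 1) ^ 0xedb88320;
    			else
    				crc >>= 1;
    		}
    	}
    	return crc;
    }

    int main(void)
    {
    	/* 01:00:5e:00:00:01 is the IPv4 all-hosts multicast MAC */
    	unsigned char addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    	uint16_t hash[4] = { 0 };
    	uint32_t crc = ether_crc_le(6, addr);
    	int idx = crc >> 30;		/* which of the four LADR registers */
    	int bit = (crc >> 26) & 15;	/* which bit inside that register */

    	hash[idx] |= 1 << bit;
    	printf("crc=%08x -> hash[%d] bit %d\n", (unsigned)crc, idx, bit);
    	return 0;
    }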
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index a167addd5382..4317af8d2f0a 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -284,10 +284,14 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
284 284
285 skb = dev_alloc_skb(length + 2); 285 skb = dev_alloc_skb(length + 2);
286 if (likely(skb != NULL)) { 286 if (likely(skb != NULL)) {
287 struct ep93xx_rdesc *rxd = &ep->descs->rdesc[entry];
287 skb_reserve(skb, 2); 288 skb_reserve(skb, 2);
288 dma_sync_single_for_cpu(NULL, ep->descs->rdesc[entry].buf_addr, 289 dma_sync_single_for_cpu(dev->dev.parent, rxd->buf_addr,
289 length, DMA_FROM_DEVICE); 290 length, DMA_FROM_DEVICE);
290 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length); 291 skb_copy_to_linear_data(skb, ep->rx_buf[entry], length);
292 dma_sync_single_for_device(dev->dev.parent,
293 rxd->buf_addr, length,
294 DMA_FROM_DEVICE);
291 skb_put(skb, length); 295 skb_put(skb, length);
292 skb->protocol = eth_type_trans(skb, dev); 296 skb->protocol = eth_type_trans(skb, dev);
293 297
@@ -349,6 +353,7 @@ poll_some_more:
349static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev) 353static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
350{ 354{
351 struct ep93xx_priv *ep = netdev_priv(dev); 355 struct ep93xx_priv *ep = netdev_priv(dev);
356 struct ep93xx_tdesc *txd;
352 int entry; 357 int entry;
353 358
354 if (unlikely(skb->len > MAX_PKT_SIZE)) { 359 if (unlikely(skb->len > MAX_PKT_SIZE)) {
@@ -360,11 +365,14 @@ static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
360 entry = ep->tx_pointer; 365 entry = ep->tx_pointer;
361 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1); 366 ep->tx_pointer = (ep->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);
362 367
363 ep->descs->tdesc[entry].tdesc1 = 368 txd = &ep->descs->tdesc[entry];
364 TDESC1_EOF | (entry << 16) | (skb->len & 0xfff); 369
370 txd->tdesc1 = TDESC1_EOF | (entry << 16) | (skb->len & 0xfff);
371 dma_sync_single_for_cpu(dev->dev.parent, txd->buf_addr, skb->len,
372 DMA_TO_DEVICE);
365 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]); 373 skb_copy_and_csum_dev(skb, ep->tx_buf[entry]);
366 dma_sync_single_for_cpu(NULL, ep->descs->tdesc[entry].buf_addr, 374 dma_sync_single_for_device(dev->dev.parent, txd->buf_addr, skb->len,
367 skb->len, DMA_TO_DEVICE); 375 DMA_TO_DEVICE);
368 dev_kfree_skb(skb); 376 dev_kfree_skb(skb);
369 377
370 spin_lock_irq(&ep->tx_pending_lock); 378 spin_lock_irq(&ep->tx_pending_lock);
@@ -458,89 +466,80 @@ static irqreturn_t ep93xx_irq(int irq, void *dev_id)
458 466
459static void ep93xx_free_buffers(struct ep93xx_priv *ep) 467static void ep93xx_free_buffers(struct ep93xx_priv *ep)
460{ 468{
469 struct device *dev = ep->dev->dev.parent;
461 int i; 470 int i;
462 471
463 for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) { 472 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
464 dma_addr_t d; 473 dma_addr_t d;
465 474
466 d = ep->descs->rdesc[i].buf_addr; 475 d = ep->descs->rdesc[i].buf_addr;
467 if (d) 476 if (d)
468 dma_unmap_single(NULL, d, PAGE_SIZE, DMA_FROM_DEVICE); 477 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_FROM_DEVICE);
469 478
470 if (ep->rx_buf[i] != NULL) 479 if (ep->rx_buf[i] != NULL)
471 free_page((unsigned long)ep->rx_buf[i]); 480 kfree(ep->rx_buf[i]);
472 } 481 }
473 482
474 for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) { 483 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
475 dma_addr_t d; 484 dma_addr_t d;
476 485
477 d = ep->descs->tdesc[i].buf_addr; 486 d = ep->descs->tdesc[i].buf_addr;
478 if (d) 487 if (d)
479 dma_unmap_single(NULL, d, PAGE_SIZE, DMA_TO_DEVICE); 488 dma_unmap_single(dev, d, PKT_BUF_SIZE, DMA_TO_DEVICE);
480 489
481 if (ep->tx_buf[i] != NULL) 490 if (ep->tx_buf[i] != NULL)
482 free_page((unsigned long)ep->tx_buf[i]); 491 kfree(ep->tx_buf[i]);
483 } 492 }
484 493
485 dma_free_coherent(NULL, sizeof(struct ep93xx_descs), ep->descs, 494 dma_free_coherent(dev, sizeof(struct ep93xx_descs), ep->descs,
486 ep->descs_dma_addr); 495 ep->descs_dma_addr);
487} 496}
488 497
489/*
490 * The hardware enforces a sub-2K maximum packet size, so we put
491 * two buffers on every hardware page.
492 */
493static int ep93xx_alloc_buffers(struct ep93xx_priv *ep) 498static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
494{ 499{
500 struct device *dev = ep->dev->dev.parent;
495 int i; 501 int i;
496 502
497 ep->descs = dma_alloc_coherent(NULL, sizeof(struct ep93xx_descs), 503 ep->descs = dma_alloc_coherent(dev, sizeof(struct ep93xx_descs),
498 &ep->descs_dma_addr, GFP_KERNEL | GFP_DMA); 504 &ep->descs_dma_addr, GFP_KERNEL);
499 if (ep->descs == NULL) 505 if (ep->descs == NULL)
500 return 1; 506 return 1;
501 507
502 for (i = 0; i < RX_QUEUE_ENTRIES; i += 2) { 508 for (i = 0; i < RX_QUEUE_ENTRIES; i++) {
503 void *page; 509 void *buf;
504 dma_addr_t d; 510 dma_addr_t d;
505 511
506 page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); 512 buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
507 if (page == NULL) 513 if (buf == NULL)
508 goto err; 514 goto err;
509 515
510 d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE); 516 d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_FROM_DEVICE);
511 if (dma_mapping_error(NULL, d)) { 517 if (dma_mapping_error(dev, d)) {
512 free_page((unsigned long)page); 518 kfree(buf);
513 goto err; 519 goto err;
514 } 520 }
515 521
516 ep->rx_buf[i] = page; 522 ep->rx_buf[i] = buf;
517 ep->descs->rdesc[i].buf_addr = d; 523 ep->descs->rdesc[i].buf_addr = d;
518 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE; 524 ep->descs->rdesc[i].rdesc1 = (i << 16) | PKT_BUF_SIZE;
519
520 ep->rx_buf[i + 1] = page + PKT_BUF_SIZE;
521 ep->descs->rdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
522 ep->descs->rdesc[i + 1].rdesc1 = ((i + 1) << 16) | PKT_BUF_SIZE;
523 } 525 }
524 526
525 for (i = 0; i < TX_QUEUE_ENTRIES; i += 2) { 527 for (i = 0; i < TX_QUEUE_ENTRIES; i++) {
526 void *page; 528 void *buf;
527 dma_addr_t d; 529 dma_addr_t d;
528 530
529 page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA); 531 buf = kmalloc(PKT_BUF_SIZE, GFP_KERNEL);
530 if (page == NULL) 532 if (buf == NULL)
531 goto err; 533 goto err;
532 534
533 d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE); 535 d = dma_map_single(dev, buf, PKT_BUF_SIZE, DMA_TO_DEVICE);
534 if (dma_mapping_error(NULL, d)) { 536 if (dma_mapping_error(dev, d)) {
535 free_page((unsigned long)page); 537 kfree(buf);
536 goto err; 538 goto err;
537 } 539 }
538 540
539 ep->tx_buf[i] = page; 541 ep->tx_buf[i] = buf;
540 ep->descs->tdesc[i].buf_addr = d; 542 ep->descs->tdesc[i].buf_addr = d;
541
542 ep->tx_buf[i + 1] = page + PKT_BUF_SIZE;
543 ep->descs->tdesc[i + 1].buf_addr = d + PKT_BUF_SIZE;
544 } 543 }
545 544
546 return 0; 545 return 0;
@@ -830,6 +829,7 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
830 } 829 }
831 ep = netdev_priv(dev); 830 ep = netdev_priv(dev);
832 ep->dev = dev; 831 ep->dev = dev;
832 SET_NETDEV_DEV(dev, &pdev->dev);
833 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64); 833 netif_napi_add(dev, &ep->napi, ep93xx_poll, 64);
834 834
835 platform_set_drvdata(pdev, dev); 835 platform_set_drvdata(pdev, dev);
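The ep93xx hunks stop passing a NULL struct device to the DMA API and bracket every CPU access to a streaming mapping with a sync pair: for_cpu before the copy, for_device afterwards, handing the buffer back to the hardware. A hedged sketch of that ownership discipline, with invented function and variable names:

    #include <linux/dma-mapping.h>
    #include <linux/string.h>

    /* Sketch only: copy a received packet out of a streaming DMA buffer.
     * The CPU may only touch the buffer between the two sync calls. */
    static void example_copy_rx(struct device *dev, dma_addr_t handle,
    			    void *buf, void *dst, size_t len)
    {
    	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
    	memcpy(dst, buf, len);
    	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
    }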
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 68d45ba2d9b9..6c019e148546 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -52,13 +52,13 @@ MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac"); 52MODULE_ALIAS("platform:bfin_mac");
53 53
54#if defined(CONFIG_BFIN_MAC_USE_L1) 54#if defined(CONFIG_BFIN_MAC_USE_L1)
55# define bfin_mac_alloc(dma_handle, size) l1_data_sram_zalloc(size) 55# define bfin_mac_alloc(dma_handle, size, num) l1_data_sram_zalloc(size*num)
56# define bfin_mac_free(dma_handle, ptr) l1_data_sram_free(ptr) 56# define bfin_mac_free(dma_handle, ptr, num) l1_data_sram_free(ptr)
57#else 57#else
58# define bfin_mac_alloc(dma_handle, size) \ 58# define bfin_mac_alloc(dma_handle, size, num) \
59 dma_alloc_coherent(NULL, size, dma_handle, GFP_KERNEL) 59 dma_alloc_coherent(NULL, size*num, dma_handle, GFP_KERNEL)
60# define bfin_mac_free(dma_handle, ptr) \ 60# define bfin_mac_free(dma_handle, ptr, num) \
61 dma_free_coherent(NULL, sizeof(*ptr), ptr, dma_handle) 61 dma_free_coherent(NULL, sizeof(*ptr)*num, ptr, dma_handle)
62#endif 62#endif
63 63
64#define PKT_BUF_SZ 1580 64#define PKT_BUF_SZ 1580
@@ -95,7 +95,7 @@ static void desc_list_free(void)
95 t = t->next; 95 t = t->next;
96 } 96 }
97 } 97 }
98 bfin_mac_free(dma_handle, tx_desc); 98 bfin_mac_free(dma_handle, tx_desc, CONFIG_BFIN_TX_DESC_NUM);
99 } 99 }
100 100
101 if (rx_desc) { 101 if (rx_desc) {
@@ -109,7 +109,7 @@ static void desc_list_free(void)
109 r = r->next; 109 r = r->next;
110 } 110 }
111 } 111 }
112 bfin_mac_free(dma_handle, rx_desc); 112 bfin_mac_free(dma_handle, rx_desc, CONFIG_BFIN_RX_DESC_NUM);
113 } 113 }
114} 114}
115 115
@@ -126,13 +126,13 @@ static int desc_list_init(void)
126#endif 126#endif
127 127
128 tx_desc = bfin_mac_alloc(&dma_handle, 128 tx_desc = bfin_mac_alloc(&dma_handle,
129 sizeof(struct net_dma_desc_tx) * 129 sizeof(struct net_dma_desc_tx),
130 CONFIG_BFIN_TX_DESC_NUM); 130 CONFIG_BFIN_TX_DESC_NUM);
131 if (tx_desc == NULL) 131 if (tx_desc == NULL)
132 goto init_error; 132 goto init_error;
133 133
134 rx_desc = bfin_mac_alloc(&dma_handle, 134 rx_desc = bfin_mac_alloc(&dma_handle,
135 sizeof(struct net_dma_desc_rx) * 135 sizeof(struct net_dma_desc_rx),
136 CONFIG_BFIN_RX_DESC_NUM); 136 CONFIG_BFIN_RX_DESC_NUM);
137 if (rx_desc == NULL) 137 if (rx_desc == NULL)
138 goto init_error; 138 goto init_error;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 07e866d5a87c..d117280b9cd8 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -378,6 +378,8 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
378 return next; 378 return next;
379} 379}
380 380
381#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
382
381/** 383/**
382 * bond_dev_queue_xmit - Prepare skb for xmit. 384 * bond_dev_queue_xmit - Prepare skb for xmit.
383 * 385 *
@@ -390,6 +392,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
390{ 392{
391 skb->dev = slave_dev; 393 skb->dev = slave_dev;
392 skb->priority = 1; 394 skb->priority = 1;
395
396 skb->queue_mapping = bond_queue_mapping(skb);
397
393 if (unlikely(netpoll_tx_running(slave_dev))) 398 if (unlikely(netpoll_tx_running(slave_dev)))
394 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); 399 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
395 else 400 else
@@ -1275,6 +1280,7 @@ static inline int slave_enable_netpoll(struct slave *slave)
1275 goto out; 1280 goto out;
1276 1281
1277 np->dev = slave->dev; 1282 np->dev = slave->dev;
1283 strlcpy(np->dev_name, slave->dev->name, IFNAMSIZ);
1278 err = __netpoll_setup(np); 1284 err = __netpoll_setup(np);
1279 if (err) { 1285 if (err) {
1280 kfree(np); 1286 kfree(np);
@@ -4188,6 +4194,7 @@ static inline int bond_slave_override(struct bonding *bond,
4188 return res; 4194 return res;
4189} 4195}
4190 4196
4197
4191static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb) 4198static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4192{ 4199{
4193 /* 4200 /*
@@ -4198,6 +4205,11 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4198 */ 4205 */
4199 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0; 4206 u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
4200 4207
4208 /*
4209 * Save the original txq to restore before passing to the driver
4210 */
4211 bond_queue_mapping(skb) = skb->queue_mapping;
4212
4201 if (unlikely(txq >= dev->real_num_tx_queues)) { 4213 if (unlikely(txq >= dev->real_num_tx_queues)) {
4202 do { 4214 do {
4203 txq -= dev->real_num_tx_queues; 4215 txq -= dev->real_num_tx_queues;
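The bonding hunks stash the queue chosen by the stack in the skb control buffer during bond_select_queue() and restore it in bond_dev_queue_xmit(), so the slave driver sees the original mapping rather than the bond's internal choice. A small sketch of the stash-and-restore idea; the names are invented, and using the first two bytes of cb is only safe because no other layer in this path claims them:

    #include <linux/skbuff.h>

    /* Park a 16-bit queue id at the start of skb->cb (scratch space owned
     * by the current layer). */
    #define example_stashed_txq(skb) (*(u16 *)((skb)->cb))

    static u16 example_select_queue(struct sk_buff *skb, u16 txq)
    {
    	example_stashed_txq(skb) = skb->queue_mapping;	/* remember original */
    	return txq;
    }

    static void example_restore_queue(struct sk_buff *skb)
    {
    	skb->queue_mapping = example_stashed_txq(skb);	/* hand it to the slave */
    }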
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index c445457b66d5..23179dbcedd2 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -346,7 +346,7 @@ parse_eeprom (struct net_device *dev)
346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */ 346 if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) { /* D-Link Only */
347 /* Check CRC */ 347 /* Check CRC */
348 crc = ~ether_crc_le (256 - 4, sromdata); 348 crc = ~ether_crc_le (256 - 4, sromdata);
349 if (psrom->crc != crc) { 349 if (psrom->crc != cpu_to_le32(crc)) {
350 printk (KERN_ERR "%s: EEPROM data CRC error.\n", 350 printk (KERN_ERR "%s: EEPROM data CRC error.\n",
351 dev->name); 351 dev->name);
352 return -1; 352 return -1;
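The dl2k fix compares the computed CRC against the EEPROM field in its stored little-endian form via cpu_to_le32(); comparing raw host-order integers only worked on little-endian machines. A userspace illustration of the conversion (cpu_to_le32 itself is a kernel helper, re-created here by explicit byte assembly):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Lay a host-order value out in little-endian byte order, matching how
     * the EEPROM stores it, so the comparison is byte-order independent. */
    static uint32_t my_cpu_to_le32(uint32_t v)
    {
    	uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff, (v >> 16) & 0xff, v >> 24 };
    	uint32_t le;

    	memcpy(&le, b, sizeof(le));	/* reinterpret in memory order */
    	return le;
    }

    int main(void)
    {
    	uint32_t crc = 0x12345678;
    	printf("host 0x%08x -> stored-as-le 0x%08x\n",
    	       (unsigned)crc, (unsigned)my_cpu_to_le32(crc));
    	return 0;
    }

On a little-endian host the two values print the same; on a big-endian host they differ, which is exactly why the original comparison failed there.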
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 7a84e45487e8..7583a9572bcc 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -105,7 +105,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
105 goto out_ep; 105 goto out_ep;
106 106
107 fep->fcc.mem = (void __iomem *)cpm2_immr; 107 fep->fcc.mem = (void __iomem *)cpm2_immr;
108 fpi->dpram_offset = cpm_dpalloc(128, 8); 108 fpi->dpram_offset = cpm_dpalloc(128, 32);
109 if (IS_ERR_VALUE(fpi->dpram_offset)) { 109 if (IS_ERR_VALUE(fpi->dpram_offset)) {
110 ret = fpi->dpram_offset; 110 ret = fpi->dpram_offset;
111 goto out_fcccp; 111 goto out_fcccp;
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index def7f7efc803..dc0a7aa42b2e 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -10,7 +10,7 @@
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
12 * 12 *
13 * Copyright 2002-2009 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
14 * Copyright 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
15 * 15 *
16 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
@@ -475,9 +475,6 @@ static const struct net_device_ops gfar_netdev_ops = {
475#endif 475#endif
476}; 476};
477 477
478unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
479unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
480
481void lock_rx_qs(struct gfar_private *priv) 478void lock_rx_qs(struct gfar_private *priv)
482{ 479{
483 int i = 0x0; 480 int i = 0x0;
@@ -872,28 +869,28 @@ static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
872 869
873 rqfar--; 870 rqfar--;
874 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT; 871 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
875 ftp_rqfpr[rqfar] = rqfpr; 872 priv->ftp_rqfpr[rqfar] = rqfpr;
876 ftp_rqfcr[rqfar] = rqfcr; 873 priv->ftp_rqfcr[rqfar] = rqfcr;
877 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 874 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
878 875
879 rqfar--; 876 rqfar--;
880 rqfcr = RQFCR_CMP_NOMATCH; 877 rqfcr = RQFCR_CMP_NOMATCH;
881 ftp_rqfpr[rqfar] = rqfpr; 878 priv->ftp_rqfpr[rqfar] = rqfpr;
882 ftp_rqfcr[rqfar] = rqfcr; 879 priv->ftp_rqfcr[rqfar] = rqfcr;
883 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 880 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
884 881
885 rqfar--; 882 rqfar--;
886 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND; 883 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
887 rqfpr = class; 884 rqfpr = class;
888 ftp_rqfcr[rqfar] = rqfcr; 885 priv->ftp_rqfcr[rqfar] = rqfcr;
889 ftp_rqfpr[rqfar] = rqfpr; 886 priv->ftp_rqfpr[rqfar] = rqfpr;
890 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 887 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
891 888
892 rqfar--; 889 rqfar--;
893 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND; 890 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
894 rqfpr = class; 891 rqfpr = class;
895 ftp_rqfcr[rqfar] = rqfcr; 892 priv->ftp_rqfcr[rqfar] = rqfcr;
896 ftp_rqfpr[rqfar] = rqfpr; 893 priv->ftp_rqfpr[rqfar] = rqfpr;
897 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 894 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
898 895
899 return rqfar; 896 return rqfar;
@@ -908,8 +905,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
908 905
909 /* Default rule */ 906 /* Default rule */
910 rqfcr = RQFCR_CMP_MATCH; 907 rqfcr = RQFCR_CMP_MATCH;
911 ftp_rqfcr[rqfar] = rqfcr; 908 priv->ftp_rqfcr[rqfar] = rqfcr;
912 ftp_rqfpr[rqfar] = rqfpr; 909 priv->ftp_rqfpr[rqfar] = rqfpr;
913 gfar_write_filer(priv, rqfar, rqfcr, rqfpr); 910 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
914 911
915 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6); 912 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
@@ -925,8 +922,8 @@ static void gfar_init_filer_table(struct gfar_private *priv)
925 /* Rest are masked rules */ 922 /* Rest are masked rules */
926 rqfcr = RQFCR_CMP_NOMATCH; 923 rqfcr = RQFCR_CMP_NOMATCH;
927 for (i = 0; i < rqfar; i++) { 924 for (i = 0; i < rqfar; i++) {
928 ftp_rqfcr[i] = rqfcr; 925 priv->ftp_rqfcr[i] = rqfcr;
929 ftp_rqfpr[i] = rqfpr; 926 priv->ftp_rqfpr[i] = rqfpr;
930 gfar_write_filer(priv, i, rqfcr, rqfpr); 927 gfar_write_filer(priv, i, rqfcr, rqfpr);
931 } 928 }
932} 929}
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index a4e690a9aeb9..76f14d044470 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2002-2009 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
13 * 13 *
14 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -1122,10 +1122,12 @@ struct gfar_private {
1122 /* HW time stamping enabled flag */ 1122 /* HW time stamping enabled flag */
1123 int hwts_rx_en; 1123 int hwts_rx_en;
1124 int hwts_tx_en; 1124 int hwts_tx_en;
1125
1126 /*Filer table*/
1127 unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1128 unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1125}; 1129};
1126 1130
1127extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1128extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1129 1131
1130static inline int gfar_has_errata(struct gfar_private *priv, 1132static inline int gfar_has_errata(struct gfar_private *priv,
1131 enum gfar_errata err) 1133 enum gfar_errata err)
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 05103362bebe..2ecdc9a785fa 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -9,7 +9,7 @@
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com> 10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 11 *
12 * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc. 12 * Copyright 2003-2006, 2008-2009, 2011 Freescale Semiconductor, Inc.
13 * 13 *
14 * This software may be used and distributed according to 14 * This software may be used and distributed according to
15 * the terms of the GNU Public License, Version 2, incorporated herein 15 * the terms of the GNU Public License, Version 2, incorporated herein
@@ -610,15 +610,15 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
610 if (ethflow & RXH_L2DA) { 610 if (ethflow & RXH_L2DA) {
611 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH | 611 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
612 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 612 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
613 ftp_rqfpr[priv->cur_filer_idx] = fpr; 613 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
614 ftp_rqfcr[priv->cur_filer_idx] = fcr; 614 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
615 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 615 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
616 priv->cur_filer_idx = priv->cur_filer_idx - 1; 616 priv->cur_filer_idx = priv->cur_filer_idx - 1;
617 617
618 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH | 618 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
619 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0; 619 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
620 ftp_rqfpr[priv->cur_filer_idx] = fpr; 620 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
621 ftp_rqfcr[priv->cur_filer_idx] = fcr; 621 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
622 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 622 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
623 priv->cur_filer_idx = priv->cur_filer_idx - 1; 623 priv->cur_filer_idx = priv->cur_filer_idx - 1;
624 } 624 }
@@ -627,16 +627,16 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
627 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH | 627 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
628 RQFCR_AND | RQFCR_HASHTBL_0; 628 RQFCR_AND | RQFCR_HASHTBL_0;
629 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 629 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
630 ftp_rqfpr[priv->cur_filer_idx] = fpr; 630 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
631 ftp_rqfcr[priv->cur_filer_idx] = fcr; 631 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
632 priv->cur_filer_idx = priv->cur_filer_idx - 1; 632 priv->cur_filer_idx = priv->cur_filer_idx - 1;
633 } 633 }
634 634
635 if (ethflow & RXH_IP_SRC) { 635 if (ethflow & RXH_IP_SRC) {
636 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 636 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
637 RQFCR_AND | RQFCR_HASHTBL_0; 637 RQFCR_AND | RQFCR_HASHTBL_0;
638 ftp_rqfpr[priv->cur_filer_idx] = fpr; 638 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
639 ftp_rqfcr[priv->cur_filer_idx] = fcr; 639 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
640 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 640 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
641 priv->cur_filer_idx = priv->cur_filer_idx - 1; 641 priv->cur_filer_idx = priv->cur_filer_idx - 1;
642 } 642 }
@@ -644,8 +644,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
644 if (ethflow & (RXH_IP_DST)) { 644 if (ethflow & (RXH_IP_DST)) {
645 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH | 645 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
646 RQFCR_AND | RQFCR_HASHTBL_0; 646 RQFCR_AND | RQFCR_HASHTBL_0;
647 ftp_rqfpr[priv->cur_filer_idx] = fpr; 647 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
648 ftp_rqfcr[priv->cur_filer_idx] = fcr; 648 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
649 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 649 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
650 priv->cur_filer_idx = priv->cur_filer_idx - 1; 650 priv->cur_filer_idx = priv->cur_filer_idx - 1;
651 } 651 }
@@ -653,8 +653,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
653 if (ethflow & RXH_L3_PROTO) { 653 if (ethflow & RXH_L3_PROTO) {
654 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH | 654 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
655 RQFCR_AND | RQFCR_HASHTBL_0; 655 RQFCR_AND | RQFCR_HASHTBL_0;
656 ftp_rqfpr[priv->cur_filer_idx] = fpr; 656 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
657 ftp_rqfcr[priv->cur_filer_idx] = fcr; 657 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
658 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 658 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
659 priv->cur_filer_idx = priv->cur_filer_idx - 1; 659 priv->cur_filer_idx = priv->cur_filer_idx - 1;
660 } 660 }
@@ -662,8 +662,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
662 if (ethflow & RXH_L4_B_0_1) { 662 if (ethflow & RXH_L4_B_0_1) {
663 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 663 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
664 RQFCR_AND | RQFCR_HASHTBL_0; 664 RQFCR_AND | RQFCR_HASHTBL_0;
665 ftp_rqfpr[priv->cur_filer_idx] = fpr; 665 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
666 ftp_rqfcr[priv->cur_filer_idx] = fcr; 666 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
667 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 667 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
668 priv->cur_filer_idx = priv->cur_filer_idx - 1; 668 priv->cur_filer_idx = priv->cur_filer_idx - 1;
669 } 669 }
@@ -671,8 +671,8 @@ static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
671 if (ethflow & RXH_L4_B_2_3) { 671 if (ethflow & RXH_L4_B_2_3) {
672 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH | 672 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
673 RQFCR_AND | RQFCR_HASHTBL_0; 673 RQFCR_AND | RQFCR_HASHTBL_0;
674 ftp_rqfpr[priv->cur_filer_idx] = fpr; 674 priv->ftp_rqfpr[priv->cur_filer_idx] = fpr;
675 ftp_rqfcr[priv->cur_filer_idx] = fcr; 675 priv->ftp_rqfcr[priv->cur_filer_idx] = fcr;
676 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr); 676 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
677 priv->cur_filer_idx = priv->cur_filer_idx - 1; 677 priv->cur_filer_idx = priv->cur_filer_idx - 1;
678 } 678 }
@@ -706,12 +706,12 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
706 } 706 }
707 707
708 for (i = 0; i < MAX_FILER_IDX + 1; i++) { 708 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
709 local_rqfpr[j] = ftp_rqfpr[i]; 709 local_rqfpr[j] = priv->ftp_rqfpr[i];
710 local_rqfcr[j] = ftp_rqfcr[i]; 710 local_rqfcr[j] = priv->ftp_rqfcr[i];
711 j--; 711 j--;
712 if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE | 712 if ((priv->ftp_rqfcr[i] == (RQFCR_PID_PARSE |
713 RQFCR_CLE |RQFCR_AND)) && 713 RQFCR_CLE |RQFCR_AND)) &&
714 (ftp_rqfpr[i] == cmp_rqfpr)) 714 (priv->ftp_rqfpr[i] == cmp_rqfpr))
715 break; 715 break;
716 } 716 }
717 717
@@ -724,20 +724,22 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
724 * if it was already programmed, we need to overwrite these rules 724 * if it was already programmed, we need to overwrite these rules
725 */ 725 */
726 for (l = i+1; l < MAX_FILER_IDX; l++) { 726 for (l = i+1; l < MAX_FILER_IDX; l++) {
727 if ((ftp_rqfcr[l] & RQFCR_CLE) && 727 if ((priv->ftp_rqfcr[l] & RQFCR_CLE) &&
728 !(ftp_rqfcr[l] & RQFCR_AND)) { 728 !(priv->ftp_rqfcr[l] & RQFCR_AND)) {
729 ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT | 729 priv->ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
730 RQFCR_HASHTBL_0 | RQFCR_PID_MASK; 730 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
731 ftp_rqfpr[l] = FPR_FILER_MASK; 731 priv->ftp_rqfpr[l] = FPR_FILER_MASK;
732 gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]); 732 gfar_write_filer(priv, l, priv->ftp_rqfcr[l],
733 priv->ftp_rqfpr[l]);
733 break; 734 break;
734 } 735 }
735 736
736 if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND)) 737 if (!(priv->ftp_rqfcr[l] & RQFCR_CLE) &&
738 (priv->ftp_rqfcr[l] & RQFCR_AND))
737 continue; 739 continue;
738 else { 740 else {
739 local_rqfpr[j] = ftp_rqfpr[l]; 741 local_rqfpr[j] = priv->ftp_rqfpr[l];
740 local_rqfcr[j] = ftp_rqfcr[l]; 742 local_rqfcr[j] = priv->ftp_rqfcr[l];
741 j--; 743 j--;
742 } 744 }
743 } 745 }
@@ -750,8 +752,8 @@ static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u
750 752
751 /* Write back the popped out rules again */ 753 /* Write back the popped out rules again */
752 for (k = j+1; k < MAX_FILER_IDX; k++) { 754 for (k = j+1; k < MAX_FILER_IDX; k++) {
753 ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k]; 755 priv->ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
754 ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k]; 756 priv->ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
755 gfar_write_filer(priv, priv->cur_filer_idx, 757 gfar_write_filer(priv, priv->cur_filer_idx,
756 local_rqfcr[k], local_rqfpr[k]); 758 local_rqfcr[k], local_rqfpr[k]);
757 if (!priv->cur_filer_idx) 759 if (!priv->cur_filer_idx)
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 8e10d2f6a5ad..c3ecb118c1df 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -1580,12 +1580,12 @@ static netdev_tx_t hp100_start_xmit_bm(struct sk_buff *skb,
1580 hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */ 1580 hp100_outl(ringptr->pdl_paddr, TX_PDA_L); /* Low Prio. Queue */
1581 1581
1582 lp->txrcommit++; 1582 lp->txrcommit++;
1583 spin_unlock_irqrestore(&lp->lock, flags);
1584 1583
1585 /* Update statistics */
1586 dev->stats.tx_packets++; 1584 dev->stats.tx_packets++;
1587 dev->stats.tx_bytes += skb->len; 1585 dev->stats.tx_bytes += skb->len;
1588 1586
1587 spin_unlock_irqrestore(&lp->lock, flags);
1588
1589 return NETDEV_TX_OK; 1589 return NETDEV_TX_OK;
1590 1590
1591drop: 1591drop:
diff --git a/drivers/net/hplance.c b/drivers/net/hplance.c
index b6060f7538df..a900d5bf2948 100644
--- a/drivers/net/hplance.c
+++ b/drivers/net/hplance.c
@@ -135,7 +135,7 @@ static void __devexit hplance_remove_one(struct dio_dev *d)
135} 135}
136 136
137/* Initialise a single lance board at the given DIO device */ 137/* Initialise a single lance board at the given DIO device */
138static void __init hplance_init(struct net_device *dev, struct dio_dev *d) 138static void __devinit hplance_init(struct net_device *dev, struct dio_dev *d)
139{ 139{
140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE); 140 unsigned long va = (d->resource.start + DIO_VIRADDRBASE);
141 struct hplance_private *lp; 141 struct hplance_private *lp;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c2e9670d154d..fd64c56a433d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -2372,6 +2372,9 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
2372 } 2372 }
2373#endif /* CONFIG_PCI_IOV */ 2373#endif /* CONFIG_PCI_IOV */
2374 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 2374 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
2375 /* i350 cannot do RSS and SR-IOV at the same time */
2376 if (hw->mac.type == e1000_i350 && adapter->vfs_allocated_count)
2377 adapter->rss_queues = 1;
2375 2378
2376 /* 2379 /*
2377 * if rss_queues > 4 or vfs are going to be allocated with rss_queues 2380 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 684cacec10f8..30f41e62049a 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -1966,11 +1966,11 @@ netxen_nic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1966 1966
1967 netxen_tso_check(netdev, tx_ring, first_desc, skb); 1967 netxen_tso_check(netdev, tx_ring, first_desc, skb);
1968 1968
1969 netxen_nic_update_cmd_producer(adapter, tx_ring);
1970
1971 adapter->stats.txbytes += skb->len; 1969 adapter->stats.txbytes += skb->len;
1972 adapter->stats.xmitcalled++; 1970 adapter->stats.xmitcalled++;
1973 1971
1972 netxen_nic_update_cmd_producer(adapter, tx_ring);
1973
1974 return NETDEV_TX_OK; 1974 return NETDEV_TX_OK;
1975 1975
1976drop_packet: 1976drop_packet:
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 392a6c4b72e5..a70244306c94 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -58,6 +58,7 @@ config BROADCOM_PHY
58 58
59config BCM63XX_PHY 59config BCM63XX_PHY
60 tristate "Drivers for Broadcom 63xx SOCs internal PHY" 60 tristate "Drivers for Broadcom 63xx SOCs internal PHY"
61 depends on BCM63XX
61 ---help--- 62 ---help---
62 Currently supports the 6348 and 6358 PHYs. 63 Currently supports the 6348 and 6358 PHYs.
63 64
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index b0c9522bb535..2cd8dc5847b4 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -543,11 +543,20 @@ static void recalibrate(struct dp83640_clock *clock)
543 543
544/* time stamping methods */ 544/* time stamping methods */
545 545
546static void decode_evnt(struct dp83640_private *dp83640, 546static int decode_evnt(struct dp83640_private *dp83640,
547 struct phy_txts *phy_txts, u16 ests) 547 void *data, u16 ests)
548{ 548{
549 struct phy_txts *phy_txts;
549 struct ptp_clock_event event; 550 struct ptp_clock_event event;
550 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK; 551 int words = (ests >> EVNT_TS_LEN_SHIFT) & EVNT_TS_LEN_MASK;
552 u16 ext_status = 0;
553
554 if (ests & MULT_EVNT) {
555 ext_status = *(u16 *) data;
556 data += sizeof(ext_status);
557 }
558
559 phy_txts = data;
551 560
552 switch (words) { /* fall through in every case */ 561 switch (words) { /* fall through in every case */
553 case 3: 562 case 3:
@@ -565,6 +574,9 @@ static void decode_evnt(struct dp83640_private *dp83640,
565 event.timestamp = phy2txts(&dp83640->edata); 574 event.timestamp = phy2txts(&dp83640->edata);
566 575
567 ptp_clock_event(dp83640->clock->ptp_clock, &event); 576 ptp_clock_event(dp83640->clock->ptp_clock, &event);
577
578 words = ext_status ? words + 2 : words + 1;
579 return words * sizeof(u16);
568} 580}
569 581
570static void decode_rxts(struct dp83640_private *dp83640, 582static void decode_rxts(struct dp83640_private *dp83640,
@@ -643,9 +655,7 @@ static void decode_status_frame(struct dp83640_private *dp83640,
643 655
644 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) { 656 } else if (PSF_EVNT == type && len >= sizeof(*phy_txts)) {
645 657
646 phy_txts = (struct phy_txts *) ptr; 658 size = decode_evnt(dp83640, ptr, ests);
647 decode_evnt(dp83640, phy_txts, ests);
648 size = sizeof(*phy_txts);
649 659
650 } else { 660 } else {
651 size = 0; 661 size = 0;
@@ -1034,8 +1044,8 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1034 1044
1035 if (is_status_frame(skb, type)) { 1045 if (is_status_frame(skb, type)) {
1036 decode_status_frame(dp83640, skb); 1046 decode_status_frame(dp83640, skb);
1037 /* Let the stack drop this frame. */ 1047 kfree_skb(skb);
1038 return false; 1048 return true;
1039 } 1049 }
1040 1050
1041 SKB_PTP_TYPE(skb) = type; 1051 SKB_PTP_TYPE(skb) = type;
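The dp83640 change makes decode_evnt() consume an optional extended-status word and return the number of bytes it used, so the status-frame walker can step over variable-length event records instead of assuming a fixed size. The userspace sketch below shows the same "return bytes consumed" contract on a toy record format; it is not the PHY's actual layout.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Toy format: each record starts with a one-byte count of 16-bit payload
     * words; the parser returns how many bytes it consumed, 0 on truncation. */
    static size_t parse_record(const uint8_t *data, size_t len)
    {
    	if (len < 1)
    		return 0;
    	size_t consumed = 1 + 2 * (size_t)data[0];
    	return consumed <= len ? consumed : 0;
    }

    int main(void)
    {
    	uint8_t frame[] = { 2, 0xaa, 0xbb, 0xcc, 0xdd, 1, 0x11, 0x22 };
    	size_t off = 0, n;

    	while (off < sizeof(frame) &&
    	       (n = parse_record(frame + off, sizeof(frame) - off)))
    		off += n;
    	printf("consumed %zu of %zu bytes\n", off, sizeof(frame));
    	return 0;
    }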
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6436ba916fe4..c6ba64380829 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -524,7 +524,7 @@ static void ppp_async_process(unsigned long arg)
524#define PUT_BYTE(ap, buf, c, islcp) do { \ 524#define PUT_BYTE(ap, buf, c, islcp) do { \
525 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\ 525 if ((islcp && c < 0x20) || (ap->xaccm[c >> 5] & (1 << (c & 0x1f)))) {\
526 *buf++ = PPP_ESCAPE; \ 526 *buf++ = PPP_ESCAPE; \
527 *buf++ = c ^ 0x20; \ 527 *buf++ = c ^ PPP_TRANS; \
528 } else \ 528 } else \
529 *buf++ = c; \ 529 *buf++ = c; \
530} while (0) 530} while (0)
@@ -897,7 +897,7 @@ ppp_async_input(struct asyncppp *ap, const unsigned char *buf,
897 sp = skb_put(skb, n); 897 sp = skb_put(skb, n);
898 memcpy(sp, buf, n); 898 memcpy(sp, buf, n);
899 if (ap->state & SC_ESCAPE) { 899 if (ap->state & SC_ESCAPE) {
900 sp[0] ^= 0x20; 900 sp[0] ^= PPP_TRANS;
901 ap->state &= ~SC_ESCAPE; 901 ap->state &= ~SC_ESCAPE;
902 } 902 }
903 } 903 }
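Both ppp_async hunks replace the literal 0x20 with PPP_TRANS, the XOR mask used by async byte stuffing: an escaped byte goes on the wire as PPP_ESCAPE followed by the byte XOR 0x20, and the receiver undoes the XOR when it sees the escape. A simplified, runnable userspace sketch (a real transmitter also consults the async control-character map, as PUT_BYTE does above):

    #include <stdio.h>
    #include <stddef.h>

    #define PPP_ESCAPE 0x7d	/* escape character */
    #define PPP_TRANS  0x20	/* XOR mask applied to escaped bytes */

    /* Transmit side: escape the flag and escape bytes themselves. */
    static size_t stuff_byte(unsigned char c, unsigned char *out)
    {
    	if (c == PPP_ESCAPE || c == 0x7e) {
    		out[0] = PPP_ESCAPE;
    		out[1] = c ^ PPP_TRANS;
    		return 2;
    	}
    	out[0] = c;
    	return 1;
    }

    /* Receive side: after seeing PPP_ESCAPE, undo the XOR on the next byte. */
    static unsigned char unstuff_byte(unsigned char escaped)
    {
    	return escaped ^ PPP_TRANS;
    }

    int main(void)
    {
    	unsigned char out[2];
    	size_t n = stuff_byte(0x7e, out);
    	printf("0x7e -> %zu byte(s): %02x %02x -> back to %02x\n",
    	       n, out[0], out[1], unstuff_byte(out[1]));
    	return 0;
    }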
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index e224740c6e2a..1f97db1fc4b1 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1276,7 +1276,7 @@ static int pxa168_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
1276 wmb(); 1276 wmb();
1277 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD); 1277 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1278 1278
1279 stats->tx_bytes += skb->len; 1279 stats->tx_bytes += length;
1280 stats->tx_packets++; 1280 stats->tx_packets++;
1281 dev->trans_start = jiffies; 1281 dev->trans_start = jiffies;
1282 if (pep->tx_ring_size - pep->tx_desc_count <= 1) { 1282 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e9656616f2a2..a5d9fbf9d816 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1406,6 +1406,7 @@ qlcnic_dump_que(struct qlcnic_adapter *adapter, struct qlcnic_dump_entry *entry,
1406 1406
1407 for (loop = 0; loop < que->no_ops; loop++) { 1407 for (loop = 0; loop < que->no_ops; loop++) {
1408 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id); 1408 QLCNIC_WR_DUMP_REG(que->sel_addr, base, que_id);
1409 addr = que->read_addr;
1409 for (i = 0; i < cnt; i++) { 1410 for (i = 0; i < cnt; i++) {
1410 QLCNIC_RD_DUMP_REG(addr, base, &data); 1411 QLCNIC_RD_DUMP_REG(addr, base, &data);
1411 *buffer++ = cpu_to_le32(data); 1412 *buffer++ = cpu_to_le32(data);
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 3ab7d2c7baf2..0f6af5c61a7c 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -2159,6 +2159,7 @@ qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb,
2159 2159
2160 nf = &pbuf->frag_array[0]; 2160 nf = &pbuf->frag_array[0];
2161 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); 2161 pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
2162 pbuf->skb = NULL;
2162} 2163}
2163 2164
2164static inline void 2165static inline void
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index bc5bb378d008..f8c4435afe51 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1635,7 +1635,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1635 * 1635 *
1636 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec 1636 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1637 */ 1637 */
1638 static const struct { 1638 static const struct rtl_mac_info {
1639 u32 mask; 1639 u32 mask;
1640 u32 val; 1640 u32 val;
1641 int mac_version; 1641 int mac_version;
@@ -1703,7 +1703,8 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1703 1703
1704 /* Catch-all */ 1704 /* Catch-all */
1705 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } 1705 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
1706 }, *p = mac_info; 1706 };
1707 const struct rtl_mac_info *p = mac_info;
1707 u32 reg; 1708 u32 reg;
1708 1709
1709 reg = RTL_R32(TxConfig); 1710 reg = RTL_R32(TxConfig);
@@ -3799,7 +3800,7 @@ static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
3799 3800
3800static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version) 3801static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3801{ 3802{
3802 static const struct { 3803 static const struct rtl_cfg2_info {
3803 u32 mac_version; 3804 u32 mac_version;
3804 u32 clk; 3805 u32 clk;
3805 u32 val; 3806 u32 val;
@@ -3808,7 +3809,8 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
3808 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff }, 3809 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
3809 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe 3810 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
3810 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff } 3811 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
3811 }, *p = cfg2_info; 3812 };
3813 const struct rtl_cfg2_info *p = cfg2_info;
3812 unsigned int i; 3814 unsigned int i;
3813 u32 clk; 3815 u32 clk;
3814 3816
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index dc4805f473e3..f6285748bd3c 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -2400,8 +2400,10 @@ static const struct of_device_id smc91x_match[] = {
2400 { .compatible = "smsc,lan91c94", }, 2400 { .compatible = "smsc,lan91c94", },
2401 { .compatible = "smsc,lan91c111", }, 2401 { .compatible = "smsc,lan91c111", },
2402 {}, 2402 {},
2403} 2403};
2404MODULE_DEVICE_TABLE(of, smc91x_match); 2404MODULE_DEVICE_TABLE(of, smc91x_match);
2405#else
2406#define smc91x_match NULL
2405#endif 2407#endif
2406 2408
2407static struct dev_pm_ops smc_drv_pm_ops = { 2409static struct dev_pm_ops smc_drv_pm_ops = {
@@ -2416,9 +2418,7 @@ static struct platform_driver smc_driver = {
2416 .name = CARDNAME, 2418 .name = CARDNAME,
2417 .owner = THIS_MODULE, 2419 .owner = THIS_MODULE,
2418 .pm = &smc_drv_pm_ops, 2420 .pm = &smc_drv_pm_ops,
2419#ifdef CONFIG_OF
2420 .of_match_table = smc91x_match, 2421 .of_match_table = smc91x_match,
2421#endif
2422 }, 2422 },
2423}; 2423};
2424 2424
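The smc91x hunk drops the #ifdef around .of_match_table by defining the match table to NULL when CONFIG_OF is disabled, a common kernel idiom. The same idiom in isolation, with placeholder device names:

    #include <linux/mod_devicetable.h>
    #include <linux/module.h>

    #ifdef CONFIG_OF
    static const struct of_device_id example_match[] = {
    	{ .compatible = "vendor,example-device", },
    	{ },
    };
    MODULE_DEVICE_TABLE(of, example_match);
    #else
    #define example_match NULL	/* keeps .of_match_table assignable without #ifdef */
    #endif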
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4dab85eecb60..9a6b3824da14 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -460,7 +460,23 @@ static u32 tun_net_fix_features(struct net_device *dev, u32 features)
460 460
461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); 461 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
462} 462}
463 463#ifdef CONFIG_NET_POLL_CONTROLLER
464static void tun_poll_controller(struct net_device *dev)
465{
466 /*
467 * Tun only receives frames when:
468 * 1) the char device endpoint gets data from user space
469 * 2) the tun socket gets a sendmsg call from user space
 470 * Since both of those are synchronous operations, we are guaranteed
 471 * never to have pending data when we poll for it,
 472 * so there's nothing to do here but return.
473 * We need this though so netpoll recognizes us as an interface that
474 * supports polling, which enables bridge devices in virt setups to
475 * still use netconsole
476 */
477 return;
478}
479#endif
464static const struct net_device_ops tun_netdev_ops = { 480static const struct net_device_ops tun_netdev_ops = {
465 .ndo_uninit = tun_net_uninit, 481 .ndo_uninit = tun_net_uninit,
466 .ndo_open = tun_net_open, 482 .ndo_open = tun_net_open,
@@ -468,6 +484,9 @@ static const struct net_device_ops tun_netdev_ops = {
468 .ndo_start_xmit = tun_net_xmit, 484 .ndo_start_xmit = tun_net_xmit,
469 .ndo_change_mtu = tun_net_change_mtu, 485 .ndo_change_mtu = tun_net_change_mtu,
470 .ndo_fix_features = tun_net_fix_features, 486 .ndo_fix_features = tun_net_fix_features,
487#ifdef CONFIG_NET_POLL_CONTROLLER
488 .ndo_poll_controller = tun_poll_controller,
489#endif
471}; 490};
472 491
473static const struct net_device_ops tap_netdev_ops = { 492static const struct net_device_ops tap_netdev_ops = {
@@ -480,6 +499,9 @@ static const struct net_device_ops tap_netdev_ops = {
480 .ndo_set_multicast_list = tun_net_mclist, 499 .ndo_set_multicast_list = tun_net_mclist,
481 .ndo_set_mac_address = eth_mac_addr, 500 .ndo_set_mac_address = eth_mac_addr,
482 .ndo_validate_addr = eth_validate_addr, 501 .ndo_validate_addr = eth_validate_addr,
502#ifdef CONFIG_NET_POLL_CONTROLLER
503 .ndo_poll_controller = tun_poll_controller,
504#endif
483}; 505};
484 506
485/* Initialize net device. */ 507/* Initialize net device. */
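The comment in the tun hunk explains the whole mechanism: the device only ever receives data synchronously from user space, so the netpoll hook has no work to do, but its mere presence lets netpoll (and therefore netconsole over a bridge of tap devices) accept the interface. Reduced to the bare idiom, with invented names:

    #include <linux/netdevice.h>

    #ifdef CONFIG_NET_POLL_CONTROLLER
    /* Nothing to poll for on a purely synchronous virtual device; the hook
     * only needs to exist so netpoll treats the interface as pollable. */
    static void example_poll_controller(struct net_device *dev)
    {
    }
    #endif

    static const struct net_device_ops example_netdev_ops = {
    #ifdef CONFIG_NET_POLL_CONTROLLER
    	.ndo_poll_controller	= example_poll_controller,
    #endif
    };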
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 9d4f9117260f..84d4608153c9 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -385,6 +385,16 @@ config USB_NET_CX82310_ETH
385 router with USB ethernet port. This driver is for routers only, 385 router with USB ethernet port. This driver is for routers only,
386 it will not work with ADSL modems (use cxacru driver instead). 386 it will not work with ADSL modems (use cxacru driver instead).
387 387
388config USB_NET_KALMIA
389 tristate "Samsung Kalmia based LTE USB modem"
390 depends on USB_USBNET
391 help
392 Choose this option if you have a Samsung Kalmia based USB modem
 393 such as the Samsung GT-B3730.
394
395 To compile this driver as a module, choose M here: the
396 module will be called kalmia.
397
388config USB_HSO 398config USB_HSO
389 tristate "Option USB High Speed Mobile Devices" 399 tristate "Option USB High Speed Mobile Devices"
390 depends on USB && RFKILL 400 depends on USB && RFKILL
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index c7ec8a5f0a90..c203fa21f6b1 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,6 +23,7 @@ obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
23obj-$(CONFIG_USB_USBNET) += usbnet.o 23obj-$(CONFIG_USB_USBNET) += usbnet.o
24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o 24obj-$(CONFIG_USB_NET_INT51X1) += int51x1.o
25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o 25obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_NET_KALMIA) += kalmia.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 27obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 28obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 29obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
new file mode 100644
index 000000000000..d965fb1e013e
--- /dev/null
+++ b/drivers/net/usb/kalmia.c
@@ -0,0 +1,384 @@
1/*
2 * USB network interface driver for Samsung Kalmia based LTE USB modem like the
3 * Samsung GT-B3730 and GT-B3710.
4 *
5 * Copyright (C) 2011 Marius Bjoernstad Kotsbak <marius@kotsbak.com>
6 *
7 * Sponsored by Quicklink Video Distribution Services Ltd.
8 *
9 * Based on the cdc_eem module.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 */
16
17#include <linux/module.h>
18#include <linux/init.h>
19#include <linux/netdevice.h>
20#include <linux/etherdevice.h>
21#include <linux/ctype.h>
22#include <linux/ethtool.h>
23#include <linux/workqueue.h>
24#include <linux/mii.h>
25#include <linux/usb.h>
26#include <linux/crc32.h>
27#include <linux/usb/cdc.h>
28#include <linux/usb/usbnet.h>
29#include <linux/gfp.h>
30
31/*
32 * The Samsung Kalmia based LTE USB modems have a CDC ACM port for modem control
33 * handled by the "option" module and an ethernet data port handled by this
34 * module.
35 *
36 * The stick must first be switched into modem mode by usb_modeswitch
37 * or similar tool. Then the modem gets sent two initialization packets by
38 * this module, which gives the MAC address of the device. User space can then
39 * connect the modem using AT commands through the ACM port and then use
40 * DHCP on the network interface exposed by this module. Network packets are
41 * sent to and from the modem in a proprietary format discovered after watching
42 * the behavior of the windows driver for the modem.
43 *
44 * More information about the use of the modem is available in usb_modeswitch
45 * forum and the project page:
46 *
47 * http://www.draisberghof.de/usb_modeswitch/bb/viewtopic.php?t=465
48 * https://github.com/mkotsbak/Samsung-GT-B3730-linux-driver
49 */
50
51/* #define DEBUG */
52/* #define VERBOSE */
53
54#define KALMIA_HEADER_LENGTH 6
55#define KALMIA_ALIGN_SIZE 4
56#define KALMIA_USB_TIMEOUT 10000
57
58/*-------------------------------------------------------------------------*/
59
60static int
61kalmia_send_init_packet(struct usbnet *dev, u8 *init_msg, u8 init_msg_len,
62 u8 *buffer, u8 expected_len)
63{
64 int act_len;
65 int status;
66
67 netdev_dbg(dev->net, "Sending init packet");
68
69 status = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 0x02),
70 init_msg, init_msg_len, &act_len, KALMIA_USB_TIMEOUT);
71 if (status != 0) {
72 netdev_err(dev->net,
73 "Error sending init packet. Status %i, length %i\n",
74 status, act_len);
75 return status;
76 }
77 else if (act_len != init_msg_len) {
78 netdev_err(dev->net,
79 "Did not send all of init packet. Bytes sent: %i",
80 act_len);
81 }
82 else {
83 netdev_dbg(dev->net, "Successfully sent init packet.");
84 }
85
86 status = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 0x81),
87 buffer, expected_len, &act_len, KALMIA_USB_TIMEOUT);
88
89 if (status != 0)
90 netdev_err(dev->net,
91 "Error receiving init result. Status %i, length %i\n",
92 status, act_len);
93 else if (act_len != expected_len)
94 netdev_err(dev->net, "Unexpected init result length: %i\n",
95 act_len);
96
97 return status;
98}
99
100static int
101kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
102{
103 char init_msg_1[] =
104 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x20, 0x00, 0x00,
105 0x00, 0x00 };
106 char init_msg_2[] =
107 { 0x57, 0x50, 0x04, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0xf4,
108 0x00, 0x00 };
109 char receive_buf[28];
110 int status;
111
112 status = kalmia_send_init_packet(dev, init_msg_1, sizeof(init_msg_1)
113 / sizeof(init_msg_1[0]), receive_buf, 24);
114 if (status != 0)
115 return status;
116
117 status = kalmia_send_init_packet(dev, init_msg_2, sizeof(init_msg_2)
118 / sizeof(init_msg_2[0]), receive_buf, 28);
119 if (status != 0)
120 return status;
121
122 memcpy(ethernet_addr, receive_buf + 10, ETH_ALEN);
123
124 return status;
125}
126
127static int
128kalmia_bind(struct usbnet *dev, struct usb_interface *intf)
129{
130 int status;
131 u8 ethernet_addr[ETH_ALEN];
132
133 /* Don't bind to AT command interface */
134 if (intf->cur_altsetting->desc.bInterfaceClass != USB_CLASS_VENDOR_SPEC)
135 return -EINVAL;
136
137 dev->in = usb_rcvbulkpipe(dev->udev, 0x81 & USB_ENDPOINT_NUMBER_MASK);
138 dev->out = usb_sndbulkpipe(dev->udev, 0x02 & USB_ENDPOINT_NUMBER_MASK);
139 dev->status = NULL;
140
141 dev->net->hard_header_len += KALMIA_HEADER_LENGTH;
142 dev->hard_mtu = 1400;
143 dev->rx_urb_size = dev->hard_mtu * 10; /* Found to be optimal after testing */
144
145 status = kalmia_init_and_get_ethernet_addr(dev, ethernet_addr);
146
147 if (status < 0) {
148 usb_set_intfdata(intf, NULL);
149 usb_driver_release_interface(driver_of(intf), intf);
150 return status;
151 }
152
153 memcpy(dev->net->dev_addr, ethernet_addr, ETH_ALEN);
154 memcpy(dev->net->perm_addr, ethernet_addr, ETH_ALEN);
155
156 return status;
157}
158
159static struct sk_buff *
160kalmia_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
161{
162 struct sk_buff *skb2 = NULL;
163 u16 content_len;
164 unsigned char *header_start;
165 unsigned char ether_type_1, ether_type_2;
166 u8 remainder, padlen = 0;
167
168 if (!skb_cloned(skb)) {
169 int headroom = skb_headroom(skb);
170 int tailroom = skb_tailroom(skb);
171
172 if ((tailroom >= KALMIA_ALIGN_SIZE) && (headroom
173 >= KALMIA_HEADER_LENGTH))
174 goto done;
175
176 if ((headroom + tailroom) > (KALMIA_HEADER_LENGTH
177 + KALMIA_ALIGN_SIZE)) {
178 skb->data = memmove(skb->head + KALMIA_HEADER_LENGTH,
179 skb->data, skb->len);
180 skb_set_tail_pointer(skb, skb->len);
181 goto done;
182 }
183 }
184
185 skb2 = skb_copy_expand(skb, KALMIA_HEADER_LENGTH,
186 KALMIA_ALIGN_SIZE, flags);
187 if (!skb2)
188 return NULL;
189
190 dev_kfree_skb_any(skb);
191 skb = skb2;
192
193 done: header_start = skb_push(skb, KALMIA_HEADER_LENGTH);
194 ether_type_1 = header_start[KALMIA_HEADER_LENGTH + 12];
195 ether_type_2 = header_start[KALMIA_HEADER_LENGTH + 13];
196
197 netdev_dbg(dev->net, "Sending etherType: %02x%02x", ether_type_1,
198 ether_type_2);
199
200 /* According to empirical data for data packets */
201 header_start[0] = 0x57;
202 header_start[1] = 0x44;
203 content_len = skb->len - KALMIA_HEADER_LENGTH;
204 header_start[2] = (content_len & 0xff); /* low byte */
205 header_start[3] = (content_len >> 8); /* high byte */
206
207 header_start[4] = ether_type_1;
208 header_start[5] = ether_type_2;
209
210 /* Align to 4 bytes by padding with zeros */
211 remainder = skb->len % KALMIA_ALIGN_SIZE;
212 if (remainder > 0) {
213 padlen = KALMIA_ALIGN_SIZE - remainder;
214 memset(skb_put(skb, padlen), 0, padlen);
215 }
216
217 netdev_dbg(
218 dev->net,
219 "Sending package with length %i and padding %i. Header: %02x:%02x:%02x:%02x:%02x:%02x.",
220 content_len, padlen, header_start[0], header_start[1],
221 header_start[2], header_start[3], header_start[4],
222 header_start[5]);
223
224 return skb;
225}
226
227static int
228kalmia_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
229{
230 /*
231 * Our task here is to strip off framing, leaving skb with one
232 * data frame for the usbnet framework code to process.
233 */
234 const u8 HEADER_END_OF_USB_PACKET[] =
235 { 0x57, 0x5a, 0x00, 0x00, 0x08, 0x00 };
236 const u8 EXPECTED_UNKNOWN_HEADER_1[] =
237 { 0x57, 0x43, 0x1e, 0x00, 0x15, 0x02 };
238 const u8 EXPECTED_UNKNOWN_HEADER_2[] =
239 { 0x57, 0x50, 0x0e, 0x00, 0x00, 0x00 };
240 u8 i = 0;
241
242 /* incomplete header? */
243 if (skb->len < KALMIA_HEADER_LENGTH)
244 return 0;
245
246 do {
247 struct sk_buff *skb2 = NULL;
248 u8 *header_start;
249 u16 usb_packet_length, ether_packet_length;
250 int is_last;
251
252 header_start = skb->data;
253
254 if (unlikely(header_start[0] != 0x57 || header_start[1] != 0x44)) {
255 if (!memcmp(header_start, EXPECTED_UNKNOWN_HEADER_1,
256 sizeof(EXPECTED_UNKNOWN_HEADER_1)) || !memcmp(
257 header_start, EXPECTED_UNKNOWN_HEADER_2,
258 sizeof(EXPECTED_UNKNOWN_HEADER_2))) {
259 netdev_dbg(
260 dev->net,
261 "Received expected unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
262 header_start[0], header_start[1],
263 header_start[2], header_start[3],
264 header_start[4], header_start[5],
265 skb->len - KALMIA_HEADER_LENGTH);
266 }
267 else {
268 netdev_err(
269 dev->net,
270 "Received unknown frame header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
271 header_start[0], header_start[1],
272 header_start[2], header_start[3],
273 header_start[4], header_start[5],
274 skb->len - KALMIA_HEADER_LENGTH);
275 return 0;
276 }
277 }
278 else
279 netdev_dbg(
280 dev->net,
281 "Received header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
282 header_start[0], header_start[1], header_start[2],
283 header_start[3], header_start[4], header_start[5],
284 skb->len - KALMIA_HEADER_LENGTH);
285
286 /* subtract start header and end header */
287 usb_packet_length = skb->len - (2 * KALMIA_HEADER_LENGTH);
288 ether_packet_length = header_start[2] + (header_start[3] << 8);
289 skb_pull(skb, KALMIA_HEADER_LENGTH);
290
291 /* Some small packets miss the end marker */
292 if (usb_packet_length < ether_packet_length) {
293 ether_packet_length = usb_packet_length
294 + KALMIA_HEADER_LENGTH;
295 is_last = true;
296 }
297 else {
298 netdev_dbg(dev->net, "Correct package length #%i", i
299 + 1);
300
301 is_last = (memcmp(skb->data + ether_packet_length,
302 HEADER_END_OF_USB_PACKET,
303 sizeof(HEADER_END_OF_USB_PACKET)) == 0);
304 if (!is_last) {
305 header_start = skb->data + ether_packet_length;
306 netdev_dbg(
307 dev->net,
308 "End header: %02x:%02x:%02x:%02x:%02x:%02x. Package length: %i\n",
309 header_start[0], header_start[1],
310 header_start[2], header_start[3],
311 header_start[4], header_start[5],
312 skb->len - KALMIA_HEADER_LENGTH);
313 }
314 }
315
316 if (is_last) {
317 skb2 = skb;
318 }
319 else {
320 skb2 = skb_clone(skb, GFP_ATOMIC);
321 if (unlikely(!skb2))
322 return 0;
323 }
324
325 skb_trim(skb2, ether_packet_length);
326
327 if (is_last) {
328 return 1;
329 }
330 else {
331 usbnet_skb_return(dev, skb2);
332 skb_pull(skb, ether_packet_length);
333 }
334
335 i++;
336 }
337 while (skb->len);
338
339 return 1;
340}
341
342static const struct driver_info kalmia_info = {
343 .description = "Samsung Kalmia LTE USB dongle",
344 .flags = FLAG_WWAN,
345 .bind = kalmia_bind,
346 .rx_fixup = kalmia_rx_fixup,
347 .tx_fixup = kalmia_tx_fixup
348};
349
350/*-------------------------------------------------------------------------*/
351
352static const struct usb_device_id products[] = {
353 /* The unswitched USB ID, to get the module auto loaded: */
354 { USB_DEVICE(0x04e8, 0x689a) },
355 /* The stick switched into modem mode (by e.g. usb_modeswitch): */
356 { USB_DEVICE(0x04e8, 0x6889),
357 .driver_info = (unsigned long) &kalmia_info, },
358 { /* EMPTY == end of list */} };
359MODULE_DEVICE_TABLE(usb, products);
360
361static struct usb_driver kalmia_driver = {
362 .name = "kalmia",
363 .id_table = products,
364 .probe = usbnet_probe,
365 .disconnect = usbnet_disconnect,
366 .suspend = usbnet_suspend,
367 .resume = usbnet_resume
368};
369
370static int __init kalmia_init(void)
371{
372 return usb_register(&kalmia_driver);
373}
374module_init(kalmia_init);
375
376static void __exit kalmia_exit(void)
377{
378 usb_deregister(&kalmia_driver);
379}
380module_exit(kalmia_exit);
381
382MODULE_AUTHOR("Marius Bjoernstad Kotsbak <marius@kotsbak.com>");
383MODULE_DESCRIPTION("Samsung Kalmia USB network driver");
384MODULE_LICENSE("GPL");
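
The transmit path above wraps each Ethernet frame in the 6-byte proprietary header that kalmia_tx_fixup() builds: the magic bytes 0x57 0x44, the frame length as a little-endian 16-bit value, the frame's two EtherType bytes, and zero padding up to a 4-byte boundary. The standalone C sketch below (not part of the driver; the function name and the caller-supplied output buffer are hypothetical) illustrates the same framing outside the kernel:

/*
 * Illustrative userspace sketch of the Kalmia TX framing described above.
 * The caller must supply an output buffer of at least
 * eth_len + KALMIA_HEADER_LENGTH + KALMIA_ALIGN_SIZE - 1 bytes.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define KALMIA_HEADER_LENGTH 6
#define KALMIA_ALIGN_SIZE    4

static size_t build_kalmia_tx_frame(const uint8_t *eth_frame, size_t eth_len,
				    uint8_t *out)
{
	size_t total = KALMIA_HEADER_LENGTH + eth_len;
	size_t remainder = total % KALMIA_ALIGN_SIZE;
	size_t padlen = remainder ? KALMIA_ALIGN_SIZE - remainder : 0;

	out[0] = 0x57;                  /* header magic, as in the driver */
	out[1] = 0x44;
	out[2] = eth_len & 0xff;        /* payload length, low byte */
	out[3] = eth_len >> 8;          /* payload length, high byte */
	out[4] = eth_frame[12];         /* EtherType copied from the frame */
	out[5] = eth_frame[13];

	memcpy(out + KALMIA_HEADER_LENGTH, eth_frame, eth_len);
	memset(out + total, 0, padlen); /* pad to a 4-byte boundary */

	return total + padlen;
}

The receive path reverses this: kalmia_rx_fixup() reads the frame length back from bytes 2-3 of each header and splits one USB transfer into the individual Ethernet frames it carries.
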
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 1eba06f6fa4c..0f27f4c5bb73 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2204,8 +2204,10 @@ fst_open(struct net_device *dev)
2204 2204
2205 if (port->mode != FST_RAW) { 2205 if (port->mode != FST_RAW) {
2206 err = hdlc_open(dev); 2206 err = hdlc_open(dev);
2207 if (err) 2207 if (err) {
2208 module_put(THIS_MODULE);
2208 return err; 2209 return err;
2210 }
2209 } 2211 }
2210 2212
2211 fst_openport(port); 2213 fst_openport(port);
diff --git a/drivers/net/wireless/iwlegacy/iwl-4965.c b/drivers/net/wireless/iwlegacy/iwl-4965.c
index 3a022bcf615c..9cf96cb51712 100644
--- a/drivers/net/wireless/iwlegacy/iwl-4965.c
+++ b/drivers/net/wireless/iwlegacy/iwl-4965.c
@@ -1216,10 +1216,10 @@ static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *c
1216 * receive commit_rxon request 1216 * receive commit_rxon request
1217 * abort any previous channel switch if still in process 1217 * abort any previous channel switch if still in process
1218 */ 1218 */
1219 if (priv->switch_rxon.switch_in_progress && 1219 if (test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status) &&
1220 (priv->switch_rxon.channel != ctx->staging.channel)) { 1220 (priv->switch_channel != ctx->staging.channel)) {
1221 IWL_DEBUG_11H(priv, "abort channel switch on %d\n", 1221 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1222 le16_to_cpu(priv->switch_rxon.channel)); 1222 le16_to_cpu(priv->switch_channel));
1223 iwl_legacy_chswitch_done(priv, false); 1223 iwl_legacy_chswitch_done(priv, false);
1224 } 1224 }
1225 1225
@@ -1402,9 +1402,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1402 return rc; 1402 return rc;
1403 } 1403 }
1404 1404
1405 priv->switch_rxon.channel = cmd.channel;
1406 priv->switch_rxon.switch_in_progress = true;
1407
1408 return iwl_legacy_send_cmd_pdu(priv, 1405 return iwl_legacy_send_cmd_pdu(priv,
1409 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1406 REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1410} 1407}
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c
index 42df8321dae8..3be76bd5499a 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.c
+++ b/drivers/net/wireless/iwlegacy/iwl-core.c
@@ -859,12 +859,8 @@ void iwl_legacy_chswitch_done(struct iwl_priv *priv, bool is_success)
859 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 859 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
860 return; 860 return;
861 861
862 if (priv->switch_rxon.switch_in_progress) { 862 if (test_and_clear_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
863 ieee80211_chswitch_done(ctx->vif, is_success); 863 ieee80211_chswitch_done(ctx->vif, is_success);
864 mutex_lock(&priv->mutex);
865 priv->switch_rxon.switch_in_progress = false;
866 mutex_unlock(&priv->mutex);
867 }
868} 864}
869EXPORT_SYMBOL(iwl_legacy_chswitch_done); 865EXPORT_SYMBOL(iwl_legacy_chswitch_done);
870 866
@@ -876,19 +872,19 @@ void iwl_legacy_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
876 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 872 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
877 struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active; 873 struct iwl_legacy_rxon_cmd *rxon = (void *)&ctx->active;
878 874
879 if (priv->switch_rxon.switch_in_progress) { 875 if (!test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
880 if (!le32_to_cpu(csa->status) && 876 return;
881 (csa->channel == priv->switch_rxon.channel)) { 877
882 rxon->channel = csa->channel; 878 if (!le32_to_cpu(csa->status) && csa->channel == priv->switch_channel) {
883 ctx->staging.channel = csa->channel; 879 rxon->channel = csa->channel;
884 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n", 880 ctx->staging.channel = csa->channel;
885 le16_to_cpu(csa->channel)); 881 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
886 iwl_legacy_chswitch_done(priv, true);
887 } else {
888 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
889 le16_to_cpu(csa->channel)); 882 le16_to_cpu(csa->channel));
890 iwl_legacy_chswitch_done(priv, false); 883 iwl_legacy_chswitch_done(priv, true);
891 } 884 } else {
885 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
886 le16_to_cpu(csa->channel));
887 iwl_legacy_chswitch_done(priv, false);
892 } 888 }
893} 889}
894EXPORT_SYMBOL(iwl_legacy_rx_csa); 890EXPORT_SYMBOL(iwl_legacy_rx_csa);
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.h b/drivers/net/wireless/iwlegacy/iwl-core.h
index bc66c604106c..c5fbda0760de 100644
--- a/drivers/net/wireless/iwlegacy/iwl-core.h
+++ b/drivers/net/wireless/iwlegacy/iwl-core.h
@@ -560,7 +560,7 @@ void iwl_legacy_free_geos(struct iwl_priv *priv);
560#define STATUS_SCAN_HW 15 560#define STATUS_SCAN_HW 15
561#define STATUS_POWER_PMI 16 561#define STATUS_POWER_PMI 16
562#define STATUS_FW_ERROR 17 562#define STATUS_FW_ERROR 17
563 563#define STATUS_CHANNEL_SWITCH_PENDING 18
564 564
565static inline int iwl_legacy_is_ready(struct iwl_priv *priv) 565static inline int iwl_legacy_is_ready(struct iwl_priv *priv)
566{ 566{
diff --git a/drivers/net/wireless/iwlegacy/iwl-dev.h b/drivers/net/wireless/iwlegacy/iwl-dev.h
index 416448acd971..0a8d07fdc0e7 100644
--- a/drivers/net/wireless/iwlegacy/iwl-dev.h
+++ b/drivers/net/wireless/iwlegacy/iwl-dev.h
@@ -856,17 +856,6 @@ struct traffic_stats {
856}; 856};
857 857
858/* 858/*
859 * iwl_switch_rxon: "channel switch" structure
860 *
861 * @ switch_in_progress: channel switch in progress
862 * @ channel: new channel
863 */
864struct iwl_switch_rxon {
865 bool switch_in_progress;
866 __le16 channel;
867};
868
869/*
870 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds 859 * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
871 * to perform continuous uCode event logging operation if enabled 860 * to perform continuous uCode event logging operation if enabled
872 */ 861 */
@@ -1116,7 +1105,7 @@ struct iwl_priv {
1116 1105
1117 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX]; 1106 struct iwl_rxon_context contexts[NUM_IWL_RXON_CTX];
1118 1107
1119 struct iwl_switch_rxon switch_rxon; 1108 __le16 switch_channel;
1120 1109
1121 /* 1st responses from initialize and runtime uCode images. 1110 /* 1st responses from initialize and runtime uCode images.
1122 * _4965's initialize alive response contains some calibration data. */ 1111 * _4965's initialize alive response contains some calibration data. */
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c
index af2ae22fcfd3..7157ba529680 100644
--- a/drivers/net/wireless/iwlegacy/iwl4965-base.c
+++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c
@@ -2861,16 +2861,13 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2861 goto out; 2861 goto out;
2862 2862
2863 if (test_bit(STATUS_EXIT_PENDING, &priv->status) || 2863 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
2864 test_bit(STATUS_SCANNING, &priv->status)) 2864 test_bit(STATUS_SCANNING, &priv->status) ||
2865 test_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status))
2865 goto out; 2866 goto out;
2866 2867
2867 if (!iwl_legacy_is_associated_ctx(ctx)) 2868 if (!iwl_legacy_is_associated_ctx(ctx))
2868 goto out; 2869 goto out;
2869 2870
2870 /* channel switch in progress */
2871 if (priv->switch_rxon.switch_in_progress == true)
2872 goto out;
2873
2874 if (priv->cfg->ops->lib->set_channel_switch) { 2871 if (priv->cfg->ops->lib->set_channel_switch) {
2875 2872
2876 ch = channel->hw_value; 2873 ch = channel->hw_value;
@@ -2919,15 +2916,18 @@ void iwl4965_mac_channel_switch(struct ieee80211_hw *hw,
2919 * at this point, staging_rxon has the 2916 * at this point, staging_rxon has the
2920 * configuration for channel switch 2917 * configuration for channel switch
2921 */ 2918 */
2922 if (priv->cfg->ops->lib->set_channel_switch(priv, 2919 set_bit(STATUS_CHANNEL_SWITCH_PENDING, &priv->status);
2923 ch_switch)) 2920 priv->switch_channel = cpu_to_le16(ch);
2924 priv->switch_rxon.switch_in_progress = false; 2921 if (priv->cfg->ops->lib->set_channel_switch(priv, ch_switch)) {
2922 clear_bit(STATUS_CHANNEL_SWITCH_PENDING,
2923 &priv->status);
2924 priv->switch_channel = 0;
2925 ieee80211_chswitch_done(ctx->vif, false);
2926 }
2925 } 2927 }
2926 } 2928 }
2927out: 2929out:
2928 mutex_unlock(&priv->mutex); 2930 mutex_unlock(&priv->mutex);
2929 if (!priv->switch_rxon.switch_in_progress)
2930 ieee80211_chswitch_done(ctx->vif, false);
2931 IWL_DEBUG_MAC80211(priv, "leave\n"); 2931 IWL_DEBUG_MAC80211(priv, "leave\n");
2932} 2932}
2933 2933
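
The iwlegacy hunks above replace the mutex-protected switch_rxon.switch_in_progress flag with an atomic STATUS_CHANNEL_SWITCH_PENDING bit in priv->status: the bit is set before the channel-switch command is issued, and completion paths use test_and_clear_bit() so the switch is reported done (or aborted) exactly once. A minimal userspace analogue of that pattern, using C11 atomics in place of the kernel's set_bit()/test_and_clear_bit() helpers (the names and bit position below are illustrative only):

/* Minimal sketch of a set-once / test-and-clear pending flag. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define CHANNEL_SWITCH_PENDING (1u << 18)  /* mirrors status bit 18 above */

static atomic_uint status;                 /* stand-in for priv->status */

/* Set the pending bit; refuse if a switch is already in flight. */
static bool start_channel_switch(void)
{
	unsigned int old = atomic_fetch_or(&status, CHANNEL_SWITCH_PENDING);
	return !(old & CHANNEL_SWITCH_PENDING);
}

/* test_and_clear analogue: only the caller that clears the bit reports. */
static void channel_switch_done(bool success)
{
	unsigned int old = atomic_fetch_and(&status, ~CHANNEL_SWITCH_PENDING);
	if (old & CHANNEL_SWITCH_PENDING)
		printf("channel switch %s\n", success ? "done" : "aborted");
}

int main(void)
{
	if (start_channel_switch())
		channel_switch_done(true);
	return 0;
}

With this scheme the failure path in iwl4965_mac_channel_switch() clears the bit and reports failure itself, which is why the unlocked check that previously followed mutex_unlock() could be removed.
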
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 660831ce293c..687c1f223497 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1288,6 +1288,8 @@ int mwifiex_register_cfg80211(struct net_device *dev, u8 *mac,
1288 1288
1289 *(unsigned long *) wdev_priv = (unsigned long) priv; 1289 *(unsigned long *) wdev_priv = (unsigned long) priv;
1290 1290
1291 set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev);
1292
1291 ret = wiphy_register(wdev->wiphy); 1293 ret = wiphy_register(wdev->wiphy);
1292 if (ret < 0) { 1294 if (ret < 0) {
1293 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n", 1295 dev_err(priv->adapter->dev, "%s: registering cfg80211 device\n",
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 8ff43c281411..d633edbd9796 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2475,6 +2475,7 @@ struct mwl8k_cmd_set_hw_spec {
2475 * faster client. 2475 * faster client.
2476 */ 2476 */
2477#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400 2477#define MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY 0x00000400
2478#define MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR 0x00000200
2478#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080 2479#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
2479#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020 2480#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
2480#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010 2481#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
@@ -2511,7 +2512,8 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
2511 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT | 2512 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
2512 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP | 2513 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
2513 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON | 2514 MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON |
2514 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY); 2515 MWL8K_SET_HW_SPEC_FLAG_ENABLE_LIFE_TIME_EXPIRY |
2516 MWL8K_SET_HW_SPEC_FLAG_GENERATE_CCMP_HDR);
2515 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 2517 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
2516 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS); 2518 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
2517 2519
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index fc44005b0d53..e502db0532e5 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -670,6 +670,19 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
670 &rx_status, 670 &rx_status,
671 (u8 *) pdesc, skb); 671 (u8 *) pdesc, skb);
672 672
673 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
674 if (unlikely(!new_skb)) {
675 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
676 DBG_DMESG,
677 ("can't alloc skb for rx\n"));
678 goto done;
679 }
680
681 pci_unmap_single(rtlpci->pdev,
682 *((dma_addr_t *) skb->cb),
683 rtlpci->rxbuffersize,
684 PCI_DMA_FROMDEVICE);
685
673 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc, 686 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
674 false, 687 false,
675 HW_DESC_RXPKT_LEN)); 688 HW_DESC_RXPKT_LEN));
@@ -686,21 +699,6 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
686 hdr = rtl_get_hdr(skb); 699 hdr = rtl_get_hdr(skb);
687 fc = rtl_get_fc(skb); 700 fc = rtl_get_fc(skb);
688 701
689 /* try for new buffer - if allocation fails, drop
690 * frame and reuse old buffer
691 */
692 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
693 if (unlikely(!new_skb)) {
694 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
695 DBG_DMESG,
696 ("can't alloc skb for rx\n"));
697 goto done;
698 }
699 pci_unmap_single(rtlpci->pdev,
700 *((dma_addr_t *) skb->cb),
701 rtlpci->rxbuffersize,
702 PCI_DMA_FROMDEVICE);
703
704 if (!stats.crc && !stats.hwerror) { 702 if (!stats.crc && !stats.hwerror) {
705 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, 703 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
706 sizeof(rx_status)); 704 sizeof(rx_status));
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index a3984f4ef192..f34b5b29fb95 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -141,6 +141,13 @@ static struct notifier_block module_load_nb = {
141 .notifier_call = module_load_notify, 141 .notifier_call = module_load_notify,
142}; 142};
143 143
144static void free_all_tasks(void)
145{
146 /* make sure we don't leak task structs */
147 process_task_mortuary();
148 process_task_mortuary();
149}
150
144int sync_start(void) 151int sync_start(void)
145{ 152{
146 int err; 153 int err;
@@ -148,8 +155,6 @@ int sync_start(void)
148 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL)) 155 if (!zalloc_cpumask_var(&marked_cpus, GFP_KERNEL))
149 return -ENOMEM; 156 return -ENOMEM;
150 157
151 mutex_lock(&buffer_mutex);
152
153 err = task_handoff_register(&task_free_nb); 158 err = task_handoff_register(&task_free_nb);
154 if (err) 159 if (err)
155 goto out1; 160 goto out1;
@@ -166,7 +171,6 @@ int sync_start(void)
166 start_cpu_work(); 171 start_cpu_work();
167 172
168out: 173out:
169 mutex_unlock(&buffer_mutex);
170 return err; 174 return err;
171out4: 175out4:
172 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 176 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
@@ -174,6 +178,7 @@ out3:
174 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 178 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
175out2: 179out2:
176 task_handoff_unregister(&task_free_nb); 180 task_handoff_unregister(&task_free_nb);
181 free_all_tasks();
177out1: 182out1:
178 free_cpumask_var(marked_cpus); 183 free_cpumask_var(marked_cpus);
179 goto out; 184 goto out;
@@ -182,20 +187,16 @@ out1:
182 187
183void sync_stop(void) 188void sync_stop(void)
184{ 189{
185 /* flush buffers */
186 mutex_lock(&buffer_mutex);
187 end_cpu_work(); 190 end_cpu_work();
188 unregister_module_notifier(&module_load_nb); 191 unregister_module_notifier(&module_load_nb);
189 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb); 192 profile_event_unregister(PROFILE_MUNMAP, &munmap_nb);
190 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb); 193 profile_event_unregister(PROFILE_TASK_EXIT, &task_exit_nb);
191 task_handoff_unregister(&task_free_nb); 194 task_handoff_unregister(&task_free_nb);
192 mutex_unlock(&buffer_mutex); 195 barrier(); /* do all of the above first */
193 flush_cpu_work();
194 196
195 /* make sure we don't leak task structs */ 197 flush_cpu_work();
196 process_task_mortuary();
197 process_task_mortuary();
198 198
199 free_all_tasks();
199 free_cpumask_var(marked_cpus); 200 free_cpumask_var(marked_cpus);
200} 201}
201 202
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index c85f744270a5..094308e41be5 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_X86_VISWS) += setup-irq.o
51obj-$(CONFIG_MN10300) += setup-bus.o 51obj-$(CONFIG_MN10300) += setup-bus.o
52obj-$(CONFIG_MICROBLAZE) += setup-bus.o 52obj-$(CONFIG_MICROBLAZE) += setup-bus.o
53obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o 53obj-$(CONFIG_TILE) += setup-bus.o setup-irq.o
54obj-$(CONFIG_SPARC_LEON) += setup-bus.o setup-irq.o
54 55
55# 56#
56# ACPI Related PCI FW Functions 57# ACPI Related PCI FW Functions
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index 59f17acf7f68..f02c34d26d1b 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -3388,7 +3388,7 @@ static void __init init_iommu_pm_ops(void)
3388} 3388}
3389 3389
3390#else 3390#else
3391static inline int init_iommu_pm_ops(void) { } 3391static inline void init_iommu_pm_ops(void) {}
3392#endif /* CONFIG_PM */ 3392#endif /* CONFIG_PM */
3393 3393
3394/* 3394/*
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 56098b3e17c0..5f10c23dff94 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,11 +3271,11 @@ void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3271} 3271}
3272 3272
3273static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode, 3273static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3274 unsigned int command_bits, bool change_bridge) 3274 unsigned int command_bits, u32 flags)
3275{ 3275{
3276 if (arch_set_vga_state) 3276 if (arch_set_vga_state)
3277 return arch_set_vga_state(dev, decode, command_bits, 3277 return arch_set_vga_state(dev, decode, command_bits,
3278 change_bridge); 3278 flags);
3279 return 0; 3279 return 0;
3280} 3280}
3281 3281
diff --git a/drivers/pcmcia/pxa2xx_vpac270.c b/drivers/pcmcia/pxa2xx_vpac270.c
index 435002dfc3ca..712baab3c83d 100644
--- a/drivers/pcmcia/pxa2xx_vpac270.c
+++ b/drivers/pcmcia/pxa2xx_vpac270.c
@@ -11,6 +11,7 @@
11 * 11 *
12 */ 12 */
13 13
14#include <linux/gpio.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/platform_device.h> 16#include <linux/platform_device.h>
16 17
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index a8d03aeb4051..e7f301da2902 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -46,7 +46,8 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
46 caps.n_ext_ts = ptp->info->n_ext_ts; 46 caps.n_ext_ts = ptp->info->n_ext_ts;
47 caps.n_per_out = ptp->info->n_per_out; 47 caps.n_per_out = ptp->info->n_per_out;
48 caps.pps = ptp->info->pps; 48 caps.pps = ptp->info->pps;
49 err = copy_to_user((void __user *)arg, &caps, sizeof(caps)); 49 if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
50 err = -EFAULT;
50 break; 51 break;
51 52
52 case PTP_EXTTS_REQUEST: 53 case PTP_EXTTS_REQUEST:
@@ -129,8 +130,10 @@ ssize_t ptp_read(struct posix_clock *pc,
129 return -ERESTARTSYS; 130 return -ERESTARTSYS;
130 } 131 }
131 132
132 if (ptp->defunct) 133 if (ptp->defunct) {
134 mutex_unlock(&ptp->tsevq_mux);
133 return -ENODEV; 135 return -ENODEV;
136 }
134 137
135 spin_lock_irqsave(&queue->lock, flags); 138 spin_lock_irqsave(&queue->lock, flags);
136 139
@@ -150,10 +153,8 @@ ssize_t ptp_read(struct posix_clock *pc,
150 153
151 mutex_unlock(&ptp->tsevq_mux); 154 mutex_unlock(&ptp->tsevq_mux);
152 155
153 if (copy_to_user(buf, event, cnt)) { 156 if (copy_to_user(buf, event, cnt))
154 mutex_unlock(&ptp->tsevq_mux);
155 return -EFAULT; 157 return -EFAULT;
156 }
157 158
158 return cnt; 159 return cnt;
159} 160}
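
The ptp_chardev.c hunk above tightens two error paths: PTP_CLOCK_GETCAPS now returns -EFAULT when copy_to_user() fails rather than the number of uncopied bytes, and the defunct-clock check in ptp_read() releases tsevq_mux before returning -ENODEV (while the copy_to_user() failure path stops unlocking a mutex that had already been dropped). A minimal sketch of the "unlock on every early return" discipline the fix enforces, using a POSIX mutex and hypothetical names (struct dev, dev_read):

#include <pthread.h>
#include <errno.h>
#include <string.h>

struct dev {
	pthread_mutex_t lock;
	int defunct;
	char event[64];
};

/* Returns bytes copied, or a negative errno value. */
static int dev_read(struct dev *dev, char *buf, size_t len)
{
	pthread_mutex_lock(&dev->lock);

	if (dev->defunct) {
		pthread_mutex_unlock(&dev->lock); /* the unlock the fix adds */
		return -ENODEV;
	}

	size_t n = len < sizeof(dev->event) ? len : sizeof(dev->event);
	memcpy(buf, dev->event, n);

	pthread_mutex_unlock(&dev->lock);
	return (int)n;
}
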
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index f822e13dc04b..ce2aabf5c550 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -1051,4 +1051,13 @@ config RTC_DRV_TILE
1051 Enable support for the Linux driver side of the Tilera 1051 Enable support for the Linux driver side of the Tilera
1052 hypervisor's real-time clock interface. 1052 hypervisor's real-time clock interface.
1053 1053
1054config RTC_DRV_PUV3
1055 tristate "PKUnity v3 RTC support"
1056 depends on ARCH_PUV3
1057 help
1058 This enables support for the RTC in the PKUnity-v3 SoCs.
1059
1060 This drive can also be built as a module. If so, the module
1061 will be called rtc-puv3.
1062
1054endif # RTC_CLASS 1063endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 213d725f16d4..0ffefe877bfa 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -78,6 +78,7 @@ obj-$(CONFIG_RTC_DRV_PCF50633) += rtc-pcf50633.o
78obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o 78obj-$(CONFIG_RTC_DRV_PL030) += rtc-pl030.o
79obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o 79obj-$(CONFIG_RTC_DRV_PL031) += rtc-pl031.o
80obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o 80obj-$(CONFIG_RTC_DRV_PS3) += rtc-ps3.o
81obj-$(CONFIG_RTC_DRV_PUV3) += rtc-puv3.o
81obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o 82obj-$(CONFIG_RTC_DRV_PXA) += rtc-pxa.o
82obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o 83obj-$(CONFIG_RTC_DRV_R9701) += rtc-r9701.o
83obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o 84obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index ef6316acec43..df68618f6dbb 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -318,7 +318,7 @@ int rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
318} 318}
319EXPORT_SYMBOL_GPL(rtc_read_alarm); 319EXPORT_SYMBOL_GPL(rtc_read_alarm);
320 320
321int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) 321static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
322{ 322{
323 struct rtc_time tm; 323 struct rtc_time tm;
324 long now, scheduled; 324 long now, scheduled;
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index d0e06edb14c5..cace6d3aed9a 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -421,7 +421,8 @@ static long rtc_dev_ioctl(struct file *file,
421 err = ops->ioctl(rtc->dev.parent, cmd, arg); 421 err = ops->ioctl(rtc->dev.parent, cmd, arg);
422 if (err == -ENOIOCTLCMD) 422 if (err == -ENOIOCTLCMD)
423 err = -ENOTTY; 423 err = -ENOTTY;
424 } 424 } else
425 err = -ENOTTY;
425 break; 426 break;
426 } 427 }
427 428
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 1a84b3e227d1..7317d3b9a3d5 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -189,7 +189,7 @@ static int __devinit m41t93_probe(struct spi_device *spi)
189 189
190static int __devexit m41t93_remove(struct spi_device *spi) 190static int __devexit m41t93_remove(struct spi_device *spi)
191{ 191{
192 struct rtc_device *rtc = platform_get_drvdata(spi); 192 struct rtc_device *rtc = spi_get_drvdata(spi);
193 193
194 if (rtc) 194 if (rtc)
195 rtc_device_unregister(rtc); 195 rtc_device_unregister(rtc);
diff --git a/arch/unicore32/kernel/rtc.c b/drivers/rtc/rtc-puv3.c
index 8cad70b3302c..46f14b82f3ab 100644
--- a/arch/unicore32/kernel/rtc.c
+++ b/drivers/rtc/rtc-puv3.c
@@ -1,7 +1,5 @@
1/* 1/*
2 * linux/arch/unicore32/kernel/rtc.c 2 * RTC driver code specific to PKUnity SoC and UniCore ISA
3 *
4 * Code specific to PKUnity SoC and UniCore ISA
5 * 3 *
6 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> 4 * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn>
7 * Copyright (C) 2001-2010 Guan Xuetao 5 * Copyright (C) 2001-2010 Guan Xuetao
@@ -36,7 +34,6 @@ static int puv3_rtc_tickno = IRQ_RTC;
36static DEFINE_SPINLOCK(puv3_rtc_pie_lock); 34static DEFINE_SPINLOCK(puv3_rtc_pie_lock);
37 35
38/* IRQ Handlers */ 36/* IRQ Handlers */
39
40static irqreturn_t puv3_rtc_alarmirq(int irq, void *id) 37static irqreturn_t puv3_rtc_alarmirq(int irq, void *id)
41{ 38{
42 struct rtc_device *rdev = id; 39 struct rtc_device *rdev = id;
@@ -89,7 +86,6 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
89} 86}
90 87
91/* Time read/write */ 88/* Time read/write */
92
93static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) 89static int puv3_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm)
94{ 90{
95 rtc_time_to_tm(readl(RTC_RCNR), rtc_tm); 91 rtc_time_to_tm(readl(RTC_RCNR), rtc_tm);
@@ -196,7 +192,6 @@ static void puv3_rtc_release(struct device *dev)
196 struct rtc_device *rtc_dev = platform_get_drvdata(pdev); 192 struct rtc_device *rtc_dev = platform_get_drvdata(pdev);
197 193
198 /* do not clear AIE here, it may be needed for wake */ 194 /* do not clear AIE here, it may be needed for wake */
199
200 puv3_rtc_setpie(dev, 0); 195 puv3_rtc_setpie(dev, 0);
201 free_irq(puv3_rtc_alarmno, rtc_dev); 196 free_irq(puv3_rtc_alarmno, rtc_dev);
202 free_irq(puv3_rtc_tickno, rtc_dev); 197 free_irq(puv3_rtc_tickno, rtc_dev);
@@ -218,7 +213,6 @@ static void puv3_rtc_enable(struct platform_device *pdev, int en)
218 writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR); 213 writel(readl(RTC_RTSR) & ~RTC_RTSR_HZE, RTC_RTSR);
219 } else { 214 } else {
220 /* re-enable the device, and check it is ok */ 215 /* re-enable the device, and check it is ok */
221
222 if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) { 216 if ((readl(RTC_RTSR) & RTC_RTSR_HZE) == 0) {
223 dev_info(&pdev->dev, "rtc disabled, re-enabling\n"); 217 dev_info(&pdev->dev, "rtc disabled, re-enabling\n");
224 writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR); 218 writel(readl(RTC_RTSR) | RTC_RTSR_HZE, RTC_RTSR);
@@ -251,7 +245,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
251 pr_debug("%s: probe=%p\n", __func__, pdev); 245 pr_debug("%s: probe=%p\n", __func__, pdev);
252 246
253 /* find the IRQs */ 247 /* find the IRQs */
254
255 puv3_rtc_tickno = platform_get_irq(pdev, 1); 248 puv3_rtc_tickno = platform_get_irq(pdev, 1);
256 if (puv3_rtc_tickno < 0) { 249 if (puv3_rtc_tickno < 0) {
257 dev_err(&pdev->dev, "no irq for rtc tick\n"); 250 dev_err(&pdev->dev, "no irq for rtc tick\n");
@@ -268,7 +261,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
268 puv3_rtc_tickno, puv3_rtc_alarmno); 261 puv3_rtc_tickno, puv3_rtc_alarmno);
269 262
270 /* get the memory region */ 263 /* get the memory region */
271
272 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 264 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
273 if (res == NULL) { 265 if (res == NULL) {
274 dev_err(&pdev->dev, "failed to get memory region resource\n"); 266 dev_err(&pdev->dev, "failed to get memory region resource\n");
@@ -288,7 +280,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
288 puv3_rtc_enable(pdev, 1); 280 puv3_rtc_enable(pdev, 1);
289 281
290 /* register RTC and exit */ 282 /* register RTC and exit */
291
292 rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops, 283 rtc = rtc_device_register("pkunity", &pdev->dev, &puv3_rtcops,
293 THIS_MODULE); 284 THIS_MODULE);
294 285
@@ -315,8 +306,6 @@ static int puv3_rtc_probe(struct platform_device *pdev)
315 306
316#ifdef CONFIG_PM 307#ifdef CONFIG_PM
317 308
318/* RTC Power management control */
319
320static int ticnt_save; 309static int ticnt_save;
321 310
322static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state) 311static int puv3_rtc_suspend(struct platform_device *pdev, pm_message_t state)
@@ -368,4 +357,3 @@ module_exit(puv3_rtc_exit);
368MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip"); 357MODULE_DESCRIPTION("RTC Driver for the PKUnity v3 chip");
369MODULE_AUTHOR("Hu Dongliang"); 358MODULE_AUTHOR("Hu Dongliang");
370MODULE_LICENSE("GPL v2"); 359MODULE_LICENSE("GPL v2");
371
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 55e8f721e38a..570d4da10696 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -416,7 +416,7 @@ static void process_buffer_error(struct qdio_q *q, int count)
416 416
417 /* special handling for no target buffer empty */ 417 /* special handling for no target buffer empty */
418 if ((!q->is_input_q && 418 if ((!q->is_input_q &&
419 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 419 (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
420 qperf_inc(q, target_full); 420 qperf_inc(q, target_full);
421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
422 q->first_to_check); 422 q->first_to_check);
@@ -427,8 +427,8 @@ static void process_buffer_error(struct qdio_q *q, int count)
427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); 428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
429 DBF_ERROR("F14:%2x F15:%2x", 429 DBF_ERROR("F14:%2x F15:%2x",
430 q->sbal[q->first_to_check]->element[14].flags & 0xff, 430 q->sbal[q->first_to_check]->element[14].sflags,
431 q->sbal[q->first_to_check]->element[15].flags & 0xff); 431 q->sbal[q->first_to_check]->element[15].sflags);
432 432
433 /* 433 /*
434 * Interrupts may be avoided as long as the error is present 434 * Interrupts may be avoided as long as the error is present
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 55c6aa1c9704..d3cee33e554c 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -361,7 +361,7 @@ enum qeth_header_ids {
361 361
362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale) 362static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
363{ 363{
364 return (sbale->flags & SBAL_FLAGS_LAST_ENTRY); 364 return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
365} 365}
366 366
367enum qeth_qdio_buffer_states { 367enum qeth_qdio_buffer_states {
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 503678a30981..dd08f7b42fb8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -890,7 +890,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
890 struct sk_buff *skb; 890 struct sk_buff *skb;
891 891
892 /* is PCI flag set on buffer? */ 892 /* is PCI flag set on buffer? */
893 if (buf->buffer->element[0].flags & 0x40) 893 if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
894 atomic_dec(&queue->set_pci_flags_count); 894 atomic_dec(&queue->set_pci_flags_count);
895 895
896 skb = skb_dequeue(&buf->skb_list); 896 skb = skb_dequeue(&buf->skb_list);
@@ -906,9 +906,11 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
906 buf->is_header[i] = 0; 906 buf->is_header[i] = 0;
907 buf->buffer->element[i].length = 0; 907 buf->buffer->element[i].length = 0;
908 buf->buffer->element[i].addr = NULL; 908 buf->buffer->element[i].addr = NULL;
909 buf->buffer->element[i].flags = 0; 909 buf->buffer->element[i].eflags = 0;
910 buf->buffer->element[i].sflags = 0;
910 } 911 }
911 buf->buffer->element[15].flags = 0; 912 buf->buffer->element[15].eflags = 0;
913 buf->buffer->element[15].sflags = 0;
912 buf->next_element_to_fill = 0; 914 buf->next_element_to_fill = 0;
913 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY); 915 atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
914} 916}
@@ -2368,9 +2370,10 @@ static int qeth_init_input_buffer(struct qeth_card *card,
2368 buf->buffer->element[i].length = PAGE_SIZE; 2370 buf->buffer->element[i].length = PAGE_SIZE;
2369 buf->buffer->element[i].addr = pool_entry->elements[i]; 2371 buf->buffer->element[i].addr = pool_entry->elements[i];
2370 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1) 2372 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2371 buf->buffer->element[i].flags = SBAL_FLAGS_LAST_ENTRY; 2373 buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2372 else 2374 else
2373 buf->buffer->element[i].flags = 0; 2375 buf->buffer->element[i].eflags = 0;
2376 buf->buffer->element[i].sflags = 0;
2374 } 2377 }
2375 return 0; 2378 return 0;
2376} 2379}
@@ -2718,11 +2721,11 @@ int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
2718 if (qdio_error) { 2721 if (qdio_error) {
2719 QETH_CARD_TEXT(card, 2, dbftext); 2722 QETH_CARD_TEXT(card, 2, dbftext);
2720 QETH_CARD_TEXT_(card, 2, " F15=%02X", 2723 QETH_CARD_TEXT_(card, 2, " F15=%02X",
2721 buf->element[15].flags & 0xff); 2724 buf->element[15].sflags);
2722 QETH_CARD_TEXT_(card, 2, " F14=%02X", 2725 QETH_CARD_TEXT_(card, 2, " F14=%02X",
2723 buf->element[14].flags & 0xff); 2726 buf->element[14].sflags);
2724 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error); 2727 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
2725 if ((buf->element[15].flags & 0xff) == 0x12) { 2728 if ((buf->element[15].sflags) == 0x12) {
2726 card->stats.rx_dropped++; 2729 card->stats.rx_dropped++;
2727 return 0; 2730 return 0;
2728 } else 2731 } else
@@ -2798,7 +2801,7 @@ EXPORT_SYMBOL_GPL(qeth_queue_input_buffer);
2798static int qeth_handle_send_error(struct qeth_card *card, 2801static int qeth_handle_send_error(struct qeth_card *card,
2799 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err) 2802 struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
2800{ 2803{
2801 int sbalf15 = buffer->buffer->element[15].flags & 0xff; 2804 int sbalf15 = buffer->buffer->element[15].sflags;
2802 2805
2803 QETH_CARD_TEXT(card, 6, "hdsnderr"); 2806 QETH_CARD_TEXT(card, 6, "hdsnderr");
2804 if (card->info.type == QETH_CARD_TYPE_IQD) { 2807 if (card->info.type == QETH_CARD_TYPE_IQD) {
@@ -2907,8 +2910,8 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2907 2910
2908 for (i = index; i < index + count; ++i) { 2911 for (i = index; i < index + count; ++i) {
2909 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; 2912 buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
2910 buf->buffer->element[buf->next_element_to_fill - 1].flags |= 2913 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
2911 SBAL_FLAGS_LAST_ENTRY; 2914 SBAL_EFLAGS_LAST_ENTRY;
2912 2915
2913 if (queue->card->info.type == QETH_CARD_TYPE_IQD) 2916 if (queue->card->info.type == QETH_CARD_TYPE_IQD)
2914 continue; 2917 continue;
@@ -2921,7 +2924,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2921 /* it's likely that we'll go to packing 2924 /* it's likely that we'll go to packing
2922 * mode soon */ 2925 * mode soon */
2923 atomic_inc(&queue->set_pci_flags_count); 2926 atomic_inc(&queue->set_pci_flags_count);
2924 buf->buffer->element[0].flags |= 0x40; 2927 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2925 } 2928 }
2926 } else { 2929 } else {
2927 if (!atomic_read(&queue->set_pci_flags_count)) { 2930 if (!atomic_read(&queue->set_pci_flags_count)) {
@@ -2934,7 +2937,7 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
2934 * further send was requested by the stack 2937 * further send was requested by the stack
2935 */ 2938 */
2936 atomic_inc(&queue->set_pci_flags_count); 2939 atomic_inc(&queue->set_pci_flags_count);
2937 buf->buffer->element[0].flags |= 0x40; 2940 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
2938 } 2941 }
2939 } 2942 }
2940 } 2943 }
@@ -3180,20 +3183,20 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3180 if (!length) { 3183 if (!length) {
3181 if (first_lap) 3184 if (first_lap)
3182 if (skb_shinfo(skb)->nr_frags) 3185 if (skb_shinfo(skb)->nr_frags)
3183 buffer->element[element].flags = 3186 buffer->element[element].eflags =
3184 SBAL_FLAGS_FIRST_FRAG; 3187 SBAL_EFLAGS_FIRST_FRAG;
3185 else 3188 else
3186 buffer->element[element].flags = 0; 3189 buffer->element[element].eflags = 0;
3187 else 3190 else
3188 buffer->element[element].flags = 3191 buffer->element[element].eflags =
3189 SBAL_FLAGS_MIDDLE_FRAG; 3192 SBAL_EFLAGS_MIDDLE_FRAG;
3190 } else { 3193 } else {
3191 if (first_lap) 3194 if (first_lap)
3192 buffer->element[element].flags = 3195 buffer->element[element].eflags =
3193 SBAL_FLAGS_FIRST_FRAG; 3196 SBAL_EFLAGS_FIRST_FRAG;
3194 else 3197 else
3195 buffer->element[element].flags = 3198 buffer->element[element].eflags =
3196 SBAL_FLAGS_MIDDLE_FRAG; 3199 SBAL_EFLAGS_MIDDLE_FRAG;
3197 } 3200 }
3198 data += length_here; 3201 data += length_here;
3199 element++; 3202 element++;
@@ -3205,12 +3208,12 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb,
3205 buffer->element[element].addr = (char *)page_to_phys(frag->page) 3208 buffer->element[element].addr = (char *)page_to_phys(frag->page)
3206 + frag->page_offset; 3209 + frag->page_offset;
3207 buffer->element[element].length = frag->size; 3210 buffer->element[element].length = frag->size;
3208 buffer->element[element].flags = SBAL_FLAGS_MIDDLE_FRAG; 3211 buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG;
3209 element++; 3212 element++;
3210 } 3213 }
3211 3214
3212 if (buffer->element[element - 1].flags) 3215 if (buffer->element[element - 1].eflags)
3213 buffer->element[element - 1].flags = SBAL_FLAGS_LAST_FRAG; 3216 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3214 *next_element_to_fill = element; 3217 *next_element_to_fill = element;
3215} 3218}
3216 3219
@@ -3234,7 +3237,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3234 /*fill first buffer entry only with header information */ 3237 /*fill first buffer entry only with header information */
3235 buffer->element[element].addr = skb->data; 3238 buffer->element[element].addr = skb->data;
3236 buffer->element[element].length = hdr_len; 3239 buffer->element[element].length = hdr_len;
3237 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3240 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3238 buf->next_element_to_fill++; 3241 buf->next_element_to_fill++;
3239 skb->data += hdr_len; 3242 skb->data += hdr_len;
3240 skb->len -= hdr_len; 3243 skb->len -= hdr_len;
@@ -3246,7 +3249,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
3246 buffer->element[element].addr = hdr; 3249 buffer->element[element].addr = hdr;
3247 buffer->element[element].length = sizeof(struct qeth_hdr) + 3250 buffer->element[element].length = sizeof(struct qeth_hdr) +
3248 hd_len; 3251 hd_len;
3249 buffer->element[element].flags = SBAL_FLAGS_FIRST_FRAG; 3252 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3250 buf->is_header[element] = 1; 3253 buf->is_header[element] = 1;
3251 buf->next_element_to_fill++; 3254 buf->next_element_to_fill++;
3252 } 3255 }
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 8512b5c0ef82..022fb6a8cb83 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -640,7 +640,7 @@ static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
640} 640}
641 641
642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio, 642static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
643 u32 fsf_cmd, u32 sbtype, 643 u32 fsf_cmd, u8 sbtype,
644 mempool_t *pool) 644 mempool_t *pool)
645{ 645{
646 struct zfcp_adapter *adapter = qdio->adapter; 646 struct zfcp_adapter *adapter = qdio->adapter;
@@ -841,7 +841,7 @@ struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *scmnd)
841 if (zfcp_qdio_sbal_get(qdio)) 841 if (zfcp_qdio_sbal_get(qdio))
842 goto out; 842 goto out;
843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, 843 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
844 SBAL_FLAGS0_TYPE_READ, 844 SBAL_SFLAGS0_TYPE_READ,
845 qdio->adapter->pool.scsi_abort); 845 qdio->adapter->pool.scsi_abort);
846 if (IS_ERR(req)) { 846 if (IS_ERR(req)) {
847 req = NULL; 847 req = NULL;
@@ -1012,7 +1012,7 @@ int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *wka_port,
1012 goto out; 1012 goto out;
1013 1013
1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, 1014 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC,
1015 SBAL_FLAGS0_TYPE_WRITE_READ, pool); 1015 SBAL_SFLAGS0_TYPE_WRITE_READ, pool);
1016 1016
1017 if (IS_ERR(req)) { 1017 if (IS_ERR(req)) {
1018 ret = PTR_ERR(req); 1018 ret = PTR_ERR(req);
@@ -1110,7 +1110,7 @@ int zfcp_fsf_send_els(struct zfcp_adapter *adapter, u32 d_id,
1110 goto out; 1110 goto out;
1111 1111
1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, 1112 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS,
1113 SBAL_FLAGS0_TYPE_WRITE_READ, NULL); 1113 SBAL_SFLAGS0_TYPE_WRITE_READ, NULL);
1114 1114
1115 if (IS_ERR(req)) { 1115 if (IS_ERR(req)) {
1116 ret = PTR_ERR(req); 1116 ret = PTR_ERR(req);
@@ -1156,7 +1156,7 @@ int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *erp_action)
1156 goto out; 1156 goto out;
1157 1157
1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1158 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1159 SBAL_FLAGS0_TYPE_READ, 1159 SBAL_SFLAGS0_TYPE_READ,
1160 qdio->adapter->pool.erp_req); 1160 qdio->adapter->pool.erp_req);
1161 1161
1162 if (IS_ERR(req)) { 1162 if (IS_ERR(req)) {
@@ -1198,7 +1198,7 @@ int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
1198 goto out_unlock; 1198 goto out_unlock;
1199 1199
1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, 1200 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
1201 SBAL_FLAGS0_TYPE_READ, NULL); 1201 SBAL_SFLAGS0_TYPE_READ, NULL);
1202 1202
1203 if (IS_ERR(req)) { 1203 if (IS_ERR(req)) {
1204 retval = PTR_ERR(req); 1204 retval = PTR_ERR(req);
@@ -1250,7 +1250,7 @@ int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
1250 goto out; 1250 goto out;
1251 1251
1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1252 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1253 SBAL_FLAGS0_TYPE_READ, 1253 SBAL_SFLAGS0_TYPE_READ,
1254 qdio->adapter->pool.erp_req); 1254 qdio->adapter->pool.erp_req);
1255 1255
1256 if (IS_ERR(req)) { 1256 if (IS_ERR(req)) {
@@ -1296,7 +1296,7 @@ int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
1296 goto out_unlock; 1296 goto out_unlock;
1297 1297
1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, 1298 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
1299 SBAL_FLAGS0_TYPE_READ, NULL); 1299 SBAL_SFLAGS0_TYPE_READ, NULL);
1300 1300
1301 if (IS_ERR(req)) { 1301 if (IS_ERR(req)) {
1302 retval = PTR_ERR(req); 1302 retval = PTR_ERR(req);
@@ -1412,7 +1412,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
1412 goto out; 1412 goto out;
1413 1413
1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1414 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1415 SBAL_FLAGS0_TYPE_READ, 1415 SBAL_SFLAGS0_TYPE_READ,
1416 qdio->adapter->pool.erp_req); 1416 qdio->adapter->pool.erp_req);
1417 1417
1418 if (IS_ERR(req)) { 1418 if (IS_ERR(req)) {
@@ -1478,7 +1478,7 @@ int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
1478 goto out; 1478 goto out;
1479 1479
1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1480 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1481 SBAL_FLAGS0_TYPE_READ, 1481 SBAL_SFLAGS0_TYPE_READ,
1482 qdio->adapter->pool.erp_req); 1482 qdio->adapter->pool.erp_req);
1483 1483
1484 if (IS_ERR(req)) { 1484 if (IS_ERR(req)) {
@@ -1553,7 +1553,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
1553 goto out; 1553 goto out;
1554 1554
1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, 1555 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
1556 SBAL_FLAGS0_TYPE_READ, 1556 SBAL_SFLAGS0_TYPE_READ,
1557 qdio->adapter->pool.erp_req); 1557 qdio->adapter->pool.erp_req);
1558 1558
1559 if (IS_ERR(req)) { 1559 if (IS_ERR(req)) {
@@ -1606,7 +1606,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
1606 goto out; 1606 goto out;
1607 1607
1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, 1608 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
1609 SBAL_FLAGS0_TYPE_READ, 1609 SBAL_SFLAGS0_TYPE_READ,
1610 qdio->adapter->pool.erp_req); 1610 qdio->adapter->pool.erp_req);
1611 1611
1612 if (IS_ERR(req)) { 1612 if (IS_ERR(req)) {
@@ -1698,7 +1698,7 @@ int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
1698 goto out; 1698 goto out;
1699 1699
1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, 1700 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
1701 SBAL_FLAGS0_TYPE_READ, 1701 SBAL_SFLAGS0_TYPE_READ,
1702 qdio->adapter->pool.erp_req); 1702 qdio->adapter->pool.erp_req);
1703 1703
1704 if (IS_ERR(req)) { 1704 if (IS_ERR(req)) {
@@ -1812,7 +1812,7 @@ int zfcp_fsf_open_lun(struct zfcp_erp_action *erp_action)
1812 goto out; 1812 goto out;
1813 1813
1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, 1814 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
1815 SBAL_FLAGS0_TYPE_READ, 1815 SBAL_SFLAGS0_TYPE_READ,
1816 adapter->pool.erp_req); 1816 adapter->pool.erp_req);
1817 1817
1818 if (IS_ERR(req)) { 1818 if (IS_ERR(req)) {
@@ -1901,7 +1901,7 @@ int zfcp_fsf_close_lun(struct zfcp_erp_action *erp_action)
1901 goto out; 1901 goto out;
1902 1902
1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, 1903 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
1904 SBAL_FLAGS0_TYPE_READ, 1904 SBAL_SFLAGS0_TYPE_READ,
1905 qdio->adapter->pool.erp_req); 1905 qdio->adapter->pool.erp_req);
1906 1906
1907 if (IS_ERR(req)) { 1907 if (IS_ERR(req)) {
@@ -2161,7 +2161,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2161{ 2161{
2162 struct zfcp_fsf_req *req; 2162 struct zfcp_fsf_req *req;
2163 struct fcp_cmnd *fcp_cmnd; 2163 struct fcp_cmnd *fcp_cmnd;
2164 unsigned int sbtype = SBAL_FLAGS0_TYPE_READ; 2164 u8 sbtype = SBAL_SFLAGS0_TYPE_READ;
2165 int real_bytes, retval = -EIO, dix_bytes = 0; 2165 int real_bytes, retval = -EIO, dix_bytes = 0;
2166 struct scsi_device *sdev = scsi_cmnd->device; 2166 struct scsi_device *sdev = scsi_cmnd->device;
2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev); 2167 struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
@@ -2181,7 +2181,7 @@ int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *scsi_cmnd)
2181 } 2181 }
2182 2182
2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE) 2183 if (scsi_cmnd->sc_data_direction == DMA_TO_DEVICE)
2184 sbtype = SBAL_FLAGS0_TYPE_WRITE; 2184 sbtype = SBAL_SFLAGS0_TYPE_WRITE;
2185 2185
2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2186 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2187 sbtype, adapter->pool.scsi_req); 2187 sbtype, adapter->pool.scsi_req);
@@ -2280,7 +2280,7 @@ struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *scmnd,
2280 goto out; 2280 goto out;
2281 2281
2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, 2282 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
2283 SBAL_FLAGS0_TYPE_WRITE, 2283 SBAL_SFLAGS0_TYPE_WRITE,
2284 qdio->adapter->pool.scsi_req); 2284 qdio->adapter->pool.scsi_req);
2285 2285
2286 if (IS_ERR(req)) { 2286 if (IS_ERR(req)) {
@@ -2328,17 +2328,18 @@ struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *adapter,
2328 struct zfcp_qdio *qdio = adapter->qdio; 2328 struct zfcp_qdio *qdio = adapter->qdio;
2329 struct zfcp_fsf_req *req = NULL; 2329 struct zfcp_fsf_req *req = NULL;
2330 struct fsf_qtcb_bottom_support *bottom; 2330 struct fsf_qtcb_bottom_support *bottom;
2331 int direction, retval = -EIO, bytes; 2331 int retval = -EIO, bytes;
2332 u8 direction;
2332 2333
2333 if (!(adapter->adapter_features & FSF_FEATURE_CFDC)) 2334 if (!(adapter->adapter_features & FSF_FEATURE_CFDC))
2334 return ERR_PTR(-EOPNOTSUPP); 2335 return ERR_PTR(-EOPNOTSUPP);
2335 2336
2336 switch (fsf_cfdc->command) { 2337 switch (fsf_cfdc->command) {
2337 case FSF_QTCB_DOWNLOAD_CONTROL_FILE: 2338 case FSF_QTCB_DOWNLOAD_CONTROL_FILE:
2338 direction = SBAL_FLAGS0_TYPE_WRITE; 2339 direction = SBAL_SFLAGS0_TYPE_WRITE;
2339 break; 2340 break;
2340 case FSF_QTCB_UPLOAD_CONTROL_FILE: 2341 case FSF_QTCB_UPLOAD_CONTROL_FILE:
2341 direction = SBAL_FLAGS0_TYPE_READ; 2342 direction = SBAL_SFLAGS0_TYPE_READ;
2342 break; 2343 break;
2343 default: 2344 default:
2344 return ERR_PTR(-EINVAL); 2345 return ERR_PTR(-EINVAL);
@@ -2413,7 +2414,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
2413 fsf_req->qdio_req.sbal_response = sbal_idx; 2414 fsf_req->qdio_req.sbal_response = sbal_idx;
2414 zfcp_fsf_req_complete(fsf_req); 2415 zfcp_fsf_req_complete(fsf_req);
2415 2416
2416 if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY)) 2417 if (likely(sbale->eflags & SBAL_EFLAGS_LAST_ENTRY))
2417 break; 2418 break;
2418 } 2419 }
2419} 2420}
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 98e97d90835b..d9c40ea73eef 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -124,7 +124,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
124 124
125 /* set last entry flag in current SBALE of current SBAL */ 125 /* set last entry flag in current SBALE of current SBAL */
126 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 126 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
127 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 127 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
128 128
129 /* don't exceed last allowed SBAL */ 129 /* don't exceed last allowed SBAL */
130 if (q_req->sbal_last == q_req->sbal_limit) 130 if (q_req->sbal_last == q_req->sbal_limit)
@@ -132,7 +132,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
132 132
133 /* set chaining flag in first SBALE of current SBAL */ 133 /* set chaining flag in first SBALE of current SBAL */
134 sbale = zfcp_qdio_sbale_req(qdio, q_req); 134 sbale = zfcp_qdio_sbale_req(qdio, q_req);
135 sbale->flags |= SBAL_FLAGS0_MORE_SBALS; 135 sbale->sflags |= SBAL_SFLAGS0_MORE_SBALS;
136 136
137 /* calculate index of next SBAL */ 137 /* calculate index of next SBAL */
138 q_req->sbal_last++; 138 q_req->sbal_last++;
@@ -147,7 +147,7 @@ zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
147 147
148 /* set storage-block type for new SBAL */ 148 /* set storage-block type for new SBAL */
149 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 149 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
150 sbale->flags |= q_req->sbtype; 150 sbale->sflags |= q_req->sbtype;
151 151
152 return sbale; 152 return sbale;
153} 153}
@@ -177,7 +177,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
177 177
178 /* set storage-block type for this request */ 178 /* set storage-block type for this request */
179 sbale = zfcp_qdio_sbale_req(qdio, q_req); 179 sbale = zfcp_qdio_sbale_req(qdio, q_req);
180 sbale->flags |= q_req->sbtype; 180 sbale->sflags |= q_req->sbtype;
181 181
182 for (; sg; sg = sg_next(sg)) { 182 for (; sg; sg = sg_next(sg)) {
183 sbale = zfcp_qdio_sbale_next(qdio, q_req); 183 sbale = zfcp_qdio_sbale_next(qdio, q_req);
@@ -384,7 +384,8 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) { 384 for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
385 sbale = &(qdio->res_q[cc]->element[0]); 385 sbale = &(qdio->res_q[cc]->element[0]);
386 sbale->length = 0; 386 sbale->length = 0;
387 sbale->flags = SBAL_FLAGS_LAST_ENTRY; 387 sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
388 sbale->sflags = 0;
388 sbale->addr = NULL; 389 sbale->addr = NULL;
389 } 390 }
390 391
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2297d8d3e947..54e22ace012b 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -67,7 +67,7 @@ struct zfcp_qdio {
67 * @qdio_outb_usage: usage of outbound queue 67 * @qdio_outb_usage: usage of outbound queue
68 */ 68 */
69struct zfcp_qdio_req { 69struct zfcp_qdio_req {
70 u32 sbtype; 70 u8 sbtype;
71 u8 sbal_number; 71 u8 sbal_number;
72 u8 sbal_first; 72 u8 sbal_first;
73 u8 sbal_last; 73 u8 sbal_last;
@@ -116,7 +116,7 @@ zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
116 */ 116 */
117static inline 117static inline
118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, 118void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
119 unsigned long req_id, u32 sbtype, void *data, u32 len) 119 unsigned long req_id, u8 sbtype, void *data, u32 len)
120{ 120{
121 struct qdio_buffer_element *sbale; 121 struct qdio_buffer_element *sbale;
122 int count = min(atomic_read(&qdio->req_q_free), 122 int count = min(atomic_read(&qdio->req_q_free),
@@ -131,7 +131,8 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
131 131
132 sbale = zfcp_qdio_sbale_req(qdio, q_req); 132 sbale = zfcp_qdio_sbale_req(qdio, q_req);
133 sbale->addr = (void *) req_id; 133 sbale->addr = (void *) req_id;
134 sbale->flags = SBAL_FLAGS0_COMMAND | sbtype; 134 sbale->eflags = 0;
135 sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
135 136
136 if (unlikely(!data)) 137 if (unlikely(!data))
137 return; 138 return;
@@ -173,7 +174,7 @@ void zfcp_qdio_set_sbale_last(struct zfcp_qdio *qdio,
173 struct qdio_buffer_element *sbale; 174 struct qdio_buffer_element *sbale;
174 175
175 sbale = zfcp_qdio_sbale_curr(qdio, q_req); 176 sbale = zfcp_qdio_sbale_curr(qdio, q_req);
176 sbale->flags |= SBAL_FLAGS_LAST_ENTRY; 177 sbale->eflags |= SBAL_EFLAGS_LAST_ENTRY;
177} 178}
178 179
179/** 180/**
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index 4f64183b27fa..7e9c39951ecb 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -635,7 +635,7 @@ static void clks_core_resume(void)
635 struct clk *clkp; 635 struct clk *clkp;
636 636
637 list_for_each_entry(clkp, &clock_list, node) { 637 list_for_each_entry(clkp, &clock_list, node) {
638 if (likely(clkp->ops)) { 638 if (likely(clkp->usecount && clkp->ops)) {
639 unsigned long rate = clkp->rate; 639 unsigned long rate = clkp->rate;
640 640
641 if (likely(clkp->ops->set_parent)) 641 if (likely(clkp->ops->set_parent))
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index 6a9e58dd36c7..d18ce9e946d8 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -1861,6 +1861,7 @@ static int pl022_setup(struct spi_device *spi)
1861 } 1861 }
1862 if ((clk_freq.cpsdvsr < CPSDVR_MIN) 1862 if ((clk_freq.cpsdvsr < CPSDVR_MIN)
1863 || (clk_freq.cpsdvsr > CPSDVR_MAX)) { 1863 || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
1864 status = -EINVAL;
1864 dev_err(&spi->dev, 1865 dev_err(&spi->dev,
1865 "cpsdvsr is configured incorrectly\n"); 1866 "cpsdvsr is configured incorrectly\n");
1866 goto err_config_params; 1867 goto err_config_params;
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 6f86ba0175ac..969cdd2fe124 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -298,7 +298,7 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
298 unsigned int count, c; 298 unsigned int count, c;
299 unsigned long base, tx_reg, rx_reg; 299 unsigned long base, tx_reg, rx_reg;
300 int word_len, data_type, element_count; 300 int word_len, data_type, element_count;
301 int elements; 301 int elements = 0;
302 u32 l; 302 u32 l;
303 u8 * rx; 303 u8 * rx;
304 const u8 * tx; 304 const u8 * tx;
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index f706dba165cf..cc880c95e7de 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -681,13 +681,14 @@ static void bfin_spi_pump_transfers(unsigned long data)
681 drv_data->cs_change = transfer->cs_change; 681 drv_data->cs_change = transfer->cs_change;
682 682
683 /* Bits per word setup */ 683 /* Bits per word setup */
684 bits_per_word = transfer->bits_per_word ? : message->spi->bits_per_word; 684 bits_per_word = transfer->bits_per_word ? :
685 if ((bits_per_word > 0) && (bits_per_word % 16 == 0)) { 685 message->spi->bits_per_word ? : 8;
686 if (bits_per_word % 16 == 0) {
686 drv_data->n_bytes = bits_per_word/8; 687 drv_data->n_bytes = bits_per_word/8;
687 drv_data->len = (transfer->len) >> 1; 688 drv_data->len = (transfer->len) >> 1;
688 cr_width = BIT_CTL_WORDSIZE; 689 cr_width = BIT_CTL_WORDSIZE;
689 drv_data->ops = &bfin_bfin_spi_transfer_ops_u16; 690 drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
690 } else if ((bits_per_word > 0) && (bits_per_word % 8 == 0)) { 691 } else if (bits_per_word % 8 == 0) {
691 drv_data->n_bytes = bits_per_word/8; 692 drv_data->n_bytes = bits_per_word/8;
692 drv_data->len = transfer->len; 693 drv_data->len = transfer->len;
693 cr_width = 0; 694 cr_width = 0;
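
Note on the bits_per_word hunk above: it leans on GCC's binary conditional `x ?: y` to fall back from the per-transfer value to the per-device value to a hard-coded 8. A minimal standalone sketch of that fallback chain, with illustrative names rather than the driver's own (builds with gcc/clang, since `?:` without a middle operand is a GNU extension):

    /* gcc -o bpw bpw.c -- demonstrates the "first non-zero wins" chain */
    #include <stdio.h>

    static int pick_bits_per_word(int transfer_bpw, int device_bpw)
    {
            /* first non-zero value wins; 8 is the final fallback */
            return transfer_bpw ? : device_bpw ? : 8;
    }

    int main(void)
    {
            printf("%d\n", pick_bits_per_word(0, 16));  /* 16: device default   */
            printf("%d\n", pick_bits_per_word(32, 16)); /* 32: transfer override */
            printf("%d\n", pick_bits_per_word(0, 0));   /* 8:  hard fallback     */
            return 0;
    }
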
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index dfc16f955eb8..196284dc2f36 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -24,23 +24,6 @@ menuconfig STAGING
24 24
25if STAGING 25if STAGING
26 26
27config STAGING_EXCLUDE_BUILD
28 bool "Exclude Staging drivers from being built" if STAGING
29 default y
30 ---help---
31 Are you sure you really want to build the staging drivers?
32 They taint your kernel, don't live up to the normal Linux
33 kernel quality standards, are a bit crufty around the edges,
34 and might go off and kick your dog when you aren't paying
35 attention.
36
37 Say N here to be able to select and build the Staging drivers.
38 This option is primarily here to prevent them from being built
39 when selecting 'make allyesconfg' and 'make allmodconfig' so
40 don't be all that put off, your dog will be just fine.
41
42if !STAGING_EXCLUDE_BUILD
43
44source "drivers/staging/tty/Kconfig" 27source "drivers/staging/tty/Kconfig"
45 28
46source "drivers/staging/generic_serial/Kconfig" 29source "drivers/staging/generic_serial/Kconfig"
@@ -177,5 +160,4 @@ source "drivers/staging/mei/Kconfig"
177 160
178source "drivers/staging/nvec/Kconfig" 161source "drivers/staging/nvec/Kconfig"
179 162
180endif # !STAGING_EXCLUDE_BUILD
181endif # STAGING 163endif # STAGING
diff --git a/drivers/staging/altera-stapl/altera-jtag.c b/drivers/staging/altera-stapl/altera-jtag.c
index 876308858b82..8b1620b1b2d0 100644
--- a/drivers/staging/altera-stapl/altera-jtag.c
+++ b/drivers/staging/altera-stapl/altera-jtag.c
@@ -26,7 +26,7 @@
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <staging/altera.h> 29#include "altera.h"
30#include "altera-exprt.h" 30#include "altera-exprt.h"
31#include "altera-jtag.h" 31#include "altera-jtag.h"
32 32
diff --git a/drivers/staging/altera-stapl/altera.c b/drivers/staging/altera-stapl/altera.c
index 05aad351b120..9cd5e76880c0 100644
--- a/drivers/staging/altera-stapl/altera.c
+++ b/drivers/staging/altera-stapl/altera.c
@@ -28,7 +28,7 @@
28#include <linux/string.h> 28#include <linux/string.h>
29#include <linux/firmware.h> 29#include <linux/firmware.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <staging/altera.h> 31#include "altera.h"
32#include "altera-exprt.h" 32#include "altera-exprt.h"
33#include "altera-jtag.h" 33#include "altera-jtag.h"
34 34
diff --git a/include/staging/altera.h b/drivers/staging/altera-stapl/altera.h
index 94c0c6181daf..94c0c6181daf 100644
--- a/include/staging/altera.h
+++ b/drivers/staging/altera-stapl/altera.h
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index 1f15e1fb1ab2..afd6cc16a2b8 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -1,6 +1,7 @@
1config ATH6K_LEGACY 1config ATH6K_LEGACY
2 tristate "Atheros AR6003 support (non mac80211)" 2 tristate "Atheros AR6003 support (non mac80211)"
3 depends on MMC && WLAN 3 depends on MMC && WLAN
4 depends on CFG80211
4 select WIRELESS_EXT 5 select WIRELESS_EXT
5 select WEXT_PRIV 6 select WEXT_PRIV
6 help 7 help
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index 77dfb4070c1d..d3a774dbb7e8 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -870,7 +870,8 @@ ar6k_cfg80211_scanComplete_event(struct ar6_softc *ar, int status)
870 if(ar->scan_request) 870 if(ar->scan_request)
871 { 871 {
872 /* Translate data to cfg80211 mgmt format */ 872 /* Translate data to cfg80211 mgmt format */
873 wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy); 873 if (ar->arWmi)
874 wmi_iterate_nodes(ar->arWmi, ar6k_cfg80211_scan_node, ar->wdev->wiphy);
874 875
875 cfg80211_scan_done(ar->scan_request, 876 cfg80211_scan_done(ar->scan_request,
876 ((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false); 877 ((status & A_ECANCELED) || (status & A_EBUSY)) ? true : false);
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_iw.c b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
index e5cf3099f5da..35eec917f232 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_iw.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_iw.c
@@ -65,8 +65,6 @@ wl_iw_extra_params_t g_wl_iw_params;
65extern bool wl_iw_conn_status_str(u32 event_type, u32 status, 65extern bool wl_iw_conn_status_str(u32 event_type, u32 status,
66 u32 reason, char *stringBuf, uint buflen); 66 u32 reason, char *stringBuf, uint buflen);
67 67
68uint wl_msg_level = WL_ERROR_VAL;
69
70#define MAX_WLIW_IOCTL_LEN 1024 68#define MAX_WLIW_IOCTL_LEN 1024
71 69
72#ifdef CONFIG_WIRELESS_EXT 70#ifdef CONFIG_WIRELESS_EXT
diff --git a/drivers/staging/gma500/psb_drv.c b/drivers/staging/gma500/psb_drv.c
index 1c45c11a774e..aa87b1b6a44a 100644
--- a/drivers/staging/gma500/psb_drv.c
+++ b/drivers/staging/gma500/psb_drv.c
@@ -542,6 +542,8 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
542 unsigned long irqflags; 542 unsigned long irqflags;
543 int ret = -ENOMEM; 543 int ret = -ENOMEM;
544 uint32_t tt_pages; 544 uint32_t tt_pages;
545 struct drm_connector *connector;
546 struct psb_intel_output *psb_intel_output;
545 547
546 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL); 548 dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
547 if (dev_priv == NULL) 549 if (dev_priv == NULL)
@@ -663,7 +665,18 @@ static int psb_driver_load(struct drm_device *dev, unsigned long chipset)
663 drm_kms_helper_poll_init(dev); 665 drm_kms_helper_poll_init(dev);
664 } 666 }
665 667
666 ret = psb_backlight_init(dev); 668 /* Only add backlight support if we have LVDS output */
669 list_for_each_entry(connector, &dev->mode_config.connector_list,
670 head) {
671 psb_intel_output = to_psb_intel_output(connector);
672
673 switch (psb_intel_output->type) {
674 case INTEL_OUTPUT_LVDS:
675 ret = psb_backlight_init(dev);
676 break;
677 }
678 }
679
667 if (ret) 680 if (ret)
668 return ret; 681 return ret;
669#if 0 682#if 0
diff --git a/drivers/staging/gma500/psb_fb.c b/drivers/staging/gma500/psb_fb.c
index 99c03a2e06bd..084c36bbfe86 100644
--- a/drivers/staging/gma500/psb_fb.c
+++ b/drivers/staging/gma500/psb_fb.c
@@ -441,6 +441,16 @@ static int psbfb_create(struct psb_fbdev *fbdev,
441 info->screen_size = size; 441 info->screen_size = size;
442 memset(info->screen_base, 0, size); 442 memset(info->screen_base, 0, size);
443 443
444 if (dev_priv->pg->stolen_size) {
445 info->apertures = alloc_apertures(1);
446 if (!info->apertures) {
447 ret = -ENOMEM;
448 goto out_err0;
449 }
450 info->apertures->ranges[0].base = dev->mode_config.fb_base;
451 info->apertures->ranges[0].size = dev_priv->pg->stolen_size;
452 }
453
444 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); 454 drm_fb_helper_fill_fix(info, fb->pitch, fb->depth);
445 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper, 455 drm_fb_helper_fill_var(info, &fbdev->psb_fb_helper,
446 sizes->fb_width, sizes->fb_height); 456 sizes->fb_width, sizes->fb_height);
diff --git a/drivers/staging/gma500/psb_intel_bios.c b/drivers/staging/gma500/psb_intel_bios.c
index 48ac8ba7f40b..417965da5e24 100644
--- a/drivers/staging/gma500/psb_intel_bios.c
+++ b/drivers/staging/gma500/psb_intel_bios.c
@@ -154,10 +154,15 @@ static void parse_lfp_panel_data(struct drm_psb_private *dev_priv,
154 154
155 fill_detail_timing_data(panel_fixed_mode, dvo_timing); 155 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
156 156
157 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode; 157 if (panel_fixed_mode->htotal > 0 && panel_fixed_mode->vtotal > 0) {
158 158 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
159 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 159 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
160 drm_mode_debug_printmodeline(panel_fixed_mode); 160 drm_mode_debug_printmodeline(panel_fixed_mode);
161 } else {
162 DRM_DEBUG("Ignoring bogus LVDS VBT mode.\n");
163 dev_priv->lvds_vbt = 0;
164 kfree(panel_fixed_mode);
165 }
161 166
162 return; 167 return;
163} 168}
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index 0b9b85424dfa..4cc1a5bfab40 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -81,7 +81,6 @@ struct adis16201_state {
81 81
82int adis16201_set_irq(struct iio_dev *indio_dev, bool enable); 82int adis16201_set_irq(struct iio_dev *indio_dev, bool enable);
83 83
84#ifdef CONFIG_IIO_RING_BUFFER
85enum adis16201_scan { 84enum adis16201_scan {
86 ADIS16201_SCAN_SUPPLY, 85 ADIS16201_SCAN_SUPPLY,
87 ADIS16201_SCAN_ACC_X, 86 ADIS16201_SCAN_ACC_X,
@@ -92,6 +91,7 @@ enum adis16201_scan {
92 ADIS16201_SCAN_INCLI_Y, 91 ADIS16201_SCAN_INCLI_Y,
93}; 92};
94 93
94#ifdef CONFIG_IIO_RING_BUFFER
95void adis16201_remove_trigger(struct iio_dev *indio_dev); 95void adis16201_remove_trigger(struct iio_dev *indio_dev);
96int adis16201_probe_trigger(struct iio_dev *indio_dev); 96int adis16201_probe_trigger(struct iio_dev *indio_dev);
97 97
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 8bb8ce50c248..175e21bb9b40 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -76,7 +76,6 @@ struct adis16203_state {
76 76
77int adis16203_set_irq(struct iio_dev *indio_dev, bool enable); 77int adis16203_set_irq(struct iio_dev *indio_dev, bool enable);
78 78
79#ifdef CONFIG_IIO_RING_BUFFER
80enum adis16203_scan { 79enum adis16203_scan {
81 ADIS16203_SCAN_SUPPLY, 80 ADIS16203_SCAN_SUPPLY,
82 ADIS16203_SCAN_AUX_ADC, 81 ADIS16203_SCAN_AUX_ADC,
@@ -85,6 +84,7 @@ enum adis16203_scan {
85 ADIS16203_SCAN_INCLI_Y, 84 ADIS16203_SCAN_INCLI_Y,
86}; 85};
87 86
87#ifdef CONFIG_IIO_RING_BUFFER
88void adis16203_remove_trigger(struct iio_dev *indio_dev); 88void adis16203_remove_trigger(struct iio_dev *indio_dev);
89int adis16203_probe_trigger(struct iio_dev *indio_dev); 89int adis16203_probe_trigger(struct iio_dev *indio_dev);
90 90
diff --git a/drivers/staging/iio/dac/max517.c b/drivers/staging/iio/dac/max517.c
index 881768df47a6..2fe34d21b6aa 100644
--- a/drivers/staging/iio/dac/max517.c
+++ b/drivers/staging/iio/dac/max517.c
@@ -195,7 +195,7 @@ static const struct iio_info max517_info = {
195}; 195};
196 196
197static const struct iio_info max518_info = { 197static const struct iio_info max518_info = {
198 .attrs = &max517_attribute_group, 198 .attrs = &max518_attribute_group,
199 .driver_module = THIS_MODULE, 199 .driver_module = THIS_MODULE,
200}; 200};
201 201
diff --git a/drivers/staging/iio/imu/adis16400_ring.c b/drivers/staging/iio/imu/adis16400_ring.c
index 2589a7e167e4..3612373ddede 100644
--- a/drivers/staging/iio/imu/adis16400_ring.c
+++ b/drivers/staging/iio/imu/adis16400_ring.c
@@ -137,13 +137,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
137 if (st->variant->flags & ADIS16400_NO_BURST) { 137 if (st->variant->flags & ADIS16400_NO_BURST) {
138 ret = adis16350_spi_read_all(&indio_dev->dev, st->rx); 138 ret = adis16350_spi_read_all(&indio_dev->dev, st->rx);
139 if (ret < 0) 139 if (ret < 0)
140 return ret; 140 goto err;
141 for (; i < ring->scan_count; i++) 141 for (; i < ring->scan_count; i++)
142 data[i] = *(s16 *)(st->rx + i*2); 142 data[i] = *(s16 *)(st->rx + i*2);
143 } else { 143 } else {
144 ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx); 144 ret = adis16400_spi_read_burst(&indio_dev->dev, st->rx);
145 if (ret < 0) 145 if (ret < 0)
146 return ret; 146 goto err;
147 for (; i < indio_dev->ring->scan_count; i++) { 147 for (; i < indio_dev->ring->scan_count; i++) {
148 j = __ffs(mask); 148 j = __ffs(mask);
149 mask &= ~(1 << j); 149 mask &= ~(1 << j);
@@ -158,9 +158,13 @@ static irqreturn_t adis16400_trigger_handler(int irq, void *p)
158 ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp); 158 ring->access->store_to(indio_dev->ring, (u8 *) data, pf->timestamp);
159 159
160 iio_trigger_notify_done(indio_dev->trig); 160 iio_trigger_notify_done(indio_dev->trig);
161 kfree(data);
162 161
162 kfree(data);
163 return IRQ_HANDLED; 163 return IRQ_HANDLED;
164
165err:
166 kfree(data);
167 return ret;
164} 168}
165 169
166void adis16400_unconfigure_ring(struct iio_dev *indio_dev) 170void adis16400_unconfigure_ring(struct iio_dev *indio_dev)
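
The adis16400 hunk above replaces early `return ret` statements with `goto err` so the buffer allocated at the top of the handler is freed on every exit path. A generic sketch of that single-cleanup pattern, with hypothetical helper names standing in for the SPI reads:

    #include <stdlib.h>

    /* hypothetical read helper standing in for the SPI burst read */
    static int read_samples(short *buf, int n) { (void)buf; (void)n; return 0; }

    static int handle_event(int nsamples)
    {
            int ret;
            short *data = malloc(nsamples * sizeof(*data));

            if (!data)
                    return -1;

            ret = read_samples(data, nsamples);
            if (ret < 0)
                    goto err;       /* was a bare "return ret" that leaked data */

            /* ... push samples downstream ... */
            free(data);
            return 0;

    err:
            free(data);
            return ret;
    }

    int main(void) { return handle_event(8) < 0; }
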
diff --git a/drivers/staging/iio/industrialio-trigger.c b/drivers/staging/iio/industrialio-trigger.c
index 615902333fb0..d504aa251ced 100644
--- a/drivers/staging/iio/industrialio-trigger.c
+++ b/drivers/staging/iio/industrialio-trigger.c
@@ -294,6 +294,7 @@ struct iio_poll_func
294 pf->h = h; 294 pf->h = h;
295 pf->thread = thread; 295 pf->thread = thread;
296 pf->type = type; 296 pf->type = type;
297 pf->private_data = private;
297 298
298 return pf; 299 return pf;
299} 300}
diff --git a/drivers/staging/mei/init.c b/drivers/staging/mei/init.c
index 2818851c0761..d1ffa32cd141 100644
--- a/drivers/staging/mei/init.c
+++ b/drivers/staging/mei/init.c
@@ -205,10 +205,10 @@ int mei_hw_init(struct mei_device *dev)
205 "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n", 205 "host_hw_state = 0x%08x, me_hw_state = 0x%08x.\n",
206 dev->host_hw_state, dev->me_hw_state); 206 dev->host_hw_state, dev->me_hw_state);
207 207
208 if (!(dev->host_hw_state & H_RDY) != H_RDY) 208 if (!(dev->host_hw_state & H_RDY))
209 dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n"); 209 dev_dbg(&dev->pdev->dev, "host turn off H_RDY.\n");
210 210
211 if (!(dev->me_hw_state & ME_RDY_HRA) != ME_RDY_HRA) 211 if (!(dev->me_hw_state & ME_RDY_HRA))
212 dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n"); 212 dev_dbg(&dev->pdev->dev, "ME turn off ME_RDY.\n");
213 213
214 printk(KERN_ERR "mei: link layer initialization failed.\n"); 214 printk(KERN_ERR "mei: link layer initialization failed.\n");
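
The mei change above drops a comparison that made the old condition meaningless: `!` binds tighter than `!=`, so `!(state & H_RDY) != H_RDY` compares a 0/1 truth value against the flag itself. A standalone illustration, with an arbitrary bit chosen for the demo:

    #include <stdio.h>

    #define H_RDY (1u << 3)         /* arbitrary bit value for the demo */

    int main(void)
    {
            unsigned int state = 0; /* H_RDY not set */

            /* old form: !(state & H_RDY) yields 0 or 1, and comparing that
             * against H_RDY is true whether the flag is set or not */
            printf("old, flag clear: %d\n", !(state & H_RDY) != H_RDY);

            state = H_RDY;
            printf("old, flag set:   %d\n", !(state & H_RDY) != H_RDY);

            /* fixed form: true only when the flag is clear */
            printf("new, flag set:   %d\n", !(state & H_RDY));
            return 0;
    }
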
diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig
index b05306766870..fe40e0b6f675 100644
--- a/drivers/staging/olpc_dcon/Kconfig
+++ b/drivers/staging/olpc_dcon/Kconfig
@@ -2,6 +2,7 @@ config FB_OLPC_DCON
2 tristate "One Laptop Per Child Display CONtroller support" 2 tristate "One Laptop Per Child Display CONtroller support"
3 depends on OLPC && FB 3 depends on OLPC && FB
4 select I2C 4 select I2C
5 select BACKLIGHT_CLASS_DEVICE
5 ---help--- 6 ---help---
6 Add support for the OLPC XO DCON controller. This controller is 7 Add support for the OLPC XO DCON controller. This controller is
7 only available on OLPC platforms. Unless you have one of these 8 only available on OLPC platforms. Unless you have one of these
diff --git a/drivers/staging/rts_pstor/sd.c b/drivers/staging/rts_pstor/sd.c
index bddb0312b31e..cdae497d5467 100644
--- a/drivers/staging/rts_pstor/sd.c
+++ b/drivers/staging/rts_pstor/sd.c
@@ -2328,7 +2328,7 @@ Switch_Fail:
2328 2328
2329 retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5); 2329 retval = sd_send_cmd_get_rsp(chip, IO_SEND_OP_COND, 0, SD_RSP_TYPE_R4, rsp, 5);
2330 if (retval == STATUS_SUCCESS) { 2330 if (retval == STATUS_SUCCESS) {
2331 int func_num = (rsp[1] >> 4) && 0x07; 2331 int func_num = (rsp[1] >> 4) & 0x07;
2332 if (func_num) { 2332 if (func_num) {
2333 RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num); 2333 RTSX_DEBUGP("SD_IO card (Function number: %d)!\n", func_num);
2334 chip->sd_io = 1; 2334 chip->sd_io = 1;
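
The sd.c fix above swaps a logical `&&` for a bitwise `&`; with `&&` the expression collapses to 0 or 1 instead of extracting the 3-bit function-number field. A small demonstration with a made-up response byte:

    #include <stdio.h>

    int main(void)
    {
            unsigned char rsp1 = 0x57;              /* made-up byte: field value 5 */

            int wrong = (rsp1 >> 4) && 0x07;        /* logical AND: always 0 or 1 */
            int right = (rsp1 >> 4) & 0x07;         /* bitwise AND: 5, the field  */

            printf("wrong=%d right=%d\n", wrong, right);
            return 0;
    }
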
diff --git a/drivers/staging/usbip/stub_dev.c b/drivers/staging/usbip/stub_dev.c
index 6e99ec87fee0..8cbea42b69bc 100644
--- a/drivers/staging/usbip/stub_dev.c
+++ b/drivers/staging/usbip/stub_dev.c
@@ -26,6 +26,8 @@
26static int stub_probe(struct usb_interface *interface, 26static int stub_probe(struct usb_interface *interface,
27 const struct usb_device_id *id); 27 const struct usb_device_id *id);
28static void stub_disconnect(struct usb_interface *interface); 28static void stub_disconnect(struct usb_interface *interface);
29static int stub_pre_reset(struct usb_interface *interface);
30static int stub_post_reset(struct usb_interface *interface);
29 31
30/* 32/*
31 * Define device IDs here if you want to explicitly limit exportable devices. 33 * Define device IDs here if you want to explicitly limit exportable devices.
@@ -59,6 +61,8 @@ struct usb_driver stub_driver = {
59 .probe = stub_probe, 61 .probe = stub_probe,
60 .disconnect = stub_disconnect, 62 .disconnect = stub_disconnect,
61 .id_table = stub_table, 63 .id_table = stub_table,
64 .pre_reset = stub_pre_reset,
65 .post_reset = stub_post_reset,
62}; 66};
63 67
64/* 68/*
@@ -541,3 +545,20 @@ static void stub_disconnect(struct usb_interface *interface)
541 del_match_busid((char *)udev_busid); 545 del_match_busid((char *)udev_busid);
542 } 546 }
543} 547}
548
549/*
550 * Presence of pre_reset and post_reset prevents the driver from being unbound
551 * when the device is being reset
552 */
553
554int stub_pre_reset(struct usb_interface *interface)
555{
556 dev_dbg(&interface->dev, "pre_reset\n");
557 return 0;
558}
559
560int stub_post_reset(struct usb_interface *interface)
561{
562 dev_dbg(&interface->dev, "post_reset\n");
563 return 0;
564}
diff --git a/drivers/staging/usbip/stub_rx.c b/drivers/staging/usbip/stub_rx.c
index a5c1fa1f0430..bc57844600b9 100644
--- a/drivers/staging/usbip/stub_rx.c
+++ b/drivers/staging/usbip/stub_rx.c
@@ -175,16 +175,18 @@ static int tweak_reset_device_cmd(struct urb *urb)
175 dev_info(&urb->dev->dev, "usb_queue_reset_device\n"); 175 dev_info(&urb->dev->dev, "usb_queue_reset_device\n");
176 176
177 /* 177 /*
178 * usb_lock_device_for_reset caused a deadlock: it causes the driver 178 * With the implementation of pre_reset and post_reset the driver no
179 * to unbind. In the shutdown the rx thread is signalled to shut down 179 * longer unbinds. This allows the use of synchronous reset.
180 * but this thread is pending in the usb_lock_device_for_reset.
181 *
182 * Instead queue the reset.
183 *
184 * Unfortunatly an existing usbip connection will be dropped due to
185 * driver unbinding.
186 */ 180 */
187 usb_queue_reset_device(sdev->interface); 181
182 if (usb_lock_device_for_reset(sdev->udev, sdev->interface)<0)
183 {
184 dev_err(&urb->dev->dev, "could not obtain lock to reset device\n");
185 return 0;
186 }
187 usb_reset_device(sdev->udev);
188 usb_unlock_device(sdev->udev);
189
188 return 0; 190 return 0;
189} 191}
190 192
diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c
index f2cb7503fcb2..465210930890 100644
--- a/drivers/tty/serial/pch_uart.c
+++ b/drivers/tty/serial/pch_uart.c
@@ -1397,6 +1397,7 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
1397 int fifosize, base_baud; 1397 int fifosize, base_baud;
1398 int port_type; 1398 int port_type;
1399 struct pch_uart_driver_data *board; 1399 struct pch_uart_driver_data *board;
1400 const char *board_name;
1400 1401
1401 board = &drv_dat[id->driver_data]; 1402 board = &drv_dat[id->driver_data];
1402 port_type = board->port_type; 1403 port_type = board->port_type;
@@ -1412,7 +1413,8 @@ static struct eg20t_port *pch_uart_init_port(struct pci_dev *pdev,
1412 base_baud = 1843200; /* 1.8432MHz */ 1413 base_baud = 1843200; /* 1.8432MHz */
1413 1414
1414 /* quirk for CM-iTC board */ 1415 /* quirk for CM-iTC board */
1415 if (strstr(dmi_get_system_info(DMI_BOARD_NAME), "CM-iTC")) 1416 board_name = dmi_get_system_info(DMI_BOARD_NAME);
1417 if (board_name && strstr(board_name, "CM-iTC"))
1416 base_baud = 192000000; /* 192.0MHz */ 1418 base_baud = 192000000; /* 192.0MHz */
1417 1419
1418 switch (port_type) { 1420 switch (port_type) {
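
The pch_uart quirk check above now stores the DMI board name and tests it for NULL before calling `strstr`, since boards without that DMI field would otherwise pass a NULL haystack. The guard pattern in isolation, with a stand-in for the DMI lookup:

    #include <stdio.h>
    #include <string.h>

    /* stand-in for dmi_get_system_info(); may legitimately return NULL */
    static const char *board_name_lookup(void)
    {
            return NULL;
    }

    int main(void)
    {
            const char *board_name = board_name_lookup();
            long base_baud = 1843200;

            /* test the pointer before handing it to strstr() */
            if (board_name && strstr(board_name, "CM-iTC"))
                    base_baud = 192000000;

            printf("base_baud=%ld\n", base_baud);
            return 0;
    }
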
diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
index f1a7918d71aa..6c9b7cd6778a 100644
--- a/drivers/tty/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,8 +413,7 @@ static void flush_to_ldisc(struct work_struct *work)
413 spin_lock_irqsave(&tty->buf.lock, flags); 413 spin_lock_irqsave(&tty->buf.lock, flags);
414 414
415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) { 415 if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
416 struct tty_buffer *head, *tail = tty->buf.tail; 416 struct tty_buffer *head;
417 int seen_tail = 0;
418 while ((head = tty->buf.head) != NULL) { 417 while ((head = tty->buf.head) != NULL) {
419 int count; 418 int count;
420 char *char_buf; 419 char *char_buf;
@@ -424,15 +423,6 @@ static void flush_to_ldisc(struct work_struct *work)
424 if (!count) { 423 if (!count) {
425 if (head->next == NULL) 424 if (head->next == NULL)
426 break; 425 break;
427 /*
428 There's a possibility tty might get new buffer
429 added during the unlock window below. We could
430 end up spinning in here forever hogging the CPU
431 completely. To avoid this let's have a rest each
432 time we processed the tail buffer.
433 */
434 if (tail == head)
435 seen_tail = 1;
436 tty->buf.head = head->next; 426 tty->buf.head = head->next;
437 tty_buffer_free(tty, head); 427 tty_buffer_free(tty, head);
438 continue; 428 continue;
@@ -442,7 +432,7 @@ static void flush_to_ldisc(struct work_struct *work)
442 line discipline as we want to empty the queue */ 432 line discipline as we want to empty the queue */
443 if (test_bit(TTY_FLUSHPENDING, &tty->flags)) 433 if (test_bit(TTY_FLUSHPENDING, &tty->flags))
444 break; 434 break;
445 if (!tty->receive_room || seen_tail) 435 if (!tty->receive_room)
446 break; 436 break;
447 if (count > tty->receive_room) 437 if (count > tty->receive_room)
448 count = tty->receive_room; 438 count = tty->receive_room;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 395a347f2ebb..dac7676ce21b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1530,6 +1530,8 @@ static const struct usb_device_id acm_ids[] = {
1530 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */ 1530 { NOKIA_PCSUITE_ACM_INFO(0x04ce), }, /* Nokia E90 */
1531 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */ 1531 { NOKIA_PCSUITE_ACM_INFO(0x01d4), }, /* Nokia E55 */
1532 { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */ 1532 { NOKIA_PCSUITE_ACM_INFO(0x0302), }, /* Nokia N8 */
1533 { NOKIA_PCSUITE_ACM_INFO(0x0335), }, /* Nokia E7 */
1534 { NOKIA_PCSUITE_ACM_INFO(0x03cd), }, /* Nokia C7 */
1533 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */ 1535 { SAMSUNG_PCSUITE_ACM_INFO(0x6651), }, /* Samsung GTi8510 (INNOV8) */
1534 1536
1535 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */ 1537 /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 79a58c3a2e2a..90ae1753dda1 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -339,7 +339,8 @@ static int get_hub_status(struct usb_device *hdev,
339{ 339{
340 int i, status = -ETIMEDOUT; 340 int i, status = -ETIMEDOUT;
341 341
342 for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) { 342 for (i = 0; i < USB_STS_RETRIES &&
343 (status == -ETIMEDOUT || status == -EPIPE); i++) {
343 status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), 344 status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
344 USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0, 345 USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_HUB, 0, 0,
345 data, sizeof(*data), USB_STS_TIMEOUT); 346 data, sizeof(*data), USB_STS_TIMEOUT);
@@ -355,7 +356,8 @@ static int get_port_status(struct usb_device *hdev, int port1,
355{ 356{
356 int i, status = -ETIMEDOUT; 357 int i, status = -ETIMEDOUT;
357 358
358 for (i = 0; i < USB_STS_RETRIES && status == -ETIMEDOUT; i++) { 359 for (i = 0; i < USB_STS_RETRIES &&
360 (status == -ETIMEDOUT || status == -EPIPE); i++) {
359 status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0), 361 status = usb_control_msg(hdev, usb_rcvctrlpipe(hdev, 0),
360 USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1, 362 USB_REQ_GET_STATUS, USB_DIR_IN | USB_RT_PORT, 0, port1,
361 data, sizeof(*data), USB_STS_TIMEOUT); 363 data, sizeof(*data), USB_STS_TIMEOUT);
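
The hub.c change above widens the retry condition so that a stalled control transfer (-EPIPE) is retried like a timeout instead of aborting the status read. The loop shape, reduced to a stubbed transfer that fails twice before succeeding:

    #include <stdio.h>
    #include <errno.h>

    #define USB_STS_RETRIES 5

    /* stub standing in for usb_control_msg(); fails twice, then succeeds */
    static int control_msg_stub(void)
    {
            static int calls;
            return (++calls < 3) ? -EPIPE : 4;      /* 4 bytes transferred */
    }

    int main(void)
    {
            int i, status = -ETIMEDOUT;

            for (i = 0; i < USB_STS_RETRIES &&
                        (status == -ETIMEDOUT || status == -EPIPE); i++)
                    status = control_msg_stub();

            printf("status=%d after %d attempt(s)\n", status, i);
            return 0;
    }
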
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c
index 1b125c224dcf..2278dad886e2 100644
--- a/drivers/usb/core/inode.c
+++ b/drivers/usb/core/inode.c
@@ -389,7 +389,6 @@ static int usbfs_rmdir(struct inode *dir, struct dentry *dentry)
389 mutex_unlock(&inode->i_mutex); 389 mutex_unlock(&inode->i_mutex);
390 if (!error) 390 if (!error)
391 d_delete(dentry); 391 d_delete(dentry);
392 dput(dentry);
393 return error; 392 return error;
394} 393}
395 394
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 58456d1aec21..029e288805b6 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -632,13 +632,10 @@ config USB_DUMMY_HCD
632 632
633endchoice 633endchoice
634 634
635# Selected by UDC drivers that support high-speed operation.
635config USB_GADGET_DUALSPEED 636config USB_GADGET_DUALSPEED
636 bool 637 bool
637 depends on USB_GADGET 638 depends on USB_GADGET
638 default n
639 help
640 Means that gadget drivers should include extra descriptors
641 and code to handle dual-speed controllers.
642 639
643# 640#
644# USB Gadget Drivers 641# USB Gadget Drivers
diff --git a/drivers/usb/gadget/amd5536udc.c b/drivers/usb/gadget/amd5536udc.c
index 6e42aab75806..95e8138cd48f 100644
--- a/drivers/usb/gadget/amd5536udc.c
+++ b/drivers/usb/gadget/amd5536udc.c
@@ -60,6 +60,7 @@
60#include <linux/device.h> 60#include <linux/device.h>
61#include <linux/io.h> 61#include <linux/io.h>
62#include <linux/irq.h> 62#include <linux/irq.h>
63#include <linux/prefetch.h>
63 64
64#include <asm/byteorder.h> 65#include <asm/byteorder.h>
65#include <asm/system.h> 66#include <asm/system.h>
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 41dc093c0a1b..f4690ffcb489 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -38,6 +38,7 @@
38#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/usb/ch9.h> 39#include <linux/usb/ch9.h>
40#include <linux/usb/gadget.h> 40#include <linux/usb/gadget.h>
41#include <linux/prefetch.h>
41 42
42#include <asm/byteorder.h> 43#include <asm/byteorder.h>
43#include <mach/hardware.h> 44#include <mach/hardware.h>
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index 61ff927928ab..d3dcabc1a5fc 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1906,6 +1906,7 @@ static int dummy_hcd_probe(struct platform_device *pdev)
1906 if (!hcd) 1906 if (!hcd)
1907 return -ENOMEM; 1907 return -ENOMEM;
1908 the_controller = hcd_to_dummy (hcd); 1908 the_controller = hcd_to_dummy (hcd);
1909 hcd->has_tt = 1;
1909 1910
1910 retval = usb_add_hcd(hcd, 0, 0); 1911 retval = usb_add_hcd(hcd, 0, 0);
1911 if (retval != 0) { 1912 if (retval != 0) {
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index a01383f71f38..a56876aaf76c 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -431,8 +431,10 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
431 431
432 /* halt any endpoint by doing a "wrong direction" i/o call */ 432 /* halt any endpoint by doing a "wrong direction" i/o call */
433 if (!usb_endpoint_dir_in(&data->desc)) { 433 if (!usb_endpoint_dir_in(&data->desc)) {
434 if (usb_endpoint_xfer_isoc(&data->desc)) 434 if (usb_endpoint_xfer_isoc(&data->desc)) {
435 mutex_unlock(&data->lock);
435 return -EINVAL; 436 return -EINVAL;
437 }
436 DBG (data->dev, "%s halt\n", data->name); 438 DBG (data->dev, "%s halt\n", data->name);
437 spin_lock_irq (&data->dev->lock); 439 spin_lock_irq (&data->dev->lock);
438 if (likely (data->ep != NULL)) 440 if (likely (data->ep != NULL))
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index b62b2640deb0..b1a8146b9d50 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -2083,7 +2083,7 @@ out:
2083} 2083}
2084 2084
2085#ifdef CONFIG_PM 2085#ifdef CONFIG_PM
2086static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state) 2086static int mv_udc_suspend(struct device *_dev)
2087{ 2087{
2088 struct mv_udc *udc = the_controller; 2088 struct mv_udc *udc = the_controller;
2089 2089
@@ -2092,7 +2092,7 @@ static int mv_udc_suspend(struct platform_device *_dev, pm_message_t state)
2092 return 0; 2092 return 0;
2093} 2093}
2094 2094
2095static int mv_udc_resume(struct platform_device *_dev) 2095static int mv_udc_resume(struct device *_dev)
2096{ 2096{
2097 struct mv_udc *udc = the_controller; 2097 struct mv_udc *udc = the_controller;
2098 int retval; 2098 int retval;
@@ -2100,7 +2100,7 @@ static int mv_udc_resume(struct platform_device *_dev)
2100 retval = mv_udc_phy_init(udc->phy_regs); 2100 retval = mv_udc_phy_init(udc->phy_regs);
2101 if (retval) { 2101 if (retval) {
2102 dev_err(_dev, "phy initialization error %d\n", retval); 2102 dev_err(_dev, "phy initialization error %d\n", retval);
2103 goto error; 2103 return retval;
2104 } 2104 }
2105 udc_reset(udc); 2105 udc_reset(udc);
2106 ep0_reset(udc); 2106 ep0_reset(udc);
@@ -2122,7 +2122,7 @@ static struct platform_driver udc_driver = {
2122 .owner = THIS_MODULE, 2122 .owner = THIS_MODULE,
2123 .name = "pxa-u2o", 2123 .name = "pxa-u2o",
2124#ifdef CONFIG_PM 2124#ifdef CONFIG_PM
2125 .pm = mv_udc_pm_ops, 2125 .pm = &mv_udc_pm_ops,
2126#endif 2126#endif
2127 }, 2127 },
2128}; 2128};
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 24696f7fa6a9..476d88e1ae97 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -63,6 +63,7 @@
63#include <linux/device.h> 63#include <linux/device.h>
64#include <linux/usb/ch9.h> 64#include <linux/usb/ch9.h>
65#include <linux/usb/gadget.h> 65#include <linux/usb/gadget.h>
66#include <linux/prefetch.h>
66 67
67#include <asm/byteorder.h> 68#include <asm/byteorder.h>
68#include <asm/io.h> 69#include <asm/io.h>
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index 365c02fc25fc..774545494cf2 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2216,7 +2216,6 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
2216 if (retval != 0) { 2216 if (retval != 0) {
2217 pr_err("%s: can't get irq %i, err %d\n", 2217 pr_err("%s: can't get irq %i, err %d\n",
2218 driver_name, LUBBOCK_USB_DISC_IRQ, retval); 2218 driver_name, LUBBOCK_USB_DISC_IRQ, retval);
2219lubbock_fail0:
2220 goto err_irq_lub; 2219 goto err_irq_lub;
2221 } 2220 }
2222 retval = request_irq(LUBBOCK_USB_IRQ, 2221 retval = request_irq(LUBBOCK_USB_IRQ,
@@ -2226,7 +2225,6 @@ lubbock_fail0:
2226 if (retval != 0) { 2225 if (retval != 0) {
2227 pr_err("%s: can't get irq %i, err %d\n", 2226 pr_err("%s: can't get irq %i, err %d\n",
2228 driver_name, LUBBOCK_USB_IRQ, retval); 2227 driver_name, LUBBOCK_USB_IRQ, retval);
2229 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2230 goto lubbock_fail0; 2228 goto lubbock_fail0;
2231 } 2229 }
2232 } else 2230 } else
@@ -2236,10 +2234,11 @@ lubbock_fail0:
2236 return 0; 2234 return 0;
2237 2235
2238#ifdef CONFIG_ARCH_LUBBOCK 2236#ifdef CONFIG_ARCH_LUBBOCK
2237lubbock_fail0:
2239 free_irq(LUBBOCK_USB_DISC_IRQ, dev); 2238 free_irq(LUBBOCK_USB_DISC_IRQ, dev);
2240 err_irq_lub: 2239 err_irq_lub:
2241#endif
2242 free_irq(irq, dev); 2240 free_irq(irq, dev);
2241#endif
2243 err_irq1: 2242 err_irq1:
2244 if (gpio_is_valid(dev->mach->gpio_pullup)) 2243 if (gpio_is_valid(dev->mach->gpio_pullup))
2245 gpio_free(dev->mach->gpio_pullup); 2244 gpio_free(dev->mach->gpio_pullup);
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index acb9cc418df9..0dfee282878a 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -2680,9 +2680,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2680 2680
2681 writel(0, hsotg->regs + S3C_DAINTMSK); 2681 writel(0, hsotg->regs + S3C_DAINTMSK);
2682 2682
2683 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 2683 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2684 readl(hsotg->regs + S3C_DIEPCTL0), 2684 readl(hsotg->regs + S3C_DIEPCTL0),
2685 readl(hsotg->regs + S3C_DOEPCTL0)); 2685 readl(hsotg->regs + S3C_DOEPCTL0));
2686 2686
2687 /* enable in and out endpoint interrupts */ 2687 /* enable in and out endpoint interrupts */
2688 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt); 2688 s3c_hsotg_en_gsint(hsotg, S3C_GINTSTS_OEPInt | S3C_GINTSTS_IEPInt);
@@ -2701,7 +2701,7 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2701 udelay(10); /* see openiboot */ 2701 udelay(10); /* see openiboot */
2702 __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone); 2702 __bic32(hsotg->regs + S3C_DCTL, S3C_DCTL_PWROnPrgDone);
2703 2703
2704 dev_info(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL)); 2704 dev_dbg(hsotg->dev, "DCTL=0x%08x\n", readl(hsotg->regs + S3C_DCTL));
2705 2705
2706 /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by 2706 /* S3C_DxEPCTL_USBActEp says RO in manual, but seems to be set by
2707 writing to the EPCTL register.. */ 2707 writing to the EPCTL register.. */
@@ -2721,9 +2721,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
2721 2721
2722 s3c_hsotg_enqueue_setup(hsotg); 2722 s3c_hsotg_enqueue_setup(hsotg);
2723 2723
2724 dev_info(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n", 2724 dev_dbg(hsotg->dev, "EP0: DIEPCTL0=0x%08x, DOEPCTL0=0x%08x\n",
2725 readl(hsotg->regs + S3C_DIEPCTL0), 2725 readl(hsotg->regs + S3C_DIEPCTL0),
2726 readl(hsotg->regs + S3C_DOEPCTL0)); 2726 readl(hsotg->regs + S3C_DOEPCTL0));
2727 2727
2728 /* clear global NAKs */ 2728 /* clear global NAKs */
2729 writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK, 2729 writel(S3C_DCTL_CGOUTNak | S3C_DCTL_CGNPInNAK,
@@ -2921,9 +2921,9 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
2921 2921
2922 /* setup fifos */ 2922 /* setup fifos */
2923 2923
2924 dev_info(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n", 2924 dev_dbg(hsotg->dev, "GRXFSIZ=0x%08x, GNPTXFSIZ=0x%08x\n",
2925 readl(hsotg->regs + S3C_GRXFSIZ), 2925 readl(hsotg->regs + S3C_GRXFSIZ),
2926 readl(hsotg->regs + S3C_GNPTXFSIZ)); 2926 readl(hsotg->regs + S3C_GNPTXFSIZ));
2927 2927
2928 s3c_hsotg_init_fifo(hsotg); 2928 s3c_hsotg_init_fifo(hsotg);
2929 2929
@@ -2945,6 +2945,7 @@ static void s3c_hsotg_init(struct s3c_hsotg *hsotg)
2945 2945
2946static void s3c_hsotg_dump(struct s3c_hsotg *hsotg) 2946static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
2947{ 2947{
2948#ifdef DEBUG
2948 struct device *dev = hsotg->dev; 2949 struct device *dev = hsotg->dev;
2949 void __iomem *regs = hsotg->regs; 2950 void __iomem *regs = hsotg->regs;
2950 u32 val; 2951 u32 val;
@@ -2987,6 +2988,7 @@ static void s3c_hsotg_dump(struct s3c_hsotg *hsotg)
2987 2988
2988 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n", 2989 dev_info(dev, "DVBUSDIS=0x%08x, DVBUSPULSE=%08x\n",
2989 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE)); 2990 readl(regs + S3C_DVBUSDIS), readl(regs + S3C_DVBUSPULSE));
2991#endif
2990} 2992}
2991 2993
2992 2994
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index cfe3cf56d6bd..d5e3e1e58626 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/usb/ch9.h> 27#include <linux/usb/ch9.h>
28#include <linux/usb/gadget.h> 28#include <linux/usb/gadget.h>
29#include <linux/prefetch.h>
29 30
30#include <mach/regs-s3c2443-clock.h> 31#include <mach/regs-s3c2443-clock.h>
31#include <plat/udc.h> 32#include <plat/udc.h>
@@ -1301,7 +1302,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
1301 hsudc->uclk = clk_get(&pdev->dev, "usb-device"); 1302 hsudc->uclk = clk_get(&pdev->dev, "usb-device");
1302 if (IS_ERR(hsudc->uclk)) { 1303 if (IS_ERR(hsudc->uclk)) {
1303 dev_err(dev, "failed to find usb-device clock source\n"); 1304 dev_err(dev, "failed to find usb-device clock source\n");
1304 return PTR_ERR(hsudc->uclk); 1305 ret = PTR_ERR(hsudc->uclk);
1306 goto err_clk;
1305 } 1307 }
1306 clk_enable(hsudc->uclk); 1308 clk_enable(hsudc->uclk);
1307 1309
@@ -1310,7 +1312,8 @@ static int s3c_hsudc_probe(struct platform_device *pdev)
1310 disable_irq(hsudc->irq); 1312 disable_irq(hsudc->irq);
1311 local_irq_enable(); 1313 local_irq_enable();
1312 return 0; 1314 return 0;
1313 1315err_clk:
1316 free_irq(hsudc->irq, hsudc);
1314err_irq: 1317err_irq:
1315 iounmap(hsudc->regs); 1318 iounmap(hsudc->regs);
1316 1319
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 6d8b04061d5d..100f2635cf0a 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -36,6 +36,7 @@
36#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/clk.h> 37#include <linux/clk.h>
38#include <linux/gpio.h> 38#include <linux/gpio.h>
39#include <linux/prefetch.h>
39 40
40#include <linux/debugfs.h> 41#include <linux/debugfs.h>
41#include <linux/seq_file.h> 42#include <linux/seq_file.h>
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index afef7b0a4195..80be5472783a 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -312,8 +312,10 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
312 return PTR_ERR(usb_clk); 312 return PTR_ERR(usb_clk);
313 313
314 hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x"); 314 hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
315 if (!hcd) 315 if (!hcd) {
316 return -ENOMEM; 316 retval = -ENOMEM;
317 goto err0;
318 }
317 319
318 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 320 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
319 if (!r) { 321 if (!r) {
@@ -368,6 +370,7 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
368 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 370 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
369 err1: 371 err1:
370 usb_put_hcd(hcd); 372 usb_put_hcd(hcd);
373 err0:
371 clk_put(usb_clk); 374 clk_put(usb_clk);
372 return retval; 375 return retval;
373} 376}
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 2e0486178dbe..1f50b4468e87 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -438,13 +438,13 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
438 struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx); 438 struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
439 439
440 switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) { 440 switch (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state))) {
441 case 0: 441 case SLOT_STATE_ENABLED:
442 return "enabled/disabled"; 442 return "enabled/disabled";
443 case 1: 443 case SLOT_STATE_DEFAULT:
444 return "default"; 444 return "default";
445 case 2: 445 case SLOT_STATE_ADDRESSED:
446 return "addressed"; 446 return "addressed";
447 case 3: 447 case SLOT_STATE_CONFIGURED:
448 return "configured"; 448 return "configured";
449 default: 449 default:
450 return "reserved"; 450 return "reserved";
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 26caba4c1950..0f8e1d29a858 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -985,9 +985,19 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev,
985 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1; 985 interval = clamp_val(ep->desc.bInterval, 1, 16) - 1;
986 if (interval != ep->desc.bInterval - 1) 986 if (interval != ep->desc.bInterval - 1)
987 dev_warn(&udev->dev, 987 dev_warn(&udev->dev,
988 "ep %#x - rounding interval to %d microframes\n", 988 "ep %#x - rounding interval to %d %sframes\n",
989 ep->desc.bEndpointAddress, 989 ep->desc.bEndpointAddress,
990 1 << interval); 990 1 << interval,
991 udev->speed == USB_SPEED_FULL ? "" : "micro");
992
993 if (udev->speed == USB_SPEED_FULL) {
994 /*
995 * Full speed isoc endpoints specify interval in frames,
996 * not microframes. We are using microframes everywhere,
997 * so adjust accordingly.
998 */
999 interval += 3; /* 1 frame = 2^3 uframes */
1000 }
991 1001
992 return interval; 1002 return interval;
993} 1003}
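
The xhci-mem hunk above converts full-speed isochronous intervals from frames to microframes. The interval is stored as a power-of-two exponent, and one frame is 8 microframes (2^3), so the conversion is simply adding 3 to the exponent. Worked through numerically in a self-contained sketch (the clamp helper is a local stand-in for clamp_val):

    #include <stdio.h>

    static unsigned int clamp_u(unsigned int v, unsigned int lo, unsigned int hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    /* returns the interval as a power-of-two exponent in microframes */
    static unsigned int parse_exponent_interval(unsigned int bInterval, int full_speed)
    {
            unsigned int interval = clamp_u(bInterval, 1, 16) - 1;

            if (full_speed)
                    interval += 3;  /* 1 frame = 2^3 microframes */

            return interval;
    }

    int main(void)
    {
            /* bInterval = 4 -> exponent 3 in frames -> 2^6 = 64 microframes */
            unsigned int exp = parse_exponent_interval(4, 1);

            printf("interval = 2^%u = %u microframes\n", exp, 1u << exp);
            return 0;
    }
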
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c408e9f6a707..17541d09eabb 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -106,12 +106,22 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
106 106
107 /* Look for vendor-specific quirks */ 107 /* Look for vendor-specific quirks */
108 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC && 108 if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
109 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK && 109 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) {
110 pdev->revision == 0x0) { 110 if (pdev->revision == 0x0) {
111 xhci->quirks |= XHCI_RESET_EP_QUIRK; 111 xhci->quirks |= XHCI_RESET_EP_QUIRK;
112 xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure" 112 xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
113 " endpoint cmd after reset endpoint\n"); 113 " endpoint cmd after reset endpoint\n");
114 }
115 /* Fresco Logic confirms: all revisions of this chip do not
116 * support MSI, even though some of them claim to in their PCI
117 * capabilities.
118 */
119 xhci->quirks |= XHCI_BROKEN_MSI;
120 xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
121 "has broken MSI implementation\n",
122 pdev->revision);
114 } 123 }
124
115 if (pdev->vendor == PCI_VENDOR_ID_NEC) 125 if (pdev->vendor == PCI_VENDOR_ID_NEC)
116 xhci->quirks |= XHCI_NEC_HOST; 126 xhci->quirks |= XHCI_NEC_HOST;
117 127
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index cc1485bfed38..800f417c7309 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1782,7 +1782,7 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
1782 struct usb_iso_packet_descriptor *frame; 1782 struct usb_iso_packet_descriptor *frame;
1783 int idx; 1783 int idx;
1784 1784
1785 ep_ring = xhci_dma_to_transfer_ring(ep, event->buffer); 1785 ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
1786 urb_priv = td->urb->hcpriv; 1786 urb_priv = td->urb->hcpriv;
1787 idx = urb_priv->td_cnt; 1787 idx = urb_priv->td_cnt;
1788 frame = &td->urb->iso_frame_desc[idx]; 1788 frame = &td->urb->iso_frame_desc[idx];
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index d9660eb97eb9..06e7023258d0 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -430,12 +430,19 @@ int xhci_run(struct usb_hcd *hcd)
430 free_irq(hcd->irq, hcd); 430 free_irq(hcd->irq, hcd);
431 hcd->irq = -1; 431 hcd->irq = -1;
432 432
433 /* Some Fresco Logic host controllers advertise MSI, but fail to
434 * generate interrupts. Don't even try to enable MSI.
435 */
436 if (xhci->quirks & XHCI_BROKEN_MSI)
437 goto legacy_irq;
438
433 ret = xhci_setup_msix(xhci); 439 ret = xhci_setup_msix(xhci);
434 if (ret) 440 if (ret)
435 /* fall back to msi*/ 441 /* fall back to msi*/
436 ret = xhci_setup_msi(xhci); 442 ret = xhci_setup_msi(xhci);
437 443
438 if (ret) { 444 if (ret) {
445legacy_irq:
439 /* fall back to legacy interrupt*/ 446 /* fall back to legacy interrupt*/
440 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, 447 ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
441 hcd->irq_descr, hcd); 448 hcd->irq_descr, hcd);
@@ -1849,8 +1856,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
1849 1856
1850 /* Free any rings that were dropped, but not changed. */ 1857 /* Free any rings that were dropped, but not changed. */
1851 for (i = 1; i < 31; ++i) { 1858 for (i = 1; i < 31; ++i) {
1852 if ((ctrl_ctx->drop_flags & (1 << (i + 1))) && 1859 if ((le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1))) &&
1853 !(ctrl_ctx->add_flags & (1 << (i + 1)))) 1860 !(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1))))
1854 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i); 1861 xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
1855 } 1862 }
1856 xhci_zero_in_ctx(xhci, virt_dev); 1863 xhci_zero_in_ctx(xhci, virt_dev);
@@ -2467,6 +2474,7 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2467 struct xhci_command *reset_device_cmd; 2474 struct xhci_command *reset_device_cmd;
2468 int timeleft; 2475 int timeleft;
2469 int last_freed_endpoint; 2476 int last_freed_endpoint;
2477 struct xhci_slot_ctx *slot_ctx;
2470 2478
2471 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__); 2479 ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
2472 if (ret <= 0) 2480 if (ret <= 0)
@@ -2499,6 +2507,12 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
2499 return -EINVAL; 2507 return -EINVAL;
2500 } 2508 }
2501 2509
2510 /* If device is not setup, there is no point in resetting it */
2511 slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
2512 if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
2513 SLOT_STATE_DISABLED)
2514 return 0;
2515
2502 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id); 2516 xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
2503 /* Allocate the command structure that holds the struct completion. 2517 /* Allocate the command structure that holds the struct completion.
2504 * Assume we're in process context, since the normal device reset 2518 * Assume we're in process context, since the normal device reset
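
Together, the xhci-pci and xhci.c hunks above add an XHCI_BROKEN_MSI quirk bit and use it to skip MSI-X/MSI setup entirely, falling straight through to the legacy interrupt path. The control flow, reduced to a flag check with placeholder setup routines (only the quirk value is taken from the xhci.h hunk; everything else is illustrative):

    #include <stdio.h>

    #define XHCI_BROKEN_MSI (1 << 6)        /* value copied from the xhci.h hunk */

    /* placeholder setup routines; each returns 0 on success */
    static int setup_msix(void)   { return -1; }    /* pretend MSI-X unavailable */
    static int setup_msi(void)    { return -1; }    /* pretend MSI unavailable   */
    static int setup_legacy(void) { return 0; }

    static int setup_irq(unsigned int quirks)
    {
            int ret;

            if (quirks & XHCI_BROKEN_MSI)
                    goto legacy_irq;        /* don't even try MSI on quirky hosts */

            ret = setup_msix();
            if (ret)
                    ret = setup_msi();      /* fall back to MSI */
            if (!ret)
                    return 0;

    legacy_irq:
            return setup_legacy();          /* final fallback: legacy interrupt */
    }

    int main(void)
    {
            printf("quirky host -> %d\n", setup_irq(XHCI_BROKEN_MSI));
            printf("normal host -> %d\n", setup_irq(0));
            return 0;
    }
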
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index ac0196e7fcf1..7d1ea3bf5e1f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -560,6 +560,11 @@ struct xhci_slot_ctx {
560#define SLOT_STATE (0x1f << 27) 560#define SLOT_STATE (0x1f << 27)
561#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27) 561#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
562 562
563#define SLOT_STATE_DISABLED 0
564#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
565#define SLOT_STATE_DEFAULT 1
566#define SLOT_STATE_ADDRESSED 2
567#define SLOT_STATE_CONFIGURED 3
563 568
564/** 569/**
565 * struct xhci_ep_ctx 570 * struct xhci_ep_ctx
@@ -1302,6 +1307,7 @@ struct xhci_hcd {
1302 * commands. 1307 * commands.
1303 */ 1308 */
1304#define XHCI_EP_LIMIT_QUIRK (1 << 5) 1309#define XHCI_EP_LIMIT_QUIRK (1 << 5)
1310#define XHCI_BROKEN_MSI (1 << 6)
1305 unsigned int num_active_eps; 1311 unsigned int num_active_eps;
1306 unsigned int limit_active_eps; 1312 unsigned int limit_active_eps;
1307 /* There are two roothubs to keep track of bus suspend info for */ 1313 /* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index ab8e1001e5e2..c71b0372786e 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -96,6 +96,7 @@
96#include <linux/init.h> 96#include <linux/init.h>
97#include <linux/list.h> 97#include <linux/list.h>
98#include <linux/kobject.h> 98#include <linux/kobject.h>
99#include <linux/prefetch.h>
99#include <linux/platform_device.h> 100#include <linux/platform_device.h>
100#include <linux/io.h> 101#include <linux/io.h>
101 102
diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c
index 206cfabc9286..547486ccd059 100644
--- a/drivers/usb/renesas_usbhs/mod_gadget.c
+++ b/drivers/usb/renesas_usbhs/mod_gadget.c
@@ -1380,5 +1380,6 @@ void __devexit usbhs_mod_gadget_remove(struct usbhs_priv *priv)
1380{ 1380{
1381 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); 1381 struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
1382 1382
1383 kfree(gpriv->uep);
1383 kfree(gpriv); 1384 kfree(gpriv);
1384} 1385}
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index e8dbde55f6c5..162728977553 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -647,6 +647,7 @@ static struct usb_device_id id_table_combined [] = {
647 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) }, 647 { USB_DEVICE(FTDI_VID, EVER_ECO_PRO_CDS) },
648 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) }, 648 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_1_PID) },
649 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) }, 649 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_2_PID) },
650 { USB_DEVICE(FTDI_VID, FTDI_4N_GALAXY_DE_3_PID) },
650 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) }, 651 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_0_PID) },
651 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) }, 652 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_1_PID) },
652 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) }, 653 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_2_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 1d946cd238ba..ab1fcdf3c378 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -351,6 +351,7 @@
351 */ 351 */
352#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0 352#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
353#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1 353#define FTDI_4N_GALAXY_DE_2_PID 0xF3C1
354#define FTDI_4N_GALAXY_DE_3_PID 0xF3C2
354 355
355/* 356/*
356 * Linx Technologies product ids 357 * Linx Technologies product ids
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 318dd00040a3..60b25d8ea0e2 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -311,10 +311,6 @@ static void option_instat_callback(struct urb *urb);
311#define ZTE_PRODUCT_AC2726 0xfff5 311#define ZTE_PRODUCT_AC2726 0xfff5
312#define ZTE_PRODUCT_AC8710T 0xffff 312#define ZTE_PRODUCT_AC8710T 0xffff
313 313
314/* ZTE PRODUCTS -- alternate vendor ID */
315#define ZTE_VENDOR_ID2 0x1d6b
316#define ZTE_PRODUCT_MF_330 0x0002
317
318#define BENQ_VENDOR_ID 0x04a5 314#define BENQ_VENDOR_ID 0x04a5
319#define BENQ_PRODUCT_H10 0x4068 315#define BENQ_PRODUCT_H10 0x4068
320 316
@@ -340,11 +336,12 @@ static void option_instat_callback(struct urb *urb);
340#define TOSHIBA_PRODUCT_G450 0x0d45 336#define TOSHIBA_PRODUCT_G450 0x0d45
341 337
342#define ALINK_VENDOR_ID 0x1e0e 338#define ALINK_VENDOR_ID 0x1e0e
339#define ALINK_PRODUCT_PH300 0x9100
343#define ALINK_PRODUCT_3GU 0x9200 340#define ALINK_PRODUCT_3GU 0x9200
344 341
345/* ALCATEL PRODUCTS */ 342/* ALCATEL PRODUCTS */
346#define ALCATEL_VENDOR_ID 0x1bbb 343#define ALCATEL_VENDOR_ID 0x1bbb
347#define ALCATEL_PRODUCT_X060S 0x0000 344#define ALCATEL_PRODUCT_X060S_X200 0x0000
348 345
349#define PIRELLI_VENDOR_ID 0x1266 346#define PIRELLI_VENDOR_ID 0x1266
350#define PIRELLI_PRODUCT_C100_1 0x1002 347#define PIRELLI_PRODUCT_C100_1 0x1002
@@ -379,6 +376,9 @@ static void option_instat_callback(struct urb *urb);
379 * It seems to contain a Qualcomm QSC6240/6290 chipset */ 376 * It seems to contain a Qualcomm QSC6240/6290 chipset */
380#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603 377#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
381 378
379/* Zoom */
380#define ZOOM_PRODUCT_4597 0x9607
381
382/* Haier products */ 382/* Haier products */
383#define HAIER_VENDOR_ID 0x201e 383#define HAIER_VENDOR_ID 0x201e
384#define HAIER_PRODUCT_CE100 0x2009 384#define HAIER_PRODUCT_CE100 0x2009
@@ -432,6 +432,20 @@ static const struct option_blacklist_info four_g_w14_blacklist = {
432 .reason = OPTION_BLACKLIST_SENDSETUP 432 .reason = OPTION_BLACKLIST_SENDSETUP
433}; 433};
434 434
435static const u8 alcatel_x200_no_sendsetup[] = { 0, 1 };
436static const struct option_blacklist_info alcatel_x200_blacklist = {
437 .infolen = ARRAY_SIZE(alcatel_x200_no_sendsetup),
438 .ifaceinfo = alcatel_x200_no_sendsetup,
439 .reason = OPTION_BLACKLIST_SENDSETUP
440};
441
442static const u8 zte_k3765_z_no_sendsetup[] = { 0, 1, 2 };
443static const struct option_blacklist_info zte_k3765_z_blacklist = {
444 .infolen = ARRAY_SIZE(zte_k3765_z_no_sendsetup),
445 .ifaceinfo = zte_k3765_z_no_sendsetup,
446 .reason = OPTION_BLACKLIST_SENDSETUP
447};
448
435static const struct usb_device_id option_ids[] = { 449static const struct usb_device_id option_ids[] = {
436 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, 450 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
437 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, 451 { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -916,13 +930,13 @@ static const struct usb_device_id option_ids[] = {
916 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, 930 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
917 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, 931 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) },
918 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, 932 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) },
919 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff) }, 933 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
934 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
920 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, 935 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
921 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, 936 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) },
922 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, 937 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) },
923 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, 938 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
924 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) }, 939 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
925 { USB_DEVICE(ZTE_VENDOR_ID2, ZTE_PRODUCT_MF_330) },
926 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) }, 940 { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
927 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) }, 941 { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
928 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */ 942 { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
@@ -935,13 +949,17 @@ static const struct usb_device_id option_ids[] = {
935 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) }, 949 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
936 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */ 950 { USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
937 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) }, 951 { USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
952 { USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
938 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) }, 953 { USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
939 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) }, 954 { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
955 .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
956 },
940 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, 957 { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
941 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, 958 { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
942 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 959 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
943 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 960 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
944 }, 961 },
962 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
945 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, 963 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
946 /* Pirelli */ 964 /* Pirelli */
947 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)}, 965 { USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
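
The two blacklist structs added above are attached to their table entries through .driver_info; at probe time the driver casts that value back and skips the "send setup" request on the listed interface numbers. The standalone model below mirrors only the fields used here (ifaceinfo, infolen, reason) and is not the driver's actual helper.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

enum option_blacklist_reason {
        OPTION_BLACKLIST_NONE,
        OPTION_BLACKLIST_SENDSETUP,
};

struct blacklist_info {
        const uint8_t *ifaceinfo;       /* interface numbers the rule covers */
        size_t infolen;
        enum option_blacklist_reason reason;
};

/* True if "send setup" must be skipped on interface ifnum. */
static bool skip_sendsetup(const struct blacklist_info *bl, uint8_t ifnum)
{
        size_t i;

        if (!bl || bl->reason != OPTION_BLACKLIST_SENDSETUP)
                return false;
        for (i = 0; i < bl->infolen; i++)
                if (bl->ifaceinfo[i] == ifnum)
                        return true;
        return false;
}
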
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 00418995d8e9..e8ae21b2d387 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -819,6 +819,35 @@ Retry_Sense:
819 } 819 }
820 } 820 }
821 821
822 /*
823 * Some devices don't work or return incorrect data the first
824 * time they get a READ(10) command, or for the first READ(10)
825 * after a media change. If the INITIAL_READ10 flag is set,
826 * keep track of whether READ(10) commands succeed. If the
827 * previous one succeeded and this one failed, set the REDO_READ10
828 * flag to force a retry.
829 */
830 if (unlikely((us->fflags & US_FL_INITIAL_READ10) &&
831 srb->cmnd[0] == READ_10)) {
832 if (srb->result == SAM_STAT_GOOD) {
833 set_bit(US_FLIDX_READ10_WORKED, &us->dflags);
834 } else if (test_bit(US_FLIDX_READ10_WORKED, &us->dflags)) {
835 clear_bit(US_FLIDX_READ10_WORKED, &us->dflags);
836 set_bit(US_FLIDX_REDO_READ10, &us->dflags);
837 }
838
839 /*
840 * Next, if the REDO_READ10 flag is set, return a result
841 * code that will cause the SCSI core to retry the READ(10)
842 * command immediately.
843 */
844 if (test_bit(US_FLIDX_REDO_READ10, &us->dflags)) {
845 clear_bit(US_FLIDX_REDO_READ10, &us->dflags);
846 srb->result = DID_IMM_RETRY << 16;
847 srb->sense_buffer[0] = 0;
848 }
849 }
850
822 /* Did we transfer less than the minimum amount required? */ 851 /* Did we transfer less than the minimum amount required? */
823 if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) && 852 if ((srb->result == SAM_STAT_GOOD || srb->sense_buffer[2] == 0) &&
824 scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow) 853 scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
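
The new hunk keeps two bits of per-device state: whether the previous READ(10) worked, and whether the next one must be redone. The userspace model below is illustrative only; the flag names mirror the dflags bits added in usb.h further down.

#include <stdbool.h>

struct read10_state {
        bool read10_worked;     /* US_FLIDX_READ10_WORKED */
        bool redo_read10;       /* US_FLIDX_REDO_READ10 */
};

/* Returns true if the just-completed READ(10) should be retried
 * immediately (the driver maps this to DID_IMM_RETRY). */
static bool read10_completed(struct read10_state *s, bool cmd_succeeded)
{
        if (cmd_succeeded) {
                s->read10_worked = true;
        } else if (s->read10_worked) {
                s->read10_worked = false;
                s->redo_read10 = true;
        }

        if (s->redo_read10) {
                s->redo_read10 = false;
                return true;
        }
        return false;
}
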
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index c1602b8c5594..ccff3483eebc 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1114,6 +1114,16 @@ UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
1114 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1114 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1115 US_FL_FIX_CAPACITY ), 1115 US_FL_FIX_CAPACITY ),
1116 1116
1117/* Reported by Paul Hartman <paul.hartman+linux@gmail.com>
1118 * This card reader returns "Illegal Request, Logical Block Address
1119 * Out of Range" for the first READ(10) after a new card is inserted.
1120 */
1121UNUSUAL_DEV( 0x090c, 0x6000, 0x0100, 0x0100,
1122 "Feiya",
1123 "SD/SDHC Card Reader",
1124 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1125 US_FL_INITIAL_READ10 ),
1126
1117/* This Pentax still camera is not conformant 1127/* This Pentax still camera is not conformant
1118 * to the USB storage specification: - 1128 * to the USB storage specification: -
1119 * - It does not like the INQUIRY command. So we must handle this command 1129 * - It does not like the INQUIRY command. So we must handle this command
@@ -1888,6 +1898,15 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
1888 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1898 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1889 US_FL_NO_READ_DISC_INFO ), 1899 US_FL_NO_READ_DISC_INFO ),
1890 1900
1901/* Reported by Sven Geggus <sven-usbst@geggus.net>
1902 * This encrypted pen drive returns bogus data for the initial READ(10).
1903 */
1904UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
1905 "Corsair",
1906 "Padlock v2",
1907 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1908 US_FL_INITIAL_READ10 ),
1909
1891/* Patch by Richard Schütz <r.schtz@t-online.de> 1910/* Patch by Richard Schütz <r.schtz@t-online.de>
1892 * This external hard drive enclosure uses a JMicron chip which 1911 * This external hard drive enclosure uses a JMicron chip which
1893 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ 1912 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5ee7ac42e08f..0ca095820f3e 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -440,7 +440,8 @@ static void adjust_quirks(struct us_data *us)
440 US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 | 440 US_FL_NOT_LOCKABLE | US_FL_MAX_SECTORS_64 |
441 US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE | 441 US_FL_CAPACITY_OK | US_FL_IGNORE_RESIDUE |
442 US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT | 442 US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
443 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16); 443 US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
444 US_FL_INITIAL_READ10);
444 445
445 p = quirks; 446 p = quirks;
446 while (*p) { 447 while (*p) {
@@ -490,6 +491,9 @@ static void adjust_quirks(struct us_data *us)
490 case 'm': 491 case 'm':
491 f |= US_FL_MAX_SECTORS_64; 492 f |= US_FL_MAX_SECTORS_64;
492 break; 493 break;
494 case 'n':
495 f |= US_FL_INITIAL_READ10;
496 break;
493 case 'o': 497 case 'o':
494 f |= US_FL_CAPACITY_OK; 498 f |= US_FL_CAPACITY_OK;
495 break; 499 break;
@@ -953,6 +957,13 @@ int usb_stor_probe2(struct us_data *us)
953 if (result) 957 if (result)
954 goto BadDevice; 958 goto BadDevice;
955 959
960 /*
961 * If the device returns invalid data for the first READ(10)
962 * command, indicate the command should be retried.
963 */
964 if (us->fflags & US_FL_INITIAL_READ10)
965 set_bit(US_FLIDX_REDO_READ10, &us->dflags);
966
956 /* Acquire all the other resources and add the host */ 967 /* Acquire all the other resources and add the host */
957 result = usb_stor_acquire_resources(us); 968 result = usb_stor_acquire_resources(us);
958 if (result) 969 if (result)
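
Two things happen above: the quirks= parser learns the letter 'n' for US_FL_INITIAL_READ10, and probe pre-arms the redo bit so even the very first READ(10) is retried once. The runnable sketch below models the letter-to-flag mapping only; the flag values here are arbitrary stand-ins, not the kernel's real US_FL_* bits.

#include <stdio.h>

#define FL_MAX_SECTORS_64   (1u << 0)   /* 'm' */
#define FL_INITIAL_READ10   (1u << 1)   /* 'n' (new in this patch) */
#define FL_CAPACITY_OK      (1u << 2)   /* 'o' */

static unsigned parse_quirks(const char *p)
{
        unsigned f = 0;

        for (; *p; p++) {
                switch (*p) {
                case 'm': f |= FL_MAX_SECTORS_64; break;
                case 'n': f |= FL_INITIAL_READ10; break;
                case 'o': f |= FL_CAPACITY_OK;    break;
                default:  break;        /* other letters omitted here */
                }
        }
        return f;
}

int main(void)
{
        printf("0x%x\n", parse_quirks("mn"));   /* prints 0x3 */
        return 0;
}
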
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index 89d3bfff98df..7b0f2113632e 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -73,6 +73,8 @@ struct us_unusual_dev {
73#define US_FLIDX_RESETTING 4 /* device reset in progress */ 73#define US_FLIDX_RESETTING 4 /* device reset in progress */
74#define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */ 74#define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */
75#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */ 75#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */
76#define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */
77#define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */
76 78
77#define USB_STOR_STRING_LEN 32 79#define USB_STOR_STRING_LEN 32
78 80
diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
index 3ec4923c2d84..c22e8d39a2cb 100644
--- a/drivers/video/arcfb.c
+++ b/drivers/video/arcfb.c
@@ -515,11 +515,10 @@ static int __devinit arcfb_probe(struct platform_device *dev)
515 515
516 /* We need a flat backing store for the Arc's 516 /* We need a flat backing store for the Arc's
517 less-flat actual paged framebuffer */ 517 less-flat actual paged framebuffer */
518 if (!(videomemory = vmalloc(videomemorysize))) 518 videomemory = vzalloc(videomemorysize);
519 if (!videomemory)
519 return retval; 520 return retval;
520 521
521 memset(videomemory, 0, videomemorysize);
522
523 info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev); 522 info = framebuffer_alloc(sizeof(struct arcfb_par), &dev->dev);
524 if (!info) 523 if (!info)
525 goto err; 524 goto err;
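
This conversion (and the identical ones in broadsheetfb, hecubafb and metronomefb further below) relies on vzalloc() being vmalloc() plus zeroing. A sketch of the equivalence, kernel context assumed, not code from the patch:

#include <linux/vmalloc.h>
#include <linux/string.h>

/* The two helpers return identically zeroed buffers; the patch replaces
 * the open-coded form with the single vzalloc() call. */
static void *alloc_zeroed_old(unsigned long size)
{
        void *buf = vmalloc(size);

        if (buf)
                memset(buf, 0, size);
        return buf;
}

static void *alloc_zeroed_new(unsigned long size)
{
        return vzalloc(size);   /* vmalloc() + zeroing in one call */
}
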
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index ebb893c49e90..d7aaec5667bf 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -248,10 +248,6 @@ static int atyfb_sync(struct fb_info *info);
248 248
249static int aty_init(struct fb_info *info); 249static int aty_init(struct fb_info *info);
250 250
251#ifdef CONFIG_ATARI
252static int store_video_par(char *videopar, unsigned char m64_num);
253#endif
254
255static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc); 251static void aty_get_crtc(const struct atyfb_par *par, struct crtc *crtc);
256 252
257static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc); 253static void aty_set_crtc(const struct atyfb_par *par, const struct crtc *crtc);
@@ -2268,11 +2264,13 @@ error:
2268 return; 2264 return;
2269} 2265}
2270 2266
2267#ifdef CONFIG_PCI
2271static void aty_bl_exit(struct backlight_device *bd) 2268static void aty_bl_exit(struct backlight_device *bd)
2272{ 2269{
2273 backlight_device_unregister(bd); 2270 backlight_device_unregister(bd);
2274 printk("aty: Backlight unloaded\n"); 2271 printk("aty: Backlight unloaded\n");
2275} 2272}
2273#endif /* CONFIG_PCI */
2276 2274
2277#endif /* CONFIG_FB_ATY_BACKLIGHT */ 2275#endif /* CONFIG_FB_ATY_BACKLIGHT */
2278 2276
@@ -2789,7 +2787,7 @@ aty_init_exit:
2789 return ret; 2787 return ret;
2790} 2788}
2791 2789
2792#ifdef CONFIG_ATARI 2790#if defined(CONFIG_ATARI) && !defined(MODULE)
2793static int __devinit store_video_par(char *video_str, unsigned char m64_num) 2791static int __devinit store_video_par(char *video_str, unsigned char m64_num)
2794{ 2792{
2795 char *p; 2793 char *p;
@@ -2818,7 +2816,7 @@ static int __devinit store_video_par(char *video_str, unsigned char m64_num)
2818 phys_vmembase[m64_num] = 0; 2816 phys_vmembase[m64_num] = 0;
2819 return -1; 2817 return -1;
2820} 2818}
2821#endif /* CONFIG_ATARI */ 2819#endif /* CONFIG_ATARI && !MODULE */
2822 2820
2823/* 2821/*
2824 * Blank the display. 2822 * Blank the display.
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0c9373bedd1f..2d93c8d61ad5 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -302,6 +302,18 @@ config BACKLIGHT_ADP8860
302 To compile this driver as a module, choose M here: the module will 302 To compile this driver as a module, choose M here: the module will
303 be called adp8860_bl. 303 be called adp8860_bl.
304 304
305config BACKLIGHT_ADP8870
306 tristate "Backlight Driver for ADP8870 using WLED"
307 depends on BACKLIGHT_CLASS_DEVICE && I2C
308 select NEW_LEDS
309 select LEDS_CLASS
310 help
311 If you have an LCD backlight connected to the ADP8870,
312 say Y here to enable this driver.
313
314 To compile this driver as a module, choose M here: the module will
315 be called adp8870_bl.
316
305config BACKLIGHT_88PM860X 317config BACKLIGHT_88PM860X
306 tristate "Backlight Driver for 88PM8606 using WLED" 318 tristate "Backlight Driver for 88PM8606 using WLED"
307 depends on MFD_88PM860X 319 depends on MFD_88PM860X
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index b9ca8490df87..ee72adb8786e 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
34obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o 34obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
35obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o 35obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
36obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o 36obj-$(CONFIG_BACKLIGHT_ADP8860) += adp8860_bl.o
37obj-$(CONFIG_BACKLIGHT_ADP8870) += adp8870_bl.o
37obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o 38obj-$(CONFIG_BACKLIGHT_88PM860X) += 88pm860x_bl.o
38obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o 39obj-$(CONFIG_BACKLIGHT_PCF50633) += pcf50633-backlight.o
39 40
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
new file mode 100644
index 000000000000..05a8832bb3eb
--- /dev/null
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -0,0 +1,1012 @@
1/*
2 * Backlight driver for Analog Devices ADP8870 Backlight Devices
3 *
4 * Copyright 2009-2011 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/version.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/pm.h>
14#include <linux/platform_device.h>
15#include <linux/i2c.h>
16#include <linux/fb.h>
17#include <linux/backlight.h>
18#include <linux/leds.h>
19#include <linux/workqueue.h>
20#include <linux/slab.h>
21
22#include <linux/i2c/adp8870.h>
23#define ADP8870_EXT_FEATURES
24#define ADP8870_USE_LEDS
25
26
27#define ADP8870_MFDVID 0x00 /* Manufacturer and device ID */
28#define ADP8870_MDCR 0x01 /* Device mode and status */
29#define ADP8870_INT_STAT 0x02 /* Interrupts status */
30#define ADP8870_INT_EN 0x03 /* Interrupts enable */
31#define ADP8870_CFGR 0x04 /* Configuration register */
32#define ADP8870_BLSEL 0x05 /* Sink enable backlight or independent */
33#define ADP8870_PWMLED 0x06 /* PWM Enable Selection Register */
34#define ADP8870_BLOFF 0x07 /* Backlight off timeout */
35#define ADP8870_BLDIM 0x08 /* Backlight dim timeout */
36#define ADP8870_BLFR 0x09 /* Backlight fade in and out rates */
37#define ADP8870_BLMX1 0x0A /* Backlight (Brightness Level 1-daylight) maximum current */
38#define ADP8870_BLDM1 0x0B /* Backlight (Brightness Level 1-daylight) dim current */
39#define ADP8870_BLMX2 0x0C /* Backlight (Brightness Level 2-bright) maximum current */
40#define ADP8870_BLDM2 0x0D /* Backlight (Brightness Level 2-bright) dim current */
41#define ADP8870_BLMX3 0x0E /* Backlight (Brightness Level 3-office) maximum current */
42#define ADP8870_BLDM3 0x0F /* Backlight (Brightness Level 3-office) dim current */
43#define ADP8870_BLMX4 0x10 /* Backlight (Brightness Level 4-indoor) maximum current */
44#define ADP8870_BLDM4 0x11 /* Backlight (Brightness Level 4-indoor) dim current */
45#define ADP8870_BLMX5 0x12 /* Backlight (Brightness Level 5-dark) maximum current */
46#define ADP8870_BLDM5 0x13 /* Backlight (Brightness Level 5-dark) dim current */
47#define ADP8870_ISCLAW 0x1A /* Independent sink current fade law register */
48#define ADP8870_ISCC 0x1B /* Independent sink current control register */
49#define ADP8870_ISCT1 0x1C /* Independent Sink Current Timer Register LED[7:5] */
50#define ADP8870_ISCT2 0x1D /* Independent Sink Current Timer Register LED[4:1] */
51#define ADP8870_ISCF 0x1E /* Independent sink current fade register */
52#define ADP8870_ISC1 0x1F /* Independent Sink Current LED1 */
53#define ADP8870_ISC2 0x20 /* Independent Sink Current LED2 */
54#define ADP8870_ISC3 0x21 /* Independent Sink Current LED3 */
55#define ADP8870_ISC4 0x22 /* Independent Sink Current LED4 */
56#define ADP8870_ISC5 0x23 /* Independent Sink Current LED5 */
57#define ADP8870_ISC6 0x24 /* Independent Sink Current LED6 */
58#define ADP8870_ISC7 0x25 /* Independent Sink Current LED7 (Brightness Level 1-daylight) */
59#define ADP8870_ISC7_L2 0x26 /* Independent Sink Current LED7 (Brightness Level 2-bright) */
60#define ADP8870_ISC7_L3 0x27 /* Independent Sink Current LED7 (Brightness Level 3-office) */
61#define ADP8870_ISC7_L4 0x28 /* Independent Sink Current LED7 (Brightness Level 4-indoor) */
62#define ADP8870_ISC7_L5 0x29 /* Independent Sink Current LED7 (Brightness Level 5-dark) */
63#define ADP8870_CMP_CTL 0x2D /* ALS Comparator Control Register */
64#define ADP8870_ALS1_EN 0x2E /* Main ALS comparator level enable */
65#define ADP8870_ALS2_EN 0x2F /* Second ALS comparator level enable */
66#define ADP8870_ALS1_STAT 0x30 /* Main ALS Comparator Status Register */
67#define ADP8870_ALS2_STAT 0x31 /* Second ALS Comparator Status Register */
68#define ADP8870_L2TRP 0x32 /* L2 comparator reference */
69#define ADP8870_L2HYS 0x33 /* L2 hysteresis */
70#define ADP8870_L3TRP 0x34 /* L3 comparator reference */
71#define ADP8870_L3HYS 0x35 /* L3 hysteresis */
72#define ADP8870_L4TRP 0x36 /* L4 comparator reference */
73#define ADP8870_L4HYS 0x37 /* L4 hysteresis */
74#define ADP8870_L5TRP 0x38 /* L5 comparator reference */
75#define ADP8870_L5HYS 0x39 /* L5 hysteresis */
76#define ADP8870_PH1LEVL 0x40 /* First phototransistor ambient light level-low byte register */
77#define ADP8870_PH1LEVH 0x41 /* First phototransistor ambient light level-high byte register */
78#define ADP8870_PH2LEVL 0x42 /* Second phototransistor ambient light level-low byte register */
79#define ADP8870_PH2LEVH 0x43 /* Second phototransistor ambient light level-high byte register */
80
81#define ADP8870_MANUFID 0x3 /* Analog Devices AD8870 Manufacturer and device ID */
82#define ADP8870_DEVID(x) ((x) & 0xF)
83#define ADP8870_MANID(x) ((x) >> 4)
84
85/* MDCR Device mode and status */
86#define D7ALSEN (1 << 7)
87#define INT_CFG (1 << 6)
88#define NSTBY (1 << 5)
89#define DIM_EN (1 << 4)
90#define GDWN_DIS (1 << 3)
91#define SIS_EN (1 << 2)
92#define CMP_AUTOEN (1 << 1)
93#define BLEN (1 << 0)
94
95/* ADP8870_ALS1_EN Main ALS comparator level enable */
96#define L5_EN (1 << 3)
97#define L4_EN (1 << 2)
98#define L3_EN (1 << 1)
99#define L2_EN (1 << 0)
100
101#define CFGR_BLV_SHIFT 3
102#define CFGR_BLV_MASK 0x7
103#define ADP8870_FLAG_LED_MASK 0xFF
104
105#define FADE_VAL(in, out) ((0xF & (in)) | ((0xF & (out)) << 4))
106#define BL_CFGR_VAL(law, blv) ((((blv) & CFGR_BLV_MASK) << CFGR_BLV_SHIFT) | ((0x3 & (law)) << 1))
107#define ALS_CMPR_CFG_VAL(filt) ((0x7 & (filt)) << 1)
108
109struct adp8870_bl {
110 struct i2c_client *client;
111 struct backlight_device *bl;
112 struct adp8870_led *led;
113 struct adp8870_backlight_platform_data *pdata;
114 struct mutex lock;
115 unsigned long cached_daylight_max;
116 int id;
117 int revid;
118 int current_brightness;
119};
120
121struct adp8870_led {
122 struct led_classdev cdev;
123 struct work_struct work;
124 struct i2c_client *client;
125 enum led_brightness new_brightness;
126 int id;
127 int flags;
128};
129
130static int adp8870_read(struct i2c_client *client, int reg, uint8_t *val)
131{
132 int ret;
133
134 ret = i2c_smbus_read_byte_data(client, reg);
135 if (ret < 0) {
136 dev_err(&client->dev, "failed reading at 0x%02x\n", reg);
137 return ret;
138 }
139
140 *val = ret;
141 return 0;
142}
143
144
145static int adp8870_write(struct i2c_client *client, u8 reg, u8 val)
146{
147 int ret = i2c_smbus_write_byte_data(client, reg, val);
148 if (ret)
149 dev_err(&client->dev, "failed to write\n");
150
151 return ret;
152}
153
154static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
155{
156 struct adp8870_bl *data = i2c_get_clientdata(client);
157 uint8_t reg_val;
158 int ret;
159
160 mutex_lock(&data->lock);
161
162 ret = adp8870_read(client, reg, &reg_val);
163
164 if (!ret && ((reg_val & bit_mask) == 0)) {
165 reg_val |= bit_mask;
166 ret = adp8870_write(client, reg, reg_val);
167 }
168
169 mutex_unlock(&data->lock);
170 return ret;
171}
172
173static int adp8870_clr_bits(struct i2c_client *client, int reg, uint8_t bit_mask)
174{
175 struct adp8870_bl *data = i2c_get_clientdata(client);
176 uint8_t reg_val;
177 int ret;
178
179 mutex_lock(&data->lock);
180
181 ret = adp8870_read(client, reg, &reg_val);
182
183 if (!ret && (reg_val & bit_mask)) {
184 reg_val &= ~bit_mask;
185 ret = adp8870_write(client, reg, reg_val);
186 }
187
188 mutex_unlock(&data->lock);
189 return ret;
190}
191
192/*
193 * Independent sink / LED
194 */
195#if defined(ADP8870_USE_LEDS)
196static void adp8870_led_work(struct work_struct *work)
197{
198 struct adp8870_led *led = container_of(work, struct adp8870_led, work);
199 adp8870_write(led->client, ADP8870_ISC1 + led->id - 1,
200 led->new_brightness >> 1);
201}
202
203static void adp8870_led_set(struct led_classdev *led_cdev,
204 enum led_brightness value)
205{
206 struct adp8870_led *led;
207
208 led = container_of(led_cdev, struct adp8870_led, cdev);
209 led->new_brightness = value;
210 /*
211 * Use workqueue for IO since I2C operations can sleep.
212 */
213 schedule_work(&led->work);
214}
215
216static int adp8870_led_setup(struct adp8870_led *led)
217{
218 struct i2c_client *client = led->client;
219 int ret = 0;
220
221 ret = adp8870_write(client, ADP8870_ISC1 + led->id - 1, 0);
222 if (ret)
223 return ret;
224
225 ret = adp8870_set_bits(client, ADP8870_ISCC, 1 << (led->id - 1));
226 if (ret)
227 return ret;
228
229 if (led->id > 4)
230 ret = adp8870_set_bits(client, ADP8870_ISCT1,
231 (led->flags & 0x3) << ((led->id - 5) * 2));
232 else
233 ret = adp8870_set_bits(client, ADP8870_ISCT2,
234 (led->flags & 0x3) << ((led->id - 1) * 2));
235
236 return ret;
237}
238
239static int __devinit adp8870_led_probe(struct i2c_client *client)
240{
241 struct adp8870_backlight_platform_data *pdata =
242 client->dev.platform_data;
243 struct adp8870_bl *data = i2c_get_clientdata(client);
244 struct adp8870_led *led, *led_dat;
245 struct led_info *cur_led;
246 int ret, i;
247
248
249 led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
250 if (led == NULL) {
251 dev_err(&client->dev, "failed to alloc memory\n");
252 return -ENOMEM;
253 }
254
255 ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
256 if (ret)
257 goto err_free;
258
259 ret = adp8870_write(client, ADP8870_ISCT1,
260 (pdata->led_on_time & 0x3) << 6);
261 if (ret)
262 goto err_free;
263
264 ret = adp8870_write(client, ADP8870_ISCF,
265 FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
266 if (ret)
267 goto err_free;
268
269 for (i = 0; i < pdata->num_leds; ++i) {
270 cur_led = &pdata->leds[i];
271 led_dat = &led[i];
272
273 led_dat->id = cur_led->flags & ADP8870_FLAG_LED_MASK;
274
275 if (led_dat->id > 7 || led_dat->id < 1) {
276 dev_err(&client->dev, "Invalid LED ID %d\n",
277 led_dat->id);
278 goto err;
279 }
280
281 if (pdata->bl_led_assign & (1 << (led_dat->id - 1))) {
282 dev_err(&client->dev, "LED %d used by Backlight\n",
283 led_dat->id);
284 goto err;
285 }
286
287 led_dat->cdev.name = cur_led->name;
288 led_dat->cdev.default_trigger = cur_led->default_trigger;
289 led_dat->cdev.brightness_set = adp8870_led_set;
290 led_dat->cdev.brightness = LED_OFF;
291 led_dat->flags = cur_led->flags >> FLAG_OFFT_SHIFT;
292 led_dat->client = client;
293 led_dat->new_brightness = LED_OFF;
294 INIT_WORK(&led_dat->work, adp8870_led_work);
295
296 ret = led_classdev_register(&client->dev, &led_dat->cdev);
297 if (ret) {
298 dev_err(&client->dev, "failed to register LED %d\n",
299 led_dat->id);
300 goto err;
301 }
302
303 ret = adp8870_led_setup(led_dat);
304 if (ret) {
305 dev_err(&client->dev, "failed to write\n");
306 i++;
307 goto err;
308 }
309 }
310
311 data->led = led;
312
313 return 0;
314
315 err:
316 for (i = i - 1; i >= 0; --i) {
317 led_classdev_unregister(&led[i].cdev);
318 cancel_work_sync(&led[i].work);
319 }
320
321 err_free:
322 kfree(led);
323
324 return ret;
325}
326
327static int __devexit adp8870_led_remove(struct i2c_client *client)
328{
329 struct adp8870_backlight_platform_data *pdata =
330 client->dev.platform_data;
331 struct adp8870_bl *data = i2c_get_clientdata(client);
332 int i;
333
334 for (i = 0; i < pdata->num_leds; i++) {
335 led_classdev_unregister(&data->led[i].cdev);
336 cancel_work_sync(&data->led[i].work);
337 }
338
339 kfree(data->led);
340 return 0;
341}
342#else
343static int __devinit adp8870_led_probe(struct i2c_client *client)
344{
345 return 0;
346}
347
348static int __devexit adp8870_led_remove(struct i2c_client *client)
349{
350 return 0;
351}
352#endif
353
354static int adp8870_bl_set(struct backlight_device *bl, int brightness)
355{
356 struct adp8870_bl *data = bl_get_data(bl);
357 struct i2c_client *client = data->client;
358 int ret = 0;
359
360 if (data->pdata->en_ambl_sens) {
361 if ((brightness > 0) && (brightness < ADP8870_MAX_BRIGHTNESS)) {
362 /* Disable Ambient Light auto adjust */
363 ret = adp8870_clr_bits(client, ADP8870_MDCR,
364 CMP_AUTOEN);
365 if (ret)
366 return ret;
367 ret = adp8870_write(client, ADP8870_BLMX1, brightness);
368 if (ret)
369 return ret;
370 } else {
371 /*
372 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
373 * restore daylight l1 sysfs brightness
374 */
375 ret = adp8870_write(client, ADP8870_BLMX1,
376 data->cached_daylight_max);
377 if (ret)
378 return ret;
379
380 ret = adp8870_set_bits(client, ADP8870_MDCR,
381 CMP_AUTOEN);
382 if (ret)
383 return ret;
384 }
385 } else {
386 ret = adp8870_write(client, ADP8870_BLMX1, brightness);
387 if (ret)
388 return ret;
389 }
390
391 if (data->current_brightness && brightness == 0)
392 ret = adp8870_set_bits(client,
393 ADP8870_MDCR, DIM_EN);
394 else if (data->current_brightness == 0 && brightness)
395 ret = adp8870_clr_bits(client,
396 ADP8870_MDCR, DIM_EN);
397
398 if (!ret)
399 data->current_brightness = brightness;
400
401 return ret;
402}
403
404static int adp8870_bl_update_status(struct backlight_device *bl)
405{
406 int brightness = bl->props.brightness;
407 if (bl->props.power != FB_BLANK_UNBLANK)
408 brightness = 0;
409
410 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
411 brightness = 0;
412
413 return adp8870_bl_set(bl, brightness);
414}
415
416static int adp8870_bl_get_brightness(struct backlight_device *bl)
417{
418 struct adp8870_bl *data = bl_get_data(bl);
419
420 return data->current_brightness;
421}
422
423static const struct backlight_ops adp8870_bl_ops = {
424 .update_status = adp8870_bl_update_status,
425 .get_brightness = adp8870_bl_get_brightness,
426};
427
428static int adp8870_bl_setup(struct backlight_device *bl)
429{
430 struct adp8870_bl *data = bl_get_data(bl);
431 struct i2c_client *client = data->client;
432 struct adp8870_backlight_platform_data *pdata = data->pdata;
433 int ret = 0;
434
435 ret = adp8870_write(client, ADP8870_BLSEL, ~pdata->bl_led_assign);
436 if (ret)
437 return ret;
438
439 ret = adp8870_write(client, ADP8870_PWMLED, pdata->pwm_assign);
440 if (ret)
441 return ret;
442
443 ret = adp8870_write(client, ADP8870_BLMX1, pdata->l1_daylight_max);
444 if (ret)
445 return ret;
446
447 ret = adp8870_write(client, ADP8870_BLDM1, pdata->l1_daylight_dim);
448 if (ret)
449 return ret;
450
451 if (pdata->en_ambl_sens) {
452 data->cached_daylight_max = pdata->l1_daylight_max;
453 ret = adp8870_write(client, ADP8870_BLMX2,
454 pdata->l2_bright_max);
455 if (ret)
456 return ret;
457 ret = adp8870_write(client, ADP8870_BLDM2,
458 pdata->l2_bright_dim);
459 if (ret)
460 return ret;
461
462 ret = adp8870_write(client, ADP8870_BLMX3,
463 pdata->l3_office_max);
464 if (ret)
465 return ret;
466 ret = adp8870_write(client, ADP8870_BLDM3,
467 pdata->l3_office_dim);
468 if (ret)
469 return ret;
470
471 ret = adp8870_write(client, ADP8870_BLMX4,
472 pdata->l4_indoor_max);
473 if (ret)
474 return ret;
475
476 ret = adp8870_write(client, ADP8870_BLDM4,
477 pdata->l4_indor_dim);
478 if (ret)
479 return ret;
480
481 ret = adp8870_write(client, ADP8870_BLMX5,
482 pdata->l5_dark_max);
483 if (ret)
484 return ret;
485
486 ret = adp8870_write(client, ADP8870_BLDM5,
487 pdata->l5_dark_dim);
488 if (ret)
489 return ret;
490
491 ret = adp8870_write(client, ADP8870_L2TRP, pdata->l2_trip);
492 if (ret)
493 return ret;
494
495 ret = adp8870_write(client, ADP8870_L2HYS, pdata->l2_hyst);
496 if (ret)
497 return ret;
498
499 ret = adp8870_write(client, ADP8870_L3TRP, pdata->l3_trip);
500 if (ret)
501 return ret;
502
503 ret = adp8870_write(client, ADP8870_L3HYS, pdata->l3_hyst);
504 if (ret)
505 return ret;
506
507 ret = adp8870_write(client, ADP8870_L4TRP, pdata->l4_trip);
508 if (ret)
509 return ret;
510
511 ret = adp8870_write(client, ADP8870_L4HYS, pdata->l4_hyst);
512 if (ret)
513 return ret;
514
515 ret = adp8870_write(client, ADP8870_L5TRP, pdata->l5_trip);
516 if (ret)
517 return ret;
518
519 ret = adp8870_write(client, ADP8870_L5HYS, pdata->l5_hyst);
520 if (ret)
521 return ret;
522
523 ret = adp8870_write(client, ADP8870_ALS1_EN, L5_EN | L4_EN |
524 L3_EN | L2_EN);
525 if (ret)
526 return ret;
527
528 ret = adp8870_write(client, ADP8870_CMP_CTL,
529 ALS_CMPR_CFG_VAL(pdata->abml_filt));
530 if (ret)
531 return ret;
532 }
533
534 ret = adp8870_write(client, ADP8870_CFGR,
535 BL_CFGR_VAL(pdata->bl_fade_law, 0));
536 if (ret)
537 return ret;
538
539 ret = adp8870_write(client, ADP8870_BLFR, FADE_VAL(pdata->bl_fade_in,
540 pdata->bl_fade_out));
541 if (ret)
542 return ret;
543 /*
544 * ADP8870 Rev0 requires GDWN_DIS bit set
545 */
546
547 ret = adp8870_set_bits(client, ADP8870_MDCR, BLEN | DIM_EN | NSTBY |
548 (data->revid == 0 ? GDWN_DIS : 0));
549
550 return ret;
551}
552
553static ssize_t adp8870_show(struct device *dev, char *buf, int reg)
554{
555 struct adp8870_bl *data = dev_get_drvdata(dev);
556 int error;
557 uint8_t reg_val;
558
559 mutex_lock(&data->lock);
560 error = adp8870_read(data->client, reg, &reg_val);
561 mutex_unlock(&data->lock);
562
563 if (error < 0)
564 return error;
565
566 return sprintf(buf, "%u\n", reg_val);
567}
568
569static ssize_t adp8870_store(struct device *dev, const char *buf,
570 size_t count, int reg)
571{
572 struct adp8870_bl *data = dev_get_drvdata(dev);
573 unsigned long val;
574 int ret;
575
576 ret = strict_strtoul(buf, 10, &val);
577 if (ret)
578 return ret;
579
580 mutex_lock(&data->lock);
581 adp8870_write(data->client, reg, val);
582 mutex_unlock(&data->lock);
583
584 return count;
585}
586
587static ssize_t adp8870_bl_l5_dark_max_show(struct device *dev,
588 struct device_attribute *attr, char *buf)
589{
590 return adp8870_show(dev, buf, ADP8870_BLMX5);
591}
592
593static ssize_t adp8870_bl_l5_dark_max_store(struct device *dev,
594 struct device_attribute *attr, const char *buf, size_t count)
595{
596 return adp8870_store(dev, buf, count, ADP8870_BLMX5);
597}
598static DEVICE_ATTR(l5_dark_max, 0664, adp8870_bl_l5_dark_max_show,
599 adp8870_bl_l5_dark_max_store);
600
601
602static ssize_t adp8870_bl_l4_indoor_max_show(struct device *dev,
603 struct device_attribute *attr, char *buf)
604{
605 return adp8870_show(dev, buf, ADP8870_BLMX4);
606}
607
608static ssize_t adp8870_bl_l4_indoor_max_store(struct device *dev,
609 struct device_attribute *attr, const char *buf, size_t count)
610{
611 return adp8870_store(dev, buf, count, ADP8870_BLMX4);
612}
613static DEVICE_ATTR(l4_indoor_max, 0664, adp8870_bl_l4_indoor_max_show,
614 adp8870_bl_l4_indoor_max_store);
615
616
617static ssize_t adp8870_bl_l3_office_max_show(struct device *dev,
618 struct device_attribute *attr, char *buf)
619{
620 return adp8870_show(dev, buf, ADP8870_BLMX3);
621}
622
623static ssize_t adp8870_bl_l3_office_max_store(struct device *dev,
624 struct device_attribute *attr, const char *buf, size_t count)
625{
626 return adp8870_store(dev, buf, count, ADP8870_BLMX3);
627}
628
629static DEVICE_ATTR(l3_office_max, 0664, adp8870_bl_l3_office_max_show,
630 adp8870_bl_l3_office_max_store);
631
632static ssize_t adp8870_bl_l2_bright_max_show(struct device *dev,
633 struct device_attribute *attr, char *buf)
634{
635 return adp8870_show(dev, buf, ADP8870_BLMX2);
636}
637
638static ssize_t adp8870_bl_l2_bright_max_store(struct device *dev,
639 struct device_attribute *attr, const char *buf, size_t count)
640{
641 return adp8870_store(dev, buf, count, ADP8870_BLMX2);
642}
643static DEVICE_ATTR(l2_bright_max, 0664, adp8870_bl_l2_bright_max_show,
644 adp8870_bl_l2_bright_max_store);
645
646static ssize_t adp8870_bl_l1_daylight_max_show(struct device *dev,
647 struct device_attribute *attr, char *buf)
648{
649 return adp8870_show(dev, buf, ADP8870_BLMX1);
650}
651
652static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
653 struct device_attribute *attr, const char *buf, size_t count)
654{
655 struct adp8870_bl *data = dev_get_drvdata(dev);
656 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
657 if (ret)
658 return ret;
659
660 return adp8870_store(dev, buf, count, ADP8870_BLMX1);
661}
662static DEVICE_ATTR(l1_daylight_max, 0664, adp8870_bl_l1_daylight_max_show,
663 adp8870_bl_l1_daylight_max_store);
664
665static ssize_t adp8870_bl_l5_dark_dim_show(struct device *dev,
666 struct device_attribute *attr, char *buf)
667{
668 return adp8870_show(dev, buf, ADP8870_BLDM5);
669}
670
671static ssize_t adp8870_bl_l5_dark_dim_store(struct device *dev,
672 struct device_attribute *attr,
673 const char *buf, size_t count)
674{
675 return adp8870_store(dev, buf, count, ADP8870_BLDM5);
676}
677static DEVICE_ATTR(l5_dark_dim, 0664, adp8870_bl_l5_dark_dim_show,
678 adp8870_bl_l5_dark_dim_store);
679
680static ssize_t adp8870_bl_l4_indoor_dim_show(struct device *dev,
681 struct device_attribute *attr, char *buf)
682{
683 return adp8870_show(dev, buf, ADP8870_BLDM4);
684}
685
686static ssize_t adp8870_bl_l4_indoor_dim_store(struct device *dev,
687 struct device_attribute *attr,
688 const char *buf, size_t count)
689{
690 return adp8870_store(dev, buf, count, ADP8870_BLDM4);
691}
692static DEVICE_ATTR(l4_indoor_dim, 0664, adp8870_bl_l4_indoor_dim_show,
693 adp8870_bl_l4_indoor_dim_store);
694
695
696static ssize_t adp8870_bl_l3_office_dim_show(struct device *dev,
697 struct device_attribute *attr, char *buf)
698{
699 return adp8870_show(dev, buf, ADP8870_BLDM3);
700}
701
702static ssize_t adp8870_bl_l3_office_dim_store(struct device *dev,
703 struct device_attribute *attr,
704 const char *buf, size_t count)
705{
706 return adp8870_store(dev, buf, count, ADP8870_BLDM3);
707}
708static DEVICE_ATTR(l3_office_dim, 0664, adp8870_bl_l3_office_dim_show,
709 adp8870_bl_l3_office_dim_store);
710
711static ssize_t adp8870_bl_l2_bright_dim_show(struct device *dev,
712 struct device_attribute *attr, char *buf)
713{
714 return adp8870_show(dev, buf, ADP8870_BLDM2);
715}
716
717static ssize_t adp8870_bl_l2_bright_dim_store(struct device *dev,
718 struct device_attribute *attr,
719 const char *buf, size_t count)
720{
721 return adp8870_store(dev, buf, count, ADP8870_BLDM2);
722}
723static DEVICE_ATTR(l2_bright_dim, 0664, adp8870_bl_l2_bright_dim_show,
724 adp8870_bl_l2_bright_dim_store);
725
726static ssize_t adp8870_bl_l1_daylight_dim_show(struct device *dev,
727 struct device_attribute *attr, char *buf)
728{
729 return adp8870_show(dev, buf, ADP8870_BLDM1);
730}
731
732static ssize_t adp8870_bl_l1_daylight_dim_store(struct device *dev,
733 struct device_attribute *attr,
734 const char *buf, size_t count)
735{
736 return adp8870_store(dev, buf, count, ADP8870_BLDM1);
737}
738static DEVICE_ATTR(l1_daylight_dim, 0664, adp8870_bl_l1_daylight_dim_show,
739 adp8870_bl_l1_daylight_dim_store);
740
741#ifdef ADP8870_EXT_FEATURES
742static ssize_t adp8870_bl_ambient_light_level_show(struct device *dev,
743 struct device_attribute *attr, char *buf)
744{
745 struct adp8870_bl *data = dev_get_drvdata(dev);
746 int error;
747 uint8_t reg_val;
748 uint16_t ret_val;
749
750 mutex_lock(&data->lock);
751 error = adp8870_read(data->client, ADP8870_PH1LEVL, &reg_val);
752 if (error < 0) {
753 mutex_unlock(&data->lock);
754 return error;
755 }
756 ret_val = reg_val;
757 error = adp8870_read(data->client, ADP8870_PH1LEVH, &reg_val);
758 mutex_unlock(&data->lock);
759
760 if (error < 0)
761 return error;
762
763 /* Return 13-bit conversion value for the first light sensor */
764 ret_val += (reg_val & 0x1F) << 8;
765
766 return sprintf(buf, "%u\n", ret_val);
767}
768static DEVICE_ATTR(ambient_light_level, 0444,
769 adp8870_bl_ambient_light_level_show, NULL);
770
771static ssize_t adp8870_bl_ambient_light_zone_show(struct device *dev,
772 struct device_attribute *attr, char *buf)
773{
774 struct adp8870_bl *data = dev_get_drvdata(dev);
775 int error;
776 uint8_t reg_val;
777
778 mutex_lock(&data->lock);
779 error = adp8870_read(data->client, ADP8870_CFGR, &reg_val);
780 mutex_unlock(&data->lock);
781
782 if (error < 0)
783 return error;
784
785 return sprintf(buf, "%u\n",
786 ((reg_val >> CFGR_BLV_SHIFT) & CFGR_BLV_MASK) + 1);
787}
788
789static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
790 struct device_attribute *attr,
791 const char *buf, size_t count)
792{
793 struct adp8870_bl *data = dev_get_drvdata(dev);
794 unsigned long val;
795 uint8_t reg_val;
796 int ret;
797
798 ret = strict_strtoul(buf, 10, &val);
799 if (ret)
800 return ret;
801
802 if (val == 0) {
803 /* Enable automatic ambient light sensing */
804 adp8870_set_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
805 } else if ((val > 0) && (val < 6)) {
806 /* Disable automatic ambient light sensing */
807 adp8870_clr_bits(data->client, ADP8870_MDCR, CMP_AUTOEN);
808
809 /* Set user supplied ambient light zone */
810 mutex_lock(&data->lock);
811 adp8870_read(data->client, ADP8870_CFGR, &reg_val);
812 reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
813 reg_val |= (val - 1) << CFGR_BLV_SHIFT;
814 adp8870_write(data->client, ADP8870_CFGR, reg_val);
815 mutex_unlock(&data->lock);
816 }
817
818 return count;
819}
820static DEVICE_ATTR(ambient_light_zone, 0664,
821 adp8870_bl_ambient_light_zone_show,
822 adp8870_bl_ambient_light_zone_store);
823#endif
824
825static struct attribute *adp8870_bl_attributes[] = {
826 &dev_attr_l5_dark_max.attr,
827 &dev_attr_l5_dark_dim.attr,
828 &dev_attr_l4_indoor_max.attr,
829 &dev_attr_l4_indoor_dim.attr,
830 &dev_attr_l3_office_max.attr,
831 &dev_attr_l3_office_dim.attr,
832 &dev_attr_l2_bright_max.attr,
833 &dev_attr_l2_bright_dim.attr,
834 &dev_attr_l1_daylight_max.attr,
835 &dev_attr_l1_daylight_dim.attr,
836#ifdef ADP8870_EXT_FEATURES
837 &dev_attr_ambient_light_level.attr,
838 &dev_attr_ambient_light_zone.attr,
839#endif
840 NULL
841};
842
843static const struct attribute_group adp8870_bl_attr_group = {
844 .attrs = adp8870_bl_attributes,
845};
846
847static int __devinit adp8870_probe(struct i2c_client *client,
848 const struct i2c_device_id *id)
849{
850 struct backlight_properties props;
851 struct backlight_device *bl;
852 struct adp8870_bl *data;
853 struct adp8870_backlight_platform_data *pdata =
854 client->dev.platform_data;
855 uint8_t reg_val;
856 int ret;
857
858 if (!i2c_check_functionality(client->adapter,
859 I2C_FUNC_SMBUS_BYTE_DATA)) {
860 dev_err(&client->dev, "SMBUS Byte Data not Supported\n");
861 return -EIO;
862 }
863
864 if (!pdata) {
865 dev_err(&client->dev, "no platform data?\n");
866 return -EINVAL;
867 }
868
869 ret = adp8870_read(client, ADP8870_MFDVID, &reg_val);
870 if (ret < 0)
871 return -EIO;
872
873 if (ADP8870_MANID(reg_val) != ADP8870_MANUFID) {
874 dev_err(&client->dev, "failed to probe\n");
875 return -ENODEV;
876 }
877
878 data = kzalloc(sizeof(*data), GFP_KERNEL);
879 if (data == NULL)
880 return -ENOMEM;
881
882 data->revid = ADP8870_DEVID(reg_val);
883 data->client = client;
884 data->pdata = pdata;
885 data->id = id->driver_data;
886 data->current_brightness = 0;
887 i2c_set_clientdata(client, data);
888
889 mutex_init(&data->lock);
890
891 memset(&props, 0, sizeof(props));
892 props.type = BACKLIGHT_RAW;
893 props.max_brightness = props.brightness = ADP8870_MAX_BRIGHTNESS;
894 bl = backlight_device_register(dev_driver_string(&client->dev),
895 &client->dev, data, &adp8870_bl_ops, &props);
896 if (IS_ERR(bl)) {
897 dev_err(&client->dev, "failed to register backlight\n");
898 ret = PTR_ERR(bl);
899 goto out2;
900 }
901
902 data->bl = bl;
903
904 if (pdata->en_ambl_sens)
905 ret = sysfs_create_group(&bl->dev.kobj,
906 &adp8870_bl_attr_group);
907
908 if (ret) {
909 dev_err(&client->dev, "failed to register sysfs\n");
910 goto out1;
911 }
912
913 ret = adp8870_bl_setup(bl);
914 if (ret) {
915 ret = -EIO;
916 goto out;
917 }
918
919 backlight_update_status(bl);
920
921 dev_info(&client->dev, "Rev.%d Backlight\n", data->revid);
922
923 if (pdata->num_leds)
924 adp8870_led_probe(client);
925
926 return 0;
927
928out:
929 if (data->pdata->en_ambl_sens)
930 sysfs_remove_group(&data->bl->dev.kobj,
931 &adp8870_bl_attr_group);
932out1:
933 backlight_device_unregister(bl);
934out2:
935 i2c_set_clientdata(client, NULL);
936 kfree(data);
937
938 return ret;
939}
940
941static int __devexit adp8870_remove(struct i2c_client *client)
942{
943 struct adp8870_bl *data = i2c_get_clientdata(client);
944
945 adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
946
947 if (data->led)
948 adp8870_led_remove(client);
949
950 if (data->pdata->en_ambl_sens)
951 sysfs_remove_group(&data->bl->dev.kobj,
952 &adp8870_bl_attr_group);
953
954 backlight_device_unregister(data->bl);
955 i2c_set_clientdata(client, NULL);
956 kfree(data);
957
958 return 0;
959}
960
961#ifdef CONFIG_PM
962static int adp8870_i2c_suspend(struct i2c_client *client, pm_message_t message)
963{
964 adp8870_clr_bits(client, ADP8870_MDCR, NSTBY);
965
966 return 0;
967}
968
969static int adp8870_i2c_resume(struct i2c_client *client)
970{
971 adp8870_set_bits(client, ADP8870_MDCR, NSTBY);
972
973 return 0;
974}
975#else
976#define adp8870_i2c_suspend NULL
977#define adp8870_i2c_resume NULL
978#endif
979
980static const struct i2c_device_id adp8870_id[] = {
981 { "adp8870", 0 },
982 { }
983};
984MODULE_DEVICE_TABLE(i2c, adp8870_id);
985
986static struct i2c_driver adp8870_driver = {
987 .driver = {
988 .name = KBUILD_MODNAME,
989 },
990 .probe = adp8870_probe,
991 .remove = __devexit_p(adp8870_remove),
992 .suspend = adp8870_i2c_suspend,
993 .resume = adp8870_i2c_resume,
994 .id_table = adp8870_id,
995};
996
997static int __init adp8870_init(void)
998{
999 return i2c_add_driver(&adp8870_driver);
1000}
1001module_init(adp8870_init);
1002
1003static void __exit adp8870_exit(void)
1004{
1005 i2c_del_driver(&adp8870_driver);
1006}
1007module_exit(adp8870_exit);
1008
1009MODULE_LICENSE("GPL v2");
1010MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
1011MODULE_DESCRIPTION("ADP8870 Backlight driver");
1012MODULE_ALIAS("platform:adp8870-backlight");
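
The new driver is purely platform-data driven: it binds on the I2C id "adp8870" and reads everything else from adp8870_backlight_platform_data. A minimal board-file sketch follows; the I2C bus number, slave address and register values are assumptions, and the field names are the subset consumed by adp8870_bl_setup() above.

#include <linux/init.h>
#include <linux/i2c.h>
#include <linux/i2c/adp8870.h>

static struct adp8870_backlight_platform_data board_adp8870_pdata = {
        .bl_led_assign   = 0x7f,        /* LED1..LED7 sink the backlight */
        .pwm_assign      = 0,
        .bl_fade_in      = 0,
        .bl_fade_out     = 0,
        .bl_fade_law     = 0,
        .en_ambl_sens    = 0,           /* no ambient light sensor wired up */
        .l1_daylight_max = 127,         /* assumed ADP8870_MAX_BRIGHTNESS */
        .l1_daylight_dim = 0,
};

static struct i2c_board_info board_i2c_bl[] __initdata = {
        {
                I2C_BOARD_INFO("adp8870", 0x2b),        /* slave address is board specific */
                .platform_data = &board_adp8870_pdata,
        },
};

/* registered from the board's init code, e.g.:
 *      i2c_register_board_info(0, board_i2c_bl, ARRAY_SIZE(board_i2c_bl));
 */
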
diff --git a/drivers/video/bf537-lq035.c b/drivers/video/bf537-lq035.c
index 47c21fb2c82f..bea53c1a4950 100644
--- a/drivers/video/bf537-lq035.c
+++ b/drivers/video/bf537-lq035.c
@@ -789,6 +789,7 @@ static int __devinit bfin_lq035_probe(struct platform_device *pdev)
789 i2c_add_driver(&ad5280_driver); 789 i2c_add_driver(&ad5280_driver);
790 790
791 memset(&props, 0, sizeof(props)); 791 memset(&props, 0, sizeof(props));
792 props.type = BACKLIGHT_RAW;
792 props.max_brightness = MAX_BRIGHENESS; 793 props.max_brightness = MAX_BRIGHENESS;
793 bl_dev = backlight_device_register("bf537-bl", NULL, NULL, 794 bl_dev = backlight_device_register("bf537-bl", NULL, NULL,
794 &bfin_lq035fb_bl_ops, &props); 795 &bfin_lq035fb_bl_ops, &props);
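
The one-liner above exists because backlight_properties now carries a type field and the backlight core warns (and falls back to a default) when a driver leaves it unset. A registration sketch showing where the assignment sits; the device name and the 255 maximum are placeholders.

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/string.h>

static int example_register_bl(struct device *parent,
                               const struct backlight_ops *ops)
{
        struct backlight_properties props;
        struct backlight_device *bd;

        memset(&props, 0, sizeof(props));
        props.type = BACKLIGHT_RAW;     /* driver drives the hardware directly */
        props.max_brightness = 255;

        bd = backlight_device_register("example-bl", parent, NULL, ops, &props);
        if (IS_ERR(bd))
                return PTR_ERR(bd);

        backlight_update_status(bd);
        return 0;
}
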
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index ebda6876d3a9..377dde3d5bfc 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1101,12 +1101,10 @@ static int __devinit broadsheetfb_probe(struct platform_device *dev)
1101 1101
1102 videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE); 1102 videomemorysize = roundup((dpyw*dpyh), PAGE_SIZE);
1103 1103
1104 videomemory = vmalloc(videomemorysize); 1104 videomemory = vzalloc(videomemorysize);
1105 if (!videomemory) 1105 if (!videomemory)
1106 goto err_fb_rel; 1106 goto err_fb_rel;
1107 1107
1108 memset(videomemory, 0, videomemorysize);
1109
1110 info->screen_base = (char *)videomemory; 1108 info->screen_base = (char *)videomemory;
1111 info->fbops = &broadsheetfb_ops; 1109 info->fbops = &broadsheetfb_ops;
1112 1110
diff --git a/drivers/video/efifb.c b/drivers/video/efifb.c
index fb205843c2c7..784139aed079 100644
--- a/drivers/video/efifb.c
+++ b/drivers/video/efifb.c
@@ -16,6 +16,8 @@
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <video/vga.h> 17#include <video/vga.h>
18 18
19static bool request_mem_succeeded = false;
20
19static struct fb_var_screeninfo efifb_defined __devinitdata = { 21static struct fb_var_screeninfo efifb_defined __devinitdata = {
20 .activate = FB_ACTIVATE_NOW, 22 .activate = FB_ACTIVATE_NOW,
21 .height = -1, 23 .height = -1,
@@ -281,7 +283,9 @@ static void efifb_destroy(struct fb_info *info)
281{ 283{
282 if (info->screen_base) 284 if (info->screen_base)
283 iounmap(info->screen_base); 285 iounmap(info->screen_base);
284 release_mem_region(info->apertures->ranges[0].base, info->apertures->ranges[0].size); 286 if (request_mem_succeeded)
287 release_mem_region(info->apertures->ranges[0].base,
288 info->apertures->ranges[0].size);
285 framebuffer_release(info); 289 framebuffer_release(info);
286} 290}
287 291
@@ -326,14 +330,13 @@ static int __init efifb_setup(char *options)
326 return 0; 330 return 0;
327} 331}
328 332
329static int __devinit efifb_probe(struct platform_device *dev) 333static int __init efifb_probe(struct platform_device *dev)
330{ 334{
331 struct fb_info *info; 335 struct fb_info *info;
332 int err; 336 int err;
333 unsigned int size_vmode; 337 unsigned int size_vmode;
334 unsigned int size_remap; 338 unsigned int size_remap;
335 unsigned int size_total; 339 unsigned int size_total;
336 int request_succeeded = 0;
337 340
338 if (!screen_info.lfb_depth) 341 if (!screen_info.lfb_depth)
339 screen_info.lfb_depth = 32; 342 screen_info.lfb_depth = 32;
@@ -387,7 +390,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
387 efifb_fix.smem_len = size_remap; 390 efifb_fix.smem_len = size_remap;
388 391
389 if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) { 392 if (request_mem_region(efifb_fix.smem_start, size_remap, "efifb")) {
390 request_succeeded = 1; 393 request_mem_succeeded = true;
391 } else { 394 } else {
392 /* We cannot make this fatal. Sometimes this comes from magic 395 /* We cannot make this fatal. Sometimes this comes from magic
393 spaces our resource handlers simply don't know about */ 396 spaces our resource handlers simply don't know about */
@@ -413,7 +416,7 @@ static int __devinit efifb_probe(struct platform_device *dev)
413 info->apertures->ranges[0].base = efifb_fix.smem_start; 416 info->apertures->ranges[0].base = efifb_fix.smem_start;
414 info->apertures->ranges[0].size = size_remap; 417 info->apertures->ranges[0].size = size_remap;
415 418
416 info->screen_base = ioremap(efifb_fix.smem_start, efifb_fix.smem_len); 419 info->screen_base = ioremap_wc(efifb_fix.smem_start, efifb_fix.smem_len);
417 if (!info->screen_base) { 420 if (!info->screen_base) {
418 printk(KERN_ERR "efifb: abort, cannot ioremap video memory " 421 printk(KERN_ERR "efifb: abort, cannot ioremap video memory "
419 "0x%x @ 0x%lx\n", 422 "0x%x @ 0x%lx\n",
@@ -491,13 +494,12 @@ err_unmap:
491err_release_fb: 494err_release_fb:
492 framebuffer_release(info); 495 framebuffer_release(info);
493err_release_mem: 496err_release_mem:
494 if (request_succeeded) 497 if (request_mem_succeeded)
495 release_mem_region(efifb_fix.smem_start, size_total); 498 release_mem_region(efifb_fix.smem_start, size_total);
496 return err; 499 return err;
497} 500}
498 501
499static struct platform_driver efifb_driver = { 502static struct platform_driver efifb_driver = {
500 .probe = efifb_probe,
501 .driver = { 503 .driver = {
502 .name = "efifb", 504 .name = "efifb",
503 }, 505 },
@@ -528,13 +530,21 @@ static int __init efifb_init(void)
528 if (!screen_info.lfb_linelength) 530 if (!screen_info.lfb_linelength)
529 return -ENODEV; 531 return -ENODEV;
530 532
531 ret = platform_driver_register(&efifb_driver); 533 ret = platform_device_register(&efifb_device);
534 if (ret)
535 return ret;
532 536
533 if (!ret) { 537 /*
534 ret = platform_device_register(&efifb_device); 538 * This is not just an optimization. We will interfere
535 if (ret) 539 * with a real driver if we get reprobed, so don't allow
536 platform_driver_unregister(&efifb_driver); 540 * it.
541 */
542 ret = platform_driver_probe(&efifb_driver, efifb_probe);
543 if (ret) {
544 platform_device_unregister(&efifb_device);
545 return ret;
537 } 546 }
547
538 return ret; 548 return ret;
539} 549}
540module_init(efifb_init); 550module_init(efifb_init);
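
The efifb change switches to the register-device-then-probe-once idiom so a later, real driver is never disturbed by a reprobe. The general pattern is sketched below with hypothetical names; it is not efifb's code.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init example_probe(struct platform_device *pdev)
{
        return 0;       /* claim the boot-time device */
}

static struct platform_driver example_driver = {
        .driver = { .name = "examplefb" },      /* no .probe: bind once only */
};

static struct platform_device example_device = {
        .name = "examplefb",
};

static int __init example_init(void)
{
        int ret = platform_device_register(&example_device);

        if (ret)
                return ret;

        ret = platform_driver_probe(&example_driver, example_probe);
        if (ret)
                platform_device_unregister(&example_device);
        return ret;
}
module_init(example_init);
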
diff --git a/drivers/video/hecubafb.c b/drivers/video/hecubafb.c
index 1b94643ecbcf..fbef15f7a218 100644
--- a/drivers/video/hecubafb.c
+++ b/drivers/video/hecubafb.c
@@ -231,11 +231,10 @@ static int __devinit hecubafb_probe(struct platform_device *dev)
231 231
232 videomemorysize = (DPY_W*DPY_H)/8; 232 videomemorysize = (DPY_W*DPY_H)/8;
233 233
234 if (!(videomemory = vmalloc(videomemorysize))) 234 videomemory = vzalloc(videomemorysize);
235 if (!videomemory)
235 return retval; 236 return retval;
236 237
237 memset(videomemory, 0, videomemorysize);
238
239 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev); 238 info = framebuffer_alloc(sizeof(struct hecubafb_par), &dev->dev);
240 if (!info) 239 if (!info)
241 goto err_fballoc; 240 goto err_fballoc;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index d2ccfd6e662c..f135dbead07d 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -856,10 +856,10 @@ failed_platform_init:
856 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu, 856 dma_free_writecombine(&pdev->dev,fbi->map_size,fbi->map_cpu,
857 fbi->map_dma); 857 fbi->map_dma);
858failed_map: 858failed_map:
859 clk_put(fbi->clk);
860failed_getclock:
861 iounmap(fbi->regs); 859 iounmap(fbi->regs);
862failed_ioremap: 860failed_ioremap:
861 clk_put(fbi->clk);
862failed_getclock:
863 release_mem_region(res->start, resource_size(res)); 863 release_mem_region(res->start, resource_size(res));
864failed_req: 864failed_req:
865 kfree(info->pseudo_palette); 865 kfree(info->pseudo_palette);
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c
index ed64edfd2c43..97d45e5115e2 100644
--- a/drivers/video/metronomefb.c
+++ b/drivers/video/metronomefb.c
@@ -628,12 +628,10 @@ static int __devinit metronomefb_probe(struct platform_device *dev)
628 /* we need to add a spare page because our csum caching scheme walks 628 /* we need to add a spare page because our csum caching scheme walks
629 * to the end of the page */ 629 * to the end of the page */
630 videomemorysize = PAGE_SIZE + (fw * fh); 630 videomemorysize = PAGE_SIZE + (fw * fh);
631 videomemory = vmalloc(videomemorysize); 631 videomemory = vzalloc(videomemorysize);
632 if (!videomemory) 632 if (!videomemory)
633 goto err_fb_rel; 633 goto err_fb_rel;
634 634
635 memset(videomemory, 0, videomemorysize);
636
637 info->screen_base = (char __force __iomem *)videomemory; 635 info->screen_base = (char __force __iomem *)videomemory;
638 info->fbops = &metronomefb_ops; 636 info->fbops = &metronomefb_ops;
639 637
diff --git a/drivers/video/modedb.c b/drivers/video/modedb.c
index 48c3ea8652b6..cb175fe7abc0 100644
--- a/drivers/video/modedb.c
+++ b/drivers/video/modedb.c
@@ -1128,3 +1128,4 @@ EXPORT_SYMBOL(fb_find_best_mode);
1128EXPORT_SYMBOL(fb_find_nearest_mode); 1128EXPORT_SYMBOL(fb_find_nearest_mode);
1129EXPORT_SYMBOL(fb_videomode_to_modelist); 1129EXPORT_SYMBOL(fb_videomode_to_modelist);
1130EXPORT_SYMBOL(fb_find_mode); 1130EXPORT_SYMBOL(fb_find_mode);
1131EXPORT_SYMBOL(fb_find_mode_cvt);
diff --git a/drivers/video/pxa168fb.c b/drivers/video/pxa168fb.c
index 35f61dd0cb3a..bb95ec56d25d 100644
--- a/drivers/video/pxa168fb.c
+++ b/drivers/video/pxa168fb.c
@@ -623,19 +623,21 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 623 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
624 if (res == NULL) { 624 if (res == NULL) {
625 dev_err(&pdev->dev, "no IO memory defined\n"); 625 dev_err(&pdev->dev, "no IO memory defined\n");
626 return -ENOENT; 626 ret = -ENOENT;
627 goto failed_put_clk;
627 } 628 }
628 629
629 irq = platform_get_irq(pdev, 0); 630 irq = platform_get_irq(pdev, 0);
630 if (irq < 0) { 631 if (irq < 0) {
631 dev_err(&pdev->dev, "no IRQ defined\n"); 632 dev_err(&pdev->dev, "no IRQ defined\n");
632 return -ENOENT; 633 ret = -ENOENT;
634 goto failed_put_clk;
633 } 635 }
634 636
635 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); 637 info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev);
636 if (info == NULL) { 638 if (info == NULL) {
637 clk_put(clk); 639 ret = -ENOMEM;
638 return -ENOMEM; 640 goto failed_put_clk;
639 } 641 }
640 642
641 /* Initialize private data */ 643 /* Initialize private data */
@@ -671,7 +673,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
671 fbi->reg_base = ioremap_nocache(res->start, resource_size(res)); 673 fbi->reg_base = ioremap_nocache(res->start, resource_size(res));
672 if (fbi->reg_base == NULL) { 674 if (fbi->reg_base == NULL) {
673 ret = -ENOMEM; 675 ret = -ENOMEM;
674 goto failed; 676 goto failed_free_info;
675 } 677 }
676 678
677 /* 679 /*
@@ -683,7 +685,7 @@ static int __devinit pxa168fb_probe(struct platform_device *pdev)
683 &fbi->fb_start_dma, GFP_KERNEL); 685 &fbi->fb_start_dma, GFP_KERNEL);
684 if (info->screen_base == NULL) { 686 if (info->screen_base == NULL) {
685 ret = -ENOMEM; 687 ret = -ENOMEM;
686 goto failed; 688 goto failed_free_info;
687 } 689 }
688 690
689 info->fix.smem_start = (unsigned long)fbi->fb_start_dma; 691 info->fix.smem_start = (unsigned long)fbi->fb_start_dma;
@@ -772,8 +774,9 @@ failed_free_clk:
772failed_free_fbmem: 774failed_free_fbmem:
773 dma_free_coherent(fbi->dev, info->fix.smem_len, 775 dma_free_coherent(fbi->dev, info->fix.smem_len,
774 info->screen_base, fbi->fb_start_dma); 776 info->screen_base, fbi->fb_start_dma);
775failed: 777failed_free_info:
776 kfree(info); 778 kfree(info);
779failed_put_clk:
777 clk_put(clk); 780 clk_put(clk);
778 781
779 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); 782 dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index 0352afa49a39..4aecf213c9be 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -235,13 +235,12 @@ static int s3c_fb_check_var(struct fb_var_screeninfo *var,
235 struct fb_info *info) 235 struct fb_info *info)
236{ 236{
237 struct s3c_fb_win *win = info->par; 237 struct s3c_fb_win *win = info->par;
238 struct s3c_fb_pd_win *windata = win->windata;
239 struct s3c_fb *sfb = win->parent; 238 struct s3c_fb *sfb = win->parent;
240 239
241 dev_dbg(sfb->dev, "checking parameters\n"); 240 dev_dbg(sfb->dev, "checking parameters\n");
242 241
243 var->xres_virtual = max((unsigned int)windata->virtual_x, var->xres); 242 var->xres_virtual = max(var->xres_virtual, var->xres);
244 var->yres_virtual = max((unsigned int)windata->virtual_y, var->yres); 243 var->yres_virtual = max(var->yres_virtual, var->yres);
245 244
246 if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) { 245 if (!s3c_fb_validate_win_bpp(win, var->bits_per_pixel)) {
247 dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n", 246 dev_dbg(sfb->dev, "win %d: unsupported bpp %d\n",
@@ -558,6 +557,13 @@ static int s3c_fb_set_par(struct fb_info *info)
558 vidosd_set_alpha(win, alpha); 557 vidosd_set_alpha(win, alpha);
559 vidosd_set_size(win, data); 558 vidosd_set_size(win, data);
560 559
560 /* Enable DMA channel for this window */
561 if (sfb->variant.has_shadowcon) {
562 data = readl(sfb->regs + SHADOWCON);
563 data |= SHADOWCON_CHx_ENABLE(win_no);
564 writel(data, sfb->regs + SHADOWCON);
565 }
566
561 data = WINCONx_ENWIN; 567 data = WINCONx_ENWIN;
562 568
563 /* note, since we have to round up the bits-per-pixel, we end up 569 /* note, since we have to round up the bits-per-pixel, we end up
@@ -637,13 +643,6 @@ static int s3c_fb_set_par(struct fb_info *info)
637 writel(data, regs + sfb->variant.wincon + (win_no * 4)); 643 writel(data, regs + sfb->variant.wincon + (win_no * 4));
638 writel(0x0, regs + sfb->variant.winmap + (win_no * 4)); 644 writel(0x0, regs + sfb->variant.winmap + (win_no * 4));
639 645
640 /* Enable DMA channel for this window */
641 if (sfb->variant.has_shadowcon) {
642 data = readl(sfb->regs + SHADOWCON);
643 data |= SHADOWCON_CHx_ENABLE(win_no);
644 writel(data, sfb->regs + SHADOWCON);
645 }
646
647 shadow_protect_win(win, 0); 646 shadow_protect_win(win, 0);
648 647
649 return 0; 648 return 0;
@@ -1487,11 +1486,10 @@ static int __devexit s3c_fb_remove(struct platform_device *pdev)
1487 1486
1488 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res)); 1487 release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
1489 1488
1490 kfree(sfb);
1491
1492 pm_runtime_put_sync(sfb->dev); 1489 pm_runtime_put_sync(sfb->dev);
1493 pm_runtime_disable(sfb->dev); 1490 pm_runtime_disable(sfb->dev);
1494 1491
1492 kfree(sfb);
1495 return 0; 1493 return 0;
1496} 1494}
1497 1495
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index 3b7f2f5bae71..4de541ca9c52 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -2237,6 +2237,22 @@ static int __devinit savagefb_probe(struct pci_dev* dev,
2237 &info->modelist); 2237 &info->modelist);
2238#endif 2238#endif
2239 info->var = savagefb_var800x600x8; 2239 info->var = savagefb_var800x600x8;
2240 /* if a panel was detected, default to a CVT mode instead */
2241 if (par->SavagePanelWidth) {
2242 struct fb_videomode cvt_mode;
2243
2244 memset(&cvt_mode, 0, sizeof(cvt_mode));
2245 cvt_mode.xres = par->SavagePanelWidth;
2246 cvt_mode.yres = par->SavagePanelHeight;
2247 cvt_mode.refresh = 60;
2248 /* FIXME: if we know there is only the panel
2249 * we can enable reduced blanking as well */
2250 if (fb_find_mode_cvt(&cvt_mode, 0, 0))
2251 printk(KERN_WARNING "No CVT mode found for panel\n");
2252 else if (fb_find_mode(&info->var, info, NULL, NULL, 0,
2253 &cvt_mode, 0) != 3)
2254 info->var = savagefb_var800x600x8;
2255 }
2240 2256
2241 if (mode_option) { 2257 if (mode_option) {
2242 fb_find_mode(&info->var, info, mode_option, 2258 fb_find_mode(&info->var, info, mode_option,
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index 6ae40b630dc9..7d54e2c612f7 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -1127,23 +1127,16 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1127 struct fb_info *info = hdmi->info; 1127 struct fb_info *info = hdmi->info;
1128 unsigned long parent_rate = 0, hdmi_rate; 1128 unsigned long parent_rate = 0, hdmi_rate;
1129 1129
1130 /* A device has been plugged in */
1131 pm_runtime_get_sync(hdmi->dev);
1132
1133 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate); 1130 ret = sh_hdmi_read_edid(hdmi, &hdmi_rate, &parent_rate);
1134 if (ret < 0) { 1131 if (ret < 0)
1135 pm_runtime_put(hdmi->dev);
1136 goto out; 1132 goto out;
1137 }
1138 1133
1139 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE; 1134 hdmi->hp_state = HDMI_HOTPLUG_EDID_DONE;
1140 1135
1141 /* Reconfigure the clock */ 1136 /* Reconfigure the clock */
1142 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate); 1137 ret = sh_hdmi_clk_configure(hdmi, hdmi_rate, parent_rate);
1143 if (ret < 0) { 1138 if (ret < 0)
1144 pm_runtime_put(hdmi->dev);
1145 goto out; 1139 goto out;
1146 }
1147 1140
1148 msleep(10); 1141 msleep(10);
1149 sh_hdmi_configure(hdmi); 1142 sh_hdmi_configure(hdmi);
@@ -1191,7 +1184,6 @@ static void sh_hdmi_edid_work_fn(struct work_struct *work)
1191 fb_set_suspend(hdmi->info, 1); 1184 fb_set_suspend(hdmi->info, 1);
1192 1185
1193 console_unlock(); 1186 console_unlock();
1194 pm_runtime_put(hdmi->dev);
1195 } 1187 }
1196 1188
1197out: 1189out:
@@ -1312,7 +1304,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1312 INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn); 1304 INIT_DELAYED_WORK(&hdmi->edid_work, sh_hdmi_edid_work_fn);
1313 1305
1314 pm_runtime_enable(&pdev->dev); 1306 pm_runtime_enable(&pdev->dev);
1315 pm_runtime_resume(&pdev->dev); 1307 pm_runtime_get_sync(&pdev->dev);
1316 1308
1317 /* Product and revision IDs are 0 in sh-mobile version */ 1309 /* Product and revision IDs are 0 in sh-mobile version */
1318 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", 1310 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
@@ -1340,7 +1332,7 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1340ecodec: 1332ecodec:
1341 free_irq(irq, hdmi); 1333 free_irq(irq, hdmi);
1342ereqirq: 1334ereqirq:
1343 pm_runtime_suspend(&pdev->dev); 1335 pm_runtime_put(&pdev->dev);
1344 pm_runtime_disable(&pdev->dev); 1336 pm_runtime_disable(&pdev->dev);
1345 iounmap(hdmi->base); 1337 iounmap(hdmi->base);
1346emap: 1338emap:
@@ -1377,7 +1369,7 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
1377 free_irq(irq, hdmi); 1369 free_irq(irq, hdmi);
1378 /* Wait for already scheduled work */ 1370 /* Wait for already scheduled work */
1379 cancel_delayed_work_sync(&hdmi->edid_work); 1371 cancel_delayed_work_sync(&hdmi->edid_work);
1380 pm_runtime_suspend(&pdev->dev); 1372 pm_runtime_put(&pdev->dev);
1381 pm_runtime_disable(&pdev->dev); 1373 pm_runtime_disable(&pdev->dev);
1382 clk_disable(hdmi->hdmi_clk); 1374 clk_disable(hdmi->hdmi_clk);
1383 clk_put(hdmi->hdmi_clk); 1375 clk_put(hdmi->hdmi_clk);
diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c
index 404c03b4b7c7..019dbd3f12b2 100644
--- a/drivers/video/sh_mobile_lcdcfb.c
+++ b/drivers/video/sh_mobile_lcdcfb.c
@@ -470,7 +470,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
470 unsigned long tmp; 470 unsigned long tmp;
471 int bpp = 0; 471 int bpp = 0;
472 unsigned long ldddsr; 472 unsigned long ldddsr;
473 int k, m; 473 int k, m, ret;
474 474
475 /* enable clocks before accessing the hardware */ 475 /* enable clocks before accessing the hardware */
476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { 476 for (k = 0; k < ARRAY_SIZE(priv->ch); k++) {
@@ -540,7 +540,7 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv)
540 540
541 board_cfg = &ch->cfg.board_cfg; 541 board_cfg = &ch->cfg.board_cfg;
542 if (board_cfg->setup_sys) { 542 if (board_cfg->setup_sys) {
543 int ret = board_cfg->setup_sys(board_cfg->board_data, 543 ret = board_cfg->setup_sys(board_cfg->board_data,
544 ch, &sh_mobile_lcdc_sys_bus_ops); 544 ch, &sh_mobile_lcdc_sys_bus_ops);
545 if (ret) 545 if (ret)
546 return ret; 546 return ret;
diff --git a/drivers/video/vga16fb.c b/drivers/video/vga16fb.c
index 53b2c5aae067..305c975b1787 100644
--- a/drivers/video/vga16fb.c
+++ b/drivers/video/vga16fb.c
@@ -1265,9 +1265,11 @@ static void vga16fb_imageblit(struct fb_info *info, const struct fb_image *image
1265 1265
1266static void vga16fb_destroy(struct fb_info *info) 1266static void vga16fb_destroy(struct fb_info *info)
1267{ 1267{
1268 struct platform_device *dev = container_of(info->device, struct platform_device, dev);
1268 iounmap(info->screen_base); 1269 iounmap(info->screen_base);
1269 fb_dealloc_cmap(&info->cmap); 1270 fb_dealloc_cmap(&info->cmap);
1270 /* XXX unshare VGA regions */ 1271 /* XXX unshare VGA regions */
1272 platform_set_drvdata(dev, NULL);
1271 framebuffer_release(info); 1273 framebuffer_release(info);
1272} 1274}
1273 1275
diff --git a/drivers/video/xen-fbfront.c b/drivers/video/xen-fbfront.c
index a20218c2fda8..beac52fc1c0e 100644
--- a/drivers/video/xen-fbfront.c
+++ b/drivers/video/xen-fbfront.c
@@ -395,10 +395,9 @@ static int __devinit xenfb_probe(struct xenbus_device *dev,
395 spin_lock_init(&info->dirty_lock); 395 spin_lock_init(&info->dirty_lock);
396 spin_lock_init(&info->resize_lock); 396 spin_lock_init(&info->resize_lock);
397 397
398 info->fb = vmalloc(fb_size); 398 info->fb = vzalloc(fb_size);
399 if (info->fb == NULL) 399 if (info->fb == NULL)
400 goto error_nomem; 400 goto error_nomem;
401 memset(info->fb, 0, fb_size);
402 401
403 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT; 402 info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
404 403
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 00d615d7aa21..979d6eed9a0f 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -42,7 +42,7 @@ config W1_MASTER_MXC
42 42
43config W1_MASTER_DS1WM 43config W1_MASTER_DS1WM
44 tristate "Maxim DS1WM 1-wire busmaster" 44 tristate "Maxim DS1WM 1-wire busmaster"
45 depends on W1 45 depends on W1 && GENERIC_HARDIRQS
46 help 46 help
47 Say Y here to enable the DS1WM 1-wire driver, such as that 47 Say Y here to enable the DS1WM 1-wire driver, such as that
48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like 48 in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 3ff822b48145..30df85d8fca8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -395,9 +395,9 @@ static void unmask_evtchn(int port)
395static void xen_irq_init(unsigned irq) 395static void xen_irq_init(unsigned irq)
396{ 396{
397 struct irq_info *info; 397 struct irq_info *info;
398#ifdef CONFIG_SMP
398 struct irq_desc *desc = irq_to_desc(irq); 399 struct irq_desc *desc = irq_to_desc(irq);
399 400
400#ifdef CONFIG_SMP
401 /* By default all event channels notify CPU#0. */ 401 /* By default all event channels notify CPU#0. */
402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); 402 cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
403#endif 403#endif
@@ -626,6 +626,9 @@ int xen_allocate_pirq_gsi(unsigned gsi)
626 * 626 *
627 * Note: We don't assign an event channel until the irq actually started 627 * Note: We don't assign an event channel until the irq actually started
628 * up. Return an existing irq if we've already got one for the gsi. 628 * up. Return an existing irq if we've already got one for the gsi.
629 *
630 * Shareable implies level triggered, not shareable implies edge
631 * triggered here.
629 */ 632 */
630int xen_bind_pirq_gsi_to_irq(unsigned gsi, 633int xen_bind_pirq_gsi_to_irq(unsigned gsi,
631 unsigned pirq, int shareable, char *name) 634 unsigned pirq, int shareable, char *name)
@@ -664,16 +667,13 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
664 667
665 pirq_query_unmask(irq); 668 pirq_query_unmask(irq);
666 /* We try to use the handler with the appropriate semantic for the 669 /* We try to use the handler with the appropriate semantic for the
667 * type of interrupt: if the interrupt doesn't need an eoi 670 * type of interrupt: if the interrupt is an edge triggered
668 * (pirq_needs_eoi returns false), we treat it like an edge 671 * interrupt we use handle_edge_irq.
669 * triggered interrupt so we use handle_edge_irq.
670 * As a matter of fact this only happens when the corresponding
671 * physical interrupt is edge triggered or an msi.
672 * 672 *
673 * On the other hand if the interrupt needs an eoi (pirq_needs_eoi 673 * On the other hand if the interrupt is level triggered we use
674 * returns true) we treat it like a level triggered interrupt so we 674 * handle_fasteoi_irq like the native code does for this kind of
675 * use handle_fasteoi_irq like the native code does for this kind of
676 * interrupts. 675 * interrupts.
676 *
677 * Depending on the Xen version, pirq_needs_eoi might return true 677 * Depending on the Xen version, pirq_needs_eoi might return true
678 * not only for level triggered interrupts but for edge triggered 678 * not only for level triggered interrupts but for edge triggered
679 * interrupts too. In any case Xen always honors the eoi mechanism, 679 * interrupts too. In any case Xen always honors the eoi mechanism,
@@ -681,7 +681,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
681 * hasn't received an eoi yet. Therefore using the fasteoi handler 681 * hasn't received an eoi yet. Therefore using the fasteoi handler
682 * is the right choice either way. 682 * is the right choice either way.
683 */ 683 */
684 if (pirq_needs_eoi(irq)) 684 if (shareable)
685 irq_set_chip_and_handler_name(irq, &xen_pirq_chip, 685 irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
686 handle_fasteoi_irq, name); 686 handle_fasteoi_irq, name);
687 else 687 else
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 65ea21a97492..6e8c15a23201 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -147,9 +147,15 @@ void __init xen_swiotlb_init(int verbose)
147{ 147{
148 unsigned long bytes; 148 unsigned long bytes;
149 int rc; 149 int rc;
150 150 unsigned long nr_tbl;
151 xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT); 151
152 xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE); 152 nr_tbl = swioltb_nr_tbl();
153 if (nr_tbl)
154 xen_io_tlb_nslabs = nr_tbl;
155 else {
156 xen_io_tlb_nslabs = (64 * 1024 * 1024 >> IO_TLB_SHIFT);
157 xen_io_tlb_nslabs = ALIGN(xen_io_tlb_nslabs, IO_TLB_SEGSIZE);
158 }
153 159
154 bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT; 160 bytes = xen_io_tlb_nslabs << IO_TLB_SHIFT;
155 161
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 20c106f24927..1b0b19550015 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -584,11 +584,11 @@ static struct dentry *afs_lookup(struct inode *dir, struct dentry *dentry,
584 584
585success: 585success:
586 d_add(dentry, inode); 586 d_add(dentry, inode);
587 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%llu }", 587 _leave(" = 0 { vn=%u u=%u } -> { ino=%lu v=%u }",
588 fid.vnode, 588 fid.vnode,
589 fid.unique, 589 fid.unique,
590 dentry->d_inode->i_ino, 590 dentry->d_inode->i_ino,
591 (unsigned long long)dentry->d_inode->i_version); 591 dentry->d_inode->i_generation);
592 592
593 return NULL; 593 return NULL;
594} 594}
@@ -671,10 +671,10 @@ static int afs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
671 * been deleted and replaced, and the original vnode ID has 671 * been deleted and replaced, and the original vnode ID has
672 * been reused */ 672 * been reused */
673 if (fid.unique != vnode->fid.unique) { 673 if (fid.unique != vnode->fid.unique) {
674 _debug("%s: file deleted (uq %u -> %u I:%llu)", 674 _debug("%s: file deleted (uq %u -> %u I:%u)",
675 dentry->d_name.name, fid.unique, 675 dentry->d_name.name, fid.unique,
676 vnode->fid.unique, 676 vnode->fid.unique,
677 (unsigned long long)dentry->d_inode->i_version); 677 dentry->d_inode->i_generation);
678 spin_lock(&vnode->lock); 678 spin_lock(&vnode->lock);
679 set_bit(AFS_VNODE_DELETED, &vnode->flags); 679 set_bit(AFS_VNODE_DELETED, &vnode->flags);
680 spin_unlock(&vnode->lock); 680 spin_unlock(&vnode->lock);
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 4bd0218473a9..346e3289abd7 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -89,7 +89,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
89 i_size_write(&vnode->vfs_inode, size); 89 i_size_write(&vnode->vfs_inode, size);
90 vnode->vfs_inode.i_uid = status->owner; 90 vnode->vfs_inode.i_uid = status->owner;
91 vnode->vfs_inode.i_gid = status->group; 91 vnode->vfs_inode.i_gid = status->group;
92 vnode->vfs_inode.i_version = vnode->fid.unique; 92 vnode->vfs_inode.i_generation = vnode->fid.unique;
93 vnode->vfs_inode.i_nlink = status->nlink; 93 vnode->vfs_inode.i_nlink = status->nlink;
94 94
95 mode = vnode->vfs_inode.i_mode; 95 mode = vnode->vfs_inode.i_mode;
@@ -102,6 +102,7 @@ static void xdr_decode_AFSFetchStatus(const __be32 **_bp,
102 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server; 102 vnode->vfs_inode.i_ctime.tv_sec = status->mtime_server;
103 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime; 103 vnode->vfs_inode.i_mtime = vnode->vfs_inode.i_ctime;
104 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime; 104 vnode->vfs_inode.i_atime = vnode->vfs_inode.i_ctime;
105 vnode->vfs_inode.i_version = data_version;
105 } 106 }
106 107
107 expected_version = status->data_version; 108 expected_version = status->data_version;
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index db66c5201474..0fdab6e03d87 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -75,7 +75,8 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
75 inode->i_ctime.tv_nsec = 0; 75 inode->i_ctime.tv_nsec = 0;
76 inode->i_atime = inode->i_mtime = inode->i_ctime; 76 inode->i_atime = inode->i_mtime = inode->i_ctime;
77 inode->i_blocks = 0; 77 inode->i_blocks = 0;
78 inode->i_version = vnode->fid.unique; 78 inode->i_generation = vnode->fid.unique;
79 inode->i_version = vnode->status.data_version;
79 inode->i_mapping->a_ops = &afs_fs_aops; 80 inode->i_mapping->a_ops = &afs_fs_aops;
80 81
81 /* check to see whether a symbolic link is really a mountpoint */ 82 /* check to see whether a symbolic link is really a mountpoint */
@@ -100,7 +101,7 @@ static int afs_iget5_test(struct inode *inode, void *opaque)
100 struct afs_iget_data *data = opaque; 101 struct afs_iget_data *data = opaque;
101 102
102 return inode->i_ino == data->fid.vnode && 103 return inode->i_ino == data->fid.vnode &&
103 inode->i_version == data->fid.unique; 104 inode->i_generation == data->fid.unique;
104} 105}
105 106
106/* 107/*
@@ -122,7 +123,7 @@ static int afs_iget5_set(struct inode *inode, void *opaque)
122 struct afs_vnode *vnode = AFS_FS_I(inode); 123 struct afs_vnode *vnode = AFS_FS_I(inode);
123 124
124 inode->i_ino = data->fid.vnode; 125 inode->i_ino = data->fid.vnode;
125 inode->i_version = data->fid.unique; 126 inode->i_generation = data->fid.unique;
126 vnode->fid = data->fid; 127 vnode->fid = data->fid;
127 vnode->volume = data->volume; 128 vnode->volume = data->volume;
128 129
@@ -380,8 +381,7 @@ int afs_getattr(struct vfsmount *mnt, struct dentry *dentry,
380 381
381 inode = dentry->d_inode; 382 inode = dentry->d_inode;
382 383
383 _enter("{ ino=%lu v=%llu }", inode->i_ino, 384 _enter("{ ino=%lu v=%u }", inode->i_ino, inode->i_generation);
384 (unsigned long long)inode->i_version);
385 385
386 generic_fillattr(inode, stat); 386 generic_fillattr(inode, stat);
387 return 0; 387 return 0;
diff --git a/fs/afs/super.c b/fs/afs/super.c
index fb240e8766d6..356dcf0929e8 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -31,8 +31,8 @@
31static void afs_i_init_once(void *foo); 31static void afs_i_init_once(void *foo);
32static struct dentry *afs_mount(struct file_system_type *fs_type, 32static struct dentry *afs_mount(struct file_system_type *fs_type,
33 int flags, const char *dev_name, void *data); 33 int flags, const char *dev_name, void *data);
34static void afs_kill_super(struct super_block *sb);
34static struct inode *afs_alloc_inode(struct super_block *sb); 35static struct inode *afs_alloc_inode(struct super_block *sb);
35static void afs_put_super(struct super_block *sb);
36static void afs_destroy_inode(struct inode *inode); 36static void afs_destroy_inode(struct inode *inode);
37static int afs_statfs(struct dentry *dentry, struct kstatfs *buf); 37static int afs_statfs(struct dentry *dentry, struct kstatfs *buf);
38 38
@@ -40,7 +40,7 @@ struct file_system_type afs_fs_type = {
40 .owner = THIS_MODULE, 40 .owner = THIS_MODULE,
41 .name = "afs", 41 .name = "afs",
42 .mount = afs_mount, 42 .mount = afs_mount,
43 .kill_sb = kill_anon_super, 43 .kill_sb = afs_kill_super,
44 .fs_flags = 0, 44 .fs_flags = 0,
45}; 45};
46 46
@@ -50,7 +50,6 @@ static const struct super_operations afs_super_ops = {
50 .drop_inode = afs_drop_inode, 50 .drop_inode = afs_drop_inode,
51 .destroy_inode = afs_destroy_inode, 51 .destroy_inode = afs_destroy_inode,
52 .evict_inode = afs_evict_inode, 52 .evict_inode = afs_evict_inode,
53 .put_super = afs_put_super,
54 .show_options = generic_show_options, 53 .show_options = generic_show_options,
55}; 54};
56 55
@@ -282,19 +281,25 @@ static int afs_parse_device_name(struct afs_mount_params *params,
282 */ 281 */
283static int afs_test_super(struct super_block *sb, void *data) 282static int afs_test_super(struct super_block *sb, void *data)
284{ 283{
285 struct afs_mount_params *params = data; 284 struct afs_super_info *as1 = data;
286 struct afs_super_info *as = sb->s_fs_info; 285 struct afs_super_info *as = sb->s_fs_info;
287 286
288 return as->volume == params->volume; 287 return as->volume == as1->volume;
288}
289
290static int afs_set_super(struct super_block *sb, void *data)
291{
292 sb->s_fs_info = data;
293 return set_anon_super(sb, NULL);
289} 294}
290 295
291/* 296/*
292 * fill in the superblock 297 * fill in the superblock
293 */ 298 */
294static int afs_fill_super(struct super_block *sb, void *data) 299static int afs_fill_super(struct super_block *sb,
300 struct afs_mount_params *params)
295{ 301{
296 struct afs_mount_params *params = data; 302 struct afs_super_info *as = sb->s_fs_info;
297 struct afs_super_info *as = NULL;
298 struct afs_fid fid; 303 struct afs_fid fid;
299 struct dentry *root = NULL; 304 struct dentry *root = NULL;
300 struct inode *inode = NULL; 305 struct inode *inode = NULL;
@@ -302,23 +307,13 @@ static int afs_fill_super(struct super_block *sb, void *data)
302 307
303 _enter(""); 308 _enter("");
304 309
305 /* allocate a superblock info record */
306 as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
307 if (!as) {
308 _leave(" = -ENOMEM");
309 return -ENOMEM;
310 }
311
312 afs_get_volume(params->volume);
313 as->volume = params->volume;
314
315 /* fill in the superblock */ 310 /* fill in the superblock */
316 sb->s_blocksize = PAGE_CACHE_SIZE; 311 sb->s_blocksize = PAGE_CACHE_SIZE;
317 sb->s_blocksize_bits = PAGE_CACHE_SHIFT; 312 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
318 sb->s_magic = AFS_FS_MAGIC; 313 sb->s_magic = AFS_FS_MAGIC;
319 sb->s_op = &afs_super_ops; 314 sb->s_op = &afs_super_ops;
320 sb->s_fs_info = as;
321 sb->s_bdi = &as->volume->bdi; 315 sb->s_bdi = &as->volume->bdi;
316 strlcpy(sb->s_id, as->volume->vlocation->vldb.name, sizeof(sb->s_id));
322 317
323 /* allocate the root inode and dentry */ 318 /* allocate the root inode and dentry */
324 fid.vid = as->volume->vid; 319 fid.vid = as->volume->vid;
@@ -326,7 +321,7 @@ static int afs_fill_super(struct super_block *sb, void *data)
326 fid.unique = 1; 321 fid.unique = 1;
327 inode = afs_iget(sb, params->key, &fid, NULL, NULL); 322 inode = afs_iget(sb, params->key, &fid, NULL, NULL);
328 if (IS_ERR(inode)) 323 if (IS_ERR(inode))
329 goto error_inode; 324 return PTR_ERR(inode);
330 325
331 if (params->autocell) 326 if (params->autocell)
332 set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags); 327 set_bit(AFS_VNODE_AUTOCELL, &AFS_FS_I(inode)->flags);
@@ -342,16 +337,8 @@ static int afs_fill_super(struct super_block *sb, void *data)
342 _leave(" = 0"); 337 _leave(" = 0");
343 return 0; 338 return 0;
344 339
345error_inode:
346 ret = PTR_ERR(inode);
347 inode = NULL;
348error: 340error:
349 iput(inode); 341 iput(inode);
350 afs_put_volume(as->volume);
351 kfree(as);
352
353 sb->s_fs_info = NULL;
354
355 _leave(" = %d", ret); 342 _leave(" = %d", ret);
356 return ret; 343 return ret;
357} 344}
@@ -367,6 +354,7 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
367 struct afs_volume *vol; 354 struct afs_volume *vol;
368 struct key *key; 355 struct key *key;
369 char *new_opts = kstrdup(options, GFP_KERNEL); 356 char *new_opts = kstrdup(options, GFP_KERNEL);
357 struct afs_super_info *as;
370 int ret; 358 int ret;
371 359
372 _enter(",,%s,%p", dev_name, options); 360 _enter(",,%s,%p", dev_name, options);
@@ -399,12 +387,22 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
399 ret = PTR_ERR(vol); 387 ret = PTR_ERR(vol);
400 goto error; 388 goto error;
401 } 389 }
402 params.volume = vol; 390
391 /* allocate a superblock info record */
392 as = kzalloc(sizeof(struct afs_super_info), GFP_KERNEL);
393 if (!as) {
394 ret = -ENOMEM;
395 afs_put_volume(vol);
396 goto error;
397 }
398 as->volume = vol;
403 399
404 /* allocate a deviceless superblock */ 400 /* allocate a deviceless superblock */
405 sb = sget(fs_type, afs_test_super, set_anon_super, &params); 401 sb = sget(fs_type, afs_test_super, afs_set_super, as);
406 if (IS_ERR(sb)) { 402 if (IS_ERR(sb)) {
407 ret = PTR_ERR(sb); 403 ret = PTR_ERR(sb);
404 afs_put_volume(vol);
405 kfree(as);
408 goto error; 406 goto error;
409 } 407 }
410 408
@@ -422,16 +420,16 @@ static struct dentry *afs_mount(struct file_system_type *fs_type,
422 } else { 420 } else {
423 _debug("reuse"); 421 _debug("reuse");
424 ASSERTCMP(sb->s_flags, &, MS_ACTIVE); 422 ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
423 afs_put_volume(vol);
424 kfree(as);
425 } 425 }
426 426
427 afs_put_volume(params.volume);
428 afs_put_cell(params.cell); 427 afs_put_cell(params.cell);
429 kfree(new_opts); 428 kfree(new_opts);
430 _leave(" = 0 [%p]", sb); 429 _leave(" = 0 [%p]", sb);
431 return dget(sb->s_root); 430 return dget(sb->s_root);
432 431
433error: 432error:
434 afs_put_volume(params.volume);
435 afs_put_cell(params.cell); 433 afs_put_cell(params.cell);
436 key_put(params.key); 434 key_put(params.key);
437 kfree(new_opts); 435 kfree(new_opts);
@@ -439,18 +437,12 @@ error:
439 return ERR_PTR(ret); 437 return ERR_PTR(ret);
440} 438}
441 439
442/* 440static void afs_kill_super(struct super_block *sb)
443 * finish the unmounting process on the superblock
444 */
445static void afs_put_super(struct super_block *sb)
446{ 441{
447 struct afs_super_info *as = sb->s_fs_info; 442 struct afs_super_info *as = sb->s_fs_info;
448 443 kill_anon_super(sb);
449 _enter("");
450
451 afs_put_volume(as->volume); 444 afs_put_volume(as->volume);
452 445 kfree(as);
453 _leave("");
454} 446}
455 447
456/* 448/*
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 789b3afb3423..b806285ff853 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,23 +84,21 @@ void afs_put_writeback(struct afs_writeback *wb)
84 * partly or wholly fill a page that's under preparation for writing 84 * partly or wholly fill a page that's under preparation for writing
85 */ 85 */
86static int afs_fill_page(struct afs_vnode *vnode, struct key *key, 86static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
87 loff_t pos, unsigned len, struct page *page) 87 loff_t pos, struct page *page)
88{ 88{
89 loff_t i_size; 89 loff_t i_size;
90 unsigned eof;
91 int ret; 90 int ret;
91 int len;
92 92
93 _enter(",,%llu,%u", (unsigned long long)pos, len); 93 _enter(",,%llu", (unsigned long long)pos);
94
95 ASSERTCMP(len, <=, PAGE_CACHE_SIZE);
96 94
97 i_size = i_size_read(&vnode->vfs_inode); 95 i_size = i_size_read(&vnode->vfs_inode);
98 if (pos + len > i_size) 96 if (pos + PAGE_CACHE_SIZE > i_size)
99 eof = i_size; 97 len = i_size - pos;
100 else 98 else
101 eof = PAGE_CACHE_SIZE; 99 len = PAGE_CACHE_SIZE;
102 100
103 ret = afs_vnode_fetch_data(vnode, key, 0, eof, page); 101 ret = afs_vnode_fetch_data(vnode, key, pos, len, page);
104 if (ret < 0) { 102 if (ret < 0) {
105 if (ret == -ENOENT) { 103 if (ret == -ENOENT) {
106 _debug("got NOENT from server" 104 _debug("got NOENT from server"
@@ -153,9 +151,8 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
153 *pagep = page; 151 *pagep = page;
154 /* page won't leak in error case: it eventually gets cleaned off LRU */ 152 /* page won't leak in error case: it eventually gets cleaned off LRU */
155 153
156 if (!PageUptodate(page)) { 154 if (!PageUptodate(page) && len != PAGE_CACHE_SIZE) {
157 _debug("not up to date"); 155 ret = afs_fill_page(vnode, key, index << PAGE_CACHE_SHIFT, page);
158 ret = afs_fill_page(vnode, key, pos, len, page);
159 if (ret < 0) { 156 if (ret < 0) {
160 kfree(candidate); 157 kfree(candidate);
161 _leave(" = %d [prep]", ret); 158 _leave(" = %d [prep]", ret);
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 9ad2369d9e35..bfcb18feb1df 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -231,9 +231,6 @@ static int bad_inode_readlink(struct dentry *dentry, char __user *buffer,
231 231
232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags) 232static int bad_inode_permission(struct inode *inode, int mask, unsigned int flags)
233{ 233{
234 if (flags & IPERM_FLAG_RCU)
235 return -ECHILD;
236
237 return -EIO; 234 return -EIO;
238} 235}
239 236
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 93b1aa932014..52d7eca8c7bf 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -121,9 +121,6 @@ struct btrfs_inode {
121 */ 121 */
122 u64 index_cnt; 122 u64 index_cnt;
123 123
124 /* the start of block group preferred for allocations. */
125 u64 block_group;
126
127 /* the fsync log has some corner cases that mean we have to check 124 /* the fsync log has some corner cases that mean we have to check
128 * directories to see if any unlinks have been done before 125 * directories to see if any unlinks have been done before
129 * the directory was logged. See tree-log.c for all the 126 * the directory was logged. See tree-log.c for all the
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b0e18d986e0a..2e667868e0d2 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -43,8 +43,6 @@ struct btrfs_path *btrfs_alloc_path(void)
43{ 43{
44 struct btrfs_path *path; 44 struct btrfs_path *path;
45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS); 45 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
46 if (path)
47 path->reada = 1;
48 return path; 46 return path;
49} 47}
50 48
@@ -1224,11 +1222,13 @@ static void reada_for_search(struct btrfs_root *root,
1224 u64 search; 1222 u64 search;
1225 u64 target; 1223 u64 target;
1226 u64 nread = 0; 1224 u64 nread = 0;
1225 u64 gen;
1227 int direction = path->reada; 1226 int direction = path->reada;
1228 struct extent_buffer *eb; 1227 struct extent_buffer *eb;
1229 u32 nr; 1228 u32 nr;
1230 u32 blocksize; 1229 u32 blocksize;
1231 u32 nscan = 0; 1230 u32 nscan = 0;
1231 bool map = true;
1232 1232
1233 if (level != 1) 1233 if (level != 1)
1234 return; 1234 return;
@@ -1250,7 +1250,19 @@ static void reada_for_search(struct btrfs_root *root,
1250 1250
1251 nritems = btrfs_header_nritems(node); 1251 nritems = btrfs_header_nritems(node);
1252 nr = slot; 1252 nr = slot;
1253 if (node->map_token || path->skip_locking)
1254 map = false;
1255
1253 while (1) { 1256 while (1) {
1257 if (map && !node->map_token) {
1258 unsigned long offset = btrfs_node_key_ptr_offset(nr);
1259 map_private_extent_buffer(node, offset,
1260 sizeof(struct btrfs_key_ptr),
1261 &node->map_token,
1262 &node->kaddr,
1263 &node->map_start,
1264 &node->map_len, KM_USER1);
1265 }
1254 if (direction < 0) { 1266 if (direction < 0) {
1255 if (nr == 0) 1267 if (nr == 0)
1256 break; 1268 break;
@@ -1268,14 +1280,23 @@ static void reada_for_search(struct btrfs_root *root,
1268 search = btrfs_node_blockptr(node, nr); 1280 search = btrfs_node_blockptr(node, nr);
1269 if ((search <= target && target - search <= 65536) || 1281 if ((search <= target && target - search <= 65536) ||
1270 (search > target && search - target <= 65536)) { 1282 (search > target && search - target <= 65536)) {
1271 readahead_tree_block(root, search, blocksize, 1283 gen = btrfs_node_ptr_generation(node, nr);
1272 btrfs_node_ptr_generation(node, nr)); 1284 if (map && node->map_token) {
1285 unmap_extent_buffer(node, node->map_token,
1286 KM_USER1);
1287 node->map_token = NULL;
1288 }
1289 readahead_tree_block(root, search, blocksize, gen);
1273 nread += blocksize; 1290 nread += blocksize;
1274 } 1291 }
1275 nscan++; 1292 nscan++;
1276 if ((nread > 65536 || nscan > 32)) 1293 if ((nread > 65536 || nscan > 32))
1277 break; 1294 break;
1278 } 1295 }
1296 if (map && node->map_token) {
1297 unmap_extent_buffer(node, node->map_token, KM_USER1);
1298 node->map_token = NULL;
1299 }
1279} 1300}
1280 1301
1281/* 1302/*
@@ -1648,9 +1669,6 @@ again:
1648 } 1669 }
1649cow_done: 1670cow_done:
1650 BUG_ON(!cow && ins_len); 1671 BUG_ON(!cow && ins_len);
1651 if (level != btrfs_header_level(b))
1652 WARN_ON(1);
1653 level = btrfs_header_level(b);
1654 1672
1655 p->nodes[level] = b; 1673 p->nodes[level] = b;
1656 if (!p->skip_locking) 1674 if (!p->skip_locking)
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 6c093fa98f61..300628795fdb 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -930,7 +930,6 @@ struct btrfs_fs_info {
930 * is required instead of the faster short fsync log commits 930 * is required instead of the faster short fsync log commits
931 */ 931 */
932 u64 last_trans_log_full_commit; 932 u64 last_trans_log_full_commit;
933 u64 open_ioctl_trans;
934 unsigned long mount_opt:20; 933 unsigned long mount_opt:20;
935 unsigned long compress_type:4; 934 unsigned long compress_type:4;
936 u64 max_inline; 935 u64 max_inline;
@@ -947,7 +946,6 @@ struct btrfs_fs_info {
947 struct super_block *sb; 946 struct super_block *sb;
948 struct inode *btree_inode; 947 struct inode *btree_inode;
949 struct backing_dev_info bdi; 948 struct backing_dev_info bdi;
950 struct mutex trans_mutex;
951 struct mutex tree_log_mutex; 949 struct mutex tree_log_mutex;
952 struct mutex transaction_kthread_mutex; 950 struct mutex transaction_kthread_mutex;
953 struct mutex cleaner_mutex; 951 struct mutex cleaner_mutex;
@@ -968,6 +966,13 @@ struct btrfs_fs_info {
968 struct rw_semaphore subvol_sem; 966 struct rw_semaphore subvol_sem;
969 struct srcu_struct subvol_srcu; 967 struct srcu_struct subvol_srcu;
970 968
969 spinlock_t trans_lock;
970 /*
971 * the reloc mutex goes with the trans lock, it is taken
972 * during commit to protect us from the relocation code
973 */
974 struct mutex reloc_mutex;
975
971 struct list_head trans_list; 976 struct list_head trans_list;
972 struct list_head hashers; 977 struct list_head hashers;
973 struct list_head dead_roots; 978 struct list_head dead_roots;
@@ -980,6 +985,7 @@ struct btrfs_fs_info {
980 atomic_t async_submit_draining; 985 atomic_t async_submit_draining;
981 atomic_t nr_async_bios; 986 atomic_t nr_async_bios;
982 atomic_t async_delalloc_pages; 987 atomic_t async_delalloc_pages;
988 atomic_t open_ioctl_trans;
983 989
984 /* 990 /*
985 * this is used by the balancing code to wait for all the pending 991 * this is used by the balancing code to wait for all the pending
@@ -1044,6 +1050,7 @@ struct btrfs_fs_info {
1044 int closing; 1050 int closing;
1045 int log_root_recovering; 1051 int log_root_recovering;
1046 int enospc_unlink; 1052 int enospc_unlink;
1053 int trans_no_join;
1047 1054
1048 u64 total_pinned; 1055 u64 total_pinned;
1049 1056
@@ -1065,7 +1072,6 @@ struct btrfs_fs_info {
1065 struct reloc_control *reloc_ctl; 1072 struct reloc_control *reloc_ctl;
1066 1073
1067 spinlock_t delalloc_lock; 1074 spinlock_t delalloc_lock;
1068 spinlock_t new_trans_lock;
1069 u64 delalloc_bytes; 1075 u64 delalloc_bytes;
1070 1076
1071 /* data_alloc_cluster is only used in ssd mode */ 1077 /* data_alloc_cluster is only used in ssd mode */
@@ -1172,6 +1178,14 @@ struct btrfs_root {
1172 u32 type; 1178 u32 type;
1173 1179
1174 u64 highest_objectid; 1180 u64 highest_objectid;
1181
1182 /* btrfs_record_root_in_trans is a multi-step process,
1183 * and it can race with the balancing code. But the
1184 * race is very small, and only the first time the root
1185 * is added to each transaction. So in_trans_setup
1186 * is used to tell us when more checks are required
1187 */
1188 unsigned long in_trans_setup;
1175 int ref_cows; 1189 int ref_cows;
1176 int track_dirty; 1190 int track_dirty;
1177 int in_radix; 1191 int in_radix;
@@ -1181,7 +1195,6 @@ struct btrfs_root {
1181 struct btrfs_key defrag_max; 1195 struct btrfs_key defrag_max;
1182 int defrag_running; 1196 int defrag_running;
1183 char *name; 1197 char *name;
1184 int in_sysfs;
1185 1198
1186 /* the dirty list is only used by non-reference counted roots */ 1199 /* the dirty list is only used by non-reference counted roots */
1187 struct list_head dirty_list; 1200 struct list_head dirty_list;
@@ -1340,6 +1353,7 @@ struct btrfs_ioctl_defrag_range_args {
1340#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1353#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
1341#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) 1354#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
1342#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) 1355#define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16)
1356#define BTRFS_MOUNT_INODE_MAP_CACHE (1 << 17)
1343 1357
1344#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1358#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1345#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1359#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2238,6 +2252,9 @@ int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
2238void btrfs_block_rsv_release(struct btrfs_root *root, 2252void btrfs_block_rsv_release(struct btrfs_root *root,
2239 struct btrfs_block_rsv *block_rsv, 2253 struct btrfs_block_rsv *block_rsv,
2240 u64 num_bytes); 2254 u64 num_bytes);
2255int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
2256 struct btrfs_root *root,
2257 struct btrfs_block_rsv *rsv);
2241int btrfs_set_block_group_ro(struct btrfs_root *root, 2258int btrfs_set_block_group_ro(struct btrfs_root *root,
2242 struct btrfs_block_group_cache *cache); 2259 struct btrfs_block_group_cache *cache);
2243int btrfs_set_block_group_rw(struct btrfs_root *root, 2260int btrfs_set_block_group_rw(struct btrfs_root *root,
@@ -2350,6 +2367,15 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
2350 struct btrfs_root *root, 2367 struct btrfs_root *root,
2351 struct extent_buffer *node, 2368 struct extent_buffer *node,
2352 struct extent_buffer *parent); 2369 struct extent_buffer *parent);
2370static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
2371{
2372 /*
2373 * Get synced with close_ctree()
2374 */
2375 smp_mb();
2376 return fs_info->closing;
2377}
2378
2353/* root-item.c */ 2379/* root-item.c */
2354int btrfs_find_root_ref(struct btrfs_root *tree_root, 2380int btrfs_find_root_ref(struct btrfs_root *tree_root,
2355 struct btrfs_path *path, 2381 struct btrfs_path *path,
@@ -2512,8 +2538,7 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
2512int btrfs_writepages(struct address_space *mapping, 2538int btrfs_writepages(struct address_space *mapping,
2513 struct writeback_control *wbc); 2539 struct writeback_control *wbc);
2514int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 2540int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
2515 struct btrfs_root *new_root, 2541 struct btrfs_root *new_root, u64 new_dirid);
2516 u64 new_dirid, u64 alloc_hint);
2517int btrfs_merge_bio_hook(struct page *page, unsigned long offset, 2542int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
2518 size_t size, struct bio *bio, unsigned long bio_flags); 2543 size_t size, struct bio *bio, unsigned long bio_flags);
2519 2544
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 01e29503a54b..f1cbd028f7b3 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -297,7 +297,6 @@ struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len)
297 item->data_len = data_len; 297 item->data_len = data_len;
298 item->ins_or_del = 0; 298 item->ins_or_del = 0;
299 item->bytes_reserved = 0; 299 item->bytes_reserved = 0;
300 item->block_rsv = NULL;
301 item->delayed_node = NULL; 300 item->delayed_node = NULL;
302 atomic_set(&item->refs, 1); 301 atomic_set(&item->refs, 1);
303 } 302 }
@@ -593,10 +592,8 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
593 592
594 num_bytes = btrfs_calc_trans_metadata_size(root, 1); 593 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
595 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); 594 ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
596 if (!ret) { 595 if (!ret)
597 item->bytes_reserved = num_bytes; 596 item->bytes_reserved = num_bytes;
598 item->block_rsv = dst_rsv;
599 }
600 597
601 return ret; 598 return ret;
602} 599}
@@ -604,10 +601,13 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
604static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, 601static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
605 struct btrfs_delayed_item *item) 602 struct btrfs_delayed_item *item)
606{ 603{
604 struct btrfs_block_rsv *rsv;
605
607 if (!item->bytes_reserved) 606 if (!item->bytes_reserved)
608 return; 607 return;
609 608
610 btrfs_block_rsv_release(root, item->block_rsv, 609 rsv = &root->fs_info->global_block_rsv;
610 btrfs_block_rsv_release(root, rsv,
611 item->bytes_reserved); 611 item->bytes_reserved);
612} 612}
613 613
@@ -678,6 +678,7 @@ static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans,
678 INIT_LIST_HEAD(&head); 678 INIT_LIST_HEAD(&head);
679 679
680 next = item; 680 next = item;
681 nitems = 0;
681 682
682 /* 683 /*
683 * count the number of the continuous items that we can insert in batch 684 * count the number of the continuous items that we can insert in batch
@@ -1013,6 +1014,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1013 struct btrfs_delayed_root *delayed_root; 1014 struct btrfs_delayed_root *delayed_root;
1014 struct btrfs_delayed_node *curr_node, *prev_node; 1015 struct btrfs_delayed_node *curr_node, *prev_node;
1015 struct btrfs_path *path; 1016 struct btrfs_path *path;
1017 struct btrfs_block_rsv *block_rsv;
1016 int ret = 0; 1018 int ret = 0;
1017 1019
1018 path = btrfs_alloc_path(); 1020 path = btrfs_alloc_path();
@@ -1020,6 +1022,9 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1020 return -ENOMEM; 1022 return -ENOMEM;
1021 path->leave_spinning = 1; 1023 path->leave_spinning = 1;
1022 1024
1025 block_rsv = trans->block_rsv;
1026 trans->block_rsv = &root->fs_info->global_block_rsv;
1027
1023 delayed_root = btrfs_get_delayed_root(root); 1028 delayed_root = btrfs_get_delayed_root(root);
1024 1029
1025 curr_node = btrfs_first_delayed_node(delayed_root); 1030 curr_node = btrfs_first_delayed_node(delayed_root);
@@ -1044,6 +1049,7 @@ int btrfs_run_delayed_items(struct btrfs_trans_handle *trans,
1044 } 1049 }
1045 1050
1046 btrfs_free_path(path); 1051 btrfs_free_path(path);
1052 trans->block_rsv = block_rsv;
1047 return ret; 1053 return ret;
1048} 1054}
1049 1055
@@ -1051,6 +1057,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1051 struct btrfs_delayed_node *node) 1057 struct btrfs_delayed_node *node)
1052{ 1058{
1053 struct btrfs_path *path; 1059 struct btrfs_path *path;
1060 struct btrfs_block_rsv *block_rsv;
1054 int ret; 1061 int ret;
1055 1062
1056 path = btrfs_alloc_path(); 1063 path = btrfs_alloc_path();
@@ -1058,6 +1065,9 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1058 return -ENOMEM; 1065 return -ENOMEM;
1059 path->leave_spinning = 1; 1066 path->leave_spinning = 1;
1060 1067
1068 block_rsv = trans->block_rsv;
1069 trans->block_rsv = &node->root->fs_info->global_block_rsv;
1070
1061 ret = btrfs_insert_delayed_items(trans, path, node->root, node); 1071 ret = btrfs_insert_delayed_items(trans, path, node->root, node);
1062 if (!ret) 1072 if (!ret)
1063 ret = btrfs_delete_delayed_items(trans, path, node->root, node); 1073 ret = btrfs_delete_delayed_items(trans, path, node->root, node);
@@ -1065,6 +1075,7 @@ static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
1065 ret = btrfs_update_delayed_inode(trans, node->root, path, node); 1075 ret = btrfs_update_delayed_inode(trans, node->root, path, node);
1066 btrfs_free_path(path); 1076 btrfs_free_path(path);
1067 1077
1078 trans->block_rsv = block_rsv;
1068 return ret; 1079 return ret;
1069} 1080}
1070 1081
@@ -1115,6 +1126,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1115 struct btrfs_path *path; 1126 struct btrfs_path *path;
1116 struct btrfs_delayed_node *delayed_node = NULL; 1127 struct btrfs_delayed_node *delayed_node = NULL;
1117 struct btrfs_root *root; 1128 struct btrfs_root *root;
1129 struct btrfs_block_rsv *block_rsv;
1118 unsigned long nr = 0; 1130 unsigned long nr = 0;
1119 int need_requeue = 0; 1131 int need_requeue = 0;
1120 int ret; 1132 int ret;
@@ -1129,10 +1141,13 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1129 delayed_node = async_node->delayed_node; 1141 delayed_node = async_node->delayed_node;
1130 root = delayed_node->root; 1142 root = delayed_node->root;
1131 1143
1132 trans = btrfs_join_transaction(root, 0); 1144 trans = btrfs_join_transaction(root);
1133 if (IS_ERR(trans)) 1145 if (IS_ERR(trans))
1134 goto free_path; 1146 goto free_path;
1135 1147
1148 block_rsv = trans->block_rsv;
1149 trans->block_rsv = &root->fs_info->global_block_rsv;
1150
1136 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); 1151 ret = btrfs_insert_delayed_items(trans, path, root, delayed_node);
1137 if (!ret) 1152 if (!ret)
1138 ret = btrfs_delete_delayed_items(trans, path, root, 1153 ret = btrfs_delete_delayed_items(trans, path, root,
@@ -1175,6 +1190,7 @@ static void btrfs_async_run_delayed_node_done(struct btrfs_work *work)
1175 1190
1176 nr = trans->blocks_used; 1191 nr = trans->blocks_used;
1177 1192
1193 trans->block_rsv = block_rsv;
1178 btrfs_end_transaction_dmeta(trans, root); 1194 btrfs_end_transaction_dmeta(trans, root);
1179 __btrfs_btree_balance_dirty(root, nr); 1195 __btrfs_btree_balance_dirty(root, nr);
1180free_path: 1196free_path:
@@ -1221,6 +1237,13 @@ again:
1221 return 0; 1237 return 0;
1222} 1238}
1223 1239
1240void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
1241{
1242 struct btrfs_delayed_root *delayed_root;
1243 delayed_root = btrfs_get_delayed_root(root);
1244 WARN_ON(btrfs_first_delayed_node(delayed_root));
1245}
1246
1224void btrfs_balance_delayed_items(struct btrfs_root *root) 1247void btrfs_balance_delayed_items(struct btrfs_root *root)
1225{ 1248{
1226 struct btrfs_delayed_root *delayed_root; 1249 struct btrfs_delayed_root *delayed_root;
@@ -1572,8 +1595,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1572 btrfs_set_stack_inode_transid(inode_item, trans->transid); 1595 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1573 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); 1596 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1574 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); 1597 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
1575 btrfs_set_stack_inode_block_group(inode_item, 1598 btrfs_set_stack_inode_block_group(inode_item, 0);
1576 BTRFS_I(inode)->block_group);
1577 1599
1578 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), 1600 btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
1579 inode->i_atime.tv_sec); 1601 inode->i_atime.tv_sec);
@@ -1595,7 +1617,7 @@ int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans,
1595 struct btrfs_root *root, struct inode *inode) 1617 struct btrfs_root *root, struct inode *inode)
1596{ 1618{
1597 struct btrfs_delayed_node *delayed_node; 1619 struct btrfs_delayed_node *delayed_node;
1598 int ret; 1620 int ret = 0;
1599 1621
1600 delayed_node = btrfs_get_or_create_delayed_node(inode); 1622 delayed_node = btrfs_get_or_create_delayed_node(inode);
1601 if (IS_ERR(delayed_node)) 1623 if (IS_ERR(delayed_node))
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index eb7d240aa648..d1a6a2915c66 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -75,7 +75,6 @@ struct btrfs_delayed_item {
75 struct list_head tree_list; /* used for batch insert/delete items */ 75 struct list_head tree_list; /* used for batch insert/delete items */
76 struct list_head readdir_list; /* used for readdir items */ 76 struct list_head readdir_list; /* used for readdir items */
77 u64 bytes_reserved; 77 u64 bytes_reserved;
78 struct btrfs_block_rsv *block_rsv;
79 struct btrfs_delayed_node *delayed_node; 78 struct btrfs_delayed_node *delayed_node;
80 atomic_t refs; 79 atomic_t refs;
81 int ins_or_del; 80 int ins_or_del;
@@ -138,4 +137,8 @@ int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent,
138/* for init */ 137/* for init */
139int __init btrfs_delayed_inode_init(void); 138int __init btrfs_delayed_inode_init(void);
140void btrfs_delayed_inode_exit(void); 139void btrfs_delayed_inode_exit(void);
140
141/* for debugging */
142void btrfs_assert_delayed_root_empty(struct btrfs_root *root);
143
141#endif 144#endif
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 98b6a71decba..1ac8db5dc0a3 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1044,7 +1044,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1044 root->last_trans = 0; 1044 root->last_trans = 0;
1045 root->highest_objectid = 0; 1045 root->highest_objectid = 0;
1046 root->name = NULL; 1046 root->name = NULL;
1047 root->in_sysfs = 0;
1048 root->inode_tree = RB_ROOT; 1047 root->inode_tree = RB_ROOT;
1049 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); 1048 INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC);
1050 root->block_rsv = NULL; 1049 root->block_rsv = NULL;
@@ -1300,19 +1299,21 @@ again:
1300 return root; 1299 return root;
1301 1300
1302 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); 1301 root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
1303 if (!root->free_ino_ctl)
1304 goto fail;
1305 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), 1302 root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
1306 GFP_NOFS); 1303 GFP_NOFS);
1307 if (!root->free_ino_pinned) 1304 if (!root->free_ino_pinned || !root->free_ino_ctl) {
1305 ret = -ENOMEM;
1308 goto fail; 1306 goto fail;
1307 }
1309 1308
1310 btrfs_init_free_ino_ctl(root); 1309 btrfs_init_free_ino_ctl(root);
1311 mutex_init(&root->fs_commit_mutex); 1310 mutex_init(&root->fs_commit_mutex);
1312 spin_lock_init(&root->cache_lock); 1311 spin_lock_init(&root->cache_lock);
1313 init_waitqueue_head(&root->cache_wait); 1312 init_waitqueue_head(&root->cache_wait);
1314 1313
1315 set_anon_super(&root->anon_super, NULL); 1314 ret = set_anon_super(&root->anon_super, NULL);
1315 if (ret)
1316 goto fail;
1316 1317
1317 if (btrfs_root_refs(&root->root_item) == 0) { 1318 if (btrfs_root_refs(&root->root_item) == 0) {
1318 ret = -ENOENT; 1319 ret = -ENOENT;
@@ -1505,24 +1506,24 @@ static int transaction_kthread(void *arg)
1505 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE); 1506 vfs_check_frozen(root->fs_info->sb, SB_FREEZE_WRITE);
1506 mutex_lock(&root->fs_info->transaction_kthread_mutex); 1507 mutex_lock(&root->fs_info->transaction_kthread_mutex);
1507 1508
1508 spin_lock(&root->fs_info->new_trans_lock); 1509 spin_lock(&root->fs_info->trans_lock);
1509 cur = root->fs_info->running_transaction; 1510 cur = root->fs_info->running_transaction;
1510 if (!cur) { 1511 if (!cur) {
1511 spin_unlock(&root->fs_info->new_trans_lock); 1512 spin_unlock(&root->fs_info->trans_lock);
1512 goto sleep; 1513 goto sleep;
1513 } 1514 }
1514 1515
1515 now = get_seconds(); 1516 now = get_seconds();
1516 if (!cur->blocked && 1517 if (!cur->blocked &&
1517 (now < cur->start_time || now - cur->start_time < 30)) { 1518 (now < cur->start_time || now - cur->start_time < 30)) {
1518 spin_unlock(&root->fs_info->new_trans_lock); 1519 spin_unlock(&root->fs_info->trans_lock);
1519 delay = HZ * 5; 1520 delay = HZ * 5;
1520 goto sleep; 1521 goto sleep;
1521 } 1522 }
1522 transid = cur->transid; 1523 transid = cur->transid;
1523 spin_unlock(&root->fs_info->new_trans_lock); 1524 spin_unlock(&root->fs_info->trans_lock);
1524 1525
1525 trans = btrfs_join_transaction(root, 1); 1526 trans = btrfs_join_transaction(root);
1526 BUG_ON(IS_ERR(trans)); 1527 BUG_ON(IS_ERR(trans));
1527 if (transid == trans->transid) { 1528 if (transid == trans->transid) {
1528 ret = btrfs_commit_transaction(trans, root); 1529 ret = btrfs_commit_transaction(trans, root);
@@ -1613,11 +1614,12 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1613 INIT_LIST_HEAD(&fs_info->ordered_operations); 1614 INIT_LIST_HEAD(&fs_info->ordered_operations);
1614 INIT_LIST_HEAD(&fs_info->caching_block_groups); 1615 INIT_LIST_HEAD(&fs_info->caching_block_groups);
1615 spin_lock_init(&fs_info->delalloc_lock); 1616 spin_lock_init(&fs_info->delalloc_lock);
1616 spin_lock_init(&fs_info->new_trans_lock); 1617 spin_lock_init(&fs_info->trans_lock);
1617 spin_lock_init(&fs_info->ref_cache_lock); 1618 spin_lock_init(&fs_info->ref_cache_lock);
1618 spin_lock_init(&fs_info->fs_roots_radix_lock); 1619 spin_lock_init(&fs_info->fs_roots_radix_lock);
1619 spin_lock_init(&fs_info->delayed_iput_lock); 1620 spin_lock_init(&fs_info->delayed_iput_lock);
1620 spin_lock_init(&fs_info->defrag_inodes_lock); 1621 spin_lock_init(&fs_info->defrag_inodes_lock);
1622 mutex_init(&fs_info->reloc_mutex);
1621 1623
1622 init_completion(&fs_info->kobj_unregister); 1624 init_completion(&fs_info->kobj_unregister);
1623 fs_info->tree_root = tree_root; 1625 fs_info->tree_root = tree_root;
@@ -1645,6 +1647,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1645 fs_info->max_inline = 8192 * 1024; 1647 fs_info->max_inline = 8192 * 1024;
1646 fs_info->metadata_ratio = 0; 1648 fs_info->metadata_ratio = 0;
1647 fs_info->defrag_inodes = RB_ROOT; 1649 fs_info->defrag_inodes = RB_ROOT;
1650 fs_info->trans_no_join = 0;
1648 1651
1649 fs_info->thread_pool_size = min_t(unsigned long, 1652 fs_info->thread_pool_size = min_t(unsigned long,
1650 num_online_cpus() + 2, 8); 1653 num_online_cpus() + 2, 8);
@@ -1667,8 +1670,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1667 init_waitqueue_head(&fs_info->scrub_pause_wait); 1670 init_waitqueue_head(&fs_info->scrub_pause_wait);
1668 init_rwsem(&fs_info->scrub_super_lock); 1671 init_rwsem(&fs_info->scrub_super_lock);
1669 fs_info->scrub_workers_refcnt = 0; 1672 fs_info->scrub_workers_refcnt = 0;
1670 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1671 fs_info->thread_pool_size, &fs_info->generic_worker);
1672 1673
1673 sb->s_blocksize = 4096; 1674 sb->s_blocksize = 4096;
1674 sb->s_blocksize_bits = blksize_bits(4096); 1675 sb->s_blocksize_bits = blksize_bits(4096);
@@ -1709,7 +1710,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1709 fs_info->do_barriers = 1; 1710 fs_info->do_barriers = 1;
1710 1711
1711 1712
1712 mutex_init(&fs_info->trans_mutex);
1713 mutex_init(&fs_info->ordered_operations_mutex); 1713 mutex_init(&fs_info->ordered_operations_mutex);
1714 mutex_init(&fs_info->tree_log_mutex); 1714 mutex_init(&fs_info->tree_log_mutex);
1715 mutex_init(&fs_info->chunk_mutex); 1715 mutex_init(&fs_info->chunk_mutex);
@@ -2479,13 +2479,13 @@ int btrfs_commit_super(struct btrfs_root *root)
2479 down_write(&root->fs_info->cleanup_work_sem); 2479 down_write(&root->fs_info->cleanup_work_sem);
2480 up_write(&root->fs_info->cleanup_work_sem); 2480 up_write(&root->fs_info->cleanup_work_sem);
2481 2481
2482 trans = btrfs_join_transaction(root, 1); 2482 trans = btrfs_join_transaction(root);
2483 if (IS_ERR(trans)) 2483 if (IS_ERR(trans))
2484 return PTR_ERR(trans); 2484 return PTR_ERR(trans);
2485 ret = btrfs_commit_transaction(trans, root); 2485 ret = btrfs_commit_transaction(trans, root);
2486 BUG_ON(ret); 2486 BUG_ON(ret);
2487 /* run commit again to drop the original snapshot */ 2487 /* run commit again to drop the original snapshot */
2488 trans = btrfs_join_transaction(root, 1); 2488 trans = btrfs_join_transaction(root);
2489 if (IS_ERR(trans)) 2489 if (IS_ERR(trans))
2490 return PTR_ERR(trans); 2490 return PTR_ERR(trans);
2491 btrfs_commit_transaction(trans, root); 2491 btrfs_commit_transaction(trans, root);
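
btrfs_join_transaction() loses its second argument throughout this patch, but its return convention is unchanged: callers still get an ERR_PTR-encoded pointer and test it with IS_ERR()/PTR_ERR() before committing. A self-contained sketch of that error-pointer convention; the ERR_PTR/IS_ERR/PTR_ERR helpers below are userspace re-implementations for illustration, and the toy join_transaction() is not the kernel function:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal userspace take on the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct trans_handle { unsigned long transid; };

/* Toy join: fails with -ENOMEM when asked to, otherwise hands back a handle. */
static struct trans_handle *join_transaction(int fail)
{
    struct trans_handle *t;

    if (fail)
        return ERR_PTR(-ENOMEM);
    t = malloc(sizeof(*t));
    if (!t)
        return ERR_PTR(-ENOMEM);
    t->transid = 1;
    return t;
}

int main(void)
{
    struct trans_handle *trans = join_transaction(0);

    if (IS_ERR(trans))
        return (int)-PTR_ERR(trans);
    printf("joined transaction %lu\n", trans->transid);
    free(trans);
    return 0;
}
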
@@ -2911,9 +2911,8 @@ static int btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
2911 2911
2912 INIT_LIST_HEAD(&splice); 2912 INIT_LIST_HEAD(&splice);
2913 2913
2914 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2915
2916 spin_lock(&root->fs_info->delalloc_lock); 2914 spin_lock(&root->fs_info->delalloc_lock);
2915 list_splice_init(&root->fs_info->delalloc_inodes, &splice);
2917 2916
2918 while (!list_empty(&splice)) { 2917 while (!list_empty(&splice)) {
2919 btrfs_inode = list_entry(splice.next, struct btrfs_inode, 2918 btrfs_inode = list_entry(splice.next, struct btrfs_inode,
@@ -3024,10 +3023,13 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3024 3023
3025 WARN_ON(1); 3024 WARN_ON(1);
3026 3025
3027 mutex_lock(&root->fs_info->trans_mutex);
3028 mutex_lock(&root->fs_info->transaction_kthread_mutex); 3026 mutex_lock(&root->fs_info->transaction_kthread_mutex);
3029 3027
3028 spin_lock(&root->fs_info->trans_lock);
3030 list_splice_init(&root->fs_info->trans_list, &list); 3029 list_splice_init(&root->fs_info->trans_list, &list);
3030 root->fs_info->trans_no_join = 1;
3031 spin_unlock(&root->fs_info->trans_lock);
3032
3031 while (!list_empty(&list)) { 3033 while (!list_empty(&list)) {
3032 t = list_entry(list.next, struct btrfs_transaction, list); 3034 t = list_entry(list.next, struct btrfs_transaction, list);
3033 if (!t) 3035 if (!t)
@@ -3052,23 +3054,18 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3052 t->blocked = 0; 3054 t->blocked = 0;
3053 if (waitqueue_active(&root->fs_info->transaction_wait)) 3055 if (waitqueue_active(&root->fs_info->transaction_wait))
3054 wake_up(&root->fs_info->transaction_wait); 3056 wake_up(&root->fs_info->transaction_wait);
3055 mutex_unlock(&root->fs_info->trans_mutex);
3056 3057
3057 mutex_lock(&root->fs_info->trans_mutex);
3058 t->commit_done = 1; 3058 t->commit_done = 1;
3059 if (waitqueue_active(&t->commit_wait)) 3059 if (waitqueue_active(&t->commit_wait))
3060 wake_up(&t->commit_wait); 3060 wake_up(&t->commit_wait);
3061 mutex_unlock(&root->fs_info->trans_mutex);
3062
3063 mutex_lock(&root->fs_info->trans_mutex);
3064 3061
3065 btrfs_destroy_pending_snapshots(t); 3062 btrfs_destroy_pending_snapshots(t);
3066 3063
3067 btrfs_destroy_delalloc_inodes(root); 3064 btrfs_destroy_delalloc_inodes(root);
3068 3065
3069 spin_lock(&root->fs_info->new_trans_lock); 3066 spin_lock(&root->fs_info->trans_lock);
3070 root->fs_info->running_transaction = NULL; 3067 root->fs_info->running_transaction = NULL;
3071 spin_unlock(&root->fs_info->new_trans_lock); 3068 spin_unlock(&root->fs_info->trans_lock);
3072 3069
3073 btrfs_destroy_marked_extents(root, &t->dirty_pages, 3070 btrfs_destroy_marked_extents(root, &t->dirty_pages,
3074 EXTENT_DIRTY); 3071 EXTENT_DIRTY);
@@ -3082,8 +3079,10 @@ static int btrfs_cleanup_transaction(struct btrfs_root *root)
3082 kmem_cache_free(btrfs_transaction_cachep, t); 3079 kmem_cache_free(btrfs_transaction_cachep, t);
3083 } 3080 }
3084 3081
3082 spin_lock(&root->fs_info->trans_lock);
3083 root->fs_info->trans_no_join = 0;
3084 spin_unlock(&root->fs_info->trans_lock);
3085 mutex_unlock(&root->fs_info->transaction_kthread_mutex); 3085 mutex_unlock(&root->fs_info->transaction_kthread_mutex);
3086 mutex_unlock(&root->fs_info->trans_mutex);
3087 3086
3088 return 0; 3087 return 0;
3089} 3088}
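
With trans_mutex gone, btrfs_cleanup_transaction() serializes against new joiners by raising trans_no_join under trans_lock before splicing off trans_list, and clearing it again once teardown finishes. A compact sketch of that gate-flag idiom; the pthread mutex stands in for the kernel spinlock, and try_join()/cleanup() are illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t trans_lock = PTHREAD_MUTEX_INITIALIZER;
static int trans_no_join;              /* 1 while cleanup owns the list */

/* New joiners check the gate under the same lock cleanup uses to set it. */
static int try_join(void)
{
    int ok;

    pthread_mutex_lock(&trans_lock);
    ok = !trans_no_join;
    pthread_mutex_unlock(&trans_lock);
    return ok;
}

static void cleanup(void)
{
    pthread_mutex_lock(&trans_lock);
    trans_no_join = 1;                 /* close the gate ... */
    pthread_mutex_unlock(&trans_lock);

    /* ... tear the stale transactions down here, without the lock ... */

    pthread_mutex_lock(&trans_lock);
    trans_no_join = 0;                 /* ... and reopen it when done */
    pthread_mutex_unlock(&trans_lock);
}

int main(void)
{
    cleanup();
    printf("join allowed: %d\n", try_join());
    return 0;
}
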
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 169bd62ce776..1f61bf5b4960 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -348,7 +348,7 @@ static int caching_kthread(void *data)
348 */ 348 */
349 path->skip_locking = 1; 349 path->skip_locking = 1;
350 path->search_commit_root = 1; 350 path->search_commit_root = 1;
351 path->reada = 2; 351 path->reada = 1;
352 352
353 key.objectid = last; 353 key.objectid = last;
354 key.offset = 0; 354 key.offset = 0;
@@ -366,8 +366,7 @@ again:
366 nritems = btrfs_header_nritems(leaf); 366 nritems = btrfs_header_nritems(leaf);
367 367
368 while (1) { 368 while (1) {
369 smp_mb(); 369 if (btrfs_fs_closing(fs_info) > 1) {
370 if (fs_info->closing > 1) {
371 last = (u64)-1; 370 last = (u64)-1;
372 break; 371 break;
373 } 372 }
@@ -379,15 +378,18 @@ again:
379 if (ret) 378 if (ret)
380 break; 379 break;
381 380
382 caching_ctl->progress = last; 381 if (need_resched() ||
383 btrfs_release_path(path); 382 btrfs_next_leaf(extent_root, path)) {
384 up_read(&fs_info->extent_commit_sem); 383 caching_ctl->progress = last;
385 mutex_unlock(&caching_ctl->mutex); 384 btrfs_release_path(path);
386 if (btrfs_transaction_in_commit(fs_info)) 385 up_read(&fs_info->extent_commit_sem);
387 schedule_timeout(1); 386 mutex_unlock(&caching_ctl->mutex);
388 else
389 cond_resched(); 387 cond_resched();
390 goto again; 388 goto again;
389 }
390 leaf = path->nodes[0];
391 nritems = btrfs_header_nritems(leaf);
392 continue;
391 } 393 }
392 394
393 if (key.objectid < block_group->key.objectid) { 395 if (key.objectid < block_group->key.objectid) {
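
The caching kthread above, like many call sites later in the patch, replaces the open-coded smp_mb(); fs_info->closing read with the btrfs_fs_closing() helper, so the memory-ordering detail lives in a single accessor. A rough userspace analogue using C11 atomics rather than the kernel's primitives; fs_closing() and the state values in the comment are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct fs_info {
    /* illustrative states: 0 running, 1 closing, 2 closing for good */
    atomic_int closing;
};

/* One accessor instead of a barrier plus a raw read at every call site. */
static int fs_closing(struct fs_info *fs)
{
    return atomic_load(&fs->closing);
}

int main(void)
{
    struct fs_info fs;

    atomic_init(&fs.closing, 0);
    atomic_store(&fs.closing, 2);
    if (fs_closing(&fs) > 1)
        puts("unmount is past the point of no return, stop caching");
    return 0;
}
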
@@ -3065,7 +3067,7 @@ again:
3065 spin_unlock(&data_sinfo->lock); 3067 spin_unlock(&data_sinfo->lock);
3066alloc: 3068alloc:
3067 alloc_target = btrfs_get_alloc_profile(root, 1); 3069 alloc_target = btrfs_get_alloc_profile(root, 1);
3068 trans = btrfs_join_transaction(root, 1); 3070 trans = btrfs_join_transaction(root);
3069 if (IS_ERR(trans)) 3071 if (IS_ERR(trans))
3070 return PTR_ERR(trans); 3072 return PTR_ERR(trans);
3071 3073
@@ -3087,13 +3089,21 @@ alloc:
3087 } 3089 }
3088 goto again; 3090 goto again;
3089 } 3091 }
3092
3093 /*
3094 * If we have less pinned bytes than we want to allocate then
3095 * don't bother committing the transaction, it won't help us.
3096 */
3097 if (data_sinfo->bytes_pinned < bytes)
3098 committed = 1;
3090 spin_unlock(&data_sinfo->lock); 3099 spin_unlock(&data_sinfo->lock);
3091 3100
3092 /* commit the current transaction and try again */ 3101 /* commit the current transaction and try again */
3093commit_trans: 3102commit_trans:
3094 if (!committed && !root->fs_info->open_ioctl_trans) { 3103 if (!committed &&
3104 !atomic_read(&root->fs_info->open_ioctl_trans)) {
3095 committed = 1; 3105 committed = 1;
3096 trans = btrfs_join_transaction(root, 1); 3106 trans = btrfs_join_transaction(root);
3097 if (IS_ERR(trans)) 3107 if (IS_ERR(trans))
3098 return PTR_ERR(trans); 3108 return PTR_ERR(trans);
3099 ret = btrfs_commit_transaction(trans, root); 3109 ret = btrfs_commit_transaction(trans, root);
@@ -3304,10 +3314,6 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3304 if (reserved == 0) 3314 if (reserved == 0)
3305 return 0; 3315 return 0;
3306 3316
3307 /* nothing to shrink - nothing to reclaim */
3308 if (root->fs_info->delalloc_bytes == 0)
3309 return 0;
3310
3311 max_reclaim = min(reserved, to_reclaim); 3317 max_reclaim = min(reserved, to_reclaim);
3312 3318
3313 while (loops < 1024) { 3319 while (loops < 1024) {
@@ -3472,7 +3478,7 @@ again:
3472 goto out; 3478 goto out;
3473 3479
3474 ret = -ENOSPC; 3480 ret = -ENOSPC;
3475 trans = btrfs_join_transaction(root, 1); 3481 trans = btrfs_join_transaction(root);
3476 if (IS_ERR(trans)) 3482 if (IS_ERR(trans))
3477 goto out; 3483 goto out;
3478 ret = btrfs_commit_transaction(trans, root); 3484 ret = btrfs_commit_transaction(trans, root);
@@ -3699,7 +3705,7 @@ int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3699 if (trans) 3705 if (trans)
3700 return -EAGAIN; 3706 return -EAGAIN;
3701 3707
3702 trans = btrfs_join_transaction(root, 1); 3708 trans = btrfs_join_transaction(root);
3703 BUG_ON(IS_ERR(trans)); 3709 BUG_ON(IS_ERR(trans));
3704 ret = btrfs_commit_transaction(trans, root); 3710 ret = btrfs_commit_transaction(trans, root);
3705 return 0; 3711 return 0;
@@ -3837,6 +3843,37 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3837 WARN_ON(fs_info->chunk_block_rsv.reserved > 0); 3843 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3838} 3844}
3839 3845
3846int btrfs_truncate_reserve_metadata(struct btrfs_trans_handle *trans,
3847 struct btrfs_root *root,
3848 struct btrfs_block_rsv *rsv)
3849{
3850 struct btrfs_block_rsv *trans_rsv = &root->fs_info->trans_block_rsv;
3851 u64 num_bytes;
3852 int ret;
3853
3854 /*
3855 * Truncate should be freeing data, but give us 2 items just in case it
3856 * needs to use some space. We may want to be smarter about this in the
3857 * future.
3858 */
3859 num_bytes = btrfs_calc_trans_metadata_size(root, 2);
3860
3861 /* We already have enough bytes, just return */
3862 if (rsv->reserved >= num_bytes)
3863 return 0;
3864
3865 num_bytes -= rsv->reserved;
3866
3867 /*
3868 * You should have reserved enough space beforehand to do this, so this
3869 * should not fail.
3870 */
3871 ret = block_rsv_migrate_bytes(trans_rsv, rsv, num_bytes);
3872 BUG_ON(ret);
3873
3874 return 0;
3875}
3876
3840int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, 3877int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3841 struct btrfs_root *root, 3878 struct btrfs_root *root,
3842 int num_items) 3879 int num_items)
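
btrfs_truncate_reserve_metadata() tops a reservation up to two items' worth of metadata, migrating only the shortfall out of the transaction reserve. A small sketch of that top-up-by-difference logic; the per-item size, migrate_bytes() helper and byte counts in main() are made-up stand-ins for btrfs_calc_trans_metadata_size() and block_rsv_migrate_bytes():

#include <stdint.h>
#include <stdio.h>

struct block_rsv { uint64_t reserved; };

/* Illustrative only: btrfs derives this from node size and tree depth. */
static uint64_t calc_metadata_size(uint64_t per_item, int nr_items)
{
    return per_item * (uint64_t)nr_items;
}

static int migrate_bytes(struct block_rsv *src, struct block_rsv *dst,
                         uint64_t bytes)
{
    if (src->reserved < bytes)
        return -1;                      /* caller reserved too little up front */
    src->reserved -= bytes;
    dst->reserved += bytes;
    return 0;
}

/* Top a reservation up to "2 items" worth, moving only the shortfall. */
static int truncate_reserve(struct block_rsv *trans_rsv, struct block_rsv *rsv,
                            uint64_t per_item)
{
    uint64_t num_bytes = calc_metadata_size(per_item, 2);

    if (rsv->reserved >= num_bytes)
        return 0;                       /* already have enough */
    return migrate_bytes(trans_rsv, rsv, num_bytes - rsv->reserved);
}

int main(void)
{
    struct block_rsv trans_rsv = { .reserved = 1 << 20 };
    struct block_rsv rsv = { .reserved = 4096 };

    if (truncate_reserve(&trans_rsv, &rsv, 16384) == 0)
        printf("rsv now holds %llu bytes\n",
               (unsigned long long)rsv.reserved);
    return 0;
}
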
@@ -3877,23 +3914,18 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3877 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv; 3914 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3878 3915
3879 /* 3916 /*
3880 * one for deleting orphan item, one for updating inode and 3917 * We need to hold space in order to delete our orphan item once we've
3881 * two for calling btrfs_truncate_inode_items. 3918 * added it, so this takes the reservation so we can release it later
3882 * 3919 * when we are truly done with the orphan item.
3883 * btrfs_truncate_inode_items is a delete operation, it frees
3884 * more space than it uses in most cases. So two units of
3885 * metadata space should be enough for calling it many times.
3886 * If all of the metadata space is used, we can commit
3887 * transaction and use space it freed.
3888 */ 3920 */
3889 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); 3921 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3890 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); 3922 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3891} 3923}
3892 3924
3893void btrfs_orphan_release_metadata(struct inode *inode) 3925void btrfs_orphan_release_metadata(struct inode *inode)
3894{ 3926{
3895 struct btrfs_root *root = BTRFS_I(inode)->root; 3927 struct btrfs_root *root = BTRFS_I(inode)->root;
3896 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); 3928 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
3897 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); 3929 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3898} 3930}
3899 3931
@@ -4987,6 +5019,15 @@ have_block_group:
4987 if (unlikely(block_group->ro)) 5019 if (unlikely(block_group->ro))
4988 goto loop; 5020 goto loop;
4989 5021
5022 spin_lock(&block_group->free_space_ctl->tree_lock);
5023 if (cached &&
5024 block_group->free_space_ctl->free_space <
5025 num_bytes + empty_size) {
5026 spin_unlock(&block_group->free_space_ctl->tree_lock);
5027 goto loop;
5028 }
5029 spin_unlock(&block_group->free_space_ctl->tree_lock);
5030
4990 /* 5031 /*
4991 * Ok we want to try and use the cluster allocator, so lets look 5032 * Ok we want to try and use the cluster allocator, so lets look
4992 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will 5033 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
@@ -5150,6 +5191,7 @@ checks:
5150 btrfs_add_free_space(block_group, offset, 5191 btrfs_add_free_space(block_group, offset,
5151 search_start - offset); 5192 search_start - offset);
5152 BUG_ON(offset > search_start); 5193 BUG_ON(offset > search_start);
5194 btrfs_put_block_group(block_group);
5153 break; 5195 break;
5154loop: 5196loop:
5155 failed_cluster_refill = false; 5197 failed_cluster_refill = false;
@@ -5172,9 +5214,7 @@ loop:
5172 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try 5214 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5173 * again 5215 * again
5174 */ 5216 */
5175 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE && 5217 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
5176 (found_uncached_bg || empty_size || empty_cluster ||
5177 allowed_chunk_alloc)) {
5178 index = 0; 5218 index = 0;
5179 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) { 5219 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5180 found_uncached_bg = false; 5220 found_uncached_bg = false;
@@ -5214,42 +5254,39 @@ loop:
5214 goto search; 5254 goto search;
5215 } 5255 }
5216 5256
5217 if (loop < LOOP_CACHING_WAIT) { 5257 loop++;
5218 loop++;
5219 goto search;
5220 }
5221 5258
5222 if (loop == LOOP_ALLOC_CHUNK) { 5259 if (loop == LOOP_ALLOC_CHUNK) {
5223 empty_size = 0; 5260 if (allowed_chunk_alloc) {
5224 empty_cluster = 0; 5261 ret = do_chunk_alloc(trans, root, num_bytes +
5225 } 5262 2 * 1024 * 1024, data,
5263 CHUNK_ALLOC_LIMITED);
5264 allowed_chunk_alloc = 0;
5265 if (ret == 1)
5266 done_chunk_alloc = 1;
5267 } else if (!done_chunk_alloc &&
5268 space_info->force_alloc ==
5269 CHUNK_ALLOC_NO_FORCE) {
5270 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5271 }
5226 5272
5227 if (allowed_chunk_alloc) { 5273 /*
5228 ret = do_chunk_alloc(trans, root, num_bytes + 5274 * We didn't allocate a chunk, go ahead and drop the
5229 2 * 1024 * 1024, data, 5275 * empty size and loop again.
5230 CHUNK_ALLOC_LIMITED); 5276 */
5231 allowed_chunk_alloc = 0; 5277 if (!done_chunk_alloc)
5232 done_chunk_alloc = 1; 5278 loop = LOOP_NO_EMPTY_SIZE;
5233 } else if (!done_chunk_alloc &&
5234 space_info->force_alloc == CHUNK_ALLOC_NO_FORCE) {
5235 space_info->force_alloc = CHUNK_ALLOC_LIMITED;
5236 } 5279 }
5237 5280
5238 if (loop < LOOP_NO_EMPTY_SIZE) { 5281 if (loop == LOOP_NO_EMPTY_SIZE) {
5239 loop++; 5282 empty_size = 0;
5240 goto search; 5283 empty_cluster = 0;
5241 } 5284 }
5242 ret = -ENOSPC; 5285
5286 goto search;
5243 } else if (!ins->objectid) { 5287 } else if (!ins->objectid) {
5244 ret = -ENOSPC; 5288 ret = -ENOSPC;
5245 } 5289 } else if (ins->objectid) {
5246
5247 /* we found what we needed */
5248 if (ins->objectid) {
5249 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5250 trans->block_group = block_group->key.objectid;
5251
5252 btrfs_put_block_group(block_group);
5253 ret = 0; 5290 ret = 0;
5254 } 5291 }
5255 5292
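
The find_free_extent rewrite above folds the separate "bump loop and retry" branches into one path: loop always advances, LOOP_ALLOC_CHUNK either allocates a chunk or skips straight to LOOP_NO_EMPTY_SIZE, and only that last stage drops empty_size and empty_cluster. A toy state machine showing the same stage progression; try_alloc() and the sizes are fabricated, and this is not the allocator's real logic:

#include <stdio.h>

enum loop_stage {
    LOOP_CACHING_NOWAIT,
    LOOP_CACHING_WAIT,
    LOOP_ALLOC_CHUNK,
    LOOP_NO_EMPTY_SIZE,
};

/* Toy allocation attempt: only succeeds once empty_size has been dropped. */
static int try_alloc(int empty_size)
{
    return empty_size == 0;
}

int main(void)
{
    int empty_size = 4096;
    int allowed_chunk_alloc = 1, done_chunk_alloc = 0;

    for (int loop = LOOP_CACHING_NOWAIT; loop <= LOOP_NO_EMPTY_SIZE; loop++) {
        if (loop == LOOP_ALLOC_CHUNK) {
            if (allowed_chunk_alloc) {
                /* pretend the chunk allocation happened */
                allowed_chunk_alloc = 0;
                done_chunk_alloc = 1;
            }
            if (!done_chunk_alloc)       /* nothing new to search: skip ahead */
                loop = LOOP_NO_EMPTY_SIZE - 1;
        }
        if (loop == LOOP_NO_EMPTY_SIZE)
            empty_size = 0;              /* last resort: relax the request */

        if (try_alloc(empty_size)) {
            printf("allocated at stage %d\n", loop);
            return 0;
        }
    }
    puts("ENOSPC");
    return 1;
}
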
@@ -6526,7 +6563,7 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
6526 6563
6527 BUG_ON(cache->ro); 6564 BUG_ON(cache->ro);
6528 6565
6529 trans = btrfs_join_transaction(root, 1); 6566 trans = btrfs_join_transaction(root);
6530 BUG_ON(IS_ERR(trans)); 6567 BUG_ON(IS_ERR(trans));
6531 6568
6532 alloc_flags = update_block_group_flags(root, cache->flags); 6569 alloc_flags = update_block_group_flags(root, cache->flags);
@@ -6882,6 +6919,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
6882 path = btrfs_alloc_path(); 6919 path = btrfs_alloc_path();
6883 if (!path) 6920 if (!path)
6884 return -ENOMEM; 6921 return -ENOMEM;
6922 path->reada = 1;
6885 6923
6886 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy); 6924 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
6887 if (cache_gen != 0 && 6925 if (cache_gen != 0 &&
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c5d9fbb92bc3..7055d11c1efd 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1476,7 +1476,7 @@ u64 count_range_bits(struct extent_io_tree *tree,
1476 if (total_bytes >= max_bytes) 1476 if (total_bytes >= max_bytes)
1477 break; 1477 break;
1478 if (!found) { 1478 if (!found) {
1479 *start = state->start; 1479 *start = max(cur_start, state->start);
1480 found = 1; 1480 found = 1;
1481 } 1481 }
1482 last = state->end; 1482 last = state->end;
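
count_range_bits() now reports max(cur_start, state->start), so a state that begins before the queried range cannot pull *start backwards. A tiny sketch of that clamp; struct state and first_hit_start() are illustrative names:

#include <stdint.h>
#include <stdio.h>

struct state { uint64_t start, end; };

static inline uint64_t max_u64(uint64_t a, uint64_t b)
{
    return a > b ? a : b;
}

/*
 * If the first matching state straddles the query start, report the query
 * start rather than the state's start, mirroring the max() fix above.
 */
static uint64_t first_hit_start(const struct state *s, uint64_t cur_start)
{
    return max_u64(cur_start, s->start);
}

int main(void)
{
    struct state s = { .start = 0, .end = 8191 };

    printf("range begins at %llu\n",
           (unsigned long long)first_hit_start(&s, 4096));
    return 0;
}
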
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 4e8445a4757c..a11a92ee2d30 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -126,9 +126,9 @@ struct extent_buffer {
126 unsigned long map_len; 126 unsigned long map_len;
127 struct page *first_page; 127 struct page *first_page;
128 unsigned long bflags; 128 unsigned long bflags;
129 atomic_t refs;
130 struct list_head leak_list; 129 struct list_head leak_list;
131 struct rcu_head rcu_head; 130 struct rcu_head rcu_head;
131 atomic_t refs;
132 132
133 /* the spinlock is used to protect most operations */ 133 /* the spinlock is used to protect most operations */
134 spinlock_t lock; 134 spinlock_t lock;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c6a22d783c35..fa4ef18b66b1 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -129,7 +129,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
129 if (!btrfs_test_opt(root, AUTO_DEFRAG)) 129 if (!btrfs_test_opt(root, AUTO_DEFRAG))
130 return 0; 130 return 0;
131 131
132 if (root->fs_info->closing) 132 if (btrfs_fs_closing(root->fs_info))
133 return 0; 133 return 0;
134 134
135 if (BTRFS_I(inode)->in_defrag) 135 if (BTRFS_I(inode)->in_defrag)
@@ -144,7 +144,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
144 if (!defrag) 144 if (!defrag)
145 return -ENOMEM; 145 return -ENOMEM;
146 146
147 defrag->ino = inode->i_ino; 147 defrag->ino = btrfs_ino(inode);
148 defrag->transid = transid; 148 defrag->transid = transid;
149 defrag->root = root->root_key.objectid; 149 defrag->root = root->root_key.objectid;
150 150
@@ -229,7 +229,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
229 first_ino = defrag->ino + 1; 229 first_ino = defrag->ino + 1;
230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); 230 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
231 231
232 if (fs_info->closing) 232 if (btrfs_fs_closing(fs_info))
233 goto next_free; 233 goto next_free;
234 234
235 spin_unlock(&fs_info->defrag_inodes_lock); 235 spin_unlock(&fs_info->defrag_inodes_lock);
@@ -1480,14 +1480,12 @@ int btrfs_sync_file(struct file *file, int datasync)
1480 * the current transaction, we can bail out now without any 1480 * the current transaction, we can bail out now without any
1481 * syncing 1481 * syncing
1482 */ 1482 */
1483 mutex_lock(&root->fs_info->trans_mutex); 1483 smp_mb();
1484 if (BTRFS_I(inode)->last_trans <= 1484 if (BTRFS_I(inode)->last_trans <=
1485 root->fs_info->last_trans_committed) { 1485 root->fs_info->last_trans_committed) {
1486 BTRFS_I(inode)->last_trans = 0; 1486 BTRFS_I(inode)->last_trans = 0;
1487 mutex_unlock(&root->fs_info->trans_mutex);
1488 goto out; 1487 goto out;
1489 } 1488 }
1490 mutex_unlock(&root->fs_info->trans_mutex);
1491 1489
1492 /* 1490 /*
1493 * ok we haven't committed the transaction yet, lets do a commit 1491 * ok we haven't committed the transaction yet, lets do a commit
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 70d45795d758..9f985a429877 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -98,7 +98,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root,
98 return inode; 98 return inode;
99 99
100 spin_lock(&block_group->lock); 100 spin_lock(&block_group->lock);
101 if (!root->fs_info->closing) { 101 if (!btrfs_fs_closing(root->fs_info)) {
102 block_group->inode = igrab(inode); 102 block_group->inode = igrab(inode);
103 block_group->iref = 1; 103 block_group->iref = 1;
104 } 104 }
@@ -250,7 +250,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
250 pgoff_t index = 0; 250 pgoff_t index = 0;
251 unsigned long first_page_offset; 251 unsigned long first_page_offset;
252 int num_checksums; 252 int num_checksums;
253 int ret = 0, ret2; 253 int ret = 0;
254 254
255 INIT_LIST_HEAD(&bitmaps); 255 INIT_LIST_HEAD(&bitmaps);
256 256
@@ -402,7 +402,14 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
402 spin_lock(&ctl->tree_lock); 402 spin_lock(&ctl->tree_lock);
403 ret = link_free_space(ctl, e); 403 ret = link_free_space(ctl, e);
404 spin_unlock(&ctl->tree_lock); 404 spin_unlock(&ctl->tree_lock);
405 BUG_ON(ret); 405 if (ret) {
406 printk(KERN_ERR "Duplicate entries in "
407 "free space cache, dumping\n");
408 kunmap(page);
409 unlock_page(page);
410 page_cache_release(page);
411 goto free_cache;
412 }
406 } else { 413 } else {
407 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); 414 e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
408 if (!e->bitmap) { 415 if (!e->bitmap) {
@@ -414,10 +421,18 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
414 goto free_cache; 421 goto free_cache;
415 } 422 }
416 spin_lock(&ctl->tree_lock); 423 spin_lock(&ctl->tree_lock);
417 ret2 = link_free_space(ctl, e); 424 ret = link_free_space(ctl, e);
418 ctl->total_bitmaps++; 425 ctl->total_bitmaps++;
419 ctl->op->recalc_thresholds(ctl); 426 ctl->op->recalc_thresholds(ctl);
420 spin_unlock(&ctl->tree_lock); 427 spin_unlock(&ctl->tree_lock);
428 if (ret) {
429 printk(KERN_ERR "Duplicate entries in "
430 "free space cache, dumping\n");
431 kunmap(page);
432 unlock_page(page);
433 page_cache_release(page);
434 goto free_cache;
435 }
421 list_add_tail(&e->list, &bitmaps); 436 list_add_tail(&e->list, &bitmaps);
422 } 437 }
423 438
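
Both branches above stop BUG()-ing on a failed link_free_space(): a duplicate entry is treated as a corrupt cache, a message is printed, the page is released and the whole cache is dumped. A small sketch of that fail-soft loading pattern; the array-backed cache and link_entry() are stand-ins for the real rb-tree code:

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 16

struct cache { unsigned long long offsets[MAX_ENTRIES]; int count; };

/* Returns -1 on a duplicate offset, mirroring link_free_space() failing. */
static int link_entry(struct cache *c, unsigned long long offset)
{
    for (int i = 0; i < c->count; i++)
        if (c->offsets[i] == offset)
            return -1;
    if (c->count == MAX_ENTRIES)
        return -1;
    c->offsets[c->count++] = offset;
    return 0;
}

/* On corruption, warn and throw the whole cache away instead of crashing. */
static int load_cache(struct cache *c, const unsigned long long *disk, int n)
{
    for (int i = 0; i < n; i++) {
        if (link_entry(c, disk[i])) {
            fprintf(stderr, "duplicate entries in free space cache, dumping\n");
            memset(c, 0, sizeof(*c));
            return -1;
        }
    }
    return 0;
}

int main(void)
{
    unsigned long long disk[] = { 4096, 8192, 4096 };
    struct cache c = { .count = 0 };

    return load_cache(&c, disk, 3) ? 1 : 0;
}
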
@@ -478,8 +493,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
478 * If we're unmounting then just return, since this does a search on the 493 * If we're unmounting then just return, since this does a search on the
479 * normal root and not the commit root and we could deadlock. 494 * normal root and not the commit root and we could deadlock.
480 */ 495 */
481 smp_mb(); 496 if (btrfs_fs_closing(fs_info))
482 if (fs_info->closing)
483 return 0; 497 return 0;
484 498
485 /* 499 /*
@@ -575,10 +589,25 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
575 589
576 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> 590 num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
577 PAGE_CACHE_SHIFT; 591 PAGE_CACHE_SHIFT;
592
593 /* Since the first page has all of our checksums and our generation we
594 * need to calculate the offset into the page that we can start writing
595 * our entries.
596 */
597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
598
578 filemap_write_and_wait(inode->i_mapping); 599 filemap_write_and_wait(inode->i_mapping);
579 btrfs_wait_ordered_range(inode, inode->i_size & 600 btrfs_wait_ordered_range(inode, inode->i_size &
580 ~(root->sectorsize - 1), (u64)-1); 601 ~(root->sectorsize - 1), (u64)-1);
581 602
603 /* make sure we don't overflow that first page */
604 if (first_page_offset + sizeof(struct btrfs_free_space_entry) >= PAGE_CACHE_SIZE) {
605 /* this is really the same as running out of space, where we also return 0 */
606 printk(KERN_CRIT "Btrfs: free space cache was too big for the crc page\n");
607 ret = 0;
608 goto out_update;
609 }
610
582 /* We need a checksum per page. */ 611 /* We need a checksum per page. */
583 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); 612 crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS);
584 if (!crc) 613 if (!crc)
@@ -590,12 +619,6 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
590 return -1; 619 return -1;
591 } 620 }
592 621
593 /* Since the first page has all of our checksums and our generation we
594 * need to calculate the offset into the page that we can start writing
595 * our entries.
596 */
597 first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64);
598
599 /* Get the cluster for this block_group if it exists */ 622 /* Get the cluster for this block_group if it exists */
600 if (block_group && !list_empty(&block_group->cluster_list)) 623 if (block_group && !list_empty(&block_group->cluster_list))
601 cluster = list_entry(block_group->cluster_list.next, 624 cluster = list_entry(block_group->cluster_list.next,
@@ -857,12 +880,14 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
857 ret = 1; 880 ret = 1;
858 881
859out_free: 882out_free:
883 kfree(checksums);
884 kfree(pages);
885
886out_update:
860 if (ret != 1) { 887 if (ret != 1) {
861 invalidate_inode_pages2_range(inode->i_mapping, 0, index); 888 invalidate_inode_pages2_range(inode->i_mapping, 0, index);
862 BTRFS_I(inode)->generation = 0; 889 BTRFS_I(inode)->generation = 0;
863 } 890 }
864 kfree(checksums);
865 kfree(pages);
866 btrfs_update_inode(trans, root, inode); 891 btrfs_update_inode(trans, root, inode);
867 return ret; 892 return ret;
868} 893}
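
The write-out hunks above compute first_page_offset (one u32 checksum per page plus a u64 generation) before any allocations and bail out if that header would leave no room for even one entry in the first page, rather than discovering the overflow later. A sketch of the size check; the page size, entry size and helper names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define CACHE_PAGE_SIZE 4096u
#define ENTRY_SIZE      17u   /* illustrative size of one on-disk entry */

/* First page holds one u32 checksum per page plus a u64 generation. */
static uint32_t first_page_offset(uint32_t num_pages)
{
    return num_pages * (uint32_t)sizeof(uint32_t) + (uint32_t)sizeof(uint64_t);
}

/* Refuse to write a cache whose header leaves no room for entries. */
static int cache_fits(uint32_t num_pages)
{
    return first_page_offset(num_pages) + ENTRY_SIZE < CACHE_PAGE_SIZE;
}

int main(void)
{
    printf("128 pages: header at %u, fits=%d\n",
           (unsigned)first_page_offset(128), cache_fits(128));
    printf("2000 pages: header at %u, fits=%d\n",
           (unsigned)first_page_offset(2000), cache_fits(2000));
    return 0;
}
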
@@ -963,10 +988,16 @@ static int tree_insert_offset(struct rb_root *root, u64 offset,
963 * logically. 988 * logically.
964 */ 989 */
965 if (bitmap) { 990 if (bitmap) {
966 WARN_ON(info->bitmap); 991 if (info->bitmap) {
992 WARN_ON_ONCE(1);
993 return -EEXIST;
994 }
967 p = &(*p)->rb_right; 995 p = &(*p)->rb_right;
968 } else { 996 } else {
969 WARN_ON(!info->bitmap); 997 if (!info->bitmap) {
998 WARN_ON_ONCE(1);
999 return -EEXIST;
1000 }
970 p = &(*p)->rb_left; 1001 p = &(*p)->rb_left;
971 } 1002 }
972 } 1003 }
@@ -1386,6 +1417,23 @@ again:
1386 return 0; 1417 return 0;
1387} 1418}
1388 1419
1420static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
1421 struct btrfs_free_space *info, u64 offset,
1422 u64 bytes)
1423{
1424 u64 bytes_to_set = 0;
1425 u64 end;
1426
1427 end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);
1428
1429 bytes_to_set = min(end - offset, bytes);
1430
1431 bitmap_set_bits(ctl, info, offset, bytes_to_set);
1432
1433 return bytes_to_set;
1434
1435}
1436
1389static bool use_bitmap(struct btrfs_free_space_ctl *ctl, 1437static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1390 struct btrfs_free_space *info) 1438 struct btrfs_free_space *info)
1391{ 1439{
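
add_bytes_to_bitmap() clamps the bytes it sets to the end of the bitmap's window, so callers can simply loop with "bytes -= bytes_added; offset += bytes_added" instead of special-casing ranges that straddle a bitmap boundary. A minimal sketch of the clamp; BITS_PER_BITMAP, the unit and the numbers in main() are illustrative:

#include <stdint.h>
#include <stdio.h>

#define BITS_PER_BITMAP 32768ull      /* illustrative; depends on ctl->unit */

/* Set at most the part of [offset, offset+bytes) that lies in this bitmap. */
static uint64_t add_bytes_to_bitmap(uint64_t bitmap_start, uint64_t unit,
                                    uint64_t offset, uint64_t bytes)
{
    uint64_t end = bitmap_start + BITS_PER_BITMAP * unit;
    uint64_t bytes_to_set = bytes < end - offset ? bytes : end - offset;

    /* bitmap_set_bits(ctl, info, offset, bytes_to_set) would go here */
    return bytes_to_set;
}

int main(void)
{
    /* A request that spills past the bitmap gets clamped to the window. */
    uint64_t set = add_bytes_to_bitmap(0, 4096, 4096ull * 32000, 1ull << 30);

    printf("set %llu bytes in the first bitmap\n", (unsigned long long)set);
    return 0;
}
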
@@ -1422,12 +1470,18 @@ static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
1422 return true; 1470 return true;
1423} 1471}
1424 1472
1473static struct btrfs_free_space_op free_space_op = {
1474 .recalc_thresholds = recalculate_thresholds,
1475 .use_bitmap = use_bitmap,
1476};
1477
1425static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, 1478static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1426 struct btrfs_free_space *info) 1479 struct btrfs_free_space *info)
1427{ 1480{
1428 struct btrfs_free_space *bitmap_info; 1481 struct btrfs_free_space *bitmap_info;
1482 struct btrfs_block_group_cache *block_group = NULL;
1429 int added = 0; 1483 int added = 0;
1430 u64 bytes, offset, end; 1484 u64 bytes, offset, bytes_added;
1431 int ret; 1485 int ret;
1432 1486
1433 bytes = info->bytes; 1487 bytes = info->bytes;
@@ -1436,7 +1490,49 @@ static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
1436 if (!ctl->op->use_bitmap(ctl, info)) 1490 if (!ctl->op->use_bitmap(ctl, info))
1437 return 0; 1491 return 0;
1438 1492
1493 if (ctl->op == &free_space_op)
1494 block_group = ctl->private;
1439again: 1495again:
1496 /*
1497 * Since we link bitmaps right into the cluster we need to see if we
1498 * have a cluster here, and if so and it has our bitmap we need to add
1499 * the free space to that bitmap.
1500 */
1501 if (block_group && !list_empty(&block_group->cluster_list)) {
1502 struct btrfs_free_cluster *cluster;
1503 struct rb_node *node;
1504 struct btrfs_free_space *entry;
1505
1506 cluster = list_entry(block_group->cluster_list.next,
1507 struct btrfs_free_cluster,
1508 block_group_list);
1509 spin_lock(&cluster->lock);
1510 node = rb_first(&cluster->root);
1511 if (!node) {
1512 spin_unlock(&cluster->lock);
1513 goto no_cluster_bitmap;
1514 }
1515
1516 entry = rb_entry(node, struct btrfs_free_space, offset_index);
1517 if (!entry->bitmap) {
1518 spin_unlock(&cluster->lock);
1519 goto no_cluster_bitmap;
1520 }
1521
1522 if (entry->offset == offset_to_bitmap(ctl, offset)) {
1523 bytes_added = add_bytes_to_bitmap(ctl, entry,
1524 offset, bytes);
1525 bytes -= bytes_added;
1526 offset += bytes_added;
1527 }
1528 spin_unlock(&cluster->lock);
1529 if (!bytes) {
1530 ret = 1;
1531 goto out;
1532 }
1533 }
1534
1535no_cluster_bitmap:
1440 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 1536 bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
1441 1, 0); 1537 1, 0);
1442 if (!bitmap_info) { 1538 if (!bitmap_info) {
@@ -1444,19 +1540,10 @@ again:
1444 goto new_bitmap; 1540 goto new_bitmap;
1445 } 1541 }
1446 1542
1447 end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); 1543 bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
1448 1544 bytes -= bytes_added;
1449 if (offset >= bitmap_info->offset && offset + bytes > end) { 1545 offset += bytes_added;
1450 bitmap_set_bits(ctl, bitmap_info, offset, end - offset); 1546 added = 0;
1451 bytes -= end - offset;
1452 offset = end;
1453 added = 0;
1454 } else if (offset >= bitmap_info->offset && offset + bytes <= end) {
1455 bitmap_set_bits(ctl, bitmap_info, offset, bytes);
1456 bytes = 0;
1457 } else {
1458 BUG();
1459 }
1460 1547
1461 if (!bytes) { 1548 if (!bytes) {
1462 ret = 1; 1549 ret = 1;
@@ -1735,11 +1822,6 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
1735 "\n", count); 1822 "\n", count);
1736} 1823}
1737 1824
1738static struct btrfs_free_space_op free_space_op = {
1739 .recalc_thresholds = recalculate_thresholds,
1740 .use_bitmap = use_bitmap,
1741};
1742
1743void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) 1825void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
1744{ 1826{
1745 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 1827 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
@@ -2111,9 +2193,11 @@ again:
2111/* 2193/*
2112 * This searches the block group for just extents to fill the cluster with. 2194 * This searches the block group for just extents to fill the cluster with.
2113 */ 2195 */
2114static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, 2196static noinline int
2115 struct btrfs_free_cluster *cluster, 2197setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2116 u64 offset, u64 bytes, u64 min_bytes) 2198 struct btrfs_free_cluster *cluster,
2199 struct list_head *bitmaps, u64 offset, u64 bytes,
2200 u64 min_bytes)
2117{ 2201{
2118 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2202 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2119 struct btrfs_free_space *first = NULL; 2203 struct btrfs_free_space *first = NULL;
@@ -2135,6 +2219,8 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2135 * extent entry. 2219 * extent entry.
2136 */ 2220 */
2137 while (entry->bitmap) { 2221 while (entry->bitmap) {
2222 if (list_empty(&entry->list))
2223 list_add_tail(&entry->list, bitmaps);
2138 node = rb_next(&entry->offset_index); 2224 node = rb_next(&entry->offset_index);
2139 if (!node) 2225 if (!node)
2140 return -ENOSPC; 2226 return -ENOSPC;
@@ -2154,8 +2240,12 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2154 return -ENOSPC; 2240 return -ENOSPC;
2155 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2241 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2156 2242
2157 if (entry->bitmap) 2243 if (entry->bitmap) {
2244 if (list_empty(&entry->list))
2245 list_add_tail(&entry->list, bitmaps);
2158 continue; 2246 continue;
2247 }
2248
2159 /* 2249 /*
2160 * we haven't filled the empty size and the window is 2250 * we haven't filled the empty size and the window is
2161 * very large. reset and try again 2251 * very large. reset and try again
@@ -2207,9 +2297,11 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
2207 * This specifically looks for bitmaps that may work in the cluster, we assume 2297 * This specifically looks for bitmaps that may work in the cluster, we assume
2208 * that we have already failed to find extents that will work. 2298 * that we have already failed to find extents that will work.
2209 */ 2299 */
2210static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, 2300static noinline int
2211 struct btrfs_free_cluster *cluster, 2301setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2212 u64 offset, u64 bytes, u64 min_bytes) 2302 struct btrfs_free_cluster *cluster,
2303 struct list_head *bitmaps, u64 offset, u64 bytes,
2304 u64 min_bytes)
2213{ 2305{
2214 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2306 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2215 struct btrfs_free_space *entry; 2307 struct btrfs_free_space *entry;
@@ -2219,10 +2311,39 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
2219 if (ctl->total_bitmaps == 0) 2311 if (ctl->total_bitmaps == 0)
2220 return -ENOSPC; 2312 return -ENOSPC;
2221 2313
2314 /*
2315 * First check our cached list of bitmaps and see if there is an entry
2316 * here that will work.
2317 */
2318 list_for_each_entry(entry, bitmaps, list) {
2319 if (entry->bytes < min_bytes)
2320 continue;
2321 ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
2322 bytes, min_bytes);
2323 if (!ret)
2324 return 0;
2325 }
2326
2327 /*
2328 * If we do have entries on our list and we are here then we didn't find
2329 * anything, so go ahead and get the next entry after the last entry in
2330 * this list and start the search from there.
2331 */
2332 if (!list_empty(bitmaps)) {
2333 entry = list_entry(bitmaps->prev, struct btrfs_free_space,
2334 list);
2335 node = rb_next(&entry->offset_index);
2336 if (!node)
2337 return -ENOSPC;
2338 entry = rb_entry(node, struct btrfs_free_space, offset_index);
2339 goto search;
2340 }
2341
2222 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); 2342 entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1);
2223 if (!entry) 2343 if (!entry)
2224 return -ENOSPC; 2344 return -ENOSPC;
2225 2345
2346search:
2226 node = &entry->offset_index; 2347 node = &entry->offset_index;
2227 do { 2348 do {
2228 entry = rb_entry(node, struct btrfs_free_space, offset_index); 2349 entry = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -2253,6 +2374,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2253 u64 offset, u64 bytes, u64 empty_size) 2374 u64 offset, u64 bytes, u64 empty_size)
2254{ 2375{
2255 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; 2376 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2377 struct list_head bitmaps;
2378 struct btrfs_free_space *entry, *tmp;
2256 u64 min_bytes; 2379 u64 min_bytes;
2257 int ret; 2380 int ret;
2258 2381
@@ -2291,11 +2414,16 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
2291 goto out; 2414 goto out;
2292 } 2415 }
2293 2416
2294 ret = setup_cluster_no_bitmap(block_group, cluster, offset, bytes, 2417 INIT_LIST_HEAD(&bitmaps);
2295 min_bytes); 2418 ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
2419 bytes, min_bytes);
2296 if (ret) 2420 if (ret)
2297 ret = setup_cluster_bitmap(block_group, cluster, offset, 2421 ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
2298 bytes, min_bytes); 2422 offset, bytes, min_bytes);
2423
2424 /* Clear our temporary list */
2425 list_for_each_entry_safe(entry, tmp, &bitmaps, list)
2426 list_del_init(&entry->list);
2299 2427
2300 if (!ret) { 2428 if (!ret) {
2301 atomic_inc(&block_group->count); 2429 atomic_inc(&block_group->count);
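
btrfs_find_space_cluster() now threads a temporary list through both setup passes: the extent pass queues every bitmap it walks past, the bitmap pass tries those candidates before falling back to a fresh tree search, and the list is cleared afterwards with list_for_each_entry_safe(). A simplified sketch of that collect-then-retry pattern; the singly linked candidate list and helper names below are illustrative, not the kernel list API:

#include <stdio.h>

struct entry {
    unsigned long long bytes;
    int is_bitmap;
    struct entry *next_cand;           /* temporary candidate-list linkage */
};

/*
 * Extent pass: remember every bitmap we walk past on a temporary list so the
 * bitmap pass can try those first instead of re-searching the whole tree.
 */
static struct entry *collect_bitmaps(struct entry **all, int n)
{
    struct entry *cands = NULL;

    for (int i = 0; i < n; i++) {
        if (all[i]->is_bitmap) {
            all[i]->next_cand = cands;
            cands = all[i];
        }
    }
    return cands;
}

static struct entry *pick_bitmap(struct entry *cands,
                                 unsigned long long min_bytes)
{
    for (; cands; cands = cands->next_cand)
        if (cands->bytes >= min_bytes)
            return cands;
    return NULL;                        /* fall back to the full search */
}

int main(void)
{
    struct entry a = { 4096, 0 }, b = { 1 << 20, 1 }, c = { 1 << 16, 1 };
    struct entry *all[] = { &a, &b, &c };
    struct entry *cands = collect_bitmaps(all, 3);
    struct entry *hit = pick_bitmap(cands, 65536);

    printf("picked bitmap with %llu bytes\n", hit ? hit->bytes : 0ULL);
    return 0;
}
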
@@ -2481,7 +2609,7 @@ struct inode *lookup_free_ino_inode(struct btrfs_root *root,
2481 return inode; 2609 return inode;
2482 2610
2483 spin_lock(&root->cache_lock); 2611 spin_lock(&root->cache_lock);
2484 if (!root->fs_info->closing) 2612 if (!btrfs_fs_closing(root->fs_info))
2485 root->cache_inode = igrab(inode); 2613 root->cache_inode = igrab(inode);
2486 spin_unlock(&root->cache_lock); 2614 spin_unlock(&root->cache_lock);
2487 2615
@@ -2504,12 +2632,14 @@ int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2504 int ret = 0; 2632 int ret = 0;
2505 u64 root_gen = btrfs_root_generation(&root->root_item); 2633 u64 root_gen = btrfs_root_generation(&root->root_item);
2506 2634
2635 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2636 return 0;
2637
2507 /* 2638 /*
2508 * If we're unmounting then just return, since this does a search on the 2639 * If we're unmounting then just return, since this does a search on the
2509 * normal root and not the commit root and we could deadlock. 2640 * normal root and not the commit root and we could deadlock.
2510 */ 2641 */
2511 smp_mb(); 2642 if (btrfs_fs_closing(fs_info))
2512 if (fs_info->closing)
2513 return 0; 2643 return 0;
2514 2644
2515 path = btrfs_alloc_path(); 2645 path = btrfs_alloc_path();
@@ -2543,6 +2673,9 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
2543 struct inode *inode; 2673 struct inode *inode;
2544 int ret; 2674 int ret;
2545 2675
2676 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
2677 return 0;
2678
2546 inode = lookup_free_ino_inode(root, path); 2679 inode = lookup_free_ino_inode(root, path);
2547 if (IS_ERR(inode)) 2680 if (IS_ERR(inode))
2548 return 0; 2681 return 0;
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 3262cd17a12f..b4087e0fa871 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -38,6 +38,9 @@ static int caching_kthread(void *data)
38 int slot; 38 int slot;
39 int ret; 39 int ret;
40 40
41 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
42 return 0;
43
41 path = btrfs_alloc_path(); 44 path = btrfs_alloc_path();
42 if (!path) 45 if (!path)
43 return -ENOMEM; 46 return -ENOMEM;
@@ -59,8 +62,7 @@ again:
59 goto out; 62 goto out;
60 63
61 while (1) { 64 while (1) {
62 smp_mb(); 65 if (btrfs_fs_closing(fs_info))
63 if (fs_info->closing)
64 goto out; 66 goto out;
65 67
66 leaf = path->nodes[0]; 68 leaf = path->nodes[0];
@@ -141,6 +143,9 @@ static void start_caching(struct btrfs_root *root)
141 int ret; 143 int ret;
142 u64 objectid; 144 u64 objectid;
143 145
146 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
147 return;
148
144 spin_lock(&root->cache_lock); 149 spin_lock(&root->cache_lock);
145 if (root->cached != BTRFS_CACHE_NO) { 150 if (root->cached != BTRFS_CACHE_NO) {
146 spin_unlock(&root->cache_lock); 151 spin_unlock(&root->cache_lock);
@@ -178,6 +183,9 @@ static void start_caching(struct btrfs_root *root)
178 183
179int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) 184int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid)
180{ 185{
186 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
187 return btrfs_find_free_objectid(root, objectid);
188
181again: 189again:
182 *objectid = btrfs_find_ino_for_alloc(root); 190 *objectid = btrfs_find_ino_for_alloc(root);
183 191
@@ -201,6 +209,10 @@ void btrfs_return_ino(struct btrfs_root *root, u64 objectid)
201{ 209{
202 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; 210 struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
203 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; 211 struct btrfs_free_space_ctl *pinned = root->free_ino_pinned;
212
213 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
214 return;
215
204again: 216again:
205 if (root->cached == BTRFS_CACHE_FINISHED) { 217 if (root->cached == BTRFS_CACHE_FINISHED) {
206 __btrfs_add_free_space(ctl, objectid, 1); 218 __btrfs_add_free_space(ctl, objectid, 1);
@@ -250,6 +262,9 @@ void btrfs_unpin_free_ino(struct btrfs_root *root)
250 struct rb_node *n; 262 struct rb_node *n;
251 u64 count; 263 u64 count;
252 264
265 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
266 return;
267
253 while (1) { 268 while (1) {
254 n = rb_first(rbroot); 269 n = rb_first(rbroot);
255 if (!n) 270 if (!n)
@@ -388,9 +403,24 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
388 int prealloc; 403 int prealloc;
389 bool retry = false; 404 bool retry = false;
390 405
406 /* only fs tree and subvol/snap needs ino cache */
407 if (root->root_key.objectid != BTRFS_FS_TREE_OBJECTID &&
408 (root->root_key.objectid < BTRFS_FIRST_FREE_OBJECTID ||
409 root->root_key.objectid > BTRFS_LAST_FREE_OBJECTID))
410 return 0;
411
412 /* Don't save inode cache if we are deleting this root */
413 if (btrfs_root_refs(&root->root_item) == 0 &&
414 root != root->fs_info->tree_root)
415 return 0;
416
417 if (!btrfs_test_opt(root, INODE_MAP_CACHE))
418 return 0;
419
391 path = btrfs_alloc_path(); 420 path = btrfs_alloc_path();
392 if (!path) 421 if (!path)
393 return -ENOMEM; 422 return -ENOMEM;
423
394again: 424again:
395 inode = lookup_free_ino_inode(root, path); 425 inode = lookup_free_ino_inode(root, path);
396 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { 426 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 39a9d5750efd..0a9b10c5b0a7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -138,7 +138,6 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
138 return -ENOMEM; 138 return -ENOMEM;
139 139
140 path->leave_spinning = 1; 140 path->leave_spinning = 1;
141 btrfs_set_trans_block_group(trans, inode);
142 141
143 key.objectid = btrfs_ino(inode); 142 key.objectid = btrfs_ino(inode);
144 key.offset = start; 143 key.offset = start;
@@ -426,9 +425,8 @@ again:
426 } 425 }
427 } 426 }
428 if (start == 0) { 427 if (start == 0) {
429 trans = btrfs_join_transaction(root, 1); 428 trans = btrfs_join_transaction(root);
430 BUG_ON(IS_ERR(trans)); 429 BUG_ON(IS_ERR(trans));
431 btrfs_set_trans_block_group(trans, inode);
432 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 430 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
433 431
434 /* lets try to make an inline extent */ 432 /* lets try to make an inline extent */
@@ -623,8 +621,9 @@ retry:
623 async_extent->start + async_extent->ram_size - 1, 621 async_extent->start + async_extent->ram_size - 1,
624 GFP_NOFS); 622 GFP_NOFS);
625 623
626 trans = btrfs_join_transaction(root, 1); 624 trans = btrfs_join_transaction(root);
627 BUG_ON(IS_ERR(trans)); 625 BUG_ON(IS_ERR(trans));
626 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
628 ret = btrfs_reserve_extent(trans, root, 627 ret = btrfs_reserve_extent(trans, root,
629 async_extent->compressed_size, 628 async_extent->compressed_size,
630 async_extent->compressed_size, 629 async_extent->compressed_size,
@@ -793,9 +792,8 @@ static noinline int cow_file_range(struct inode *inode,
793 int ret = 0; 792 int ret = 0;
794 793
795 BUG_ON(is_free_space_inode(root, inode)); 794 BUG_ON(is_free_space_inode(root, inode));
796 trans = btrfs_join_transaction(root, 1); 795 trans = btrfs_join_transaction(root);
797 BUG_ON(IS_ERR(trans)); 796 BUG_ON(IS_ERR(trans));
798 btrfs_set_trans_block_group(trans, inode);
799 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 797 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
800 798
801 num_bytes = (end - start + blocksize) & ~(blocksize - 1); 799 num_bytes = (end - start + blocksize) & ~(blocksize - 1);
@@ -1077,10 +1075,12 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1077 nolock = is_free_space_inode(root, inode); 1075 nolock = is_free_space_inode(root, inode);
1078 1076
1079 if (nolock) 1077 if (nolock)
1080 trans = btrfs_join_transaction_nolock(root, 1); 1078 trans = btrfs_join_transaction_nolock(root);
1081 else 1079 else
1082 trans = btrfs_join_transaction(root, 1); 1080 trans = btrfs_join_transaction(root);
1081
1083 BUG_ON(IS_ERR(trans)); 1082 BUG_ON(IS_ERR(trans));
1083 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1084 1084
1085 cow_start = (u64)-1; 1085 cow_start = (u64)-1;
1086 cur_offset = start; 1086 cur_offset = start;
@@ -1519,8 +1519,6 @@ static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
1519{ 1519{
1520 struct btrfs_ordered_sum *sum; 1520 struct btrfs_ordered_sum *sum;
1521 1521
1522 btrfs_set_trans_block_group(trans, inode);
1523
1524 list_for_each_entry(sum, list, list) { 1522 list_for_each_entry(sum, list, list) {
1525 btrfs_csum_file_blocks(trans, 1523 btrfs_csum_file_blocks(trans,
1526 BTRFS_I(inode)->root->fs_info->csum_root, sum); 1524 BTRFS_I(inode)->root->fs_info->csum_root, sum);
@@ -1735,11 +1733,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1735 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1733 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
1736 if (!ret) { 1734 if (!ret) {
1737 if (nolock) 1735 if (nolock)
1738 trans = btrfs_join_transaction_nolock(root, 1); 1736 trans = btrfs_join_transaction_nolock(root);
1739 else 1737 else
1740 trans = btrfs_join_transaction(root, 1); 1738 trans = btrfs_join_transaction(root);
1741 BUG_ON(IS_ERR(trans)); 1739 BUG_ON(IS_ERR(trans));
1742 btrfs_set_trans_block_group(trans, inode);
1743 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1740 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1744 ret = btrfs_update_inode(trans, root, inode); 1741 ret = btrfs_update_inode(trans, root, inode);
1745 BUG_ON(ret); 1742 BUG_ON(ret);
@@ -1752,11 +1749,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1752 0, &cached_state, GFP_NOFS); 1749 0, &cached_state, GFP_NOFS);
1753 1750
1754 if (nolock) 1751 if (nolock)
1755 trans = btrfs_join_transaction_nolock(root, 1); 1752 trans = btrfs_join_transaction_nolock(root);
1756 else 1753 else
1757 trans = btrfs_join_transaction(root, 1); 1754 trans = btrfs_join_transaction(root);
1758 BUG_ON(IS_ERR(trans)); 1755 BUG_ON(IS_ERR(trans));
1759 btrfs_set_trans_block_group(trans, inode);
1760 trans->block_rsv = &root->fs_info->delalloc_block_rsv; 1756 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
1761 1757
1762 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags)) 1758 if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
@@ -1990,7 +1986,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
1990 } 1986 }
1991 1987
1992 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM) 1988 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
1993 return 0; 1989 goto good;
1994 1990
1995 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID && 1991 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
1996 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) { 1992 test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
@@ -2431,7 +2427,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2431 (u64)-1); 2427 (u64)-1);
2432 2428
2433 if (root->orphan_block_rsv || root->orphan_item_inserted) { 2429 if (root->orphan_block_rsv || root->orphan_item_inserted) {
2434 trans = btrfs_join_transaction(root, 1); 2430 trans = btrfs_join_transaction(root);
2435 if (!IS_ERR(trans)) 2431 if (!IS_ERR(trans))
2436 btrfs_end_transaction(trans, root); 2432 btrfs_end_transaction(trans, root);
2437 } 2433 }
@@ -2511,12 +2507,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
2511 struct btrfs_root *root = BTRFS_I(inode)->root; 2507 struct btrfs_root *root = BTRFS_I(inode)->root;
2512 struct btrfs_key location; 2508 struct btrfs_key location;
2513 int maybe_acls; 2509 int maybe_acls;
2514 u64 alloc_group_block;
2515 u32 rdev; 2510 u32 rdev;
2516 int ret; 2511 int ret;
2517 2512
2518 path = btrfs_alloc_path(); 2513 path = btrfs_alloc_path();
2519 BUG_ON(!path); 2514 BUG_ON(!path);
2515 path->leave_spinning = 1;
2520 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location)); 2516 memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));
2521 2517
2522 ret = btrfs_lookup_inode(NULL, root, path, &location, 0); 2518 ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
@@ -2526,6 +2522,12 @@ static void btrfs_read_locked_inode(struct inode *inode)
2526 leaf = path->nodes[0]; 2522 leaf = path->nodes[0];
2527 inode_item = btrfs_item_ptr(leaf, path->slots[0], 2523 inode_item = btrfs_item_ptr(leaf, path->slots[0],
2528 struct btrfs_inode_item); 2524 struct btrfs_inode_item);
2525 if (!leaf->map_token)
2526 map_private_extent_buffer(leaf, (unsigned long)inode_item,
2527 sizeof(struct btrfs_inode_item),
2528 &leaf->map_token, &leaf->kaddr,
2529 &leaf->map_start, &leaf->map_len,
2530 KM_USER1);
2529 2531
2530 inode->i_mode = btrfs_inode_mode(leaf, inode_item); 2532 inode->i_mode = btrfs_inode_mode(leaf, inode_item);
2531 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item); 2533 inode->i_nlink = btrfs_inode_nlink(leaf, inode_item);
@@ -2555,8 +2557,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
2555 BTRFS_I(inode)->index_cnt = (u64)-1; 2557 BTRFS_I(inode)->index_cnt = (u64)-1;
2556 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item); 2558 BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
2557 2559
2558 alloc_group_block = btrfs_inode_block_group(leaf, inode_item);
2559
2560 /* 2560 /*
2561 * try to precache a NULL acl entry for files that don't have 2561 * try to precache a NULL acl entry for files that don't have
2562 * any xattrs or acls 2562 * any xattrs or acls
@@ -2566,8 +2566,11 @@ static void btrfs_read_locked_inode(struct inode *inode)
2566 if (!maybe_acls) 2566 if (!maybe_acls)
2567 cache_no_acl(inode); 2567 cache_no_acl(inode);
2568 2568
2569 BTRFS_I(inode)->block_group = btrfs_find_block_group(root, 0, 2569 if (leaf->map_token) {
2570 alloc_group_block, 0); 2570 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
2571 leaf->map_token = NULL;
2572 }
2573
2571 btrfs_free_path(path); 2574 btrfs_free_path(path);
2572 inode_item = NULL; 2575 inode_item = NULL;
2573 2576
@@ -2647,7 +2650,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2647 btrfs_set_inode_transid(leaf, item, trans->transid); 2650 btrfs_set_inode_transid(leaf, item, trans->transid);
2648 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2651 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2649 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2652 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
2650 btrfs_set_inode_block_group(leaf, item, BTRFS_I(inode)->block_group); 2653 btrfs_set_inode_block_group(leaf, item, 0);
2651 2654
2652 if (leaf->map_token) { 2655 if (leaf->map_token) {
2653 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1); 2656 unmap_extent_buffer(leaf, leaf->map_token, KM_USER1);
@@ -3004,8 +3007,6 @@ static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
3004 if (IS_ERR(trans)) 3007 if (IS_ERR(trans))
3005 return PTR_ERR(trans); 3008 return PTR_ERR(trans);
3006 3009
3007 btrfs_set_trans_block_group(trans, dir);
3008
3009 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0); 3010 btrfs_record_unlink_dir(trans, dir, dentry->d_inode, 0);
3010 3011
3011 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode, 3012 ret = btrfs_unlink_inode(trans, root, dir, dentry->d_inode,
@@ -3075,6 +3076,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3075 ret = btrfs_update_inode(trans, root, dir); 3076 ret = btrfs_update_inode(trans, root, dir);
3076 BUG_ON(ret); 3077 BUG_ON(ret);
3077 3078
3079 btrfs_free_path(path);
3078 return 0; 3080 return 0;
3079} 3081}
3080 3082
@@ -3094,8 +3096,6 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
3094 if (IS_ERR(trans)) 3096 if (IS_ERR(trans))
3095 return PTR_ERR(trans); 3097 return PTR_ERR(trans);
3096 3098
3097 btrfs_set_trans_block_group(trans, dir);
3098
3099 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 3099 if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
3100 err = btrfs_unlink_subvol(trans, root, dir, 3100 err = btrfs_unlink_subvol(trans, root, dir,
3101 BTRFS_I(inode)->location.objectid, 3101 BTRFS_I(inode)->location.objectid,
@@ -3514,7 +3514,6 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
3514 err = PTR_ERR(trans); 3514 err = PTR_ERR(trans);
3515 break; 3515 break;
3516 } 3516 }
3517 btrfs_set_trans_block_group(trans, inode);
3518 3517
3519 err = btrfs_drop_extents(trans, inode, cur_offset, 3518 err = btrfs_drop_extents(trans, inode, cur_offset,
3520 cur_offset + hole_size, 3519 cur_offset + hole_size,
@@ -3648,9 +3647,8 @@ void btrfs_evict_inode(struct inode *inode)
3648 btrfs_i_size_write(inode, 0); 3647 btrfs_i_size_write(inode, 0);
3649 3648
3650 while (1) { 3649 while (1) {
3651 trans = btrfs_start_transaction(root, 0); 3650 trans = btrfs_join_transaction(root);
3652 BUG_ON(IS_ERR(trans)); 3651 BUG_ON(IS_ERR(trans));
3653 btrfs_set_trans_block_group(trans, inode);
3654 trans->block_rsv = root->orphan_block_rsv; 3652 trans->block_rsv = root->orphan_block_rsv;
3655 3653
3656 ret = btrfs_block_rsv_check(trans, root, 3654 ret = btrfs_block_rsv_check(trans, root,
@@ -4133,7 +4131,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent,
4133 path = btrfs_alloc_path(); 4131 path = btrfs_alloc_path();
4134 if (!path) 4132 if (!path)
4135 return -ENOMEM; 4133 return -ENOMEM;
4136 path->reada = 2; 4134
4135 path->reada = 1;
4137 4136
4138 if (key_type == BTRFS_DIR_INDEX_KEY) { 4137 if (key_type == BTRFS_DIR_INDEX_KEY) {
4139 INIT_LIST_HEAD(&ins_list); 4138 INIT_LIST_HEAD(&ins_list);
@@ -4268,18 +4267,16 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4268 if (BTRFS_I(inode)->dummy_inode) 4267 if (BTRFS_I(inode)->dummy_inode)
4269 return 0; 4268 return 0;
4270 4269
4271 smp_mb(); 4270 if (btrfs_fs_closing(root->fs_info) && is_free_space_inode(root, inode))
4272 if (root->fs_info->closing && is_free_space_inode(root, inode))
4273 nolock = true; 4271 nolock = true;
4274 4272
4275 if (wbc->sync_mode == WB_SYNC_ALL) { 4273 if (wbc->sync_mode == WB_SYNC_ALL) {
4276 if (nolock) 4274 if (nolock)
4277 trans = btrfs_join_transaction_nolock(root, 1); 4275 trans = btrfs_join_transaction_nolock(root);
4278 else 4276 else
4279 trans = btrfs_join_transaction(root, 1); 4277 trans = btrfs_join_transaction(root);
4280 if (IS_ERR(trans)) 4278 if (IS_ERR(trans))
4281 return PTR_ERR(trans); 4279 return PTR_ERR(trans);
4282 btrfs_set_trans_block_group(trans, inode);
4283 if (nolock) 4280 if (nolock)
4284 ret = btrfs_end_transaction_nolock(trans, root); 4281 ret = btrfs_end_transaction_nolock(trans, root);
4285 else 4282 else
@@ -4303,9 +4300,8 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
4303 if (BTRFS_I(inode)->dummy_inode) 4300 if (BTRFS_I(inode)->dummy_inode)
4304 return; 4301 return;
4305 4302
4306 trans = btrfs_join_transaction(root, 1); 4303 trans = btrfs_join_transaction(root);
4307 BUG_ON(IS_ERR(trans)); 4304 BUG_ON(IS_ERR(trans));
4308 btrfs_set_trans_block_group(trans, inode);
4309 4305
4310 ret = btrfs_update_inode(trans, root, inode); 4306 ret = btrfs_update_inode(trans, root, inode);
4311 if (ret && ret == -ENOSPC) { 4307 if (ret && ret == -ENOSPC) {
@@ -4319,7 +4315,6 @@ void btrfs_dirty_inode(struct inode *inode, int flags)
4319 PTR_ERR(trans)); 4315 PTR_ERR(trans));
4320 return; 4316 return;
4321 } 4317 }
4322 btrfs_set_trans_block_group(trans, inode);
4323 4318
4324 ret = btrfs_update_inode(trans, root, inode); 4319 ret = btrfs_update_inode(trans, root, inode);
4325 if (ret) { 4320 if (ret) {
@@ -4418,8 +4413,8 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4418 struct btrfs_root *root, 4413 struct btrfs_root *root,
4419 struct inode *dir, 4414 struct inode *dir,
4420 const char *name, int name_len, 4415 const char *name, int name_len,
4421 u64 ref_objectid, u64 objectid, 4416 u64 ref_objectid, u64 objectid, int mode,
4422 u64 alloc_hint, int mode, u64 *index) 4417 u64 *index)
4423{ 4418{
4424 struct inode *inode; 4419 struct inode *inode;
4425 struct btrfs_inode_item *inode_item; 4420 struct btrfs_inode_item *inode_item;
@@ -4472,8 +4467,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
4472 owner = 0; 4467 owner = 0;
4473 else 4468 else
4474 owner = 1; 4469 owner = 1;
4475 BTRFS_I(inode)->block_group =
4476 btrfs_find_block_group(root, 0, alloc_hint, owner);
4477 4470
4478 key[0].objectid = objectid; 4471 key[0].objectid = objectid;
4479 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY); 4472 btrfs_set_key_type(&key[0], BTRFS_INODE_ITEM_KEY);
@@ -4629,15 +4622,13 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4629 if (IS_ERR(trans)) 4622 if (IS_ERR(trans))
4630 return PTR_ERR(trans); 4623 return PTR_ERR(trans);
4631 4624
4632 btrfs_set_trans_block_group(trans, dir);
4633
4634 err = btrfs_find_free_ino(root, &objectid); 4625 err = btrfs_find_free_ino(root, &objectid);
4635 if (err) 4626 if (err)
4636 goto out_unlock; 4627 goto out_unlock;
4637 4628
4638 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4629 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4639 dentry->d_name.len, btrfs_ino(dir), objectid, 4630 dentry->d_name.len, btrfs_ino(dir), objectid,
4640 BTRFS_I(dir)->block_group, mode, &index); 4631 mode, &index);
4641 if (IS_ERR(inode)) { 4632 if (IS_ERR(inode)) {
4642 err = PTR_ERR(inode); 4633 err = PTR_ERR(inode);
4643 goto out_unlock; 4634 goto out_unlock;
@@ -4649,7 +4640,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4649 goto out_unlock; 4640 goto out_unlock;
4650 } 4641 }
4651 4642
4652 btrfs_set_trans_block_group(trans, inode);
4653 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4643 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4654 if (err) 4644 if (err)
4655 drop_inode = 1; 4645 drop_inode = 1;
@@ -4658,8 +4648,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
4658 init_special_inode(inode, inode->i_mode, rdev); 4648 init_special_inode(inode, inode->i_mode, rdev);
4659 btrfs_update_inode(trans, root, inode); 4649 btrfs_update_inode(trans, root, inode);
4660 } 4650 }
4661 btrfs_update_inode_block_group(trans, inode);
4662 btrfs_update_inode_block_group(trans, dir);
4663out_unlock: 4651out_unlock:
4664 nr = trans->blocks_used; 4652 nr = trans->blocks_used;
4665 btrfs_end_transaction_throttle(trans, root); 4653 btrfs_end_transaction_throttle(trans, root);
@@ -4692,15 +4680,13 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4692 if (IS_ERR(trans)) 4680 if (IS_ERR(trans))
4693 return PTR_ERR(trans); 4681 return PTR_ERR(trans);
4694 4682
4695 btrfs_set_trans_block_group(trans, dir);
4696
4697 err = btrfs_find_free_ino(root, &objectid); 4683 err = btrfs_find_free_ino(root, &objectid);
4698 if (err) 4684 if (err)
4699 goto out_unlock; 4685 goto out_unlock;
4700 4686
4701 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4687 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4702 dentry->d_name.len, btrfs_ino(dir), objectid, 4688 dentry->d_name.len, btrfs_ino(dir), objectid,
4703 BTRFS_I(dir)->block_group, mode, &index); 4689 mode, &index);
4704 if (IS_ERR(inode)) { 4690 if (IS_ERR(inode)) {
4705 err = PTR_ERR(inode); 4691 err = PTR_ERR(inode);
4706 goto out_unlock; 4692 goto out_unlock;
@@ -4712,7 +4698,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4712 goto out_unlock; 4698 goto out_unlock;
4713 } 4699 }
4714 4700
4715 btrfs_set_trans_block_group(trans, inode);
4716 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 4701 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
4717 if (err) 4702 if (err)
4718 drop_inode = 1; 4703 drop_inode = 1;
@@ -4723,8 +4708,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
4723 inode->i_op = &btrfs_file_inode_operations; 4708 inode->i_op = &btrfs_file_inode_operations;
4724 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 4709 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
4725 } 4710 }
4726 btrfs_update_inode_block_group(trans, inode);
4727 btrfs_update_inode_block_group(trans, dir);
4728out_unlock: 4711out_unlock:
4729 nr = trans->blocks_used; 4712 nr = trans->blocks_used;
4730 btrfs_end_transaction_throttle(trans, root); 4713 btrfs_end_transaction_throttle(trans, root);
@@ -4771,8 +4754,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4771 4754
4772 btrfs_inc_nlink(inode); 4755 btrfs_inc_nlink(inode);
4773 inode->i_ctime = CURRENT_TIME; 4756 inode->i_ctime = CURRENT_TIME;
4774
4775 btrfs_set_trans_block_group(trans, dir);
4776 ihold(inode); 4757 ihold(inode);
4777 4758
4778 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index); 4759 err = btrfs_add_nondir(trans, dir, dentry, inode, 1, index);
@@ -4781,7 +4762,6 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4781 drop_inode = 1; 4762 drop_inode = 1;
4782 } else { 4763 } else {
4783 struct dentry *parent = dget_parent(dentry); 4764 struct dentry *parent = dget_parent(dentry);
4784 btrfs_update_inode_block_group(trans, dir);
4785 err = btrfs_update_inode(trans, root, inode); 4765 err = btrfs_update_inode(trans, root, inode);
4786 BUG_ON(err); 4766 BUG_ON(err);
4787 btrfs_log_new_name(trans, inode, NULL, parent); 4767 btrfs_log_new_name(trans, inode, NULL, parent);
@@ -4818,7 +4798,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4818 trans = btrfs_start_transaction(root, 5); 4798 trans = btrfs_start_transaction(root, 5);
4819 if (IS_ERR(trans)) 4799 if (IS_ERR(trans))
4820 return PTR_ERR(trans); 4800 return PTR_ERR(trans);
4821 btrfs_set_trans_block_group(trans, dir);
4822 4801
4823 err = btrfs_find_free_ino(root, &objectid); 4802 err = btrfs_find_free_ino(root, &objectid);
4824 if (err) 4803 if (err)
@@ -4826,8 +4805,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4826 4805
4827 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 4806 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
4828 dentry->d_name.len, btrfs_ino(dir), objectid, 4807 dentry->d_name.len, btrfs_ino(dir), objectid,
4829 BTRFS_I(dir)->block_group, S_IFDIR | mode, 4808 S_IFDIR | mode, &index);
4830 &index);
4831 if (IS_ERR(inode)) { 4809 if (IS_ERR(inode)) {
4832 err = PTR_ERR(inode); 4810 err = PTR_ERR(inode);
4833 goto out_fail; 4811 goto out_fail;
@@ -4841,7 +4819,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4841 4819
4842 inode->i_op = &btrfs_dir_inode_operations; 4820 inode->i_op = &btrfs_dir_inode_operations;
4843 inode->i_fop = &btrfs_dir_file_operations; 4821 inode->i_fop = &btrfs_dir_file_operations;
4844 btrfs_set_trans_block_group(trans, inode);
4845 4822
4846 btrfs_i_size_write(inode, 0); 4823 btrfs_i_size_write(inode, 0);
4847 err = btrfs_update_inode(trans, root, inode); 4824 err = btrfs_update_inode(trans, root, inode);
@@ -4855,8 +4832,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
4855 4832
4856 d_instantiate(dentry, inode); 4833 d_instantiate(dentry, inode);
4857 drop_on_err = 0; 4834 drop_on_err = 0;
4858 btrfs_update_inode_block_group(trans, inode);
4859 btrfs_update_inode_block_group(trans, dir);
4860 4835
4861out_fail: 4836out_fail:
4862 nr = trans->blocks_used; 4837 nr = trans->blocks_used;
@@ -4989,7 +4964,15 @@ again:
4989 4964
4990 if (!path) { 4965 if (!path) {
4991 path = btrfs_alloc_path(); 4966 path = btrfs_alloc_path();
4992 BUG_ON(!path); 4967 if (!path) {
4968 err = -ENOMEM;
4969 goto out;
4970 }
4971 /*
4972 * Chances are we'll be called again, so go ahead and do
4973 * readahead
4974 */
4975 path->reada = 1;
4993 } 4976 }
4994 4977
4995 ret = btrfs_lookup_file_extent(trans, root, path, 4978 ret = btrfs_lookup_file_extent(trans, root, path,
@@ -5130,8 +5113,10 @@ again:
5130 kunmap(page); 5113 kunmap(page);
5131 free_extent_map(em); 5114 free_extent_map(em);
5132 em = NULL; 5115 em = NULL;
5116
5133 btrfs_release_path(path); 5117 btrfs_release_path(path);
5134 trans = btrfs_join_transaction(root, 1); 5118 trans = btrfs_join_transaction(root);
5119
5135 if (IS_ERR(trans)) 5120 if (IS_ERR(trans))
5136 return ERR_CAST(trans); 5121 return ERR_CAST(trans);
5137 goto again; 5122 goto again;
@@ -5375,7 +5360,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5375 btrfs_drop_extent_cache(inode, start, start + len - 1, 0); 5360 btrfs_drop_extent_cache(inode, start, start + len - 1, 0);
5376 } 5361 }
5377 5362
5378 trans = btrfs_join_transaction(root, 0); 5363 trans = btrfs_join_transaction(root);
5379 if (IS_ERR(trans)) 5364 if (IS_ERR(trans))
5380 return ERR_CAST(trans); 5365 return ERR_CAST(trans);
5381 5366
@@ -5611,7 +5596,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
5611 * to make sure the current transaction stays open 5596 * to make sure the current transaction stays open
5612 * while we look for nocow cross refs 5597 * while we look for nocow cross refs
5613 */ 5598 */
5614 trans = btrfs_join_transaction(root, 0); 5599 trans = btrfs_join_transaction(root);
5615 if (IS_ERR(trans)) 5600 if (IS_ERR(trans))
5616 goto must_cow; 5601 goto must_cow;
5617 5602
@@ -5750,7 +5735,7 @@ again:
5750 5735
5751 BUG_ON(!ordered); 5736 BUG_ON(!ordered);
5752 5737
5753 trans = btrfs_join_transaction(root, 1); 5738 trans = btrfs_join_transaction(root);
5754 if (IS_ERR(trans)) { 5739 if (IS_ERR(trans)) {
5755 err = -ENOMEM; 5740 err = -ENOMEM;
5756 goto out; 5741 goto out;
@@ -6500,6 +6485,7 @@ out:
6500static int btrfs_truncate(struct inode *inode) 6485static int btrfs_truncate(struct inode *inode)
6501{ 6486{
6502 struct btrfs_root *root = BTRFS_I(inode)->root; 6487 struct btrfs_root *root = BTRFS_I(inode)->root;
6488 struct btrfs_block_rsv *rsv;
6503 int ret; 6489 int ret;
6504 int err = 0; 6490 int err = 0;
6505 struct btrfs_trans_handle *trans; 6491 struct btrfs_trans_handle *trans;
@@ -6513,28 +6499,80 @@ static int btrfs_truncate(struct inode *inode)
6513 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1); 6499 btrfs_wait_ordered_range(inode, inode->i_size & (~mask), (u64)-1);
6514 btrfs_ordered_update_i_size(inode, inode->i_size, NULL); 6500 btrfs_ordered_update_i_size(inode, inode->i_size, NULL);
6515 6501
6516 trans = btrfs_start_transaction(root, 5); 6502 /*
6517 if (IS_ERR(trans)) 6503 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
6518 return PTR_ERR(trans); 6504 * 3 things going on here
6505 *
6506 * 1) We need to reserve space for our orphan item and the space to
6507 * delete our orphan item. Lord knows we don't want to have a dangling
6508 * orphan item because we didn't reserve space to remove it.
6509 *
6510 * 2) We need to reserve space to update our inode.
6511 *
6512 * 3) We need to have something to cache all the space that is going to
6513 * be free'd up by the truncate operation, but also have some slack
6514 * space reserved in case it uses space during the truncate (thank you
6515 * very much snapshotting).
6516 *
6517 * And we need these to all be separate. The fact is we can use a lot of
6518 * space doing the truncate, and we have no earthly idea how much space
6519 * we will use, so we need the truncate reservation to be separate so it
6520 * doesn't end up using space reserved for updating the inode or
6521 * removing the orphan item. We also need to be able to stop the
6522 * transaction and start a new one, which means we need to be able to
6523 * update the inode several times, and we have no way of knowing how
6524 * many times that will be, so we can't just reserve 1 item for the
6525 * entirety of the operation, so that has to be done separately as well.
6526 * Then there is the orphan item, which does indeed need to be held on
6527 * to for the whole operation, and we need nobody to touch this reserved
6528 * space except the orphan code.
6529 *
6530 * So that leaves us with
6531 *
6532 * 1) root->orphan_block_rsv - for the orphan deletion.
6533 * 2) rsv - for the truncate reservation, which we will steal from the
6534 * transaction reservation.
6535 * 3) fs_info->trans_block_rsv - this will have 1 items worth left for
6536 * updating the inode.
6537 */
6538 rsv = btrfs_alloc_block_rsv(root);
6539 if (!rsv)
6540 return -ENOMEM;
6541 btrfs_add_durable_block_rsv(root->fs_info, rsv);
6519 6542
6520 btrfs_set_trans_block_group(trans, inode); 6543 trans = btrfs_start_transaction(root, 4);
6544 if (IS_ERR(trans)) {
6545 err = PTR_ERR(trans);
6546 goto out;
6547 }
6548
6549 /*
6550 * Reserve space for the truncate process. Truncate should be adding
6551 * space, but if there are snapshots it may end up using space.
6552 */
6553 ret = btrfs_truncate_reserve_metadata(trans, root, rsv);
6554 BUG_ON(ret);
6521 6555
6522 ret = btrfs_orphan_add(trans, inode); 6556 ret = btrfs_orphan_add(trans, inode);
6523 if (ret) { 6557 if (ret) {
6524 btrfs_end_transaction(trans, root); 6558 btrfs_end_transaction(trans, root);
6525 return ret; 6559 goto out;
6526 } 6560 }
6527 6561
6528 nr = trans->blocks_used; 6562 nr = trans->blocks_used;
6529 btrfs_end_transaction(trans, root); 6563 btrfs_end_transaction(trans, root);
6530 btrfs_btree_balance_dirty(root, nr); 6564 btrfs_btree_balance_dirty(root, nr);
6531 6565
6532 /* Now start a transaction for the truncate */ 6566 /*
6533 trans = btrfs_start_transaction(root, 0); 6567 * Ok so we've already migrated our bytes over for the truncate, so here
6534 if (IS_ERR(trans)) 6568 * just reserve the one slot we need for updating the inode.
6535 return PTR_ERR(trans); 6569 */
6536 btrfs_set_trans_block_group(trans, inode); 6570 trans = btrfs_start_transaction(root, 1);
6537 trans->block_rsv = root->orphan_block_rsv; 6571 if (IS_ERR(trans)) {
6572 err = PTR_ERR(trans);
6573 goto out;
6574 }
6575 trans->block_rsv = rsv;
6538 6576
6539 /* 6577 /*
6540 * setattr is responsible for setting the ordered_data_close flag, 6578 * setattr is responsible for setting the ordered_data_close flag,
@@ -6558,24 +6596,17 @@ static int btrfs_truncate(struct inode *inode)
6558 6596
6559 while (1) { 6597 while (1) {
6560 if (!trans) { 6598 if (!trans) {
6561 trans = btrfs_start_transaction(root, 0); 6599 trans = btrfs_start_transaction(root, 3);
6562 if (IS_ERR(trans)) 6600 if (IS_ERR(trans)) {
6563 return PTR_ERR(trans); 6601 err = PTR_ERR(trans);
6564 btrfs_set_trans_block_group(trans, inode); 6602 goto out;
6565 trans->block_rsv = root->orphan_block_rsv; 6603 }
6566 }
6567 6604
6568 ret = btrfs_block_rsv_check(trans, root, 6605 ret = btrfs_truncate_reserve_metadata(trans, root,
6569 root->orphan_block_rsv, 0, 5); 6606 rsv);
6570 if (ret == -EAGAIN) { 6607 BUG_ON(ret);
6571 ret = btrfs_commit_transaction(trans, root); 6608
6572 if (ret) 6609 trans->block_rsv = rsv;
6573 return ret;
6574 trans = NULL;
6575 continue;
6576 } else if (ret) {
6577 err = ret;
6578 break;
6579 } 6610 }
6580 6611
6581 ret = btrfs_truncate_inode_items(trans, root, inode, 6612 ret = btrfs_truncate_inode_items(trans, root, inode,
@@ -6586,6 +6617,7 @@ static int btrfs_truncate(struct inode *inode)
6586 break; 6617 break;
6587 } 6618 }
6588 6619
6620 trans->block_rsv = &root->fs_info->trans_block_rsv;
6589 ret = btrfs_update_inode(trans, root, inode); 6621 ret = btrfs_update_inode(trans, root, inode);
6590 if (ret) { 6622 if (ret) {
6591 err = ret; 6623 err = ret;
@@ -6599,6 +6631,7 @@ static int btrfs_truncate(struct inode *inode)
6599 } 6631 }
6600 6632
6601 if (ret == 0 && inode->i_nlink > 0) { 6633 if (ret == 0 && inode->i_nlink > 0) {
6634 trans->block_rsv = root->orphan_block_rsv;
6602 ret = btrfs_orphan_del(trans, inode); 6635 ret = btrfs_orphan_del(trans, inode);
6603 if (ret) 6636 if (ret)
6604 err = ret; 6637 err = ret;
@@ -6610,15 +6643,20 @@ static int btrfs_truncate(struct inode *inode)
6610 ret = btrfs_orphan_del(NULL, inode); 6643 ret = btrfs_orphan_del(NULL, inode);
6611 } 6644 }
6612 6645
6646 trans->block_rsv = &root->fs_info->trans_block_rsv;
6613 ret = btrfs_update_inode(trans, root, inode); 6647 ret = btrfs_update_inode(trans, root, inode);
6614 if (ret && !err) 6648 if (ret && !err)
6615 err = ret; 6649 err = ret;
6616 6650
6617 nr = trans->blocks_used; 6651 nr = trans->blocks_used;
6618 ret = btrfs_end_transaction_throttle(trans, root); 6652 ret = btrfs_end_transaction_throttle(trans, root);
6653 btrfs_btree_balance_dirty(root, nr);
6654
6655out:
6656 btrfs_free_block_rsv(root, rsv);
6657
6619 if (ret && !err) 6658 if (ret && !err)
6620 err = ret; 6659 err = ret;
6621 btrfs_btree_balance_dirty(root, nr);
6622 6660
6623 return err; 6661 return err;
6624} 6662}
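
The comment added to btrfs_truncate() above explains why the orphan item, the truncate work itself, and the final inode update each get a reservation of their own. Below is a minimal userspace sketch of that separation, not btrfs code: the pool names and byte counts are invented for illustration, and rsv_use() plays the role of consuming metadata space from one reservation without being allowed to borrow from another.

#include <assert.h>
#include <stdio.h>

struct rsv {
        const char *name;
        unsigned long long size;        /* bytes set aside for this purpose */
        unsigned long long used;        /* bytes consumed so far */
};

/* Consume space from one pool; fail rather than borrow from another pool. */
static int rsv_use(struct rsv *r, unsigned long long bytes)
{
        if (r->used + bytes > r->size)
                return -1;              /* this reservation is exhausted */
        r->used += bytes;
        return 0;
}

int main(void)
{
        struct rsv orphan_rsv   = { "orphan",   4096,  0 };
        struct rsv truncate_rsv = { "truncate", 65536, 0 };
        struct rsv inode_rsv    = { "inode",    4096,  0 };

        /* The orphan item is pinned for the whole operation. */
        assert(rsv_use(&orphan_rsv, 4096) == 0);

        /* Truncate may dirty metadata repeatedly; only its own pool shrinks. */
        while (rsv_use(&truncate_rsv, 16384) == 0)
                ;

        /* Even with the truncate pool empty, the inode update still fits. */
        assert(rsv_use(&inode_rsv, 4096) == 0);

        printf("truncate pool used %llu of %llu bytes\n",
               truncate_rsv.used, truncate_rsv.size);
        return 0;
}

Keeping the pools distinct is exactly what lets the loop above run out of truncate space without ever touching the bytes held for the orphan item or the inode update.
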
@@ -6627,15 +6665,14 @@ static int btrfs_truncate(struct inode *inode)
6627 * create a new subvolume directory/inode (helper for the ioctl). 6665 * create a new subvolume directory/inode (helper for the ioctl).
6628 */ 6666 */
6629int btrfs_create_subvol_root(struct btrfs_trans_handle *trans, 6667int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
6630 struct btrfs_root *new_root, 6668 struct btrfs_root *new_root, u64 new_dirid)
6631 u64 new_dirid, u64 alloc_hint)
6632{ 6669{
6633 struct inode *inode; 6670 struct inode *inode;
6634 int err; 6671 int err;
6635 u64 index = 0; 6672 u64 index = 0;
6636 6673
6637 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid, 6674 inode = btrfs_new_inode(trans, new_root, NULL, "..", 2, new_dirid,
6638 new_dirid, alloc_hint, S_IFDIR | 0700, &index); 6675 new_dirid, S_IFDIR | 0700, &index);
6639 if (IS_ERR(inode)) 6676 if (IS_ERR(inode))
6640 return PTR_ERR(inode); 6677 return PTR_ERR(inode);
6641 inode->i_op = &btrfs_dir_inode_operations; 6678 inode->i_op = &btrfs_dir_inode_operations;
@@ -6748,21 +6785,6 @@ void btrfs_destroy_inode(struct inode *inode)
6748 spin_unlock(&root->fs_info->ordered_extent_lock); 6785 spin_unlock(&root->fs_info->ordered_extent_lock);
6749 } 6786 }
6750 6787
6751 if (root == root->fs_info->tree_root) {
6752 struct btrfs_block_group_cache *block_group;
6753
6754 block_group = btrfs_lookup_block_group(root->fs_info,
6755 BTRFS_I(inode)->block_group);
6756 if (block_group && block_group->inode == inode) {
6757 spin_lock(&block_group->lock);
6758 block_group->inode = NULL;
6759 spin_unlock(&block_group->lock);
6760 btrfs_put_block_group(block_group);
6761 } else if (block_group) {
6762 btrfs_put_block_group(block_group);
6763 }
6764 }
6765
6766 spin_lock(&root->orphan_lock); 6788 spin_lock(&root->orphan_lock);
6767 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6789 if (!list_empty(&BTRFS_I(inode)->i_orphan)) {
6768 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6790 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
@@ -6948,8 +6970,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
6948 goto out_notrans; 6970 goto out_notrans;
6949 } 6971 }
6950 6972
6951 btrfs_set_trans_block_group(trans, new_dir);
6952
6953 if (dest != root) 6973 if (dest != root)
6954 btrfs_record_root_in_trans(trans, dest); 6974 btrfs_record_root_in_trans(trans, dest);
6955 6975
@@ -7131,16 +7151,13 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7131 if (IS_ERR(trans)) 7151 if (IS_ERR(trans))
7132 return PTR_ERR(trans); 7152 return PTR_ERR(trans);
7133 7153
7134 btrfs_set_trans_block_group(trans, dir);
7135
7136 err = btrfs_find_free_ino(root, &objectid); 7154 err = btrfs_find_free_ino(root, &objectid);
7137 if (err) 7155 if (err)
7138 goto out_unlock; 7156 goto out_unlock;
7139 7157
7140 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, 7158 inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
7141 dentry->d_name.len, btrfs_ino(dir), objectid, 7159 dentry->d_name.len, btrfs_ino(dir), objectid,
7142 BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, 7160 S_IFLNK|S_IRWXUGO, &index);
7143 &index);
7144 if (IS_ERR(inode)) { 7161 if (IS_ERR(inode)) {
7145 err = PTR_ERR(inode); 7162 err = PTR_ERR(inode);
7146 goto out_unlock; 7163 goto out_unlock;
@@ -7152,7 +7169,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7152 goto out_unlock; 7169 goto out_unlock;
7153 } 7170 }
7154 7171
7155 btrfs_set_trans_block_group(trans, inode);
7156 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index); 7172 err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
7157 if (err) 7173 if (err)
7158 drop_inode = 1; 7174 drop_inode = 1;
@@ -7163,8 +7179,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
7163 inode->i_op = &btrfs_file_inode_operations; 7179 inode->i_op = &btrfs_file_inode_operations;
7164 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops; 7180 BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
7165 } 7181 }
7166 btrfs_update_inode_block_group(trans, inode);
7167 btrfs_update_inode_block_group(trans, dir);
7168 if (drop_inode) 7182 if (drop_inode)
7169 goto out_unlock; 7183 goto out_unlock;
7170 7184
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 85e818ce00c5..a3c4751e07db 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -243,7 +243,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
243 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS); 243 ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
244 } 244 }
245 245
246 trans = btrfs_join_transaction(root, 1); 246 trans = btrfs_join_transaction(root);
247 BUG_ON(IS_ERR(trans)); 247 BUG_ON(IS_ERR(trans));
248 248
249 ret = btrfs_update_inode(trans, root, inode); 249 ret = btrfs_update_inode(trans, root, inode);
@@ -414,8 +414,7 @@ static noinline int create_subvol(struct btrfs_root *root,
414 414
415 btrfs_record_root_in_trans(trans, new_root); 415 btrfs_record_root_in_trans(trans, new_root);
416 416
417 ret = btrfs_create_subvol_root(trans, new_root, new_dirid, 417 ret = btrfs_create_subvol_root(trans, new_root, new_dirid);
418 BTRFS_I(dir)->block_group);
419 /* 418 /*
420 * insert the directory item 419 * insert the directory item
421 */ 420 */
@@ -483,8 +482,10 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
483 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot); 482 ret = btrfs_snap_reserve_metadata(trans, pending_snapshot);
484 BUG_ON(ret); 483 BUG_ON(ret);
485 484
485 spin_lock(&root->fs_info->trans_lock);
486 list_add(&pending_snapshot->list, 486 list_add(&pending_snapshot->list,
487 &trans->transaction->pending_snapshots); 487 &trans->transaction->pending_snapshots);
488 spin_unlock(&root->fs_info->trans_lock);
488 if (async_transid) { 489 if (async_transid) {
489 *async_transid = trans->transid; 490 *async_transid = trans->transid;
490 ret = btrfs_commit_transaction_async(trans, 491 ret = btrfs_commit_transaction_async(trans,
@@ -707,16 +708,17 @@ static int find_new_extents(struct btrfs_root *root,
707 struct btrfs_file_extent_item *extent; 708 struct btrfs_file_extent_item *extent;
708 int type; 709 int type;
709 int ret; 710 int ret;
711 u64 ino = btrfs_ino(inode);
710 712
711 path = btrfs_alloc_path(); 713 path = btrfs_alloc_path();
712 if (!path) 714 if (!path)
713 return -ENOMEM; 715 return -ENOMEM;
714 716
715 min_key.objectid = inode->i_ino; 717 min_key.objectid = ino;
716 min_key.type = BTRFS_EXTENT_DATA_KEY; 718 min_key.type = BTRFS_EXTENT_DATA_KEY;
717 min_key.offset = *off; 719 min_key.offset = *off;
718 720
719 max_key.objectid = inode->i_ino; 721 max_key.objectid = ino;
720 max_key.type = (u8)-1; 722 max_key.type = (u8)-1;
721 max_key.offset = (u64)-1; 723 max_key.offset = (u64)-1;
722 724
@@ -727,7 +729,7 @@ static int find_new_extents(struct btrfs_root *root,
727 path, 0, newer_than); 729 path, 0, newer_than);
728 if (ret != 0) 730 if (ret != 0)
729 goto none; 731 goto none;
730 if (min_key.objectid != inode->i_ino) 732 if (min_key.objectid != ino)
731 goto none; 733 goto none;
732 if (min_key.type != BTRFS_EXTENT_DATA_KEY) 734 if (min_key.type != BTRFS_EXTENT_DATA_KEY)
733 goto none; 735 goto none;
@@ -2054,29 +2056,34 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
2054 2056
2055static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) 2057static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg)
2056{ 2058{
2057 struct btrfs_ioctl_fs_info_args fi_args; 2059 struct btrfs_ioctl_fs_info_args *fi_args;
2058 struct btrfs_device *device; 2060 struct btrfs_device *device;
2059 struct btrfs_device *next; 2061 struct btrfs_device *next;
2060 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; 2062 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
2063 int ret = 0;
2061 2064
2062 if (!capable(CAP_SYS_ADMIN)) 2065 if (!capable(CAP_SYS_ADMIN))
2063 return -EPERM; 2066 return -EPERM;
2064 2067
2065 fi_args.num_devices = fs_devices->num_devices; 2068 fi_args = kzalloc(sizeof(*fi_args), GFP_KERNEL);
2066 fi_args.max_id = 0; 2069 if (!fi_args)
2067 memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); 2070 return -ENOMEM;
2071
2072 fi_args->num_devices = fs_devices->num_devices;
2073 memcpy(&fi_args->fsid, root->fs_info->fsid, sizeof(fi_args->fsid));
2068 2074
2069 mutex_lock(&fs_devices->device_list_mutex); 2075 mutex_lock(&fs_devices->device_list_mutex);
2070 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { 2076 list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
2071 if (device->devid > fi_args.max_id) 2077 if (device->devid > fi_args->max_id)
2072 fi_args.max_id = device->devid; 2078 fi_args->max_id = device->devid;
2073 } 2079 }
2074 mutex_unlock(&fs_devices->device_list_mutex); 2080 mutex_unlock(&fs_devices->device_list_mutex);
2075 2081
2076 if (copy_to_user(arg, &fi_args, sizeof(fi_args))) 2082 if (copy_to_user(arg, fi_args, sizeof(*fi_args)))
2077 return -EFAULT; 2083 ret = -EFAULT;
2078 2084
2079 return 0; 2085 kfree(fi_args);
2086 return ret;
2080} 2087}
2081 2088
2082static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) 2089static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
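
The btrfs_ioctl_fs_info() hunk above moves the argument struct off the kernel stack, fills a kzalloc'd copy, and funnels every exit through a single kfree(). A rough userspace model of that allocate, fill, copy-out, free shape follows; the struct layout and values are invented, and memcpy() merely stands in for copy_to_user().

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fs_info_args {                   /* illustrative stand-in for the ioctl struct */
        unsigned long long num_devices;
        unsigned long long max_id;
        unsigned char fsid[16];
};

static int fs_info_query(struct fs_info_args *out)
{
        struct fs_info_args *args;
        int ret = 0;

        args = calloc(1, sizeof(*args));        /* models kzalloc(..., GFP_KERNEL) */
        if (!args)
                return -ENOMEM;

        args->num_devices = 2;                  /* fill in the report */
        args->max_id = 2;

        if (!out)
                ret = -EFAULT;                  /* "copy failed": still fall through */
        else
                memcpy(out, args, sizeof(*args));   /* stands in for copy_to_user() */

        free(args);                             /* one cleanup point for every path */
        return ret;
}

int main(void)
{
        struct fs_info_args report;

        if (fs_info_query(&report) == 0)
                printf("%llu devices, max devid %llu\n",
                       report.num_devices, report.max_id);
        return 0;
}
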
@@ -2489,12 +2496,10 @@ static long btrfs_ioctl_trans_start(struct file *file)
2489 if (ret) 2496 if (ret)
2490 goto out; 2497 goto out;
2491 2498
2492 mutex_lock(&root->fs_info->trans_mutex); 2499 atomic_inc(&root->fs_info->open_ioctl_trans);
2493 root->fs_info->open_ioctl_trans++;
2494 mutex_unlock(&root->fs_info->trans_mutex);
2495 2500
2496 ret = -ENOMEM; 2501 ret = -ENOMEM;
2497 trans = btrfs_start_ioctl_transaction(root, 0); 2502 trans = btrfs_start_ioctl_transaction(root);
2498 if (IS_ERR(trans)) 2503 if (IS_ERR(trans))
2499 goto out_drop; 2504 goto out_drop;
2500 2505
@@ -2502,9 +2507,7 @@ static long btrfs_ioctl_trans_start(struct file *file)
2502 return 0; 2507 return 0;
2503 2508
2504out_drop: 2509out_drop:
2505 mutex_lock(&root->fs_info->trans_mutex); 2510 atomic_dec(&root->fs_info->open_ioctl_trans);
2506 root->fs_info->open_ioctl_trans--;
2507 mutex_unlock(&root->fs_info->trans_mutex);
2508 mnt_drop_write(file->f_path.mnt); 2511 mnt_drop_write(file->f_path.mnt);
2509out: 2512out:
2510 return ret; 2513 return ret;
@@ -2738,9 +2741,7 @@ long btrfs_ioctl_trans_end(struct file *file)
2738 2741
2739 btrfs_end_transaction(trans, root); 2742 btrfs_end_transaction(trans, root);
2740 2743
2741 mutex_lock(&root->fs_info->trans_mutex); 2744 atomic_dec(&root->fs_info->open_ioctl_trans);
2742 root->fs_info->open_ioctl_trans--;
2743 mutex_unlock(&root->fs_info->trans_mutex);
2744 2745
2745 mnt_drop_write(file->f_path.mnt); 2746 mnt_drop_write(file->f_path.mnt);
2746 return 0; 2747 return 0;
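
The two ioctl hunks above stop taking trans_mutex just to bump or drop the open_ioctl_trans counter and use an atomic increment/decrement instead. A tiny userspace sketch of the same shape, using C11 <stdatomic.h> in place of the kernel's atomic_t (hypothetical names, not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_ioctl_trans;     /* was: an int guarded by a mutex */

static void trans_start(void)
{
        atomic_fetch_add(&open_ioctl_trans, 1);         /* models atomic_inc() */
}

static void trans_end(void)
{
        atomic_fetch_sub(&open_ioctl_trans, 1);         /* models atomic_dec() */
}

int main(void)
{
        trans_start();
        trans_start();
        trans_end();
        printf("open ioctl transactions: %d\n", atomic_load(&open_ioctl_trans));
        return 0;
}
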
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ca38eca70af0..5e0a3dc79a45 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -677,6 +677,8 @@ struct backref_node *build_backref_tree(struct reloc_control *rc,
677 err = -ENOMEM; 677 err = -ENOMEM;
678 goto out; 678 goto out;
679 } 679 }
680 path1->reada = 1;
681 path2->reada = 2;
680 682
681 node = alloc_backref_node(cache); 683 node = alloc_backref_node(cache);
682 if (!node) { 684 if (!node) {
@@ -1366,7 +1368,7 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1366 int ret; 1368 int ret;
1367 1369
1368 if (!root->reloc_root) 1370 if (!root->reloc_root)
1369 return 0; 1371 goto out;
1370 1372
1371 reloc_root = root->reloc_root; 1373 reloc_root = root->reloc_root;
1372 root_item = &reloc_root->root_item; 1374 root_item = &reloc_root->root_item;
@@ -1388,6 +1390,8 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
1388 ret = btrfs_update_root(trans, root->fs_info->tree_root, 1390 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1389 &reloc_root->root_key, root_item); 1391 &reloc_root->root_key, root_item);
1390 BUG_ON(ret); 1392 BUG_ON(ret);
1393
1394out:
1391 return 0; 1395 return 0;
1392} 1396}
1393 1397
@@ -1999,6 +2003,7 @@ static noinline_for_stack int merge_reloc_root(struct reloc_control *rc,
1999 path = btrfs_alloc_path(); 2003 path = btrfs_alloc_path();
2000 if (!path) 2004 if (!path)
2001 return -ENOMEM; 2005 return -ENOMEM;
2006 path->reada = 1;
2002 2007
2003 reloc_root = root->reloc_root; 2008 reloc_root = root->reloc_root;
2004 root_item = &reloc_root->root_item; 2009 root_item = &reloc_root->root_item;
@@ -2139,10 +2144,11 @@ int prepare_to_merge(struct reloc_control *rc, int err)
2139 u64 num_bytes = 0; 2144 u64 num_bytes = 0;
2140 int ret; 2145 int ret;
2141 2146
2142 mutex_lock(&root->fs_info->trans_mutex); 2147 mutex_lock(&root->fs_info->reloc_mutex);
2143 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2; 2148 rc->merging_rsv_size += root->nodesize * (BTRFS_MAX_LEVEL - 1) * 2;
2144 rc->merging_rsv_size += rc->nodes_relocated * 2; 2149 rc->merging_rsv_size += rc->nodes_relocated * 2;
2145 mutex_unlock(&root->fs_info->trans_mutex); 2150 mutex_unlock(&root->fs_info->reloc_mutex);
2151
2146again: 2152again:
2147 if (!err) { 2153 if (!err) {
2148 num_bytes = rc->merging_rsv_size; 2154 num_bytes = rc->merging_rsv_size;
@@ -2152,7 +2158,7 @@ again:
2152 err = ret; 2158 err = ret;
2153 } 2159 }
2154 2160
2155 trans = btrfs_join_transaction(rc->extent_root, 1); 2161 trans = btrfs_join_transaction(rc->extent_root);
2156 if (IS_ERR(trans)) { 2162 if (IS_ERR(trans)) {
2157 if (!err) 2163 if (!err)
2158 btrfs_block_rsv_release(rc->extent_root, 2164 btrfs_block_rsv_release(rc->extent_root,
@@ -2211,9 +2217,16 @@ int merge_reloc_roots(struct reloc_control *rc)
2211 int ret; 2217 int ret;
2212again: 2218again:
2213 root = rc->extent_root; 2219 root = rc->extent_root;
2214 mutex_lock(&root->fs_info->trans_mutex); 2220
2221 /*
2222 * this serializes us with btrfs_record_root_in_transaction;
2223 * we have to make sure nobody is in the middle of
2224 * adding their roots to the list while we are
2225 * doing this splice
2226 */
2227 mutex_lock(&root->fs_info->reloc_mutex);
2215 list_splice_init(&rc->reloc_roots, &reloc_roots); 2228 list_splice_init(&rc->reloc_roots, &reloc_roots);
2216 mutex_unlock(&root->fs_info->trans_mutex); 2229 mutex_unlock(&root->fs_info->reloc_mutex);
2217 2230
2218 while (!list_empty(&reloc_roots)) { 2231 while (!list_empty(&reloc_roots)) {
2219 found = 1; 2232 found = 1;
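
The merge_reloc_roots() hunk above, as its new comment says, holds reloc_mutex only long enough to splice the shared reloc_roots list onto a private list, then walks the private copy with the lock dropped. An illustrative userspace version of that splice-then-process pattern, with a plain singly linked list and a pthread mutex standing in for list_splice_init() and the kernel mutex:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static pthread_mutex_t reloc_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct node *pending;            /* shared list: writers prepend under the mutex */

static void add_root(int id)
{
        struct node *n = malloc(sizeof(*n));

        if (!n)
                return;
        n->id = id;
        pthread_mutex_lock(&reloc_mutex);
        n->next = pending;
        pending = n;
        pthread_mutex_unlock(&reloc_mutex);
}

/* Take the whole list in one short critical section, then work on it unlocked. */
static void merge_pending(void)
{
        struct node *local;

        pthread_mutex_lock(&reloc_mutex);
        local = pending;
        pending = NULL;                 /* the splice: the shared list is now empty */
        pthread_mutex_unlock(&reloc_mutex);

        while (local) {
                struct node *n = local;

                local = local->next;
                printf("merging root %d\n", n->id);
                free(n);
        }
}

int main(void)
{
        add_root(256);
        add_root(257);
        merge_pending();
        return 0;
}
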
@@ -3236,7 +3249,7 @@ truncate:
3236 goto out; 3249 goto out;
3237 } 3250 }
3238 3251
3239 trans = btrfs_join_transaction(root, 0); 3252 trans = btrfs_join_transaction(root);
3240 if (IS_ERR(trans)) { 3253 if (IS_ERR(trans)) {
3241 btrfs_free_path(path); 3254 btrfs_free_path(path);
3242 ret = PTR_ERR(trans); 3255 ret = PTR_ERR(trans);
@@ -3300,6 +3313,7 @@ static int find_data_references(struct reloc_control *rc,
3300 path = btrfs_alloc_path(); 3313 path = btrfs_alloc_path();
3301 if (!path) 3314 if (!path)
3302 return -ENOMEM; 3315 return -ENOMEM;
3316 path->reada = 1;
3303 3317
3304 root = read_fs_root(rc->extent_root->fs_info, ref_root); 3318 root = read_fs_root(rc->extent_root->fs_info, ref_root);
3305 if (IS_ERR(root)) { 3319 if (IS_ERR(root)) {
@@ -3586,17 +3600,19 @@ next:
3586static void set_reloc_control(struct reloc_control *rc) 3600static void set_reloc_control(struct reloc_control *rc)
3587{ 3601{
3588 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3602 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3589 mutex_lock(&fs_info->trans_mutex); 3603
3604 mutex_lock(&fs_info->reloc_mutex);
3590 fs_info->reloc_ctl = rc; 3605 fs_info->reloc_ctl = rc;
3591 mutex_unlock(&fs_info->trans_mutex); 3606 mutex_unlock(&fs_info->reloc_mutex);
3592} 3607}
3593 3608
3594static void unset_reloc_control(struct reloc_control *rc) 3609static void unset_reloc_control(struct reloc_control *rc)
3595{ 3610{
3596 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info; 3611 struct btrfs_fs_info *fs_info = rc->extent_root->fs_info;
3597 mutex_lock(&fs_info->trans_mutex); 3612
3613 mutex_lock(&fs_info->reloc_mutex);
3598 fs_info->reloc_ctl = NULL; 3614 fs_info->reloc_ctl = NULL;
3599 mutex_unlock(&fs_info->trans_mutex); 3615 mutex_unlock(&fs_info->reloc_mutex);
3600} 3616}
3601 3617
3602static int check_extent_flags(u64 flags) 3618static int check_extent_flags(u64 flags)
@@ -3645,7 +3661,7 @@ int prepare_to_relocate(struct reloc_control *rc)
3645 rc->create_reloc_tree = 1; 3661 rc->create_reloc_tree = 1;
3646 set_reloc_control(rc); 3662 set_reloc_control(rc);
3647 3663
3648 trans = btrfs_join_transaction(rc->extent_root, 1); 3664 trans = btrfs_join_transaction(rc->extent_root);
3649 BUG_ON(IS_ERR(trans)); 3665 BUG_ON(IS_ERR(trans));
3650 btrfs_commit_transaction(trans, rc->extent_root); 3666 btrfs_commit_transaction(trans, rc->extent_root);
3651 return 0; 3667 return 0;
@@ -3668,6 +3684,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3668 path = btrfs_alloc_path(); 3684 path = btrfs_alloc_path();
3669 if (!path) 3685 if (!path)
3670 return -ENOMEM; 3686 return -ENOMEM;
3687 path->reada = 1;
3671 3688
3672 ret = prepare_to_relocate(rc); 3689 ret = prepare_to_relocate(rc);
3673 if (ret) { 3690 if (ret) {
@@ -3834,7 +3851,7 @@ restart:
3834 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1); 3851 btrfs_block_rsv_release(rc->extent_root, rc->block_rsv, (u64)-1);
3835 3852
3836 /* get rid of pinned extents */ 3853 /* get rid of pinned extents */
3837 trans = btrfs_join_transaction(rc->extent_root, 1); 3854 trans = btrfs_join_transaction(rc->extent_root);
3838 if (IS_ERR(trans)) 3855 if (IS_ERR(trans))
3839 err = PTR_ERR(trans); 3856 err = PTR_ERR(trans);
3840 else 3857 else
@@ -4093,6 +4110,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4093 path = btrfs_alloc_path(); 4110 path = btrfs_alloc_path();
4094 if (!path) 4111 if (!path)
4095 return -ENOMEM; 4112 return -ENOMEM;
4113 path->reada = -1;
4096 4114
4097 key.objectid = BTRFS_TREE_RELOC_OBJECTID; 4115 key.objectid = BTRFS_TREE_RELOC_OBJECTID;
4098 key.type = BTRFS_ROOT_ITEM_KEY; 4116 key.type = BTRFS_ROOT_ITEM_KEY;
@@ -4159,7 +4177,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4159 4177
4160 set_reloc_control(rc); 4178 set_reloc_control(rc);
4161 4179
4162 trans = btrfs_join_transaction(rc->extent_root, 1); 4180 trans = btrfs_join_transaction(rc->extent_root);
4163 if (IS_ERR(trans)) { 4181 if (IS_ERR(trans)) {
4164 unset_reloc_control(rc); 4182 unset_reloc_control(rc);
4165 err = PTR_ERR(trans); 4183 err = PTR_ERR(trans);
@@ -4193,7 +4211,7 @@ int btrfs_recover_relocation(struct btrfs_root *root)
4193 4211
4194 unset_reloc_control(rc); 4212 unset_reloc_control(rc);
4195 4213
4196 trans = btrfs_join_transaction(rc->extent_root, 1); 4214 trans = btrfs_join_transaction(rc->extent_root);
4197 if (IS_ERR(trans)) 4215 if (IS_ERR(trans))
4198 err = PTR_ERR(trans); 4216 err = PTR_ERR(trans);
4199 else 4217 else
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 6dfed0c27ac3..a8d03d5efb5d 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -16,13 +16,7 @@
16 * Boston, MA 021110-1307, USA. 16 * Boston, MA 021110-1307, USA.
17 */ 17 */
18 18
19#include <linux/sched.h>
20#include <linux/pagemap.h>
21#include <linux/writeback.h>
22#include <linux/blkdev.h> 19#include <linux/blkdev.h>
23#include <linux/rbtree.h>
24#include <linux/slab.h>
25#include <linux/workqueue.h>
26#include "ctree.h" 20#include "ctree.h"
27#include "volumes.h" 21#include "volumes.h"
28#include "disk-io.h" 22#include "disk-io.h"
@@ -117,33 +111,37 @@ static void scrub_free_csums(struct scrub_dev *sdev)
117 } 111 }
118} 112}
119 113
114static void scrub_free_bio(struct bio *bio)
115{
116 int i;
117 struct page *last_page = NULL;
118
119 if (!bio)
120 return;
121
122 for (i = 0; i < bio->bi_vcnt; ++i) {
123 if (bio->bi_io_vec[i].bv_page == last_page)
124 continue;
125 last_page = bio->bi_io_vec[i].bv_page;
126 __free_page(last_page);
127 }
128 bio_put(bio);
129}
130
120static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) 131static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev)
121{ 132{
122 int i; 133 int i;
123 int j;
124 struct page *last_page;
125 134
126 if (!sdev) 135 if (!sdev)
127 return; 136 return;
128 137
129 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 138 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
130 struct scrub_bio *sbio = sdev->bios[i]; 139 struct scrub_bio *sbio = sdev->bios[i];
131 struct bio *bio;
132 140
133 if (!sbio) 141 if (!sbio)
134 break; 142 break;
135 143
136 bio = sbio->bio; 144 scrub_free_bio(sbio->bio);
137 if (bio) {
138 last_page = NULL;
139 for (j = 0; j < bio->bi_vcnt; ++j) {
140 if (bio->bi_io_vec[j].bv_page == last_page)
141 continue;
142 last_page = bio->bi_io_vec[j].bv_page;
143 __free_page(last_page);
144 }
145 bio_put(bio);
146 }
147 kfree(sbio); 145 kfree(sbio);
148 } 146 }
149 147
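
The new scrub_free_bio() above frees each page referenced by the bio while skipping entries that repeat the previous page pointer, so a page sitting in adjacent bio_vecs is only freed once. A small userspace model of that skip-consecutive-duplicates walk (the vector contents and page size are invented):

#include <stdlib.h>

/* Free every distinct pointer in the vector, skipping consecutive repeats. */
static void free_pages_once(void **pages, int count)
{
        void *last = NULL;
        int i;

        for (i = 0; i < count; i++) {
                if (pages[i] == last)
                        continue;       /* same page as the previous slot */
                last = pages[i];
                free(last);
        }
}

int main(void)
{
        void *p0 = malloc(4096), *p1 = malloc(4096);
        void *vec[3] = { p0, p0, p1 };  /* p0 occupies two adjacent slots */

        free_pages_once(vec, 3);        /* frees p0 once and p1 once */
        return 0;
}
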
@@ -156,8 +154,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
156{ 154{
157 struct scrub_dev *sdev; 155 struct scrub_dev *sdev;
158 int i; 156 int i;
159 int j;
160 int ret;
161 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; 157 struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
162 158
163 sdev = kzalloc(sizeof(*sdev), GFP_NOFS); 159 sdev = kzalloc(sizeof(*sdev), GFP_NOFS);
@@ -165,7 +161,6 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
165 goto nomem; 161 goto nomem;
166 sdev->dev = dev; 162 sdev->dev = dev;
167 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { 163 for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) {
168 struct bio *bio;
169 struct scrub_bio *sbio; 164 struct scrub_bio *sbio;
170 165
171 sbio = kzalloc(sizeof(*sbio), GFP_NOFS); 166 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
@@ -173,32 +168,10 @@ struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev)
173 goto nomem; 168 goto nomem;
174 sdev->bios[i] = sbio; 169 sdev->bios[i] = sbio;
175 170
176 bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO);
177 if (!bio)
178 goto nomem;
179
180 sbio->index = i; 171 sbio->index = i;
181 sbio->sdev = sdev; 172 sbio->sdev = sdev;
182 sbio->bio = bio;
183 sbio->count = 0; 173 sbio->count = 0;
184 sbio->work.func = scrub_checksum; 174 sbio->work.func = scrub_checksum;
185 bio->bi_private = sdev->bios[i];
186 bio->bi_end_io = scrub_bio_end_io;
187 bio->bi_sector = 0;
188 bio->bi_bdev = dev->bdev;
189 bio->bi_size = 0;
190
191 for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) {
192 struct page *page;
193 page = alloc_page(GFP_NOFS);
194 if (!page)
195 goto nomem;
196
197 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
198 if (!ret)
199 goto nomem;
200 }
201 WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO);
202 175
203 if (i != SCRUB_BIOS_PER_DEV-1) 176 if (i != SCRUB_BIOS_PER_DEV-1)
204 sdev->bios[i]->next_free = i + 1; 177 sdev->bios[i]->next_free = i + 1;
@@ -369,9 +342,6 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
369 int ret; 342 int ret;
370 DECLARE_COMPLETION_ONSTACK(complete); 343 DECLARE_COMPLETION_ONSTACK(complete);
371 344
372 /* we are going to wait on this IO */
373 rw |= REQ_SYNC;
374
375 bio = bio_alloc(GFP_NOFS, 1); 345 bio = bio_alloc(GFP_NOFS, 1);
376 bio->bi_bdev = bdev; 346 bio->bi_bdev = bdev;
377 bio->bi_sector = sector; 347 bio->bi_sector = sector;
@@ -380,6 +350,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
380 bio->bi_private = &complete; 350 bio->bi_private = &complete;
381 submit_bio(rw, bio); 351 submit_bio(rw, bio);
382 352
353 /* this will also unplug the queue */
383 wait_for_completion(&complete); 354 wait_for_completion(&complete);
384 355
385 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); 356 ret = !test_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -394,6 +365,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
394 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; 365 struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info;
395 366
396 sbio->err = err; 367 sbio->err = err;
368 sbio->bio = bio;
397 369
398 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); 370 btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work);
399} 371}
@@ -453,6 +425,8 @@ static void scrub_checksum(struct btrfs_work *work)
453 } 425 }
454 426
455out: 427out:
428 scrub_free_bio(sbio->bio);
429 sbio->bio = NULL;
456 spin_lock(&sdev->list_lock); 430 spin_lock(&sdev->list_lock);
457 sbio->next_free = sdev->first_free; 431 sbio->next_free = sdev->first_free;
458 sdev->first_free = sbio->index; 432 sdev->first_free = sbio->index;
@@ -583,25 +557,50 @@ static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer)
583static int scrub_submit(struct scrub_dev *sdev) 557static int scrub_submit(struct scrub_dev *sdev)
584{ 558{
585 struct scrub_bio *sbio; 559 struct scrub_bio *sbio;
560 struct bio *bio;
561 int i;
586 562
587 if (sdev->curr == -1) 563 if (sdev->curr == -1)
588 return 0; 564 return 0;
589 565
590 sbio = sdev->bios[sdev->curr]; 566 sbio = sdev->bios[sdev->curr];
591 567
592 sbio->bio->bi_sector = sbio->physical >> 9; 568 bio = bio_alloc(GFP_NOFS, sbio->count);
593 sbio->bio->bi_size = sbio->count * PAGE_SIZE; 569 if (!bio)
594 sbio->bio->bi_next = NULL; 570 goto nomem;
595 sbio->bio->bi_flags |= 1 << BIO_UPTODATE; 571
596 sbio->bio->bi_comp_cpu = -1; 572 bio->bi_private = sbio;
597 sbio->bio->bi_bdev = sdev->dev->bdev; 573 bio->bi_end_io = scrub_bio_end_io;
574 bio->bi_bdev = sdev->dev->bdev;
575 bio->bi_sector = sbio->physical >> 9;
576
577 for (i = 0; i < sbio->count; ++i) {
578 struct page *page;
579 int ret;
580
581 page = alloc_page(GFP_NOFS);
582 if (!page)
583 goto nomem;
584
585 ret = bio_add_page(bio, page, PAGE_SIZE, 0);
586 if (!ret) {
587 __free_page(page);
588 goto nomem;
589 }
590 }
591
598 sbio->err = 0; 592 sbio->err = 0;
599 sdev->curr = -1; 593 sdev->curr = -1;
600 atomic_inc(&sdev->in_flight); 594 atomic_inc(&sdev->in_flight);
601 595
602 submit_bio(0, sbio->bio); 596 submit_bio(READ, bio);
603 597
604 return 0; 598 return 0;
599
600nomem:
601 scrub_free_bio(bio);
602
603 return -ENOMEM;
605} 604}
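
After this rework scrub_submit() builds the bio and its pages only at submission time, and a mid-loop allocation failure unwinds whatever has been built before returning -ENOMEM. A hypothetical userspace sketch of that build-or-unwind pattern; malloc()/free() stand in for alloc_page(), bio_add_page() and scrub_free_bio():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int build_and_submit(int count)
{
        void **pages;
        int i;

        pages = calloc(count, sizeof(*pages));
        if (!pages)
                return -ENOMEM;

        for (i = 0; i < count; i++) {
                pages[i] = malloc(4096);        /* models alloc_page(GFP_NOFS) */
                if (!pages[i])
                        goto nomem;
        }

        printf("submitting %d pages\n", count); /* stands in for submit_bio(READ, bio) */
        for (i = 0; i < count; i++)
                free(pages[i]);
        free(pages);
        return 0;

nomem:
        while (--i >= 0)                        /* free only what was actually built */
                free(pages[i]);
        free(pages);
        return -ENOMEM;
}

int main(void)
{
        return build_and_submit(16) ? EXIT_FAILURE : EXIT_SUCCESS;
}
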
606 605
607static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, 606static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len,
@@ -633,7 +632,11 @@ again:
633 sbio->logical = logical; 632 sbio->logical = logical;
634 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || 633 } else if (sbio->physical + sbio->count * PAGE_SIZE != physical ||
635 sbio->logical + sbio->count * PAGE_SIZE != logical) { 634 sbio->logical + sbio->count * PAGE_SIZE != logical) {
636 scrub_submit(sdev); 635 int ret;
636
637 ret = scrub_submit(sdev);
638 if (ret)
639 return ret;
637 goto again; 640 goto again;
638 } 641 }
639 sbio->spag[sbio->count].flags = flags; 642 sbio->spag[sbio->count].flags = flags;
@@ -645,8 +648,13 @@ again:
645 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); 648 memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size);
646 } 649 }
647 ++sbio->count; 650 ++sbio->count;
648 if (sbio->count == SCRUB_PAGES_PER_BIO || force) 651 if (sbio->count == SCRUB_PAGES_PER_BIO || force) {
649 scrub_submit(sdev); 652 int ret;
653
654 ret = scrub_submit(sdev);
655 if (ret)
656 return ret;
657 }
650 658
651 return 0; 659 return 0;
652} 660}
@@ -727,6 +735,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
727 struct btrfs_root *root = fs_info->extent_root; 735 struct btrfs_root *root = fs_info->extent_root;
728 struct btrfs_root *csum_root = fs_info->csum_root; 736 struct btrfs_root *csum_root = fs_info->csum_root;
729 struct btrfs_extent_item *extent; 737 struct btrfs_extent_item *extent;
738 struct blk_plug plug;
730 u64 flags; 739 u64 flags;
731 int ret; 740 int ret;
732 int slot; 741 int slot;
@@ -789,18 +798,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
789 798
790 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 799 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
791 if (ret < 0) 800 if (ret < 0)
792 goto out; 801 goto out_noplug;
793
794 l = path->nodes[0];
795 slot = path->slots[0];
796 btrfs_item_key_to_cpu(l, &key, slot);
797 if (key.objectid != logical) {
798 ret = btrfs_previous_item(root, path, 0,
799 BTRFS_EXTENT_ITEM_KEY);
800 if (ret < 0)
801 goto out;
802 }
803 802
803 /*
804 * we might miss half an extent here, but that doesn't matter,
805 * as it's only the prefetch
806 */
804 while (1) { 807 while (1) {
805 l = path->nodes[0]; 808 l = path->nodes[0];
806 slot = path->slots[0]; 809 slot = path->slots[0];
@@ -809,7 +812,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
809 if (ret == 0) 812 if (ret == 0)
810 continue; 813 continue;
811 if (ret < 0) 814 if (ret < 0)
812 goto out; 815 goto out_noplug;
813 816
814 break; 817 break;
815 } 818 }
@@ -831,6 +834,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev,
831 * the scrub. This might currently (crc32) end up to be about 1MB 834 * the scrub. This might currently (crc32) end up to be about 1MB
832 */ 835 */
833 start_stripe = 0; 836 start_stripe = 0;
837 blk_start_plug(&plug);
834again: 838again:
835 logical = base + offset + start_stripe * increment; 839 logical = base + offset + start_stripe * increment;
836 for (i = start_stripe; i < nstripes; ++i) { 840 for (i = start_stripe; i < nstripes; ++i) {
@@ -890,15 +894,20 @@ again:
890 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 894 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
891 if (ret < 0) 895 if (ret < 0)
892 goto out; 896 goto out;
893 897 if (ret > 0) {
894 l = path->nodes[0];
895 slot = path->slots[0];
896 btrfs_item_key_to_cpu(l, &key, slot);
897 if (key.objectid != logical) {
898 ret = btrfs_previous_item(root, path, 0, 898 ret = btrfs_previous_item(root, path, 0,
899 BTRFS_EXTENT_ITEM_KEY); 899 BTRFS_EXTENT_ITEM_KEY);
900 if (ret < 0) 900 if (ret < 0)
901 goto out; 901 goto out;
902 if (ret > 0) {
903 /* there's no smaller item, so stick with the
904 * larger one */
905 btrfs_release_path(path);
906 ret = btrfs_search_slot(NULL, root, &key,
907 path, 0, 0);
908 if (ret < 0)
909 goto out;
910 }
902 } 911 }
903 912
904 while (1) { 913 while (1) {
@@ -972,6 +981,8 @@ next:
972 scrub_submit(sdev); 981 scrub_submit(sdev);
973 982
974out: 983out:
984 blk_finish_plug(&plug);
985out_noplug:
975 btrfs_free_path(path); 986 btrfs_free_path(path);
976 return ret < 0 ? ret : 0; 987 return ret < 0 ? ret : 0;
977} 988}
@@ -1047,8 +1058,15 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1047 while (1) { 1058 while (1) {
1048 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1059 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1049 if (ret < 0) 1060 if (ret < 0)
1050 goto out; 1061 break;
1051 ret = 0; 1062 if (ret > 0) {
1063 if (path->slots[0] >=
1064 btrfs_header_nritems(path->nodes[0])) {
1065 ret = btrfs_next_leaf(root, path);
1066 if (ret)
1067 break;
1068 }
1069 }
1052 1070
1053 l = path->nodes[0]; 1071 l = path->nodes[0];
1054 slot = path->slots[0]; 1072 slot = path->slots[0];
@@ -1058,7 +1076,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1058 if (found_key.objectid != sdev->dev->devid) 1076 if (found_key.objectid != sdev->dev->devid)
1059 break; 1077 break;
1060 1078
1061 if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) 1079 if (btrfs_key_type(&found_key) != BTRFS_DEV_EXTENT_KEY)
1062 break; 1080 break;
1063 1081
1064 if (found_key.offset >= end) 1082 if (found_key.offset >= end)
@@ -1087,7 +1105,7 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1087 cache = btrfs_lookup_block_group(fs_info, chunk_offset); 1105 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
1088 if (!cache) { 1106 if (!cache) {
1089 ret = -ENOENT; 1107 ret = -ENOENT;
1090 goto out; 1108 break;
1091 } 1109 }
1092 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, 1110 ret = scrub_chunk(sdev, chunk_tree, chunk_objectid,
1093 chunk_offset, length); 1111 chunk_offset, length);
@@ -1099,9 +1117,13 @@ int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end)
1099 btrfs_release_path(path); 1117 btrfs_release_path(path);
1100 } 1118 }
1101 1119
1102out:
1103 btrfs_free_path(path); 1120 btrfs_free_path(path);
1104 return ret; 1121
1122 /*
1123 * ret can still be 1 from search_slot or next_leaf;
1124 * that's not an error
1125 */
1126 return ret < 0 ? ret : 0;
1105} 1127}
1106 1128
1107static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) 1129static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
@@ -1138,8 +1160,12 @@ static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
1138 struct btrfs_fs_info *fs_info = root->fs_info; 1160 struct btrfs_fs_info *fs_info = root->fs_info;
1139 1161
1140 mutex_lock(&fs_info->scrub_lock); 1162 mutex_lock(&fs_info->scrub_lock);
1141 if (fs_info->scrub_workers_refcnt == 0) 1163 if (fs_info->scrub_workers_refcnt == 0) {
1164 btrfs_init_workers(&fs_info->scrub_workers, "scrub",
1165 fs_info->thread_pool_size, &fs_info->generic_worker);
1166 fs_info->scrub_workers.idle_thresh = 4;
1142 btrfs_start_workers(&fs_info->scrub_workers, 1); 1167 btrfs_start_workers(&fs_info->scrub_workers, 1);
1168 }
1143 ++fs_info->scrub_workers_refcnt; 1169 ++fs_info->scrub_workers_refcnt;
1144 mutex_unlock(&fs_info->scrub_lock); 1170 mutex_unlock(&fs_info->scrub_lock);
1145 1171
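
scrub_workers_get() above now initializes and starts the scrub worker pool only when the refcount goes from zero to one, with scrub_lock held across the check and the bump. A minimal userspace sketch of that refcounted lazy setup and teardown; the put path shown is a simplified model of the kernel's matching release routine, and the printf output is invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t scrub_lock = PTHREAD_MUTEX_INITIALIZER;
static int scrub_workers_refcnt;
static int pool_running;                /* stands in for the started worker pool */

/* First caller brings the pool up; later callers only bump the refcount. */
static void workers_get(void)
{
        pthread_mutex_lock(&scrub_lock);
        if (scrub_workers_refcnt == 0) {
                pool_running = 1;       /* models btrfs_init_workers + btrfs_start_workers */
                printf("worker pool started\n");
        }
        scrub_workers_refcnt++;
        pthread_mutex_unlock(&scrub_lock);
}

/* Last caller tears the pool down again. */
static void workers_put(void)
{
        pthread_mutex_lock(&scrub_lock);
        if (--scrub_workers_refcnt == 0) {
                pool_running = 0;
                printf("worker pool stopped\n");
        }
        pthread_mutex_unlock(&scrub_lock);
}

int main(void)
{
        workers_get();          /* starts the pool */
        workers_get();          /* reuses it */
        workers_put();
        workers_put();          /* stops it */
        return 0;
}
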
@@ -1166,7 +1192,7 @@ int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end,
1166 int ret; 1192 int ret;
1167 struct btrfs_device *dev; 1193 struct btrfs_device *dev;
1168 1194
1169 if (root->fs_info->closing) 1195 if (btrfs_fs_closing(root->fs_info))
1170 return -EINVAL; 1196 return -EINVAL;
1171 1197
1172 /* 1198 /*
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 9b2e7e5bc3ef..0bb4ebbb71b7 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -161,7 +161,8 @@ enum {
161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 161 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 162 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, 163 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, 164 Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
165 Opt_inode_cache, Opt_err,
165}; 166};
166 167
167static match_table_t tokens = { 168static match_table_t tokens = {
@@ -193,6 +194,7 @@ static match_table_t tokens = {
193 {Opt_enospc_debug, "enospc_debug"}, 194 {Opt_enospc_debug, "enospc_debug"},
194 {Opt_subvolrootid, "subvolrootid=%d"}, 195 {Opt_subvolrootid, "subvolrootid=%d"},
195 {Opt_defrag, "autodefrag"}, 196 {Opt_defrag, "autodefrag"},
197 {Opt_inode_cache, "inode_cache"},
196 {Opt_err, NULL}, 198 {Opt_err, NULL},
197}; 199};
198 200
@@ -361,6 +363,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
361 printk(KERN_INFO "btrfs: enabling disk space caching\n"); 363 printk(KERN_INFO "btrfs: enabling disk space caching\n");
362 btrfs_set_opt(info->mount_opt, SPACE_CACHE); 364 btrfs_set_opt(info->mount_opt, SPACE_CACHE);
363 break; 365 break;
366 case Opt_inode_cache:
367 printk(KERN_INFO "btrfs: enabling inode map caching\n");
368 btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
369 break;
364 case Opt_clear_cache: 370 case Opt_clear_cache:
365 printk(KERN_INFO "btrfs: force clearing of disk cache\n"); 371 printk(KERN_INFO "btrfs: force clearing of disk cache\n");
366 btrfs_set_opt(info->mount_opt, CLEAR_CACHE); 372 btrfs_set_opt(info->mount_opt, CLEAR_CACHE);
@@ -819,7 +825,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
819 } else { 825 } else {
820 char b[BDEVNAME_SIZE]; 826 char b[BDEVNAME_SIZE];
821 827
822 s->s_flags = flags; 828 s->s_flags = flags | MS_NOSEC;
823 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 829 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
824 error = btrfs_fill_super(s, fs_devices, data, 830 error = btrfs_fill_super(s, fs_devices, data,
825 flags & MS_SILENT ? 1 : 0); 831 flags & MS_SILENT ? 1 : 0);
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c3c223ae6691..daac9ae6d731 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -28,152 +28,6 @@
28#include "disk-io.h" 28#include "disk-io.h"
29#include "transaction.h" 29#include "transaction.h"
30 30
31static ssize_t root_blocks_used_show(struct btrfs_root *root, char *buf)
32{
33 return snprintf(buf, PAGE_SIZE, "%llu\n",
34 (unsigned long long)btrfs_root_used(&root->root_item));
35}
36
37static ssize_t root_block_limit_show(struct btrfs_root *root, char *buf)
38{
39 return snprintf(buf, PAGE_SIZE, "%llu\n",
40 (unsigned long long)btrfs_root_limit(&root->root_item));
41}
42
43static ssize_t super_blocks_used_show(struct btrfs_fs_info *fs, char *buf)
44{
45
46 return snprintf(buf, PAGE_SIZE, "%llu\n",
47 (unsigned long long)btrfs_super_bytes_used(&fs->super_copy));
48}
49
50static ssize_t super_total_blocks_show(struct btrfs_fs_info *fs, char *buf)
51{
52 return snprintf(buf, PAGE_SIZE, "%llu\n",
53 (unsigned long long)btrfs_super_total_bytes(&fs->super_copy));
54}
55
56static ssize_t super_blocksize_show(struct btrfs_fs_info *fs, char *buf)
57{
58 return snprintf(buf, PAGE_SIZE, "%llu\n",
59 (unsigned long long)btrfs_super_sectorsize(&fs->super_copy));
60}
61
62/* this is for root attrs (subvols/snapshots) */
63struct btrfs_root_attr {
64 struct attribute attr;
65 ssize_t (*show)(struct btrfs_root *, char *);
66 ssize_t (*store)(struct btrfs_root *, const char *, size_t);
67};
68
69#define ROOT_ATTR(name, mode, show, store) \
70static struct btrfs_root_attr btrfs_root_attr_##name = __ATTR(name, mode, \
71 show, store)
72
73ROOT_ATTR(blocks_used, 0444, root_blocks_used_show, NULL);
74ROOT_ATTR(block_limit, 0644, root_block_limit_show, NULL);
75
76static struct attribute *btrfs_root_attrs[] = {
77 &btrfs_root_attr_blocks_used.attr,
78 &btrfs_root_attr_block_limit.attr,
79 NULL,
80};
81
82/* this is for super attrs (actual full fs) */
83struct btrfs_super_attr {
84 struct attribute attr;
85 ssize_t (*show)(struct btrfs_fs_info *, char *);
86 ssize_t (*store)(struct btrfs_fs_info *, const char *, size_t);
87};
88
89#define SUPER_ATTR(name, mode, show, store) \
90static struct btrfs_super_attr btrfs_super_attr_##name = __ATTR(name, mode, \
91 show, store)
92
93SUPER_ATTR(blocks_used, 0444, super_blocks_used_show, NULL);
94SUPER_ATTR(total_blocks, 0444, super_total_blocks_show, NULL);
95SUPER_ATTR(blocksize, 0444, super_blocksize_show, NULL);
96
97static struct attribute *btrfs_super_attrs[] = {
98 &btrfs_super_attr_blocks_used.attr,
99 &btrfs_super_attr_total_blocks.attr,
100 &btrfs_super_attr_blocksize.attr,
101 NULL,
102};
103
104static ssize_t btrfs_super_attr_show(struct kobject *kobj,
105 struct attribute *attr, char *buf)
106{
107 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
108 super_kobj);
109 struct btrfs_super_attr *a = container_of(attr,
110 struct btrfs_super_attr,
111 attr);
112
113 return a->show ? a->show(fs, buf) : 0;
114}
115
116static ssize_t btrfs_super_attr_store(struct kobject *kobj,
117 struct attribute *attr,
118 const char *buf, size_t len)
119{
120 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
121 super_kobj);
122 struct btrfs_super_attr *a = container_of(attr,
123 struct btrfs_super_attr,
124 attr);
125
126 return a->store ? a->store(fs, buf, len) : 0;
127}
128
129static ssize_t btrfs_root_attr_show(struct kobject *kobj,
130 struct attribute *attr, char *buf)
131{
132 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
133 root_kobj);
134 struct btrfs_root_attr *a = container_of(attr,
135 struct btrfs_root_attr,
136 attr);
137
138 return a->show ? a->show(root, buf) : 0;
139}
140
141static ssize_t btrfs_root_attr_store(struct kobject *kobj,
142 struct attribute *attr,
143 const char *buf, size_t len)
144{
145 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
146 root_kobj);
147 struct btrfs_root_attr *a = container_of(attr,
148 struct btrfs_root_attr,
149 attr);
150 return a->store ? a->store(root, buf, len) : 0;
151}
152
153static void btrfs_super_release(struct kobject *kobj)
154{
155 struct btrfs_fs_info *fs = container_of(kobj, struct btrfs_fs_info,
156 super_kobj);
157 complete(&fs->kobj_unregister);
158}
159
160static void btrfs_root_release(struct kobject *kobj)
161{
162 struct btrfs_root *root = container_of(kobj, struct btrfs_root,
163 root_kobj);
164 complete(&root->kobj_unregister);
165}
166
167static const struct sysfs_ops btrfs_super_attr_ops = {
168 .show = btrfs_super_attr_show,
169 .store = btrfs_super_attr_store,
170};
171
172static const struct sysfs_ops btrfs_root_attr_ops = {
173 .show = btrfs_root_attr_show,
174 .store = btrfs_root_attr_store,
175};
176
177/* /sys/fs/btrfs/ entry */ 31/* /sys/fs/btrfs/ entry */
178static struct kset *btrfs_kset; 32static struct kset *btrfs_kset;
179 33
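
The btrfs sysfs code removed above follows the standard sysfs dispatch idiom: a bare struct attribute is embedded in a typed wrapper, and the sysfs_ops ->show hook uses container_of() twice to recover both the wrapper and the object that owns the kobject. A minimal sketch of that idiom, with illustrative names rather than btrfs ones:

#include <linux/types.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

struct foo {
	struct kobject kobj;
	u64 value;
};

struct foo_attr {
	struct attribute attr;
	ssize_t (*show)(struct foo *foo, char *buf);
};

static ssize_t foo_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	/* recover the owning object and the typed attribute */
	struct foo *foo = container_of(kobj, struct foo, kobj);
	struct foo_attr *a = container_of(attr, struct foo_attr, attr);

	return a->show ? a->show(foo, buf) : 0;
}

static const struct sysfs_ops foo_sysfs_ops = {
	.show	= foo_attr_show,
};

The double container_of() is what lets one generic sysfs_ops serve any number of typed attributes on the same kobject.
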
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dc80f7156923..51dcec86757f 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -35,6 +35,7 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
35{ 35{
36 WARN_ON(atomic_read(&transaction->use_count) == 0); 36 WARN_ON(atomic_read(&transaction->use_count) == 0);
37 if (atomic_dec_and_test(&transaction->use_count)) { 37 if (atomic_dec_and_test(&transaction->use_count)) {
38 BUG_ON(!list_empty(&transaction->list));
38 memset(transaction, 0, sizeof(*transaction)); 39 memset(transaction, 0, sizeof(*transaction));
39 kmem_cache_free(btrfs_transaction_cachep, transaction); 40 kmem_cache_free(btrfs_transaction_cachep, transaction);
40 } 41 }
@@ -49,46 +50,72 @@ static noinline void switch_commit_root(struct btrfs_root *root)
49/* 50/*
50 * either allocate a new transaction or hop into the existing one 51 * either allocate a new transaction or hop into the existing one
51 */ 52 */
52static noinline int join_transaction(struct btrfs_root *root) 53static noinline int join_transaction(struct btrfs_root *root, int nofail)
53{ 54{
54 struct btrfs_transaction *cur_trans; 55 struct btrfs_transaction *cur_trans;
56
57 spin_lock(&root->fs_info->trans_lock);
58 if (root->fs_info->trans_no_join) {
59 if (!nofail) {
60 spin_unlock(&root->fs_info->trans_lock);
61 return -EBUSY;
62 }
63 }
64
55 cur_trans = root->fs_info->running_transaction; 65 cur_trans = root->fs_info->running_transaction;
56 if (!cur_trans) { 66 if (cur_trans) {
57 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, 67 atomic_inc(&cur_trans->use_count);
58 GFP_NOFS); 68 atomic_inc(&cur_trans->num_writers);
59 if (!cur_trans) 69 cur_trans->num_joined++;
60 return -ENOMEM; 70 spin_unlock(&root->fs_info->trans_lock);
61 root->fs_info->generation++; 71 return 0;
62 atomic_set(&cur_trans->num_writers, 1); 72 }
63 cur_trans->num_joined = 0; 73 spin_unlock(&root->fs_info->trans_lock);
64 cur_trans->transid = root->fs_info->generation; 74
65 init_waitqueue_head(&cur_trans->writer_wait); 75 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
66 init_waitqueue_head(&cur_trans->commit_wait); 76 if (!cur_trans)
67 cur_trans->in_commit = 0; 77 return -ENOMEM;
68 cur_trans->blocked = 0; 78 spin_lock(&root->fs_info->trans_lock);
69 atomic_set(&cur_trans->use_count, 1); 79 if (root->fs_info->running_transaction) {
70 cur_trans->commit_done = 0; 80 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
71 cur_trans->start_time = get_seconds(); 81 cur_trans = root->fs_info->running_transaction;
72 82 atomic_inc(&cur_trans->use_count);
73 cur_trans->delayed_refs.root = RB_ROOT;
74 cur_trans->delayed_refs.num_entries = 0;
75 cur_trans->delayed_refs.num_heads_ready = 0;
76 cur_trans->delayed_refs.num_heads = 0;
77 cur_trans->delayed_refs.flushing = 0;
78 cur_trans->delayed_refs.run_delayed_start = 0;
79 spin_lock_init(&cur_trans->delayed_refs.lock);
80
81 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
82 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
83 extent_io_tree_init(&cur_trans->dirty_pages,
84 root->fs_info->btree_inode->i_mapping);
85 spin_lock(&root->fs_info->new_trans_lock);
86 root->fs_info->running_transaction = cur_trans;
87 spin_unlock(&root->fs_info->new_trans_lock);
88 } else {
89 atomic_inc(&cur_trans->num_writers); 83 atomic_inc(&cur_trans->num_writers);
90 cur_trans->num_joined++; 84 cur_trans->num_joined++;
85 spin_unlock(&root->fs_info->trans_lock);
86 return 0;
91 } 87 }
88 atomic_set(&cur_trans->num_writers, 1);
89 cur_trans->num_joined = 0;
90 init_waitqueue_head(&cur_trans->writer_wait);
91 init_waitqueue_head(&cur_trans->commit_wait);
92 cur_trans->in_commit = 0;
93 cur_trans->blocked = 0;
94 /*
95 * One for this trans handle, one so it will live on until we
96 * commit the transaction.
97 */
98 atomic_set(&cur_trans->use_count, 2);
99 cur_trans->commit_done = 0;
100 cur_trans->start_time = get_seconds();
101
102 cur_trans->delayed_refs.root = RB_ROOT;
103 cur_trans->delayed_refs.num_entries = 0;
104 cur_trans->delayed_refs.num_heads_ready = 0;
105 cur_trans->delayed_refs.num_heads = 0;
106 cur_trans->delayed_refs.flushing = 0;
107 cur_trans->delayed_refs.run_delayed_start = 0;
108 spin_lock_init(&cur_trans->commit_lock);
109 spin_lock_init(&cur_trans->delayed_refs.lock);
110
111 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
112 list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
113 extent_io_tree_init(&cur_trans->dirty_pages,
114 root->fs_info->btree_inode->i_mapping);
115 root->fs_info->generation++;
116 cur_trans->transid = root->fs_info->generation;
117 root->fs_info->running_transaction = cur_trans;
118 spin_unlock(&root->fs_info->trans_lock);
92 119
93 return 0; 120 return 0;
94} 121}
@@ -99,36 +126,82 @@ static noinline int join_transaction(struct btrfs_root *root)
99 * to make sure the old root from before we joined the transaction is deleted 126 * to make sure the old root from before we joined the transaction is deleted
100 * when the transaction commits 127 * when the transaction commits
101 */ 128 */
102static noinline int record_root_in_trans(struct btrfs_trans_handle *trans, 129static int record_root_in_trans(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root) 130 struct btrfs_root *root)
104{ 131{
105 if (root->ref_cows && root->last_trans < trans->transid) { 132 if (root->ref_cows && root->last_trans < trans->transid) {
106 WARN_ON(root == root->fs_info->extent_root); 133 WARN_ON(root == root->fs_info->extent_root);
107 WARN_ON(root->commit_root != root->node); 134 WARN_ON(root->commit_root != root->node);
108 135
136 /*
137 * see below for in_trans_setup usage rules
138 * we have the reloc mutex held now, so there
139 * is only one writer in this function
140 */
141 root->in_trans_setup = 1;
142
143 /* make sure readers find in_trans_setup before
144 * they find our root->last_trans update
145 */
146 smp_wmb();
147
148 spin_lock(&root->fs_info->fs_roots_radix_lock);
149 if (root->last_trans == trans->transid) {
150 spin_unlock(&root->fs_info->fs_roots_radix_lock);
151 return 0;
152 }
109 radix_tree_tag_set(&root->fs_info->fs_roots_radix, 153 radix_tree_tag_set(&root->fs_info->fs_roots_radix,
110 (unsigned long)root->root_key.objectid, 154 (unsigned long)root->root_key.objectid,
111 BTRFS_ROOT_TRANS_TAG); 155 BTRFS_ROOT_TRANS_TAG);
156 spin_unlock(&root->fs_info->fs_roots_radix_lock);
112 root->last_trans = trans->transid; 157 root->last_trans = trans->transid;
158
159 /* this is pretty tricky. We don't want to
160 * take the relocation lock in btrfs_record_root_in_trans
161 * unless we're really doing the first setup for this root in
162 * this transaction.
163 *
164 * Normally we'd use root->last_trans as a flag to decide
165 * if we want to take the expensive mutex.
166 *
167 * But, we have to set root->last_trans before we
168 * init the relocation root, otherwise, we trip over warnings
169 * in ctree.c. The solution used here is to flag ourselves
170 * with root->in_trans_setup. When this is 1, we're still
171 * fixing up the reloc trees and everyone must wait.
172 *
173 * When this is zero, they can trust root->last_trans and fly
174 * through btrfs_record_root_in_trans without having to take the
175 * lock. smp_wmb() makes sure that all the writes above are
176 * done before we pop in the zero below
177 */
113 btrfs_init_reloc_root(trans, root); 178 btrfs_init_reloc_root(trans, root);
179 smp_wmb();
180 root->in_trans_setup = 0;
114 } 181 }
115 return 0; 182 return 0;
116} 183}
117 184
185
118int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans, 186int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
119 struct btrfs_root *root) 187 struct btrfs_root *root)
120{ 188{
121 if (!root->ref_cows) 189 if (!root->ref_cows)
122 return 0; 190 return 0;
123 191
124 mutex_lock(&root->fs_info->trans_mutex); 192 /*
125 if (root->last_trans == trans->transid) { 193 * see record_root_in_trans for comments about in_trans_setup usage
126 mutex_unlock(&root->fs_info->trans_mutex); 194 * and barriers
195 */
196 smp_rmb();
197 if (root->last_trans == trans->transid &&
198 !root->in_trans_setup)
127 return 0; 199 return 0;
128 }
129 200
201 mutex_lock(&root->fs_info->reloc_mutex);
130 record_root_in_trans(trans, root); 202 record_root_in_trans(trans, root);
131 mutex_unlock(&root->fs_info->trans_mutex); 203 mutex_unlock(&root->fs_info->reloc_mutex);
204
132 return 0; 205 return 0;
133} 206}
134 207
@@ -140,21 +213,23 @@ static void wait_current_trans(struct btrfs_root *root)
140{ 213{
141 struct btrfs_transaction *cur_trans; 214 struct btrfs_transaction *cur_trans;
142 215
216 spin_lock(&root->fs_info->trans_lock);
143 cur_trans = root->fs_info->running_transaction; 217 cur_trans = root->fs_info->running_transaction;
144 if (cur_trans && cur_trans->blocked) { 218 if (cur_trans && cur_trans->blocked) {
145 DEFINE_WAIT(wait); 219 DEFINE_WAIT(wait);
146 atomic_inc(&cur_trans->use_count); 220 atomic_inc(&cur_trans->use_count);
221 spin_unlock(&root->fs_info->trans_lock);
147 while (1) { 222 while (1) {
148 prepare_to_wait(&root->fs_info->transaction_wait, &wait, 223 prepare_to_wait(&root->fs_info->transaction_wait, &wait,
149 TASK_UNINTERRUPTIBLE); 224 TASK_UNINTERRUPTIBLE);
150 if (!cur_trans->blocked) 225 if (!cur_trans->blocked)
151 break; 226 break;
152 mutex_unlock(&root->fs_info->trans_mutex);
153 schedule(); 227 schedule();
154 mutex_lock(&root->fs_info->trans_mutex);
155 } 228 }
156 finish_wait(&root->fs_info->transaction_wait, &wait); 229 finish_wait(&root->fs_info->transaction_wait, &wait);
157 put_transaction(cur_trans); 230 put_transaction(cur_trans);
231 } else {
232 spin_unlock(&root->fs_info->trans_lock);
158 } 233 }
159} 234}
160 235
@@ -167,10 +242,16 @@ enum btrfs_trans_type {
167 242
168static int may_wait_transaction(struct btrfs_root *root, int type) 243static int may_wait_transaction(struct btrfs_root *root, int type)
169{ 244{
170 if (!root->fs_info->log_root_recovering && 245 if (root->fs_info->log_root_recovering)
171 ((type == TRANS_START && !root->fs_info->open_ioctl_trans) || 246 return 0;
172 type == TRANS_USERSPACE)) 247
248 if (type == TRANS_USERSPACE)
173 return 1; 249 return 1;
250
251 if (type == TRANS_START &&
252 !atomic_read(&root->fs_info->open_ioctl_trans))
253 return 1;
254
174 return 0; 255 return 0;
175} 256}
176 257
@@ -184,36 +265,44 @@ static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
184 265
185 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) 266 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)
186 return ERR_PTR(-EROFS); 267 return ERR_PTR(-EROFS);
268
269 if (current->journal_info) {
270 WARN_ON(type != TRANS_JOIN && type != TRANS_JOIN_NOLOCK);
271 h = current->journal_info;
272 h->use_count++;
273 h->orig_rsv = h->block_rsv;
274 h->block_rsv = NULL;
275 goto got_it;
276 }
187again: 277again:
188 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS); 278 h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
189 if (!h) 279 if (!h)
190 return ERR_PTR(-ENOMEM); 280 return ERR_PTR(-ENOMEM);
191 281
192 if (type != TRANS_JOIN_NOLOCK)
193 mutex_lock(&root->fs_info->trans_mutex);
194 if (may_wait_transaction(root, type)) 282 if (may_wait_transaction(root, type))
195 wait_current_trans(root); 283 wait_current_trans(root);
196 284
197 ret = join_transaction(root); 285 do {
286 ret = join_transaction(root, type == TRANS_JOIN_NOLOCK);
287 if (ret == -EBUSY)
288 wait_current_trans(root);
289 } while (ret == -EBUSY);
290
198 if (ret < 0) { 291 if (ret < 0) {
199 kmem_cache_free(btrfs_trans_handle_cachep, h); 292 kmem_cache_free(btrfs_trans_handle_cachep, h);
200 if (type != TRANS_JOIN_NOLOCK)
201 mutex_unlock(&root->fs_info->trans_mutex);
202 return ERR_PTR(ret); 293 return ERR_PTR(ret);
203 } 294 }
204 295
205 cur_trans = root->fs_info->running_transaction; 296 cur_trans = root->fs_info->running_transaction;
206 atomic_inc(&cur_trans->use_count);
207 if (type != TRANS_JOIN_NOLOCK)
208 mutex_unlock(&root->fs_info->trans_mutex);
209 297
210 h->transid = cur_trans->transid; 298 h->transid = cur_trans->transid;
211 h->transaction = cur_trans; 299 h->transaction = cur_trans;
212 h->blocks_used = 0; 300 h->blocks_used = 0;
213 h->block_group = 0;
214 h->bytes_reserved = 0; 301 h->bytes_reserved = 0;
215 h->delayed_ref_updates = 0; 302 h->delayed_ref_updates = 0;
303 h->use_count = 1;
216 h->block_rsv = NULL; 304 h->block_rsv = NULL;
305 h->orig_rsv = NULL;
217 306
218 smp_mb(); 307 smp_mb();
219 if (cur_trans->blocked && may_wait_transaction(root, type)) { 308 if (cur_trans->blocked && may_wait_transaction(root, type)) {
@@ -241,11 +330,8 @@ again:
241 } 330 }
242 } 331 }
243 332
244 if (type != TRANS_JOIN_NOLOCK) 333got_it:
245 mutex_lock(&root->fs_info->trans_mutex); 334 btrfs_record_root_in_trans(h, root);
246 record_root_in_trans(h, root);
247 if (type != TRANS_JOIN_NOLOCK)
248 mutex_unlock(&root->fs_info->trans_mutex);
249 335
250 if (!current->journal_info && type != TRANS_USERSPACE) 336 if (!current->journal_info && type != TRANS_USERSPACE)
251 current->journal_info = h; 337 current->journal_info = h;
@@ -257,22 +343,19 @@ struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
257{ 343{
258 return start_transaction(root, num_items, TRANS_START); 344 return start_transaction(root, num_items, TRANS_START);
259} 345}
260struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 346struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
261 int num_blocks)
262{ 347{
263 return start_transaction(root, 0, TRANS_JOIN); 348 return start_transaction(root, 0, TRANS_JOIN);
264} 349}
265 350
266struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, 351struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
267 int num_blocks)
268{ 352{
269 return start_transaction(root, 0, TRANS_JOIN_NOLOCK); 353 return start_transaction(root, 0, TRANS_JOIN_NOLOCK);
270} 354}
271 355
272struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, 356struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
273 int num_blocks)
274{ 357{
275 return start_transaction(r, 0, TRANS_USERSPACE); 358 return start_transaction(root, 0, TRANS_USERSPACE);
276} 359}
277 360
278/* wait for a transaction commit to be fully complete */ 361/* wait for a transaction commit to be fully complete */
@@ -280,17 +363,13 @@ static noinline int wait_for_commit(struct btrfs_root *root,
280 struct btrfs_transaction *commit) 363 struct btrfs_transaction *commit)
281{ 364{
282 DEFINE_WAIT(wait); 365 DEFINE_WAIT(wait);
283 mutex_lock(&root->fs_info->trans_mutex);
284 while (!commit->commit_done) { 366 while (!commit->commit_done) {
285 prepare_to_wait(&commit->commit_wait, &wait, 367 prepare_to_wait(&commit->commit_wait, &wait,
286 TASK_UNINTERRUPTIBLE); 368 TASK_UNINTERRUPTIBLE);
287 if (commit->commit_done) 369 if (commit->commit_done)
288 break; 370 break;
289 mutex_unlock(&root->fs_info->trans_mutex);
290 schedule(); 371 schedule();
291 mutex_lock(&root->fs_info->trans_mutex);
292 } 372 }
293 mutex_unlock(&root->fs_info->trans_mutex);
294 finish_wait(&commit->commit_wait, &wait); 373 finish_wait(&commit->commit_wait, &wait);
295 return 0; 374 return 0;
296} 375}
@@ -300,59 +379,56 @@ int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
300 struct btrfs_transaction *cur_trans = NULL, *t; 379 struct btrfs_transaction *cur_trans = NULL, *t;
301 int ret; 380 int ret;
302 381
303 mutex_lock(&root->fs_info->trans_mutex);
304
305 ret = 0; 382 ret = 0;
306 if (transid) { 383 if (transid) {
307 if (transid <= root->fs_info->last_trans_committed) 384 if (transid <= root->fs_info->last_trans_committed)
308 goto out_unlock; 385 goto out;
309 386
310 /* find specified transaction */ 387 /* find specified transaction */
388 spin_lock(&root->fs_info->trans_lock);
311 list_for_each_entry(t, &root->fs_info->trans_list, list) { 389 list_for_each_entry(t, &root->fs_info->trans_list, list) {
312 if (t->transid == transid) { 390 if (t->transid == transid) {
313 cur_trans = t; 391 cur_trans = t;
392 atomic_inc(&cur_trans->use_count);
314 break; 393 break;
315 } 394 }
316 if (t->transid > transid) 395 if (t->transid > transid)
317 break; 396 break;
318 } 397 }
398 spin_unlock(&root->fs_info->trans_lock);
319 ret = -EINVAL; 399 ret = -EINVAL;
320 if (!cur_trans) 400 if (!cur_trans)
321 goto out_unlock; /* bad transid */ 401 goto out; /* bad transid */
322 } else { 402 } else {
323 /* find newest transaction that is committing | committed */ 403 /* find newest transaction that is committing | committed */
404 spin_lock(&root->fs_info->trans_lock);
324 list_for_each_entry_reverse(t, &root->fs_info->trans_list, 405 list_for_each_entry_reverse(t, &root->fs_info->trans_list,
325 list) { 406 list) {
326 if (t->in_commit) { 407 if (t->in_commit) {
327 if (t->commit_done) 408 if (t->commit_done)
328 goto out_unlock; 409 break;
329 cur_trans = t; 410 cur_trans = t;
411 atomic_inc(&cur_trans->use_count);
330 break; 412 break;
331 } 413 }
332 } 414 }
415 spin_unlock(&root->fs_info->trans_lock);
333 if (!cur_trans) 416 if (!cur_trans)
334 goto out_unlock; /* nothing committing|committed */ 417 goto out; /* nothing committing|committed */
335 } 418 }
336 419
337 atomic_inc(&cur_trans->use_count);
338 mutex_unlock(&root->fs_info->trans_mutex);
339
340 wait_for_commit(root, cur_trans); 420 wait_for_commit(root, cur_trans);
341 421
342 mutex_lock(&root->fs_info->trans_mutex);
343 put_transaction(cur_trans); 422 put_transaction(cur_trans);
344 ret = 0; 423 ret = 0;
345out_unlock: 424out:
346 mutex_unlock(&root->fs_info->trans_mutex);
347 return ret; 425 return ret;
348} 426}
349 427
350void btrfs_throttle(struct btrfs_root *root) 428void btrfs_throttle(struct btrfs_root *root)
351{ 429{
352 mutex_lock(&root->fs_info->trans_mutex); 430 if (!atomic_read(&root->fs_info->open_ioctl_trans))
353 if (!root->fs_info->open_ioctl_trans)
354 wait_current_trans(root); 431 wait_current_trans(root);
355 mutex_unlock(&root->fs_info->trans_mutex);
356} 432}
357 433
358static int should_end_transaction(struct btrfs_trans_handle *trans, 434static int should_end_transaction(struct btrfs_trans_handle *trans,
@@ -370,6 +446,7 @@ int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
370 struct btrfs_transaction *cur_trans = trans->transaction; 446 struct btrfs_transaction *cur_trans = trans->transaction;
371 int updates; 447 int updates;
372 448
449 smp_mb();
373 if (cur_trans->blocked || cur_trans->delayed_refs.flushing) 450 if (cur_trans->blocked || cur_trans->delayed_refs.flushing)
374 return 1; 451 return 1;
375 452
@@ -388,6 +465,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
388 struct btrfs_fs_info *info = root->fs_info; 465 struct btrfs_fs_info *info = root->fs_info;
389 int count = 0; 466 int count = 0;
390 467
468 if (--trans->use_count) {
469 trans->block_rsv = trans->orig_rsv;
470 return 0;
471 }
472
391 while (count < 4) { 473 while (count < 4) {
392 unsigned long cur = trans->delayed_ref_updates; 474 unsigned long cur = trans->delayed_ref_updates;
393 trans->delayed_ref_updates = 0; 475 trans->delayed_ref_updates = 0;
@@ -410,9 +492,11 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
410 492
411 btrfs_trans_release_metadata(trans, root); 493 btrfs_trans_release_metadata(trans, root);
412 494
413 if (lock && !root->fs_info->open_ioctl_trans && 495 if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
414 should_end_transaction(trans, root)) 496 should_end_transaction(trans, root)) {
415 trans->transaction->blocked = 1; 497 trans->transaction->blocked = 1;
498 smp_wmb();
499 }
416 500
417 if (lock && cur_trans->blocked && !cur_trans->in_commit) { 501 if (lock && cur_trans->blocked && !cur_trans->in_commit) {
418 if (throttle) 502 if (throttle)
@@ -703,9 +787,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
703 */ 787 */
704int btrfs_add_dead_root(struct btrfs_root *root) 788int btrfs_add_dead_root(struct btrfs_root *root)
705{ 789{
706 mutex_lock(&root->fs_info->trans_mutex); 790 spin_lock(&root->fs_info->trans_lock);
707 list_add(&root->root_list, &root->fs_info->dead_roots); 791 list_add(&root->root_list, &root->fs_info->dead_roots);
708 mutex_unlock(&root->fs_info->trans_mutex); 792 spin_unlock(&root->fs_info->trans_lock);
709 return 0; 793 return 0;
710} 794}
711 795
@@ -721,6 +805,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
721 int ret; 805 int ret;
722 int err = 0; 806 int err = 0;
723 807
808 spin_lock(&fs_info->fs_roots_radix_lock);
724 while (1) { 809 while (1) {
725 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, 810 ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
726 (void **)gang, 0, 811 (void **)gang, 0,
@@ -733,6 +818,7 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
733 radix_tree_tag_clear(&fs_info->fs_roots_radix, 818 radix_tree_tag_clear(&fs_info->fs_roots_radix,
734 (unsigned long)root->root_key.objectid, 819 (unsigned long)root->root_key.objectid,
735 BTRFS_ROOT_TRANS_TAG); 820 BTRFS_ROOT_TRANS_TAG);
821 spin_unlock(&fs_info->fs_roots_radix_lock);
736 822
737 btrfs_free_log(trans, root); 823 btrfs_free_log(trans, root);
738 btrfs_update_reloc_root(trans, root); 824 btrfs_update_reloc_root(trans, root);
@@ -753,10 +839,12 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
753 err = btrfs_update_root(trans, fs_info->tree_root, 839 err = btrfs_update_root(trans, fs_info->tree_root,
754 &root->root_key, 840 &root->root_key,
755 &root->root_item); 841 &root->root_item);
842 spin_lock(&fs_info->fs_roots_radix_lock);
756 if (err) 843 if (err)
757 break; 844 break;
758 } 845 }
759 } 846 }
847 spin_unlock(&fs_info->fs_roots_radix_lock);
760 return err; 848 return err;
761} 849}
762 850
@@ -786,7 +874,7 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
786 btrfs_btree_balance_dirty(info->tree_root, nr); 874 btrfs_btree_balance_dirty(info->tree_root, nr);
787 cond_resched(); 875 cond_resched();
788 876
789 if (root->fs_info->closing || ret != -EAGAIN) 877 if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
790 break; 878 break;
791 } 879 }
792 root->defrag_running = 0; 880 root->defrag_running = 0;
@@ -869,6 +957,15 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
869 ret = btrfs_update_inode(trans, parent_root, parent_inode); 957 ret = btrfs_update_inode(trans, parent_root, parent_inode);
870 BUG_ON(ret); 958 BUG_ON(ret);
871 959
960 /*
961 * pull in the delayed directory update
962 * and the delayed inode item
963 * otherwise we corrupt the FS during
964 * snapshot
965 */
966 ret = btrfs_run_delayed_items(trans, root);
967 BUG_ON(ret);
968
872 record_root_in_trans(trans, root); 969 record_root_in_trans(trans, root);
873 btrfs_set_root_last_snapshot(&root->root_item, trans->transid); 970 btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
874 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item)); 971 memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
@@ -930,14 +1027,6 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
930 int ret; 1027 int ret;
931 1028
932 list_for_each_entry(pending, head, list) { 1029 list_for_each_entry(pending, head, list) {
933 /*
934 * We must deal with the delayed items before creating
 935 * snapshots, or we will create a snapshot with inconsistent
936 * information.
937 */
938 ret = btrfs_run_delayed_items(trans, fs_info->fs_root);
939 BUG_ON(ret);
940
941 ret = create_pending_snapshot(trans, fs_info, pending); 1030 ret = create_pending_snapshot(trans, fs_info, pending);
942 BUG_ON(ret); 1031 BUG_ON(ret);
943 } 1032 }
@@ -967,20 +1056,20 @@ static void update_super_roots(struct btrfs_root *root)
967int btrfs_transaction_in_commit(struct btrfs_fs_info *info) 1056int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
968{ 1057{
969 int ret = 0; 1058 int ret = 0;
970 spin_lock(&info->new_trans_lock); 1059 spin_lock(&info->trans_lock);
971 if (info->running_transaction) 1060 if (info->running_transaction)
972 ret = info->running_transaction->in_commit; 1061 ret = info->running_transaction->in_commit;
973 spin_unlock(&info->new_trans_lock); 1062 spin_unlock(&info->trans_lock);
974 return ret; 1063 return ret;
975} 1064}
976 1065
977int btrfs_transaction_blocked(struct btrfs_fs_info *info) 1066int btrfs_transaction_blocked(struct btrfs_fs_info *info)
978{ 1067{
979 int ret = 0; 1068 int ret = 0;
980 spin_lock(&info->new_trans_lock); 1069 spin_lock(&info->trans_lock);
981 if (info->running_transaction) 1070 if (info->running_transaction)
982 ret = info->running_transaction->blocked; 1071 ret = info->running_transaction->blocked;
983 spin_unlock(&info->new_trans_lock); 1072 spin_unlock(&info->trans_lock);
984 return ret; 1073 return ret;
985} 1074}
986 1075
@@ -1004,9 +1093,7 @@ static void wait_current_trans_commit_start(struct btrfs_root *root,
1004 &wait); 1093 &wait);
1005 break; 1094 break;
1006 } 1095 }
1007 mutex_unlock(&root->fs_info->trans_mutex);
1008 schedule(); 1096 schedule();
1009 mutex_lock(&root->fs_info->trans_mutex);
1010 finish_wait(&root->fs_info->transaction_blocked_wait, &wait); 1097 finish_wait(&root->fs_info->transaction_blocked_wait, &wait);
1011 } 1098 }
1012} 1099}
@@ -1032,9 +1119,7 @@ static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
1032 &wait); 1119 &wait);
1033 break; 1120 break;
1034 } 1121 }
1035 mutex_unlock(&root->fs_info->trans_mutex);
1036 schedule(); 1122 schedule();
1037 mutex_lock(&root->fs_info->trans_mutex);
1038 finish_wait(&root->fs_info->transaction_wait, 1123 finish_wait(&root->fs_info->transaction_wait,
1039 &wait); 1124 &wait);
1040 } 1125 }
@@ -1072,7 +1157,7 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1072 1157
1073 INIT_DELAYED_WORK(&ac->work, do_async_commit); 1158 INIT_DELAYED_WORK(&ac->work, do_async_commit);
1074 ac->root = root; 1159 ac->root = root;
1075 ac->newtrans = btrfs_join_transaction(root, 0); 1160 ac->newtrans = btrfs_join_transaction(root);
1076 if (IS_ERR(ac->newtrans)) { 1161 if (IS_ERR(ac->newtrans)) {
1077 int err = PTR_ERR(ac->newtrans); 1162 int err = PTR_ERR(ac->newtrans);
1078 kfree(ac); 1163 kfree(ac);
@@ -1080,23 +1165,22 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1080 } 1165 }
1081 1166
1082 /* take transaction reference */ 1167 /* take transaction reference */
1083 mutex_lock(&root->fs_info->trans_mutex);
1084 cur_trans = trans->transaction; 1168 cur_trans = trans->transaction;
1085 atomic_inc(&cur_trans->use_count); 1169 atomic_inc(&cur_trans->use_count);
1086 mutex_unlock(&root->fs_info->trans_mutex);
1087 1170
1088 btrfs_end_transaction(trans, root); 1171 btrfs_end_transaction(trans, root);
1089 schedule_delayed_work(&ac->work, 0); 1172 schedule_delayed_work(&ac->work, 0);
1090 1173
1091 /* wait for transaction to start and unblock */ 1174 /* wait for transaction to start and unblock */
1092 mutex_lock(&root->fs_info->trans_mutex);
1093 if (wait_for_unblock) 1175 if (wait_for_unblock)
1094 wait_current_trans_commit_start_and_unblock(root, cur_trans); 1176 wait_current_trans_commit_start_and_unblock(root, cur_trans);
1095 else 1177 else
1096 wait_current_trans_commit_start(root, cur_trans); 1178 wait_current_trans_commit_start(root, cur_trans);
1097 put_transaction(cur_trans);
1098 mutex_unlock(&root->fs_info->trans_mutex);
1099 1179
1180 if (current->journal_info == trans)
1181 current->journal_info = NULL;
1182
1183 put_transaction(cur_trans);
1100 return 0; 1184 return 0;
1101} 1185}
1102 1186
@@ -1139,38 +1223,41 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1139 ret = btrfs_run_delayed_refs(trans, root, 0); 1223 ret = btrfs_run_delayed_refs(trans, root, 0);
1140 BUG_ON(ret); 1224 BUG_ON(ret);
1141 1225
1142 mutex_lock(&root->fs_info->trans_mutex); 1226 spin_lock(&cur_trans->commit_lock);
1143 if (cur_trans->in_commit) { 1227 if (cur_trans->in_commit) {
1228 spin_unlock(&cur_trans->commit_lock);
1144 atomic_inc(&cur_trans->use_count); 1229 atomic_inc(&cur_trans->use_count);
1145 mutex_unlock(&root->fs_info->trans_mutex);
1146 btrfs_end_transaction(trans, root); 1230 btrfs_end_transaction(trans, root);
1147 1231
1148 ret = wait_for_commit(root, cur_trans); 1232 ret = wait_for_commit(root, cur_trans);
1149 BUG_ON(ret); 1233 BUG_ON(ret);
1150 1234
1151 mutex_lock(&root->fs_info->trans_mutex);
1152 put_transaction(cur_trans); 1235 put_transaction(cur_trans);
1153 mutex_unlock(&root->fs_info->trans_mutex);
1154 1236
1155 return 0; 1237 return 0;
1156 } 1238 }
1157 1239
1158 trans->transaction->in_commit = 1; 1240 trans->transaction->in_commit = 1;
1159 trans->transaction->blocked = 1; 1241 trans->transaction->blocked = 1;
1242 spin_unlock(&cur_trans->commit_lock);
1160 wake_up(&root->fs_info->transaction_blocked_wait); 1243 wake_up(&root->fs_info->transaction_blocked_wait);
1161 1244
1245 spin_lock(&root->fs_info->trans_lock);
1162 if (cur_trans->list.prev != &root->fs_info->trans_list) { 1246 if (cur_trans->list.prev != &root->fs_info->trans_list) {
1163 prev_trans = list_entry(cur_trans->list.prev, 1247 prev_trans = list_entry(cur_trans->list.prev,
1164 struct btrfs_transaction, list); 1248 struct btrfs_transaction, list);
1165 if (!prev_trans->commit_done) { 1249 if (!prev_trans->commit_done) {
1166 atomic_inc(&prev_trans->use_count); 1250 atomic_inc(&prev_trans->use_count);
1167 mutex_unlock(&root->fs_info->trans_mutex); 1251 spin_unlock(&root->fs_info->trans_lock);
1168 1252
1169 wait_for_commit(root, prev_trans); 1253 wait_for_commit(root, prev_trans);
1170 1254
1171 mutex_lock(&root->fs_info->trans_mutex);
1172 put_transaction(prev_trans); 1255 put_transaction(prev_trans);
1256 } else {
1257 spin_unlock(&root->fs_info->trans_lock);
1173 } 1258 }
1259 } else {
1260 spin_unlock(&root->fs_info->trans_lock);
1174 } 1261 }
1175 1262
1176 if (now < cur_trans->start_time || now - cur_trans->start_time < 1) 1263 if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
@@ -1178,12 +1265,12 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1178 1265
1179 do { 1266 do {
1180 int snap_pending = 0; 1267 int snap_pending = 0;
1268
1181 joined = cur_trans->num_joined; 1269 joined = cur_trans->num_joined;
1182 if (!list_empty(&trans->transaction->pending_snapshots)) 1270 if (!list_empty(&trans->transaction->pending_snapshots))
1183 snap_pending = 1; 1271 snap_pending = 1;
1184 1272
1185 WARN_ON(cur_trans != trans->transaction); 1273 WARN_ON(cur_trans != trans->transaction);
1186 mutex_unlock(&root->fs_info->trans_mutex);
1187 1274
1188 if (flush_on_commit || snap_pending) { 1275 if (flush_on_commit || snap_pending) {
1189 btrfs_start_delalloc_inodes(root, 1); 1276 btrfs_start_delalloc_inodes(root, 1);
@@ -1206,26 +1293,48 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1206 prepare_to_wait(&cur_trans->writer_wait, &wait, 1293 prepare_to_wait(&cur_trans->writer_wait, &wait,
1207 TASK_UNINTERRUPTIBLE); 1294 TASK_UNINTERRUPTIBLE);
1208 1295
1209 smp_mb();
1210 if (atomic_read(&cur_trans->num_writers) > 1) 1296 if (atomic_read(&cur_trans->num_writers) > 1)
1211 schedule_timeout(MAX_SCHEDULE_TIMEOUT); 1297 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
1212 else if (should_grow) 1298 else if (should_grow)
1213 schedule_timeout(1); 1299 schedule_timeout(1);
1214 1300
1215 mutex_lock(&root->fs_info->trans_mutex);
1216 finish_wait(&cur_trans->writer_wait, &wait); 1301 finish_wait(&cur_trans->writer_wait, &wait);
1217 } while (atomic_read(&cur_trans->num_writers) > 1 || 1302 } while (atomic_read(&cur_trans->num_writers) > 1 ||
1218 (should_grow && cur_trans->num_joined != joined)); 1303 (should_grow && cur_trans->num_joined != joined));
1219 1304
1220 ret = create_pending_snapshots(trans, root->fs_info); 1305 /*
1221 BUG_ON(ret); 1306 * Ok now we need to make sure to block out any other joins while we
1307 * commit the transaction. We could have started a join before setting
1308 * no_join so make sure to wait for num_writers to == 1 again.
1309 */
1310 spin_lock(&root->fs_info->trans_lock);
1311 root->fs_info->trans_no_join = 1;
1312 spin_unlock(&root->fs_info->trans_lock);
1313 wait_event(cur_trans->writer_wait,
1314 atomic_read(&cur_trans->num_writers) == 1);
1315
1316 /*
1317 * the reloc mutex makes sure that we stop
1318 * the balancing code from coming in and moving
1319 * extents around in the middle of the commit
1320 */
1321 mutex_lock(&root->fs_info->reloc_mutex);
1222 1322
1223 ret = btrfs_run_delayed_items(trans, root); 1323 ret = btrfs_run_delayed_items(trans, root);
1224 BUG_ON(ret); 1324 BUG_ON(ret);
1225 1325
1326 ret = create_pending_snapshots(trans, root->fs_info);
1327 BUG_ON(ret);
1328
1226 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); 1329 ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
1227 BUG_ON(ret); 1330 BUG_ON(ret);
1228 1331
1332 /*
1333 * make sure none of the code above managed to slip in a
1334 * delayed item
1335 */
1336 btrfs_assert_delayed_root_empty(root);
1337
1229 WARN_ON(cur_trans != trans->transaction); 1338 WARN_ON(cur_trans != trans->transaction);
1230 1339
1231 btrfs_scrub_pause(root); 1340 btrfs_scrub_pause(root);
@@ -1258,9 +1367,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1258 btrfs_prepare_extent_commit(trans, root); 1367 btrfs_prepare_extent_commit(trans, root);
1259 1368
1260 cur_trans = root->fs_info->running_transaction; 1369 cur_trans = root->fs_info->running_transaction;
1261 spin_lock(&root->fs_info->new_trans_lock);
1262 root->fs_info->running_transaction = NULL;
1263 spin_unlock(&root->fs_info->new_trans_lock);
1264 1370
1265 btrfs_set_root_node(&root->fs_info->tree_root->root_item, 1371 btrfs_set_root_node(&root->fs_info->tree_root->root_item,
1266 root->fs_info->tree_root->node); 1372 root->fs_info->tree_root->node);
@@ -1281,10 +1387,14 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1281 sizeof(root->fs_info->super_copy)); 1387 sizeof(root->fs_info->super_copy));
1282 1388
1283 trans->transaction->blocked = 0; 1389 trans->transaction->blocked = 0;
1390 spin_lock(&root->fs_info->trans_lock);
1391 root->fs_info->running_transaction = NULL;
1392 root->fs_info->trans_no_join = 0;
1393 spin_unlock(&root->fs_info->trans_lock);
1394 mutex_unlock(&root->fs_info->reloc_mutex);
1284 1395
1285 wake_up(&root->fs_info->transaction_wait); 1396 wake_up(&root->fs_info->transaction_wait);
1286 1397
1287 mutex_unlock(&root->fs_info->trans_mutex);
1288 ret = btrfs_write_and_wait_transaction(trans, root); 1398 ret = btrfs_write_and_wait_transaction(trans, root);
1289 BUG_ON(ret); 1399 BUG_ON(ret);
1290 write_ctree_super(trans, root, 0); 1400 write_ctree_super(trans, root, 0);
@@ -1297,22 +1407,21 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1297 1407
1298 btrfs_finish_extent_commit(trans, root); 1408 btrfs_finish_extent_commit(trans, root);
1299 1409
1300 mutex_lock(&root->fs_info->trans_mutex);
1301
1302 cur_trans->commit_done = 1; 1410 cur_trans->commit_done = 1;
1303 1411
1304 root->fs_info->last_trans_committed = cur_trans->transid; 1412 root->fs_info->last_trans_committed = cur_trans->transid;
1305 1413
1306 wake_up(&cur_trans->commit_wait); 1414 wake_up(&cur_trans->commit_wait);
1307 1415
1416 spin_lock(&root->fs_info->trans_lock);
1308 list_del_init(&cur_trans->list); 1417 list_del_init(&cur_trans->list);
1418 spin_unlock(&root->fs_info->trans_lock);
1419
1309 put_transaction(cur_trans); 1420 put_transaction(cur_trans);
1310 put_transaction(cur_trans); 1421 put_transaction(cur_trans);
1311 1422
1312 trace_btrfs_transaction_commit(root); 1423 trace_btrfs_transaction_commit(root);
1313 1424
1314 mutex_unlock(&root->fs_info->trans_mutex);
1315
1316 btrfs_scrub_continue(root); 1425 btrfs_scrub_continue(root);
1317 1426
1318 if (current->journal_info == trans) 1427 if (current->journal_info == trans)
@@ -1334,9 +1443,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
1334 LIST_HEAD(list); 1443 LIST_HEAD(list);
1335 struct btrfs_fs_info *fs_info = root->fs_info; 1444 struct btrfs_fs_info *fs_info = root->fs_info;
1336 1445
1337 mutex_lock(&fs_info->trans_mutex); 1446 spin_lock(&fs_info->trans_lock);
1338 list_splice_init(&fs_info->dead_roots, &list); 1447 list_splice_init(&fs_info->dead_roots, &list);
1339 mutex_unlock(&fs_info->trans_mutex); 1448 spin_unlock(&fs_info->trans_lock);
1340 1449
1341 while (!list_empty(&list)) { 1450 while (!list_empty(&list)) {
1342 root = list_entry(list.next, struct btrfs_root, root_list); 1451 root = list_entry(list.next, struct btrfs_root, root_list);
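
The long comment added to record_root_in_trans() above describes a publish/subscribe ordering: the writer raises in_trans_setup, publishes last_trans, finishes the reloc-root setup, and only then clears the flag; readers pair that with smp_rmb() before trusting last_trans. Compressed into a sketch, with shortened names, the radix-tree bookkeeping left out, and the same barrier pairing assumed:

#include <linux/types.h>
#include <linux/mutex.h>

/* illustrative state only, not the real struct btrfs_root */
struct demo_root {
	u64 last_trans;
	int in_trans_setup;
	struct mutex reloc_mutex;
};

/* writer side: serialized by reloc_mutex, so only one task runs this */
static void demo_record_slow(struct demo_root *root, u64 transid)
{
	if (root->last_trans == transid)
		return;			/* someone else already did the setup */

	root->in_trans_setup = 1;
	smp_wmb();			/* readers see the flag before last_trans */
	root->last_trans = transid;
	/* ... expensive reloc-root setup goes here ... */
	smp_wmb();
	root->in_trans_setup = 0;	/* publish: the fast path is now safe */
}

/* reader side, mirroring the btrfs_record_root_in_trans() fast path */
static int demo_record(struct demo_root *root, u64 transid)
{
	smp_rmb();
	if (root->last_trans == transid && !root->in_trans_setup)
		return 0;		/* recorded and fully set up: no mutex */

	mutex_lock(&root->reloc_mutex);
	demo_record_slow(root, transid);
	mutex_unlock(&root->reloc_mutex);
	return 0;
}

The same shape shows up in the join path: trans_no_join is flipped under trans_lock, and start_transaction() simply retries with wait_current_trans() whenever join_transaction() reports -EBUSY.
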
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index 804c88639e5d..02564e6230ac 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -28,10 +28,12 @@ struct btrfs_transaction {
28 * transaction can end 28 * transaction can end
29 */ 29 */
30 atomic_t num_writers; 30 atomic_t num_writers;
31 atomic_t use_count;
31 32
32 unsigned long num_joined; 33 unsigned long num_joined;
34
35 spinlock_t commit_lock;
33 int in_commit; 36 int in_commit;
34 atomic_t use_count;
35 int commit_done; 37 int commit_done;
36 int blocked; 38 int blocked;
37 struct list_head list; 39 struct list_head list;
@@ -45,13 +47,14 @@ struct btrfs_transaction {
45 47
46struct btrfs_trans_handle { 48struct btrfs_trans_handle {
47 u64 transid; 49 u64 transid;
48 u64 block_group;
49 u64 bytes_reserved; 50 u64 bytes_reserved;
51 unsigned long use_count;
50 unsigned long blocks_reserved; 52 unsigned long blocks_reserved;
51 unsigned long blocks_used; 53 unsigned long blocks_used;
52 unsigned long delayed_ref_updates; 54 unsigned long delayed_ref_updates;
53 struct btrfs_transaction *transaction; 55 struct btrfs_transaction *transaction;
54 struct btrfs_block_rsv *block_rsv; 56 struct btrfs_block_rsv *block_rsv;
57 struct btrfs_block_rsv *orig_rsv;
55}; 58};
56 59
57struct btrfs_pending_snapshot { 60struct btrfs_pending_snapshot {
@@ -66,19 +69,6 @@ struct btrfs_pending_snapshot {
66 struct list_head list; 69 struct list_head list;
67}; 70};
68 71
69static inline void btrfs_set_trans_block_group(struct btrfs_trans_handle *trans,
70 struct inode *inode)
71{
72 trans->block_group = BTRFS_I(inode)->block_group;
73}
74
75static inline void btrfs_update_inode_block_group(
76 struct btrfs_trans_handle *trans,
77 struct inode *inode)
78{
79 BTRFS_I(inode)->block_group = trans->block_group;
80}
81
82static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans, 72static inline void btrfs_set_inode_last_trans(struct btrfs_trans_handle *trans,
83 struct inode *inode) 73 struct inode *inode)
84{ 74{
@@ -92,12 +82,9 @@ int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans,
92 struct btrfs_root *root); 82 struct btrfs_root *root);
93struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root, 83struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
94 int num_items); 84 int num_items);
95struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root, 85struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root);
96 int num_blocks); 86struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root);
97struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root, 87struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
98 int num_blocks);
99struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
100 int num_blocks);
101int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); 88int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
102int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 89int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
103 struct btrfs_root *root); 90 struct btrfs_root *root);
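
The new use_count field in btrfs_trans_handle pairs with current->journal_info to make transaction starts recursive: an inner start reuses the handle already attached to the task and bumps use_count, and only the outermost end actually drops the writer count and considers committing. The two halves of that pairing, compressed from the hunks above (the block_rsv/orig_rsv juggling is omitted):

	/* start_transaction(): reuse an in-flight handle on this task */
	if (current->journal_info) {
		h = current->journal_info;
		h->use_count++;		/* nested join, nothing more to do */
		return h;
	}
	/* otherwise allocate h, join or create the running transaction,
	 * set h->use_count = 1 and current->journal_info = h */

	/* __btrfs_end_transaction(): only the outermost end tears down */
	if (--trans->use_count)
		return 0;

Dropping block_group from the handle is also what lets the btrfs_set_trans_block_group()/btrfs_update_inode_block_group() helpers above go away.
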
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 592396c6dc47..4ce8a9f41d1e 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -3177,7 +3177,7 @@ again:
3177 tmp_key.offset = (u64)-1; 3177 tmp_key.offset = (u64)-1;
3178 3178
3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 3179 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
3180 BUG_ON(!wc.replay_dest); 3180 BUG_ON(IS_ERR_OR_NULL(wc.replay_dest));
3181 3181
3182 wc.replay_dest->log_root = log; 3182 wc.replay_dest->log_root = log;
3183 btrfs_record_root_in_trans(trans, wc.replay_dest); 3183 btrfs_record_root_in_trans(trans, wc.replay_dest);
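
The tree-log.c change tightens a sanity check: btrfs_read_fs_root_no_name() can return an ERR_PTR as well as NULL, and BUG_ON(!root) only catches the NULL case. For reference, the error-pointer idiom that IS_ERR_OR_NULL() folds into one test (lookup_something() is a placeholder, not a real API):

	struct thing *t = lookup_something();	/* placeholder lookup */

	if (IS_ERR(t))			/* failure encoded as (void *)-Exx */
		return PTR_ERR(t);
	if (!t)				/* plain "not found" */
		return -ENOENT;
	/* IS_ERR_OR_NULL(t) from <linux/err.h> is the single check covering both */
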
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c48214ef5c09..1efa56e18f9b 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -504,7 +504,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
504 BUG_ON(!new_device); 504 BUG_ON(!new_device);
505 memcpy(new_device, device, sizeof(*new_device)); 505 memcpy(new_device, device, sizeof(*new_device));
506 new_device->name = kstrdup(device->name, GFP_NOFS); 506 new_device->name = kstrdup(device->name, GFP_NOFS);
507 BUG_ON(!new_device->name); 507 BUG_ON(device->name && !new_device->name);
508 new_device->bdev = NULL; 508 new_device->bdev = NULL;
509 new_device->writeable = 0; 509 new_device->writeable = 0;
510 new_device->in_fs_metadata = 0; 510 new_device->in_fs_metadata = 0;
@@ -689,12 +689,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
689 transid = btrfs_super_generation(disk_super); 689 transid = btrfs_super_generation(disk_super);
690 if (disk_super->label[0]) 690 if (disk_super->label[0])
691 printk(KERN_INFO "device label %s ", disk_super->label); 691 printk(KERN_INFO "device label %s ", disk_super->label);
692 else { 692 else
 693 /* FIXME, make a real uuid parser */ 693 printk(KERN_INFO "device fsid %pU ", disk_super->fsid);
694 printk(KERN_INFO "device fsid %llx-%llx ",
695 *(unsigned long long *)disk_super->fsid,
696 *(unsigned long long *)(disk_super->fsid + 8));
697 }
698 printk(KERN_CONT "devid %llu transid %llu %s\n", 694 printk(KERN_CONT "devid %llu transid %llu %s\n",
699 (unsigned long long)devid, (unsigned long long)transid, path); 695 (unsigned long long)devid, (unsigned long long)transid, path);
700 ret = device_list_add(path, disk_super, devid, fs_devices_ret); 696 ret = device_list_add(path, disk_super, devid, fs_devices_ret);
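
The volumes.c hunk drops the hand-rolled fsid print in favour of the %pU extension that the kernel's vsprintf provides for 16-byte UUIDs. Roughly, the before and after look like this (fsid stands in for disk_super->fsid):

	u8 fsid[16];		/* BTRFS_FSID_SIZE bytes */

	/* old: two casted 64-bit halves, with a FIXME attached */
	printk(KERN_INFO "device fsid %llx-%llx\n",
	       *(unsigned long long *)fsid,
	       *(unsigned long long *)(fsid + 8));

	/* new: %pU renders all 16 bytes in standard UUID form */
	printk(KERN_INFO "device fsid %pU\n", fsid);
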
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index f3107e4b4d56..5366fe452ab0 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -158,8 +158,6 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
158 if (IS_ERR(trans)) 158 if (IS_ERR(trans))
159 return PTR_ERR(trans); 159 return PTR_ERR(trans);
160 160
161 btrfs_set_trans_block_group(trans, inode);
162
163 ret = do_setxattr(trans, inode, name, value, size, flags); 161 ret = do_setxattr(trans, inode, name, value, size, flags);
164 if (ret) 162 if (ret)
165 goto out; 163 goto out;
diff --git a/fs/buffer.c b/fs/buffer.c
index 49c9aada0374..1a80b048ade8 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1902,10 +1902,8 @@ int __block_write_begin(struct page *page, loff_t pos, unsigned len,
1902 if (!buffer_uptodate(*wait_bh)) 1902 if (!buffer_uptodate(*wait_bh))
1903 err = -EIO; 1903 err = -EIO;
1904 } 1904 }
1905 if (unlikely(err)) { 1905 if (unlikely(err))
1906 page_zero_new_buffers(page, from, to); 1906 page_zero_new_buffers(page, from, to);
1907 ClearPageUptodate(page);
1908 }
1909 return err; 1907 return err;
1910} 1908}
1911EXPORT_SYMBOL(__block_write_begin); 1909EXPORT_SYMBOL(__block_write_begin);
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 33da49dc3cc6..5a3953db8118 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -453,7 +453,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
453 int err; 453 int err;
454 struct inode *inode = page->mapping->host; 454 struct inode *inode = page->mapping->host;
455 BUG_ON(!inode); 455 BUG_ON(!inode);
456 igrab(inode); 456 ihold(inode);
457 err = writepage_nounlock(page, wbc); 457 err = writepage_nounlock(page, wbc);
458 unlock_page(page); 458 unlock_page(page);
459 iput(inode); 459 iput(inode);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 1f72b00447c4..f605753c8fe9 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2940,14 +2940,12 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
2940 while (!list_empty(&mdsc->cap_dirty)) { 2940 while (!list_empty(&mdsc->cap_dirty)) {
2941 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info, 2941 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
2942 i_dirty_item); 2942 i_dirty_item);
2943 inode = igrab(&ci->vfs_inode); 2943 inode = &ci->vfs_inode;
2944 ihold(inode);
2944 dout("flush_dirty_caps %p\n", inode); 2945 dout("flush_dirty_caps %p\n", inode);
2945 spin_unlock(&mdsc->cap_dirty_lock); 2946 spin_unlock(&mdsc->cap_dirty_lock);
2946 if (inode) { 2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
2947 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, 2948 iput(inode);
2948 NULL);
2949 iput(inode);
2950 }
2951 spin_lock(&mdsc->cap_dirty_lock); 2949 spin_lock(&mdsc->cap_dirty_lock);
2952 } 2950 }
2953 spin_unlock(&mdsc->cap_dirty_lock); 2951 spin_unlock(&mdsc->cap_dirty_lock);
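
This hunk and the remaining ceph ones all make the same substitution: igrab(), which takes i_lock and returns NULL if the inode is already being evicted, becomes ihold(), which only bumps the reference count and is safe exactly when the caller already holds a reference, as these call sites do. The difference at a caller, sketched:

	/* igrab(): can fail, so the result must be checked */
	struct inode *in = igrab(inode);
	if (!in)
		return -ESTALE;		/* inode was on its way out */
	req->r_inode = in;

	/* ihold(): the caller already owns a reference, just take another */
	req->r_inode = inode;
	ihold(inode);
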
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 33729e822bb9..ef8f08c343e8 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -308,7 +308,8 @@ more:
308 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 308 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
309 if (IS_ERR(req)) 309 if (IS_ERR(req))
310 return PTR_ERR(req); 310 return PTR_ERR(req);
311 req->r_inode = igrab(inode); 311 req->r_inode = inode;
312 ihold(inode);
312 req->r_dentry = dget(filp->f_dentry); 313 req->r_dentry = dget(filp->f_dentry);
313 /* hints to request -> mds selection code */ 314 /* hints to request -> mds selection code */
314 req->r_direct_mode = USE_AUTH_MDS; 315 req->r_direct_mode = USE_AUTH_MDS;
@@ -787,10 +788,12 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
787 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; 788 req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
788 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; 789 req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
789 err = ceph_mdsc_do_request(mdsc, dir, req); 790 err = ceph_mdsc_do_request(mdsc, dir, req);
790 if (err) 791 if (err) {
791 d_drop(dentry); 792 d_drop(dentry);
792 else if (!req->r_reply_info.head->is_dentry) 793 } else if (!req->r_reply_info.head->is_dentry) {
793 d_instantiate(dentry, igrab(old_dentry->d_inode)); 794 ihold(old_dentry->d_inode);
795 d_instantiate(dentry, old_dentry->d_inode);
796 }
794 ceph_mdsc_put_request(req); 797 ceph_mdsc_put_request(req);
795 return err; 798 return err;
796} 799}
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index a610d3d67488..f67b687550de 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -109,7 +109,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb,
109 err = ceph_mdsc_do_request(mdsc, NULL, req); 109 err = ceph_mdsc_do_request(mdsc, NULL, req);
110 inode = req->r_target_inode; 110 inode = req->r_target_inode;
111 if (inode) 111 if (inode)
112 igrab(inode); 112 ihold(inode);
113 ceph_mdsc_put_request(req); 113 ceph_mdsc_put_request(req);
114 if (!inode) 114 if (!inode)
115 return ERR_PTR(-ESTALE); 115 return ERR_PTR(-ESTALE);
@@ -167,7 +167,7 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb,
167 err = ceph_mdsc_do_request(mdsc, NULL, req); 167 err = ceph_mdsc_do_request(mdsc, NULL, req);
168 inode = req->r_target_inode; 168 inode = req->r_target_inode;
169 if (inode) 169 if (inode)
170 igrab(inode); 170 ihold(inode);
171 ceph_mdsc_put_request(req); 171 ceph_mdsc_put_request(req);
172 if (!inode) 172 if (!inode)
173 return ERR_PTR(err ? err : -ESTALE); 173 return ERR_PTR(err ? err : -ESTALE);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 203252d88d9f..9542f07d0b93 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -191,7 +191,8 @@ int ceph_open(struct inode *inode, struct file *file)
191 err = PTR_ERR(req); 191 err = PTR_ERR(req);
192 goto out; 192 goto out;
193 } 193 }
194 req->r_inode = igrab(inode); 194 req->r_inode = inode;
195 ihold(inode);
195 req->r_num_caps = 1; 196 req->r_num_caps = 1;
196 err = ceph_mdsc_do_request(mdsc, parent_inode, req); 197 err = ceph_mdsc_do_request(mdsc, parent_inode, req);
197 if (!err) 198 if (!err)
@@ -282,7 +283,7 @@ int ceph_release(struct inode *inode, struct file *file)
282static int striped_read(struct inode *inode, 283static int striped_read(struct inode *inode,
283 u64 off, u64 len, 284 u64 off, u64 len,
284 struct page **pages, int num_pages, 285 struct page **pages, int num_pages,
285 int *checkeof, bool align_to_pages, 286 int *checkeof, bool o_direct,
286 unsigned long buf_align) 287 unsigned long buf_align)
287{ 288{
288 struct ceph_fs_client *fsc = ceph_inode_to_client(inode); 289 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
@@ -307,7 +308,7 @@ static int striped_read(struct inode *inode,
307 io_align = off & ~PAGE_MASK; 308 io_align = off & ~PAGE_MASK;
308 309
309more: 310more:
310 if (align_to_pages) 311 if (o_direct)
311 page_align = (pos - io_align + buf_align) & ~PAGE_MASK; 312 page_align = (pos - io_align + buf_align) & ~PAGE_MASK;
312 else 313 else
313 page_align = pos & ~PAGE_MASK; 314 page_align = pos & ~PAGE_MASK;
@@ -317,10 +318,10 @@ more:
317 ci->i_truncate_seq, 318 ci->i_truncate_seq,
318 ci->i_truncate_size, 319 ci->i_truncate_size,
319 page_pos, pages_left, page_align); 320 page_pos, pages_left, page_align);
320 hit_stripe = this_len < left;
321 was_short = ret >= 0 && ret < this_len;
322 if (ret == -ENOENT) 321 if (ret == -ENOENT)
323 ret = 0; 322 ret = 0;
323 hit_stripe = this_len < left;
324 was_short = ret >= 0 && ret < this_len;
324 dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read, 325 dout("striped_read %llu~%u (read %u) got %d%s%s\n", pos, left, read,
325 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : ""); 326 ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
326 327
@@ -345,20 +346,22 @@ more:
345 } 346 }
346 347
347 if (was_short) { 348 if (was_short) {
348 /* was original extent fully inside i_size? */ 349 /* did we bounce off eof? */
349 if (pos + left <= inode->i_size) { 350 if (pos + left > inode->i_size)
350 dout("zero tail\n"); 351 *checkeof = 1;
351 ceph_zero_page_vector_range(page_off + read, len - read, 352
353 /* zero trailing bytes (inside i_size) */
354 if (left > 0 && pos < inode->i_size) {
355 if (pos + left > inode->i_size)
356 left = inode->i_size - pos;
357
358 dout("zero tail %d\n", left);
359 ceph_zero_page_vector_range(page_off + read, left,
352 pages); 360 pages);
353 read = len; 361 read += left;
354 goto out;
355 } 362 }
356
357 /* check i_size */
358 *checkeof = 1;
359 } 363 }
360 364
361out:
362 if (ret >= 0) 365 if (ret >= 0)
363 ret = read; 366 ret = read;
364 dout("striped_read returns %d\n", ret); 367 dout("striped_read returns %d\n", ret);
@@ -658,7 +661,7 @@ out:
658 661
659 /* hit EOF or hole? */ 662 /* hit EOF or hole? */
660 if (statret == 0 && *ppos < inode->i_size) { 663 if (statret == 0 && *ppos < inode->i_size) {
661 dout("aio_read sync_read hit hole, reading more\n"); 664 dout("aio_read sync_read hit hole, ppos %lld < size %lld, reading more\n", *ppos, inode->i_size);
662 read += ret; 665 read += ret;
663 base += ret; 666 base += ret;
664 len -= ret; 667 len -= ret;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 70b6a4839c38..d8858e96ab18 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1101,10 +1101,10 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1101 goto done; 1101 goto done;
1102 } 1102 }
1103 req->r_dentry = dn; /* may have spliced */ 1103 req->r_dentry = dn; /* may have spliced */
1104 igrab(in); 1104 ihold(in);
1105 } else if (ceph_ino(in) == vino.ino && 1105 } else if (ceph_ino(in) == vino.ino &&
1106 ceph_snap(in) == vino.snap) { 1106 ceph_snap(in) == vino.snap) {
1107 igrab(in); 1107 ihold(in);
1108 } else { 1108 } else {
1109 dout(" %p links to %p %llx.%llx, not %llx.%llx\n", 1109 dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1110 dn, in, ceph_ino(in), ceph_snap(in), 1110 dn, in, ceph_ino(in), ceph_snap(in),
@@ -1144,7 +1144,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
1144 goto done; 1144 goto done;
1145 } 1145 }
1146 req->r_dentry = dn; /* may have spliced */ 1146 req->r_dentry = dn; /* may have spliced */
1147 igrab(in); 1147 ihold(in);
1148 rinfo->head->is_dentry = 1; /* fool notrace handlers */ 1148 rinfo->head->is_dentry = 1; /* fool notrace handlers */
1149 } 1149 }
1150 1150
@@ -1328,7 +1328,7 @@ void ceph_queue_writeback(struct inode *inode)
1328 if (queue_work(ceph_inode_to_client(inode)->wb_wq, 1328 if (queue_work(ceph_inode_to_client(inode)->wb_wq,
1329 &ceph_inode(inode)->i_wb_work)) { 1329 &ceph_inode(inode)->i_wb_work)) {
1330 dout("ceph_queue_writeback %p\n", inode); 1330 dout("ceph_queue_writeback %p\n", inode);
1331 igrab(inode); 1331 ihold(inode);
1332 } else { 1332 } else {
1333 dout("ceph_queue_writeback %p failed\n", inode); 1333 dout("ceph_queue_writeback %p failed\n", inode);
1334 } 1334 }
@@ -1353,7 +1353,7 @@ void ceph_queue_invalidate(struct inode *inode)
1353 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq, 1353 if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
1354 &ceph_inode(inode)->i_pg_inv_work)) { 1354 &ceph_inode(inode)->i_pg_inv_work)) {
1355 dout("ceph_queue_invalidate %p\n", inode); 1355 dout("ceph_queue_invalidate %p\n", inode);
1356 igrab(inode); 1356 ihold(inode);
1357 } else { 1357 } else {
1358 dout("ceph_queue_invalidate %p failed\n", inode); 1358 dout("ceph_queue_invalidate %p failed\n", inode);
1359 } 1359 }
@@ -1477,7 +1477,7 @@ void ceph_queue_vmtruncate(struct inode *inode)
1477 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq, 1477 if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
1478 &ci->i_vmtruncate_work)) { 1478 &ci->i_vmtruncate_work)) {
1479 dout("ceph_queue_vmtruncate %p\n", inode); 1479 dout("ceph_queue_vmtruncate %p\n", inode);
1480 igrab(inode); 1480 ihold(inode);
1481 } else { 1481 } else {
1482 dout("ceph_queue_vmtruncate %p failed, pending=%d\n", 1482 dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
1483 inode, ci->i_truncate_pending); 1483 inode, ci->i_truncate_pending);
@@ -1738,7 +1738,8 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1738 __mark_inode_dirty(inode, inode_dirty_flags); 1738 __mark_inode_dirty(inode, inode_dirty_flags);
1739 1739
1740 if (mask) { 1740 if (mask) {
1741 req->r_inode = igrab(inode); 1741 req->r_inode = inode;
1742 ihold(inode);
1742 req->r_inode_drop = release; 1743 req->r_inode_drop = release;
1743 req->r_args.setattr.mask = cpu_to_le32(mask); 1744 req->r_args.setattr.mask = cpu_to_le32(mask);
1744 req->r_num_caps = 1; 1745 req->r_num_caps = 1;
@@ -1779,7 +1780,8 @@ int ceph_do_getattr(struct inode *inode, int mask)
1779 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS); 1780 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
1780 if (IS_ERR(req)) 1781 if (IS_ERR(req))
1781 return PTR_ERR(req); 1782 return PTR_ERR(req);
1782 req->r_inode = igrab(inode); 1783 req->r_inode = inode;
1784 ihold(inode);
1783 req->r_num_caps = 1; 1785 req->r_num_caps = 1;
1784 req->r_args.getattr.mask = cpu_to_le32(mask); 1786 req->r_args.getattr.mask = cpu_to_le32(mask);
1785 err = ceph_mdsc_do_request(mdsc, NULL, req); 1787 err = ceph_mdsc_do_request(mdsc, NULL, req);
diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c
index 8888c9ba68db..ef0b5f48e13a 100644
--- a/fs/ceph/ioctl.c
+++ b/fs/ceph/ioctl.c
@@ -73,7 +73,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
73 USE_AUTH_MDS); 73 USE_AUTH_MDS);
74 if (IS_ERR(req)) 74 if (IS_ERR(req))
75 return PTR_ERR(req); 75 return PTR_ERR(req);
76 req->r_inode = igrab(inode); 76 req->r_inode = inode;
77 ihold(inode);
77 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; 78 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
78 79
79 req->r_args.setlayout.layout.fl_stripe_unit = 80 req->r_args.setlayout.layout.fl_stripe_unit =
@@ -135,7 +136,8 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
135 136
136 if (IS_ERR(req)) 137 if (IS_ERR(req))
137 return PTR_ERR(req); 138 return PTR_ERR(req);
138 req->r_inode = igrab(inode); 139 req->r_inode = inode;
140 ihold(inode);
139 141
140 req->r_args.setlayout.layout.fl_stripe_unit = 142 req->r_args.setlayout.layout.fl_stripe_unit =
141 cpu_to_le32(l.stripe_unit); 143 cpu_to_le32(l.stripe_unit);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 476b329867d4..80576d05d687 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -23,7 +23,8 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); 23 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
24 if (IS_ERR(req)) 24 if (IS_ERR(req))
25 return PTR_ERR(req); 25 return PTR_ERR(req);
26 req->r_inode = igrab(inode); 26 req->r_inode = inode;
27 ihold(inode);
27 28
28 /* mds requires start and length rather than start and end */ 29 /* mds requires start and length rather than start and end */
29 if (LLONG_MAX == fl->fl_end) 30 if (LLONG_MAX == fl->fl_end)
@@ -32,11 +33,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
32 length = fl->fl_end - fl->fl_start + 1; 33 length = fl->fl_end - fl->fl_start + 1;
33 34
34 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 35 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
35 "length: %llu, wait: %d, type`: %d", (int)lock_type, 36 "length: %llu, wait: %d, type: %d", (int)lock_type,
36 (int)operation, (u64)fl->fl_pid, fl->fl_start, 37 (int)operation, (u64)fl->fl_pid, fl->fl_start,
37 length, wait, fl->fl_type); 38 length, wait, fl->fl_type);
38 39
39
40 req->r_args.filelock_change.rule = lock_type; 40 req->r_args.filelock_change.rule = lock_type;
41 req->r_args.filelock_change.type = cmd; 41 req->r_args.filelock_change.type = cmd;
42 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); 42 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
@@ -70,7 +70,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
70 } 70 }
71 ceph_mdsc_put_request(req); 71 ceph_mdsc_put_request(req);
72 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, " 72 dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
73 "length: %llu, wait: %d, type`: %d, err code %d", (int)lock_type, 73 "length: %llu, wait: %d, type: %d, err code %d", (int)lock_type,
74 (int)operation, (u64)fl->fl_pid, fl->fl_start, 74 (int)operation, (u64)fl->fl_pid, fl->fl_start,
75 length, wait, fl->fl_type, err); 75 length, wait, fl->fl_type, err);
76 return err; 76 return err;
@@ -109,16 +109,20 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
109 dout("mds locked, locking locally"); 109 dout("mds locked, locking locally");
110 err = posix_lock_file(file, fl, NULL); 110 err = posix_lock_file(file, fl, NULL);
111 if (err && (CEPH_MDS_OP_SETFILELOCK == op)) { 111 if (err && (CEPH_MDS_OP_SETFILELOCK == op)) {
112 /* undo! This should only happen if the kernel detects 112 /* undo! This should only happen if
113 * local deadlock. */ 113 * the kernel detects local
114 * deadlock. */
114 ceph_lock_message(CEPH_LOCK_FCNTL, op, file, 115 ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
115 CEPH_LOCK_UNLOCK, 0, fl); 116 CEPH_LOCK_UNLOCK, 0, fl);
116 dout("got %d on posix_lock_file, undid lock", err); 117 dout("got %d on posix_lock_file, undid lock",
118 err);
117 } 119 }
118 } 120 }
119 121
120 } else { 122 } else if (err == -ERESTARTSYS) {
121 dout("mds returned error code %d", err); 123 dout("undoing lock\n");
124 ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
125 CEPH_LOCK_UNLOCK, 0, fl);
122 } 126 }
123 return err; 127 return err;
124} 128}
@@ -155,8 +159,11 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
155 file, CEPH_LOCK_UNLOCK, 0, fl); 159 file, CEPH_LOCK_UNLOCK, 0, fl);
156 dout("got %d on flock_lock_file_wait, undid lock", err); 160 dout("got %d on flock_lock_file_wait, undid lock", err);
157 } 161 }
158 } else { 162 } else if (err == -ERESTARTSYS) {
159 dout("mds error code %d", err); 163 dout("undoing lock\n");
164 ceph_lock_message(CEPH_LOCK_FLOCK,
165 CEPH_MDS_OP_SETFILELOCK,
166 file, CEPH_LOCK_UNLOCK, 0, fl);
160 } 167 }
161 return err; 168 return err;
162} 169}
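Both ceph_lock() and ceph_flock() above gain the same error-path step: when the wait for the MDS reply is interrupted (-ERESTARTSYS), an explicit unlock is sent back, presumably because the MDS may already have granted the lock even though the local waiter gave up. A hedged sketch of that shape; the wrapper name is hypothetical, while ceph_lock_message() and the CEPH_* constants are the ones used in the hunks above:

static int lock_or_unwind(struct file *file, int cmd, struct file_lock *fl,
                          u16 op)
{
        int err = ceph_lock_message(CEPH_LOCK_FCNTL, op, file, cmd, 0, fl);

        if (err == -ERESTARTSYS) {
                /* interrupted: release whatever the MDS may be holding for us
                 * so a restarted syscall starts from a clean state */
                ceph_lock_message(CEPH_LOCK_FCNTL, op, file,
                                  CEPH_LOCK_UNLOCK, 0, fl);
        }
        return err;
}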
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 24067d68a554..54b14de2e729 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -722,7 +722,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
722 ci = list_first_entry(&mdsc->snap_flush_list, 722 ci = list_first_entry(&mdsc->snap_flush_list,
723 struct ceph_inode_info, i_snap_flush_item); 723 struct ceph_inode_info, i_snap_flush_item);
724 inode = &ci->vfs_inode; 724 inode = &ci->vfs_inode;
725 igrab(inode); 725 ihold(inode);
726 spin_unlock(&mdsc->snap_flush_lock); 726 spin_unlock(&mdsc->snap_flush_lock);
727 spin_lock(&inode->i_lock); 727 spin_lock(&inode->i_lock);
728 __ceph_flush_snaps(ci, &session, 0); 728 __ceph_flush_snaps(ci, &session, 0);
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index f2b628696180..f42d730f1b66 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -665,7 +665,8 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
665 err = PTR_ERR(req); 665 err = PTR_ERR(req);
666 goto out; 666 goto out;
667 } 667 }
668 req->r_inode = igrab(inode); 668 req->r_inode = inode;
669 ihold(inode);
669 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 670 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
670 req->r_num_caps = 1; 671 req->r_num_caps = 1;
671 req->r_args.setxattr.flags = cpu_to_le32(flags); 672 req->r_args.setxattr.flags = cpu_to_le32(flags);
@@ -795,7 +796,8 @@ static int ceph_send_removexattr(struct dentry *dentry, const char *name)
795 USE_AUTH_MDS); 796 USE_AUTH_MDS);
796 if (IS_ERR(req)) 797 if (IS_ERR(req))
797 return PTR_ERR(req); 798 return PTR_ERR(req);
798 req->r_inode = igrab(inode); 799 req->r_inode = inode;
800 ihold(inode);
799 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 801 req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
800 req->r_num_caps = 1; 802 req->r_num_caps = 1;
801 req->r_path2 = kstrdup(name, GFP_NOFS); 803 req->r_path2 = kstrdup(name, GFP_NOFS);
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 1cd4c3a1862d..53ed1ad2c112 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -7,6 +7,7 @@ config CIFS
7 select CRYPTO_MD5 7 select CRYPTO_MD5
8 select CRYPTO_HMAC 8 select CRYPTO_HMAC
9 select CRYPTO_ARC4 9 select CRYPTO_ARC4
10 select CRYPTO_ECB
10 select CRYPTO_DES 11 select CRYPTO_DES
11 help 12 help
12 This is the client VFS module for the Common Internet File System 13 This is the client VFS module for the Common Internet File System
@@ -148,7 +149,7 @@ config CIFS_FSCACHE
148 149
149config CIFS_ACL 150config CIFS_ACL
150 bool "Provide CIFS ACL support (EXPERIMENTAL)" 151 bool "Provide CIFS ACL support (EXPERIMENTAL)"
151 depends on EXPERIMENTAL && CIFS_XATTR 152 depends on EXPERIMENTAL && CIFS_XATTR && KEYS
152 help 153 help
153 Allows to fetch CIFS/NTFS ACL from the server. The DACL blob 154 Allows to fetch CIFS/NTFS ACL from the server. The DACL blob
154 is handed over to the application/caller. 155 is handed over to the application/caller.
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c
index dd8584d35a14..545509c3313b 100644
--- a/fs/cifs/cache.c
+++ b/fs/cifs/cache.c
@@ -92,7 +92,7 @@ static uint16_t cifs_server_get_key(const void *cookie_netfs_data,
92 break; 92 break;
93 93
94 default: 94 default:
95 cERROR(1, "CIFS: Unknown network family '%d'", sa->sa_family); 95 cERROR(1, "Unknown network family '%d'", sa->sa_family);
96 key_len = 0; 96 key_len = 0;
97 break; 97 break;
98 } 98 }
@@ -152,7 +152,7 @@ static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer,
152 152
153 sharename = extract_sharename(tcon->treeName); 153 sharename = extract_sharename(tcon->treeName);
154 if (IS_ERR(sharename)) { 154 if (IS_ERR(sharename)) {
155 cFYI(1, "CIFS: couldn't extract sharename\n"); 155 cFYI(1, "%s: couldn't extract sharename\n", __func__);
156 sharename = NULL; 156 sharename = NULL;
157 return 0; 157 return 0;
158 } 158 }
@@ -302,7 +302,7 @@ static void cifs_fscache_inode_now_uncached(void *cookie_netfs_data)
302 pagevec_init(&pvec, 0); 302 pagevec_init(&pvec, 0);
303 first = 0; 303 first = 0;
304 304
305 cFYI(1, "cifs inode 0x%p now uncached", cifsi); 305 cFYI(1, "%s: cifs inode 0x%p now uncached", __func__, cifsi);
306 306
307 for (;;) { 307 for (;;) {
308 nr_pages = pagevec_lookup(&pvec, 308 nr_pages = pagevec_lookup(&pvec,
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index dfbd9f1f373d..5a0ee7f2af06 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -184,7 +184,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
184 if (cifs_pdu == NULL || server == NULL) 184 if (cifs_pdu == NULL || server == NULL)
185 return -EINVAL; 185 return -EINVAL;
186 186
187 if (cifs_pdu->Command == SMB_COM_NEGOTIATE) 187 if (!server->session_estab)
188 return 0; 188 return 0;
189 189
190 if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) { 190 if (cifs_pdu->Command == SMB_COM_LOCKING_ANDX) {
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 989442dcfb45..2f0c58646c10 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -257,9 +257,6 @@ static int cifs_permission(struct inode *inode, int mask, unsigned int flags)
257{ 257{
258 struct cifs_sb_info *cifs_sb; 258 struct cifs_sb_info *cifs_sb;
259 259
260 if (flags & IPERM_FLAG_RCU)
261 return -ECHILD;
262
263 cifs_sb = CIFS_SB(inode->i_sb); 260 cifs_sb = CIFS_SB(inode->i_sb);
264 261
265 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) { 262 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
@@ -352,6 +349,37 @@ cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
352 } 349 }
353} 350}
354 351
352static void
353cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
354{
355 seq_printf(s, ",sec=");
356
357 switch (server->secType) {
358 case LANMAN:
359 seq_printf(s, "lanman");
360 break;
361 case NTLMv2:
362 seq_printf(s, "ntlmv2");
363 break;
364 case NTLM:
365 seq_printf(s, "ntlm");
366 break;
367 case Kerberos:
368 seq_printf(s, "krb5");
369 break;
370 case RawNTLMSSP:
371 seq_printf(s, "ntlmssp");
372 break;
373 default:
374 /* shouldn't ever happen */
375 seq_printf(s, "unknown");
376 break;
377 }
378
379 if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
380 seq_printf(s, "i");
381}
382
355/* 383/*
356 * cifs_show_options() is for displaying mount options in /proc/mounts. 384 * cifs_show_options() is for displaying mount options in /proc/mounts.
357 * Not all settable options are displayed but most of the important 385 * Not all settable options are displayed but most of the important
@@ -365,6 +393,8 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
365 struct sockaddr *srcaddr; 393 struct sockaddr *srcaddr;
366 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; 394 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
367 395
396 cifs_show_security(s, tcon->ses->server);
397
368 seq_printf(s, ",unc=%s", tcon->treeName); 398 seq_printf(s, ",unc=%s", tcon->treeName);
369 399
370 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER) 400 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
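The new cifs_show_security() helper above emits a sec= entry just before the unc= option in cifs_show_options(), appending an "i" whenever SECMODE_SIGN_REQUIRED or SECMODE_SIGN_ENABLED is set, which mirrors the sec= syntax accepted at mount time. Purely as an illustration (not captured from a real system), a share negotiated with NTLMv2 plus signing might then show up in /proc/mounts roughly as:

//server/share /mnt/share cifs rw,sec=ntlmv2i,unc=\\server\share,... 0 0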
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 64313f778ebf..0900e1658c96 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -129,5 +129,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
129extern const struct export_operations cifs_export_ops; 129extern const struct export_operations cifs_export_ops;
130#endif /* CIFS_NFSD_EXPORT */ 130#endif /* CIFS_NFSD_EXPORT */
131 131
132#define CIFS_VERSION "1.72" 132#define CIFS_VERSION "1.73"
133#endif /* _CIFSFS_H */ 133#endif /* _CIFSFS_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 6d88b82537c3..12cf72dd0c42 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -152,7 +152,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
152 mid_entry->callback(mid_entry); 152 mid_entry->callback(mid_entry);
153 } 153 }
154 154
155 while (server->tcpStatus == CifsNeedReconnect) { 155 do {
156 try_to_freeze(); 156 try_to_freeze();
157 157
158 /* we should try only the port we connected to before */ 158 /* we should try only the port we connected to before */
@@ -167,7 +167,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
167 server->tcpStatus = CifsNeedNegotiate; 167 server->tcpStatus = CifsNeedNegotiate;
168 spin_unlock(&GlobalMid_Lock); 168 spin_unlock(&GlobalMid_Lock);
169 } 169 }
170 } 170 } while (server->tcpStatus == CifsNeedReconnect);
171 171
172 return rc; 172 return rc;
173} 173}
@@ -784,7 +784,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
784 struct smb_vol *vol) 784 struct smb_vol *vol)
785{ 785{
786 char *value, *data, *end; 786 char *value, *data, *end;
787 char *mountdata_copy, *options; 787 char *mountdata_copy = NULL, *options;
788 unsigned int temp_len, i, j; 788 unsigned int temp_len, i, j;
789 char separator[2]; 789 char separator[2];
790 short int override_uid = -1; 790 short int override_uid = -1;
@@ -1391,7 +1391,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1391 "/proc/fs/cifs/LookupCacheEnabled to 0\n"); 1391 "/proc/fs/cifs/LookupCacheEnabled to 0\n");
1392 } else if (strnicmp(data, "fsc", 3) == 0) { 1392 } else if (strnicmp(data, "fsc", 3) == 0) {
1393#ifndef CONFIG_CIFS_FSCACHE 1393#ifndef CONFIG_CIFS_FSCACHE
1394 cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE" 1394 cERROR(1, "FS-Cache support needs CONFIG_CIFS_FSCACHE "
1395 "kernel config option set"); 1395 "kernel config option set");
1396 goto cifs_parse_mount_err; 1396 goto cifs_parse_mount_err;
1397#endif 1397#endif
@@ -1976,7 +1976,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info)
1976 warned_on_ntlm = true; 1976 warned_on_ntlm = true;
1977 cERROR(1, "default security mechanism requested. The default " 1977 cERROR(1, "default security mechanism requested. The default "
1978 "security mechanism will be upgraded from ntlm to " 1978 "security mechanism will be upgraded from ntlm to "
1979 "ntlmv2 in kernel release 2.6.41"); 1979 "ntlmv2 in kernel release 3.1");
1980 } 1980 }
1981 ses->overrideSecFlg = volume_info->secFlg; 1981 ses->overrideSecFlg = volume_info->secFlg;
1982 1982
@@ -2149,7 +2149,10 @@ cifs_put_tlink(struct tcon_link *tlink)
2149} 2149}
2150 2150
2151static inline struct tcon_link * 2151static inline struct tcon_link *
2152cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb); 2152cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
2153{
2154 return cifs_sb->master_tlink;
2155}
2153 2156
2154static int 2157static int
2155compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) 2158compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
@@ -3171,6 +3174,10 @@ out:
3171 return rc; 3174 return rc;
3172} 3175}
3173 3176
3177/*
3178 * Issue a TREE_CONNECT request. Note that for IPC$ shares, that the tcon
3179 * pointer may be NULL.
3180 */
3174int 3181int
3175CIFSTCon(unsigned int xid, struct cifs_ses *ses, 3182CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3176 const char *tree, struct cifs_tcon *tcon, 3183 const char *tree, struct cifs_tcon *tcon,
@@ -3205,7 +3212,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3205 pSMB->AndXCommand = 0xFF; 3212 pSMB->AndXCommand = 0xFF;
3206 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); 3213 pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO);
3207 bcc_ptr = &pSMB->Password[0]; 3214 bcc_ptr = &pSMB->Password[0];
3208 if ((ses->server->sec_mode) & SECMODE_USER) { 3215 if (!tcon || (ses->server->sec_mode & SECMODE_USER)) {
3209 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ 3216 pSMB->PasswordLength = cpu_to_le16(1); /* minimum */
3210 *bcc_ptr = 0; /* password is null byte */ 3217 *bcc_ptr = 0; /* password is null byte */
3211 bcc_ptr++; /* skip password */ 3218 bcc_ptr++; /* skip password */
@@ -3371,7 +3378,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
3371 } 3378 }
3372 if (rc == 0) { 3379 if (rc == 0) {
3373 spin_lock(&GlobalMid_Lock); 3380 spin_lock(&GlobalMid_Lock);
3374 if (server->tcpStatus != CifsExiting) 3381 if (server->tcpStatus == CifsNeedNegotiate)
3375 server->tcpStatus = CifsGood; 3382 server->tcpStatus = CifsGood;
3376 else 3383 else
3377 rc = -EHOSTDOWN; 3384 rc = -EHOSTDOWN;
@@ -3484,12 +3491,6 @@ out:
3484 return tcon; 3491 return tcon;
3485} 3492}
3486 3493
3487static inline struct tcon_link *
3488cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
3489{
3490 return cifs_sb->master_tlink;
3491}
3492
3493struct cifs_tcon * 3494struct cifs_tcon *
3494cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) 3495cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb)
3495{ 3496{
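One change in cifs_reconnect() above is purely a loop shape: the pre-tested while loop becomes a post-tested do/while, so the body runs at least once even if tcpStatus is no longer CifsNeedReconnect by the time the loop is reached, and the condition is re-checked only after each attempt. A generic illustration of the difference, with hypothetical names (conn_state, try_reconnect) standing in for the cifs ones:

enum conn_state { CONN_GOOD, CONN_NEED_RECONNECT };

static enum conn_state try_reconnect(void);     /* hypothetical */

static void reconnect_pre_tested(enum conn_state *state)
{
        /* may run zero times if *state has already changed */
        while (*state == CONN_NEED_RECONNECT)
                *state = try_reconnect();
}

static void reconnect_post_tested(enum conn_state *state)
{
        /* shape adopted above: always makes at least one attempt */
        do {
                *state = try_reconnect();
        } while (*state == CONN_NEED_RECONNECT);
}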
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index d368a47ba5eb..816696621ec9 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -28,14 +28,14 @@ void cifs_fscache_get_client_cookie(struct TCP_Server_Info *server)
28 server->fscache = 28 server->fscache =
29 fscache_acquire_cookie(cifs_fscache_netfs.primary_index, 29 fscache_acquire_cookie(cifs_fscache_netfs.primary_index,
30 &cifs_fscache_server_index_def, server); 30 &cifs_fscache_server_index_def, server);
31 cFYI(1, "CIFS: get client cookie (0x%p/0x%p)", server, 31 cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
32 server->fscache); 32 server->fscache);
33} 33}
34 34
35void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) 35void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server)
36{ 36{
37 cFYI(1, "CIFS: release client cookie (0x%p/0x%p)", server, 37 cFYI(1, "%s: (0x%p/0x%p)", __func__, server,
38 server->fscache); 38 server->fscache);
39 fscache_relinquish_cookie(server->fscache, 0); 39 fscache_relinquish_cookie(server->fscache, 0);
40 server->fscache = NULL; 40 server->fscache = NULL;
41} 41}
@@ -47,13 +47,13 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
47 tcon->fscache = 47 tcon->fscache =
48 fscache_acquire_cookie(server->fscache, 48 fscache_acquire_cookie(server->fscache,
49 &cifs_fscache_super_index_def, tcon); 49 &cifs_fscache_super_index_def, tcon);
50 cFYI(1, "CIFS: get superblock cookie (0x%p/0x%p)", 50 cFYI(1, "%s: (0x%p/0x%p)", __func__, server->fscache,
51 server->fscache, tcon->fscache); 51 tcon->fscache);
52} 52}
53 53
54void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) 54void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon)
55{ 55{
56 cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); 56 cFYI(1, "%s: (0x%p)", __func__, tcon->fscache);
57 fscache_relinquish_cookie(tcon->fscache, 0); 57 fscache_relinquish_cookie(tcon->fscache, 0);
58 tcon->fscache = NULL; 58 tcon->fscache = NULL;
59} 59}
@@ -70,8 +70,8 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode)
70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) { 70 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE) {
71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache, 71 cifsi->fscache = fscache_acquire_cookie(tcon->fscache,
72 &cifs_fscache_inode_object_def, cifsi); 72 &cifs_fscache_inode_object_def, cifsi);
73 cFYI(1, "CIFS: got FH cookie (0x%p/0x%p)", tcon->fscache, 73 cFYI(1, "%s: got FH cookie (0x%p/0x%p)", __func__,
74 cifsi->fscache); 74 tcon->fscache, cifsi->fscache);
75 } 75 }
76} 76}
77 77
@@ -80,8 +80,7 @@ void cifs_fscache_release_inode_cookie(struct inode *inode)
80 struct cifsInodeInfo *cifsi = CIFS_I(inode); 80 struct cifsInodeInfo *cifsi = CIFS_I(inode);
81 81
82 if (cifsi->fscache) { 82 if (cifsi->fscache) {
83 cFYI(1, "CIFS releasing inode cookie (0x%p)", 83 cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
84 cifsi->fscache);
85 fscache_relinquish_cookie(cifsi->fscache, 0); 84 fscache_relinquish_cookie(cifsi->fscache, 0);
86 cifsi->fscache = NULL; 85 cifsi->fscache = NULL;
87 } 86 }
@@ -92,8 +91,7 @@ static void cifs_fscache_disable_inode_cookie(struct inode *inode)
92 struct cifsInodeInfo *cifsi = CIFS_I(inode); 91 struct cifsInodeInfo *cifsi = CIFS_I(inode);
93 92
94 if (cifsi->fscache) { 93 if (cifsi->fscache) {
95 cFYI(1, "CIFS disabling inode cookie (0x%p)", 94 cFYI(1, "%s: (0x%p)", __func__, cifsi->fscache);
96 cifsi->fscache);
97 fscache_relinquish_cookie(cifsi->fscache, 1); 95 fscache_relinquish_cookie(cifsi->fscache, 1);
98 cifsi->fscache = NULL; 96 cifsi->fscache = NULL;
99 } 97 }
@@ -121,8 +119,8 @@ void cifs_fscache_reset_inode_cookie(struct inode *inode)
121 cifs_sb_master_tcon(cifs_sb)->fscache, 119 cifs_sb_master_tcon(cifs_sb)->fscache,
122 &cifs_fscache_inode_object_def, 120 &cifs_fscache_inode_object_def,
123 cifsi); 121 cifsi);
124 cFYI(1, "CIFS: new cookie 0x%p oldcookie 0x%p", 122 cFYI(1, "%s: new cookie 0x%p oldcookie 0x%p",
125 cifsi->fscache, old); 123 __func__, cifsi->fscache, old);
126 } 124 }
127} 125}
128 126
@@ -132,8 +130,8 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
132 struct inode *inode = page->mapping->host; 130 struct inode *inode = page->mapping->host;
133 struct cifsInodeInfo *cifsi = CIFS_I(inode); 131 struct cifsInodeInfo *cifsi = CIFS_I(inode);
134 132
135 cFYI(1, "CIFS: fscache release page (0x%p/0x%p)", 133 cFYI(1, "%s: (0x%p/0x%p)", __func__, page,
136 page, cifsi->fscache); 134 cifsi->fscache);
137 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) 135 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp))
138 return 0; 136 return 0;
139 } 137 }
@@ -144,8 +142,7 @@ int cifs_fscache_release_page(struct page *page, gfp_t gfp)
144static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx, 142static void cifs_readpage_from_fscache_complete(struct page *page, void *ctx,
145 int error) 143 int error)
146{ 144{
147 cFYI(1, "CFS: readpage_from_fscache_complete (0x%p/%d)", 145 cFYI(1, "%s: (0x%p/%d)", __func__, page, error);
148 page, error);
149 if (!error) 146 if (!error)
150 SetPageUptodate(page); 147 SetPageUptodate(page);
151 unlock_page(page); 148 unlock_page(page);
@@ -158,7 +155,7 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
158{ 155{
159 int ret; 156 int ret;
160 157
161 cFYI(1, "CIFS: readpage_from_fscache(fsc:%p, p:%p, i:0x%p", 158 cFYI(1, "%s: (fsc:%p, p:%p, i:0x%p", __func__,
162 CIFS_I(inode)->fscache, page, inode); 159 CIFS_I(inode)->fscache, page, inode);
163 ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page, 160 ret = fscache_read_or_alloc_page(CIFS_I(inode)->fscache, page,
164 cifs_readpage_from_fscache_complete, 161 cifs_readpage_from_fscache_complete,
@@ -167,11 +164,11 @@ int __cifs_readpage_from_fscache(struct inode *inode, struct page *page)
167 switch (ret) { 164 switch (ret) {
168 165
169 case 0: /* page found in fscache, read submitted */ 166 case 0: /* page found in fscache, read submitted */
170 cFYI(1, "CIFS: readpage_from_fscache: submitted"); 167 cFYI(1, "%s: submitted", __func__);
171 return ret; 168 return ret;
172 case -ENOBUFS: /* page won't be cached */ 169 case -ENOBUFS: /* page won't be cached */
173 case -ENODATA: /* page not in cache */ 170 case -ENODATA: /* page not in cache */
174 cFYI(1, "CIFS: readpage_from_fscache %d", ret); 171 cFYI(1, "%s: %d", __func__, ret);
175 return 1; 172 return 1;
176 173
177 default: 174 default:
@@ -190,7 +187,7 @@ int __cifs_readpages_from_fscache(struct inode *inode,
190{ 187{
191 int ret; 188 int ret;
192 189
193 cFYI(1, "CIFS: __cifs_readpages_from_fscache (0x%p/%u/0x%p)", 190 cFYI(1, "%s: (0x%p/%u/0x%p)", __func__,
194 CIFS_I(inode)->fscache, *nr_pages, inode); 191 CIFS_I(inode)->fscache, *nr_pages, inode);
195 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping, 192 ret = fscache_read_or_alloc_pages(CIFS_I(inode)->fscache, mapping,
196 pages, nr_pages, 193 pages, nr_pages,
@@ -199,12 +196,12 @@ int __cifs_readpages_from_fscache(struct inode *inode,
199 mapping_gfp_mask(mapping)); 196 mapping_gfp_mask(mapping));
200 switch (ret) { 197 switch (ret) {
201 case 0: /* read submitted to the cache for all pages */ 198 case 0: /* read submitted to the cache for all pages */
202 cFYI(1, "CIFS: readpages_from_fscache: submitted"); 199 cFYI(1, "%s: submitted", __func__);
203 return ret; 200 return ret;
204 201
205 case -ENOBUFS: /* some pages are not cached and can't be */ 202 case -ENOBUFS: /* some pages are not cached and can't be */
206 case -ENODATA: /* some pages are not cached */ 203 case -ENODATA: /* some pages are not cached */
207 cFYI(1, "CIFS: readpages_from_fscache: no page"); 204 cFYI(1, "%s: no page", __func__);
208 return 1; 205 return 1;
209 206
210 default: 207 default:
@@ -218,7 +215,7 @@ void __cifs_readpage_to_fscache(struct inode *inode, struct page *page)
218{ 215{
219 int ret; 216 int ret;
220 217
221 cFYI(1, "CIFS: readpage_to_fscache(fsc: %p, p: %p, i: %p", 218 cFYI(1, "%s: (fsc: %p, p: %p, i: %p)", __func__,
222 CIFS_I(inode)->fscache, page, inode); 219 CIFS_I(inode)->fscache, page, inode);
223 ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL); 220 ret = fscache_write_page(CIFS_I(inode)->fscache, page, GFP_KERNEL);
224 if (ret != 0) 221 if (ret != 0)
@@ -230,7 +227,7 @@ void __cifs_fscache_invalidate_page(struct page *page, struct inode *inode)
230 struct cifsInodeInfo *cifsi = CIFS_I(inode); 227 struct cifsInodeInfo *cifsi = CIFS_I(inode);
231 struct fscache_cookie *cookie = cifsi->fscache; 228 struct fscache_cookie *cookie = cifsi->fscache;
232 229
233 cFYI(1, "CIFS: fscache invalidatepage (0x%p/0x%p)", page, cookie); 230 cFYI(1, "%s: (0x%p/0x%p)", __func__, page, cookie);
234 fscache_wait_on_page_write(cookie, page); 231 fscache_wait_on_page_write(cookie, page);
235 fscache_uncache_page(cookie, page); 232 fscache_uncache_page(cookie, page);
236} 233}
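The fscache debug messages above drop their hand-written "CIFS: <function name>" prefixes in favour of the __func__ identifier, so the prefix cannot drift out of sync when a function is renamed. A tiny, generic illustration of the pattern, using pr_debug() in place of the cFYI() wrapper used by this code:

#include <linux/printk.h>

static void release_cookie(void *cookie)
{
        /* __func__ expands to "release_cookie" at compile time */
        pr_debug("%s: (0x%p)\n", __func__, cookie);
}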
diff --git a/fs/coda/pioctl.c b/fs/coda/pioctl.c
index 6cbb3afb36dc..cb140ef293e4 100644
--- a/fs/coda/pioctl.c
+++ b/fs/coda/pioctl.c
@@ -43,8 +43,6 @@ const struct file_operations coda_ioctl_operations = {
43/* the coda pioctl inode ops */ 43/* the coda pioctl inode ops */
44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags) 44static int coda_ioctl_permission(struct inode *inode, int mask, unsigned int flags)
45{ 45{
46 if (flags & IPERM_FLAG_RCU)
47 return -ECHILD;
48 return (mask & MAY_EXEC) ? -EACCES : 0; 46 return (mask & MAY_EXEC) ? -EACCES : 0;
49} 47}
50 48
diff --git a/fs/dcookies.c b/fs/dcookies.c
index a21cabdbd87b..dda0dc702d1b 100644
--- a/fs/dcookies.c
+++ b/fs/dcookies.c
@@ -178,6 +178,8 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
178 /* FIXME: (deleted) ? */ 178 /* FIXME: (deleted) ? */
179 path = d_path(&dcs->path, kbuf, PAGE_SIZE); 179 path = d_path(&dcs->path, kbuf, PAGE_SIZE);
180 180
181 mutex_unlock(&dcookie_mutex);
182
181 if (IS_ERR(path)) { 183 if (IS_ERR(path)) {
182 err = PTR_ERR(path); 184 err = PTR_ERR(path);
183 goto out_free; 185 goto out_free;
@@ -194,6 +196,7 @@ SYSCALL_DEFINE(lookup_dcookie)(u64 cookie64, char __user * buf, size_t len)
194 196
195out_free: 197out_free:
196 kfree(kbuf); 198 kfree(kbuf);
199 return err;
197out: 200out:
198 mutex_unlock(&dcookie_mutex); 201 mutex_unlock(&dcookie_mutex);
199 return err; 202 return err;
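In the dcookies hunk above, d_path() copies the name into the locally allocated kbuf, so nothing protected by dcookie_mutex is used afterwards; the mutex can therefore be dropped before the copy to user space, which may fault and sleep, and the out_free path now returns directly so the lock is not unlocked a second time at the out label. A hedged, simplified sketch of that control flow; the helper name and parameters are hypothetical:

static int copy_path_to_user(char __user *ubuf, size_t len, struct path *p,
                             struct mutex *lock, char *kbuf)
{
        char *path;
        int err;

        mutex_lock(lock);
        path = d_path(p, kbuf, PAGE_SIZE);      /* result lives inside kbuf */
        mutex_unlock(lock);                     /* shared state no longer needed */

        if (IS_ERR(path))
                return PTR_ERR(path);

        err = strlen(path) + 1;
        if (err > len)
                return -ERANGE;
        if (copy_to_user(ubuf, path, err))      /* may fault; lock already dropped */
                return -EFAULT;
        return err;
}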
diff --git a/fs/exec.c b/fs/exec.c
index ea5f748906a8..6075a1e727ae 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1093,6 +1093,7 @@ int flush_old_exec(struct linux_binprm * bprm)
1093 1093
1094 bprm->mm = NULL; /* We're using it now */ 1094 bprm->mm = NULL; /* We're using it now */
1095 1095
1096 set_fs(USER_DS);
1096 current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD); 1097 current->flags &= ~(PF_RANDOMIZE | PF_KTHREAD);
1097 flush_thread(); 1098 flush_thread();
1098 current->personality &= ~bprm->per_clear; 1099 current->personality &= ~bprm->per_clear;
@@ -1357,10 +1358,6 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
1357 if (retval) 1358 if (retval)
1358 return retval; 1359 return retval;
1359 1360
1360 /* kernel module loader fixup */
1361 /* so we don't try to load run modprobe in kernel space. */
1362 set_fs(USER_DS);
1363
1364 retval = audit_bprm(bprm); 1361 retval = audit_bprm(bprm);
1365 if (retval) 1362 if (retval)
1366 return retval; 1363 return retval;
@@ -1999,7 +1996,7 @@ static void wait_for_dump_helpers(struct file *file)
1999 * is a special value that we use to trap recursive 1996 * is a special value that we use to trap recursive
2000 * core dumps 1997 * core dumps
2001 */ 1998 */
2002static int umh_pipe_setup(struct subprocess_info *info) 1999static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
2003{ 2000{
2004 struct file *rp, *wp; 2001 struct file *rp, *wp;
2005 struct fdtable *fdt; 2002 struct fdtable *fdt;
diff --git a/fs/fat/file.c b/fs/fat/file.c
index 7257752b6d5d..7018e1d8902d 100644
--- a/fs/fat/file.c
+++ b/fs/fat/file.c
@@ -102,7 +102,7 @@ static int fat_ioctl_set_attributes(struct file *file, u32 __user *user_attr)
102 if (attr & ATTR_SYS) 102 if (attr & ATTR_SYS)
103 inode->i_flags |= S_IMMUTABLE; 103 inode->i_flags |= S_IMMUTABLE;
104 else 104 else
105 inode->i_flags &= S_IMMUTABLE; 105 inode->i_flags &= ~S_IMMUTABLE;
106 } 106 }
107 107
108 fat_save_attrs(inode, attr); 108 fat_save_attrs(inode, attr);
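The one-character fat fix above is a classic bitmask slip: flags &= S_IMMUTABLE keeps only the immutable bit and wipes every other flag, whereas clearing a single bit needs the complemented mask. Illustrative only, using the flag constants from <linux/fs.h>:

#include <linux/fs.h>

static unsigned int clear_immutable_buggy(unsigned int flags)
{
        return flags & S_IMMUTABLE;     /* keeps only S_IMMUTABLE: wrong */
}

static unsigned int clear_immutable_fixed(unsigned int flags)
{
        return flags & ~S_IMMUTABLE;    /* clears just that one bit */
}

With flags == (S_IMMUTABLE | S_NOATIME), the buggy variant returns S_IMMUTABLE, the very bit it meant to clear, while the fixed variant returns S_NOATIME.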
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index cc6ec4b2f0ff..38f84cd48b67 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -921,6 +921,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
921 if (sb->s_flags & MS_MANDLOCK) 921 if (sb->s_flags & MS_MANDLOCK)
922 goto err; 922 goto err;
923 923
924 sb->s_flags &= ~MS_NOSEC;
925
924 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 926 if (!parse_fuse_opt((char *) data, &d, is_bdev))
925 goto err; 927 goto err;
926 928
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 2792a790e50b..1c1336e7b3b2 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -663,14 +663,19 @@ static void glock_work_func(struct work_struct *work)
663 drop_ref = 1; 663 drop_ref = 1;
664 } 664 }
665 spin_lock(&gl->gl_spin); 665 spin_lock(&gl->gl_spin);
666 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) && 666 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
667 gl->gl_state != LM_ST_UNLOCKED && 667 gl->gl_state != LM_ST_UNLOCKED &&
668 gl->gl_demote_state != LM_ST_EXCLUSIVE) { 668 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
669 unsigned long holdtime, now = jiffies; 669 unsigned long holdtime, now = jiffies;
670
670 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time; 671 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
671 if (time_before(now, holdtime)) 672 if (time_before(now, holdtime))
672 delay = holdtime - now; 673 delay = holdtime - now;
673 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags); 674
675 if (!delay) {
676 clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);
677 set_bit(GLF_DEMOTE, &gl->gl_flags);
678 }
674 } 679 }
675 run_queue(gl, 0); 680 run_queue(gl, 0);
676 spin_unlock(&gl->gl_spin); 681 spin_unlock(&gl->gl_spin);
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 3db5ba4568fc..b3cc8586984e 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -974,7 +974,7 @@ out_no_inode:
974out_no_read: 974out_no_read:
975 printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n", 975 printk(KERN_WARNING "%s: bread failed, dev=%s, iso_blknum=%d, block=%d\n",
976 __func__, s->s_id, iso_blknum, block); 976 __func__, s->s_id, iso_blknum, block);
977 goto out_freesbi; 977 goto out_freebh;
978out_bad_zone_size: 978out_bad_zone_size:
979 printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n", 979 printk(KERN_WARNING "ISOFS: Bad logical zone size %ld\n",
980 sbi->s_log_zone_size); 980 sbi->s_log_zone_size);
@@ -989,6 +989,7 @@ out_unknown_format:
989 989
990out_freebh: 990out_freebh:
991 brelse(bh); 991 brelse(bh);
992 brelse(pri_bh);
992out_freesbi: 993out_freesbi:
993 kfree(opt.iocharset); 994 kfree(opt.iocharset);
994 kfree(sbi); 995 kfree(sbi);
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 278e3fb40b71..583636f745e5 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1123,7 +1123,7 @@ int lmLogOpen(struct super_block *sb)
1123 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 1123 bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
1124 log); 1124 log);
1125 if (IS_ERR(bdev)) { 1125 if (IS_ERR(bdev)) {
1126 rc = -PTR_ERR(bdev); 1126 rc = PTR_ERR(bdev);
1127 goto free; 1127 goto free;
1128 } 1128 }
1129 1129
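The jfs fix above rests on the convention that PTR_ERR() already yields a negative errno, so negating it, as the old rc = -PTR_ERR(bdev) did, produces a positive value that callers will not treat as an error. A small sketch of the intended shape; the function name is hypothetical and only the error-pointer helpers are taken as given:

static int open_log_bdev(dev_t dev, void *holder)
{
        struct block_device *bdev;

        bdev = blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                 holder);
        if (IS_ERR(bdev))
                return PTR_ERR(bdev);   /* already negative, e.g. -ENODEV */

        blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        return 0;
}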
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 9ed89d1663f8..1afae26cf236 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -555,13 +555,6 @@ static int logfs_symlink(struct inode *dir, struct dentry *dentry,
555 return __logfs_create(dir, dentry, inode, target, destlen); 555 return __logfs_create(dir, dentry, inode, target, destlen);
556} 556}
557 557
558static int logfs_permission(struct inode *inode, int mask, unsigned int flags)
559{
560 if (flags & IPERM_FLAG_RCU)
561 return -ECHILD;
562 return generic_permission(inode, mask, flags, NULL);
563}
564
565static int logfs_link(struct dentry *old_dentry, struct inode *dir, 558static int logfs_link(struct dentry *old_dentry, struct inode *dir,
566 struct dentry *dentry) 559 struct dentry *dentry)
567{ 560{
@@ -820,7 +813,6 @@ const struct inode_operations logfs_dir_iops = {
820 .mknod = logfs_mknod, 813 .mknod = logfs_mknod,
821 .rename = logfs_rename, 814 .rename = logfs_rename,
822 .rmdir = logfs_rmdir, 815 .rmdir = logfs_rmdir,
823 .permission = logfs_permission,
824 .symlink = logfs_symlink, 816 .symlink = logfs_symlink,
825 .unlink = logfs_unlink, 817 .unlink = logfs_unlink,
826}; 818};
diff --git a/fs/namei.c b/fs/namei.c
index e2e4e8d032ee..0223c41fb114 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -238,7 +238,8 @@ int generic_permission(struct inode *inode, int mask, unsigned int flags,
238 238
239 /* 239 /*
240 * Read/write DACs are always overridable. 240 * Read/write DACs are always overridable.
241 * Executable DACs are overridable if at least one exec bit is set. 241 * Executable DACs are overridable for all directories and
242 * for non-directories that have least one exec bit set.
242 */ 243 */
243 if (!(mask & MAY_EXEC) || execute_ok(inode)) 244 if (!(mask & MAY_EXEC) || execute_ok(inode))
244 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE)) 245 if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
@@ -812,6 +813,11 @@ static int follow_automount(struct path *path, unsigned flags,
812 if (!mnt) /* mount collision */ 813 if (!mnt) /* mount collision */
813 return 0; 814 return 0;
814 815
816 if (!*need_mntput) {
817 /* lock_mount() may release path->mnt on error */
818 mntget(path->mnt);
819 *need_mntput = true;
820 }
815 err = finish_automount(mnt, path); 821 err = finish_automount(mnt, path);
816 822
817 switch (err) { 823 switch (err) {
@@ -819,12 +825,9 @@ static int follow_automount(struct path *path, unsigned flags,
819 /* Someone else made a mount here whilst we were busy */ 825 /* Someone else made a mount here whilst we were busy */
820 return 0; 826 return 0;
821 case 0: 827 case 0:
822 dput(path->dentry); 828 path_put(path);
823 if (*need_mntput)
824 mntput(path->mnt);
825 path->mnt = mnt; 829 path->mnt = mnt;
826 path->dentry = dget(mnt->mnt_root); 830 path->dentry = dget(mnt->mnt_root);
827 *need_mntput = true;
828 return 0; 831 return 0;
829 default: 832 default:
830 return err; 833 return err;
@@ -844,9 +847,10 @@ static int follow_automount(struct path *path, unsigned flags,
844 */ 847 */
845static int follow_managed(struct path *path, unsigned flags) 848static int follow_managed(struct path *path, unsigned flags)
846{ 849{
850 struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
847 unsigned managed; 851 unsigned managed;
848 bool need_mntput = false; 852 bool need_mntput = false;
849 int ret; 853 int ret = 0;
850 854
851 /* Given that we're not holding a lock here, we retain the value in a 855 /* Given that we're not holding a lock here, we retain the value in a
852 * local variable for each dentry as we look at it so that we don't see 856 * local variable for each dentry as we look at it so that we don't see
@@ -861,7 +865,7 @@ static int follow_managed(struct path *path, unsigned flags)
861 BUG_ON(!path->dentry->d_op->d_manage); 865 BUG_ON(!path->dentry->d_op->d_manage);
862 ret = path->dentry->d_op->d_manage(path->dentry, false); 866 ret = path->dentry->d_op->d_manage(path->dentry, false);
863 if (ret < 0) 867 if (ret < 0)
864 return ret == -EISDIR ? 0 : ret; 868 break;
865 } 869 }
866 870
867 /* Transit to a mounted filesystem. */ 871 /* Transit to a mounted filesystem. */
@@ -887,14 +891,19 @@ static int follow_managed(struct path *path, unsigned flags)
887 if (managed & DCACHE_NEED_AUTOMOUNT) { 891 if (managed & DCACHE_NEED_AUTOMOUNT) {
888 ret = follow_automount(path, flags, &need_mntput); 892 ret = follow_automount(path, flags, &need_mntput);
889 if (ret < 0) 893 if (ret < 0)
890 return ret == -EISDIR ? 0 : ret; 894 break;
891 continue; 895 continue;
892 } 896 }
893 897
894 /* We didn't change the current path point */ 898 /* We didn't change the current path point */
895 break; 899 break;
896 } 900 }
897 return 0; 901
902 if (need_mntput && path->mnt == mnt)
903 mntput(path->mnt);
904 if (ret == -EISDIR)
905 ret = 0;
906 return ret;
898} 907}
899 908
900int follow_down_one(struct path *path) 909int follow_down_one(struct path *path)
@@ -1003,9 +1012,6 @@ failed:
1003 * Follow down to the covering mount currently visible to userspace. At each 1012 * Follow down to the covering mount currently visible to userspace. At each
1004 * point, the filesystem owning that dentry may be queried as to whether the 1013 * point, the filesystem owning that dentry may be queried as to whether the
1005 * caller is permitted to proceed or not. 1014 * caller is permitted to proceed or not.
1006 *
1007 * Care must be taken as namespace_sem may be held (indicated by mounting_here
1008 * being true).
1009 */ 1015 */
1010int follow_down(struct path *path) 1016int follow_down(struct path *path)
1011{ 1017{
@@ -2624,6 +2630,10 @@ static long do_rmdir(int dfd, const char __user *pathname)
2624 error = PTR_ERR(dentry); 2630 error = PTR_ERR(dentry);
2625 if (IS_ERR(dentry)) 2631 if (IS_ERR(dentry))
2626 goto exit2; 2632 goto exit2;
2633 if (!dentry->d_inode) {
2634 error = -ENOENT;
2635 goto exit3;
2636 }
2627 error = mnt_want_write(nd.path.mnt); 2637 error = mnt_want_write(nd.path.mnt);
2628 if (error) 2638 if (error)
2629 goto exit3; 2639 goto exit3;
@@ -2712,8 +2722,9 @@ static long do_unlinkat(int dfd, const char __user *pathname)
2712 if (nd.last.name[nd.last.len]) 2722 if (nd.last.name[nd.last.len])
2713 goto slashes; 2723 goto slashes;
2714 inode = dentry->d_inode; 2724 inode = dentry->d_inode;
2715 if (inode) 2725 if (!inode)
2716 ihold(inode); 2726 goto slashes;
2727 ihold(inode);
2717 error = mnt_want_write(nd.path.mnt); 2728 error = mnt_want_write(nd.path.mnt);
2718 if (error) 2729 if (error)
2719 goto exit2; 2730 goto exit2;
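Both do_rmdir() and do_unlinkat() above now bail out with -ENOENT when the looked-up dentry turns out to be negative, presumably because the target can disappear between the lookup and this point. A minimal sketch of that guard, with a hypothetical helper name:

static int check_unlink_victim(struct dentry *victim)
{
        /* the dentry may have gone negative under us; report -ENOENT rather
         * than dereferencing a NULL inode further down the path */
        if (!victim->d_inode)
                return -ENOENT;
        return 0;
}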
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
index 18b3e8975fe0..fbb2a5ef5817 100644
--- a/fs/nfsd/Kconfig
+++ b/fs/nfsd/Kconfig
@@ -82,6 +82,7 @@ config NFSD_V4
82 select NFSD_V3 82 select NFSD_V3
83 select FS_POSIX_ACL 83 select FS_POSIX_ACL
84 select SUNRPC_GSS 84 select SUNRPC_GSS
85 select CRYPTO
85 help 86 help
86 This option enables support in your system's NFS server for 87 This option enables support in your system's NFS server for
87 version 4 of the NFS protocol (RFC 3530). 88 version 4 of the NFS protocol (RFC 3530).
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 1f5eae40f34e..2b1449dd2f49 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -13,6 +13,7 @@
13#include <linux/lockd/lockd.h> 13#include <linux/lockd/lockd.h>
14#include <linux/sunrpc/clnt.h> 14#include <linux/sunrpc/clnt.h>
15#include <linux/sunrpc/gss_api.h> 15#include <linux/sunrpc/gss_api.h>
16#include <linux/sunrpc/gss_krb5_enctypes.h>
16 17
17#include "idmap.h" 18#include "idmap.h"
18#include "nfsd.h" 19#include "nfsd.h"
@@ -189,18 +190,10 @@ static struct file_operations export_features_operations = {
189 .release = single_release, 190 .release = single_release,
190}; 191};
191 192
192#ifdef CONFIG_SUNRPC_GSS 193#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
193static int supported_enctypes_show(struct seq_file *m, void *v) 194static int supported_enctypes_show(struct seq_file *m, void *v)
194{ 195{
195 struct gss_api_mech *k5mech; 196 seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
196
197 k5mech = gss_mech_get_by_name("krb5");
198 if (k5mech == NULL)
199 goto out;
200 if (k5mech->gm_upcall_enctypes != NULL)
201 seq_printf(m, k5mech->gm_upcall_enctypes);
202 gss_mech_put(k5mech);
203out:
204 return 0; 197 return 0;
205} 198}
206 199
@@ -215,7 +208,7 @@ static struct file_operations supported_enctypes_ops = {
215 .llseek = seq_lseek, 208 .llseek = seq_lseek,
216 .release = single_release, 209 .release = single_release,
217}; 210};
218#endif /* CONFIG_SUNRPC_GSS */ 211#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
219 212
220extern int nfsd_pool_stats_open(struct inode *inode, struct file *file); 213extern int nfsd_pool_stats_open(struct inode *inode, struct file *file);
221extern int nfsd_pool_stats_release(struct inode *inode, struct file *file); 214extern int nfsd_pool_stats_release(struct inode *inode, struct file *file);
@@ -1427,9 +1420,9 @@ static int nfsd_fill_super(struct super_block * sb, void * data, int silent)
1427 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR}, 1420 [NFSD_Versions] = {"versions", &transaction_ops, S_IWUSR|S_IRUSR},
1428 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO}, 1421 [NFSD_Ports] = {"portlist", &transaction_ops, S_IWUSR|S_IRUGO},
1429 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO}, 1422 [NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
1430#ifdef CONFIG_SUNRPC_GSS 1423#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
1431 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO}, 1424 [NFSD_SupportedEnctypes] = {"supported_krb5_enctypes", &supported_enctypes_ops, S_IRUGO},
1432#endif /* CONFIG_SUNRPC_GSS */ 1425#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
1433#ifdef CONFIG_NFSD_V4 1426#ifdef CONFIG_NFSD_V4
1434 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1427 [NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
1435 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR}, 1428 [NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index d5718273bb32..fd0acca5370a 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -696,7 +696,15 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
696} 696}
697#endif /* CONFIG_NFSD_V3 */ 697#endif /* CONFIG_NFSD_V3 */
698 698
699static int nfsd_open_break_lease(struct inode *inode, int access)
700{
701 unsigned int mode;
699 702
703 if (access & NFSD_MAY_NOT_BREAK_LEASE)
704 return 0;
705 mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
706 return break_lease(inode, mode | O_NONBLOCK);
707}
700 708
701/* 709/*
702 * Open an existing file or directory. 710 * Open an existing file or directory.
@@ -744,12 +752,7 @@ nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
744 if (!inode->i_fop) 752 if (!inode->i_fop)
745 goto out; 753 goto out;
746 754
747 /* 755 host_err = nfsd_open_break_lease(inode, access);
748 * Check to see if there are any leases on this file.
749 * This may block while leases are broken.
750 */
751 if (!(access & NFSD_MAY_NOT_BREAK_LEASE))
752 host_err = break_lease(inode, O_NONBLOCK | ((access & NFSD_MAY_WRITE) ? O_WRONLY : 0));
753 if (host_err) /* NOMEM or WOULDBLOCK */ 756 if (host_err) /* NOMEM or WOULDBLOCK */
754 goto out_nfserr; 757 goto out_nfserr;
755 758
@@ -1660,8 +1663,10 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
1660 if (!dold->d_inode) 1663 if (!dold->d_inode)
1661 goto out_drop_write; 1664 goto out_drop_write;
1662 host_err = nfsd_break_lease(dold->d_inode); 1665 host_err = nfsd_break_lease(dold->d_inode);
1663 if (host_err) 1666 if (host_err) {
1667 err = nfserrno(host_err);
1664 goto out_drop_write; 1668 goto out_drop_write;
1669 }
1665 host_err = vfs_link(dold, dirp, dnew); 1670 host_err = vfs_link(dold, dirp, dnew);
1666 if (!host_err) { 1671 if (!host_err) {
1667 err = nfserrno(commit_metadata(ffhp)); 1672 err = nfserrno(commit_metadata(ffhp));
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 7eafe468a29c..b2e3ff347620 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -1346,6 +1346,11 @@ static void nilfs_btree_shrink(struct nilfs_bmap *btree,
1346 path[level].bp_bh = NULL; 1346 path[level].bp_bh = NULL;
1347} 1347}
1348 1348
1349static void nilfs_btree_nop(struct nilfs_bmap *btree,
1350 struct nilfs_btree_path *path,
1351 int level, __u64 *keyp, __u64 *ptrp)
1352{
1353}
1349 1354
1350static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree, 1355static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1351 struct nilfs_btree_path *path, 1356 struct nilfs_btree_path *path,
@@ -1356,20 +1361,19 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1356 struct buffer_head *bh; 1361 struct buffer_head *bh;
1357 struct nilfs_btree_node *node, *parent, *sib; 1362 struct nilfs_btree_node *node, *parent, *sib;
1358 __u64 sibptr; 1363 __u64 sibptr;
1359 int pindex, level, ncmin, ncmax, ncblk, ret; 1364 int pindex, dindex, level, ncmin, ncmax, ncblk, ret;
1360 1365
1361 ret = 0; 1366 ret = 0;
1362 stats->bs_nblocks = 0; 1367 stats->bs_nblocks = 0;
1363 ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree)); 1368 ncmin = NILFS_BTREE_NODE_NCHILDREN_MIN(nilfs_btree_node_size(btree));
1364 ncblk = nilfs_btree_nchildren_per_block(btree); 1369 ncblk = nilfs_btree_nchildren_per_block(btree);
1365 1370
1366 for (level = NILFS_BTREE_LEVEL_NODE_MIN; 1371 for (level = NILFS_BTREE_LEVEL_NODE_MIN, dindex = path[level].bp_index;
1367 level < nilfs_btree_height(btree) - 1; 1372 level < nilfs_btree_height(btree) - 1;
1368 level++) { 1373 level++) {
1369 node = nilfs_btree_get_nonroot_node(path, level); 1374 node = nilfs_btree_get_nonroot_node(path, level);
1370 path[level].bp_oldreq.bpr_ptr = 1375 path[level].bp_oldreq.bpr_ptr =
1371 nilfs_btree_node_get_ptr(node, path[level].bp_index, 1376 nilfs_btree_node_get_ptr(node, dindex, ncblk);
1372 ncblk);
1373 ret = nilfs_bmap_prepare_end_ptr(btree, 1377 ret = nilfs_bmap_prepare_end_ptr(btree,
1374 &path[level].bp_oldreq, dat); 1378 &path[level].bp_oldreq, dat);
1375 if (ret < 0) 1379 if (ret < 0)
@@ -1383,6 +1387,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1383 1387
1384 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax); 1388 parent = nilfs_btree_get_node(btree, path, level + 1, &ncmax);
1385 pindex = path[level + 1].bp_index; 1389 pindex = path[level + 1].bp_index;
1390 dindex = pindex;
1386 1391
1387 if (pindex > 0) { 1392 if (pindex > 0) {
1388 /* left sibling */ 1393 /* left sibling */
@@ -1421,6 +1426,14 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1421 path[level].bp_sib_bh = bh; 1426 path[level].bp_sib_bh = bh;
1422 path[level].bp_op = nilfs_btree_concat_right; 1427 path[level].bp_op = nilfs_btree_concat_right;
1423 stats->bs_nblocks++; 1428 stats->bs_nblocks++;
1429 /*
1430 * When merging right sibling node
1431 * into the current node, pointer to
1432 * the right sibling node must be
1433 * terminated instead. The adjustment
1434 * below is required for that.
1435 */
1436 dindex = pindex + 1;
1424 /* continue; */ 1437 /* continue; */
1425 } 1438 }
1426 } else { 1439 } else {
@@ -1431,29 +1444,31 @@ static int nilfs_btree_prepare_delete(struct nilfs_bmap *btree,
1431 NILFS_BTREE_ROOT_NCHILDREN_MAX) { 1444 NILFS_BTREE_ROOT_NCHILDREN_MAX) {
1432 path[level].bp_op = nilfs_btree_shrink; 1445 path[level].bp_op = nilfs_btree_shrink;
1433 stats->bs_nblocks += 2; 1446 stats->bs_nblocks += 2;
1447 level++;
1448 path[level].bp_op = nilfs_btree_nop;
1449 goto shrink_root_child;
1434 } else { 1450 } else {
1435 path[level].bp_op = nilfs_btree_do_delete; 1451 path[level].bp_op = nilfs_btree_do_delete;
1436 stats->bs_nblocks++; 1452 stats->bs_nblocks++;
1453 goto out;
1437 } 1454 }
1438
1439 goto out;
1440
1441 } 1455 }
1442 } 1456 }
1443 1457
1458 /* child of the root node is deleted */
1459 path[level].bp_op = nilfs_btree_do_delete;
1460 stats->bs_nblocks++;
1461
1462shrink_root_child:
1444 node = nilfs_btree_get_root(btree); 1463 node = nilfs_btree_get_root(btree);
1445 path[level].bp_oldreq.bpr_ptr = 1464 path[level].bp_oldreq.bpr_ptr =
1446 nilfs_btree_node_get_ptr(node, path[level].bp_index, 1465 nilfs_btree_node_get_ptr(node, dindex,
1447 NILFS_BTREE_ROOT_NCHILDREN_MAX); 1466 NILFS_BTREE_ROOT_NCHILDREN_MAX);
1448 1467
1449 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat); 1468 ret = nilfs_bmap_prepare_end_ptr(btree, &path[level].bp_oldreq, dat);
1450 if (ret < 0) 1469 if (ret < 0)
1451 goto err_out_child_node; 1470 goto err_out_child_node;
1452 1471
1453 /* child of the root node is deleted */
1454 path[level].bp_op = nilfs_btree_do_delete;
1455 stats->bs_nblocks++;
1456
1457 /* success */ 1472 /* success */
1458 out: 1473 out:
1459 *levelp = level; 1474 *levelp = level;
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index b954878ad6ce..b9b45fc2903e 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -801,12 +801,7 @@ out_err:
801 801
802int nilfs_permission(struct inode *inode, int mask, unsigned int flags) 802int nilfs_permission(struct inode *inode, int mask, unsigned int flags)
803{ 803{
804 struct nilfs_root *root; 804 struct nilfs_root *root = NILFS_I(inode)->i_root;
805
806 if (flags & IPERM_FLAG_RCU)
807 return -ECHILD;
808
809 root = NILFS_I(inode)->i_root;
810 if ((mask & MAY_WRITE) && root && 805 if ((mask & MAY_WRITE) && root &&
811 root->cno != NILFS_CPTREE_CURRENT_CNO) 806 root->cno != NILFS_CPTREE_CURRENT_CNO)
812 return -EROFS; /* snapshot is not writable */ 807 return -EROFS; /* snapshot is not writable */
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 141646e88fb5..bb24ab6c282f 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2573,7 +2573,7 @@ static struct nilfs_sc_info *nilfs_segctor_new(struct super_block *sb,
2573 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK; 2573 sci->sc_watermark = NILFS_SC_DEFAULT_WATERMARK;
2574 2574
2575 if (nilfs->ns_interval) 2575 if (nilfs->ns_interval)
2576 sci->sc_interval = nilfs->ns_interval; 2576 sci->sc_interval = HZ * nilfs->ns_interval;
2577 if (nilfs->ns_watermark) 2577 if (nilfs->ns_watermark)
2578 sci->sc_watermark = nilfs->ns_watermark; 2578 sci->sc_watermark = nilfs->ns_watermark;
2579 return sci; 2579 return sci;
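The nilfs2 change above reads as a unit fix: the scaling by HZ indicates that ns_interval (the configured commit interval) is kept in seconds while sc_interval is consumed as a timeout in jiffies. A trivial helper illustrating the conversion (hypothetical name):

static unsigned long seconds_to_jiffies(unsigned long secs)
{
        return secs * HZ;       /* e.g. a 5 s interval becomes 5 * HZ jiffies */
}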
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index cdbaf5e97308..56f61027236b 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1072,7 +1072,7 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
1072 1072
1073 sb->s_magic = OCFS2_SUPER_MAGIC; 1073 sb->s_magic = OCFS2_SUPER_MAGIC;
1074 1074
1075 sb->s_flags = (sb->s_flags & ~MS_POSIXACL) | 1075 sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
1076 ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0); 1076 ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
1077 1077
1078 /* Hard readonly mode only if: bdev_read_only, MS_RDONLY, 1078 /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 14def991d9dd..8a84210ca080 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2169,11 +2169,7 @@ static const struct file_operations proc_fd_operations = {
2169 */ 2169 */
2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags) 2170static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
2171{ 2171{
2172 int rv; 2172 int rv = generic_permission(inode, mask, flags, NULL);
2173
2174 if (flags & IPERM_FLAG_RCU)
2175 return -ECHILD;
2176 rv = generic_permission(inode, mask, flags, NULL);
2177 if (rv == 0) 2173 if (rv == 0)
2178 return 0; 2174 return 0;
2179 if (task_pid(current) == proc_pid(inode)) 2175 if (task_pid(current) == proc_pid(inode))
diff --git a/fs/proc/namespaces.c b/fs/proc/namespaces.c
index 781dec5bd682..be177f702acb 100644
--- a/fs/proc/namespaces.c
+++ b/fs/proc/namespaces.c
@@ -38,18 +38,21 @@ static struct dentry *proc_ns_instantiate(struct inode *dir,
38 struct inode *inode; 38 struct inode *inode;
39 struct proc_inode *ei; 39 struct proc_inode *ei;
40 struct dentry *error = ERR_PTR(-ENOENT); 40 struct dentry *error = ERR_PTR(-ENOENT);
41 void *ns;
41 42
42 inode = proc_pid_make_inode(dir->i_sb, task); 43 inode = proc_pid_make_inode(dir->i_sb, task);
43 if (!inode) 44 if (!inode)
44 goto out; 45 goto out;
45 46
47 ns = ns_ops->get(task);
48 if (!ns)
49 goto out_iput;
50
46 ei = PROC_I(inode); 51 ei = PROC_I(inode);
47 inode->i_mode = S_IFREG|S_IRUSR; 52 inode->i_mode = S_IFREG|S_IRUSR;
48 inode->i_fop = &ns_file_operations; 53 inode->i_fop = &ns_file_operations;
49 ei->ns_ops = ns_ops; 54 ei->ns_ops = ns_ops;
50 ei->ns = ns_ops->get(task); 55 ei->ns = ns;
51 if (!ei->ns)
52 goto out_iput;
53 56
54 dentry->d_op = &pid_dentry_operations; 57 dentry->d_op = &pid_dentry_operations;
55 d_add(dentry, inode); 58 d_add(dentry, inode);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index f50133c11c24..d167de365a8d 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -304,9 +304,6 @@ static int proc_sys_permission(struct inode *inode, int mask,unsigned int flags)
304 struct ctl_table *table; 304 struct ctl_table *table;
305 int error; 305 int error;
306 306
307 if (flags & IPERM_FLAG_RCU)
308 return -ECHILD;
309
310 /* Executable files are not allowed under /proc/sys/ */ 307 /* Executable files are not allowed under /proc/sys/ */
311 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode)) 308 if ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))
312 return -EACCES; 309 return -EACCES;
diff --git a/fs/proc/root.c b/fs/proc/root.c
index a9000e9cfee5..d6c3b416529b 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -28,11 +28,12 @@ static int proc_test_super(struct super_block *sb, void *data)
28 28
29static int proc_set_super(struct super_block *sb, void *data) 29static int proc_set_super(struct super_block *sb, void *data)
30{ 30{
31 struct pid_namespace *ns; 31 int err = set_anon_super(sb, NULL);
32 32 if (!err) {
33 ns = (struct pid_namespace *)data; 33 struct pid_namespace *ns = (struct pid_namespace *)data;
34 sb->s_fs_info = get_pid_ns(ns); 34 sb->s_fs_info = get_pid_ns(ns);
35 return set_anon_super(sb, NULL); 35 }
36 return err;
36} 37}
37 38
38static struct dentry *proc_mount(struct file_system_type *fs_type, 39static struct dentry *proc_mount(struct file_system_type *fs_type,
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index e8a62f41b458..d78089690965 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -954,8 +954,6 @@ static int xattr_mount_check(struct super_block *s)
954 954
955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) 955int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
956{ 956{
957 if (flags & IPERM_FLAG_RCU)
958 return -ECHILD;
959 /* 957 /*
960 * We don't do permission checks on the internal objects. 958 * We don't do permission checks on the internal objects.
961 * Permissions are determined by the "owning" object. 959 * Permissions are determined by the "owning" object.
diff --git a/fs/super.c b/fs/super.c
index c75593953c52..ab3d672db0de 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -822,7 +822,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type,
822 } else { 822 } else {
823 char b[BDEVNAME_SIZE]; 823 char b[BDEVNAME_SIZE];
824 824
825 s->s_flags = flags; 825 s->s_flags = flags | MS_NOSEC;
826 s->s_mode = mode; 826 s->s_mode = mode;
827 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id)); 827 strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
828 sb_set_blocksize(s, block_size(bdev)); 828 sb_set_blocksize(s, block_size(bdev));
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 266895783b47..e34f0d99ea4e 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -95,6 +95,14 @@ static int sysfs_set_super(struct super_block *sb, void *data)
95 return error; 95 return error;
96} 96}
97 97
98static void free_sysfs_super_info(struct sysfs_super_info *info)
99{
100 int type;
101 for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
102 kobj_ns_drop(type, info->ns[type]);
103 kfree(info);
104}
105
98static struct dentry *sysfs_mount(struct file_system_type *fs_type, 106static struct dentry *sysfs_mount(struct file_system_type *fs_type,
99 int flags, const char *dev_name, void *data) 107 int flags, const char *dev_name, void *data)
100{ 108{
@@ -108,11 +116,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
108 return ERR_PTR(-ENOMEM); 116 return ERR_PTR(-ENOMEM);
109 117
110 for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) 118 for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
111 info->ns[type] = kobj_ns_current(type); 119 info->ns[type] = kobj_ns_grab_current(type);
112 120
113 sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info); 121 sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
114 if (IS_ERR(sb) || sb->s_fs_info != info) 122 if (IS_ERR(sb) || sb->s_fs_info != info)
115 kfree(info); 123 free_sysfs_super_info(info);
116 if (IS_ERR(sb)) 124 if (IS_ERR(sb))
117 return ERR_CAST(sb); 125 return ERR_CAST(sb);
118 if (!sb->s_root) { 126 if (!sb->s_root) {
@@ -131,12 +139,11 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
131static void sysfs_kill_sb(struct super_block *sb) 139static void sysfs_kill_sb(struct super_block *sb)
132{ 140{
133 struct sysfs_super_info *info = sysfs_info(sb); 141 struct sysfs_super_info *info = sysfs_info(sb);
134
135 /* Remove the superblock from fs_supers/s_instances 142 /* Remove the superblock from fs_supers/s_instances
136 * so we can't find it, before freeing sysfs_super_info. 143 * so we can't find it, before freeing sysfs_super_info.
137 */ 144 */
138 kill_anon_super(sb); 145 kill_anon_super(sb);
139 kfree(info); 146 free_sysfs_super_info(info);
140} 147}
141 148
142static struct file_system_type sysfs_fs_type = { 149static struct file_system_type sysfs_fs_type = {
@@ -145,28 +152,6 @@ static struct file_system_type sysfs_fs_type = {
145 .kill_sb = sysfs_kill_sb, 152 .kill_sb = sysfs_kill_sb,
146}; 153};
147 154
148void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
149{
150 struct super_block *sb;
151
152 mutex_lock(&sysfs_mutex);
153 spin_lock(&sb_lock);
154 list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
155 struct sysfs_super_info *info = sysfs_info(sb);
156 /*
157 * If we see a superblock on the fs_supers/s_instances
158 * list the unmount has not completed and sb->s_fs_info
159 * points to a valid struct sysfs_super_info.
160 */
161 /* Ignore superblocks with the wrong ns */
162 if (info->ns[type] != ns)
163 continue;
164 info->ns[type] = NULL;
165 }
166 spin_unlock(&sb_lock);
167 mutex_unlock(&sysfs_mutex);
168}
169
170int __init sysfs_init(void) 155int __init sysfs_init(void)
171{ 156{
172 int err = -ENOMEM; 157 int err = -ENOMEM;
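
The sysfs changes above replace a bare kfree(info) with free_sysfs_super_info(),
which first drops every namespace reference the info structure holds, and the
same helper now serves both the sysfs_mount() error path and sysfs_kill_sb().
Below is a small stand-alone C sketch of that "one destructor for both paths"
shape; struct info, make_info() and the FILE handles are purely illustrative
stand-ins, not anything from sysfs:

    #include <stdio.h>
    #include <stdlib.h>

    struct info {
            FILE *log;
            FILE *data;
    };

    /* Releases everything the object owns, then the object itself.  Used on
     * the setup failure path and on normal teardown, so the two cannot drift
     * apart. */
    static void free_info(struct info *info)
    {
            if (info->log)
                    fclose(info->log);
            if (info->data)
                    fclose(info->data);
            free(info);
    }

    static struct info *make_info(void)
    {
            struct info *info = calloc(1, sizeof(*info));

            if (!info)
                    return NULL;
            info->log = fopen("/dev/null", "w");
            info->data = fopen("/dev/null", "w");
            if (!info->log || !info->data) {
                    free_info(info);        /* same helper as normal teardown */
                    return NULL;
            }
            return info;
    }

    int main(void)
    {
            struct info *info = make_info();

            if (info)
                    free_info(info);
            return 0;
    }
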
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index 3d28af31d863..2ed2404f3113 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -136,7 +136,7 @@ struct sysfs_addrm_cxt {
136 * instance). 136 * instance).
137 */ 137 */
138struct sysfs_super_info { 138struct sysfs_super_info {
139 const void *ns[KOBJ_NS_TYPES]; 139 void *ns[KOBJ_NS_TYPES];
140}; 140};
141#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info)) 141#define sysfs_info(SB) ((struct sysfs_super_info *)(SB->s_fs_info))
142extern struct sysfs_dirent sysfs_root; 142extern struct sysfs_dirent sysfs_root;
diff --git a/fs/timerfd.c b/fs/timerfd.c
index f67acbdda5e8..dffeb3795af1 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -61,7 +61,9 @@ static enum hrtimer_restart timerfd_tmrproc(struct hrtimer *htmr)
61 61
62/* 62/*
63 * Called when the clock was set to cancel the timers in the cancel 63 * Called when the clock was set to cancel the timers in the cancel
64 * list. 64 * list. This will wake up processes waiting on these timers. The
65 * wake-up requires ctx->ticks to be non zero, therefore we increment
66 * it before calling wake_up_locked().
65 */ 67 */
66void timerfd_clock_was_set(void) 68void timerfd_clock_was_set(void)
67{ 69{
@@ -76,6 +78,7 @@ void timerfd_clock_was_set(void)
76 spin_lock_irqsave(&ctx->wqh.lock, flags); 78 spin_lock_irqsave(&ctx->wqh.lock, flags);
77 if (ctx->moffs.tv64 != moffs.tv64) { 79 if (ctx->moffs.tv64 != moffs.tv64) {
78 ctx->moffs.tv64 = KTIME_MAX; 80 ctx->moffs.tv64 = KTIME_MAX;
81 ctx->ticks++;
79 wake_up_locked(&ctx->wqh); 82 wake_up_locked(&ctx->wqh);
80 } 83 }
81 spin_unlock_irqrestore(&ctx->wqh.lock, flags); 84 spin_unlock_irqrestore(&ctx->wqh.lock, flags);
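
The extra ctx->ticks++ matters because, as the new comment says, a timerfd
wake-up is only reported to readers and pollers when the tick count is
non-zero; without it, a waiter armed with TFD_TIMER_CANCEL_ON_SET could sleep
through a clock change.  A hedged userspace sketch of the behaviour seen from
the other side of the fd (it assumes a kernel and libc recent enough to
provide TFD_TIMER_CANCEL_ON_SET):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/timerfd.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = { 0 };
            struct timespec now;
            uint64_t ticks;
            int fd = timerfd_create(CLOCK_REALTIME, 0);

            clock_gettime(CLOCK_REALTIME, &now);
            its.it_value.tv_sec = now.tv_sec + 3600;        /* far in the future */

            if (fd < 0 ||
                timerfd_settime(fd, TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET,
                                &its, NULL) < 0) {
                    perror("timerfd");
                    return 1;
            }
            /* Blocks until the timer fires or the system clock is set; in the
             * latter case read() fails with ECANCELED. */
            if (read(fd, &ticks, sizeof(ticks)) < 0 && errno == ECANCELED)
                    printf("clock was set, timer cancelled\n");
            return 0;
    }

Running it and then stepping the system clock from a privileged process should
print the cancellation message; the hunk above is what guarantees the wake-up
that makes the read() return at all.
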
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index b5aeb5a8ebed..529be0582029 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1848,7 +1848,6 @@ static void ubifs_put_super(struct super_block *sb)
1848 bdi_destroy(&c->bdi); 1848 bdi_destroy(&c->bdi);
1849 ubi_close_volume(c->ubi); 1849 ubi_close_volume(c->ubi);
1850 mutex_unlock(&c->umount_mutex); 1850 mutex_unlock(&c->umount_mutex);
1851 kfree(c);
1852} 1851}
1853 1852
1854static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data) 1853static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
@@ -1971,61 +1970,65 @@ static struct ubi_volume_desc *open_ubi(const char *name, int mode)
1971 return ERR_PTR(-EINVAL); 1970 return ERR_PTR(-EINVAL);
1972} 1971}
1973 1972
1974static int ubifs_fill_super(struct super_block *sb, void *data, int silent) 1973static struct ubifs_info *alloc_ubifs_info(struct ubi_volume_desc *ubi)
1975{ 1974{
1976 struct ubi_volume_desc *ubi = sb->s_fs_info;
1977 struct ubifs_info *c; 1975 struct ubifs_info *c;
1978 struct inode *root;
1979 int err;
1980 1976
1981 c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL); 1977 c = kzalloc(sizeof(struct ubifs_info), GFP_KERNEL);
1982 if (!c) 1978 if (c) {
1983 return -ENOMEM; 1979 spin_lock_init(&c->cnt_lock);
1980 spin_lock_init(&c->cs_lock);
1981 spin_lock_init(&c->buds_lock);
1982 spin_lock_init(&c->space_lock);
1983 spin_lock_init(&c->orphan_lock);
1984 init_rwsem(&c->commit_sem);
1985 mutex_init(&c->lp_mutex);
1986 mutex_init(&c->tnc_mutex);
1987 mutex_init(&c->log_mutex);
1988 mutex_init(&c->mst_mutex);
1989 mutex_init(&c->umount_mutex);
1990 mutex_init(&c->bu_mutex);
1991 mutex_init(&c->write_reserve_mutex);
1992 init_waitqueue_head(&c->cmt_wq);
1993 c->buds = RB_ROOT;
1994 c->old_idx = RB_ROOT;
1995 c->size_tree = RB_ROOT;
1996 c->orph_tree = RB_ROOT;
1997 INIT_LIST_HEAD(&c->infos_list);
1998 INIT_LIST_HEAD(&c->idx_gc);
1999 INIT_LIST_HEAD(&c->replay_list);
2000 INIT_LIST_HEAD(&c->replay_buds);
2001 INIT_LIST_HEAD(&c->uncat_list);
2002 INIT_LIST_HEAD(&c->empty_list);
2003 INIT_LIST_HEAD(&c->freeable_list);
2004 INIT_LIST_HEAD(&c->frdi_idx_list);
2005 INIT_LIST_HEAD(&c->unclean_leb_list);
2006 INIT_LIST_HEAD(&c->old_buds);
2007 INIT_LIST_HEAD(&c->orph_list);
2008 INIT_LIST_HEAD(&c->orph_new);
2009 c->no_chk_data_crc = 1;
2010
2011 c->highest_inum = UBIFS_FIRST_INO;
2012 c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
2013
2014 ubi_get_volume_info(ubi, &c->vi);
2015 ubi_get_device_info(c->vi.ubi_num, &c->di);
2016 }
2017 return c;
2018}
1984 2019
1985 spin_lock_init(&c->cnt_lock); 2020static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
1986 spin_lock_init(&c->cs_lock); 2021{
1987 spin_lock_init(&c->buds_lock); 2022 struct ubifs_info *c = sb->s_fs_info;
1988 spin_lock_init(&c->space_lock); 2023 struct inode *root;
1989 spin_lock_init(&c->orphan_lock); 2024 int err;
1990 init_rwsem(&c->commit_sem);
1991 mutex_init(&c->lp_mutex);
1992 mutex_init(&c->tnc_mutex);
1993 mutex_init(&c->log_mutex);
1994 mutex_init(&c->mst_mutex);
1995 mutex_init(&c->umount_mutex);
1996 mutex_init(&c->bu_mutex);
1997 mutex_init(&c->write_reserve_mutex);
1998 init_waitqueue_head(&c->cmt_wq);
1999 c->buds = RB_ROOT;
2000 c->old_idx = RB_ROOT;
2001 c->size_tree = RB_ROOT;
2002 c->orph_tree = RB_ROOT;
2003 INIT_LIST_HEAD(&c->infos_list);
2004 INIT_LIST_HEAD(&c->idx_gc);
2005 INIT_LIST_HEAD(&c->replay_list);
2006 INIT_LIST_HEAD(&c->replay_buds);
2007 INIT_LIST_HEAD(&c->uncat_list);
2008 INIT_LIST_HEAD(&c->empty_list);
2009 INIT_LIST_HEAD(&c->freeable_list);
2010 INIT_LIST_HEAD(&c->frdi_idx_list);
2011 INIT_LIST_HEAD(&c->unclean_leb_list);
2012 INIT_LIST_HEAD(&c->old_buds);
2013 INIT_LIST_HEAD(&c->orph_list);
2014 INIT_LIST_HEAD(&c->orph_new);
2015 c->no_chk_data_crc = 1;
2016 2025
2017 c->vfs_sb = sb; 2026 c->vfs_sb = sb;
2018 c->highest_inum = UBIFS_FIRST_INO;
2019 c->lhead_lnum = c->ltail_lnum = UBIFS_LOG_LNUM;
2020
2021 ubi_get_volume_info(ubi, &c->vi);
2022 ubi_get_device_info(c->vi.ubi_num, &c->di);
2023
2024 /* Re-open the UBI device in read-write mode */ 2027 /* Re-open the UBI device in read-write mode */
2025 c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE); 2028 c->ubi = ubi_open_volume(c->vi.ubi_num, c->vi.vol_id, UBI_READWRITE);
2026 if (IS_ERR(c->ubi)) { 2029 if (IS_ERR(c->ubi)) {
2027 err = PTR_ERR(c->ubi); 2030 err = PTR_ERR(c->ubi);
2028 goto out_free; 2031 goto out;
2029 } 2032 }
2030 2033
2031 /* 2034 /*
@@ -2091,24 +2094,29 @@ out_bdi:
2091 bdi_destroy(&c->bdi); 2094 bdi_destroy(&c->bdi);
2092out_close: 2095out_close:
2093 ubi_close_volume(c->ubi); 2096 ubi_close_volume(c->ubi);
2094out_free: 2097out:
2095 kfree(c);
2096 return err; 2098 return err;
2097} 2099}
2098 2100
2099static int sb_test(struct super_block *sb, void *data) 2101static int sb_test(struct super_block *sb, void *data)
2100{ 2102{
2101 dev_t *dev = data; 2103 struct ubifs_info *c1 = data;
2102 struct ubifs_info *c = sb->s_fs_info; 2104 struct ubifs_info *c = sb->s_fs_info;
2103 2105
2104 return c->vi.cdev == *dev; 2106 return c->vi.cdev == c1->vi.cdev;
2107}
2108
2109static int sb_set(struct super_block *sb, void *data)
2110{
2111 sb->s_fs_info = data;
2112 return set_anon_super(sb, NULL);
2105} 2113}
2106 2114
2107static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags, 2115static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2108 const char *name, void *data) 2116 const char *name, void *data)
2109{ 2117{
2110 struct ubi_volume_desc *ubi; 2118 struct ubi_volume_desc *ubi;
2111 struct ubi_volume_info vi; 2119 struct ubifs_info *c;
2112 struct super_block *sb; 2120 struct super_block *sb;
2113 int err; 2121 int err;
2114 2122
@@ -2125,19 +2133,25 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2125 name, (int)PTR_ERR(ubi)); 2133 name, (int)PTR_ERR(ubi));
2126 return ERR_CAST(ubi); 2134 return ERR_CAST(ubi);
2127 } 2135 }
2128 ubi_get_volume_info(ubi, &vi);
2129 2136
2130 dbg_gen("opened ubi%d_%d", vi.ubi_num, vi.vol_id); 2137 c = alloc_ubifs_info(ubi);
2138 if (!c) {
2139 err = -ENOMEM;
2140 goto out_close;
2141 }
2142
2143 dbg_gen("opened ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
2131 2144
2132 sb = sget(fs_type, &sb_test, &set_anon_super, &vi.cdev); 2145 sb = sget(fs_type, sb_test, sb_set, c);
2133 if (IS_ERR(sb)) { 2146 if (IS_ERR(sb)) {
2134 err = PTR_ERR(sb); 2147 err = PTR_ERR(sb);
2148 kfree(c);
2135 goto out_close; 2149 goto out_close;
2136 } 2150 }
2137 2151
2138 if (sb->s_root) { 2152 if (sb->s_root) {
2139 struct ubifs_info *c1 = sb->s_fs_info; 2153 struct ubifs_info *c1 = sb->s_fs_info;
2140 2154 kfree(c);
2141 /* A new mount point for already mounted UBIFS */ 2155 /* A new mount point for already mounted UBIFS */
2142 dbg_gen("this ubi volume is already mounted"); 2156 dbg_gen("this ubi volume is already mounted");
2143 if (!!(flags & MS_RDONLY) != c1->ro_mount) { 2157 if (!!(flags & MS_RDONLY) != c1->ro_mount) {
@@ -2146,11 +2160,6 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
2146 } 2160 }
2147 } else { 2161 } else {
2148 sb->s_flags = flags; 2162 sb->s_flags = flags;
2149 /*
2150 * Pass 'ubi' to 'fill_super()' in sb->s_fs_info where it is
2151 * replaced by 'c'.
2152 */
2153 sb->s_fs_info = ubi;
2154 err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0); 2163 err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
2155 if (err) 2164 if (err)
2156 goto out_deact; 2165 goto out_deact;
@@ -2170,11 +2179,18 @@ out_close:
2170 return ERR_PTR(err); 2179 return ERR_PTR(err);
2171} 2180}
2172 2181
2182static void kill_ubifs_super(struct super_block *s)
2183{
2184 struct ubifs_info *c = s->s_fs_info;
2185 kill_anon_super(s);
2186 kfree(c);
2187}
2188
2173static struct file_system_type ubifs_fs_type = { 2189static struct file_system_type ubifs_fs_type = {
2174 .name = "ubifs", 2190 .name = "ubifs",
2175 .owner = THIS_MODULE, 2191 .owner = THIS_MODULE,
2176 .mount = ubifs_mount, 2192 .mount = ubifs_mount,
2177 .kill_sb = kill_anon_super, 2193 .kill_sb = kill_ubifs_super,
2178}; 2194};
2179 2195
2180/* 2196/*
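
The UBIFS restructuring above allocates struct ubifs_info up front and hands it
to sget() together with sb_test()/sb_set(), so two mounts of the same UBI
character device share one superblock and the losing caller simply kfree()s its
candidate, while kill_ubifs_super() owns the final kfree() at unmount.  A rough
userspace analogue of that lookup-or-install pattern follows; every name and
the fixed-size table are invented for illustration:

    #include <stddef.h>

    struct sb { int dev; int used; };

    static struct sb table[8];

    static int sb_test(const struct sb *sb, const struct sb *cand)
    {
            return sb->used && sb->dev == cand->dev;
    }

    static void sb_set(struct sb *sb, const struct sb *cand)
    {
            *sb = *cand;
            sb->used = 1;
    }

    /* Return an existing match or install the candidate.  The caller frees
     * its candidate when an existing entry comes back, just as ubifs_mount()
     * kfree()s c when sb->s_root is already set. */
    struct sb *sget_like(const struct sb *cand)
    {
            size_t i;

            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (sb_test(&table[i], cand))
                            return &table[i];
            for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                    if (!table[i].used) {
                            sb_set(&table[i], cand);
                            return &table[i];
                    }
            return NULL;
    }

    int main(void)
    {
            struct sb cand = { .dev = 42 };
            struct sb *first = sget_like(&cand);
            struct sb *again = sget_like(&cand);

            return first == again ? 0 : 1;  /* second call finds the first entry */
    }
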
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index f4213ba1ff85..7f782af286bf 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -131,19 +131,34 @@ xfs_file_fsync(
131{ 131{
132 struct inode *inode = file->f_mapping->host; 132 struct inode *inode = file->f_mapping->host;
133 struct xfs_inode *ip = XFS_I(inode); 133 struct xfs_inode *ip = XFS_I(inode);
134 struct xfs_mount *mp = ip->i_mount;
134 struct xfs_trans *tp; 135 struct xfs_trans *tp;
135 int error = 0; 136 int error = 0;
136 int log_flushed = 0; 137 int log_flushed = 0;
137 138
138 trace_xfs_file_fsync(ip); 139 trace_xfs_file_fsync(ip);
139 140
140 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) 141 if (XFS_FORCED_SHUTDOWN(mp))
141 return -XFS_ERROR(EIO); 142 return -XFS_ERROR(EIO);
142 143
143 xfs_iflags_clear(ip, XFS_ITRUNCATED); 144 xfs_iflags_clear(ip, XFS_ITRUNCATED);
144 145
145 xfs_ioend_wait(ip); 146 xfs_ioend_wait(ip);
146 147
148 if (mp->m_flags & XFS_MOUNT_BARRIER) {
149 /*
150 * If we have an RT and/or log subvolume we need to make sure
151 * to flush the write cache the device used for file data
152 * first. This is to ensure newly written file data make
153 * it to disk before logging the new inode size in case of
154 * an extending write.
155 */
156 if (XFS_IS_REALTIME_INODE(ip))
157 xfs_blkdev_issue_flush(mp->m_rtdev_targp);
158 else if (mp->m_logdev_targp != mp->m_ddev_targp)
159 xfs_blkdev_issue_flush(mp->m_ddev_targp);
160 }
161
147 /* 162 /*
148 * We always need to make sure that the required inode state is safe on 163 * We always need to make sure that the required inode state is safe on
149 * disk. The inode might be clean but we still might need to force the 164 * disk. The inode might be clean but we still might need to force the
@@ -175,9 +190,9 @@ xfs_file_fsync(
175 * updates. The sync transaction will also force the log. 190 * updates. The sync transaction will also force the log.
176 */ 191 */
177 xfs_iunlock(ip, XFS_ILOCK_SHARED); 192 xfs_iunlock(ip, XFS_ILOCK_SHARED);
178 tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS); 193 tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
179 error = xfs_trans_reserve(tp, 0, 194 error = xfs_trans_reserve(tp, 0,
180 XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0); 195 XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
181 if (error) { 196 if (error) {
182 xfs_trans_cancel(tp, 0); 197 xfs_trans_cancel(tp, 0);
183 return -error; 198 return -error;
@@ -209,28 +224,25 @@ xfs_file_fsync(
209 * force the log. 224 * force the log.
210 */ 225 */
211 if (xfs_ipincount(ip)) { 226 if (xfs_ipincount(ip)) {
212 error = _xfs_log_force_lsn(ip->i_mount, 227 error = _xfs_log_force_lsn(mp,
213 ip->i_itemp->ili_last_lsn, 228 ip->i_itemp->ili_last_lsn,
214 XFS_LOG_SYNC, &log_flushed); 229 XFS_LOG_SYNC, &log_flushed);
215 } 230 }
216 xfs_iunlock(ip, XFS_ILOCK_SHARED); 231 xfs_iunlock(ip, XFS_ILOCK_SHARED);
217 } 232 }
218 233
219 if (ip->i_mount->m_flags & XFS_MOUNT_BARRIER) { 234 /*
220 /* 235 * If we only have a single device, and the log force about was
221 * If the log write didn't issue an ordered tag we need 236 * a no-op we might have to flush the data device cache here.
222 * to flush the disk cache for the data device now. 237 * This can only happen for fdatasync/O_DSYNC if we were overwriting
223 */ 238 * an already allocated file and thus do not have any metadata to
224 if (!log_flushed) 239 * commit.
225 xfs_blkdev_issue_flush(ip->i_mount->m_ddev_targp); 240 */
226 241 if ((mp->m_flags & XFS_MOUNT_BARRIER) &&
227 /* 242 mp->m_logdev_targp == mp->m_ddev_targp &&
228 * If this inode is on the RT dev we need to flush that 243 !XFS_IS_REALTIME_INODE(ip) &&
229 * cache as well. 244 !log_flushed)
230 */ 245 xfs_blkdev_issue_flush(mp->m_ddev_targp);
231 if (XFS_IS_REALTIME_INODE(ip))
232 xfs_blkdev_issue_flush(ip->i_mount->m_rtdev_targp);
233 }
234 246
235 return -error; 247 return -error;
236} 248}
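
The new comments in this file spell out the case the reshuffle cares about: an
fdatasync()/O_DSYNC overwrite of already-allocated blocks generates no metadata
to log, so no log force happens and the filesystem itself has to remember to
flush the data device's volatile write cache.  Purely as orientation, here is
what that workload looks like from userspace; this is plain POSIX, nothing
XFS-specific, and 'datafile' is assumed to already exist with its blocks
allocated:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            int fd = open("datafile", O_WRONLY);    /* pre-existing file */

            if (fd < 0) { perror("open"); return 1; }
            memset(buf, 'x', sizeof(buf));
            if (pwrite(fd, buf, sizeof(buf), 0) != (ssize_t)sizeof(buf) ||
                fdatasync(fd) < 0) {        /* overwrite in place, no size change */
                    perror("overwrite");
                    return 1;
            }
            close(fd);
            return 0;
    }
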
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index dd21784525a8..d44d92cd12b1 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -182,7 +182,7 @@ xfs_vn_mknod(
182 if (IS_POSIXACL(dir)) { 182 if (IS_POSIXACL(dir)) {
183 default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT); 183 default_acl = xfs_get_acl(dir, ACL_TYPE_DEFAULT);
184 if (IS_ERR(default_acl)) 184 if (IS_ERR(default_acl))
185 return -PTR_ERR(default_acl); 185 return PTR_ERR(default_acl);
186 186
187 if (!default_acl) 187 if (!default_acl)
188 mode &= ~current_umask(); 188 mode &= ~current_umask();
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 1e3a7ce804dc..a1a881e68a9a 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -627,68 +627,6 @@ xfs_blkdev_put(
627 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 627 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
628} 628}
629 629
630/*
631 * Try to write out the superblock using barriers.
632 */
633STATIC int
634xfs_barrier_test(
635 xfs_mount_t *mp)
636{
637 xfs_buf_t *sbp = xfs_getsb(mp, 0);
638 int error;
639
640 XFS_BUF_UNDONE(sbp);
641 XFS_BUF_UNREAD(sbp);
642 XFS_BUF_UNDELAYWRITE(sbp);
643 XFS_BUF_WRITE(sbp);
644 XFS_BUF_UNASYNC(sbp);
645 XFS_BUF_ORDERED(sbp);
646
647 xfsbdstrat(mp, sbp);
648 error = xfs_buf_iowait(sbp);
649
650 /*
651 * Clear all the flags we set and possible error state in the
652 * buffer. We only did the write to try out whether barriers
653 * worked and shouldn't leave any traces in the superblock
654 * buffer.
655 */
656 XFS_BUF_DONE(sbp);
657 XFS_BUF_ERROR(sbp, 0);
658 XFS_BUF_UNORDERED(sbp);
659
660 xfs_buf_relse(sbp);
661 return error;
662}
663
664STATIC void
665xfs_mountfs_check_barriers(xfs_mount_t *mp)
666{
667 int error;
668
669 if (mp->m_logdev_targp != mp->m_ddev_targp) {
670 xfs_notice(mp,
671 "Disabling barriers, not supported with external log device");
672 mp->m_flags &= ~XFS_MOUNT_BARRIER;
673 return;
674 }
675
676 if (xfs_readonly_buftarg(mp->m_ddev_targp)) {
677 xfs_notice(mp,
678 "Disabling barriers, underlying device is readonly");
679 mp->m_flags &= ~XFS_MOUNT_BARRIER;
680 return;
681 }
682
683 error = xfs_barrier_test(mp);
684 if (error) {
685 xfs_notice(mp,
686 "Disabling barriers, trial barrier write failed");
687 mp->m_flags &= ~XFS_MOUNT_BARRIER;
688 return;
689 }
690}
691
692void 630void
693xfs_blkdev_issue_flush( 631xfs_blkdev_issue_flush(
694 xfs_buftarg_t *buftarg) 632 xfs_buftarg_t *buftarg)
@@ -1240,14 +1178,6 @@ xfs_fs_remount(
1240 switch (token) { 1178 switch (token) {
1241 case Opt_barrier: 1179 case Opt_barrier:
1242 mp->m_flags |= XFS_MOUNT_BARRIER; 1180 mp->m_flags |= XFS_MOUNT_BARRIER;
1243
1244 /*
1245 * Test if barriers are actually working if we can,
1246 * else delay this check until the filesystem is
1247 * marked writeable.
1248 */
1249 if (!(mp->m_flags & XFS_MOUNT_RDONLY))
1250 xfs_mountfs_check_barriers(mp);
1251 break; 1181 break;
1252 case Opt_nobarrier: 1182 case Opt_nobarrier:
1253 mp->m_flags &= ~XFS_MOUNT_BARRIER; 1183 mp->m_flags &= ~XFS_MOUNT_BARRIER;
@@ -1282,8 +1212,6 @@ xfs_fs_remount(
1282 /* ro -> rw */ 1212 /* ro -> rw */
1283 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) { 1213 if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
1284 mp->m_flags &= ~XFS_MOUNT_RDONLY; 1214 mp->m_flags &= ~XFS_MOUNT_RDONLY;
1285 if (mp->m_flags & XFS_MOUNT_BARRIER)
1286 xfs_mountfs_check_barriers(mp);
1287 1215
1288 /* 1216 /*
1289 * If this is the first remount to writeable state we 1217 * If this is the first remount to writeable state we
@@ -1465,9 +1393,6 @@ xfs_fs_fill_super(
1465 if (error) 1393 if (error)
1466 goto out_free_sb; 1394 goto out_free_sb;
1467 1395
1468 if (mp->m_flags & XFS_MOUNT_BARRIER)
1469 xfs_mountfs_check_barriers(mp);
1470
1471 error = xfs_filestream_mount(mp); 1396 error = xfs_filestream_mount(mp);
1472 if (error) 1397 if (error)
1473 goto out_free_sb; 1398 goto out_free_sb;
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 211930246f20..41d5b8f2bf92 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1372,8 +1372,17 @@ xlog_sync(xlog_t *log,
1372 XFS_BUF_ASYNC(bp); 1372 XFS_BUF_ASYNC(bp);
1373 bp->b_flags |= XBF_LOG_BUFFER; 1373 bp->b_flags |= XBF_LOG_BUFFER;
1374 1374
1375 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1375 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1376 /*
1377 * If we have an external log device, flush the data device
1378 * before flushing the log to make sure all meta data
1379 * written back from the AIL actually made it to disk
1380 * before writing out the new log tail LSN in the log buffer.
1381 */
1382 if (log->l_mp->m_logdev_targp != log->l_mp->m_ddev_targp)
1383 xfs_blkdev_issue_flush(log->l_mp->m_ddev_targp);
1376 XFS_BUF_ORDERED(bp); 1384 XFS_BUF_ORDERED(bp);
1385 }
1377 1386
1378 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1387 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1379 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1388 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index fcdcb5d5c995..d494001b1226 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -170,16 +170,6 @@ extern int __gpio_cansleep(unsigned gpio);
170 170
171extern int __gpio_to_irq(unsigned gpio); 171extern int __gpio_to_irq(unsigned gpio);
172 172
173#define GPIOF_DIR_OUT (0 << 0)
174#define GPIOF_DIR_IN (1 << 0)
175
176#define GPIOF_INIT_LOW (0 << 1)
177#define GPIOF_INIT_HIGH (1 << 1)
178
179#define GPIOF_IN (GPIOF_DIR_IN)
180#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
181#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
182
183/** 173/**
184 * struct gpio - a structure describing a GPIO with configuration 174 * struct gpio - a structure describing a GPIO with configuration
185 * @gpio: the GPIO number 175 * @gpio: the GPIO number
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e9b8e5926bef..76bff2bff15e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -88,7 +88,7 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
88 pmd_t pmd = *pmdp; 88 pmd_t pmd = *pmdp;
89 pmd_clear(mm, address, pmdp); 89 pmd_clear(mm, address, pmdp);
90 return pmd; 90 return pmd;
91}) 91}
92#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 92#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
93#endif 93#endif
94 94
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 9573e0ce3120..33d12f87f0e0 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -520,6 +520,8 @@ struct drm_connector {
520 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER]; 520 uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
521 uint32_t force_encoder_id; 521 uint32_t force_encoder_id;
522 struct drm_encoder *encoder; /* currently active encoder */ 522 struct drm_encoder *encoder; /* currently active encoder */
523
524 int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
523}; 525};
524 526
525/** 527/**
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index f04b2a3b0f49..e08f344c6cff 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -467,6 +467,17 @@
467 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 467 {0x1002, 0x9614, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
468 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 468 {0x1002, 0x9615, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
469 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 469 {0x1002, 0x9616, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS780|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
470 {0x1002, 0x9640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
471 {0x1002, 0x9641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
472 {0x1002, 0x9642, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
473 {0x1002, 0x9643, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
474 {0x1002, 0x9644, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
475 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
476 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
477 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
478 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
479 {0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
480 {0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
470 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 481 {0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
471 {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 482 {0x1002, 0x9711, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
472 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 483 {0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h
index 1ae12710d732..98999cf107ce 100644
--- a/include/linux/basic_mmio_gpio.h
+++ b/include/linux/basic_mmio_gpio.h
@@ -16,6 +16,7 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/compiler.h> 18#include <linux/compiler.h>
19#include <linux/spinlock_types.h>
19 20
20struct bgpio_pdata { 21struct bgpio_pdata {
21 int base; 22 int base;
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index d4646b48dc4a..18a1baf31f2d 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -188,6 +188,7 @@ struct clocksource {
188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG 188#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
189 /* Watchdog related data, used by the framework */ 189 /* Watchdog related data, used by the framework */
190 struct list_head wd_list; 190 struct list_head wd_list;
191 cycle_t cs_last;
191 cycle_t wd_last; 192 cycle_t wd_last;
192#endif 193#endif
193} ____cacheline_aligned; 194} ____cacheline_aligned;
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 0b0d9c39ed67..7aad1f440867 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -2,8 +2,16 @@
2#include <linux/fs.h> 2#include <linux/fs.h>
3 3
4#ifdef CONFIG_CGROUP_DEVICE 4#ifdef CONFIG_CGROUP_DEVICE
5extern int devcgroup_inode_permission(struct inode *inode, int mask); 5extern int __devcgroup_inode_permission(struct inode *inode, int mask);
6extern int devcgroup_inode_mknod(int mode, dev_t dev); 6extern int devcgroup_inode_mknod(int mode, dev_t dev);
7static inline int devcgroup_inode_permission(struct inode *inode, int mask)
8{
9 if (likely(!inode->i_rdev))
10 return 0;
11 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
12 return 0;
13 return __devcgroup_inode_permission(inode, mask);
14}
7#else 15#else
8static inline int devcgroup_inode_permission(struct inode *inode, int mask) 16static inline int devcgroup_inode_permission(struct inode *inode, int mask)
9{ return 0; } 17{ return 0; }
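
The header change keeps the cheap disqualifying tests (no i_rdev, not a block
or character device) in a static inline wrapper, so the overwhelmingly common
non-device case never calls into the device-cgroup code.  The same shape in
ordinary userspace C, with stat() used purely for illustration:

    #include <stdio.h>
    #include <sys/stat.h>

    /* Stand-in for the out-of-line slow path (__devcgroup_inode_permission
     * in the kernel); here it just pretends to consult an access list. */
    static int slow_path_check(const struct stat *st)
    {
            (void)st;
            return 0;
    }

    /* Inline fast path: anything that is not a device node is allowed
     * without ever reaching the slow path. */
    static inline int device_check(const struct stat *st)
    {
            if (!S_ISBLK(st->st_mode) && !S_ISCHR(st->st_mode))
                    return 0;
            return slow_path_check(st);
    }

    int main(void)
    {
            struct stat st;

            if (stat("/dev/null", &st) == 0)
                    printf("device check returned %d\n", device_check(&st));
            return 0;
    }
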
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index dfd34934213d..048d0fa38d03 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -268,7 +268,7 @@ struct ethtool_pauseparam {
268 __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ 268 __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
269 269
270 /* If the link is being auto-negotiated (via ethtool_cmd.autoneg 270 /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
271 * being true) the user may set 'autonet' here non-zero to have the 271 * being true) the user may set 'autoneg' here non-zero to have the
272 * pause parameters be auto-negotiated too. In such a case, the 272 * pause parameters be auto-negotiated too. In such a case, the
273 * {rx,tx}_pause values below determine what capabilities are 273 * {rx,tx}_pause values below determine what capabilities are
274 * advertised. 274 * advertised.
@@ -798,7 +798,7 @@ bool ethtool_invalid_flags(struct net_device *dev, u32 data, u32 supported);
798 * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums 798 * @get_tx_csum: Deprecated as redundant. Report whether transmit checksums
799 * are turned on or off. 799 * are turned on or off.
800 * @set_tx_csum: Deprecated in favour of generic netdev features. Turn 800 * @set_tx_csum: Deprecated in favour of generic netdev features. Turn
801 * transmit checksums on or off. Returns a egative error code or zero. 801 * transmit checksums on or off. Returns a negative error code or zero.
802 * @get_sg: Deprecated as redundant. Report whether scatter-gather is 802 * @get_sg: Deprecated as redundant. Report whether scatter-gather is
803 * enabled. 803 * enabled.
804 * @set_sg: Deprecated in favour of generic netdev features. Turn 804 * @set_sg: Deprecated in favour of generic netdev features. Turn
@@ -1072,7 +1072,7 @@ struct ethtool_ops {
1072/* The following are all involved in forcing a particular link 1072/* The following are all involved in forcing a particular link
1073 * mode for the device for setting things. When getting the 1073 * mode for the device for setting things. When getting the
1074 * devices settings, these indicate the current mode and whether 1074 * devices settings, these indicate the current mode and whether
1075 * it was foced up into this mode or autonegotiated. 1075 * it was forced up into this mode or autonegotiated.
1076 */ 1076 */
1077 1077
1078/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */ 1078/* The forced speed, 10Mb, 100Mb, gigabit, 2.5Gb, 10GbE. */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c55d6b7cd5d6..6e73e2e9ae33 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -208,6 +208,7 @@ struct inodes_stat_t {
208#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ 208#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
209#define MS_I_VERSION (1<<23) /* Update inode I_version field */ 209#define MS_I_VERSION (1<<23) /* Update inode I_version field */
210#define MS_STRICTATIME (1<<24) /* Always perform atime updates */ 210#define MS_STRICTATIME (1<<24) /* Always perform atime updates */
211#define MS_NOSEC (1<<28)
211#define MS_BORN (1<<29) 212#define MS_BORN (1<<29)
212#define MS_ACTIVE (1<<30) 213#define MS_ACTIVE (1<<30)
213#define MS_NOUSER (1<<31) 214#define MS_NOUSER (1<<31)
@@ -743,9 +744,13 @@ struct inode {
743 744
744 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 745 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
745 unsigned int i_flags; 746 unsigned int i_flags;
747 unsigned long i_state;
748#ifdef CONFIG_SECURITY
749 void *i_security;
750#endif
746 struct mutex i_mutex; 751 struct mutex i_mutex;
747 752
748 unsigned long i_state; 753
749 unsigned long dirtied_when; /* jiffies of first dirtying */ 754 unsigned long dirtied_when; /* jiffies of first dirtying */
750 755
751 struct hlist_node i_hash; 756 struct hlist_node i_hash;
@@ -797,9 +802,6 @@ struct inode {
797 atomic_t i_readcount; /* struct files open RO */ 802 atomic_t i_readcount; /* struct files open RO */
798#endif 803#endif
799 atomic_t i_writecount; 804 atomic_t i_writecount;
800#ifdef CONFIG_SECURITY
801 void *i_security;
802#endif
803#ifdef CONFIG_FS_POSIX_ACL 805#ifdef CONFIG_FS_POSIX_ACL
804 struct posix_acl *i_acl; 806 struct posix_acl *i_acl;
805 struct posix_acl *i_default_acl; 807 struct posix_acl *i_default_acl;
@@ -2591,7 +2593,7 @@ static inline int is_sxid(mode_t mode)
2591 2593
2592static inline void inode_has_no_xattr(struct inode *inode) 2594static inline void inode_has_no_xattr(struct inode *inode)
2593{ 2595{
2594 if (!is_sxid(inode->i_mode)) 2596 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
2595 inode->i_flags |= S_NOSEC; 2597 inode->i_flags |= S_NOSEC;
2596} 2598}
2597 2599
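
The MS_NOSEC/S_NOSEC pair added here lets a filesystem cache the fact that a
write cannot need to strip setuid/setgid bits or security attributes, and only
when the superblock opts in (mount_bdev() sets MS_NOSEC, as seen earlier in the
fs/super.c hunk).  For context, the rule being cached is visible from
userspace: an ordinary write by an unprivileged owner clears the setuid bit.
A rough demonstration, assuming the caller owns the file and lacks CAP_FSETID
(it creates ./suidfile in the current directory):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            struct stat st;
            int fd = open("suidfile", O_WRONLY | O_CREAT, 0755);

            if (fd < 0) { perror("open"); return 1; }
            fchmod(fd, 04755);                      /* mark the file setuid */
            if (write(fd, "x", 1) != 1)             /* a plain data write... */
                    perror("write");
            fstat(fd, &st);
            printf("mode after write: %o\n", st.st_mode & 07777);  /* suid gone */
            close(fd);
            return 0;
    }
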
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 32d47e710661..17b5a0d80e42 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -3,6 +3,17 @@
3 3
4/* see Documentation/gpio.txt */ 4/* see Documentation/gpio.txt */
5 5
6/* make these flag values available regardless of GPIO kconfig options */
7#define GPIOF_DIR_OUT (0 << 0)
8#define GPIOF_DIR_IN (1 << 0)
9
10#define GPIOF_INIT_LOW (0 << 1)
11#define GPIOF_INIT_HIGH (1 << 1)
12
13#define GPIOF_IN (GPIOF_DIR_IN)
14#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW)
15#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH)
16
6#ifdef CONFIG_GENERIC_GPIO 17#ifdef CONFIG_GENERIC_GPIO
7#include <asm/gpio.h> 18#include <asm/gpio.h>
8 19
diff --git a/include/linux/i2c/adp8870.h b/include/linux/i2c/adp8870.h
new file mode 100644
index 000000000000..624dceccbd5b
--- /dev/null
+++ b/include/linux/i2c/adp8870.h
@@ -0,0 +1,153 @@
1/*
2 * Definitions and platform data for Analog Devices
3 * Backlight drivers ADP8870
4 *
5 * Copyright 2009-2010 Analog Devices Inc.
6 *
7 * Licensed under the GPL-2 or later.
8 */
9
10#ifndef __LINUX_I2C_ADP8870_H
11#define __LINUX_I2C_ADP8870_H
12
13#define ID_ADP8870 8870
14
15#define ADP8870_MAX_BRIGHTNESS 0x7F
16#define FLAG_OFFT_SHIFT 8
17
18/*
19 * LEDs subdevice platform data
20 */
21
22#define ADP8870_LED_DIS_BLINK (0 << FLAG_OFFT_SHIFT)
23#define ADP8870_LED_OFFT_600ms (1 << FLAG_OFFT_SHIFT)
24#define ADP8870_LED_OFFT_1200ms (2 << FLAG_OFFT_SHIFT)
25#define ADP8870_LED_OFFT_1800ms (3 << FLAG_OFFT_SHIFT)
26
27#define ADP8870_LED_ONT_200ms 0
28#define ADP8870_LED_ONT_600ms 1
29#define ADP8870_LED_ONT_800ms 2
30#define ADP8870_LED_ONT_1200ms 3
31
32#define ADP8870_LED_D7 (7)
33#define ADP8870_LED_D6 (6)
34#define ADP8870_LED_D5 (5)
35#define ADP8870_LED_D4 (4)
36#define ADP8870_LED_D3 (3)
37#define ADP8870_LED_D2 (2)
38#define ADP8870_LED_D1 (1)
39
40/*
41 * Backlight subdevice platform data
42 */
43
44#define ADP8870_BL_D7 (1 << 6)
45#define ADP8870_BL_D6 (1 << 5)
46#define ADP8870_BL_D5 (1 << 4)
47#define ADP8870_BL_D4 (1 << 3)
48#define ADP8870_BL_D3 (1 << 2)
49#define ADP8870_BL_D2 (1 << 1)
50#define ADP8870_BL_D1 (1 << 0)
51
52#define ADP8870_FADE_T_DIS 0 /* Fade Timer Disabled */
53#define ADP8870_FADE_T_300ms 1 /* 0.3 Sec */
54#define ADP8870_FADE_T_600ms 2
55#define ADP8870_FADE_T_900ms 3
56#define ADP8870_FADE_T_1200ms 4
57#define ADP8870_FADE_T_1500ms 5
58#define ADP8870_FADE_T_1800ms 6
59#define ADP8870_FADE_T_2100ms 7
60#define ADP8870_FADE_T_2400ms 8
61#define ADP8870_FADE_T_2700ms 9
62#define ADP8870_FADE_T_3000ms 10
63#define ADP8870_FADE_T_3500ms 11
64#define ADP8870_FADE_T_4000ms 12
65#define ADP8870_FADE_T_4500ms 13
66#define ADP8870_FADE_T_5000ms 14
67#define ADP8870_FADE_T_5500ms 15 /* 5.5 Sec */
68
69#define ADP8870_FADE_LAW_LINEAR 0
70#define ADP8870_FADE_LAW_SQUARE 1
71#define ADP8870_FADE_LAW_CUBIC1 2
72#define ADP8870_FADE_LAW_CUBIC2 3
73
74#define ADP8870_BL_AMBL_FILT_80ms 0 /* Light sensor filter time */
75#define ADP8870_BL_AMBL_FILT_160ms 1
76#define ADP8870_BL_AMBL_FILT_320ms 2
77#define ADP8870_BL_AMBL_FILT_640ms 3
78#define ADP8870_BL_AMBL_FILT_1280ms 4
79#define ADP8870_BL_AMBL_FILT_2560ms 5
80#define ADP8870_BL_AMBL_FILT_5120ms 6
81#define ADP8870_BL_AMBL_FILT_10240ms 7 /* 10.24 sec */
82
83/*
84 * Blacklight current 0..30mA
85 */
86#define ADP8870_BL_CUR_mA(I) ((I * 127) / 30)
87
88/*
89 * L2 comparator current 0..1106uA
90 */
91#define ADP8870_L2_COMP_CURR_uA(I) ((I * 255) / 1106)
92
93/*
94 * L3 comparator current 0..551uA
95 */
96#define ADP8870_L3_COMP_CURR_uA(I) ((I * 255) / 551)
97
98/*
99 * L4 comparator current 0..275uA
100 */
101#define ADP8870_L4_COMP_CURR_uA(I) ((I * 255) / 275)
102
103/*
104 * L5 comparator current 0..138uA
105 */
106#define ADP8870_L5_COMP_CURR_uA(I) ((I * 255) / 138)
107
108struct adp8870_backlight_platform_data {
109 u8 bl_led_assign; /* 1 = Backlight 0 = Individual LED */
110 u8 pwm_assign; /* 1 = Enables PWM mode */
111
112 u8 bl_fade_in; /* Backlight Fade-In Timer */
113 u8 bl_fade_out; /* Backlight Fade-Out Timer */
114 u8 bl_fade_law; /* fade-on/fade-off transfer characteristic */
115
116 u8 en_ambl_sens; /* 1 = enable ambient light sensor */
117 u8 abml_filt; /* Light sensor filter time */
118
119 u8 l1_daylight_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
120 u8 l1_daylight_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
121 u8 l2_bright_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
122 u8 l2_bright_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
123 u8 l3_office_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
124 u8 l3_office_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
125 u8 l4_indoor_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
126 u8 l4_indor_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
127 u8 l5_dark_max; /* use BL_CUR_mA(I) 0 <= I <= 30 mA */
128 u8 l5_dark_dim; /* typ = 0, use BL_CUR_mA(I) 0 <= I <= 30 mA */
129
130 u8 l2_trip; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
131 u8 l2_hyst; /* use L2_COMP_CURR_uA(I) 0 <= I <= 1106 uA */
132 u8 l3_trip; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
133 u8 l3_hyst; /* use L3_COMP_CURR_uA(I) 0 <= I <= 551 uA */
134 u8 l4_trip; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
135 u8 l4_hyst; /* use L4_COMP_CURR_uA(I) 0 <= I <= 275 uA */
136 u8 l5_trip; /* use L5_COMP_CURR_uA(I) 0 <= I <= 138 uA */
137 u8 l5_hyst; /* use L6_COMP_CURR_uA(I) 0 <= I <= 138 uA */
138
139 /**
140 * Independent Current Sinks / LEDS
141 * Sinks not assigned to the Backlight can be exposed to
142 * user space using the LEDS CLASS interface
143 */
144
145 int num_leds;
146 struct led_info *leds;
147 u8 led_fade_in; /* LED Fade-In Timer */
148 u8 led_fade_out; /* LED Fade-Out Timer */
149 u8 led_fade_law; /* fade-on/fade-off transfer characteristic */
150 u8 led_on_time;
151};
152
153#endif /* __LINUX_I2C_ADP8870_H */
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index 6d66ce1791a9..7b318630139f 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -62,6 +62,7 @@ struct tpacket_auxdata {
62 __u16 tp_mac; 62 __u16 tp_mac;
63 __u16 tp_net; 63 __u16 tp_net;
64 __u16 tp_vlan_tci; 64 __u16 tp_vlan_tci;
65 __u16 tp_padding;
65}; 66};
66 67
67/* Rx ring - header status */ 68/* Rx ring - header status */
@@ -101,6 +102,7 @@ struct tpacket2_hdr {
101 __u32 tp_sec; 102 __u32 tp_sec;
102 __u32 tp_nsec; 103 __u32 tp_nsec;
103 __u16 tp_vlan_tci; 104 __u16 tp_vlan_tci;
105 __u16 tp_padding;
104}; 106};
105 107
106#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll)) 108#define TPACKET2_HDRLEN (TPACKET_ALIGN(sizeof(struct tpacket2_hdr)) + sizeof(struct sockaddr_ll))
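
The new tp_padding members name the two bytes of trailing alignment padding the
compiler already inserts after tp_vlan_tci; that documents the layout handed to
userspace and gives the kernel a field it can zero explicitly instead of
leaving compiler padding uninitialised.  A quick way to see such a hole in any
C struct (illustrative layout only, not the tpacket structures themselves):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct example {
            uint32_t a;
            uint16_t b;
            uint16_t c;
            uint16_t d;     /* without a named pad, two hidden bytes follow here */
    };

    int main(void)
    {
            size_t end_of_d = offsetof(struct example, d) + sizeof(uint16_t);

            printf("last member ends at %zu, sizeof is %zu\n",
                   end_of_d, sizeof(struct example));   /* 10 vs 12 on common ABIs */
            return 0;
    }
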
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index dc01681fbb42..affa27380b72 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -225,7 +225,7 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
225} 225}
226 226
227/** 227/**
228 * __vlan_put_tag - regular VLAN tag inserting 228 * vlan_insert_tag - regular VLAN tag inserting
229 * @skb: skbuff to tag 229 * @skb: skbuff to tag
230 * @vlan_tci: VLAN TCI to insert 230 * @vlan_tci: VLAN TCI to insert
231 * 231 *
@@ -234,8 +234,10 @@ static inline int vlan_hwaccel_receive_skb(struct sk_buff *skb,
234 * 234 *
235 * Following the skb_unshare() example, in case of error, the calling function 235 * Following the skb_unshare() example, in case of error, the calling function
236 * doesn't have to worry about freeing the original skb. 236 * doesn't have to worry about freeing the original skb.
237 *
238 * Does not change skb->protocol so this function can be used during receive.
237 */ 239 */
238static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci) 240static inline struct sk_buff *vlan_insert_tag(struct sk_buff *skb, u16 vlan_tci)
239{ 241{
240 struct vlan_ethhdr *veth; 242 struct vlan_ethhdr *veth;
241 243
@@ -255,8 +257,25 @@ static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
255 /* now, the TCI */ 257 /* now, the TCI */
256 veth->h_vlan_TCI = htons(vlan_tci); 258 veth->h_vlan_TCI = htons(vlan_tci);
257 259
258 skb->protocol = htons(ETH_P_8021Q); 260 return skb;
261}
259 262
263/**
264 * __vlan_put_tag - regular VLAN tag inserting
265 * @skb: skbuff to tag
266 * @vlan_tci: VLAN TCI to insert
267 *
268 * Inserts the VLAN tag into @skb as part of the payload
269 * Returns a VLAN tagged skb. If a new skb is created, @skb is freed.
270 *
271 * Following the skb_unshare() example, in case of error, the calling function
272 * doesn't have to worry about freeing the original skb.
273 */
274static inline struct sk_buff *__vlan_put_tag(struct sk_buff *skb, u16 vlan_tci)
275{
276 skb = vlan_insert_tag(skb, vlan_tci);
277 if (skb)
278 skb->protocol = htons(ETH_P_8021Q);
260 return skb; 279 return skb;
261} 280}
262 281
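
Splitting vlan_insert_tag() out of __vlan_put_tag() separates the byte-level
work (slide the payload down four bytes and write TPID 0x8100 plus the TCI
after the MAC addresses) from the transmit-only step of switching
skb->protocol, so the receive path can reuse the former without the latter.
Here is the byte-level step sketched on a plain buffer outside the kernel; the
caller is assumed to guarantee four spare bytes of room:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN 6

    /* Insert an 802.1Q tag after the destination and source MAC addresses. */
    size_t insert_vlan_tag(uint8_t *frame, size_t len, uint16_t tci)
    {
            uint16_t tpid = htons(0x8100), tci_be = htons(tci);

            /* move everything after the two MAC addresses down by 4 bytes */
            memmove(frame + 2 * ETH_ALEN + 4, frame + 2 * ETH_ALEN,
                    len - 2 * ETH_ALEN);
            memcpy(frame + 2 * ETH_ALEN, &tpid, 2);
            memcpy(frame + 2 * ETH_ALEN + 2, &tci_be, 2);
            return len + 4;
    }

    int main(void)
    {
            uint8_t frame[64] = { 0 };      /* dst MAC, src MAC, type, payload */

            return insert_vlan_tag(frame, 60, 5) == 64 ? 0 : 1;
    }
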
diff --git a/include/linux/input/sh_keysc.h b/include/linux/input/sh_keysc.h
index 649dc7f12925..5d253cd93691 100644
--- a/include/linux/input/sh_keysc.h
+++ b/include/linux/input/sh_keysc.h
@@ -1,7 +1,7 @@
1#ifndef __SH_KEYSC_H__ 1#ifndef __SH_KEYSC_H__
2#define __SH_KEYSC_H__ 2#define __SH_KEYSC_H__
3 3
4#define SH_KEYSC_MAXKEYS 49 4#define SH_KEYSC_MAXKEYS 64
5 5
6struct sh_keysc_info { 6struct sh_keysc_info {
7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3, 7 enum { SH_KEYSC_MODE_1, SH_KEYSC_MODE_2, SH_KEYSC_MODE_3,
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6c12989839d9..f6efed0039ed 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -414,6 +414,7 @@ enum
414 TASKLET_SOFTIRQ, 414 TASKLET_SOFTIRQ,
415 SCHED_SOFTIRQ, 415 SCHED_SOFTIRQ,
416 HRTIMER_SOFTIRQ, 416 HRTIMER_SOFTIRQ,
417 RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
417 418
418 NR_SOFTIRQS 419 NR_SOFTIRQS
419}; 420};
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 819acaaac3f5..714ba08dc092 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -8,9 +8,9 @@
8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread 8 * @IRQ_WAKE_THREAD handler requests to wake the handler thread
9 */ 9 */
10enum irqreturn { 10enum irqreturn {
11 IRQ_NONE, 11 IRQ_NONE = (0 << 0),
12 IRQ_HANDLED, 12 IRQ_HANDLED = (1 << 0),
13 IRQ_WAKE_THREAD, 13 IRQ_WAKE_THREAD = (1 << 1),
14}; 14};
15 15
16typedef enum irqreturn irqreturn_t; 16typedef enum irqreturn irqreturn_t;
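
Giving the irqreturn values explicit single-bit encodings means the return
codes of shared handlers can be combined with a bitwise OR and tested with a
mask (for example checking for IRQ_WAKE_THREAD) rather than compared only for
equality.  A stand-alone illustration of that enum-as-flags idiom, not the
kernel's definitions:

    #include <stdio.h>

    enum result {
            R_NONE        = (0 << 0),
            R_HANDLED     = (1 << 0),
            R_WAKE_THREAD = (1 << 1),
    };

    int main(void)
    {
            enum result r = R_HANDLED | R_WAKE_THREAD;

            if (r & R_WAKE_THREAD)
                    printf("handler asked for its thread to run\n");
            return 0;
    }
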
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index fb0e7329fee1..953352a88336 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -671,8 +671,8 @@ struct sysinfo {
671 671
672#ifdef __CHECKER__ 672#ifdef __CHECKER__
673#define BUILD_BUG_ON_NOT_POWER_OF_2(n) 673#define BUILD_BUG_ON_NOT_POWER_OF_2(n)
674#define BUILD_BUG_ON_ZERO(e) 674#define BUILD_BUG_ON_ZERO(e) (0)
675#define BUILD_BUG_ON_NULL(e) 675#define BUILD_BUG_ON_NULL(e) ((void*)0)
676#define BUILD_BUG_ON(condition) 676#define BUILD_BUG_ON(condition)
677#else /* __CHECKER__ */ 677#else /* __CHECKER__ */
678 678
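
The sparse (__CHECKER__) stubs now expand to (0) and ((void *)0) instead of to
nothing, because BUILD_BUG_ON_ZERO() and BUILD_BUG_ON_NULL() are used inside
larger expressions and an empty expansion leaves those expressions
syntactically broken under sparse.  The classic consumer is the
must-be-an-array guard inside ARRAY_SIZE(); below is a self-contained
approximation of that idiom, relying on the same GNU C extensions the kernel
uses:

    #include <stdio.h>

    /* Evaluates to 0, but refuses to compile if e is non-zero: a negative
     * bit-field width is a hard error. */
    #define BUILD_BUG_ON_ZERO(e)    (sizeof(struct { int: -!!(e); }))

    #define SAME_TYPE(a, b) \
            __builtin_types_compatible_p(__typeof__(a), __typeof__(b))
    #define MUST_BE_ARRAY(a)        BUILD_BUG_ON_ZERO(SAME_TYPE((a), &(a)[0]))
    #define ARRAY_SIZE(a)           (sizeof(a) / sizeof((a)[0]) + MUST_BE_ARRAY(a))

    int main(void)
    {
            int v[7];

            printf("%zu\n", ARRAY_SIZE(v)); /* 7; a pointer here would not compile */
            return 0;
    }

Because BUILD_BUG_ON_ZERO() appears as an operand of +, a stub that expands to
nothing breaks the arithmetic; expanding to (0) keeps such call sites valid
when sparse is checking the code.
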
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index d4a5c84c503d..0da38cf7db7b 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -45,7 +45,7 @@ static inline int request_module_nowait(const char *name, ...) { return -ENOSYS;
45#endif 45#endif
46 46
47 47
48struct key; 48struct cred;
49struct file; 49struct file;
50 50
51enum umh_wait { 51enum umh_wait {
@@ -62,7 +62,7 @@ struct subprocess_info {
62 char **envp; 62 char **envp;
63 enum umh_wait wait; 63 enum umh_wait wait;
64 int retval; 64 int retval;
65 int (*init)(struct subprocess_info *info); 65 int (*init)(struct subprocess_info *info, struct cred *new);
66 void (*cleanup)(struct subprocess_info *info); 66 void (*cleanup)(struct subprocess_info *info);
67 void *data; 67 void *data;
68}; 68};
@@ -73,7 +73,7 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
73 73
74/* Set various pieces of state into the subprocess_info structure */ 74/* Set various pieces of state into the subprocess_info structure */
75void call_usermodehelper_setfns(struct subprocess_info *info, 75void call_usermodehelper_setfns(struct subprocess_info *info,
76 int (*init)(struct subprocess_info *info), 76 int (*init)(struct subprocess_info *info, struct cred *new),
77 void (*cleanup)(struct subprocess_info *info), 77 void (*cleanup)(struct subprocess_info *info),
78 void *data); 78 void *data);
79 79
@@ -87,7 +87,7 @@ void call_usermodehelper_freeinfo(struct subprocess_info *info);
87static inline int 87static inline int
88call_usermodehelper_fns(char *path, char **argv, char **envp, 88call_usermodehelper_fns(char *path, char **argv, char **envp,
89 enum umh_wait wait, 89 enum umh_wait wait,
90 int (*init)(struct subprocess_info *info), 90 int (*init)(struct subprocess_info *info, struct cred *new),
91 void (*cleanup)(struct subprocess_info *), void *data) 91 void (*cleanup)(struct subprocess_info *), void *data)
92{ 92{
93 struct subprocess_info *info; 93 struct subprocess_info *info;
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 2a0d7d651dc3..ee0c952188de 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -12,6 +12,7 @@
12#ifndef _LINUX_KMSG_DUMP_H 12#ifndef _LINUX_KMSG_DUMP_H
13#define _LINUX_KMSG_DUMP_H 13#define _LINUX_KMSG_DUMP_H
14 14
15#include <linux/errno.h>
15#include <linux/list.h> 16#include <linux/list.h>
16 17
17enum kmsg_dump_reason { 18enum kmsg_dump_reason {
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
index 82cb5bf461fb..f66b065a8b5f 100644
--- a/include/linux/kobject_ns.h
+++ b/include/linux/kobject_ns.h
@@ -32,15 +32,17 @@ enum kobj_ns_type {
32 32
33/* 33/*
34 * Callbacks so sysfs can determine namespaces 34 * Callbacks so sysfs can determine namespaces
35 * @current_ns: return calling task's namespace 35 * @grab_current_ns: return a new reference to calling task's namespace
36 * @netlink_ns: return namespace to which a sock belongs (right?) 36 * @netlink_ns: return namespace to which a sock belongs (right?)
37 * @initial_ns: return the initial namespace (i.e. init_net_ns) 37 * @initial_ns: return the initial namespace (i.e. init_net_ns)
38 * @drop_ns: drops a reference to namespace
38 */ 39 */
39struct kobj_ns_type_operations { 40struct kobj_ns_type_operations {
40 enum kobj_ns_type type; 41 enum kobj_ns_type type;
41 const void *(*current_ns)(void); 42 void *(*grab_current_ns)(void);
42 const void *(*netlink_ns)(struct sock *sk); 43 const void *(*netlink_ns)(struct sock *sk);
43 const void *(*initial_ns)(void); 44 const void *(*initial_ns)(void);
45 void (*drop_ns)(void *);
44}; 46};
45 47
46int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); 48int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
@@ -48,9 +50,9 @@ int kobj_ns_type_registered(enum kobj_ns_type type);
48const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); 50const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent);
49const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); 51const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj);
50 52
51const void *kobj_ns_current(enum kobj_ns_type type); 53void *kobj_ns_grab_current(enum kobj_ns_type type);
52const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); 54const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk);
53const void *kobj_ns_initial(enum kobj_ns_type type); 55const void *kobj_ns_initial(enum kobj_ns_type type);
54void kobj_ns_exit(enum kobj_ns_type type, const void *ns); 56void kobj_ns_drop(enum kobj_ns_type type, void *ns);
55 57
56#endif /* _LINUX_KOBJECT_NS_H */ 58#endif /* _LINUX_KOBJECT_NS_H */
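
The interface change in this header is from "current_ns() returns a borrowed
const pointer" to "grab_current_ns() returns a counted reference that the
caller eventually passes to drop_ns()", which is what lets a sysfs superblock
pin its namespaces for as long as it exists.  Reduced to a generic userspace
sketch, with all names invented and C11 atomics standing in for the kernel's
reference counting:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct ns {
            atomic_int refcount;    /* starts at 1 for the creator */
    };

    /* Hand out an owned reference; the caller must balance it with ns_drop(). */
    static struct ns *ns_grab(struct ns *ns)
    {
            atomic_fetch_add(&ns->refcount, 1);
            return ns;
    }

    /* Give a reference back; the last one frees the object. */
    static void ns_drop(struct ns *ns)
    {
            if (atomic_fetch_sub(&ns->refcount, 1) == 1)
                    free(ns);
    }

    int main(void)
    {
            struct ns *ns = calloc(1, sizeof(*ns));
            struct ns *held;

            if (!ns)
                    return 1;
            atomic_init(&ns->refcount, 1);
            held = ns_grab(ns);     /* e.g. a superblock taking its reference */
            ns_drop(held);          /* superblock goes away */
            ns_drop(ns);            /* creator's reference */
            return 0;
    }
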
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9724a38ee69d..50940da6adf3 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -84,6 +84,7 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem);
84 84
85extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 85extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
86extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); 86extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
87extern struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm);
87 88
88static inline 89static inline
89int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) 90int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
@@ -246,6 +247,11 @@ static inline struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
246 return NULL; 247 return NULL;
247} 248}
248 249
250static inline struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
251{
252 return NULL;
253}
254
249static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem) 255static inline int mm_match_cgroup(struct mm_struct *mm, struct mem_cgroup *mem)
250{ 256{
251 return 1; 257 return 1;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 6469fa942d1d..22a8ceca0ed0 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2551,7 +2551,7 @@ extern void netdev_class_remove_file(struct class_attribute *class_attr);
2551 2551
2552extern struct kobj_ns_type_operations net_ns_type_operations; 2552extern struct kobj_ns_type_operations net_ns_type_operations;
2553 2553
2554extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); 2554extern const char *netdev_drivername(const struct net_device *dev);
2555 2555
2556extern void linkwatch_run_queue(void); 2556extern void linkwatch_run_queue(void);
2557 2557
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index 50cdc2559a5a..0d3dd66322ec 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -18,6 +18,9 @@ enum ip_conntrack_info {
18 /* >= this indicates reply direction */ 18 /* >= this indicates reply direction */
19 IP_CT_IS_REPLY, 19 IP_CT_IS_REPLY,
20 20
21 IP_CT_ESTABLISHED_REPLY = IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
22 IP_CT_RELATED_REPLY = IP_CT_RELATED + IP_CT_IS_REPLY,
23 IP_CT_NEW_REPLY = IP_CT_NEW + IP_CT_IS_REPLY,
21 /* Number of distinct IP_CT types (no NEW in reply dirn). */ 24 /* Number of distinct IP_CT types (no NEW in reply dirn). */
22 IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1 25 IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
23}; 26};
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 8b97308e65df..9ca008f0c542 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -259,6 +259,9 @@ extern void __bad_size_call_parameter(void);
259 * Special handling for cmpxchg_double. cmpxchg_double is passed two 259 * Special handling for cmpxchg_double. cmpxchg_double is passed two
260 * percpu variables. The first has to be aligned to a double word 260 * percpu variables. The first has to be aligned to a double word
261 * boundary and the second has to follow directly thereafter. 261 * boundary and the second has to follow directly thereafter.
262 * We enforce this on all architectures even if they don't support
263 * a double cmpxchg instruction, since it's a cheap requirement, and it
264 * avoids breaking the requirement for architectures with the instruction.
262 */ 265 */
263#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ 266#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \
264({ \ 267({ \
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 3412684ce5d5..e0786e35f247 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -137,14 +137,14 @@ enum perf_event_sample_format {
137 * 137 *
138 * struct read_format { 138 * struct read_format {
139 * { u64 value; 139 * { u64 value;
140 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 140 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
141 * { u64 time_running; } && PERF_FORMAT_RUNNING 141 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
142 * { u64 id; } && PERF_FORMAT_ID 142 * { u64 id; } && PERF_FORMAT_ID
143 * } && !PERF_FORMAT_GROUP 143 * } && !PERF_FORMAT_GROUP
144 * 144 *
145 * { u64 nr; 145 * { u64 nr;
146 * { u64 time_enabled; } && PERF_FORMAT_ENABLED 146 * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
147 * { u64 time_running; } && PERF_FORMAT_RUNNING 147 * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
148 * { u64 value; 148 * { u64 value;
149 * { u64 id; } && PERF_FORMAT_ID 149 * { u64 id; } && PERF_FORMAT_ID
150 * } cntr[nr]; 150 * } cntr[nr];
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2a8621c4be1e..a837b20ba190 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1063,6 +1063,7 @@ struct sched_domain;
1063 */ 1063 */
1064#define WF_SYNC 0x01 /* waker goes to sleep after wakup */ 1064#define WF_SYNC 0x01 /* waker goes to sleep after wakup */
1065#define WF_FORK 0x02 /* child wakeup after fork */ 1065#define WF_FORK 0x02 /* child wakeup after fork */
1066#define WF_MIGRATED 0x04 /* internal use, task got migrated */
1066 1067
1067#define ENQUEUE_WAKEUP 1 1068#define ENQUEUE_WAKEUP 1
1068#define ENQUEUE_HEAD 2 1069#define ENQUEUE_HEAD 2
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index e9811892844f..c6db9fb33c44 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -28,6 +28,7 @@
28 28
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/preempt.h> 30#include <linux/preempt.h>
31#include <asm/processor.h>
31 32
32typedef struct { 33typedef struct {
33 unsigned sequence; 34 unsigned sequence;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f3af147c211d..3e543371254e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1256,6 +1256,11 @@ static inline void skb_reserve(struct sk_buff *skb, int len)
1256 skb->tail += len; 1256 skb->tail += len;
1257} 1257}
1258 1258
1259static inline void skb_reset_mac_len(struct sk_buff *skb)
1260{
1261 skb->mac_len = skb->network_header - skb->mac_header;
1262}
1263
1259#ifdef NET_SKBUFF_DATA_USES_OFFSET 1264#ifdef NET_SKBUFF_DATA_USES_OFFSET
1260static inline unsigned char *skb_transport_header(const struct sk_buff *skb) 1265static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
1261{ 1266{
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 7ad824d510a2..8cc38d3bab0c 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -85,12 +85,15 @@ int smp_call_function_any(const struct cpumask *mask,
85 * Generic and arch helpers 85 * Generic and arch helpers
86 */ 86 */
87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS 87#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
88void __init call_function_init(void);
88void generic_smp_call_function_single_interrupt(void); 89void generic_smp_call_function_single_interrupt(void);
89void generic_smp_call_function_interrupt(void); 90void generic_smp_call_function_interrupt(void);
90void ipi_call_lock(void); 91void ipi_call_lock(void);
91void ipi_call_unlock(void); 92void ipi_call_unlock(void);
92void ipi_call_lock_irq(void); 93void ipi_call_lock_irq(void);
93void ipi_call_unlock_irq(void); 94void ipi_call_unlock_irq(void);
95#else
96static inline void call_function_init(void) { }
94#endif 97#endif
95 98
96/* 99/*
@@ -134,7 +137,7 @@ static inline void smp_send_reschedule(int cpu) { }
134#define smp_prepare_boot_cpu() do {} while (0) 137#define smp_prepare_boot_cpu() do {} while (0)
135#define smp_call_function_many(mask, func, info, wait) \ 138#define smp_call_function_many(mask, func, info, wait) \
136 (up_smp_call_function(func, info)) 139 (up_smp_call_function(func, info))
137static inline void init_call_single_data(void) { } 140static inline void call_function_init(void) { }
138 141
139static inline int 142static inline int
140smp_call_function_any(const struct cpumask *mask, smp_call_func_t func, 143smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h
new file mode 100644
index 000000000000..ec6234eee89c
--- /dev/null
+++ b/include/linux/sunrpc/gss_krb5_enctypes.h
@@ -0,0 +1,4 @@
1/*
2 * Dumb way to share this static piece of information with nfsd
3 */
4#define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2"
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 384eb5fe530b..e70564647039 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -358,6 +358,7 @@ struct backing_dev_info;
358extern struct mm_struct *swap_token_mm; 358extern struct mm_struct *swap_token_mm;
359extern void grab_swap_token(struct mm_struct *); 359extern void grab_swap_token(struct mm_struct *);
360extern void __put_swap_token(struct mm_struct *); 360extern void __put_swap_token(struct mm_struct *);
361extern void disable_swap_token(struct mem_cgroup *memcg);
361 362
362static inline int has_swap_token(struct mm_struct *mm) 363static inline int has_swap_token(struct mm_struct *mm)
363{ 364{
@@ -370,11 +371,6 @@ static inline void put_swap_token(struct mm_struct *mm)
370 __put_swap_token(mm); 371 __put_swap_token(mm);
371} 372}
372 373
373static inline void disable_swap_token(void)
374{
375 put_swap_token(swap_token_mm);
376}
377
378#ifdef CONFIG_CGROUP_MEM_RES_CTLR 374#ifdef CONFIG_CGROUP_MEM_RES_CTLR
379extern void 375extern void
380mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); 376mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
@@ -500,7 +496,7 @@ static inline int has_swap_token(struct mm_struct *mm)
500 return 0; 496 return 0;
501} 497}
502 498
503static inline void disable_swap_token(void) 499static inline void disable_swap_token(struct mem_cgroup *memcg)
504{ 500{
505} 501}
506 502
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 8c0e349f4a6c..445702c60d04 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -24,6 +24,7 @@ extern int swiotlb_force;
24 24
25extern void swiotlb_init(int verbose); 25extern void swiotlb_init(int verbose);
26extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); 26extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
27extern unsigned long swioltb_nr_tbl(void);
27 28
28/* 29/*
29 * Enumeration for sync targets 30 * Enumeration for sync targets
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index c3acda60eee0..e2696d76a599 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -177,9 +177,6 @@ struct sysfs_dirent *sysfs_get_dirent(struct sysfs_dirent *parent_sd,
177struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd); 177struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd);
178void sysfs_put(struct sysfs_dirent *sd); 178void sysfs_put(struct sysfs_dirent *sd);
179 179
180/* Called to clear a ns tag when it is no longer valid */
181void sysfs_exit_ns(enum kobj_ns_type type, const void *tag);
182
183int __must_check sysfs_init(void); 180int __must_check sysfs_init(void);
184 181
185#else /* CONFIG_SYSFS */ 182#else /* CONFIG_SYSFS */
@@ -338,10 +335,6 @@ static inline void sysfs_put(struct sysfs_dirent *sd)
338{ 335{
339} 336}
340 337
341static inline void sysfs_exit_ns(int type, const void *tag)
342{
343}
344
345static inline int __must_check sysfs_init(void) 338static inline int __must_check sysfs_init(void)
346{ 339{
347 return 0; 340 return 0;
diff --git a/include/linux/topology.h b/include/linux/topology.h
index b91a40e847d2..fc839bfa7935 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -60,7 +60,7 @@ int arch_update_cpu_topology(void);
60 * (in whatever arch specific measurement units returned by node_distance()) 60 * (in whatever arch specific measurement units returned by node_distance())
61 * then switch on zone reclaim on boot. 61 * then switch on zone reclaim on boot.
62 */ 62 */
63#define RECLAIM_DISTANCE 20 63#define RECLAIM_DISTANCE 30
64#endif 64#endif
65#ifndef PENALTY_FOR_NODE_WITH_CPUS 65#ifndef PENALTY_FOR_NODE_WITH_CPUS
66#define PENALTY_FOR_NODE_WITH_CPUS (1) 66#define PENALTY_FOR_NODE_WITH_CPUS (1)
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 71693d4a4fe1..17df3600bcef 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -62,7 +62,9 @@
62 US_FLAG(NO_READ_DISC_INFO, 0x00040000) \ 62 US_FLAG(NO_READ_DISC_INFO, 0x00040000) \
63 /* cannot handle READ_DISC_INFO */ \ 63 /* cannot handle READ_DISC_INFO */ \
64 US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \ 64 US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \
65 /* cannot handle READ_CAPACITY_16 */ 65 /* cannot handle READ_CAPACITY_16 */ \
66 US_FLAG(INITIAL_READ10, 0x00100000) \
67 /* Initial READ(10) (and others) must be retried */
66 68
67#define US_FLAG(name, value) US_FL_##name = value , 69#define US_FLAG(name, value) US_FL_##name = value ,
68enum { US_DO_ALL_FLAGS }; 70enum { US_DO_ALL_FLAGS };
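
The US_FLAG list gaining US_FLAG(INITIAL_READ10, ...) above is an X-macro: the list itself is a macro (US_DO_ALL_FLAGS) and each use site defines US_FLAG before expanding it, so a new entry propagates to every expansion automatically. A generic sketch of the technique with hypothetical names:

#define EXAMPLE_ALL_FLAGS               \
        EX_FLAG(SINGLE_LUN,     0x0001) \
        EX_FLAG(IGNORE_RESIDUE, 0x0002)

/* Expansion 1: enum constants, mirroring "US_FLAG(name, value) US_FL_##name = value ," */
#define EX_FLAG(name, value) EX_FL_##name = value,
enum { EXAMPLE_ALL_FLAGS };
#undef EX_FLAG

/* Expansion 2: the same list rendered as debug strings */
#define EX_FLAG(name, value) #name,
static const char *example_flag_names[] = { EXAMPLE_ALL_FLAGS };
#undef EX_FLAG
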
diff --git a/include/linux/uts.h b/include/linux/uts.h
index 73eb1ed36ec4..6ddbd86377de 100644
--- a/include/linux/uts.h
+++ b/include/linux/uts.h
@@ -9,7 +9,7 @@
9#endif 9#endif
10 10
11#ifndef UTS_NODENAME 11#ifndef UTS_NODENAME
12#define UTS_NODENAME "(none)" /* set by sethostname() */ 12#define UTS_NODENAME CONFIG_DEFAULT_HOSTNAME /* set by sethostname() */
13#endif 13#endif
14 14
15#ifndef UTS_DOMAINNAME 15#ifndef UTS_DOMAINNAME
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 93e96fb93452..c7c40f1d2624 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -128,8 +128,8 @@ struct video_device
128 struct mutex *lock; 128 struct mutex *lock;
129}; 129};
130 130
131#define media_entity_to_video_device(entity) \ 131#define media_entity_to_video_device(__e) \
132 container_of(entity, struct video_device, entity) 132 container_of(__e, struct video_device, entity)
133/* dev to video-device */ 133/* dev to video-device */
134#define to_video_device(cd) container_of(cd, struct video_device, dev) 134#define to_video_device(cd) container_of(cd, struct video_device, dev)
135 135
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 2bf9ed9ef26b..aef430d779bd 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -35,8 +35,11 @@ struct netns_ipvs;
35#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS) 35#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
36 36
37struct net { 37struct net {
38 atomic_t passive; /* To decided when the network
39 * namespace should be freed.
40 */
38 atomic_t count; /* To decided when the network 41 atomic_t count; /* To decided when the network
39 * namespace should be freed. 42 * namespace should be shut down.
40 */ 43 */
41#ifdef NETNS_REFCNT_DEBUG 44#ifdef NETNS_REFCNT_DEBUG
42 atomic_t use_count; /* To track references we 45 atomic_t use_count; /* To track references we
@@ -154,6 +157,9 @@ int net_eq(const struct net *net1, const struct net *net2)
154{ 157{
155 return net1 == net2; 158 return net1 == net2;
156} 159}
160
161extern void net_drop_ns(void *);
162
157#else 163#else
158 164
159static inline struct net *get_net(struct net *net) 165static inline struct net *get_net(struct net *net)
@@ -175,6 +181,8 @@ int net_eq(const struct net *net1, const struct net *net2)
175{ 181{
176 return 1; 182 return 1;
177} 183}
184
185#define net_drop_ns NULL
178#endif 186#endif
179 187
180 188
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index c7c42e7acc31..5d4f8e586e32 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -307,6 +307,12 @@ static inline int nf_ct_is_untracked(const struct nf_conn *ct)
307 return test_bit(IPS_UNTRACKED_BIT, &ct->status); 307 return test_bit(IPS_UNTRACKED_BIT, &ct->status);
308} 308}
309 309
310/* Packet is received from loopback */
311static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
312{
313 return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
314}
315
310extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp); 316extern int nf_conntrack_set_hashsize(const char *val, struct kernel_param *kp);
311extern unsigned int nf_conntrack_htable_size; 317extern unsigned int nf_conntrack_htable_size;
312extern unsigned int nf_conntrack_max; 318extern unsigned int nf_conntrack_max;
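
A hedged sketch of how a conntrack path might use the new nf_is_loopback_packet() helper to bail out early; the hook function below is illustrative only, not the actual ip_vs/conntrack call site:

#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>

static unsigned int example_in_hook(struct sk_buff *skb)
{
        if (nf_is_loopback_packet(skb))
                return NF_ACCEPT;       /* leave loopback traffic untracked */

        /* ... normal connection-tracking work would follow ... */
        return NF_ACCEPT;
}
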
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
index ae045ca7d356..1c09820df585 100644
--- a/include/trace/events/irq.h
+++ b/include/trace/events/irq.h
@@ -20,7 +20,8 @@ struct softirq_action;
20 softirq_name(BLOCK_IOPOLL), \ 20 softirq_name(BLOCK_IOPOLL), \
21 softirq_name(TASKLET), \ 21 softirq_name(TASKLET), \
22 softirq_name(SCHED), \ 22 softirq_name(SCHED), \
23 softirq_name(HRTIMER)) 23 softirq_name(HRTIMER), \
24 softirq_name(RCU))
24 25
25/** 26/**
26 * irq_handler_entry - called immediately before the irq action handler 27 * irq_handler_entry - called immediately before the irq action handler
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index ea422aaa23e1..b2c33bd955fa 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -6,6 +6,8 @@
6 6
7#include <linux/types.h> 7#include <linux/types.h>
8#include <linux/tracepoint.h> 8#include <linux/tracepoint.h>
9#include <linux/mm.h>
10#include <linux/memcontrol.h>
9#include "gfpflags.h" 11#include "gfpflags.h"
10 12
11#define RECLAIM_WB_ANON 0x0001u 13#define RECLAIM_WB_ANON 0x0001u
@@ -310,6 +312,87 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
310 show_reclaim_flags(__entry->reclaim_flags)) 312 show_reclaim_flags(__entry->reclaim_flags))
311); 313);
312 314
315TRACE_EVENT(replace_swap_token,
316 TP_PROTO(struct mm_struct *old_mm,
317 struct mm_struct *new_mm),
318
319 TP_ARGS(old_mm, new_mm),
320
321 TP_STRUCT__entry(
322 __field(struct mm_struct*, old_mm)
323 __field(unsigned int, old_prio)
324 __field(struct mm_struct*, new_mm)
325 __field(unsigned int, new_prio)
326 ),
327
328 TP_fast_assign(
329 __entry->old_mm = old_mm;
330 __entry->old_prio = old_mm ? old_mm->token_priority : 0;
331 __entry->new_mm = new_mm;
332 __entry->new_prio = new_mm->token_priority;
333 ),
334
335 TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
336 __entry->old_mm, __entry->old_prio,
337 __entry->new_mm, __entry->new_prio)
338);
339
340DECLARE_EVENT_CLASS(put_swap_token_template,
341 TP_PROTO(struct mm_struct *swap_token_mm),
342
343 TP_ARGS(swap_token_mm),
344
345 TP_STRUCT__entry(
346 __field(struct mm_struct*, swap_token_mm)
347 ),
348
349 TP_fast_assign(
350 __entry->swap_token_mm = swap_token_mm;
351 ),
352
353 TP_printk("token_mm=%p", __entry->swap_token_mm)
354);
355
356DEFINE_EVENT(put_swap_token_template, put_swap_token,
357 TP_PROTO(struct mm_struct *swap_token_mm),
358 TP_ARGS(swap_token_mm)
359);
360
361DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
362 TP_PROTO(struct mm_struct *swap_token_mm),
363 TP_ARGS(swap_token_mm),
364 TP_CONDITION(swap_token_mm != NULL)
365);
366
367TRACE_EVENT_CONDITION(update_swap_token_priority,
368 TP_PROTO(struct mm_struct *mm,
369 unsigned int old_prio,
370 struct mm_struct *swap_token_mm),
371
372 TP_ARGS(mm, old_prio, swap_token_mm),
373
374 TP_CONDITION(mm->token_priority != old_prio),
375
376 TP_STRUCT__entry(
377 __field(struct mm_struct*, mm)
378 __field(unsigned int, old_prio)
379 __field(unsigned int, new_prio)
380 __field(struct mm_struct*, swap_token_mm)
381 __field(unsigned int, swap_token_prio)
382 ),
383
384 TP_fast_assign(
385 __entry->mm = mm;
386 __entry->old_prio = old_prio;
387 __entry->new_prio = mm->token_priority;
388 __entry->swap_token_mm = swap_token_mm;
389 __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
390 ),
391
392 TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
393 __entry->mm, __entry->old_prio, __entry->new_prio,
394 __entry->swap_token_mm, __entry->swap_token_prio)
395);
313 396
314#endif /* _TRACE_VMSCAN_H */ 397#endif /* _TRACE_VMSCAN_H */
315 398
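
Each TRACE_EVENT/DEFINE_EVENT above generates a trace_<name>() inline hook that compiles to a no-op until the event is enabled; a hedged sketch of how the swap-token code would fire two of them (the wrapper function is illustrative):

#include <linux/mm_types.h>
#include <trace/events/vmscan.h>

static void example_drop_swap_token(struct mm_struct *mm)
{
        trace_put_swap_token(mm);       /* plain event */
        trace_disable_swap_token(mm);   /* conditional: only emitted when mm != NULL (TP_CONDITION) */
}
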
diff --git a/init/Kconfig b/init/Kconfig
index ebafac4231ee..412c21b00d51 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -19,7 +19,6 @@ config DEFCONFIG_LIST
19config CONSTRUCTORS 19config CONSTRUCTORS
20 bool 20 bool
21 depends on !UML 21 depends on !UML
22 default y
23 22
24config HAVE_IRQ_WORK 23config HAVE_IRQ_WORK
25 bool 24 bool
@@ -204,6 +203,15 @@ config KERNEL_LZO
204 203
205endchoice 204endchoice
206 205
206config DEFAULT_HOSTNAME
207 string "Default hostname"
208 default "(none)"
209 help
210 This option determines the default system hostname before userspace
211 calls sethostname(2). The kernel traditionally uses "(none)" here,
212 but you may wish to use a different default here to make a minimal
213 system more usable with less configuration.
214
207config SWAP 215config SWAP
208 bool "Support for paging of anonymous memory (swap)" 216 bool "Support for paging of anonymous memory (swap)"
209 depends on MMU && BLOCK 217 depends on MMU && BLOCK
diff --git a/init/calibrate.c b/init/calibrate.c
index cfd7000c9d71..2568d22a304e 100644
--- a/init/calibrate.c
+++ b/init/calibrate.c
@@ -93,9 +93,6 @@ static unsigned long __cpuinit calibrate_delay_direct(void)
93 * If the upper limit and lower limit of the timer_rate is 93 * If the upper limit and lower limit of the timer_rate is
94 * >= 12.5% apart, redo calibration. 94 * >= 12.5% apart, redo calibration.
95 */ 95 */
96 printk(KERN_DEBUG "calibrate_delay_direct() timer_rate_max=%lu "
97 "timer_rate_min=%lu pre_start=%lu pre_end=%lu\n",
98 timer_rate_max, timer_rate_min, pre_start, pre_end);
99 if (start >= post_end) 96 if (start >= post_end)
100 printk(KERN_NOTICE "calibrate_delay_direct() ignoring " 97 printk(KERN_NOTICE "calibrate_delay_direct() ignoring "
101 "timer_rate as we had a TSC wrap around" 98 "timer_rate as we had a TSC wrap around"
diff --git a/init/main.c b/init/main.c
index cafba67c13bf..d7211faed2ad 100644
--- a/init/main.c
+++ b/init/main.c
@@ -542,6 +542,7 @@ asmlinkage void __init start_kernel(void)
542 timekeeping_init(); 542 timekeeping_init();
543 time_init(); 543 time_init();
544 profile_init(); 544 profile_init();
545 call_function_init();
545 if (!irqs_disabled()) 546 if (!irqs_disabled())
546 printk(KERN_CRIT "start_kernel(): bug: interrupts were " 547 printk(KERN_CRIT "start_kernel(): bug: interrupts were "
547 "enabled early\n"); 548 "enabled early\n");
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d863b3c057bb..9efe7108ccaf 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7402,26 +7402,12 @@ static int __perf_cgroup_move(void *info)
7402 return 0; 7402 return 0;
7403} 7403}
7404 7404
7405static void perf_cgroup_move(struct task_struct *task) 7405static void
7406perf_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *task)
7406{ 7407{
7407 task_function_call(task, __perf_cgroup_move, task); 7408 task_function_call(task, __perf_cgroup_move, task);
7408} 7409}
7409 7410
7410static void perf_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
7411 struct cgroup *old_cgrp, struct task_struct *task,
7412 bool threadgroup)
7413{
7414 perf_cgroup_move(task);
7415 if (threadgroup) {
7416 struct task_struct *c;
7417 rcu_read_lock();
7418 list_for_each_entry_rcu(c, &task->thread_group, thread_group) {
7419 perf_cgroup_move(c);
7420 }
7421 rcu_read_unlock();
7422 }
7423}
7424
7425static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp, 7411static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7426 struct cgroup *old_cgrp, struct task_struct *task) 7412 struct cgroup *old_cgrp, struct task_struct *task)
7427{ 7413{
@@ -7433,7 +7419,7 @@ static void perf_cgroup_exit(struct cgroup_subsys *ss, struct cgroup *cgrp,
7433 if (!(task->flags & PF_EXITING)) 7419 if (!(task->flags & PF_EXITING))
7434 return; 7420 return;
7435 7421
7436 perf_cgroup_move(task); 7422 perf_cgroup_attach_task(cgrp, task);
7437} 7423}
7438 7424
7439struct cgroup_subsys perf_subsys = { 7425struct cgroup_subsys perf_subsys = {
@@ -7442,6 +7428,6 @@ struct cgroup_subsys perf_subsys = {
7442 .create = perf_cgroup_create, 7428 .create = perf_cgroup_create,
7443 .destroy = perf_cgroup_destroy, 7429 .destroy = perf_cgroup_destroy,
7444 .exit = perf_cgroup_exit, 7430 .exit = perf_cgroup_exit,
7445 .attach = perf_cgroup_attach, 7431 .attach_task = perf_cgroup_attach_task,
7446}; 7432};
7447#endif /* CONFIG_CGROUP_PERF */ 7433#endif /* CONFIG_CGROUP_PERF */
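
The switch from .attach to .attach_task above relies on the cgroup core invoking the per-task callback for every thread being moved, which is why the hand-rolled thread_group walk could be dropped. A hedged, fragmentary sketch of the callback shape; subsystem fields beyond those shown are omitted:

#include <linux/cgroup.h>
#include <linux/sched.h>

static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
        /* per-thread migration work; the cgroup core calls this once per
         * thread, so no explicit list_for_each_entry_rcu() loop is needed */
}

struct cgroup_subsys example_subsys = {
        .name           = "example",
        .attach_task    = example_attach_task,
        /* .create / .destroy / .subsys_id etc. omitted in this sketch */
};
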
diff --git a/kernel/exit.c b/kernel/exit.c
index 20a406471525..f2b321bae440 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -561,29 +561,28 @@ void exit_files(struct task_struct *tsk)
561 561
562#ifdef CONFIG_MM_OWNER 562#ifdef CONFIG_MM_OWNER
563/* 563/*
564 * Task p is exiting and it owned mm, lets find a new owner for it 564 * A task is exiting. If it owned this mm, find a new owner for the mm.
565 */ 565 */
566static inline int
567mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
568{
569 /*
570 * If there are other users of the mm and the owner (us) is exiting
571 * we need to find a new owner to take on the responsibility.
572 */
573 if (atomic_read(&mm->mm_users) <= 1)
574 return 0;
575 if (mm->owner != p)
576 return 0;
577 return 1;
578}
579
580void mm_update_next_owner(struct mm_struct *mm) 566void mm_update_next_owner(struct mm_struct *mm)
581{ 567{
582 struct task_struct *c, *g, *p = current; 568 struct task_struct *c, *g, *p = current;
583 569
584retry: 570retry:
585 if (!mm_need_new_owner(mm, p)) 571 /*
572 * If the exiting or execing task is not the owner, it's
573 * someone else's problem.
574 */
575 if (mm->owner != p)
586 return; 576 return;
577 /*
578 * The current owner is exiting/execing and there are no other
579 * candidates. Do not leave the mm pointing to a possibly
580 * freed task structure.
581 */
582 if (atomic_read(&mm->mm_users) <= 1) {
583 mm->owner = NULL;
584 return;
585 }
587 586
588 read_lock(&tasklist_lock); 587 read_lock(&tasklist_lock);
589 /* 588 /*
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index b8cadf70b1fb..5bf924d80b5c 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -2,7 +2,8 @@ menu "GCOV-based kernel profiling"
2 2
3config GCOV_KERNEL 3config GCOV_KERNEL
4 bool "Enable gcov-based kernel profiling" 4 bool "Enable gcov-based kernel profiling"
5 depends on DEBUG_FS && CONSTRUCTORS 5 depends on DEBUG_FS
6 select CONSTRUCTORS
6 default n 7 default n
7 ---help--- 8 ---help---
8 This option enables gcov-based code profiling (e.g. for code coverage 9 This option enables gcov-based code profiling (e.g. for code coverage
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 90cb55f6d7eb..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,12 +133,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
133 switch (res) { 133 switch (res) {
134 case IRQ_WAKE_THREAD: 134 case IRQ_WAKE_THREAD:
135 /* 135 /*
136 * Set result to handled so the spurious check
137 * does not trigger.
138 */
139 res = IRQ_HANDLED;
140
141 /*
142 * Catch drivers which return WAKE_THREAD but 136 * Catch drivers which return WAKE_THREAD but
143 * did not set up a thread function 137 * did not set up a thread function
144 */ 138 */
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 886e80347b32..4c60a50e66b2 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -257,13 +257,11 @@ int __init early_irq_init(void)
257 count = ARRAY_SIZE(irq_desc); 257 count = ARRAY_SIZE(irq_desc);
258 258
259 for (i = 0; i < count; i++) { 259 for (i = 0; i < count; i++) {
260 desc[i].irq_data.irq = i;
261 desc[i].irq_data.chip = &no_irq_chip;
262 desc[i].kstat_irqs = alloc_percpu(unsigned int); 260 desc[i].kstat_irqs = alloc_percpu(unsigned int);
263 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); 261 alloc_masks(&desc[i], GFP_KERNEL, node);
264 alloc_masks(desc + i, GFP_KERNEL, node); 262 raw_spin_lock_init(&desc[i].lock);
265 desc_smp_init(desc + i, node);
266 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 263 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
264 desc_set_defaults(i, &desc[i], node);
267 } 265 }
268 return arch_early_irq_init(); 266 return arch_early_irq_init();
269} 267}
@@ -346,6 +344,12 @@ irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node)
346 if (!cnt) 344 if (!cnt)
347 return -EINVAL; 345 return -EINVAL;
348 346
347 if (irq >= 0) {
348 if (from > irq)
349 return -EINVAL;
350 from = irq;
351 }
352
349 mutex_lock(&sparse_irq_lock); 353 mutex_lock(&sparse_irq_lock);
350 354
351 start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS, 355 start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce0021e1c4..0a7840aeb0fb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
492 int ret = 0; 492 int ret = 0;
493 493
494 if (!desc)
495 return -EINVAL;
496
494 /* wakeup-capable irqs can be shared between drivers that 497 /* wakeup-capable irqs can be shared between drivers that
495 * don't need to have the same sleep mode behaviors. 498 * don't need to have the same sleep mode behaviors.
496 */ 499 */
@@ -723,13 +726,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
723 * context. So we need to disable bh here to avoid deadlocks and other 726 * context. So we need to disable bh here to avoid deadlocks and other
724 * side effects. 727 * side effects.
725 */ 728 */
726static void 729static irqreturn_t
727irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 730irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
728{ 731{
732 irqreturn_t ret;
733
729 local_bh_disable(); 734 local_bh_disable();
730 action->thread_fn(action->irq, action->dev_id); 735 ret = action->thread_fn(action->irq, action->dev_id);
731 irq_finalize_oneshot(desc, action, false); 736 irq_finalize_oneshot(desc, action, false);
732 local_bh_enable(); 737 local_bh_enable();
738 return ret;
733} 739}
734 740
735/* 741/*
@@ -737,10 +743,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
737 * preemtible - many of them need to sleep and wait for slow busses to 743 * preemtible - many of them need to sleep and wait for slow busses to
738 * complete. 744 * complete.
739 */ 745 */
740static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) 746static irqreturn_t irq_thread_fn(struct irq_desc *desc,
747 struct irqaction *action)
741{ 748{
742 action->thread_fn(action->irq, action->dev_id); 749 irqreturn_t ret;
750
751 ret = action->thread_fn(action->irq, action->dev_id);
743 irq_finalize_oneshot(desc, action, false); 752 irq_finalize_oneshot(desc, action, false);
753 return ret;
744} 754}
745 755
746/* 756/*
@@ -753,7 +763,8 @@ static int irq_thread(void *data)
753 }; 763 };
754 struct irqaction *action = data; 764 struct irqaction *action = data;
755 struct irq_desc *desc = irq_to_desc(action->irq); 765 struct irq_desc *desc = irq_to_desc(action->irq);
756 void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); 766 irqreturn_t (*handler_fn)(struct irq_desc *desc,
767 struct irqaction *action);
757 int wake; 768 int wake;
758 769
759 if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD, 770 if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +794,12 @@ static int irq_thread(void *data)
783 desc->istate |= IRQS_PENDING; 794 desc->istate |= IRQS_PENDING;
784 raw_spin_unlock_irq(&desc->lock); 795 raw_spin_unlock_irq(&desc->lock);
785 } else { 796 } else {
797 irqreturn_t action_ret;
798
786 raw_spin_unlock_irq(&desc->lock); 799 raw_spin_unlock_irq(&desc->lock);
787 handler_fn(desc, action); 800 action_ret = handler_fn(desc, action);
801 if (!noirqdebug)
802 note_interrupt(action->irq, desc, action_ret);
788 } 803 }
789 804
790 wake = atomic_dec_and_test(&desc->threads_active); 805 wake = atomic_dec_and_test(&desc->threads_active);
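
With the change above, a threaded handler's irqreturn_t now reaches note_interrupt() and the spurious-IRQ accounting, so thread functions should return IRQ_HANDLED only when they actually serviced their device. A hedged driver-side sketch, with all names illustrative:

#include <linux/interrupt.h>

static irqreturn_t example_quick_check(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;         /* defer the sleepable work to the thread */
}

static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
        /* ... talk to the device, possibly sleeping on a slow bus ... */
        return IRQ_HANDLED;             /* now also feeds the spurious-IRQ logic */
}

static int example_request(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, example_quick_check, example_thread_fn,
                                    IRQF_ONESHOT, "example", dev);
}
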
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dfbd550401b2..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -167,6 +167,13 @@ out:
167 jiffies + POLL_SPURIOUS_IRQ_INTERVAL); 167 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
168} 168}
169 169
170static inline int bad_action_ret(irqreturn_t action_ret)
171{
172 if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
173 return 0;
174 return 1;
175}
176
170/* 177/*
171 * If 99,900 of the previous 100,000 interrupts have not been handled 178 * If 99,900 of the previous 100,000 interrupts have not been handled
172 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 179 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
182 struct irqaction *action; 189 struct irqaction *action;
183 unsigned long flags; 190 unsigned long flags;
184 191
185 if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) { 192 if (bad_action_ret(action_ret)) {
186 printk(KERN_ERR "irq event %d: bogus return value %x\n", 193 printk(KERN_ERR "irq event %d: bogus return value %x\n",
187 irq, action_ret); 194 irq, action_ret);
188 } else { 195 } else {
@@ -201,10 +208,11 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
201 raw_spin_lock_irqsave(&desc->lock, flags); 208 raw_spin_lock_irqsave(&desc->lock, flags);
202 action = desc->action; 209 action = desc->action;
203 while (action) { 210 while (action) {
204 printk(KERN_ERR "[<%p>]", action->handler); 211 printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
205 print_symbol(" (%s)", 212 if (action->thread_fn)
206 (unsigned long)action->handler); 213 printk(KERN_CONT " threaded [<%p>] %pf",
207 printk("\n"); 214 action->thread_fn, action->thread_fn);
215 printk(KERN_CONT "\n");
208 action = action->next; 216 action = action->next;
209 } 217 }
210 raw_spin_unlock_irqrestore(&desc->lock, flags); 218 raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -262,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
262 if (desc->istate & IRQS_POLL_INPROGRESS) 270 if (desc->istate & IRQS_POLL_INPROGRESS)
263 return; 271 return;
264 272
265 if (unlikely(action_ret != IRQ_HANDLED)) { 273 /* we get here again via the threaded handler */
274 if (action_ret == IRQ_WAKE_THREAD)
275 return;
276
277 if (bad_action_ret(action_ret)) {
278 report_bad_irq(irq, desc, action_ret);
279 return;
280 }
281
282 if (unlikely(action_ret == IRQ_NONE)) {
266 /* 283 /*
267 * If we are seeing only the odd spurious IRQ caused by 284 * If we are seeing only the odd spurious IRQ caused by
268 * bus asynchronicity then don't eventually trigger an error, 285 * bus asynchronicity then don't eventually trigger an error,
@@ -274,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
274 else 291 else
275 desc->irqs_unhandled++; 292 desc->irqs_unhandled++;
276 desc->last_unhandled = jiffies; 293 desc->last_unhandled = jiffies;
277 if (unlikely(action_ret != IRQ_NONE))
278 report_bad_irq(irq, desc, action_ret);
279 } 294 }
280 295
281 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) { 296 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
diff --git a/kernel/kmod.c b/kernel/kmod.c
index ad6a81c58b44..47613dfb7b28 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -156,12 +156,6 @@ static int ____call_usermodehelper(void *data)
156 */ 156 */
157 set_user_nice(current, 0); 157 set_user_nice(current, 0);
158 158
159 if (sub_info->init) {
160 retval = sub_info->init(sub_info);
161 if (retval)
162 goto fail;
163 }
164
165 retval = -ENOMEM; 159 retval = -ENOMEM;
166 new = prepare_kernel_cred(current); 160 new = prepare_kernel_cred(current);
167 if (!new) 161 if (!new)
@@ -173,6 +167,14 @@ static int ____call_usermodehelper(void *data)
173 new->cap_inheritable); 167 new->cap_inheritable);
174 spin_unlock(&umh_sysctl_lock); 168 spin_unlock(&umh_sysctl_lock);
175 169
170 if (sub_info->init) {
171 retval = sub_info->init(sub_info, new);
172 if (retval) {
173 abort_creds(new);
174 goto fail;
175 }
176 }
177
176 commit_creds(new); 178 commit_creds(new);
177 179
178 retval = kernel_execve(sub_info->path, 180 retval = kernel_execve(sub_info->path,
@@ -388,7 +390,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
388 * context in which call_usermodehelper_exec is called. 390 * context in which call_usermodehelper_exec is called.
389 */ 391 */
390void call_usermodehelper_setfns(struct subprocess_info *info, 392void call_usermodehelper_setfns(struct subprocess_info *info,
391 int (*init)(struct subprocess_info *info), 393 int (*init)(struct subprocess_info *info, struct cred *new),
392 void (*cleanup)(struct subprocess_info *info), 394 void (*cleanup)(struct subprocess_info *info),
393 void *data) 395 void *data)
394{ 396{
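
The init() callback now runs after the helper's credentials have been prepared and receives them directly, and a failure aborts the exec via abort_creds(). A hedged caller-side sketch against the new signature; the helper path, argv/envp contents and function names are illustrative:

#include <linux/kmod.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static int example_umh_init(struct subprocess_info *info, struct cred *new)
{
        /* inspect or adjust the prepared credentials; nonzero aborts the helper */
        return 0;
}

static int example_run_helper(void)
{
        char *argv[] = { "/sbin/example-helper", NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };
        struct subprocess_info *info;

        info = call_usermodehelper_setup(argv[0], argv, envp, GFP_KERNEL);
        if (!info)
                return -ENOMEM;
        call_usermodehelper_setfns(info, example_umh_init, NULL, NULL);
        return call_usermodehelper_exec(info, UMH_WAIT_EXEC);
}
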
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac8..298c9276dfdb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -3426,7 +3426,7 @@ int lock_is_held(struct lockdep_map *lock)
3426 int ret = 0; 3426 int ret = 0;
3427 3427
3428 if (unlikely(current->lockdep_recursion)) 3428 if (unlikely(current->lockdep_recursion))
3429 return ret; 3429 return 1; /* avoid false negative lockdep_assert_held() */
3430 3430
3431 raw_local_irq_save(flags); 3431 raw_local_irq_save(flags);
3432 check_flags(flags); 3432 check_flags(flags);
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 89419ff92e99..7e59ffb3d0ba 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -87,6 +87,8 @@ static struct rcu_state *rcu_state;
87int rcu_scheduler_active __read_mostly; 87int rcu_scheduler_active __read_mostly;
88EXPORT_SYMBOL_GPL(rcu_scheduler_active); 88EXPORT_SYMBOL_GPL(rcu_scheduler_active);
89 89
90#ifdef CONFIG_RCU_BOOST
91
90/* 92/*
91 * Control variables for per-CPU and per-rcu_node kthreads. These 93 * Control variables for per-CPU and per-rcu_node kthreads. These
92 * handle all flavors of RCU. 94 * handle all flavors of RCU.
@@ -98,8 +100,11 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
98DEFINE_PER_CPU(char, rcu_cpu_has_work); 100DEFINE_PER_CPU(char, rcu_cpu_has_work);
99static char rcu_kthreads_spawnable; 101static char rcu_kthreads_spawnable;
100 102
103#endif /* #ifdef CONFIG_RCU_BOOST */
104
101static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu); 105static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
102static void invoke_rcu_cpu_kthread(void); 106static void invoke_rcu_core(void);
107static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
103 108
104#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */ 109#define RCU_KTHREAD_PRIO 1 /* RT priority for per-CPU kthreads. */
105 110
@@ -1088,14 +1093,8 @@ static void __rcu_offline_cpu(int cpu, struct rcu_state *rsp)
1088 int need_report = 0; 1093 int need_report = 0;
1089 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu); 1094 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
1090 struct rcu_node *rnp; 1095 struct rcu_node *rnp;
1091 struct task_struct *t;
1092 1096
1093 /* Stop the CPU's kthread. */ 1097 rcu_stop_cpu_kthread(cpu);
1094 t = per_cpu(rcu_cpu_kthread_task, cpu);
1095 if (t != NULL) {
1096 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1097 kthread_stop(t);
1098 }
1099 1098
1100 /* Exclude any attempts to start a new grace period. */ 1099 /* Exclude any attempts to start a new grace period. */
1101 raw_spin_lock_irqsave(&rsp->onofflock, flags); 1100 raw_spin_lock_irqsave(&rsp->onofflock, flags);
@@ -1231,7 +1230,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
1231 1230
1232 /* Re-raise the RCU softirq if there are callbacks remaining. */ 1231 /* Re-raise the RCU softirq if there are callbacks remaining. */
1233 if (cpu_has_callbacks_ready_to_invoke(rdp)) 1232 if (cpu_has_callbacks_ready_to_invoke(rdp))
1234 invoke_rcu_cpu_kthread(); 1233 invoke_rcu_core();
1235} 1234}
1236 1235
1237/* 1236/*
@@ -1277,7 +1276,7 @@ void rcu_check_callbacks(int cpu, int user)
1277 } 1276 }
1278 rcu_preempt_check_callbacks(cpu); 1277 rcu_preempt_check_callbacks(cpu);
1279 if (rcu_pending(cpu)) 1278 if (rcu_pending(cpu))
1280 invoke_rcu_cpu_kthread(); 1279 invoke_rcu_core();
1281} 1280}
1282 1281
1283#ifdef CONFIG_SMP 1282#ifdef CONFIG_SMP
@@ -1442,13 +1441,14 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1442 } 1441 }
1443 1442
1444 /* If there are callbacks ready, invoke them. */ 1443 /* If there are callbacks ready, invoke them. */
1445 rcu_do_batch(rsp, rdp); 1444 if (cpu_has_callbacks_ready_to_invoke(rdp))
1445 invoke_rcu_callbacks(rsp, rdp);
1446} 1446}
1447 1447
1448/* 1448/*
1449 * Do softirq processing for the current CPU. 1449 * Do softirq processing for the current CPU.
1450 */ 1450 */
1451static void rcu_process_callbacks(void) 1451static void rcu_process_callbacks(struct softirq_action *unused)
1452{ 1452{
1453 __rcu_process_callbacks(&rcu_sched_state, 1453 __rcu_process_callbacks(&rcu_sched_state,
1454 &__get_cpu_var(rcu_sched_data)); 1454 &__get_cpu_var(rcu_sched_data));
@@ -1465,342 +1465,20 @@ static void rcu_process_callbacks(void)
1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task 1465 * the current CPU with interrupts disabled, the rcu_cpu_kthread_task
1466 * cannot disappear out from under us. 1466 * cannot disappear out from under us.
1467 */ 1467 */
1468static void invoke_rcu_cpu_kthread(void) 1468static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
1469{
1470 unsigned long flags;
1471
1472 local_irq_save(flags);
1473 __this_cpu_write(rcu_cpu_has_work, 1);
1474 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1475 local_irq_restore(flags);
1476 return;
1477 }
1478 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1479 local_irq_restore(flags);
1480}
1481
1482/*
1483 * Wake up the specified per-rcu_node-structure kthread.
1484 * Because the per-rcu_node kthreads are immortal, we don't need
1485 * to do anything to keep them alive.
1486 */
1487static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1488{
1489 struct task_struct *t;
1490
1491 t = rnp->node_kthread_task;
1492 if (t != NULL)
1493 wake_up_process(t);
1494}
1495
1496/*
1497 * Set the specified CPU's kthread to run RT or not, as specified by
1498 * the to_rt argument. The CPU-hotplug locks are held, so the task
1499 * is not going away.
1500 */
1501static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1502{
1503 int policy;
1504 struct sched_param sp;
1505 struct task_struct *t;
1506
1507 t = per_cpu(rcu_cpu_kthread_task, cpu);
1508 if (t == NULL)
1509 return;
1510 if (to_rt) {
1511 policy = SCHED_FIFO;
1512 sp.sched_priority = RCU_KTHREAD_PRIO;
1513 } else {
1514 policy = SCHED_NORMAL;
1515 sp.sched_priority = 0;
1516 }
1517 sched_setscheduler_nocheck(t, policy, &sp);
1518}
1519
1520/*
1521 * Timer handler to initiate the waking up of per-CPU kthreads that
1522 * have yielded the CPU due to excess numbers of RCU callbacks.
1523 * We wake up the per-rcu_node kthread, which in turn will wake up
1524 * the booster kthread.
1525 */
1526static void rcu_cpu_kthread_timer(unsigned long arg)
1527{
1528 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1529 struct rcu_node *rnp = rdp->mynode;
1530
1531 atomic_or(rdp->grpmask, &rnp->wakemask);
1532 invoke_rcu_node_kthread(rnp);
1533}
1534
1535/*
1536 * Drop to non-real-time priority and yield, but only after posting a
1537 * timer that will cause us to regain our real-time priority if we
1538 * remain preempted. Either way, we restore our real-time priority
1539 * before returning.
1540 */
1541static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1542{
1543 struct sched_param sp;
1544 struct timer_list yield_timer;
1545
1546 setup_timer_on_stack(&yield_timer, f, arg);
1547 mod_timer(&yield_timer, jiffies + 2);
1548 sp.sched_priority = 0;
1549 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1550 set_user_nice(current, 19);
1551 schedule();
1552 sp.sched_priority = RCU_KTHREAD_PRIO;
1553 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1554 del_timer(&yield_timer);
1555}
1556
1557/*
1558 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1559 * This can happen while the corresponding CPU is either coming online
1560 * or going offline. We cannot wait until the CPU is fully online
1561 * before starting the kthread, because the various notifier functions
1562 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1563 * the corresponding CPU is online.
1564 *
1565 * Return 1 if the kthread needs to stop, 0 otherwise.
1566 *
1567 * Caller must disable bh. This function can momentarily enable it.
1568 */
1569static int rcu_cpu_kthread_should_stop(int cpu)
1570{
1571 while (cpu_is_offline(cpu) ||
1572 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1573 smp_processor_id() != cpu) {
1574 if (kthread_should_stop())
1575 return 1;
1576 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1577 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1578 local_bh_enable();
1579 schedule_timeout_uninterruptible(1);
1580 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1581 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1582 local_bh_disable();
1583 }
1584 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1585 return 0;
1586}
1587
1588/*
1589 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1590 * earlier RCU softirq.
1591 */
1592static int rcu_cpu_kthread(void *arg)
1593{
1594 int cpu = (int)(long)arg;
1595 unsigned long flags;
1596 int spincnt = 0;
1597 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1598 char work;
1599 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1600
1601 for (;;) {
1602 *statusp = RCU_KTHREAD_WAITING;
1603 rcu_wait(*workp != 0 || kthread_should_stop());
1604 local_bh_disable();
1605 if (rcu_cpu_kthread_should_stop(cpu)) {
1606 local_bh_enable();
1607 break;
1608 }
1609 *statusp = RCU_KTHREAD_RUNNING;
1610 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1611 local_irq_save(flags);
1612 work = *workp;
1613 *workp = 0;
1614 local_irq_restore(flags);
1615 if (work)
1616 rcu_process_callbacks();
1617 local_bh_enable();
1618 if (*workp != 0)
1619 spincnt++;
1620 else
1621 spincnt = 0;
1622 if (spincnt > 10) {
1623 *statusp = RCU_KTHREAD_YIELDING;
1624 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1625 spincnt = 0;
1626 }
1627 }
1628 *statusp = RCU_KTHREAD_STOPPED;
1629 return 0;
1630}
1631
1632/*
1633 * Spawn a per-CPU kthread, setting up affinity and priority.
1634 * Because the CPU hotplug lock is held, no other CPU will be attempting
1635 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1636 * attempting to access it during boot, but the locking in kthread_bind()
1637 * will enforce sufficient ordering.
1638 */
1639static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1640{ 1469{
1641 struct sched_param sp; 1470 if (likely(!rsp->boost)) {
1642 struct task_struct *t; 1471 rcu_do_batch(rsp, rdp);
1643
1644 if (!rcu_kthreads_spawnable ||
1645 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1646 return 0;
1647 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 kthread_bind(t, cpu);
1651 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1652 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1653 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1654 sp.sched_priority = RCU_KTHREAD_PRIO;
1655 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1656 return 0;
1657}
1658
1659/*
1660 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1661 * kthreads when needed. We ignore requests to wake up kthreads
1662 * for offline CPUs, which is OK because force_quiescent_state()
1663 * takes care of this case.
1664 */
1665static int rcu_node_kthread(void *arg)
1666{
1667 int cpu;
1668 unsigned long flags;
1669 unsigned long mask;
1670 struct rcu_node *rnp = (struct rcu_node *)arg;
1671 struct sched_param sp;
1672 struct task_struct *t;
1673
1674 for (;;) {
1675 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1676 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1677 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1678 raw_spin_lock_irqsave(&rnp->lock, flags);
1679 mask = atomic_xchg(&rnp->wakemask, 0);
1680 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1681 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1682 if ((mask & 0x1) == 0)
1683 continue;
1684 preempt_disable();
1685 t = per_cpu(rcu_cpu_kthread_task, cpu);
1686 if (!cpu_online(cpu) || t == NULL) {
1687 preempt_enable();
1688 continue;
1689 }
1690 per_cpu(rcu_cpu_has_work, cpu) = 1;
1691 sp.sched_priority = RCU_KTHREAD_PRIO;
1692 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1693 preempt_enable();
1694 }
1695 }
1696 /* NOTREACHED */
1697 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1698 return 0;
1699}
1700
1701/*
1702 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1703 * served by the rcu_node in question. The CPU hotplug lock is still
1704 * held, so the value of rnp->qsmaskinit will be stable.
1705 *
1706 * We don't include outgoingcpu in the affinity set, use -1 if there is
1707 * no outgoing CPU. If there are no CPUs left in the affinity set,
1708 * this function allows the kthread to execute on any CPU.
1709 */
1710static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1711{
1712 cpumask_var_t cm;
1713 int cpu;
1714 unsigned long mask = rnp->qsmaskinit;
1715
1716 if (rnp->node_kthread_task == NULL)
1717 return;
1718 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1719 return; 1472 return;
1720 cpumask_clear(cm);
1721 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1722 if ((mask & 0x1) && cpu != outgoingcpu)
1723 cpumask_set_cpu(cpu, cm);
1724 if (cpumask_weight(cm) == 0) {
1725 cpumask_setall(cm);
1726 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1727 cpumask_clear_cpu(cpu, cm);
1728 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1729 } 1473 }
1730 set_cpus_allowed_ptr(rnp->node_kthread_task, cm); 1474 invoke_rcu_callbacks_kthread();
1731 rcu_boost_kthread_setaffinity(rnp, cm);
1732 free_cpumask_var(cm);
1733} 1475}
1734 1476
1735/* 1477static void invoke_rcu_core(void)
1736 * Spawn a per-rcu_node kthread, setting priority and affinity.
1737 * Called during boot before online/offline can happen, or, if
1738 * during runtime, with the main CPU-hotplug locks held. So only
1739 * one of these can be executing at a time.
1740 */
1741static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1742 struct rcu_node *rnp)
1743{ 1478{
1744 unsigned long flags; 1479 raise_softirq(RCU_SOFTIRQ);
1745 int rnp_index = rnp - &rsp->node[0];
1746 struct sched_param sp;
1747 struct task_struct *t;
1748
1749 if (!rcu_kthreads_spawnable ||
1750 rnp->qsmaskinit == 0)
1751 return 0;
1752 if (rnp->node_kthread_task == NULL) {
1753 t = kthread_create(rcu_node_kthread, (void *)rnp,
1754 "rcun%d", rnp_index);
1755 if (IS_ERR(t))
1756 return PTR_ERR(t);
1757 raw_spin_lock_irqsave(&rnp->lock, flags);
1758 rnp->node_kthread_task = t;
1759 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1760 sp.sched_priority = 99;
1761 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1762 }
1763 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1764} 1480}
1765 1481
1766static void rcu_wake_one_boost_kthread(struct rcu_node *rnp);
1767
1768/*
1769 * Spawn all kthreads -- called as soon as the scheduler is running.
1770 */
1771static int __init rcu_spawn_kthreads(void)
1772{
1773 int cpu;
1774 struct rcu_node *rnp;
1775 struct task_struct *t;
1776
1777 rcu_kthreads_spawnable = 1;
1778 for_each_possible_cpu(cpu) {
1779 per_cpu(rcu_cpu_has_work, cpu) = 0;
1780 if (cpu_online(cpu)) {
1781 (void)rcu_spawn_one_cpu_kthread(cpu);
1782 t = per_cpu(rcu_cpu_kthread_task, cpu);
1783 if (t)
1784 wake_up_process(t);
1785 }
1786 }
1787 rnp = rcu_get_root(rcu_state);
1788 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1789 if (rnp->node_kthread_task)
1790 wake_up_process(rnp->node_kthread_task);
1791 if (NUM_RCU_NODES > 1) {
1792 rcu_for_each_leaf_node(rcu_state, rnp) {
1793 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1794 t = rnp->node_kthread_task;
1795 if (t)
1796 wake_up_process(t);
1797 rcu_wake_one_boost_kthread(rnp);
1798 }
1799 }
1800 return 0;
1801}
1802early_initcall(rcu_spawn_kthreads);
1803
1804static void 1482static void
1805__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu), 1483__call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
1806 struct rcu_state *rsp) 1484 struct rcu_state *rsp)
@@ -2207,44 +1885,6 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
2207 rcu_preempt_init_percpu_data(cpu); 1885 rcu_preempt_init_percpu_data(cpu);
2208} 1886}
2209 1887
2210static void __cpuinit rcu_prepare_kthreads(int cpu)
2211{
2212 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2213 struct rcu_node *rnp = rdp->mynode;
2214
2215 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
2216 if (rcu_kthreads_spawnable) {
2217 (void)rcu_spawn_one_cpu_kthread(cpu);
2218 if (rnp->node_kthread_task == NULL)
2219 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
2220 }
2221}
2222
2223/*
2224 * kthread_create() creates threads in TASK_UNINTERRUPTIBLE state,
2225 * but the RCU threads are woken on demand, and if demand is low this
2226 * could be a while triggering the hung task watchdog.
2227 *
2228 * In order to avoid this, poke all tasks once the CPU is fully
2229 * up and running.
2230 */
2231static void __cpuinit rcu_online_kthreads(int cpu)
2232{
2233 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
2234 struct rcu_node *rnp = rdp->mynode;
2235 struct task_struct *t;
2236
2237 t = per_cpu(rcu_cpu_kthread_task, cpu);
2238 if (t)
2239 wake_up_process(t);
2240
2241 t = rnp->node_kthread_task;
2242 if (t)
2243 wake_up_process(t);
2244
2245 rcu_wake_one_boost_kthread(rnp);
2246}
2247
2248/* 1888/*
2249 * Handle CPU online/offline notification events. 1889 * Handle CPU online/offline notification events.
2250 */ 1890 */
@@ -2262,7 +1902,6 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
2262 rcu_prepare_kthreads(cpu); 1902 rcu_prepare_kthreads(cpu);
2263 break; 1903 break;
2264 case CPU_ONLINE: 1904 case CPU_ONLINE:
2265 rcu_online_kthreads(cpu);
2266 case CPU_DOWN_FAILED: 1905 case CPU_DOWN_FAILED:
2267 rcu_node_kthread_setaffinity(rnp, -1); 1906 rcu_node_kthread_setaffinity(rnp, -1);
2268 rcu_cpu_kthread_setrt(cpu, 1); 1907 rcu_cpu_kthread_setrt(cpu, 1);
@@ -2410,6 +2049,7 @@ void __init rcu_init(void)
2410 rcu_init_one(&rcu_sched_state, &rcu_sched_data); 2049 rcu_init_one(&rcu_sched_state, &rcu_sched_data);
2411 rcu_init_one(&rcu_bh_state, &rcu_bh_data); 2050 rcu_init_one(&rcu_bh_state, &rcu_bh_data);
2412 __rcu_init_preempt(); 2051 __rcu_init_preempt();
2052 open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
2413 2053
2414 /* 2054 /*
2415 * We don't need protection against CPU-hotplug here because 2055 * We don't need protection against CPU-hotplug here because
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7b9a08b4aaea..01b2ccda26fb 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -369,6 +369,7 @@ struct rcu_state {
369 /* period because */ 369 /* period because */
370 /* force_quiescent_state() */ 370 /* force_quiescent_state() */
371 /* was running. */ 371 /* was running. */
372 u8 boost; /* Subject to priority boost. */
372 unsigned long gpnum; /* Current gp number. */ 373 unsigned long gpnum; /* Current gp number. */
373 unsigned long completed; /* # of last completed gp. */ 374 unsigned long completed; /* # of last completed gp. */
374 375
@@ -426,6 +427,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
426#ifdef CONFIG_HOTPLUG_CPU 427#ifdef CONFIG_HOTPLUG_CPU
427static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, 428static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
428 unsigned long flags); 429 unsigned long flags);
430static void rcu_stop_cpu_kthread(int cpu);
429#endif /* #ifdef CONFIG_HOTPLUG_CPU */ 431#endif /* #ifdef CONFIG_HOTPLUG_CPU */
430static void rcu_print_detail_task_stall(struct rcu_state *rsp); 432static void rcu_print_detail_task_stall(struct rcu_state *rsp);
431static void rcu_print_task_stall(struct rcu_node *rnp); 433static void rcu_print_task_stall(struct rcu_node *rnp);
@@ -450,11 +452,19 @@ static void rcu_preempt_send_cbs_to_online(void);
450static void __init __rcu_init_preempt(void); 452static void __init __rcu_init_preempt(void);
451static void rcu_needs_cpu_flush(void); 453static void rcu_needs_cpu_flush(void);
452static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); 454static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static void invoke_rcu_callbacks_kthread(void);
457#ifdef CONFIG_RCU_BOOST
458static void rcu_preempt_do_callbacks(void);
453static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 459static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp,
454 cpumask_var_t cm); 460 cpumask_var_t cm);
455static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
456static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 461static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
457 struct rcu_node *rnp, 462 struct rcu_node *rnp,
458 int rnp_index); 463 int rnp_index);
464static void invoke_rcu_node_kthread(struct rcu_node *rnp);
465static void rcu_yield(void (*f)(unsigned long), unsigned long arg);
466#endif /* #ifdef CONFIG_RCU_BOOST */
467static void rcu_cpu_kthread_setrt(int cpu, int to_rt);
468static void __cpuinit rcu_prepare_kthreads(int cpu);
459 469
460#endif /* #ifndef RCU_TREE_NONCORE */ 470#endif /* #ifndef RCU_TREE_NONCORE */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index c8bff3099a89..14dc7dd00902 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -602,6 +602,15 @@ static void rcu_preempt_process_callbacks(void)
602 &__get_cpu_var(rcu_preempt_data)); 602 &__get_cpu_var(rcu_preempt_data));
603} 603}
604 604
605#ifdef CONFIG_RCU_BOOST
606
607static void rcu_preempt_do_callbacks(void)
608{
609 rcu_do_batch(&rcu_preempt_state, &__get_cpu_var(rcu_preempt_data));
610}
611
612#endif /* #ifdef CONFIG_RCU_BOOST */
613
605/* 614/*
606 * Queue a preemptible-RCU callback for invocation after a grace period. 615 * Queue a preemptible-RCU callback for invocation after a grace period.
607 */ 616 */
@@ -1249,6 +1258,23 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1249} 1258}
1250 1259
1251/* 1260/*
1261 * Wake up the per-CPU kthread to invoke RCU callbacks.
1262 */
1263static void invoke_rcu_callbacks_kthread(void)
1264{
1265 unsigned long flags;
1266
1267 local_irq_save(flags);
1268 __this_cpu_write(rcu_cpu_has_work, 1);
1269 if (__this_cpu_read(rcu_cpu_kthread_task) == NULL) {
1270 local_irq_restore(flags);
1271 return;
1272 }
1273 wake_up_process(__this_cpu_read(rcu_cpu_kthread_task));
1274 local_irq_restore(flags);
1275}
1276
1277/*
1252 * Set the affinity of the boost kthread. The CPU-hotplug locks are 1278 * Set the affinity of the boost kthread. The CPU-hotplug locks are
1253 * held, so no one should be messing with the existence of the boost 1279 * held, so no one should be messing with the existence of the boost
1254 * kthread. 1280 * kthread.
@@ -1288,6 +1314,7 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1288 1314
1289 if (&rcu_preempt_state != rsp) 1315 if (&rcu_preempt_state != rsp)
1290 return 0; 1316 return 0;
1317 rsp->boost = 1;
1291 if (rnp->boost_kthread_task != NULL) 1318 if (rnp->boost_kthread_task != NULL)
1292 return 0; 1319 return 0;
1293 t = kthread_create(rcu_boost_kthread, (void *)rnp, 1320 t = kthread_create(rcu_boost_kthread, (void *)rnp,
@@ -1299,13 +1326,372 @@ static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
1299 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1326 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1300 sp.sched_priority = RCU_KTHREAD_PRIO; 1327 sp.sched_priority = RCU_KTHREAD_PRIO;
1301 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); 1328 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1329 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1302 return 0; 1330 return 0;
1303} 1331}
1304 1332
1305static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1333#ifdef CONFIG_HOTPLUG_CPU
1334
1335/*
1336 * Stop the RCU's per-CPU kthread when its CPU goes offline,.
1337 */
1338static void rcu_stop_cpu_kthread(int cpu)
1306{ 1339{
1307 if (rnp->boost_kthread_task) 1340 struct task_struct *t;
1308 wake_up_process(rnp->boost_kthread_task); 1341
1342 /* Stop the CPU's kthread. */
1343 t = per_cpu(rcu_cpu_kthread_task, cpu);
1344 if (t != NULL) {
1345 per_cpu(rcu_cpu_kthread_task, cpu) = NULL;
1346 kthread_stop(t);
1347 }
1348}
1349
1350#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1351
1352static void rcu_kthread_do_work(void)
1353{
1354 rcu_do_batch(&rcu_sched_state, &__get_cpu_var(rcu_sched_data));
1355 rcu_do_batch(&rcu_bh_state, &__get_cpu_var(rcu_bh_data));
1356 rcu_preempt_do_callbacks();
1357}
1358
1359/*
1360 * Wake up the specified per-rcu_node-structure kthread.
1361 * Because the per-rcu_node kthreads are immortal, we don't need
1362 * to do anything to keep them alive.
1363 */
1364static void invoke_rcu_node_kthread(struct rcu_node *rnp)
1365{
1366 struct task_struct *t;
1367
1368 t = rnp->node_kthread_task;
1369 if (t != NULL)
1370 wake_up_process(t);
1371}
1372
1373/*
1374 * Set the specified CPU's kthread to run RT or not, as specified by
1375 * the to_rt argument. The CPU-hotplug locks are held, so the task
1376 * is not going away.
1377 */
1378static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1379{
1380 int policy;
1381 struct sched_param sp;
1382 struct task_struct *t;
1383
1384 t = per_cpu(rcu_cpu_kthread_task, cpu);
1385 if (t == NULL)
1386 return;
1387 if (to_rt) {
1388 policy = SCHED_FIFO;
1389 sp.sched_priority = RCU_KTHREAD_PRIO;
1390 } else {
1391 policy = SCHED_NORMAL;
1392 sp.sched_priority = 0;
1393 }
1394 sched_setscheduler_nocheck(t, policy, &sp);
1395}
1396
1397/*
1398 * Timer handler to initiate the waking up of per-CPU kthreads that
1399 * have yielded the CPU due to excess numbers of RCU callbacks.
1400 * We wake up the per-rcu_node kthread, which in turn will wake up
1401 * the booster kthread.
1402 */
1403static void rcu_cpu_kthread_timer(unsigned long arg)
1404{
1405 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, arg);
1406 struct rcu_node *rnp = rdp->mynode;
1407
1408 atomic_or(rdp->grpmask, &rnp->wakemask);
1409 invoke_rcu_node_kthread(rnp);
1410}
1411
1412/*
1413 * Drop to non-real-time priority and yield, but only after posting a
1414 * timer that will cause us to regain our real-time priority if we
1415 * remain preempted. Either way, we restore our real-time priority
1416 * before returning.
1417 */
1418static void rcu_yield(void (*f)(unsigned long), unsigned long arg)
1419{
1420 struct sched_param sp;
1421 struct timer_list yield_timer;
1422
1423 setup_timer_on_stack(&yield_timer, f, arg);
1424 mod_timer(&yield_timer, jiffies + 2);
1425 sp.sched_priority = 0;
1426 sched_setscheduler_nocheck(current, SCHED_NORMAL, &sp);
1427 set_user_nice(current, 19);
1428 schedule();
1429 sp.sched_priority = RCU_KTHREAD_PRIO;
1430 sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
1431 del_timer(&yield_timer);
1432}
1433
1434/*
1435 * Handle cases where the rcu_cpu_kthread() ends up on the wrong CPU.
1436 * This can happen while the corresponding CPU is either coming online
1437 * or going offline. We cannot wait until the CPU is fully online
1438 * before starting the kthread, because the various notifier functions
1439 * can wait for RCU grace periods. So we park rcu_cpu_kthread() until
1440 * the corresponding CPU is online.
1441 *
1442 * Return 1 if the kthread needs to stop, 0 otherwise.
1443 *
1444 * Caller must disable bh. This function can momentarily enable it.
1445 */
1446static int rcu_cpu_kthread_should_stop(int cpu)
1447{
1448 while (cpu_is_offline(cpu) ||
1449 !cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)) ||
1450 smp_processor_id() != cpu) {
1451 if (kthread_should_stop())
1452 return 1;
1453 per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
1454 per_cpu(rcu_cpu_kthread_cpu, cpu) = raw_smp_processor_id();
1455 local_bh_enable();
1456 schedule_timeout_uninterruptible(1);
1457 if (!cpumask_equal(&current->cpus_allowed, cpumask_of(cpu)))
1458 set_cpus_allowed_ptr(current, cpumask_of(cpu));
1459 local_bh_disable();
1460 }
1461 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1462 return 0;
1463}
1464
1465/*
1466 * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
1467 * earlier RCU softirq.
1468 */
1469static int rcu_cpu_kthread(void *arg)
1470{
1471 int cpu = (int)(long)arg;
1472 unsigned long flags;
1473 int spincnt = 0;
1474 unsigned int *statusp = &per_cpu(rcu_cpu_kthread_status, cpu);
1475 char work;
1476 char *workp = &per_cpu(rcu_cpu_has_work, cpu);
1477
1478 for (;;) {
1479 *statusp = RCU_KTHREAD_WAITING;
1480 rcu_wait(*workp != 0 || kthread_should_stop());
1481 local_bh_disable();
1482 if (rcu_cpu_kthread_should_stop(cpu)) {
1483 local_bh_enable();
1484 break;
1485 }
1486 *statusp = RCU_KTHREAD_RUNNING;
1487 per_cpu(rcu_cpu_kthread_loops, cpu)++;
1488 local_irq_save(flags);
1489 work = *workp;
1490 *workp = 0;
1491 local_irq_restore(flags);
1492 if (work)
1493 rcu_kthread_do_work();
1494 local_bh_enable();
1495 if (*workp != 0)
1496 spincnt++;
1497 else
1498 spincnt = 0;
1499 if (spincnt > 10) {
1500 *statusp = RCU_KTHREAD_YIELDING;
1501 rcu_yield(rcu_cpu_kthread_timer, (unsigned long)cpu);
1502 spincnt = 0;
1503 }
1504 }
1505 *statusp = RCU_KTHREAD_STOPPED;
1506 return 0;
1507}
1508
1509/*
1510 * Spawn a per-CPU kthread, setting up affinity and priority.
1511 * Because the CPU hotplug lock is held, no other CPU will be attempting
1512 * to manipulate rcu_cpu_kthread_task. There might be another CPU
1513 * attempting to access it during boot, but the locking in kthread_bind()
1514 * will enforce sufficient ordering.
1515 *
1516 * Please note that we cannot simply refuse to wake up the per-CPU
1517 * kthread because kthreads are created in TASK_UNINTERRUPTIBLE state,
1518 * which can result in softlockup complaints if the task ends up being
1519 * idle for more than a couple of minutes.
1520 *
1521 * However, please note also that we cannot bind the per-CPU kthread to its
1522 * CPU until that CPU is fully online. We also cannot wait until the
1523 * CPU is fully online before we create its per-CPU kthread, as this would
1524 * deadlock the system when CPU notifiers tried waiting for grace
1525 * periods. So we bind the per-CPU kthread to its CPU only if the CPU
1526 * is online. If its CPU is not yet fully online, then the code in
1527 * rcu_cpu_kthread() will wait until it is fully online, and then do
1528 * the binding.
1529 */
1530static int __cpuinit rcu_spawn_one_cpu_kthread(int cpu)
1531{
1532 struct sched_param sp;
1533 struct task_struct *t;
1534
1535 if (!rcu_kthreads_spawnable ||
1536 per_cpu(rcu_cpu_kthread_task, cpu) != NULL)
1537 return 0;
1538 t = kthread_create(rcu_cpu_kthread, (void *)(long)cpu, "rcuc%d", cpu);
1539 if (IS_ERR(t))
1540 return PTR_ERR(t);
1541 if (cpu_online(cpu))
1542 kthread_bind(t, cpu);
1543 per_cpu(rcu_cpu_kthread_cpu, cpu) = cpu;
1544 WARN_ON_ONCE(per_cpu(rcu_cpu_kthread_task, cpu) != NULL);
1545 sp.sched_priority = RCU_KTHREAD_PRIO;
1546 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1547 per_cpu(rcu_cpu_kthread_task, cpu) = t;
1548 wake_up_process(t); /* Get to TASK_INTERRUPTIBLE quickly. */
1549 return 0;
1550}
1551
1552/*
1553 * Per-rcu_node kthread, which is in charge of waking up the per-CPU
1554 * kthreads when needed. We ignore requests to wake up kthreads
1555 * for offline CPUs, which is OK because force_quiescent_state()
1556 * takes care of this case.
1557 */
1558static int rcu_node_kthread(void *arg)
1559{
1560 int cpu;
1561 unsigned long flags;
1562 unsigned long mask;
1563 struct rcu_node *rnp = (struct rcu_node *)arg;
1564 struct sched_param sp;
1565 struct task_struct *t;
1566
1567 for (;;) {
1568 rnp->node_kthread_status = RCU_KTHREAD_WAITING;
1569 rcu_wait(atomic_read(&rnp->wakemask) != 0);
1570 rnp->node_kthread_status = RCU_KTHREAD_RUNNING;
1571 raw_spin_lock_irqsave(&rnp->lock, flags);
1572 mask = atomic_xchg(&rnp->wakemask, 0);
1573 rcu_initiate_boost(rnp, flags); /* releases rnp->lock. */
1574 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1) {
1575 if ((mask & 0x1) == 0)
1576 continue;
1577 preempt_disable();
1578 t = per_cpu(rcu_cpu_kthread_task, cpu);
1579 if (!cpu_online(cpu) || t == NULL) {
1580 preempt_enable();
1581 continue;
1582 }
1583 per_cpu(rcu_cpu_has_work, cpu) = 1;
1584 sp.sched_priority = RCU_KTHREAD_PRIO;
1585 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1586 preempt_enable();
1587 }
1588 }
1589 /* NOTREACHED */
1590 rnp->node_kthread_status = RCU_KTHREAD_STOPPED;
1591 return 0;
1592}
1593
1594/*
1595 * Set the per-rcu_node kthread's affinity to cover all CPUs that are
1596 * served by the rcu_node in question. The CPU hotplug lock is still
1597 * held, so the value of rnp->qsmaskinit will be stable.
1598 *
1599 * We exclude outgoingcpu from the affinity set; use -1 if there is
1600 * no outgoing CPU. If no CPUs remain in the affinity set,
1601 * this function allows the kthread to execute on any CPU.
1602 */
1603static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1604{
1605 cpumask_var_t cm;
1606 int cpu;
1607 unsigned long mask = rnp->qsmaskinit;
1608
1609 if (rnp->node_kthread_task == NULL)
1610 return;
1611 if (!alloc_cpumask_var(&cm, GFP_KERNEL))
1612 return;
1613 cpumask_clear(cm);
1614 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++, mask >>= 1)
1615 if ((mask & 0x1) && cpu != outgoingcpu)
1616 cpumask_set_cpu(cpu, cm);
1617 if (cpumask_weight(cm) == 0) {
1618 cpumask_setall(cm);
1619 for (cpu = rnp->grplo; cpu <= rnp->grphi; cpu++)
1620 cpumask_clear_cpu(cpu, cm);
1621 WARN_ON_ONCE(cpumask_weight(cm) == 0);
1622 }
1623 set_cpus_allowed_ptr(rnp->node_kthread_task, cm);
1624 rcu_boost_kthread_setaffinity(rnp, cm);
1625 free_cpumask_var(cm);
1626}
1627
1628/*
1629 * Spawn a per-rcu_node kthread, setting priority and affinity.
1630 * Called during boot before CPU online/offline can happen, or, at
1631 * runtime, with the main CPU-hotplug locks held. So only
1632 * one of these can be executing at a time.
1633 */
1634static int __cpuinit rcu_spawn_one_node_kthread(struct rcu_state *rsp,
1635 struct rcu_node *rnp)
1636{
1637 unsigned long flags;
1638 int rnp_index = rnp - &rsp->node[0];
1639 struct sched_param sp;
1640 struct task_struct *t;
1641
1642 if (!rcu_kthreads_spawnable ||
1643 rnp->qsmaskinit == 0)
1644 return 0;
1645 if (rnp->node_kthread_task == NULL) {
1646 t = kthread_create(rcu_node_kthread, (void *)rnp,
1647 "rcun%d", rnp_index);
1648 if (IS_ERR(t))
1649 return PTR_ERR(t);
1650 raw_spin_lock_irqsave(&rnp->lock, flags);
1651 rnp->node_kthread_task = t;
1652 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1653 sp.sched_priority = 99;
1654 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
1655 wake_up_process(t); /* get to TASK_INTERRUPTIBLE quickly. */
1656 }
1657 return rcu_spawn_one_boost_kthread(rsp, rnp, rnp_index);
1658}
1659
1660/*
1661 * Spawn all kthreads -- called as soon as the scheduler is running.
1662 */
1663static int __init rcu_spawn_kthreads(void)
1664{
1665 int cpu;
1666 struct rcu_node *rnp;
1667
1668 rcu_kthreads_spawnable = 1;
1669 for_each_possible_cpu(cpu) {
1670 per_cpu(rcu_cpu_has_work, cpu) = 0;
1671 if (cpu_online(cpu))
1672 (void)rcu_spawn_one_cpu_kthread(cpu);
1673 }
1674 rnp = rcu_get_root(rcu_state);
1675 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1676 if (NUM_RCU_NODES > 1) {
1677 rcu_for_each_leaf_node(rcu_state, rnp)
1678 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1679 }
1680 return 0;
1681}
1682early_initcall(rcu_spawn_kthreads);
1683
1684static void __cpuinit rcu_prepare_kthreads(int cpu)
1685{
1686 struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
1687 struct rcu_node *rnp = rdp->mynode;
1688
1689 /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
1690 if (rcu_kthreads_spawnable) {
1691 (void)rcu_spawn_one_cpu_kthread(cpu);
1692 if (rnp->node_kthread_task == NULL)
1693 (void)rcu_spawn_one_node_kthread(rcu_state, rnp);
1694 }
1309} 1695}
1310 1696
1311#else /* #ifdef CONFIG_RCU_BOOST */ 1697#else /* #ifdef CONFIG_RCU_BOOST */
@@ -1315,23 +1701,32 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags)
1315 raw_spin_unlock_irqrestore(&rnp->lock, flags); 1701 raw_spin_unlock_irqrestore(&rnp->lock, flags);
1316} 1702}
1317 1703
1318static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, 1704static void invoke_rcu_callbacks_kthread(void)
1319 cpumask_var_t cm)
1320{ 1705{
1706 WARN_ON_ONCE(1);
1321} 1707}
1322 1708
1323static void rcu_preempt_boost_start_gp(struct rcu_node *rnp) 1709static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
1324{ 1710{
1325} 1711}
1326 1712
1327static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp, 1713#ifdef CONFIG_HOTPLUG_CPU
1328 struct rcu_node *rnp, 1714
1329 int rnp_index) 1715static void rcu_stop_cpu_kthread(int cpu)
1716{
1717}
1718
1719#endif /* #ifdef CONFIG_HOTPLUG_CPU */
1720
1721static void rcu_node_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
1722{
1723}
1724
1725static void rcu_cpu_kthread_setrt(int cpu, int to_rt)
1330{ 1726{
1331 return 0;
1332} 1727}
1333 1728
1334static void __cpuinit rcu_wake_one_boost_kthread(struct rcu_node *rnp) 1729static void __cpuinit rcu_prepare_kthreads(int cpu)
1335{ 1730{
1336} 1731}
1337 1732
@@ -1509,7 +1904,7 @@ static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1509 * 1904 *
1510 * Because it is not legal to invoke rcu_process_callbacks() with irqs 1905 * Because it is not legal to invoke rcu_process_callbacks() with irqs
1511 * disabled, we do one pass of force_quiescent_state(), then do a 1906 * disabled, we do one pass of force_quiescent_state(), then do a
1512 * invoke_rcu_cpu_kthread() to cause rcu_process_callbacks() to be invoked 1907 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
1513 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 1908 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing.
1514 */ 1909 */
1515int rcu_needs_cpu(int cpu) 1910int rcu_needs_cpu(int cpu)
@@ -1560,7 +1955,7 @@ int rcu_needs_cpu(int cpu)
1560 1955
1561 /* If RCU callbacks are still pending, RCU still needs this CPU. */ 1956 /* If RCU callbacks are still pending, RCU still needs this CPU. */
1562 if (c) 1957 if (c)
1563 invoke_rcu_cpu_kthread(); 1958 invoke_rcu_core();
1564 return c; 1959 return c;
1565} 1960}
1566 1961
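
The rcu_yield() hunk above arms an on-stack timer, drops to SCHED_NORMAL, sleeps, and then restores SCHED_FIFO before returning. As a rough illustration of the drop-and-restore part only, here is a minimal userspace C sketch using the POSIX scheduling calls; the function name, the priority value, and the omission of the timer are simplifications, not the kernel code:

/*
 * Illustrative userspace sketch (not kernel code): temporarily drop from
 * SCHED_FIFO to SCHED_OTHER, yield the CPU, then restore the RT priority,
 * mirroring the drop/restore shape of rcu_yield() above.
 */
#include <sched.h>
#include <stdio.h>

static int yield_at_normal_priority(int rt_prio)
{
        struct sched_param sp;

        /* Drop to the normal (SCHED_OTHER) class. */
        sp.sched_priority = 0;
        if (sched_setscheduler(0, SCHED_OTHER, &sp) != 0)
                return -1;

        /* Give other runnable tasks a chance to run. */
        sched_yield();

        /* Restore real-time priority before returning to the caller. */
        sp.sched_priority = rt_prio;
        return sched_setscheduler(0, SCHED_FIFO, &sp);
}

int main(void)
{
        /* Needs CAP_SYS_NICE / root to enter SCHED_FIFO at all. */
        if (yield_at_normal_priority(1) != 0)
                perror("sched_setscheduler");
        return 0;
}
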
diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
index 9678cc3650f5..4e144876dc68 100644
--- a/kernel/rcutree_trace.c
+++ b/kernel/rcutree_trace.c
@@ -46,6 +46,8 @@
46#define RCU_TREE_NONCORE 46#define RCU_TREE_NONCORE
47#include "rcutree.h" 47#include "rcutree.h"
48 48
49#ifdef CONFIG_RCU_BOOST
50
49DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); 51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
50DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu); 52DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_cpu);
51DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); 53DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
@@ -58,6 +60,8 @@ static char convert_kthread_status(unsigned int kthread_status)
58 return "SRWOY"[kthread_status]; 60 return "SRWOY"[kthread_status];
59} 61}
60 62
63#endif /* #ifdef CONFIG_RCU_BOOST */
64
61static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp) 65static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
62{ 66{
63 if (!rdp->beenonline) 67 if (!rdp->beenonline)
@@ -76,7 +80,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76 rdp->dynticks_fqs); 80 rdp->dynticks_fqs);
77#endif /* #ifdef CONFIG_NO_HZ */ 81#endif /* #ifdef CONFIG_NO_HZ */
78 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi); 82 seq_printf(m, " of=%lu ri=%lu", rdp->offline_fqs, rdp->resched_ipi);
79 seq_printf(m, " ql=%ld qs=%c%c%c%c kt=%d/%c/%d ktl=%x b=%ld", 83 seq_printf(m, " ql=%ld qs=%c%c%c%c",
80 rdp->qlen, 84 rdp->qlen,
81 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 85 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
82 rdp->nxttail[RCU_NEXT_TAIL]], 86 rdp->nxttail[RCU_NEXT_TAIL]],
@@ -84,13 +88,16 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
84 rdp->nxttail[RCU_NEXT_READY_TAIL]], 88 rdp->nxttail[RCU_NEXT_READY_TAIL]],
85 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 89 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
86 rdp->nxttail[RCU_WAIT_TAIL]], 90 rdp->nxttail[RCU_WAIT_TAIL]],
87 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 91 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
92#ifdef CONFIG_RCU_BOOST
93 seq_printf(m, " kt=%d/%c/%d ktl=%x",
88 per_cpu(rcu_cpu_has_work, rdp->cpu), 94 per_cpu(rcu_cpu_has_work, rdp->cpu),
89 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 95 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
90 rdp->cpu)), 96 rdp->cpu)),
91 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu), 97 per_cpu(rcu_cpu_kthread_cpu, rdp->cpu),
92 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff, 98 per_cpu(rcu_cpu_kthread_loops, rdp->cpu) & 0xffff);
93 rdp->blimit); 99#endif /* #ifdef CONFIG_RCU_BOOST */
100 seq_printf(m, " b=%ld", rdp->blimit);
94 seq_printf(m, " ci=%lu co=%lu ca=%lu\n", 101 seq_printf(m, " ci=%lu co=%lu ca=%lu\n",
95 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 102 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
96} 103}
@@ -147,18 +154,21 @@ static void print_one_rcu_data_csv(struct seq_file *m, struct rcu_data *rdp)
147 rdp->dynticks_fqs); 154 rdp->dynticks_fqs);
148#endif /* #ifdef CONFIG_NO_HZ */ 155#endif /* #ifdef CONFIG_NO_HZ */
149 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi); 156 seq_printf(m, ",%lu,%lu", rdp->offline_fqs, rdp->resched_ipi);
150 seq_printf(m, ",%ld,\"%c%c%c%c\",%d,\"%c\",%ld", rdp->qlen, 157 seq_printf(m, ",%ld,\"%c%c%c%c\"", rdp->qlen,
151 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] != 158 ".N"[rdp->nxttail[RCU_NEXT_READY_TAIL] !=
152 rdp->nxttail[RCU_NEXT_TAIL]], 159 rdp->nxttail[RCU_NEXT_TAIL]],
153 ".R"[rdp->nxttail[RCU_WAIT_TAIL] != 160 ".R"[rdp->nxttail[RCU_WAIT_TAIL] !=
154 rdp->nxttail[RCU_NEXT_READY_TAIL]], 161 rdp->nxttail[RCU_NEXT_READY_TAIL]],
155 ".W"[rdp->nxttail[RCU_DONE_TAIL] != 162 ".W"[rdp->nxttail[RCU_DONE_TAIL] !=
156 rdp->nxttail[RCU_WAIT_TAIL]], 163 rdp->nxttail[RCU_WAIT_TAIL]],
157 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]], 164 ".D"[&rdp->nxtlist != rdp->nxttail[RCU_DONE_TAIL]]);
165#ifdef CONFIG_RCU_BOOST
166 seq_printf(m, ",%d,\"%c\"",
158 per_cpu(rcu_cpu_has_work, rdp->cpu), 167 per_cpu(rcu_cpu_has_work, rdp->cpu),
159 convert_kthread_status(per_cpu(rcu_cpu_kthread_status, 168 convert_kthread_status(per_cpu(rcu_cpu_kthread_status,
160 rdp->cpu)), 169 rdp->cpu)));
161 rdp->blimit); 170#endif /* #ifdef CONFIG_RCU_BOOST */
171 seq_printf(m, ",%ld", rdp->blimit);
162 seq_printf(m, ",%lu,%lu,%lu\n", 172 seq_printf(m, ",%lu,%lu,%lu\n",
163 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted); 173 rdp->n_cbs_invoked, rdp->n_cbs_orphaned, rdp->n_cbs_adopted);
164} 174}
@@ -169,7 +179,11 @@ static int show_rcudata_csv(struct seq_file *m, void *unused)
169#ifdef CONFIG_NO_HZ 179#ifdef CONFIG_NO_HZ
170 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\","); 180 seq_puts(m, "\"dt\",\"dt nesting\",\"dt NMI nesting\",\"df\",");
171#endif /* #ifdef CONFIG_NO_HZ */ 181#endif /* #ifdef CONFIG_NO_HZ */
172 seq_puts(m, "\"of\",\"ri\",\"ql\",\"b\",\"ci\",\"co\",\"ca\"\n"); 182 seq_puts(m, "\"of\",\"ri\",\"ql\",\"qs\"");
183#ifdef CONFIG_RCU_BOOST
184 seq_puts(m, "\"kt\",\"ktl\"");
185#endif /* #ifdef CONFIG_RCU_BOOST */
186 seq_puts(m, ",\"b\",\"ci\",\"co\",\"ca\"\n");
173#ifdef CONFIG_TREE_PREEMPT_RCU 187#ifdef CONFIG_TREE_PREEMPT_RCU
174 seq_puts(m, "\"rcu_preempt:\"\n"); 188 seq_puts(m, "\"rcu_preempt:\"\n");
175 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m); 189 PRINT_RCU_DATA(rcu_preempt_data, print_one_rcu_data_csv, m);
diff --git a/kernel/sched.c b/kernel/sched.c
index cbb3a0eee58e..3f2e502d609b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -605,10 +605,10 @@ static inline int cpu_of(struct rq *rq)
605/* 605/*
606 * Return the group to which this task belongs. 606 * Return the group to which this task belongs.
607 * 607 *
608 * We use task_subsys_state_check() and extend the RCU verification 608 * We use task_subsys_state_check() and extend the RCU verification with
609 * with lockdep_is_held(&p->pi_lock) because cpu_cgroup_attach() 609 * pi->lock and rq->lock because cpu_cgroup_attach() holds those locks for each
610 * holds that lock for each task it moves into the cgroup. Therefore 610 * task it moves into the cgroup. Therefore by holding either of those locks,
611 * by holding that lock, we pin the task to the current cgroup. 611 * we pin the task to the current cgroup.
612 */ 612 */
613static inline struct task_group *task_group(struct task_struct *p) 613static inline struct task_group *task_group(struct task_struct *p)
614{ 614{
@@ -616,7 +616,8 @@ static inline struct task_group *task_group(struct task_struct *p)
616 struct cgroup_subsys_state *css; 616 struct cgroup_subsys_state *css;
617 617
618 css = task_subsys_state_check(p, cpu_cgroup_subsys_id, 618 css = task_subsys_state_check(p, cpu_cgroup_subsys_id,
619 lockdep_is_held(&p->pi_lock)); 619 lockdep_is_held(&p->pi_lock) ||
620 lockdep_is_held(&task_rq(p)->lock));
620 tg = container_of(css, struct task_group, css); 621 tg = container_of(css, struct task_group, css);
621 622
622 return autogroup_task_group(p, tg); 623 return autogroup_task_group(p, tg);
@@ -2200,6 +2201,16 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
2200 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)); 2201 !(task_thread_info(p)->preempt_count & PREEMPT_ACTIVE));
2201 2202
2202#ifdef CONFIG_LOCKDEP 2203#ifdef CONFIG_LOCKDEP
2204 /*
2205 * The caller should hold either p->pi_lock or rq->lock, when changing
2206 * a task's CPU. ->pi_lock for waking tasks, rq->lock for runnable tasks.
2207 *
2208 * sched_move_task() holds both and thus holding either pins the cgroup,
2209 * see set_task_rq().
2210 *
2211 * Furthermore, all task_rq users should acquire both locks, see
2212 * task_rq_lock().
2213 */
2203 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) || 2214 WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
2204 lockdep_is_held(&task_rq(p)->lock))); 2215 lockdep_is_held(&task_rq(p)->lock)));
2205#endif 2216#endif
@@ -2447,6 +2458,10 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2447 } 2458 }
2448 rcu_read_unlock(); 2459 rcu_read_unlock();
2449 } 2460 }
2461
2462 if (wake_flags & WF_MIGRATED)
2463 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2464
2450#endif /* CONFIG_SMP */ 2465#endif /* CONFIG_SMP */
2451 2466
2452 schedstat_inc(rq, ttwu_count); 2467 schedstat_inc(rq, ttwu_count);
@@ -2455,9 +2470,6 @@ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
2455 if (wake_flags & WF_SYNC) 2470 if (wake_flags & WF_SYNC)
2456 schedstat_inc(p, se.statistics.nr_wakeups_sync); 2471 schedstat_inc(p, se.statistics.nr_wakeups_sync);
2457 2472
2458 if (cpu != task_cpu(p))
2459 schedstat_inc(p, se.statistics.nr_wakeups_migrate);
2460
2461#endif /* CONFIG_SCHEDSTATS */ 2473#endif /* CONFIG_SCHEDSTATS */
2462} 2474}
2463 2475
@@ -2600,6 +2612,7 @@ static void ttwu_queue(struct task_struct *p, int cpu)
2600 2612
2601#if defined(CONFIG_SMP) 2613#if defined(CONFIG_SMP)
2602 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) { 2614 if (sched_feat(TTWU_QUEUE) && cpu != smp_processor_id()) {
2615 sched_clock_cpu(cpu); /* sync clocks x-cpu */
2603 ttwu_queue_remote(p, cpu); 2616 ttwu_queue_remote(p, cpu);
2604 return; 2617 return;
2605 } 2618 }
@@ -2674,8 +2687,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
2674 p->sched_class->task_waking(p); 2687 p->sched_class->task_waking(p);
2675 2688
2676 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags); 2689 cpu = select_task_rq(p, SD_BALANCE_WAKE, wake_flags);
2677 if (task_cpu(p) != cpu) 2690 if (task_cpu(p) != cpu) {
2691 wake_flags |= WF_MIGRATED;
2678 set_task_cpu(p, cpu); 2692 set_task_cpu(p, cpu);
2693 }
2679#endif /* CONFIG_SMP */ 2694#endif /* CONFIG_SMP */
2680 2695
2681 ttwu_queue(p, cpu); 2696 ttwu_queue(p, cpu);
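
One pattern in the sched.c hunks above is worth isolating: try_to_wake_up() now latches the migration decision into a WF_MIGRATED wake flag at the point where the target CPU is chosen, and ttwu_stat() consumes that flag later instead of recomputing the comparison. A standalone C sketch of that producer/consumer flag shape, with made-up values and simplified types:

/*
 * Standalone sketch of the "record the decision in a wake flag" idea:
 * the migration decision is taken in one function and consumed later,
 * so it is latched into a flag bit instead of being recomputed.
 * All names and values here are illustrative.
 */
#include <stdio.h>

#define WF_SYNC     0x01
#define WF_MIGRATED 0x02        /* set when the wakeup picked a new CPU */

struct wake_stats {
        unsigned long nr_wakeups_migrate;
        unsigned long nr_wakeups_sync;
};

static void account_wakeup(struct wake_stats *st, int wake_flags)
{
        /* Consumer: no need to know old/new CPU here, the flag says it all. */
        if (wake_flags & WF_MIGRATED)
                st->nr_wakeups_migrate++;
        if (wake_flags & WF_SYNC)
                st->nr_wakeups_sync++;
}

int main(void)
{
        struct wake_stats st = { 0, 0 };
        int task_cpu = 0, chosen_cpu = 2, wake_flags = WF_SYNC;

        /* Producer: latch the decision at the point where it is made. */
        if (task_cpu != chosen_cpu)
                wake_flags |= WF_MIGRATED;

        account_wakeup(&st, wake_flags);
        printf("migrated=%lu sync=%lu\n",
               st.nr_wakeups_migrate, st.nr_wakeups_sync);
        return 0;
}
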
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 88725c939e0b..10d018212bab 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1096,7 +1096,7 @@ static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int flag
1096 * to move current somewhere else, making room for our non-migratable 1096 * to move current somewhere else, making room for our non-migratable
1097 * task. 1097 * task.
1098 */ 1098 */
1099 if (p->prio == rq->curr->prio && !need_resched()) 1099 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr))
1100 check_preempt_equal_prio(rq, p); 1100 check_preempt_equal_prio(rq, p);
1101#endif 1101#endif
1102} 1102}
@@ -1239,6 +1239,10 @@ static int find_lowest_rq(struct task_struct *task)
1239 int this_cpu = smp_processor_id(); 1239 int this_cpu = smp_processor_id();
1240 int cpu = task_cpu(task); 1240 int cpu = task_cpu(task);
1241 1241
1242 /* Make sure the mask is initialized first */
1243 if (unlikely(!lowest_mask))
1244 return -1;
1245
1242 if (task->rt.nr_cpus_allowed == 1) 1246 if (task->rt.nr_cpus_allowed == 1)
1243 return -1; /* No other targets possible */ 1247 return -1; /* No other targets possible */
1244 1248
diff --git a/kernel/signal.c b/kernel/signal.c
index 86c32b884f8e..ff7678603328 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2365,7 +2365,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2365/** 2365/**
2366 * sys_rt_sigprocmask - change the list of currently blocked signals 2366 * sys_rt_sigprocmask - change the list of currently blocked signals
2367 * @how: whether to add, remove, or set signals 2367 * @how: whether to add, remove, or set signals
2368 * @set: stores pending signals 2368 * @nset: stores pending signals
2369 * @oset: previous value of signal mask if non-null 2369 * @oset: previous value of signal mask if non-null
2370 * @sigsetsize: size of sigset_t type 2370 * @sigsetsize: size of sigset_t type
2371 */ 2371 */
diff --git a/kernel/smp.c b/kernel/smp.c
index 73a195193558..fb67dfa8394e 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -74,7 +74,7 @@ static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
74 .notifier_call = hotplug_cfd, 74 .notifier_call = hotplug_cfd,
75}; 75};
76 76
77static int __cpuinit init_call_single_data(void) 77void __init call_function_init(void)
78{ 78{
79 void *cpu = (void *)(long)smp_processor_id(); 79 void *cpu = (void *)(long)smp_processor_id();
80 int i; 80 int i;
@@ -88,10 +88,7 @@ static int __cpuinit init_call_single_data(void)
88 88
89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); 89 hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu);
90 register_cpu_notifier(&hotplug_cfd_notifier); 90 register_cpu_notifier(&hotplug_cfd_notifier);
91
92 return 0;
93} 91}
94early_initcall(init_call_single_data);
95 92
96/* 93/*
97 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources 94 * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 13960170cad4..40cf63ddd4b3 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -58,7 +58,7 @@ DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
58 58
59char *softirq_to_name[NR_SOFTIRQS] = { 59char *softirq_to_name[NR_SOFTIRQS] = {
60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", 60 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
61 "TASKLET", "SCHED", "HRTIMER" 61 "TASKLET", "SCHED", "HRTIMER", "RCU"
62}; 62};
63 63
64/* 64/*
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4fc92445a29c..f175d98bd355 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -938,6 +938,12 @@ static struct ctl_table kern_table[] = {
938 }, 938 },
939#endif 939#endif
940#ifdef CONFIG_PERF_EVENTS 940#ifdef CONFIG_PERF_EVENTS
941 /*
942 * User-space scripts rely on the existence of this file
943 * as a feature check for perf_events being enabled.
944 *
945 * So it's an ABI, do not remove!
946 */
941 { 947 {
942 .procname = "perf_event_paranoid", 948 .procname = "perf_event_paranoid",
943 .data = &sysctl_perf_event_paranoid, 949 .data = &sysctl_perf_event_paranoid,
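
The new comment documents that the sysctl file itself is treated as an ABI: tools probe for /proc/sys/kernel/perf_event_paranoid to decide whether perf events exist at all. A minimal userspace probe of that convention (the messages are illustrative):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level;

        if (!f) {
                puts("perf_events not available in this kernel");
                return 1;
        }
        if (fscanf(f, "%d", &level) == 1)
                printf("perf_events available, paranoia level %d\n", level);
        fclose(f);
        return 0;
}
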
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index c027d4f602f1..e4c699dfa4e8 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -182,7 +182,10 @@ void clockevents_register_device(struct clock_event_device *dev)
182 unsigned long flags; 182 unsigned long flags;
183 183
184 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED); 184 BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
185 BUG_ON(!dev->cpumask); 185 if (!dev->cpumask) {
186 WARN_ON(num_possible_cpus() > 1);
187 dev->cpumask = cpumask_of(smp_processor_id());
188 }
186 189
187 raw_spin_lock_irqsave(&clockevents_lock, flags); 190 raw_spin_lock_irqsave(&clockevents_lock, flags);
188 191
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1c95fd677328..e0980f0d9a0a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -185,7 +185,6 @@ static struct clocksource *watchdog;
185static struct timer_list watchdog_timer; 185static struct timer_list watchdog_timer;
186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work); 186static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
187static DEFINE_SPINLOCK(watchdog_lock); 187static DEFINE_SPINLOCK(watchdog_lock);
188static cycle_t watchdog_last;
189static int watchdog_running; 188static int watchdog_running;
190 189
191static int clocksource_watchdog_kthread(void *data); 190static int clocksource_watchdog_kthread(void *data);
@@ -254,11 +253,6 @@ static void clocksource_watchdog(unsigned long data)
254 if (!watchdog_running) 253 if (!watchdog_running)
255 goto out; 254 goto out;
256 255
257 wdnow = watchdog->read(watchdog);
258 wd_nsec = clocksource_cyc2ns((wdnow - watchdog_last) & watchdog->mask,
259 watchdog->mult, watchdog->shift);
260 watchdog_last = wdnow;
261
262 list_for_each_entry(cs, &watchdog_list, wd_list) { 256 list_for_each_entry(cs, &watchdog_list, wd_list) {
263 257
264 /* Clocksource already marked unstable? */ 258 /* Clocksource already marked unstable? */
@@ -268,19 +262,28 @@ static void clocksource_watchdog(unsigned long data)
268 continue; 262 continue;
269 } 263 }
270 264
265 local_irq_disable();
271 csnow = cs->read(cs); 266 csnow = cs->read(cs);
267 wdnow = watchdog->read(watchdog);
268 local_irq_enable();
272 269
273 /* Clocksource initialized ? */ 270 /* Clocksource initialized ? */
274 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) { 271 if (!(cs->flags & CLOCK_SOURCE_WATCHDOG)) {
275 cs->flags |= CLOCK_SOURCE_WATCHDOG; 272 cs->flags |= CLOCK_SOURCE_WATCHDOG;
276 cs->wd_last = csnow; 273 cs->wd_last = wdnow;
274 cs->cs_last = csnow;
277 continue; 275 continue;
278 } 276 }
279 277
280 /* Check the deviation from the watchdog clocksource. */ 278 wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
281 cs_nsec = clocksource_cyc2ns((csnow - cs->wd_last) & 279 watchdog->mult, watchdog->shift);
280
281 cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
282 cs->mask, cs->mult, cs->shift); 282 cs->mask, cs->mult, cs->shift);
283 cs->wd_last = csnow; 283 cs->cs_last = csnow;
284 cs->wd_last = wdnow;
285
286 /* Check the deviation from the watchdog clocksource. */
284 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) { 287 if (abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD) {
285 clocksource_unstable(cs, cs_nsec - wd_nsec); 288 clocksource_unstable(cs, cs_nsec - wd_nsec);
286 continue; 289 continue;
@@ -318,7 +321,6 @@ static inline void clocksource_start_watchdog(void)
318 return; 321 return;
319 init_timer(&watchdog_timer); 322 init_timer(&watchdog_timer);
320 watchdog_timer.function = clocksource_watchdog; 323 watchdog_timer.function = clocksource_watchdog;
321 watchdog_last = watchdog->read(watchdog);
322 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL; 324 watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
323 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask)); 325 add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
324 watchdog_running = 1; 326 watchdog_running = 1;
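
The clocksource.c hunk above moves the watchdog bookkeeping from one global watchdog_last into per-clocksource cs_last/wd_last pairs, with both counters read back to back under local_irq_disable(). The arithmetic itself is a masked cycle delta converted with a mult/shift pair. A self-contained model of that conversion, using invented mask/mult/shift values purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* cycles -> nanoseconds with a mult/shift pair, as clocksource_cyc2ns() does */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
        return (cycles * mult) >> shift;
}

struct cs {
        uint64_t mask;          /* counter width, e.g. a 32-bit counter */
        uint32_t mult, shift;
        uint64_t cs_last;       /* last reading of this clocksource */
        uint64_t wd_last;       /* watchdog reading taken at the same time */
};

int main(void)
{
        struct cs cs = { .mask = 0xffffffffULL, .mult = 4, .shift = 2,
                         .cs_last = 0xfffffff0ULL, .wd_last = 1000 };
        uint64_t csnow = 0x10ULL;       /* counter wrapped since cs_last */
        uint64_t wdnow = 1032;

        /* Deltas are taken modulo the counter mask, so wraps are harmless. */
        uint64_t cs_nsec = cyc2ns((csnow - cs.cs_last) & cs.mask,
                                  cs.mult, cs.shift);
        uint64_t wd_nsec = cyc2ns((wdnow - cs.wd_last) & cs.mask,
                                  cs.mult, cs.shift);

        printf("cs=%llu ns, wd=%llu ns, deviation=%lld ns\n",
               (unsigned long long)cs_nsec, (unsigned long long)wd_nsec,
               (long long)(cs_nsec - wd_nsec));
        cs.cs_last = csnow;
        cs.wd_last = wdnow;
        return 0;
}
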
diff --git a/kernel/timer.c b/kernel/timer.c
index fd6198692b57..8cff36119e4d 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -749,16 +749,15 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
749 unsigned long expires_limit, mask; 749 unsigned long expires_limit, mask;
750 int bit; 750 int bit;
751 751
752 expires_limit = expires;
753
754 if (timer->slack >= 0) { 752 if (timer->slack >= 0) {
755 expires_limit = expires + timer->slack; 753 expires_limit = expires + timer->slack;
756 } else { 754 } else {
757 unsigned long now = jiffies; 755 long delta = expires - jiffies;
756
757 if (delta < 256)
758 return expires;
758 759
759 /* No slack, if already expired else auto slack 0.4% */ 760 expires_limit = expires + delta / 256;
760 if (time_after(expires, now))
761 expires_limit = expires + (expires - now)/256;
762 } 761 }
763 mask = expires ^ expires_limit; 762 mask = expires ^ expires_limit;
764 if (mask == 0) 763 if (mask == 0)
@@ -795,6 +794,8 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
795 */ 794 */
796int mod_timer(struct timer_list *timer, unsigned long expires) 795int mod_timer(struct timer_list *timer, unsigned long expires)
797{ 796{
797 expires = apply_slack(timer, expires);
798
798 /* 799 /*
799 * This is a common optimization triggered by the 800 * This is a common optimization triggered by the
800 * networking code - if the timer is re-modified 801 * networking code - if the timer is re-modified
@@ -803,8 +804,6 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
803 if (timer_pending(timer) && timer->expires == expires) 804 if (timer_pending(timer) && timer->expires == expires)
804 return 1; 805 return 1;
805 806
806 expires = apply_slack(timer, expires);
807
808 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED); 807 return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
809} 808}
810EXPORT_SYMBOL(mod_timer); 809EXPORT_SYMBOL(mod_timer);
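
The timer.c change calls apply_slack() before the early-return optimization in mod_timer(), so the "timer already pending with the same expiry" check now sees the slack-adjusted value. The auto-slack itself is roughly 0.4% of the remaining delta, with the expiry rounded so neighbouring timers coalesce. A standalone sketch of that rounding, with a plain variable standing in for jiffies and a simplified highest-bit loop:

#include <stdio.h>

static unsigned long apply_auto_slack(unsigned long expires,
                                      unsigned long jiffies)
{
        long delta = (long)(expires - jiffies);
        unsigned long expires_limit, mask;
        int bit = 0;

        if (delta < 256)                /* too close (or past): no slack */
                return expires;

        expires_limit = expires + delta / 256;  /* ~0.4% slack */
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        /* Find the highest differing bit and clear everything below it. */
        for (; mask > 1; mask >>= 1)
                bit++;
        mask = (1UL << bit) - 1;
        return expires_limit & ~mask;
}

int main(void)
{
        unsigned long jiffies = 100000;

        /* delta = 10000 -> slack of 39 jiffies, expiry rounded coarsely */
        printf("%lu -> %lu\n", jiffies + 10000,
               apply_auto_slack(jiffies + 10000, jiffies));
        return 0;
}

For jiffies = 100000 and a timer 10000 ticks out, this yields 110016 rather than the raw 110039, so timers in the same neighbourhood land on the same rounded expiry.
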
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1ee417fcbfa5..908038f57440 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2740,7 +2740,7 @@ static int ftrace_process_regex(struct ftrace_hash *hash,
2740{ 2740{
2741 char *func, *command, *next = buff; 2741 char *func, *command, *next = buff;
2742 struct ftrace_func_command *p; 2742 struct ftrace_func_command *p;
2743 int ret; 2743 int ret = -EINVAL;
2744 2744
2745 func = strsep(&next, ":"); 2745 func = strsep(&next, ":");
2746 2746
@@ -3330,6 +3330,7 @@ static int ftrace_process_locs(struct module *mod,
3330{ 3330{
3331 unsigned long *p; 3331 unsigned long *p;
3332 unsigned long addr; 3332 unsigned long addr;
3333 unsigned long flags;
3333 3334
3334 mutex_lock(&ftrace_lock); 3335 mutex_lock(&ftrace_lock);
3335 p = start; 3336 p = start;
@@ -3346,7 +3347,13 @@ static int ftrace_process_locs(struct module *mod,
3346 ftrace_record_ip(addr); 3347 ftrace_record_ip(addr);
3347 } 3348 }
3348 3349
3350 /*
3351 * Disable interrupts to prevent interrupts from executing
3352 * code that is being modified.
3353 */
3354 local_irq_save(flags);
3349 ftrace_update_code(mod); 3355 ftrace_update_code(mod);
3356 local_irq_restore(flags);
3350 mutex_unlock(&ftrace_lock); 3357 mutex_unlock(&ftrace_lock);
3351 3358
3352 return 0; 3359 return 0;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index f925c45f0afa..27d13b36b8be 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1870,8 +1870,12 @@ fs_initcall(init_kprobe_trace);
1870 1870
1871#ifdef CONFIG_FTRACE_STARTUP_TEST 1871#ifdef CONFIG_FTRACE_STARTUP_TEST
1872 1872
1873static int kprobe_trace_selftest_target(int a1, int a2, int a3, 1873/*
1874 int a4, int a5, int a6) 1874 * The "__used" keeps gcc from removing the function symbol
1875 * from the kallsyms table.
1876 */
1877static __used int kprobe_trace_selftest_target(int a1, int a2, int a3,
1878 int a4, int a5, int a6)
1875{ 1879{
1876 return a1 + a2 + a3 + a4 + a5 + a6; 1880 return a1 + a2 + a3 + a4 + a5 + a6;
1877} 1881}
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index dff763b7baf1..1f06468a10d7 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -240,13 +240,10 @@ static const char **find_next(void *v, loff_t *pos)
240 const char **fmt = v; 240 const char **fmt = v;
241 int start_index; 241 int start_index;
242 242
243 if (!fmt)
244 fmt = __start___trace_bprintk_fmt + *pos;
245
246 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt; 243 start_index = __stop___trace_bprintk_fmt - __start___trace_bprintk_fmt;
247 244
248 if (*pos < start_index) 245 if (*pos < start_index)
249 return fmt; 246 return __start___trace_bprintk_fmt + *pos;
250 247
251 return find_next_mod_format(start_index, v, fmt, pos); 248 return find_next_mod_format(start_index, v, fmt, pos);
252} 249}
diff --git a/lib/bitmap.c b/lib/bitmap.c
index 41baf02924e6..3f3b68199d74 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -572,7 +572,7 @@ EXPORT_SYMBOL(bitmap_scnlistprintf);
572 572
573/** 573/**
574 * __bitmap_parselist - convert list format ASCII string to bitmap 574 * __bitmap_parselist - convert list format ASCII string to bitmap
575 * @bp: read nul-terminated user string from this buffer 575 * @buf: read nul-terminated user string from this buffer
576 * @buflen: buffer size in bytes. If string is smaller than this 576 * @buflen: buffer size in bytes. If string is smaller than this
577 * then it must be terminated with a \0. 577 * then it must be terminated with a \0.
578 * @is_user: location of buffer, 0 indicates kernel space 578 * @is_user: location of buffer, 0 indicates kernel space
diff --git a/lib/kobject.c b/lib/kobject.c
index 82dc34c095c2..640bd98a4c8a 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -948,14 +948,14 @@ const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj)
948} 948}
949 949
950 950
951const void *kobj_ns_current(enum kobj_ns_type type) 951void *kobj_ns_grab_current(enum kobj_ns_type type)
952{ 952{
953 const void *ns = NULL; 953 void *ns = NULL;
954 954
955 spin_lock(&kobj_ns_type_lock); 955 spin_lock(&kobj_ns_type_lock);
956 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) && 956 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
957 kobj_ns_ops_tbl[type]) 957 kobj_ns_ops_tbl[type])
958 ns = kobj_ns_ops_tbl[type]->current_ns(); 958 ns = kobj_ns_ops_tbl[type]->grab_current_ns();
959 spin_unlock(&kobj_ns_type_lock); 959 spin_unlock(&kobj_ns_type_lock);
960 960
961 return ns; 961 return ns;
@@ -987,23 +987,15 @@ const void *kobj_ns_initial(enum kobj_ns_type type)
987 return ns; 987 return ns;
988} 988}
989 989
990/* 990void kobj_ns_drop(enum kobj_ns_type type, void *ns)
991 * kobj_ns_exit - invalidate a namespace tag
992 *
993 * @type: the namespace type (i.e. KOBJ_NS_TYPE_NET)
994 * @ns: the actual namespace being invalidated
995 *
996 * This is called when a tag is no longer valid. For instance,
997 * when a network namespace exits, it uses this helper to
998 * make sure no sb's sysfs_info points to the now-invalidated
999 * netns.
1000 */
1001void kobj_ns_exit(enum kobj_ns_type type, const void *ns)
1002{ 991{
1003 sysfs_exit_ns(type, ns); 992 spin_lock(&kobj_ns_type_lock);
993 if ((type > KOBJ_NS_TYPE_NONE) && (type < KOBJ_NS_TYPES) &&
994 kobj_ns_ops_tbl[type] && kobj_ns_ops_tbl[type]->drop_ns)
995 kobj_ns_ops_tbl[type]->drop_ns(ns);
996 spin_unlock(&kobj_ns_type_lock);
1004} 997}
1005 998
1006
1007EXPORT_SYMBOL(kobject_get); 999EXPORT_SYMBOL(kobject_get);
1008EXPORT_SYMBOL(kobject_put); 1000EXPORT_SYMBOL(kobject_put);
1009EXPORT_SYMBOL(kobject_del); 1001EXPORT_SYMBOL(kobject_del);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 93ca08b8a451..99093b396145 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -110,6 +110,11 @@ setup_io_tlb_npages(char *str)
110__setup("swiotlb=", setup_io_tlb_npages); 110__setup("swiotlb=", setup_io_tlb_npages);
111/* make io_tlb_overflow tunable too? */ 111/* make io_tlb_overflow tunable too? */
112 112
113unsigned long swioltb_nr_tbl(void)
114{
115 return io_tlb_nslabs;
116}
117
113/* Note that this doesn't work with highmem page */ 118/* Note that this doesn't work with highmem page */
114static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, 119static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
115 volatile void *address) 120 volatile void *address)
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index c11205688fb4..4365df31a1d5 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -666,6 +666,8 @@ char *ip6_compressed_string(char *p, const char *addr)
666 colonpos = i; 666 colonpos = i;
667 } 667 }
668 } 668 }
669 if (longest == 1) /* don't compress a single 0 */
670 colonpos = -1;
669 671
670 /* emit address */ 672 /* emit address */
671 for (i = 0; i < range; i++) { 673 for (i = 0; i < range; i++) {
@@ -826,7 +828,7 @@ int kptr_restrict __read_mostly;
826 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) 828 * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006)
827 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order 829 * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order
828 * - 'I6c' for IPv6 addresses printed as specified by 830 * - 'I6c' for IPv6 addresses printed as specified by
829 * http://tools.ietf.org/html/draft-ietf-6man-text-addr-representation-00 831 * http://tools.ietf.org/html/rfc5952
830 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form 832 * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form
831 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" 833 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
832 * Options for %pU are: 834 * Options for %pU are:
diff --git a/mm/compaction.c b/mm/compaction.c
index 021a2960ef9e..6cc604bd5649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -144,9 +144,20 @@ static void isolate_freepages(struct zone *zone,
144 int nr_freepages = cc->nr_freepages; 144 int nr_freepages = cc->nr_freepages;
145 struct list_head *freelist = &cc->freepages; 145 struct list_head *freelist = &cc->freepages;
146 146
147 /*
148 * Initialise the free scanner. The starting point is where we last
149 * scanned from (or the end of the zone if starting). The low point
150 * is the end of the pageblock the migration scanner is using.
151 */
147 pfn = cc->free_pfn; 152 pfn = cc->free_pfn;
148 low_pfn = cc->migrate_pfn + pageblock_nr_pages; 153 low_pfn = cc->migrate_pfn + pageblock_nr_pages;
149 high_pfn = low_pfn; 154
155 /*
156 * Take care that if the migration scanner is at the end of the zone
157 * that the free scanner does not accidentally move to the next zone
158 * in the next isolation cycle.
159 */
160 high_pfn = min(low_pfn, pfn);
150 161
151 /* 162 /*
152 * Isolate free pages until enough are available to migrate the 163 * Isolate free pages until enough are available to migrate the
@@ -240,11 +251,18 @@ static bool too_many_isolated(struct zone *zone)
240 return isolated > (inactive + active) / 2; 251 return isolated > (inactive + active) / 2;
241} 252}
242 253
254/* possible outcome of isolate_migratepages */
255typedef enum {
256 ISOLATE_ABORT, /* Abort compaction now */
257 ISOLATE_NONE, /* No pages isolated, continue scanning */
258 ISOLATE_SUCCESS, /* Pages isolated, migrate */
259} isolate_migrate_t;
260
243/* 261/*
244 * Isolate all pages that can be migrated from the block pointed to by 262 * Isolate all pages that can be migrated from the block pointed to by
245 * the migrate scanner within compact_control. 263 * the migrate scanner within compact_control.
246 */ 264 */
247static unsigned long isolate_migratepages(struct zone *zone, 265static isolate_migrate_t isolate_migratepages(struct zone *zone,
248 struct compact_control *cc) 266 struct compact_control *cc)
249{ 267{
250 unsigned long low_pfn, end_pfn; 268 unsigned long low_pfn, end_pfn;
@@ -261,7 +279,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
261 /* Do not cross the free scanner or scan within a memory hole */ 279 /* Do not cross the free scanner or scan within a memory hole */
262 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) { 280 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
263 cc->migrate_pfn = end_pfn; 281 cc->migrate_pfn = end_pfn;
264 return 0; 282 return ISOLATE_NONE;
265 } 283 }
266 284
267 /* 285 /*
@@ -270,10 +288,14 @@ static unsigned long isolate_migratepages(struct zone *zone,
270 * delay for some time until fewer pages are isolated 288 * delay for some time until fewer pages are isolated
271 */ 289 */
272 while (unlikely(too_many_isolated(zone))) { 290 while (unlikely(too_many_isolated(zone))) {
291 /* async migration should just abort */
292 if (!cc->sync)
293 return ISOLATE_ABORT;
294
273 congestion_wait(BLK_RW_ASYNC, HZ/10); 295 congestion_wait(BLK_RW_ASYNC, HZ/10);
274 296
275 if (fatal_signal_pending(current)) 297 if (fatal_signal_pending(current))
276 return 0; 298 return ISOLATE_ABORT;
277 } 299 }
278 300
279 /* Time to isolate some pages for migration */ 301 /* Time to isolate some pages for migration */
@@ -358,7 +380,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
358 380
359 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 381 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
360 382
361 return cc->nr_migratepages; 383 return ISOLATE_SUCCESS;
362} 384}
363 385
364/* 386/*
@@ -420,13 +442,6 @@ static int compact_finished(struct zone *zone,
420 if (cc->free_pfn <= cc->migrate_pfn) 442 if (cc->free_pfn <= cc->migrate_pfn)
421 return COMPACT_COMPLETE; 443 return COMPACT_COMPLETE;
422 444
423 /* Compaction run is not finished if the watermark is not met */
424 watermark = low_wmark_pages(zone);
425 watermark += (1 << cc->order);
426
427 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
428 return COMPACT_CONTINUE;
429
430 /* 445 /*
431 * order == -1 is expected when compacting via 446 * order == -1 is expected when compacting via
432 * /proc/sys/vm/compact_memory 447 * /proc/sys/vm/compact_memory
@@ -434,6 +449,13 @@ static int compact_finished(struct zone *zone,
434 if (cc->order == -1) 449 if (cc->order == -1)
435 return COMPACT_CONTINUE; 450 return COMPACT_CONTINUE;
436 451
452 /* Compaction run is not finished if the watermark is not met */
453 watermark = low_wmark_pages(zone);
454 watermark += (1 << cc->order);
455
456 if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
457 return COMPACT_CONTINUE;
458
437 /* Direct compactor: Is a suitable page free? */ 459 /* Direct compactor: Is a suitable page free? */
438 for (order = cc->order; order < MAX_ORDER; order++) { 460 for (order = cc->order; order < MAX_ORDER; order++) {
439 /* Job done if page is free of the right migratetype */ 461 /* Job done if page is free of the right migratetype */
@@ -461,6 +483,13 @@ unsigned long compaction_suitable(struct zone *zone, int order)
461 unsigned long watermark; 483 unsigned long watermark;
462 484
463 /* 485 /*
486 * order == -1 is expected when compacting via
487 * /proc/sys/vm/compact_memory
488 */
489 if (order == -1)
490 return COMPACT_CONTINUE;
491
492 /*
464 * Watermarks for order-0 must be met for compaction. Note the 2UL. 493 * Watermarks for order-0 must be met for compaction. Note the 2UL.
465 * This is because during migration, copies of pages need to be 494 * This is because during migration, copies of pages need to be
466 * allocated and for a short time, the footprint is higher 495 * allocated and for a short time, the footprint is higher
@@ -470,17 +499,11 @@ unsigned long compaction_suitable(struct zone *zone, int order)
470 return COMPACT_SKIPPED; 499 return COMPACT_SKIPPED;
471 500
472 /* 501 /*
473 * order == -1 is expected when compacting via
474 * /proc/sys/vm/compact_memory
475 */
476 if (order == -1)
477 return COMPACT_CONTINUE;
478
479 /*
480 * fragmentation index determines if allocation failures are due to 502 * fragmentation index determines if allocation failures are due to
481 * low memory or external fragmentation 503 * low memory or external fragmentation
482 * 504 *
483 * index of -1 implies allocations might succeed dependingon watermarks 505 * index of -1000 implies allocations might succeed depending on
506 * watermarks
484 * index towards 0 implies failure is due to lack of memory 507 * index towards 0 implies failure is due to lack of memory
485 * index towards 1000 implies failure is due to fragmentation 508 * index towards 1000 implies failure is due to fragmentation
486 * 509 *
@@ -490,7 +513,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
490 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) 513 if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
491 return COMPACT_SKIPPED; 514 return COMPACT_SKIPPED;
492 515
493 if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) 516 if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
517 0, 0))
494 return COMPACT_PARTIAL; 518 return COMPACT_PARTIAL;
495 519
496 return COMPACT_CONTINUE; 520 return COMPACT_CONTINUE;
@@ -522,8 +546,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
522 unsigned long nr_migrate, nr_remaining; 546 unsigned long nr_migrate, nr_remaining;
523 int err; 547 int err;
524 548
525 if (!isolate_migratepages(zone, cc)) 549 switch (isolate_migratepages(zone, cc)) {
550 case ISOLATE_ABORT:
551 ret = COMPACT_PARTIAL;
552 goto out;
553 case ISOLATE_NONE:
526 continue; 554 continue;
555 case ISOLATE_SUCCESS:
556 ;
557 }
527 558
528 nr_migrate = cc->nr_migratepages; 559 nr_migrate = cc->nr_migratepages;
529 err = migrate_pages(&cc->migratepages, compaction_alloc, 560 err = migrate_pages(&cc->migratepages, compaction_alloc,
@@ -547,6 +578,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
547 578
548 } 579 }
549 580
581out:
550 /* Release free pages and check accounting */ 582 /* Release free pages and check accounting */
551 cc->nr_freepages -= release_freepages(&cc->freepages); 583 cc->nr_freepages -= release_freepages(&cc->freepages);
552 VM_BUG_ON(cc->nr_freepages != 0); 584 VM_BUG_ON(cc->nr_freepages != 0);
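
isolate_migratepages() now reports a tri-state isolate_migrate_t instead of a page count, so compact_zone() can tell "abort now" apart from "nothing found, keep scanning". A toy driver showing how such a result is consumed; the enum names mirror the patch, while isolate_some() and the loop are invented stand-ins:

#include <stdio.h>

typedef enum {
        ISOLATE_ABORT,          /* stop compaction now */
        ISOLATE_NONE,           /* nothing found, continue scanning */
        ISOLATE_SUCCESS,        /* pages isolated, migrate them */
} isolate_migrate_t;

static isolate_migrate_t isolate_some(int step)
{
        if (step == 0)
                return ISOLATE_NONE;
        if (step == 1)
                return ISOLATE_SUCCESS;
        return ISOLATE_ABORT;
}

int main(void)
{
        int step;

        for (step = 0; ; step++) {
                switch (isolate_some(step)) {
                case ISOLATE_ABORT:
                        puts("abort: bail out with a partial result");
                        return 0;
                case ISOLATE_NONE:
                        puts("none: keep scanning");
                        continue;
                case ISOLATE_SUCCESS:
                        puts("success: migrate the isolated pages");
                        break;
                }
        }
        return 0;
}
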
diff --git a/mm/filemap.c b/mm/filemap.c
index d7b10578a64b..a8251a8d3457 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2000,7 +2000,7 @@ int file_remove_suid(struct file *file)
2000 error = security_inode_killpriv(dentry); 2000 error = security_inode_killpriv(dentry);
2001 if (!error && killsuid) 2001 if (!error && killsuid)
2002 error = __remove_suid(dentry, killsuid); 2002 error = __remove_suid(dentry, killsuid);
2003 if (!error) 2003 if (!error && (inode->i_sb->s_flags & MS_NOSEC))
2004 inode->i_flags |= S_NOSEC; 2004 inode->i_flags |= S_NOSEC;
2005 2005
2006 return error; 2006 return error;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 615d9743a3cb..81532f297fd2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2234,11 +2234,8 @@ static void khugepaged_loop(void)
2234 while (likely(khugepaged_enabled())) { 2234 while (likely(khugepaged_enabled())) {
2235#ifndef CONFIG_NUMA 2235#ifndef CONFIG_NUMA
2236 hpage = khugepaged_alloc_hugepage(); 2236 hpage = khugepaged_alloc_hugepage();
2237 if (unlikely(!hpage)) { 2237 if (unlikely(!hpage))
2238 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2239 break; 2238 break;
2240 }
2241 count_vm_event(THP_COLLAPSE_ALLOC);
2242#else 2239#else
2243 if (IS_ERR(hpage)) { 2240 if (IS_ERR(hpage)) {
2244 khugepaged_alloc_sleep(); 2241 khugepaged_alloc_sleep();
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f33bb319b73f..bfcf153bc829 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1033,10 +1033,10 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
1033 */ 1033 */
1034 chg = vma_needs_reservation(h, vma, addr); 1034 chg = vma_needs_reservation(h, vma, addr);
1035 if (chg < 0) 1035 if (chg < 0)
1036 return ERR_PTR(chg); 1036 return ERR_PTR(-VM_FAULT_OOM);
1037 if (chg) 1037 if (chg)
1038 if (hugetlb_get_quota(inode->i_mapping, chg)) 1038 if (hugetlb_get_quota(inode->i_mapping, chg))
1039 return ERR_PTR(-ENOSPC); 1039 return ERR_PTR(-VM_FAULT_SIGBUS);
1040 1040
1041 spin_lock(&hugetlb_lock); 1041 spin_lock(&hugetlb_lock);
1042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve); 1042 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
@@ -1111,6 +1111,14 @@ static void __init gather_bootmem_prealloc(void)
1111 WARN_ON(page_count(page) != 1); 1111 WARN_ON(page_count(page) != 1);
1112 prep_compound_huge_page(page, h->order); 1112 prep_compound_huge_page(page, h->order);
1113 prep_new_huge_page(h, page, page_to_nid(page)); 1113 prep_new_huge_page(h, page, page_to_nid(page));
1114 /*
1115 * If we had gigantic hugepages allocated at boot time, we need
1116 * to restore the 'stolen' pages to totalram_pages in order to
1117 * fix confusing memory reports from free(1) and other
1118 * side effects, like CommitLimit going negative.
1119 */
1120 if (h->order > (MAX_ORDER - 1))
1121 totalram_pages += 1 << h->order;
1114 } 1122 }
1115} 1123}
1116 1124
diff --git a/mm/ksm.c b/mm/ksm.c
index d708b3ef2260..9a68b0cf0a1c 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1302,6 +1302,12 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)
1302 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 1302 slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list);
1303 ksm_scan.mm_slot = slot; 1303 ksm_scan.mm_slot = slot;
1304 spin_unlock(&ksm_mmlist_lock); 1304 spin_unlock(&ksm_mmlist_lock);
1305 /*
1306 * Although we tested list_empty() above, a racing __ksm_exit
1307 * of the last mm on the list may have removed it since then.
1308 */
1309 if (slot == &ksm_mm_head)
1310 return NULL;
1305next_mm: 1311next_mm:
1306 ksm_scan.address = 0; 1312 ksm_scan.address = 0;
1307 ksm_scan.rmap_list = &slot->rmap_list; 1313 ksm_scan.rmap_list = &slot->rmap_list;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index bd9052a5d3ad..cf7d027a8844 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -359,7 +359,7 @@ enum charge_type {
359static void mem_cgroup_get(struct mem_cgroup *mem); 359static void mem_cgroup_get(struct mem_cgroup *mem);
360static void mem_cgroup_put(struct mem_cgroup *mem); 360static void mem_cgroup_put(struct mem_cgroup *mem);
361static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem); 361static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem);
362static void drain_all_stock_async(void); 362static void drain_all_stock_async(struct mem_cgroup *mem);
363 363
364static struct mem_cgroup_per_zone * 364static struct mem_cgroup_per_zone *
365mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid) 365mem_cgroup_zoneinfo(struct mem_cgroup *mem, int nid, int zid)
@@ -735,7 +735,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
735 struct mem_cgroup, css); 735 struct mem_cgroup, css);
736} 736}
737 737
738static struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 738struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
739{ 739{
740 struct mem_cgroup *mem = NULL; 740 struct mem_cgroup *mem = NULL;
741 741
@@ -1663,15 +1663,21 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; 1663 excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT;
1664 1664
1665 /* If memsw_is_minimum==1, swap-out is of-no-use. */ 1665 /* If memsw_is_minimum==1, swap-out is of-no-use. */
1666 if (root_mem->memsw_is_minimum) 1666 if (!check_soft && root_mem->memsw_is_minimum)
1667 noswap = true; 1667 noswap = true;
1668 1668
1669 while (1) { 1669 while (1) {
1670 victim = mem_cgroup_select_victim(root_mem); 1670 victim = mem_cgroup_select_victim(root_mem);
1671 if (victim == root_mem) { 1671 if (victim == root_mem) {
1672 loop++; 1672 loop++;
1673 if (loop >= 1) 1673 /*
1674 drain_all_stock_async(); 1674 * We are not draining per cpu cached charges during
1675 * soft limit reclaim because global reclaim doesn't
1676 * care about charges. It tries to free some memory, and
1677 * draining cached charges would not free any.
1678 */
1679 if (!check_soft && loop >= 1)
1680 drain_all_stock_async(root_mem);
1675 if (loop >= 2) { 1681 if (loop >= 2) {
1676 /* 1682 /*
1677 * If we have not been able to reclaim 1683 * If we have not been able to reclaim
@@ -1934,9 +1940,11 @@ struct memcg_stock_pcp {
1934 struct mem_cgroup *cached; /* this never be root cgroup */ 1940 struct mem_cgroup *cached; /* this never be root cgroup */
1935 unsigned int nr_pages; 1941 unsigned int nr_pages;
1936 struct work_struct work; 1942 struct work_struct work;
1943 unsigned long flags;
1944#define FLUSHING_CACHED_CHARGE (0)
1937}; 1945};
1938static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1946static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1939static atomic_t memcg_drain_count; 1947static DEFINE_MUTEX(percpu_charge_mutex);
1940 1948
1941/* 1949/*
1942 * Try to consume stocked charge on this cpu. If success, one page is consumed 1950 * Try to consume stocked charge on this cpu. If success, one page is consumed
@@ -1984,6 +1992,7 @@ static void drain_local_stock(struct work_struct *dummy)
1984{ 1992{
1985 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock); 1993 struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
1986 drain_stock(stock); 1994 drain_stock(stock);
1995 clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
1987} 1996}
1988 1997
1989/* 1998/*
@@ -2008,26 +2017,45 @@ static void refill_stock(struct mem_cgroup *mem, unsigned int nr_pages)
2008 * expects some charges will be back to res_counter later but cannot wait for 2017 * expects some charges will be back to res_counter later but cannot wait for
2009 * it. 2018 * it.
2010 */ 2019 */
2011static void drain_all_stock_async(void) 2020static void drain_all_stock_async(struct mem_cgroup *root_mem)
2012{ 2021{
2013 int cpu; 2022 int cpu, curcpu;
2014 /* This function is for scheduling "drain" in asynchronous way. 2023 /*
2015 * The result of "drain" is not directly handled by callers. Then, 2024 * If someone calls draining, avoid adding more kworker runs.
2016 * if someone is calling drain, we don't have to call drain more.
2017 * Anyway, WORK_STRUCT_PENDING check in queue_work_on() will catch if
2018 * there is a race. We just do loose check here.
2019 */ 2025 */
2020 if (atomic_read(&memcg_drain_count)) 2026 if (!mutex_trylock(&percpu_charge_mutex))
2021 return; 2027 return;
2022 /* Notify other cpus that system-wide "drain" is running */ 2028 /* Notify other cpus that system-wide "drain" is running */
2023 atomic_inc(&memcg_drain_count);
2024 get_online_cpus(); 2029 get_online_cpus();
2030 /*
2031 * Get a hint for avoiding draining charges on the current cpu,
2032 * which must be exhausted by our charging. It is not required that
2033 * this be a precise check, so we use raw_smp_processor_id() instead of
2034 * getcpu()/putcpu().
2035 */
2036 curcpu = raw_smp_processor_id();
2025 for_each_online_cpu(cpu) { 2037 for_each_online_cpu(cpu) {
2026 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); 2038 struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
2027 schedule_work_on(cpu, &stock->work); 2039 struct mem_cgroup *mem;
2040
2041 if (cpu == curcpu)
2042 continue;
2043
2044 mem = stock->cached;
2045 if (!mem)
2046 continue;
2047 if (mem != root_mem) {
2048 if (!root_mem->use_hierarchy)
2049 continue;
2050 /* check whether "mem" is under tree of "root_mem" */
2051 if (!css_is_ancestor(&mem->css, &root_mem->css))
2052 continue;
2053 }
2054 if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags))
2055 schedule_work_on(cpu, &stock->work);
2028 } 2056 }
2029 put_online_cpus(); 2057 put_online_cpus();
2030 atomic_dec(&memcg_drain_count); 2058 mutex_unlock(&percpu_charge_mutex);
2031 /* We don't wait for flush_work */ 2059 /* We don't wait for flush_work */
2032} 2060}
2033 2061
@@ -2035,9 +2063,9 @@ static void drain_all_stock_async(void)
2035static void drain_all_stock_sync(void) 2063static void drain_all_stock_sync(void)
2036{ 2064{
2037 /* called when force_empty is called */ 2065 /* called when force_empty is called */
2038 atomic_inc(&memcg_drain_count); 2066 mutex_lock(&percpu_charge_mutex);
2039 schedule_on_each_cpu(drain_local_stock); 2067 schedule_on_each_cpu(drain_local_stock);
2040 atomic_dec(&memcg_drain_count); 2068 mutex_unlock(&percpu_charge_mutex);
2041} 2069}
2042 2070
2043/* 2071/*
@@ -4640,6 +4668,7 @@ static struct cftype mem_cgroup_files[] = {
4640 { 4668 {
4641 .name = "numa_stat", 4669 .name = "numa_stat",
4642 .open = mem_control_numa_stat_open, 4670 .open = mem_control_numa_stat_open,
4671 .mode = S_IRUGO,
4643 }, 4672 },
4644#endif 4673#endif
4645}; 4674};
@@ -5414,18 +5443,16 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
5414 struct cgroup *old_cont, 5443 struct cgroup *old_cont,
5415 struct task_struct *p) 5444 struct task_struct *p)
5416{ 5445{
5417 struct mm_struct *mm; 5446 struct mm_struct *mm = get_task_mm(p);
5418 5447
5419 if (!mc.to)
5420 /* no need to move charge */
5421 return;
5422
5423 mm = get_task_mm(p);
5424 if (mm) { 5448 if (mm) {
5425 mem_cgroup_move_charge(mm); 5449 if (mc.to)
5450 mem_cgroup_move_charge(mm);
5451 put_swap_token(mm);
5426 mmput(mm); 5452 mmput(mm);
5427 } 5453 }
5428 mem_cgroup_clear_mc(); 5454 if (mc.to)
5455 mem_cgroup_clear_mc();
5429} 5456}
5430#else /* !CONFIG_MMU */ 5457#else /* !CONFIG_MMU */
5431static int mem_cgroup_can_attach(struct cgroup_subsys *ss, 5458static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
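
The memcontrol.c hunks above replace the old memcg_drain_count atomic with percpu_charge_mutex (trylock on the async path, lock on the sync path) and add a per-cpu FLUSHING_CACHED_CHARGE bit, so at most one drain work item is ever queued per stock and only stocks under the target hierarchy are touched. Below is a minimal userspace sketch of that test-and-set scheduling pattern, not the kernel code itself; struct fake_stock and drain_one() are invented names for illustration.

    /*
     * Sketch: a worker is queued only when the "flushing" flag was
     * previously clear, and the worker clears it once the drain is done,
     * mirroring FLUSHING_CACHED_CHARGE in drain_all_stock_async() and
     * drain_local_stock() above.
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct fake_stock {
        atomic_flag flushing;   /* plays the role of FLUSHING_CACHED_CHARGE */
        int cached_pages;
    };

    static struct fake_stock stock = { .flushing = ATOMIC_FLAG_INIT, .cached_pages = 32 };

    static void *drain_one(void *arg)
    {
        struct fake_stock *s = arg;

        s->cached_pages = 0;              /* the actual drain */
        atomic_flag_clear(&s->flushing);  /* like clear_bit() in drain_local_stock() */
        return NULL;
    }

    int main(void)
    {
        pthread_t worker;

        /* Queue the drain only if none is already pending for this stock. */
        if (!atomic_flag_test_and_set(&stock.flushing)) {
            pthread_create(&worker, NULL, drain_one, &stock);
            pthread_join(worker, NULL);
        }
        printf("cached pages left: %d\n", stock.cached_pages);
        return 0;
    }

The sketch compiles with any C11 compiler (link with -pthread); it only models the "don't queue twice" idea, not the hierarchy walk.
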
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5c8f7e08928d..eac0ba561491 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -52,6 +52,7 @@
52#include <linux/swapops.h> 52#include <linux/swapops.h>
53#include <linux/hugetlb.h> 53#include <linux/hugetlb.h>
54#include <linux/memory_hotplug.h> 54#include <linux/memory_hotplug.h>
55#include <linux/mm_inline.h>
55#include "internal.h" 56#include "internal.h"
56 57
57int sysctl_memory_failure_early_kill __read_mostly = 0; 58int sysctl_memory_failure_early_kill __read_mostly = 0;
@@ -1468,7 +1469,8 @@ int soft_offline_page(struct page *page, int flags)
1468 put_page(page); 1469 put_page(page);
1469 if (!ret) { 1470 if (!ret) {
1470 LIST_HEAD(pagelist); 1471 LIST_HEAD(pagelist);
1471 1472 inc_zone_page_state(page, NR_ISOLATED_ANON +
1473 page_is_file_cache(page));
1472 list_add(&page->lru, &pagelist); 1474 list_add(&page->lru, &pagelist);
1473 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 1475 ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
1474 0, true); 1476 0, true);
diff --git a/mm/memory.c b/mm/memory.c
index 6953d3926e01..87d935333f0d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1112,11 +1112,13 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
1112 int force_flush = 0; 1112 int force_flush = 0;
1113 int rss[NR_MM_COUNTERS]; 1113 int rss[NR_MM_COUNTERS];
1114 spinlock_t *ptl; 1114 spinlock_t *ptl;
1115 pte_t *start_pte;
1115 pte_t *pte; 1116 pte_t *pte;
1116 1117
1117again: 1118again:
1118 init_rss_vec(rss); 1119 init_rss_vec(rss);
1119 pte = pte_offset_map_lock(mm, pmd, addr, &ptl); 1120 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
1121 pte = start_pte;
1120 arch_enter_lazy_mmu_mode(); 1122 arch_enter_lazy_mmu_mode();
1121 do { 1123 do {
1122 pte_t ptent = *pte; 1124 pte_t ptent = *pte;
@@ -1196,7 +1198,7 @@ again:
1196 1198
1197 add_mm_rss_vec(mm, rss); 1199 add_mm_rss_vec(mm, rss);
1198 arch_leave_lazy_mmu_mode(); 1200 arch_leave_lazy_mmu_mode();
1199 pte_unmap_unlock(pte - 1, ptl); 1201 pte_unmap_unlock(start_pte, ptl);
1200 1202
1201 /* 1203 /*
1202 * mmu_gather ran out of room to batch pages, we break out of 1204 * mmu_gather ran out of room to batch pages, we break out of
@@ -1296,7 +1298,7 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
1296 1298
1297/** 1299/**
1298 * unmap_vmas - unmap a range of memory covered by a list of vma's 1300 * unmap_vmas - unmap a range of memory covered by a list of vma's
1299 * @tlbp: address of the caller's struct mmu_gather 1301 * @tlb: address of the caller's struct mmu_gather
1300 * @vma: the starting vma 1302 * @vma: the starting vma
1301 * @start_addr: virtual address at which to start unmapping 1303 * @start_addr: virtual address at which to start unmapping
1302 * @end_addr: virtual address at which to end unmapping 1304 * @end_addr: virtual address at which to end unmapping
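
The memory.c hunk above makes zap_pte_range() remember the pointer returned by pte_offset_map_lock() in start_pte and unlock with that, instead of reconstructing it as "pte - 1" after the loop. A standalone sketch of the same rule (release the pointer you were handed, not one derived from a moving cursor); walk_and_release() is an invented helper, not a kernel function.

    #include <stdio.h>
    #include <stdlib.h>

    static void walk_and_release(size_t n)
    {
        int *start = calloc(n, sizeof(*start)); /* plays the role of start_pte */
        int *cur;

        if (!start)
            return;
        for (cur = start; cur < start + n; cur++)
            *cur = 1;                           /* the per-entry work */

        free(start); /* correct: the original pointer, never "cur - 1" */
    }

    int main(void)
    {
        walk_and_release(8);
        puts("done");
        return 0;
    }
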
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9f646374e32f..02159c755136 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -494,6 +494,12 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
494 /* init node's zones as empty zones, we don't have any present pages.*/ 494 /* init node's zones as empty zones, we don't have any present pages.*/
495 free_area_init_node(nid, zones_size, start_pfn, zholes_size); 495 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
496 496
497 /*
498 * The node we allocated has no zone fallback lists. For avoiding
499 * to access not-initialized zonelist, build here.
500 */
501 build_all_zonelists(NULL);
502
497 return pgdat; 503 return pgdat;
498} 504}
499 505
diff --git a/mm/migrate.c b/mm/migrate.c
index e4a5c912983d..666e4e677414 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -288,7 +288,7 @@ static int migrate_page_move_mapping(struct address_space *mapping,
288 */ 288 */
289 __dec_zone_page_state(page, NR_FILE_PAGES); 289 __dec_zone_page_state(page, NR_FILE_PAGES);
290 __inc_zone_page_state(newpage, NR_FILE_PAGES); 290 __inc_zone_page_state(newpage, NR_FILE_PAGES);
291 if (PageSwapBacked(page)) { 291 if (!PageSwapCache(page) && PageSwapBacked(page)) {
292 __dec_zone_page_state(page, NR_SHMEM); 292 __dec_zone_page_state(page, NR_SHMEM);
293 __inc_zone_page_state(newpage, NR_SHMEM); 293 __inc_zone_page_state(newpage, NR_SHMEM);
294 } 294 }
diff --git a/mm/mmap.c b/mm/mmap.c
index bbdc9af5e117..d49736ff8a8d 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -906,14 +906,7 @@ struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
906 if (anon_vma) 906 if (anon_vma)
907 return anon_vma; 907 return anon_vma;
908try_prev: 908try_prev:
909 /* 909 near = vma->vm_prev;
910 * It is potentially slow to have to call find_vma_prev here.
911 * But it's only on the first write fault on the vma, not
912 * every time, and we could devise a way to avoid it later
913 * (e.g. stash info in next's anon_vma_node when assigning
914 * an anon_vma, or when trying vma_merge). Another time.
915 */
916 BUG_ON(find_vma_prev(vma->vm_mm, vma->vm_start, &near) != vma);
917 if (!near) 910 if (!near)
918 goto none; 911 goto none;
919 912
@@ -2044,9 +2037,10 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2044 return -EINVAL; 2037 return -EINVAL;
2045 2038
2046 /* Find the first overlapping VMA */ 2039 /* Find the first overlapping VMA */
2047 vma = find_vma_prev(mm, start, &prev); 2040 vma = find_vma(mm, start);
2048 if (!vma) 2041 if (!vma)
2049 return 0; 2042 return 0;
2043 prev = vma->vm_prev;
2050 /* we have start < vma->vm_end */ 2044 /* we have start < vma->vm_end */
2051 2045
2052 /* if it doesn't overlap, we have nothing.. */ 2046 /* if it doesn't overlap, we have nothing.. */
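
The mmap.c hunks above drop the extra find_vma_prev() lookups and instead read the predecessor directly from vma->vm_prev once the VMA is known. A small linked-list sketch of that shape; struct fake_vma and find_node() are invented for this example and only model the "predecessor comes for free" point.

    #include <stddef.h>
    #include <stdio.h>

    struct fake_vma {
        unsigned long start, end;
        struct fake_vma *prev, *next;
    };

    static struct fake_vma a = { 0x1000, 0x2000, NULL, NULL };
    static struct fake_vma b = { 0x3000, 0x4000, &a, NULL };

    static struct fake_vma *find_node(struct fake_vma *head, unsigned long addr)
    {
        struct fake_vma *v;

        for (v = head; v; v = v->next)
            if (addr < v->end)
                return v;   /* first node ending after addr, like find_vma() */
        return NULL;
    }

    int main(void)
    {
        struct fake_vma *vma, *prev;

        a.next = &b;
        vma = find_node(&a, 0x3800);        /* one lookup ...               */
        prev = vma ? vma->prev : NULL;      /* ... predecessor read directly */
        printf("prev starts at %#lx\n", prev ? prev->start : 0UL);
        return 0;
    }
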
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 74ccff61d1be..53bffc6c293e 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -162,13 +162,13 @@ static void free_page_cgroup(void *addr)
162} 162}
163#endif 163#endif
164 164
165static int __meminit init_section_page_cgroup(unsigned long pfn) 165static int __meminit init_section_page_cgroup(unsigned long pfn, int nid)
166{ 166{
167 struct page_cgroup *base, *pc; 167 struct page_cgroup *base, *pc;
168 struct mem_section *section; 168 struct mem_section *section;
169 unsigned long table_size; 169 unsigned long table_size;
170 unsigned long nr; 170 unsigned long nr;
171 int nid, index; 171 int index;
172 172
173 nr = pfn_to_section_nr(pfn); 173 nr = pfn_to_section_nr(pfn);
174 section = __nr_to_section(nr); 174 section = __nr_to_section(nr);
@@ -176,7 +176,6 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
176 if (section->page_cgroup) 176 if (section->page_cgroup)
177 return 0; 177 return 0;
178 178
179 nid = page_to_nid(pfn_to_page(pfn));
180 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; 179 table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION;
181 base = alloc_page_cgroup(table_size, nid); 180 base = alloc_page_cgroup(table_size, nid);
182 181
@@ -196,7 +195,11 @@ static int __meminit init_section_page_cgroup(unsigned long pfn)
196 pc = base + index; 195 pc = base + index;
197 init_page_cgroup(pc, nr); 196 init_page_cgroup(pc, nr);
198 } 197 }
199 198 /*
199 * The passed "pfn" may not be aligned to SECTION. For the calculation
200 * we need to apply a mask.
201 */
202 pfn &= PAGE_SECTION_MASK;
200 section->page_cgroup = base - pfn; 203 section->page_cgroup = base - pfn;
201 total_usage += table_size; 204 total_usage += table_size;
202 return 0; 205 return 0;
@@ -225,10 +228,20 @@ int __meminit online_page_cgroup(unsigned long start_pfn,
225 start = start_pfn & ~(PAGES_PER_SECTION - 1); 228 start = start_pfn & ~(PAGES_PER_SECTION - 1);
226 end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION); 229 end = ALIGN(start_pfn + nr_pages, PAGES_PER_SECTION);
227 230
231 if (nid == -1) {
232 /*
233 * In this case, "nid" already exists and contains valid memory.
234 * "start_pfn" passed to us is a pfn which is an arg for
235 * online__pages(), and start_pfn should exist.
236 */
237 nid = pfn_to_nid(start_pfn);
238 VM_BUG_ON(!node_state(nid, N_ONLINE));
239 }
240
228 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { 241 for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) {
229 if (!pfn_present(pfn)) 242 if (!pfn_present(pfn))
230 continue; 243 continue;
231 fail = init_section_page_cgroup(pfn); 244 fail = init_section_page_cgroup(pfn, nid);
232 } 245 }
233 if (!fail) 246 if (!fail)
234 return 0; 247 return 0;
@@ -284,25 +297,47 @@ static int __meminit page_cgroup_callback(struct notifier_block *self,
284void __init page_cgroup_init(void) 297void __init page_cgroup_init(void)
285{ 298{
286 unsigned long pfn; 299 unsigned long pfn;
287 int fail = 0; 300 int nid;
288 301
289 if (mem_cgroup_disabled()) 302 if (mem_cgroup_disabled())
290 return; 303 return;
291 304
292 for (pfn = 0; !fail && pfn < max_pfn; pfn += PAGES_PER_SECTION) { 305 for_each_node_state(nid, N_HIGH_MEMORY) {
293 if (!pfn_present(pfn)) 306 unsigned long start_pfn, end_pfn;
294 continue; 307
295 fail = init_section_page_cgroup(pfn); 308 start_pfn = node_start_pfn(nid);
296 } 309 end_pfn = node_end_pfn(nid);
297 if (fail) { 310 /*
298 printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); 311 * start_pfn and end_pfn may not be aligned to SECTION and the
299 panic("Out of memory"); 312 * page->flags of out of node pages are not initialized. So we
300 } else { 313 * scan [start_pfn, the biggest section's pfn < end_pfn) here.
301 hotplug_memory_notifier(page_cgroup_callback, 0); 314 */
315 for (pfn = start_pfn;
316 pfn < end_pfn;
317 pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) {
318
319 if (!pfn_valid(pfn))
320 continue;
321 /*
322 * Nodes's pfns can be overlapping.
323 * We know some arch can have a nodes layout such as
324 * -------------pfn-------------->
325 * N0 | N1 | N2 | N0 | N1 | N2|....
326 */
327 if (pfn_to_nid(pfn) != nid)
328 continue;
329 if (init_section_page_cgroup(pfn, nid))
330 goto oom;
331 }
302 } 332 }
333 hotplug_memory_notifier(page_cgroup_callback, 0);
303 printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); 334 printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage);
304 printk(KERN_INFO "please try 'cgroup_disable=memory' option if you don't" 335 printk(KERN_INFO "please try 'cgroup_disable=memory' option if you "
305 " want memory cgroups\n"); 336 "don't want memory cgroups\n");
337 return;
338oom:
339 printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n");
340 panic("Out of memory");
306} 341}
307 342
308void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) 343void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat)
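
The page_cgroup.c changes above allocate per node, round the pfn down with a section mask inside init_section_page_cgroup(), and have page_cgroup_init() walk each node section by section, skipping pfns that actually belong to another (overlapping) node. The following is a toy walk of that masking-and-skipping loop; SECTION_SHIFT, fake_nid_of() and the pfn range are invented numbers, not the kernel's.

    #include <stdio.h>

    #define SECTION_SHIFT    4                   /* 16 pfns per "section" here */
    #define PFNS_PER_SECTION (1UL << SECTION_SHIFT)
    #define SECTION_MASK     (~(PFNS_PER_SECTION - 1))

    static int fake_nid_of(unsigned long pfn)
    {
        return (pfn / 32) & 1;  /* pretend nodes interleave every 32 pfns */
    }

    int main(void)
    {
        unsigned long start = 5, end = 100, pfn;
        int nid = 0;

        for (pfn = start & SECTION_MASK; pfn < end; pfn += PFNS_PER_SECTION) {
            if (fake_nid_of(pfn) != nid)
                continue;   /* like the pfn_to_nid(pfn) != nid check above */
            printf("init section starting at pfn %lu for node %d\n", pfn, nid);
        }
        return 0;
    }
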
diff --git a/mm/rmap.c b/mm/rmap.c
index 0eb463ea88dd..27dfd3b82b0f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -112,9 +112,9 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
112 kmem_cache_free(anon_vma_cachep, anon_vma); 112 kmem_cache_free(anon_vma_cachep, anon_vma);
113} 113}
114 114
115static inline struct anon_vma_chain *anon_vma_chain_alloc(void) 115static inline struct anon_vma_chain *anon_vma_chain_alloc(gfp_t gfp)
116{ 116{
117 return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL); 117 return kmem_cache_alloc(anon_vma_chain_cachep, gfp);
118} 118}
119 119
120static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain) 120static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
@@ -159,7 +159,7 @@ int anon_vma_prepare(struct vm_area_struct *vma)
159 struct mm_struct *mm = vma->vm_mm; 159 struct mm_struct *mm = vma->vm_mm;
160 struct anon_vma *allocated; 160 struct anon_vma *allocated;
161 161
162 avc = anon_vma_chain_alloc(); 162 avc = anon_vma_chain_alloc(GFP_KERNEL);
163 if (!avc) 163 if (!avc)
164 goto out_enomem; 164 goto out_enomem;
165 165
@@ -200,6 +200,32 @@ int anon_vma_prepare(struct vm_area_struct *vma)
200 return -ENOMEM; 200 return -ENOMEM;
201} 201}
202 202
203/*
204 * This is a useful helper function for locking the anon_vma root as
205 * we traverse the vma->anon_vma_chain, looping over anon_vma's that
206 * have the same vma.
207 *
208 * Such anon_vma's should have the same root, so you'd expect to see
209 * just a single mutex_lock for the whole traversal.
210 */
211static inline struct anon_vma *lock_anon_vma_root(struct anon_vma *root, struct anon_vma *anon_vma)
212{
213 struct anon_vma *new_root = anon_vma->root;
214 if (new_root != root) {
215 if (WARN_ON_ONCE(root))
216 mutex_unlock(&root->mutex);
217 root = new_root;
218 mutex_lock(&root->mutex);
219 }
220 return root;
221}
222
223static inline void unlock_anon_vma_root(struct anon_vma *root)
224{
225 if (root)
226 mutex_unlock(&root->mutex);
227}
228
203static void anon_vma_chain_link(struct vm_area_struct *vma, 229static void anon_vma_chain_link(struct vm_area_struct *vma,
204 struct anon_vma_chain *avc, 230 struct anon_vma_chain *avc,
205 struct anon_vma *anon_vma) 231 struct anon_vma *anon_vma)
@@ -208,13 +234,11 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
208 avc->anon_vma = anon_vma; 234 avc->anon_vma = anon_vma;
209 list_add(&avc->same_vma, &vma->anon_vma_chain); 235 list_add(&avc->same_vma, &vma->anon_vma_chain);
210 236
211 anon_vma_lock(anon_vma);
212 /* 237 /*
213 * It's critical to add new vmas to the tail of the anon_vma, 238 * It's critical to add new vmas to the tail of the anon_vma,
214 * see comment in huge_memory.c:__split_huge_page(). 239 * see comment in huge_memory.c:__split_huge_page().
215 */ 240 */
216 list_add_tail(&avc->same_anon_vma, &anon_vma->head); 241 list_add_tail(&avc->same_anon_vma, &anon_vma->head);
217 anon_vma_unlock(anon_vma);
218} 242}
219 243
220/* 244/*
@@ -224,13 +248,24 @@ static void anon_vma_chain_link(struct vm_area_struct *vma,
224int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) 248int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
225{ 249{
226 struct anon_vma_chain *avc, *pavc; 250 struct anon_vma_chain *avc, *pavc;
251 struct anon_vma *root = NULL;
227 252
228 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) { 253 list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
229 avc = anon_vma_chain_alloc(); 254 struct anon_vma *anon_vma;
230 if (!avc) 255
231 goto enomem_failure; 256 avc = anon_vma_chain_alloc(GFP_NOWAIT | __GFP_NOWARN);
232 anon_vma_chain_link(dst, avc, pavc->anon_vma); 257 if (unlikely(!avc)) {
258 unlock_anon_vma_root(root);
259 root = NULL;
260 avc = anon_vma_chain_alloc(GFP_KERNEL);
261 if (!avc)
262 goto enomem_failure;
263 }
264 anon_vma = pavc->anon_vma;
265 root = lock_anon_vma_root(root, anon_vma);
266 anon_vma_chain_link(dst, avc, anon_vma);
233 } 267 }
268 unlock_anon_vma_root(root);
234 return 0; 269 return 0;
235 270
236 enomem_failure: 271 enomem_failure:
@@ -263,7 +298,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
263 anon_vma = anon_vma_alloc(); 298 anon_vma = anon_vma_alloc();
264 if (!anon_vma) 299 if (!anon_vma)
265 goto out_error; 300 goto out_error;
266 avc = anon_vma_chain_alloc(); 301 avc = anon_vma_chain_alloc(GFP_KERNEL);
267 if (!avc) 302 if (!avc)
268 goto out_error_free_anon_vma; 303 goto out_error_free_anon_vma;
269 304
@@ -280,7 +315,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
280 get_anon_vma(anon_vma->root); 315 get_anon_vma(anon_vma->root);
281 /* Mark this anon_vma as the one where our new (COWed) pages go. */ 316 /* Mark this anon_vma as the one where our new (COWed) pages go. */
282 vma->anon_vma = anon_vma; 317 vma->anon_vma = anon_vma;
318 anon_vma_lock(anon_vma);
283 anon_vma_chain_link(vma, avc, anon_vma); 319 anon_vma_chain_link(vma, avc, anon_vma);
320 anon_vma_unlock(anon_vma);
284 321
285 return 0; 322 return 0;
286 323
@@ -291,36 +328,43 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
291 return -ENOMEM; 328 return -ENOMEM;
292} 329}
293 330
294static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
295{
296 struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
297 int empty;
298
299 /* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
300 if (!anon_vma)
301 return;
302
303 anon_vma_lock(anon_vma);
304 list_del(&anon_vma_chain->same_anon_vma);
305
306 /* We must garbage collect the anon_vma if it's empty */
307 empty = list_empty(&anon_vma->head);
308 anon_vma_unlock(anon_vma);
309
310 if (empty)
311 put_anon_vma(anon_vma);
312}
313
314void unlink_anon_vmas(struct vm_area_struct *vma) 331void unlink_anon_vmas(struct vm_area_struct *vma)
315{ 332{
316 struct anon_vma_chain *avc, *next; 333 struct anon_vma_chain *avc, *next;
334 struct anon_vma *root = NULL;
317 335
318 /* 336 /*
319 * Unlink each anon_vma chained to the VMA. This list is ordered 337 * Unlink each anon_vma chained to the VMA. This list is ordered
320 * from newest to oldest, ensuring the root anon_vma gets freed last. 338 * from newest to oldest, ensuring the root anon_vma gets freed last.
321 */ 339 */
322 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { 340 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
323 anon_vma_unlink(avc); 341 struct anon_vma *anon_vma = avc->anon_vma;
342
343 root = lock_anon_vma_root(root, anon_vma);
344 list_del(&avc->same_anon_vma);
345
346 /*
347 * Leave empty anon_vmas on the list - we'll need
348 * to free them outside the lock.
349 */
350 if (list_empty(&anon_vma->head))
351 continue;
352
353 list_del(&avc->same_vma);
354 anon_vma_chain_free(avc);
355 }
356 unlock_anon_vma_root(root);
357
358 /*
359 * Iterate the list once more, it now only contains empty and unlinked
360 * anon_vmas, destroy them. Could not do before due to __put_anon_vma()
361 * needing to acquire the anon_vma->root->mutex.
362 */
363 list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
364 struct anon_vma *anon_vma = avc->anon_vma;
365
366 put_anon_vma(anon_vma);
367
324 list_del(&avc->same_vma); 368 list_del(&avc->same_vma);
325 anon_vma_chain_free(avc); 369 anon_vma_chain_free(avc);
326 } 370 }
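
The rmap.c hunks above introduce lock_anon_vma_root()/unlock_anon_vma_root() so a whole anon_vma_chain walk takes the shared root mutex once and only re-locks when the root actually changes, instead of locking and unlocking per entry. A userspace sketch of that lock-handoff pattern; struct fake_root, struct fake_item and the two-element chain are invented.

    #include <pthread.h>
    #include <stdio.h>

    struct fake_root { pthread_mutex_t lock; };
    struct fake_item { struct fake_root *root; };

    static struct fake_root *lock_root(struct fake_root *held, struct fake_root *want)
    {
        if (want != held) {
            if (held)
                pthread_mutex_unlock(&held->lock);
            pthread_mutex_lock(&want->lock);
            held = want;
        }
        return held;
    }

    int main(void)
    {
        struct fake_root r = { PTHREAD_MUTEX_INITIALIZER };
        struct fake_item chain[2] = { { &r }, { &r } };
        struct fake_root *held = NULL;
        int i;

        for (i = 0; i < 2; i++) {
            held = lock_root(held, chain[i].root);
            /* ... per-item work done under the shared root lock ... */
        }
        if (held)
            pthread_mutex_unlock(&held->lock);
        puts("chain walked with a single lock/unlock of the shared root");
        return 0;
    }

As in the patch, the caller is responsible for the final unlock once the traversal ends.
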
diff --git a/mm/slab.c b/mm/slab.c
index bcfa4987c8ae..d96e223de775 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3604,13 +3604,14 @@ free_done:
3604 * Release an obj back to its cache. If the obj has a constructed state, it must 3604 * Release an obj back to its cache. If the obj has a constructed state, it must
3605 * be in this state _before_ it is released. Called with disabled ints. 3605 * be in this state _before_ it is released. Called with disabled ints.
3606 */ 3606 */
3607static inline void __cache_free(struct kmem_cache *cachep, void *objp) 3607static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3608 void *caller)
3608{ 3609{
3609 struct array_cache *ac = cpu_cache_get(cachep); 3610 struct array_cache *ac = cpu_cache_get(cachep);
3610 3611
3611 check_irq_off(); 3612 check_irq_off();
3612 kmemleak_free_recursive(objp, cachep->flags); 3613 kmemleak_free_recursive(objp, cachep->flags);
3613 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); 3614 objp = cache_free_debugcheck(cachep, objp, caller);
3614 3615
3615 kmemcheck_slab_free(cachep, objp, obj_size(cachep)); 3616 kmemcheck_slab_free(cachep, objp, obj_size(cachep));
3616 3617
@@ -3801,7 +3802,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3801 debug_check_no_locks_freed(objp, obj_size(cachep)); 3802 debug_check_no_locks_freed(objp, obj_size(cachep));
3802 if (!(cachep->flags & SLAB_DEBUG_OBJECTS)) 3803 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3803 debug_check_no_obj_freed(objp, obj_size(cachep)); 3804 debug_check_no_obj_freed(objp, obj_size(cachep));
3804 __cache_free(cachep, objp); 3805 __cache_free(cachep, objp, __builtin_return_address(0));
3805 local_irq_restore(flags); 3806 local_irq_restore(flags);
3806 3807
3807 trace_kmem_cache_free(_RET_IP_, objp); 3808 trace_kmem_cache_free(_RET_IP_, objp);
@@ -3831,7 +3832,7 @@ void kfree(const void *objp)
3831 c = virt_to_cache(objp); 3832 c = virt_to_cache(objp);
3832 debug_check_no_locks_freed(objp, obj_size(c)); 3833 debug_check_no_locks_freed(objp, obj_size(c));
3833 debug_check_no_obj_freed(objp, obj_size(c)); 3834 debug_check_no_obj_freed(objp, obj_size(c));
3834 __cache_free(c, (void *)objp); 3835 __cache_free(c, (void *)objp, __builtin_return_address(0));
3835 local_irq_restore(flags); 3836 local_irq_restore(flags);
3836} 3837}
3837EXPORT_SYMBOL(kfree); 3838EXPORT_SYMBOL(kfree);
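
The slab.c change above threads the caller's return address from the exported kfree()/kmem_cache_free() entry points down into __cache_free(), so debug checks record the real API caller rather than the internal frame. A userspace sketch of the same idea, assuming a GCC-style __builtin_return_address(); my_free() and release() are invented names.

    #include <stdio.h>
    #include <stdlib.h>

    static void release(void *p, void *caller)
    {
        printf("freeing %p, called from %p\n", p, caller);
        free(p);
    }

    void my_free(void *p)
    {
        /* Record who called the public API, not this internal helper. */
        release(p, __builtin_return_address(0));
    }

    int main(void)
    {
        my_free(malloc(16));
        return 0;
    }
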
diff --git a/mm/slub.c b/mm/slub.c
index 7be0223531b0..35f351f26193 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2320,16 +2320,12 @@ static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
2320 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE < 2320 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
2321 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu)); 2321 SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
2322 2322
2323#ifdef CONFIG_CMPXCHG_LOCAL
2324 /* 2323 /*
2325 * Must align to double word boundary for the double cmpxchg instructions 2324 * Must align to double word boundary for the double cmpxchg
2326 * to work. 2325 * instructions to work; see __pcpu_double_call_return_bool().
2327 */ 2326 */
2328 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu), 2 * sizeof(void *)); 2327 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2329#else 2328 2 * sizeof(void *));
2330 /* Regular alignment is sufficient */
2331 s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
2332#endif
2333 2329
2334 if (!s->cpu_slab) 2330 if (!s->cpu_slab)
2335 return 0; 2331 return 0;
diff --git a/mm/thrash.c b/mm/thrash.c
index 2372d4ed5dd8..fabf2d0f5169 100644
--- a/mm/thrash.c
+++ b/mm/thrash.c
@@ -21,14 +21,40 @@
21#include <linux/mm.h> 21#include <linux/mm.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/swap.h> 23#include <linux/swap.h>
24#include <linux/memcontrol.h>
25
26#include <trace/events/vmscan.h>
27
28#define TOKEN_AGING_INTERVAL (0xFF)
24 29
25static DEFINE_SPINLOCK(swap_token_lock); 30static DEFINE_SPINLOCK(swap_token_lock);
26struct mm_struct *swap_token_mm; 31struct mm_struct *swap_token_mm;
32struct mem_cgroup *swap_token_memcg;
27static unsigned int global_faults; 33static unsigned int global_faults;
34static unsigned int last_aging;
35
36#ifdef CONFIG_CGROUP_MEM_RES_CTLR
37static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
38{
39 struct mem_cgroup *memcg;
40
41 memcg = try_get_mem_cgroup_from_mm(mm);
42 if (memcg)
43 css_put(mem_cgroup_css(memcg));
44
45 return memcg;
46}
47#else
48static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
49{
50 return NULL;
51}
52#endif
28 53
29void grab_swap_token(struct mm_struct *mm) 54void grab_swap_token(struct mm_struct *mm)
30{ 55{
31 int current_interval; 56 int current_interval;
57 unsigned int old_prio = mm->token_priority;
32 58
33 global_faults++; 59 global_faults++;
34 60
@@ -38,40 +64,81 @@ void grab_swap_token(struct mm_struct *mm)
38 return; 64 return;
39 65
40 /* First come first served */ 66 /* First come first served */
41 if (swap_token_mm == NULL) { 67 if (!swap_token_mm)
42 mm->token_priority = mm->token_priority + 2; 68 goto replace_token;
43 swap_token_mm = mm; 69
44 goto out; 70 if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
71 swap_token_mm->token_priority /= 2;
72 last_aging = global_faults;
45 } 73 }
46 74
47 if (mm != swap_token_mm) { 75 if (mm == swap_token_mm) {
48 if (current_interval < mm->last_interval)
49 mm->token_priority++;
50 else {
51 if (likely(mm->token_priority > 0))
52 mm->token_priority--;
53 }
54 /* Check if we deserve the token */
55 if (mm->token_priority > swap_token_mm->token_priority) {
56 mm->token_priority += 2;
57 swap_token_mm = mm;
58 }
59 } else {
60 /* Token holder came in again! */
61 mm->token_priority += 2; 76 mm->token_priority += 2;
77 goto update_priority;
78 }
79
80 if (current_interval < mm->last_interval)
81 mm->token_priority++;
82 else {
83 if (likely(mm->token_priority > 0))
84 mm->token_priority--;
62 } 85 }
63 86
87 /* Check if we deserve the token */
88 if (mm->token_priority > swap_token_mm->token_priority)
89 goto replace_token;
90
91update_priority:
92 trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
93
64out: 94out:
65 mm->faultstamp = global_faults; 95 mm->faultstamp = global_faults;
66 mm->last_interval = current_interval; 96 mm->last_interval = current_interval;
67 spin_unlock(&swap_token_lock); 97 spin_unlock(&swap_token_lock);
98 return;
99
100replace_token:
101 mm->token_priority += 2;
102 trace_replace_swap_token(swap_token_mm, mm);
103 swap_token_mm = mm;
104 swap_token_memcg = swap_token_memcg_from_mm(mm);
105 last_aging = global_faults;
106 goto out;
68} 107}
69 108
70/* Called on process exit. */ 109/* Called on process exit. */
71void __put_swap_token(struct mm_struct *mm) 110void __put_swap_token(struct mm_struct *mm)
72{ 111{
73 spin_lock(&swap_token_lock); 112 spin_lock(&swap_token_lock);
74 if (likely(mm == swap_token_mm)) 113 if (likely(mm == swap_token_mm)) {
114 trace_put_swap_token(swap_token_mm);
75 swap_token_mm = NULL; 115 swap_token_mm = NULL;
116 swap_token_memcg = NULL;
117 }
76 spin_unlock(&swap_token_lock); 118 spin_unlock(&swap_token_lock);
77} 119}
120
121static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
122{
123 if (!a)
124 return true;
125 if (!b)
126 return true;
127 if (a == b)
128 return true;
129 return false;
130}
131
132void disable_swap_token(struct mem_cgroup *memcg)
133{
134 /* memcg reclaim don't disable unrelated mm token. */
135 if (match_memcg(memcg, swap_token_memcg)) {
136 spin_lock(&swap_token_lock);
137 if (match_memcg(memcg, swap_token_memcg)) {
138 trace_disable_swap_token(swap_token_mm);
139 swap_token_mm = NULL;
140 swap_token_memcg = NULL;
141 }
142 spin_unlock(&swap_token_lock);
143 }
144}
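
The thrash.c rework above makes the swap token memcg-aware and adds periodic priority aging; match_memcg() treats a NULL cgroup on either side as a wildcard, so global reclaim (which passes NULL) can still revoke a token owned by any cgroup. A tiny standalone demonstration of those matching semantics; the fake_cg type is invented, the kernel compares struct mem_cgroup pointers the same way.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_cg { int id; };

    static bool match_cg(const struct fake_cg *a, const struct fake_cg *b)
    {
        return !a || !b || a == b;
    }

    int main(void)
    {
        struct fake_cg x = { 1 }, y = { 2 };

        printf("%d %d %d %d\n",
               match_cg(NULL, &x),      /* global reclaim matches anything: 1 */
               match_cg(&x, NULL),      /* token with no owner recorded: 1    */
               match_cg(&x, &x),        /* same cgroup: 1                     */
               match_cg(&x, &y));       /* unrelated cgroup: 0                */
        return 0;
    }
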
diff --git a/mm/vmscan.c b/mm/vmscan.c
index faa0a088f9cc..8ff834e19c24 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1124,8 +1124,20 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1124 nr_lumpy_dirty++; 1124 nr_lumpy_dirty++;
1125 scan++; 1125 scan++;
1126 } else { 1126 } else {
1127 /* the page is freed already. */ 1127 /*
1128 if (!page_count(cursor_page)) 1128 * Check if the page is freed already.
1129 *
1130 * We can't use page_count() as that
1131 * requires compound_head and we don't
1132 * have a pin on the page here. If a
1133 * page is tail, we may or may not
1134 * have isolated the head, so assume
1135 * it's not free, it'd be tricky to
1136 * track the head status without a
1137 * page pin.
1138 */
1139 if (!PageTail(cursor_page) &&
1140 !atomic_read(&cursor_page->_count))
1129 continue; 1141 continue;
1130 break; 1142 break;
1131 } 1143 }
@@ -2081,7 +2093,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2081 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2093 for (priority = DEF_PRIORITY; priority >= 0; priority--) {
2082 sc->nr_scanned = 0; 2094 sc->nr_scanned = 0;
2083 if (!priority) 2095 if (!priority)
2084 disable_swap_token(); 2096 disable_swap_token(sc->mem_cgroup);
2085 total_scanned += shrink_zones(priority, zonelist, sc); 2097 total_scanned += shrink_zones(priority, zonelist, sc);
2086 /* 2098 /*
2087 * Don't shrink slabs when reclaiming memory from 2099 * Don't shrink slabs when reclaiming memory from
@@ -2407,7 +2419,7 @@ loop_again:
2407 2419
2408 /* The swap token gets in the way of swapout... */ 2420 /* The swap token gets in the way of swapout... */
2409 if (!priority) 2421 if (!priority)
2410 disable_swap_token(); 2422 disable_swap_token(NULL);
2411 2423
2412 all_zones_ok = 1; 2424 all_zones_ok = 1;
2413 balanced = 0; 2425 balanced = 0;
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index cfa9afe9b11e..d24c4644b930 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -207,7 +207,7 @@ int register_vlan_dev(struct net_device *dev)
207 grp->nr_vlans++; 207 grp->nr_vlans++;
208 208
209 if (ngrp) { 209 if (ngrp) {
210 if (ops->ndo_vlan_rx_register) 210 if (ops->ndo_vlan_rx_register && (real_dev->features & NETIF_F_HW_VLAN_RX))
211 ops->ndo_vlan_rx_register(real_dev, ngrp); 211 ops->ndo_vlan_rx_register(real_dev, ngrp);
212 rcu_assign_pointer(real_dev->vlgrp, ngrp); 212 rcu_assign_pointer(real_dev->vlgrp, ngrp);
213 } 213 }
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 41495dc2a4c9..fcc684678af6 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -23,6 +23,31 @@ bool vlan_do_receive(struct sk_buff **skbp)
23 return false; 23 return false;
24 24
25 skb->dev = vlan_dev; 25 skb->dev = vlan_dev;
26 if (skb->pkt_type == PACKET_OTHERHOST) {
27 /* Our lower layer thinks this is not local, let's make sure.
28 * This allows the VLAN to have a different MAC than the
29 * underlying device, and still route correctly. */
30 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
31 vlan_dev->dev_addr))
32 skb->pkt_type = PACKET_HOST;
33 }
34
35 if (!(vlan_dev_info(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
36 unsigned int offset = skb->data - skb_mac_header(skb);
37
38 /*
39 * vlan_insert_tag expect skb->data pointing to mac header.
40 * So change skb->data before calling it and change back to
41 * original position later
42 */
43 skb_push(skb, offset);
44 skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
45 if (!skb)
46 return false;
47 skb_pull(skb, offset + VLAN_HLEN);
48 skb_reset_mac_len(skb);
49 }
50
26 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci); 51 skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
27 skb->vlan_tci = 0; 52 skb->vlan_tci = 0;
28 53
@@ -31,22 +56,8 @@ bool vlan_do_receive(struct sk_buff **skbp)
31 u64_stats_update_begin(&rx_stats->syncp); 56 u64_stats_update_begin(&rx_stats->syncp);
32 rx_stats->rx_packets++; 57 rx_stats->rx_packets++;
33 rx_stats->rx_bytes += skb->len; 58 rx_stats->rx_bytes += skb->len;
34 59 if (skb->pkt_type == PACKET_MULTICAST)
35 switch (skb->pkt_type) {
36 case PACKET_BROADCAST:
37 break;
38 case PACKET_MULTICAST:
39 rx_stats->rx_multicast++; 60 rx_stats->rx_multicast++;
40 break;
41 case PACKET_OTHERHOST:
42 /* Our lower layer thinks this is not local, let's make sure.
43 * This allows the VLAN to have a different MAC than the
44 * underlying device, and still route correctly. */
45 if (!compare_ether_addr(eth_hdr(skb)->h_dest,
46 vlan_dev->dev_addr))
47 skb->pkt_type = PACKET_HOST;
48 break;
49 }
50 u64_stats_update_end(&rx_stats->syncp); 61 u64_stats_update_end(&rx_stats->syncp);
51 62
52 return true; 63 return true;
@@ -89,18 +100,13 @@ gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
89} 100}
90EXPORT_SYMBOL(vlan_gro_frags); 101EXPORT_SYMBOL(vlan_gro_frags);
91 102
92static struct sk_buff *vlan_check_reorder_header(struct sk_buff *skb) 103static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
93{ 104{
94 if (vlan_dev_info(skb->dev)->flags & VLAN_FLAG_REORDER_HDR) { 105 if (skb_cow(skb, skb_headroom(skb)) < 0)
95 if (skb_cow(skb, skb_headroom(skb)) < 0) 106 return NULL;
96 skb = NULL; 107 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
97 if (skb) { 108 skb->mac_header += VLAN_HLEN;
98 /* Lifted from Gleb's VLAN code... */ 109 skb_reset_mac_len(skb);
99 memmove(skb->data - ETH_HLEN,
100 skb->data - VLAN_ETH_HLEN, 12);
101 skb->mac_header += VLAN_HLEN;
102 }
103 }
104 return skb; 110 return skb;
105} 111}
106 112
@@ -161,7 +167,7 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
161 skb_pull_rcsum(skb, VLAN_HLEN); 167 skb_pull_rcsum(skb, VLAN_HLEN);
162 vlan_set_encap_proto(skb, vhdr); 168 vlan_set_encap_proto(skb, vhdr);
163 169
164 skb = vlan_check_reorder_header(skb); 170 skb = vlan_reorder_header(skb);
165 if (unlikely(!skb)) 171 if (unlikely(!skb))
166 goto err_free; 172 goto err_free;
167 173
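
The vlan_core.c rewrite above always strips the 802.1Q tag in vlan_reorder_header() by sliding the two MAC addresses (2 * ETH_ALEN bytes) forward over the 4-byte tag with memmove(), and re-inserts the tag later only when REORDER_HDR is off. Below is a standalone sketch of just that byte shuffle on a made-up frame; the constants mirror ETH_ALEN/VLAN_HLEN but the buffer contents are invented.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define ETH_ALEN  6
    #define VLAN_HLEN 4

    int main(void)
    {
        /* dst(6) src(6) 0x8100 TCI(2) inner-type(2) payload... */
        uint8_t frame[32] = {
            1,1,1,1,1,1,  2,2,2,2,2,2,  0x81,0x00, 0x00,0x05, 0x08,0x00, 0xde,0xad
        };

        /* Slide dst+src over the tag, as vlan_reorder_header() does. */
        memmove(frame + VLAN_HLEN, frame, 2 * ETH_ALEN);

        /* The untagged frame now starts VLAN_HLEN bytes in. */
        printf("inner ethertype: %02x%02x\n",
               frame[VLAN_HLEN + 2 * ETH_ALEN], frame[VLAN_HLEN + 2 * ETH_ALEN + 1]);
        return 0;
    }
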
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index f13ddbf858ba..77930aa522e3 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -477,14 +477,16 @@ static void hci_setup_event_mask(struct hci_dev *hdev)
477 * command otherwise */ 477 * command otherwise */
478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; 478 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
479 479
480 /* Events for 1.2 and newer controllers */ 480 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
481 if (hdev->lmp_ver > 1) { 481 * any event mask for pre 1.2 devices */
482 events[4] |= 0x01; /* Flow Specification Complete */ 482 if (hdev->lmp_ver <= 1)
483 events[4] |= 0x02; /* Inquiry Result with RSSI */ 483 return;
484 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 484
485 events[5] |= 0x08; /* Synchronous Connection Complete */ 485 events[4] |= 0x01; /* Flow Specification Complete */
486 events[5] |= 0x10; /* Synchronous Connection Changed */ 486 events[4] |= 0x02; /* Inquiry Result with RSSI */
487 } 487 events[4] |= 0x04; /* Read Remote Extended Features Complete */
488 events[5] |= 0x08; /* Synchronous Connection Complete */
489 events[5] |= 0x10; /* Synchronous Connection Changed */
488 490
489 if (hdev->features[3] & LMP_RSSI_INQ) 491 if (hdev->features[3] & LMP_RSSI_INQ)
490 events[4] |= 0x04; /* Inquiry Result with RSSI */ 492 events[4] |= 0x04; /* Inquiry Result with RSSI */
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 18dc9888d8c2..8248303f44e8 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -413,6 +413,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __us
413 break; 413 break;
414 } 414 }
415 415
416 memset(&cinfo, 0, sizeof(cinfo));
416 cinfo.hci_handle = chan->conn->hcon->handle; 417 cinfo.hci_handle = chan->conn->hcon->handle;
417 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3); 418 memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
418 419
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 386cfaffd4b7..1b10727ce523 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -788,6 +788,7 @@ static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __u
788 788
789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; 789 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
790 790
791 memset(&cinfo, 0, sizeof(cinfo));
791 cinfo.hci_handle = conn->hcon->handle; 792 cinfo.hci_handle = conn->hcon->handle;
792 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); 793 memcpy(cinfo.dev_class, conn->hcon->dev_class, 3);
793 794
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 42fdffd1d76c..cb4fb7837e5c 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -369,6 +369,15 @@ static void __sco_sock_close(struct sock *sk)
369 369
370 case BT_CONNECTED: 370 case BT_CONNECTED:
371 case BT_CONFIG: 371 case BT_CONFIG:
372 if (sco_pi(sk)->conn) {
373 sk->sk_state = BT_DISCONN;
374 sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
375 hci_conn_put(sco_pi(sk)->conn->hcon);
376 sco_pi(sk)->conn->hcon = NULL;
377 } else
378 sco_chan_del(sk, ECONNRESET);
379 break;
380
372 case BT_CONNECT: 381 case BT_CONNECT:
373 case BT_DISCONN: 382 case BT_DISCONN:
374 sco_chan_del(sk, ECONNRESET); 383 sco_chan_del(sk, ECONNRESET);
@@ -819,7 +828,9 @@ static void sco_chan_del(struct sock *sk, int err)
819 conn->sk = NULL; 828 conn->sk = NULL;
820 sco_pi(sk)->conn = NULL; 829 sco_pi(sk)->conn = NULL;
821 sco_conn_unlock(conn); 830 sco_conn_unlock(conn);
822 hci_conn_put(conn->hcon); 831
832 if (conn->hcon)
833 hci_conn_put(conn->hcon);
823 } 834 }
824 835
825 sk->sk_state = BT_CLOSED; 836 sk->sk_state = BT_CLOSED;
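
The two memset(&cinfo, 0, sizeof(cinfo)) additions in the l2cap and rfcomm getsockopt paths above exist because structures copied to user space can contain padding bytes that would otherwise leak stale kernel stack contents. A userspace sketch of the same precaution; struct conn_info here is invented and only resembles the Bluetooth conninfo structures in spirit.

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    struct conn_info {
        uint16_t handle;     /* 2 bytes ...                                   */
        uint32_t dev_class;  /* ... typically 2 padding bytes sit in between  */
    };

    int main(void)
    {
        struct conn_info cinfo;

        memset(&cinfo, 0, sizeof(cinfo)); /* zero padding before copying it out */
        cinfo.handle = 0x2a;
        cinfo.dev_class = 0x5a020c;

        printf("struct size %zu, fields use %zu bytes\n",
               sizeof(cinfo), sizeof(cinfo.handle) + sizeof(cinfo.dev_class));
        return 0;
    }

Any difference between the two printed sizes is padding, which is exactly what the memset covers.
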
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index a6b2f86378c7..c188c803c09c 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -243,6 +243,7 @@ int br_netpoll_enable(struct net_bridge_port *p)
243 goto out; 243 goto out;
244 244
245 np->dev = p->dev; 245 np->dev = p->dev;
246 strlcpy(np->dev_name, p->dev->name, IFNAMSIZ);
246 247
247 err = __netpoll_setup(np); 248 err = __netpoll_setup(np);
248 if (err) { 249 if (err) {
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 2f14eafdeeab..29b9812c8da0 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1424,7 +1424,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1424 switch (ih->type) { 1424 switch (ih->type) {
1425 case IGMP_HOST_MEMBERSHIP_REPORT: 1425 case IGMP_HOST_MEMBERSHIP_REPORT:
1426 case IGMPV2_HOST_MEMBERSHIP_REPORT: 1426 case IGMPV2_HOST_MEMBERSHIP_REPORT:
1427 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1427 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1428 err = br_ip4_multicast_add_group(br, port, ih->group); 1428 err = br_ip4_multicast_add_group(br, port, ih->group);
1429 break; 1429 break;
1430 case IGMPV3_HOST_MEMBERSHIP_REPORT: 1430 case IGMPV3_HOST_MEMBERSHIP_REPORT:
@@ -1543,7 +1543,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
1543 goto out; 1543 goto out;
1544 } 1544 }
1545 mld = (struct mld_msg *)skb_transport_header(skb2); 1545 mld = (struct mld_msg *)skb_transport_header(skb2);
1546 BR_INPUT_SKB_CB(skb2)->mrouters_only = 1; 1546 BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca); 1547 err = br_ip6_multicast_add_group(br, port, &mld->mld_mca);
1548 break; 1548 break;
1549 } 1549 }
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 3fa123185e89..56149ec36d7f 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -104,10 +104,16 @@ static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
104{ 104{
105} 105}
106 106
107static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
108{
109 return NULL;
110}
111
107static struct dst_ops fake_dst_ops = { 112static struct dst_ops fake_dst_ops = {
108 .family = AF_INET, 113 .family = AF_INET,
109 .protocol = cpu_to_be16(ETH_P_IP), 114 .protocol = cpu_to_be16(ETH_P_IP),
110 .update_pmtu = fake_update_pmtu, 115 .update_pmtu = fake_update_pmtu,
116 .cow_metrics = fake_cow_metrics,
111}; 117};
112 118
113/* 119/*
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
index 3a66b8c10e09..c23979e79dfa 100644
--- a/net/caif/cfmuxl.c
+++ b/net/caif/cfmuxl.c
@@ -255,7 +255,7 @@ static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
255 255
256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { 256 if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) {
257 257
258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND || 258 if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND ||
259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && 259 ctrl == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) &&
260 layer->id != 0) { 260 layer->id != 0) {
261 261
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6ea2b892f44b..9cb627a4073a 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1144,6 +1144,13 @@ static void handle_osds_timeout(struct work_struct *work)
1144 round_jiffies_relative(delay)); 1144 round_jiffies_relative(delay));
1145} 1145}
1146 1146
1147static void complete_request(struct ceph_osd_request *req)
1148{
1149 if (req->r_safe_callback)
1150 req->r_safe_callback(req, NULL);
1151 complete_all(&req->r_safe_completion); /* fsync waiter */
1152}
1153
1147/* 1154/*
1148 * handle osd op reply. either call the callback if it is specified, 1155 * handle osd op reply. either call the callback if it is specified,
1149 * or do the completion to wake up the waiting thread. 1156 * or do the completion to wake up the waiting thread.
@@ -1226,11 +1233,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1226 else 1233 else
1227 complete_all(&req->r_completion); 1234 complete_all(&req->r_completion);
1228 1235
1229 if (flags & CEPH_OSD_FLAG_ONDISK) { 1236 if (flags & CEPH_OSD_FLAG_ONDISK)
1230 if (req->r_safe_callback) 1237 complete_request(req);
1231 req->r_safe_callback(req, msg);
1232 complete_all(&req->r_safe_completion); /* fsync waiter */
1233 }
1234 1238
1235done: 1239done:
1236 dout("req=%p req->r_linger=%d\n", req, req->r_linger); 1240 dout("req=%p req->r_linger=%d\n", req, req->r_linger);
@@ -1732,6 +1736,7 @@ int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
1732 __cancel_request(req); 1736 __cancel_request(req);
1733 __unregister_request(osdc, req); 1737 __unregister_request(osdc, req);
1734 mutex_unlock(&osdc->request_mutex); 1738 mutex_unlock(&osdc->request_mutex);
1739 complete_request(req);
1735 dout("wait_request tid %llu canceled/timed out\n", req->r_tid); 1740 dout("wait_request tid %llu canceled/timed out\n", req->r_tid);
1736 return rc; 1741 return rc;
1737 } 1742 }
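
The osd_client.c change above factors the "safe" completion into complete_request() and also calls it on the cancel/timeout path in ceph_osdc_wait_request(), so anyone blocked on r_safe_completion is always woken. A minimal sketch of that shape; struct fake_req and the two call sites are invented and stand in for the real reply and timeout paths.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_req {
        void (*safe_callback)(struct fake_req *);
        bool safe_completed;
    };

    static void complete_request(struct fake_req *req)
    {
        if (req->safe_callback)
            req->safe_callback(req);
        req->safe_completed = true;   /* stands in for complete_all() */
    }

    static void handle_reply(struct fake_req *req)
    {
        complete_request(req);
    }

    static void handle_timeout(struct fake_req *req)
    {
        complete_request(req);        /* previously this path skipped completion */
    }

    int main(void)
    {
        struct fake_req a = { NULL, false }, b = { NULL, false };

        handle_reply(&a);
        handle_timeout(&b);
        printf("%d %d\n", a.safe_completed, b.safe_completed);
        return 0;
    }
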
diff --git a/net/core/dev.c b/net/core/dev.c
index b3f52d2f56d7..8efe85070131 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3114,7 +3114,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
3114 3114
3115 skb_reset_network_header(skb); 3115 skb_reset_network_header(skb);
3116 skb_reset_transport_header(skb); 3116 skb_reset_transport_header(skb);
3117 skb->mac_len = skb->network_header - skb->mac_header; 3117 skb_reset_mac_len(skb);
3118 3118
3119 pt_prev = NULL; 3119 pt_prev = NULL;
3120 3120
@@ -6173,6 +6173,11 @@ static int dev_cpu_callback(struct notifier_block *nfb,
6173 oldsd->output_queue = NULL; 6173 oldsd->output_queue = NULL;
6174 oldsd->output_queue_tailp = &oldsd->output_queue; 6174 oldsd->output_queue_tailp = &oldsd->output_queue;
6175 } 6175 }
6176 /* Append NAPI poll list from offline CPU. */
6177 if (!list_empty(&oldsd->poll_list)) {
6178 list_splice_init(&oldsd->poll_list, &sd->poll_list);
6179 raise_softirq_irqoff(NET_RX_SOFTIRQ);
6180 }
6176 6181
6177 raise_softirq_irqoff(NET_TX_SOFTIRQ); 6182 raise_softirq_irqoff(NET_TX_SOFTIRQ);
6178 local_irq_enable(); 6183 local_irq_enable();
@@ -6259,29 +6264,23 @@ err_name:
6259/** 6264/**
6260 * netdev_drivername - network driver for the device 6265 * netdev_drivername - network driver for the device
6261 * @dev: network device 6266 * @dev: network device
6262 * @buffer: buffer for resulting name
6263 * @len: size of buffer
6264 * 6267 *
6265 * Determine network driver for device. 6268 * Determine network driver for device.
6266 */ 6269 */
6267char *netdev_drivername(const struct net_device *dev, char *buffer, int len) 6270const char *netdev_drivername(const struct net_device *dev)
6268{ 6271{
6269 const struct device_driver *driver; 6272 const struct device_driver *driver;
6270 const struct device *parent; 6273 const struct device *parent;
6271 6274 const char *empty = "";
6272 if (len <= 0 || !buffer)
6273 return buffer;
6274 buffer[0] = 0;
6275 6275
6276 parent = dev->dev.parent; 6276 parent = dev->dev.parent;
6277
6278 if (!parent) 6277 if (!parent)
6279 return buffer; 6278 return empty;
6280 6279
6281 driver = parent->driver; 6280 driver = parent->driver;
6282 if (driver && driver->name) 6281 if (driver && driver->name)
6283 strlcpy(buffer, driver->name, len); 6282 return driver->name;
6284 return buffer; 6283 return empty;
6285} 6284}
6286 6285
6287static int __netdev_printk(const char *level, const struct net_device *dev, 6286static int __netdev_printk(const char *level, const struct net_device *dev,
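
The dev.c hunk above changes netdev_drivername() to return a const string (possibly "") instead of copying into a caller-supplied buffer. A quick sketch of that API shape; struct fake_dev, struct fake_driver and the driver name are invented for the example.

    #include <stdio.h>

    struct fake_driver { const char *name; };
    struct fake_dev    { const struct fake_driver *driver; };

    static const char *drivername(const struct fake_dev *dev)
    {
        static const char empty[] = "";

        if (dev->driver && dev->driver->name)
            return dev->driver->name;
        return empty;   /* never NULL, so callers can print it directly */
    }

    int main(void)
    {
        struct fake_driver drv = { "e1000e" };
        struct fake_dev with = { &drv }, without = { NULL };

        printf("'%s' '%s'\n", drivername(&with), drivername(&without));
        return 0;
    }
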
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 11b98bc2aa8f..33d2a1fba131 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1179,9 +1179,14 @@ static void remove_queue_kobjects(struct net_device *net)
1179#endif 1179#endif
1180} 1180}
1181 1181
1182static const void *net_current_ns(void) 1182static void *net_grab_current_ns(void)
1183{ 1183{
1184 return current->nsproxy->net_ns; 1184 struct net *ns = current->nsproxy->net_ns;
1185#ifdef CONFIG_NET_NS
1186 if (ns)
1187 atomic_inc(&ns->passive);
1188#endif
1189 return ns;
1185} 1190}
1186 1191
1187static const void *net_initial_ns(void) 1192static const void *net_initial_ns(void)
@@ -1196,22 +1201,13 @@ static const void *net_netlink_ns(struct sock *sk)
1196 1201
1197struct kobj_ns_type_operations net_ns_type_operations = { 1202struct kobj_ns_type_operations net_ns_type_operations = {
1198 .type = KOBJ_NS_TYPE_NET, 1203 .type = KOBJ_NS_TYPE_NET,
1199 .current_ns = net_current_ns, 1204 .grab_current_ns = net_grab_current_ns,
1200 .netlink_ns = net_netlink_ns, 1205 .netlink_ns = net_netlink_ns,
1201 .initial_ns = net_initial_ns, 1206 .initial_ns = net_initial_ns,
1207 .drop_ns = net_drop_ns,
1202}; 1208};
1203EXPORT_SYMBOL_GPL(net_ns_type_operations); 1209EXPORT_SYMBOL_GPL(net_ns_type_operations);
1204 1210
1205static void net_kobj_ns_exit(struct net *net)
1206{
1207 kobj_ns_exit(KOBJ_NS_TYPE_NET, net);
1208}
1209
1210static struct pernet_operations kobj_net_ops = {
1211 .exit = net_kobj_ns_exit,
1212};
1213
1214
1215#ifdef CONFIG_HOTPLUG 1211#ifdef CONFIG_HOTPLUG
1216static int netdev_uevent(struct device *d, struct kobj_uevent_env *env) 1212static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
1217{ 1213{
@@ -1339,6 +1335,5 @@ EXPORT_SYMBOL(netdev_class_remove_file);
1339int netdev_kobject_init(void) 1335int netdev_kobject_init(void)
1340{ 1336{
1341 kobj_ns_type_register(&net_ns_type_operations); 1337 kobj_ns_type_register(&net_ns_type_operations);
1342 register_pernet_subsys(&kobj_net_ops);
1343 return class_register(&net_class); 1338 return class_register(&net_class);
1344} 1339}
diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
index 6c6b86d0da15..ea489db1bc23 100644
--- a/net/core/net_namespace.c
+++ b/net/core/net_namespace.c
@@ -128,6 +128,7 @@ static __net_init int setup_net(struct net *net)
128 LIST_HEAD(net_exit_list); 128 LIST_HEAD(net_exit_list);
129 129
130 atomic_set(&net->count, 1); 130 atomic_set(&net->count, 1);
131 atomic_set(&net->passive, 1);
131 132
132#ifdef NETNS_REFCNT_DEBUG 133#ifdef NETNS_REFCNT_DEBUG
133 atomic_set(&net->use_count, 0); 134 atomic_set(&net->use_count, 0);
@@ -210,6 +211,13 @@ static void net_free(struct net *net)
210 kmem_cache_free(net_cachep, net); 211 kmem_cache_free(net_cachep, net);
211} 212}
212 213
214void net_drop_ns(void *p)
215{
216 struct net *ns = p;
217 if (ns && atomic_dec_and_test(&ns->passive))
218 net_free(ns);
219}
220
213struct net *copy_net_ns(unsigned long flags, struct net *old_net) 221struct net *copy_net_ns(unsigned long flags, struct net *old_net)
214{ 222{
215 struct net *net; 223 struct net *net;
@@ -230,7 +238,7 @@ struct net *copy_net_ns(unsigned long flags, struct net *old_net)
230 } 238 }
231 mutex_unlock(&net_mutex); 239 mutex_unlock(&net_mutex);
232 if (rv < 0) { 240 if (rv < 0) {
233 net_free(net); 241 net_drop_ns(net);
234 return ERR_PTR(rv); 242 return ERR_PTR(rv);
235 } 243 }
236 return net; 244 return net;
@@ -286,7 +294,7 @@ static void cleanup_net(struct work_struct *work)
286 /* Finally it is safe to free my network namespace structure */ 294 /* Finally it is safe to free my network namespace structure */
287 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) { 295 list_for_each_entry_safe(net, tmp, &net_exit_list, exit_list) {
288 list_del_init(&net->exit_list); 296 list_del_init(&net->exit_list);
289 net_free(net); 297 net_drop_ns(net);
290 } 298 }
291} 299}
292static DECLARE_WORK(net_cleanup_work, cleanup_net); 300static DECLARE_WORK(net_cleanup_work, cleanup_net);
@@ -310,19 +318,17 @@ struct net *get_net_ns_by_fd(int fd)
310 struct file *file; 318 struct file *file;
311 struct net *net; 319 struct net *net;
312 320
313 net = ERR_PTR(-EINVAL);
314 file = proc_ns_fget(fd); 321 file = proc_ns_fget(fd);
315 if (!file) 322 if (IS_ERR(file))
316 goto out; 323 return ERR_CAST(file);
317 324
318 ei = PROC_I(file->f_dentry->d_inode); 325 ei = PROC_I(file->f_dentry->d_inode);
319 if (ei->ns_ops != &netns_operations) 326 if (ei->ns_ops == &netns_operations)
320 goto out; 327 net = get_net(ei->ns);
328 else
329 net = ERR_PTR(-EINVAL);
321 330
322 net = get_net(ei->ns); 331 fput(file);
323out:
324 if (file)
325 fput(file);
326 return net; 332 return net;
327} 333}
328 334
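
The net-sysfs/net_namespace hunks above add a second, "passive" reference count: net_grab_current_ns() takes it so sysfs can keep the namespace structure alive, and net_drop_ns() frees the structure only when the last passive reference goes away. A userspace sketch of that grab/drop pair; struct fake_ns and the helper names are invented.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fake_ns { atomic_int passive; };

    static struct fake_ns *grab_ns(struct fake_ns *ns)
    {
        if (ns)
            atomic_fetch_add(&ns->passive, 1);
        return ns;
    }

    static void drop_ns(struct fake_ns *ns)
    {
        /* Free only when the last passive reference is dropped. */
        if (ns && atomic_fetch_sub(&ns->passive, 1) == 1) {
            puts("freeing namespace");
            free(ns);
        }
    }

    int main(void)
    {
        struct fake_ns *ns = malloc(sizeof(*ns));

        atomic_init(&ns->passive, 1);   /* like setup_net() */
        grab_ns(ns);                    /* like net_grab_current_ns() */
        drop_ns(ns);                    /* sysfs side                 */
        drop_ns(ns);                    /* cleanup side, frees it     */
        return 0;
    }
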
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 2d7d6d473781..18d9cbda3a39 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -792,6 +792,13 @@ int netpoll_setup(struct netpoll *np)
792 return -ENODEV; 792 return -ENODEV;
793 } 793 }
794 794
795 if (ndev->master) {
796 printk(KERN_ERR "%s: %s is a slave device, aborting.\n",
797 np->name, np->dev_name);
798 err = -EBUSY;
799 goto put;
800 }
801
795 if (!netif_running(ndev)) { 802 if (!netif_running(ndev)) {
796 unsigned long atmost, atleast; 803 unsigned long atmost, atleast;
797 804
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index ed0eab39f531..02548b292b53 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -44,7 +44,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
44 pr_debug("%s\n", __func__); 44 pr_debug("%s\n", __func__);
45 45
46 if (!buf) 46 if (!buf)
47 goto out; 47 return -EMSGSIZE;
48 48
49 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, 49 hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
50 IEEE802154_LIST_PHY); 50 IEEE802154_LIST_PHY);
@@ -65,6 +65,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 pid,
65 pages * sizeof(uint32_t), buf); 65 pages * sizeof(uint32_t), buf);
66 66
67 mutex_unlock(&phy->pib_lock); 67 mutex_unlock(&phy->pib_lock);
68 kfree(buf);
68 return genlmsg_end(msg, hdr); 69 return genlmsg_end(msg, hdr);
69 70
70nla_put_failure: 71nla_put_failure:
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 83673d23d4dd..0600f0fbe325 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -676,6 +676,7 @@ int inet_accept(struct socket *sock, struct socket *newsock, int flags)
676 676
677 lock_sock(sk2); 677 lock_sock(sk2);
678 678
679 sock_rps_record_flow(sk2);
679 WARN_ON(!((1 << sk2->sk_state) & 680 WARN_ON(!((1 << sk2->sk_state) &
680 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE))); 681 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_CLOSE)));
681 682
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 5ff47656fced..389a2e6a17fd 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -437,7 +437,7 @@ static int valid_cc(const void *bc, int len, int cc)
437 return 0; 437 return 0;
438 if (cc == len) 438 if (cc == len)
439 return 1; 439 return 1;
440 if (op->yes < 4) 440 if (op->yes < 4 || op->yes & 3)
441 return 0; 441 return 0;
442 len -= op->yes; 442 len -= op->yes;
443 bc += op->yes; 443 bc += op->yes;
@@ -447,11 +447,11 @@ static int valid_cc(const void *bc, int len, int cc)
447 447
448static int inet_diag_bc_audit(const void *bytecode, int bytecode_len) 448static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
449{ 449{
450 const unsigned char *bc = bytecode; 450 const void *bc = bytecode;
451 int len = bytecode_len; 451 int len = bytecode_len;
452 452
453 while (len > 0) { 453 while (len > 0) {
454 struct inet_diag_bc_op *op = (struct inet_diag_bc_op *)bc; 454 const struct inet_diag_bc_op *op = bc;
455 455
456//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len); 456//printk("BC: %d %d %d {%d} / %d\n", op->code, op->yes, op->no, op[1].no, len);
457 switch (op->code) { 457 switch (op->code) {
@@ -462,22 +462,20 @@ static int inet_diag_bc_audit(const void *bytecode, int bytecode_len)
462 case INET_DIAG_BC_S_LE: 462 case INET_DIAG_BC_S_LE:
463 case INET_DIAG_BC_D_GE: 463 case INET_DIAG_BC_D_GE:
464 case INET_DIAG_BC_D_LE: 464 case INET_DIAG_BC_D_LE:
465 if (op->yes < 4 || op->yes > len + 4)
466 return -EINVAL;
467 case INET_DIAG_BC_JMP: 465 case INET_DIAG_BC_JMP:
468 if (op->no < 4 || op->no > len + 4) 466 if (op->no < 4 || op->no > len + 4 || op->no & 3)
469 return -EINVAL; 467 return -EINVAL;
470 if (op->no < len && 468 if (op->no < len &&
471 !valid_cc(bytecode, bytecode_len, len - op->no)) 469 !valid_cc(bytecode, bytecode_len, len - op->no))
472 return -EINVAL; 470 return -EINVAL;
473 break; 471 break;
474 case INET_DIAG_BC_NOP: 472 case INET_DIAG_BC_NOP:
475 if (op->yes < 4 || op->yes > len + 4)
476 return -EINVAL;
477 break; 473 break;
478 default: 474 default:
479 return -EINVAL; 475 return -EINVAL;
480 } 476 }
477 if (op->yes < 4 || op->yes > len + 4 || op->yes & 3)
478 return -EINVAL;
481 bc += op->yes; 479 bc += op->yes;
482 len -= op->yes; 480 len -= op->yes;
483 } 481 }
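
The inet_diag audit changes above tighten the bytecode checks: every jump offset must be at least 4, must stay inside the remaining buffer, and must be 4-byte aligned (the new "& 3" tests), with the op->yes check applied uniformly after the switch. A standalone sketch of that validation loop over an invented op layout (code plus a "yes" offset only), not the real inet_diag_bc_op format.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_op { unsigned short code; unsigned short yes; };

    static bool audit(const struct fake_op *ops, int len)
    {
        while (len > 0) {
            const struct fake_op *op = ops;

            if (op->yes < 4 || op->yes > len || op->yes & 3)
                return false;   /* too small, out of bounds, or misaligned */
            ops = (const struct fake_op *)((const char *)ops + op->yes);
            len -= op->yes;
        }
        return true;
    }

    int main(void)
    {
        struct fake_op good[2] = { { 0, 4 }, { 0, 4 } };    /* two aligned 4-byte ops */
        struct fake_op bad[2]  = { { 0, 3 }, { 0, 4 } };    /* undersized, misaligned */

        printf("%d %d\n", audit(good, (int)sizeof(good)), audit(bad, (int)sizeof(bad)));
        return 0;
    }
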
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 98af3697c718..a8024eaa0e87 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -799,7 +799,9 @@ static int __ip_append_data(struct sock *sk,
799 int csummode = CHECKSUM_NONE; 799 int csummode = CHECKSUM_NONE;
800 struct rtable *rt = (struct rtable *)cork->dst; 800 struct rtable *rt = (struct rtable *)cork->dst;
801 801
802 exthdrlen = transhdrlen ? rt->dst.header_len : 0; 802 skb = skb_peek_tail(queue);
803
804 exthdrlen = !skb ? rt->dst.header_len : 0;
803 length += exthdrlen; 805 length += exthdrlen;
804 transhdrlen += exthdrlen; 806 transhdrlen += exthdrlen;
805 mtu = cork->fragsize; 807 mtu = cork->fragsize;
@@ -825,8 +827,6 @@ static int __ip_append_data(struct sock *sk,
825 !exthdrlen) 827 !exthdrlen)
826 csummode = CHECKSUM_PARTIAL; 828 csummode = CHECKSUM_PARTIAL;
827 829
828 skb = skb_peek_tail(queue);
829
830 cork->length += length; 830 cork->length += length;
831 if (((length > mtu) || (skb && skb_is_gso(skb))) && 831 if (((length > mtu) || (skb && skb_is_gso(skb))) &&
832 (sk->sk_protocol == IPPROTO_UDP) && 832 (sk->sk_protocol == IPPROTO_UDP) &&
diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
index d2c1311cb28d..5c9b9d963918 100644
--- a/net/ipv4/netfilter/ip_queue.c
+++ b/net/ipv4/netfilter/ip_queue.c
@@ -203,7 +203,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
203 else 203 else
204 pmsg->outdev_name[0] = '\0'; 204 pmsg->outdev_name[0] = '\0';
205 205
206 if (entry->indev && entry->skb->dev) { 206 if (entry->indev && entry->skb->dev &&
207 entry->skb->mac_header != entry->skb->network_header) {
207 pmsg->hw_type = entry->skb->dev->type; 208 pmsg->hw_type = entry->skb->dev->type;
208 pmsg->hw_addrlen = dev_parse_header(entry->skb, 209 pmsg->hw_addrlen = dev_parse_header(entry->skb,
209 pmsg->hw_addr); 210 pmsg->hw_addr);
@@ -402,7 +403,8 @@ ipq_dev_drop(int ifindex)
402static inline void 403static inline void
403__ipq_rcv_skb(struct sk_buff *skb) 404__ipq_rcv_skb(struct sk_buff *skb)
404{ 405{
405 int status, type, pid, flags, nlmsglen, skblen; 406 int status, type, pid, flags;
407 unsigned int nlmsglen, skblen;
406 struct nlmsghdr *nlh; 408 struct nlmsghdr *nlh;
407 409
408 skblen = skb->len; 410 skblen = skb->len;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index 764743843503..24e556e83a3b 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -566,7 +566,7 @@ check_entry(const struct ipt_entry *e, const char *name)
566 const struct xt_entry_target *t; 566 const struct xt_entry_target *t;
567 567
568 if (!ip_checkentry(&e->ip)) { 568 if (!ip_checkentry(&e->ip)) {
569 duprintf("ip check failed %p %s.\n", e, par->match->name); 569 duprintf("ip check failed %p %s.\n", e, name);
570 return -EINVAL; 570 return -EINVAL;
571 } 571 }
572 572
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index d609ac3cb9a4..5c9e97c79017 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -307,7 +307,7 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
307 * error messages (RELATED) and information requests (see below) */ 307 * error messages (RELATED) and information requests (see below) */
308 if (ip_hdr(skb)->protocol == IPPROTO_ICMP && 308 if (ip_hdr(skb)->protocol == IPPROTO_ICMP &&
309 (ctinfo == IP_CT_RELATED || 309 (ctinfo == IP_CT_RELATED ||
310 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)) 310 ctinfo == IP_CT_RELATED_REPLY))
311 return XT_CONTINUE; 311 return XT_CONTINUE;
312 312
313 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO, 313 /* ip_conntrack_icmp guarantees us that we only have ICMP_ECHO,
@@ -321,12 +321,12 @@ clusterip_tg(struct sk_buff *skb, const struct xt_action_param *par)
321 ct->mark = hash; 321 ct->mark = hash;
322 break; 322 break;
323 case IP_CT_RELATED: 323 case IP_CT_RELATED:
324 case IP_CT_RELATED+IP_CT_IS_REPLY: 324 case IP_CT_RELATED_REPLY:
325 /* FIXME: we don't handle expectations at the 325 /* FIXME: we don't handle expectations at the
326 * moment. they can arrive on a different node than 326 * moment. they can arrive on a different node than
327 * the master connection (e.g. FTP passive mode) */ 327 * the master connection (e.g. FTP passive mode) */
328 case IP_CT_ESTABLISHED: 328 case IP_CT_ESTABLISHED:
329 case IP_CT_ESTABLISHED+IP_CT_IS_REPLY: 329 case IP_CT_ESTABLISHED_REPLY:
330 break; 330 break;
331 default: 331 default:
332 break; 332 break;
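Many hunks in this merge, including the CLUSTERIP one above, replace the open-coded sums IP_CT_RELATED + IP_CT_IS_REPLY and IP_CT_ESTABLISHED + IP_CT_IS_REPLY with single named states. A sketch of how those names are presumably defined in the conntrack header (assumed, since the header change itself is not shown here); the comparisons keep their old numeric meaning and only the spelling changes:

/* Sketch, assuming the usual ip_conntrack_info numbering. */
enum ip_conntrack_info {
	IP_CT_ESTABLISHED,			/* 0 */
	IP_CT_RELATED,				/* 1 */
	IP_CT_NEW,				/* 2 */
	IP_CT_IS_REPLY,				/* 3: >= this means reply direction */
	IP_CT_ESTABLISHED_REPLY	= IP_CT_ESTABLISHED + IP_CT_IS_REPLY,
	IP_CT_RELATED_REPLY	= IP_CT_RELATED + IP_CT_IS_REPLY,
	IP_CT_NEW_REPLY		= IP_CT_NEW + IP_CT_IS_REPLY,
};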
diff --git a/net/ipv4/netfilter/ipt_MASQUERADE.c b/net/ipv4/netfilter/ipt_MASQUERADE.c
index d2ed9dc74ebc..9931152a78b5 100644
--- a/net/ipv4/netfilter/ipt_MASQUERADE.c
+++ b/net/ipv4/netfilter/ipt_MASQUERADE.c
@@ -60,7 +60,7 @@ masquerade_tg(struct sk_buff *skb, const struct xt_action_param *par)
60 nat = nfct_nat(ct); 60 nat = nfct_nat(ct);
61 61
62 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 62 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
63 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 63 ctinfo == IP_CT_RELATED_REPLY));
64 64
65 /* Source address is 0.0.0.0 - locally generated packet that is 65 /* Source address is 0.0.0.0 - locally generated packet that is
66 * probably not supposed to be masqueraded. 66 * probably not supposed to be masqueraded.
diff --git a/net/ipv4/netfilter/ipt_ecn.c b/net/ipv4/netfilter/ipt_ecn.c
index af6e9c778345..2b57e52c746c 100644
--- a/net/ipv4/netfilter/ipt_ecn.c
+++ b/net/ipv4/netfilter/ipt_ecn.c
@@ -25,7 +25,8 @@ MODULE_LICENSE("GPL");
25static inline bool match_ip(const struct sk_buff *skb, 25static inline bool match_ip(const struct sk_buff *skb,
26 const struct ipt_ecn_info *einfo) 26 const struct ipt_ecn_info *einfo)
27{ 27{
28 return (ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect; 28 return ((ip_hdr(skb)->tos & IPT_ECN_IP_MASK) == einfo->ip_ect) ^
29 !!(einfo->invert & IPT_ECN_OP_MATCH_IP);
29} 30}
30 31
31static inline bool match_tcp(const struct sk_buff *skb, 32static inline bool match_tcp(const struct sk_buff *skb,
@@ -76,8 +77,6 @@ static bool ecn_mt(const struct sk_buff *skb, struct xt_action_param *par)
76 return false; 77 return false;
77 78
78 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) { 79 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR)) {
79 if (ip_hdr(skb)->protocol != IPPROTO_TCP)
80 return false;
81 if (!match_tcp(skb, info, &par->hotdrop)) 80 if (!match_tcp(skb, info, &par->hotdrop))
82 return false; 81 return false;
83 } 82 }
@@ -97,7 +96,7 @@ static int ecn_mt_check(const struct xt_mtchk_param *par)
97 return -EINVAL; 96 return -EINVAL;
98 97
99 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) && 98 if (info->operation & (IPT_ECN_OP_MATCH_ECE|IPT_ECN_OP_MATCH_CWR) &&
100 ip->proto != IPPROTO_TCP) { 99 (ip->proto != IPPROTO_TCP || ip->invflags & IPT_INV_PROTO)) {
101 pr_info("cannot match TCP bits in rule for non-tcp packets\n"); 100 pr_info("cannot match TCP bits in rule for non-tcp packets\n");
102 return -EINVAL; 101 return -EINVAL;
103 } 102 }
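The reworked match_ip() above uses the common invert idiom: compute the plain match, then XOR it with a flag reduced to 0 or 1, so a single expression covers both the normal and the negated case. A standalone illustration of the idiom (hypothetical names, not from the diff):

/* Sketch: !! folds any set bit to exactly 1, and XOR flips the
 * result only when inversion was requested. */
static int match_with_invert(int raw_match, unsigned int invert_flags,
			     unsigned int this_flag)
{
	return !!raw_match ^ !!(invert_flags & this_flag);
}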
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
index 5a03c02af999..de9da21113a1 100644
--- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -101,7 +101,7 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
101 101
102 /* This is where we call the helper: as the packet goes out. */ 102 /* This is where we call the helper: as the packet goes out. */
103 ct = nf_ct_get(skb, &ctinfo); 103 ct = nf_ct_get(skb, &ctinfo);
104 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) 104 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
105 goto out; 105 goto out;
106 106
107 help = nfct_help(ct); 107 help = nfct_help(ct);
@@ -121,7 +121,9 @@ static unsigned int ipv4_confirm(unsigned int hooknum,
121 return ret; 121 return ret;
122 } 122 }
123 123
124 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) { 124 /* adjust seqs for loopback traffic only in outgoing direction */
125 if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
126 !nf_is_loopback_packet(skb)) {
125 typeof(nf_nat_seq_adjust_hook) seq_adjust; 127 typeof(nf_nat_seq_adjust_hook) seq_adjust;
126 128
127 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook); 129 seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
index 7404bde95994..ab5b27a2916f 100644
--- a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -160,7 +160,7 @@ icmp_error_message(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb,
160 /* Update skb to refer to this connection */ 160 /* Update skb to refer to this connection */
161 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 161 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
162 skb->nfctinfo = *ctinfo; 162 skb->nfctinfo = *ctinfo;
163 return -NF_ACCEPT; 163 return NF_ACCEPT;
164} 164}
165 165
166/* Small and modified version of icmp_rcv */ 166/* Small and modified version of icmp_rcv */
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 9c71b2755ce3..3346de5d94d0 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -433,7 +433,7 @@ int nf_nat_icmp_reply_translation(struct nf_conn *ct,
433 433
434 /* Must be RELATED */ 434 /* Must be RELATED */
435 NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED || 435 NF_CT_ASSERT(skb->nfctinfo == IP_CT_RELATED ||
436 skb->nfctinfo == IP_CT_RELATED+IP_CT_IS_REPLY); 436 skb->nfctinfo == IP_CT_RELATED_REPLY);
437 437
438 /* Redirects on non-null nats must be dropped, else they'll 438 /* Redirects on non-null nats must be dropped, else they'll
439 start talking to each other without our translation, and be 439 start talking to each other without our translation, and be
diff --git a/net/ipv4/netfilter/nf_nat_helper.c b/net/ipv4/netfilter/nf_nat_helper.c
index 99cfa28b6d38..ebc5f8894f99 100644
--- a/net/ipv4/netfilter/nf_nat_helper.c
+++ b/net/ipv4/netfilter/nf_nat_helper.c
@@ -160,7 +160,7 @@ static void nf_nat_csum(struct sk_buff *skb, const struct iphdr *iph, void *data
160 160
161 if (skb->ip_summed != CHECKSUM_PARTIAL) { 161 if (skb->ip_summed != CHECKSUM_PARTIAL) {
162 if (!(rt->rt_flags & RTCF_LOCAL) && 162 if (!(rt->rt_flags & RTCF_LOCAL) &&
163 skb->dev->features & NETIF_F_V4_CSUM) { 163 (!skb->dev || skb->dev->features & NETIF_F_V4_CSUM)) {
164 skb->ip_summed = CHECKSUM_PARTIAL; 164 skb->ip_summed = CHECKSUM_PARTIAL;
165 skb->csum_start = skb_headroom(skb) + 165 skb->csum_start = skb_headroom(skb) +
166 skb_network_offset(skb) + 166 skb_network_offset(skb) +
diff --git a/net/ipv4/netfilter/nf_nat_rule.c b/net/ipv4/netfilter/nf_nat_rule.c
index 21c30426480b..733c9abc1cbd 100644
--- a/net/ipv4/netfilter/nf_nat_rule.c
+++ b/net/ipv4/netfilter/nf_nat_rule.c
@@ -53,7 +53,7 @@ ipt_snat_target(struct sk_buff *skb, const struct xt_action_param *par)
53 53
54 /* Connection must be valid and new. */ 54 /* Connection must be valid and new. */
55 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED || 55 NF_CT_ASSERT(ct && (ctinfo == IP_CT_NEW || ctinfo == IP_CT_RELATED ||
56 ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)); 56 ctinfo == IP_CT_RELATED_REPLY));
57 NF_CT_ASSERT(par->out != NULL); 57 NF_CT_ASSERT(par->out != NULL);
58 58
59 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC); 59 return nf_nat_setup_info(ct, &mr->range[0], IP_NAT_MANIP_SRC);
diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c
index 7317bdf1d457..483b76d042da 100644
--- a/net/ipv4/netfilter/nf_nat_standalone.c
+++ b/net/ipv4/netfilter/nf_nat_standalone.c
@@ -116,7 +116,7 @@ nf_nat_fn(unsigned int hooknum,
116 116
117 switch (ctinfo) { 117 switch (ctinfo) {
118 case IP_CT_RELATED: 118 case IP_CT_RELATED:
119 case IP_CT_RELATED+IP_CT_IS_REPLY: 119 case IP_CT_RELATED_REPLY:
120 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) { 120 if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
121 if (!nf_nat_icmp_reply_translation(ct, ctinfo, 121 if (!nf_nat_icmp_reply_translation(ct, ctinfo,
122 hooknum, skb)) 122 hooknum, skb))
@@ -144,7 +144,7 @@ nf_nat_fn(unsigned int hooknum,
144 default: 144 default:
145 /* ESTABLISHED */ 145 /* ESTABLISHED */
146 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED || 146 NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
147 ctinfo == (IP_CT_ESTABLISHED+IP_CT_IS_REPLY)); 147 ctinfo == IP_CT_ESTABLISHED_REPLY);
148 } 148 }
149 149
150 return nf_nat_packet(ct, ctinfo, hooknum, skb); 150 return nf_nat_packet(ct, ctinfo, hooknum, skb);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9aaa67165f42..39b403f854c6 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -41,7 +41,6 @@
41#include <linux/proc_fs.h> 41#include <linux/proc_fs.h>
42#include <net/sock.h> 42#include <net/sock.h>
43#include <net/ping.h> 43#include <net/ping.h>
44#include <net/icmp.h>
45#include <net/udp.h> 44#include <net/udp.h>
46#include <net/route.h> 45#include <net/route.h>
47#include <net/inet_common.h> 46#include <net/inet_common.h>
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index aa29c6291353..f24c3359e5d0 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1316,6 +1316,23 @@ reject_redirect:
1316 ; 1316 ;
1317} 1317}
1318 1318
1319static bool peer_pmtu_expired(struct inet_peer *peer)
1320{
1321 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1322
1323 return orig &&
1324 time_after_eq(jiffies, orig) &&
1325 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1326}
1327
1328static bool peer_pmtu_cleaned(struct inet_peer *peer)
1329{
1330 unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
1331
1332 return orig &&
1333 cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
1334}
1335
1319static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst) 1336static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1320{ 1337{
1321 struct rtable *rt = (struct rtable *)dst; 1338 struct rtable *rt = (struct rtable *)dst;
@@ -1331,14 +1348,8 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1331 rt_genid(dev_net(dst->dev))); 1348 rt_genid(dev_net(dst->dev)));
1332 rt_del(hash, rt); 1349 rt_del(hash, rt);
1333 ret = NULL; 1350 ret = NULL;
1334 } else if (rt->peer && 1351 } else if (rt->peer && peer_pmtu_expired(rt->peer)) {
1335 rt->peer->pmtu_expires && 1352 dst_metric_set(dst, RTAX_MTU, rt->peer->pmtu_orig);
1336 time_after_eq(jiffies, rt->peer->pmtu_expires)) {
1337 unsigned long orig = rt->peer->pmtu_expires;
1338
1339 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1340 dst_metric_set(dst, RTAX_MTU,
1341 rt->peer->pmtu_orig);
1342 } 1353 }
1343 } 1354 }
1344 return ret; 1355 return ret;
@@ -1531,8 +1542,10 @@ unsigned short ip_rt_frag_needed(struct net *net, const struct iphdr *iph,
1531 1542
1532static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer) 1543static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
1533{ 1544{
1534 unsigned long expires = peer->pmtu_expires; 1545 unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
1535 1546
1547 if (!expires)
1548 return;
1536 if (time_before(jiffies, expires)) { 1549 if (time_before(jiffies, expires)) {
1537 u32 orig_dst_mtu = dst_mtu(dst); 1550 u32 orig_dst_mtu = dst_mtu(dst);
1538 if (peer->pmtu_learned < orig_dst_mtu) { 1551 if (peer->pmtu_learned < orig_dst_mtu) {
@@ -1555,10 +1568,11 @@ static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
1555 rt_bind_peer(rt, rt->rt_dst, 1); 1568 rt_bind_peer(rt, rt->rt_dst, 1);
1556 peer = rt->peer; 1569 peer = rt->peer;
1557 if (peer) { 1570 if (peer) {
1571 unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
1572
1558 if (mtu < ip_rt_min_pmtu) 1573 if (mtu < ip_rt_min_pmtu)
1559 mtu = ip_rt_min_pmtu; 1574 mtu = ip_rt_min_pmtu;
1560 if (!peer->pmtu_expires || mtu < peer->pmtu_learned) { 1575 if (!pmtu_expires || mtu < peer->pmtu_learned) {
1561 unsigned long pmtu_expires;
1562 1576
1563 pmtu_expires = jiffies + ip_rt_mtu_expires; 1577 pmtu_expires = jiffies + ip_rt_mtu_expires;
1564 if (!pmtu_expires) 1578 if (!pmtu_expires)
@@ -1612,13 +1626,14 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1612 rt_bind_peer(rt, rt->rt_dst, 0); 1626 rt_bind_peer(rt, rt->rt_dst, 0);
1613 1627
1614 peer = rt->peer; 1628 peer = rt->peer;
1615 if (peer && peer->pmtu_expires) 1629 if (peer) {
1616 check_peer_pmtu(dst, peer); 1630 check_peer_pmtu(dst, peer);
1617 1631
1618 if (peer && peer->redirect_learned.a4 && 1632 if (peer->redirect_learned.a4 &&
1619 peer->redirect_learned.a4 != rt->rt_gateway) { 1633 peer->redirect_learned.a4 != rt->rt_gateway) {
1620 if (check_peer_redir(dst, peer)) 1634 if (check_peer_redir(dst, peer))
1621 return NULL; 1635 return NULL;
1636 }
1622 } 1637 }
1623 1638
1624 rt->rt_peer_genid = rt_peer_genid(); 1639 rt->rt_peer_genid = rt_peer_genid();
@@ -1649,14 +1664,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
1649 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); 1664 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1650 1665
1651 rt = skb_rtable(skb); 1666 rt = skb_rtable(skb);
1652 if (rt && 1667 if (rt && rt->peer && peer_pmtu_cleaned(rt->peer))
1653 rt->peer && 1668 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1654 rt->peer->pmtu_expires) {
1655 unsigned long orig = rt->peer->pmtu_expires;
1656
1657 if (cmpxchg(&rt->peer->pmtu_expires, orig, 0) == orig)
1658 dst_metric_set(&rt->dst, RTAX_MTU, rt->peer->pmtu_orig);
1659 }
1660} 1669}
1661 1670
1662static int ip_rt_bug(struct sk_buff *skb) 1671static int ip_rt_bug(struct sk_buff *skb)
@@ -1770,8 +1779,7 @@ static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
1770 sizeof(u32) * RTAX_MAX); 1779 sizeof(u32) * RTAX_MAX);
1771 dst_init_metrics(&rt->dst, peer->metrics, false); 1780 dst_init_metrics(&rt->dst, peer->metrics, false);
1772 1781
1773 if (peer->pmtu_expires) 1782 check_peer_pmtu(&rt->dst, peer);
1774 check_peer_pmtu(&rt->dst, peer);
1775 if (peer->redirect_learned.a4 && 1783 if (peer->redirect_learned.a4 &&
1776 peer->redirect_learned.a4 != rt->rt_gateway) { 1784 peer->redirect_learned.a4 != rt->rt_gateway) {
1777 rt->rt_gateway = peer->redirect_learned.a4; 1785 rt->rt_gateway = peer->redirect_learned.a4;
@@ -1894,9 +1902,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1894 1902
1895 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev))); 1903 hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
1896 rth = rt_intern_hash(hash, rth, skb, dev->ifindex); 1904 rth = rt_intern_hash(hash, rth, skb, dev->ifindex);
1897 err = 0; 1905 return IS_ERR(rth) ? PTR_ERR(rth) : 0;
1898 if (IS_ERR(rth))
1899 err = PTR_ERR(rth);
1900 1906
1901e_nobufs: 1907e_nobufs:
1902 return -ENOBUFS; 1908 return -ENOBUFS;
@@ -2775,7 +2781,8 @@ static int rt_fill_info(struct net *net,
2775 struct rtable *rt = skb_rtable(skb); 2781 struct rtable *rt = skb_rtable(skb);
2776 struct rtmsg *r; 2782 struct rtmsg *r;
2777 struct nlmsghdr *nlh; 2783 struct nlmsghdr *nlh;
2778 long expires; 2784 long expires = 0;
2785 const struct inet_peer *peer = rt->peer;
2779 u32 id = 0, ts = 0, tsage = 0, error; 2786 u32 id = 0, ts = 0, tsage = 0, error;
2780 2787
2781 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags); 2788 nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
@@ -2823,15 +2830,16 @@ static int rt_fill_info(struct net *net,
2823 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark); 2830 NLA_PUT_BE32(skb, RTA_MARK, rt->rt_mark);
2824 2831
2825 error = rt->dst.error; 2832 error = rt->dst.error;
2826 expires = (rt->peer && rt->peer->pmtu_expires) ? 2833 if (peer) {
2827 rt->peer->pmtu_expires - jiffies : 0;
2828 if (rt->peer) {
2829 inet_peer_refcheck(rt->peer); 2834 inet_peer_refcheck(rt->peer);
2830 id = atomic_read(&rt->peer->ip_id_count) & 0xffff; 2835 id = atomic_read(&peer->ip_id_count) & 0xffff;
2831 if (rt->peer->tcp_ts_stamp) { 2836 if (peer->tcp_ts_stamp) {
2832 ts = rt->peer->tcp_ts; 2837 ts = peer->tcp_ts;
2833 tsage = get_seconds() - rt->peer->tcp_ts_stamp; 2838 tsage = get_seconds() - peer->tcp_ts_stamp;
2834 } 2839 }
2840 expires = ACCESS_ONCE(peer->pmtu_expires);
2841 if (expires)
2842 expires -= jiffies;
2835 } 2843 }
2836 2844
2837 if (rt_is_input_route(rt)) { 2845 if (rt_is_input_route(rt)) {
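The new peer_pmtu_expired() and peer_pmtu_cleaned() helpers above centralize a read-once-then-cmpxchg pattern: the expiry stamp is sampled with ACCESS_ONCE(), and cmpxchg() guarantees that exactly one caller observes the transition from set to cleared and therefore performs the one-shot MTU restore. A reduced sketch of the pattern with hypothetical names (kernel context assumed):

/* Sketch: only the caller whose cmpxchg() wins may do the cleanup
 * associated with an expired timestamp. */
static bool claim_expired(unsigned long *stamp)
{
	unsigned long orig = ACCESS_ONCE(*stamp);

	return orig &&
	       time_after_eq(jiffies, orig) &&
	       cmpxchg(stamp, orig, 0) == orig;
}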
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 617dee3ccfb1..955b8e65b69e 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1594,6 +1594,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1594 goto discard; 1594 goto discard;
1595 1595
1596 if (nsk != sk) { 1596 if (nsk != sk) {
1597 sock_rps_save_rxhash(nsk, skb->rxhash);
1597 if (tcp_child_process(sk, nsk, skb)) { 1598 if (tcp_child_process(sk, nsk, skb)) {
1598 rsk = nsk; 1599 rsk = nsk;
1599 goto reset; 1600 goto reset;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index b7919f901fbf..d450a2f9fc06 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -272,6 +272,10 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
272 272
273 if (addr_len < SIN6_LEN_RFC2133) 273 if (addr_len < SIN6_LEN_RFC2133)
274 return -EINVAL; 274 return -EINVAL;
275
276 if (addr->sin6_family != AF_INET6)
277 return -EINVAL;
278
275 addr_type = ipv6_addr_type(&addr->sin6_addr); 279 addr_type = ipv6_addr_type(&addr->sin6_addr);
276 if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM) 280 if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
277 return -EINVAL; 281 return -EINVAL;
diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
index 413ab0754e1f..249394863284 100644
--- a/net/ipv6/netfilter/ip6_queue.c
+++ b/net/ipv6/netfilter/ip6_queue.c
@@ -204,7 +204,8 @@ ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
204 else 204 else
205 pmsg->outdev_name[0] = '\0'; 205 pmsg->outdev_name[0] = '\0';
206 206
207 if (entry->indev && entry->skb->dev) { 207 if (entry->indev && entry->skb->dev &&
208 entry->skb->mac_header != entry->skb->network_header) {
208 pmsg->hw_type = entry->skb->dev->type; 209 pmsg->hw_type = entry->skb->dev->type;
209 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr); 210 pmsg->hw_addrlen = dev_parse_header(entry->skb, pmsg->hw_addr);
210 } 211 }
@@ -403,7 +404,8 @@ ipq_dev_drop(int ifindex)
403static inline void 404static inline void
404__ipq_rcv_skb(struct sk_buff *skb) 405__ipq_rcv_skb(struct sk_buff *skb)
405{ 406{
406 int status, type, pid, flags, nlmsglen, skblen; 407 int status, type, pid, flags;
408 unsigned int nlmsglen, skblen;
407 struct nlmsghdr *nlh; 409 struct nlmsghdr *nlh;
408 410
409 skblen = skb->len; 411 skblen = skb->len;
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index c8af58b22562..4111050a9fc5 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -160,7 +160,7 @@ static unsigned int ipv6_confirm(unsigned int hooknum,
160 160
161 /* This is where we call the helper: as the packet goes out. */ 161 /* This is where we call the helper: as the packet goes out. */
162 ct = nf_ct_get(skb, &ctinfo); 162 ct = nf_ct_get(skb, &ctinfo);
163 if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY) 163 if (!ct || ctinfo == IP_CT_RELATED_REPLY)
164 goto out; 164 goto out;
165 165
166 help = nfct_help(ct); 166 help = nfct_help(ct);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
index 1df3c8b6bf47..7c05e7eacbc6 100644
--- a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -177,7 +177,7 @@ icmpv6_error_message(struct net *net, struct nf_conn *tmpl,
177 /* Update skb to refer to this connection */ 177 /* Update skb to refer to this connection */
178 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general; 178 skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
179 skb->nfctinfo = *ctinfo; 179 skb->nfctinfo = *ctinfo;
180 return -NF_ACCEPT; 180 return NF_ACCEPT;
181} 181}
182 182
183static int 183static int
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index a1ef61a889c3..78aa53492b3e 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1649,6 +1649,7 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1649 * the new socket.. 1649 * the new socket..
1650 */ 1650 */
1651 if(nsk != sk) { 1651 if(nsk != sk) {
1652 sock_rps_save_rxhash(nsk, skb->rxhash);
1652 if (tcp_child_process(sk, nsk, skb)) 1653 if (tcp_child_process(sk, nsk, skb))
1653 goto reset; 1654 goto reset;
1654 if (opt_skb) 1655 if (opt_skb)
diff --git a/net/irda/iriap.c b/net/irda/iriap.c
index dfc7b47d48fe..e71e85ba2bf1 100644
--- a/net/irda/iriap.c
+++ b/net/irda/iriap.c
@@ -87,6 +87,8 @@ static inline void iriap_start_watchdog_timer(struct iriap_cb *self,
87 iriap_watchdog_timer_expired); 87 iriap_watchdog_timer_expired);
88} 88}
89 89
90static struct lock_class_key irias_objects_key;
91
90/* 92/*
91 * Function iriap_init (void) 93 * Function iriap_init (void)
92 * 94 *
@@ -114,6 +116,9 @@ int __init iriap_init(void)
114 return -ENOMEM; 116 return -ENOMEM;
115 } 117 }
116 118
119 lockdep_set_class_and_name(&irias_objects->hb_spinlock, &irias_objects_key,
120 "irias_objects");
121
117 /* 122 /*
118 * Register some default services for IrLMP 123 * Register some default services for IrLMP
119 */ 124 */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index b8dbae82fab8..76130134bfa6 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -258,7 +258,7 @@ static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
258 */ 258 */
259 pd->net = get_net_ns_by_pid(current->pid); 259 pd->net = get_net_ns_by_pid(current->pid);
260 if (IS_ERR(pd->net)) { 260 if (IS_ERR(pd->net)) {
261 rc = -PTR_ERR(pd->net); 261 rc = PTR_ERR(pd->net);
262 goto err_free_pd; 262 goto err_free_pd;
263 } 263 }
264 264
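The one-character l2tp_debugfs fix above is a sign error: PTR_ERR() already yields a negative errno, so negating it turned, say, -ENOMEM into a positive value that no caller treats as an error. A sketch of the intended idiom (acquire_resource() is a hypothetical placeholder):

/* Sketch: propagate the error pointer's errno unchanged. */
extern void *acquire_resource(void);	/* hypothetical; returns ERR_PTR(-errno) on failure */

static int use_resource(void)
{
	void *res = acquire_resource();

	if (IS_ERR(res))
		return PTR_ERR(res);	/* already negative, e.g. -ENOMEM */
	return 0;
}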
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 421eaa6b0c2b..56c24cabf26d 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -965,6 +965,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
965 965
966 mutex_lock(&sdata->u.ibss.mtx); 966 mutex_lock(&sdata->u.ibss.mtx);
967 967
968 sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
969 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
970 sdata->u.ibss.ssid_len = 0;
971
968 active_ibss = ieee80211_sta_active_ibss(sdata); 972 active_ibss = ieee80211_sta_active_ibss(sdata);
969 973
970 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { 974 if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -999,8 +1003,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
999 kfree_skb(skb); 1003 kfree_skb(skb);
1000 1004
1001 skb_queue_purge(&sdata->skb_queue); 1005 skb_queue_purge(&sdata->skb_queue);
1002 memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
1003 sdata->u.ibss.ssid_len = 0;
1004 1006
1005 del_timer_sync(&sdata->u.ibss.timer); 1007 del_timer_sync(&sdata->u.ibss.timer);
1006 1008
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 456cccf26b51..d595265d6c22 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -232,9 +232,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type)); 232 WARN_ON(!ieee80211_set_channel_type(local, sdata, channel_type));
233 } 233 }
234 234
235 ieee80211_stop_queues_by_reason(&sdata->local->hw,
236 IEEE80211_QUEUE_STOP_REASON_CSA);
237
238 /* channel_type change automatically detected */ 235 /* channel_type change automatically detected */
239 ieee80211_hw_config(local, 0); 236 ieee80211_hw_config(local, 0);
240 237
@@ -248,9 +245,6 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
248 rcu_read_unlock(); 245 rcu_read_unlock();
249 } 246 }
250 247
251 ieee80211_wake_queues_by_reason(&sdata->local->hw,
252 IEEE80211_QUEUE_STOP_REASON_CSA);
253
254 ht_opmode = le16_to_cpu(hti->operation_mode); 248 ht_opmode = le16_to_cpu(hti->operation_mode);
255 249
256 /* if bss configuration changed store the new one */ 250 /* if bss configuration changed store the new one */
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 77c61b0b6d68..12571fb2881c 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -776,8 +776,16 @@ static void ip_vs_conn_expire(unsigned long data)
776 if (cp->control) 776 if (cp->control)
777 ip_vs_control_del(cp); 777 ip_vs_control_del(cp);
778 778
779 if (cp->flags & IP_VS_CONN_F_NFCT) 779 if (cp->flags & IP_VS_CONN_F_NFCT) {
780 ip_vs_conn_drop_conntrack(cp); 780 ip_vs_conn_drop_conntrack(cp);
781 /* Do not access conntracks during subsys cleanup
782 * because nf_conntrack_find_get can not be used after
783 * conntrack cleanup for the net.
784 */
785 smp_rmb();
786 if (ipvs->enable)
787 ip_vs_conn_drop_conntrack(cp);
788 }
781 789
782 ip_vs_pe_put(cp->pe); 790 ip_vs_pe_put(cp->pe);
783 kfree(cp->pe_data); 791 kfree(cp->pe_data);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 2200bae1d4dd..e33d48cae9fd 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1772,7 +1772,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1772 .owner = THIS_MODULE, 1772 .owner = THIS_MODULE,
1773 .pf = PF_INET, 1773 .pf = PF_INET,
1774 .hooknum = NF_INET_LOCAL_IN, 1774 .hooknum = NF_INET_LOCAL_IN,
1775 .priority = 99, 1775 .priority = NF_IP_PRI_NAT_SRC - 2,
1776 }, 1776 },
1777 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1777 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1778 * or VS/NAT(change destination), so that filtering rules can be 1778 * or VS/NAT(change destination), so that filtering rules can be
@@ -1782,7 +1782,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1782 .owner = THIS_MODULE, 1782 .owner = THIS_MODULE,
1783 .pf = PF_INET, 1783 .pf = PF_INET,
1784 .hooknum = NF_INET_LOCAL_IN, 1784 .hooknum = NF_INET_LOCAL_IN,
1785 .priority = 101, 1785 .priority = NF_IP_PRI_NAT_SRC - 1,
1786 }, 1786 },
1787 /* Before ip_vs_in, change source only for VS/NAT */ 1787 /* Before ip_vs_in, change source only for VS/NAT */
1788 { 1788 {
@@ -1790,7 +1790,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1790 .owner = THIS_MODULE, 1790 .owner = THIS_MODULE,
1791 .pf = PF_INET, 1791 .pf = PF_INET,
1792 .hooknum = NF_INET_LOCAL_OUT, 1792 .hooknum = NF_INET_LOCAL_OUT,
1793 .priority = -99, 1793 .priority = NF_IP_PRI_NAT_DST + 1,
1794 }, 1794 },
1795 /* After mangle, schedule and forward local requests */ 1795 /* After mangle, schedule and forward local requests */
1796 { 1796 {
@@ -1798,7 +1798,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1798 .owner = THIS_MODULE, 1798 .owner = THIS_MODULE,
1799 .pf = PF_INET, 1799 .pf = PF_INET,
1800 .hooknum = NF_INET_LOCAL_OUT, 1800 .hooknum = NF_INET_LOCAL_OUT,
1801 .priority = -98, 1801 .priority = NF_IP_PRI_NAT_DST + 2,
1802 }, 1802 },
1803 /* After packet filtering (but before ip_vs_out_icmp), catch icmp 1803 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1804 * destined for 0.0.0.0/0, which is for incoming IPVS connections */ 1804 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1824,7 +1824,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1824 .owner = THIS_MODULE, 1824 .owner = THIS_MODULE,
1825 .pf = PF_INET6, 1825 .pf = PF_INET6,
1826 .hooknum = NF_INET_LOCAL_IN, 1826 .hooknum = NF_INET_LOCAL_IN,
1827 .priority = 99, 1827 .priority = NF_IP6_PRI_NAT_SRC - 2,
1828 }, 1828 },
1829 /* After packet filtering, forward packet through VS/DR, VS/TUN, 1829 /* After packet filtering, forward packet through VS/DR, VS/TUN,
1830 * or VS/NAT(change destination), so that filtering rules can be 1830 * or VS/NAT(change destination), so that filtering rules can be
@@ -1834,7 +1834,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1834 .owner = THIS_MODULE, 1834 .owner = THIS_MODULE,
1835 .pf = PF_INET6, 1835 .pf = PF_INET6,
1836 .hooknum = NF_INET_LOCAL_IN, 1836 .hooknum = NF_INET_LOCAL_IN,
1837 .priority = 101, 1837 .priority = NF_IP6_PRI_NAT_SRC - 1,
1838 }, 1838 },
1839 /* Before ip_vs_in, change source only for VS/NAT */ 1839 /* Before ip_vs_in, change source only for VS/NAT */
1840 { 1840 {
@@ -1842,7 +1842,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1842 .owner = THIS_MODULE, 1842 .owner = THIS_MODULE,
1843 .pf = PF_INET, 1843 .pf = PF_INET,
1844 .hooknum = NF_INET_LOCAL_OUT, 1844 .hooknum = NF_INET_LOCAL_OUT,
1845 .priority = -99, 1845 .priority = NF_IP6_PRI_NAT_DST + 1,
1846 }, 1846 },
1847 /* After mangle, schedule and forward local requests */ 1847 /* After mangle, schedule and forward local requests */
1848 { 1848 {
@@ -1850,7 +1850,7 @@ static struct nf_hook_ops ip_vs_ops[] __read_mostly = {
1850 .owner = THIS_MODULE, 1850 .owner = THIS_MODULE,
1851 .pf = PF_INET6, 1851 .pf = PF_INET6,
1852 .hooknum = NF_INET_LOCAL_OUT, 1852 .hooknum = NF_INET_LOCAL_OUT,
1853 .priority = -98, 1853 .priority = NF_IP6_PRI_NAT_DST + 2,
1854 }, 1854 },
1855 /* After packet filtering (but before ip_vs_out_icmp), catch icmp 1855 /* After packet filtering (but before ip_vs_out_icmp), catch icmp
1856 * destined for 0.0.0.0/0, which is for incoming IPVS connections */ 1856 * destined for 0.0.0.0/0, which is for incoming IPVS connections */
@@ -1945,6 +1945,7 @@ static void __net_exit __ip_vs_dev_cleanup(struct net *net)
1945{ 1945{
1946 EnterFunction(2); 1946 EnterFunction(2);
1947 net_ipvs(net)->enable = 0; /* Disable packet reception */ 1947 net_ipvs(net)->enable = 0; /* Disable packet reception */
1948 smp_wmb();
1948 ip_vs_sync_net_cleanup(net); 1949 ip_vs_sync_net_cleanup(net);
1949 LeaveFunction(2); 1950 LeaveFunction(2);
1950} 1951}
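The ip_vs_ops changes above swap bare priority numbers (99, 101, -99, -98) for expressions anchored to the netfilter NAT hook priorities, which makes IPVS's intended position relative to the NAT hooks explicit rather than implied by magic constants. For reference, a sketch of the relevant anchor values as they are usually defined (assumed; the priority header is not part of this diff):

/* Sketch, assuming the standard IPv4 hook priorities; the IPv6
 * NF_IP6_PRI_* values mirror these. */
enum nf_ip_hook_priorities {
	NF_IP_PRI_NAT_DST	= -100,	/* destination NAT */
	NF_IP_PRI_FILTER	= 0,	/* packet filtering */
	NF_IP_PRI_NAT_SRC	= 100,	/* source NAT */
};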
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 2e1c11f78419..f7af8b866017 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -850,7 +850,7 @@ resolve_normal_ct(struct net *net, struct nf_conn *tmpl,
850 850
851 /* It exists; we have (non-exclusive) reference. */ 851 /* It exists; we have (non-exclusive) reference. */
852 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) { 852 if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
853 *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY; 853 *ctinfo = IP_CT_ESTABLISHED_REPLY;
854 /* Please set reply bit if this packet OK */ 854 /* Please set reply bit if this packet OK */
855 *set_reply = 1; 855 *set_reply = 1;
856 } else { 856 } else {
@@ -922,6 +922,9 @@ nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
922 ret = -ret; 922 ret = -ret;
923 goto out; 923 goto out;
924 } 924 }
925 /* ICMP[v6] protocol trackers may assign one conntrack. */
926 if (skb->nfct)
927 goto out;
925 } 928 }
926 929
927 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, 930 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum,
@@ -1143,7 +1146,7 @@ static void nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
1143 /* This ICMP is in reverse direction to the packet which caused it */ 1146 /* This ICMP is in reverse direction to the packet which caused it */
1144 ct = nf_ct_get(skb, &ctinfo); 1147 ct = nf_ct_get(skb, &ctinfo);
1145 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) 1148 if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
1146 ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY; 1149 ctinfo = IP_CT_RELATED_REPLY;
1147 else 1150 else
1148 ctinfo = IP_CT_RELATED; 1151 ctinfo = IP_CT_RELATED;
1149 1152
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
index e17cb7c7dd8f..6f5801eac999 100644
--- a/net/netfilter/nf_conntrack_ftp.c
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -368,7 +368,7 @@ static int help(struct sk_buff *skb,
368 368
369 /* Until there's been traffic both ways, don't look in packets. */ 369 /* Until there's been traffic both ways, don't look in packets. */
370 if (ctinfo != IP_CT_ESTABLISHED && 370 if (ctinfo != IP_CT_ESTABLISHED &&
371 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) { 371 ctinfo != IP_CT_ESTABLISHED_REPLY) {
372 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo); 372 pr_debug("ftp: Conntrackinfo = %u\n", ctinfo);
373 return NF_ACCEPT; 373 return NF_ACCEPT;
374 } 374 }
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 18b2ce5c8ced..f03c2d4539f6 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -571,10 +571,9 @@ static int h245_help(struct sk_buff *skb, unsigned int protoff,
571 int ret; 571 int ret;
572 572
573 /* Until there's been traffic both ways, don't look in packets. */ 573 /* Until there's been traffic both ways, don't look in packets. */
574 if (ctinfo != IP_CT_ESTABLISHED && 574 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
575 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
576 return NF_ACCEPT; 575 return NF_ACCEPT;
577 } 576
578 pr_debug("nf_ct_h245: skblen = %u\n", skb->len); 577 pr_debug("nf_ct_h245: skblen = %u\n", skb->len);
579 578
580 spin_lock_bh(&nf_h323_lock); 579 spin_lock_bh(&nf_h323_lock);
@@ -1125,10 +1124,9 @@ static int q931_help(struct sk_buff *skb, unsigned int protoff,
1125 int ret; 1124 int ret;
1126 1125
1127 /* Until there's been traffic both ways, don't look in packets. */ 1126 /* Until there's been traffic both ways, don't look in packets. */
1128 if (ctinfo != IP_CT_ESTABLISHED && 1127 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
1129 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
1130 return NF_ACCEPT; 1128 return NF_ACCEPT;
1131 } 1129
1132 pr_debug("nf_ct_q931: skblen = %u\n", skb->len); 1130 pr_debug("nf_ct_q931: skblen = %u\n", skb->len);
1133 1131
1134 spin_lock_bh(&nf_h323_lock); 1132 spin_lock_bh(&nf_h323_lock);
diff --git a/net/netfilter/nf_conntrack_irc.c b/net/netfilter/nf_conntrack_irc.c
index b394aa318776..4f9390b98697 100644
--- a/net/netfilter/nf_conntrack_irc.c
+++ b/net/netfilter/nf_conntrack_irc.c
@@ -125,8 +125,7 @@ static int help(struct sk_buff *skb, unsigned int protoff,
125 return NF_ACCEPT; 125 return NF_ACCEPT;
126 126
127 /* Until there's been traffic both ways, don't look in packets. */ 127 /* Until there's been traffic both ways, don't look in packets. */
128 if (ctinfo != IP_CT_ESTABLISHED && 128 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
129 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
130 return NF_ACCEPT; 129 return NF_ACCEPT;
131 130
132 /* Not a full tcp header? */ 131 /* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_pptp.c b/net/netfilter/nf_conntrack_pptp.c
index 088944824e13..2fd4565144de 100644
--- a/net/netfilter/nf_conntrack_pptp.c
+++ b/net/netfilter/nf_conntrack_pptp.c
@@ -519,8 +519,7 @@ conntrack_pptp_help(struct sk_buff *skb, unsigned int protoff,
519 u_int16_t msg; 519 u_int16_t msg;
520 520
521 /* don't do any tracking before tcp handshake complete */ 521 /* don't do any tracking before tcp handshake complete */
522 if (ctinfo != IP_CT_ESTABLISHED && 522 if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
523 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)
524 return NF_ACCEPT; 523 return NF_ACCEPT;
525 524
526 nexthdr_off = protoff; 525 nexthdr_off = protoff;
diff --git a/net/netfilter/nf_conntrack_sane.c b/net/netfilter/nf_conntrack_sane.c
index d9e27734b2a2..8501823b3f9b 100644
--- a/net/netfilter/nf_conntrack_sane.c
+++ b/net/netfilter/nf_conntrack_sane.c
@@ -78,7 +78,7 @@ static int help(struct sk_buff *skb,
78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info; 78 ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
79 /* Until there's been traffic both ways, don't look in packets. */ 79 /* Until there's been traffic both ways, don't look in packets. */
80 if (ctinfo != IP_CT_ESTABLISHED && 80 if (ctinfo != IP_CT_ESTABLISHED &&
81 ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) 81 ctinfo != IP_CT_ESTABLISHED_REPLY)
82 return NF_ACCEPT; 82 return NF_ACCEPT;
83 83
84 /* Not a full tcp header? */ 84 /* Not a full tcp header? */
diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c
index cb5a28581782..93faf6a3a637 100644
--- a/net/netfilter/nf_conntrack_sip.c
+++ b/net/netfilter/nf_conntrack_sip.c
@@ -1423,7 +1423,7 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,
1423 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust; 1423 typeof(nf_nat_sip_seq_adjust_hook) nf_nat_sip_seq_adjust;
1424 1424
1425 if (ctinfo != IP_CT_ESTABLISHED && 1425 if (ctinfo != IP_CT_ESTABLISHED &&
1426 ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) 1426 ctinfo != IP_CT_ESTABLISHED_REPLY)
1427 return NF_ACCEPT; 1427 return NF_ACCEPT;
1428 1428
1429 /* No Data ? */ 1429 /* No Data ? */
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index e0ee010935e7..2e7ccbb43ddb 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -456,7 +456,8 @@ __build_packet_message(struct nfulnl_instance *inst,
456 if (skb->mark) 456 if (skb->mark)
457 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark)); 457 NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));
458 458
459 if (indev && skb->dev) { 459 if (indev && skb->dev &&
460 skb->mac_header != skb->network_header) {
460 struct nfulnl_msg_packet_hw phw; 461 struct nfulnl_msg_packet_hw phw;
461 int len = dev_parse_header(skb, phw.hw_addr); 462 int len = dev_parse_header(skb, phw.hw_addr);
462 if (len > 0) { 463 if (len > 0) {
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index b83123f12b42..fdd2fafe0a14 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -335,7 +335,8 @@ nfqnl_build_packet_message(struct nfqnl_instance *queue,
335 if (entskb->mark) 335 if (entskb->mark)
336 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark)); 336 NLA_PUT_BE32(skb, NFQA_MARK, htonl(entskb->mark));
337 337
338 if (indev && entskb->dev) { 338 if (indev && entskb->dev &&
339 entskb->mac_header != entskb->network_header) {
339 struct nfqnl_msg_packet_hw phw; 340 struct nfqnl_msg_packet_hw phw;
340 int len = dev_parse_header(entskb, phw.hw_addr); 341 int len = dev_parse_header(entskb, phw.hw_addr);
341 if (len) { 342 if (len) {
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 9cc46356b577..fe39f7e913df 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -143,9 +143,9 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
143 ct = nf_ct_get(skb, &ctinfo); 143 ct = nf_ct_get(skb, &ctinfo);
144 if (ct && !nf_ct_is_untracked(ct) && 144 if (ct && !nf_ct_is_untracked(ct) &&
145 ((iph->protocol != IPPROTO_ICMP && 145 ((iph->protocol != IPPROTO_ICMP &&
146 ctinfo == IP_CT_IS_REPLY + IP_CT_ESTABLISHED) || 146 ctinfo == IP_CT_ESTABLISHED_REPLY) ||
147 (iph->protocol == IPPROTO_ICMP && 147 (iph->protocol == IPPROTO_ICMP &&
148 ctinfo == IP_CT_IS_REPLY + IP_CT_RELATED)) && 148 ctinfo == IP_CT_RELATED_REPLY)) &&
149 (ct->status & IPS_SRC_NAT_DONE)) { 149 (ct->status & IPS_SRC_NAT_DONE)) {
150 150
151 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip; 151 daddr = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b54ec41adea9..461b16fa1c52 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -804,6 +804,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
804 } else { 804 } else {
805 h.h2->tp_vlan_tci = 0; 805 h.h2->tp_vlan_tci = 0;
806 } 806 }
807 h.h2->tp_padding = 0;
807 hdrlen = sizeof(*h.h2); 808 hdrlen = sizeof(*h.h2);
808 break; 809 break;
809 default: 810 default:
@@ -1743,6 +1744,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
1743 } else { 1744 } else {
1744 aux.tp_vlan_tci = 0; 1745 aux.tp_vlan_tci = 0;
1745 } 1746 }
1747 aux.tp_padding = 0;
1746 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux); 1748 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
1747 } 1749 }
1748 1750
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index b1721d71c27c..b4c680900d7a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -251,9 +251,8 @@ static void dev_watchdog(unsigned long arg)
251 } 251 }
252 252
253 if (some_queue_timedout) { 253 if (some_queue_timedout) {
254 char drivername[64];
255 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", 254 WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
256 dev->name, netdev_drivername(dev, drivername, 64), i); 255 dev->name, netdev_drivername(dev), i);
257 dev->netdev_ops->ndo_tx_timeout(dev); 256 dev->netdev_ops->ndo_tx_timeout(dev);
258 } 257 }
259 if (!mod_timer(&dev->watchdog_timer, 258 if (!mod_timer(&dev->watchdog_timer,
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 0a9a2ec2e469..c3b75333b821 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -43,6 +43,7 @@
43#include <linux/sunrpc/gss_krb5.h> 43#include <linux/sunrpc/gss_krb5.h>
44#include <linux/sunrpc/xdr.h> 44#include <linux/sunrpc/xdr.h>
45#include <linux/crypto.h> 45#include <linux/crypto.h>
46#include <linux/sunrpc/gss_krb5_enctypes.h>
46 47
47#ifdef RPC_DEBUG 48#ifdef RPC_DEBUG
48# define RPCDBG_FACILITY RPCDBG_AUTH 49# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -750,7 +751,7 @@ static struct gss_api_mech gss_kerberos_mech = {
750 .gm_ops = &gss_kerberos_ops, 751 .gm_ops = &gss_kerberos_ops,
751 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs), 752 .gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
752 .gm_pfs = gss_kerberos_pfs, 753 .gm_pfs = gss_kerberos_pfs,
753 .gm_upcall_enctypes = "18,17,16,23,3,1,2", 754 .gm_upcall_enctypes = KRB5_SUPPORTED_ENCTYPES,
754}; 755};
755 756
756static int __init init_kerberos_module(void) 757static int __init init_kerberos_module(void)
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 70cbc8ca371e..10823e2b60ce 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -3406,11 +3406,11 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
3406 i = 0; 3406 i = 0;
3407 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { 3407 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3408 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) { 3408 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
3409 request->ssids[i].ssid_len = nla_len(attr); 3409 if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
3410 if (request->ssids[i].ssid_len > IEEE80211_MAX_SSID_LEN) {
3411 err = -EINVAL; 3410 err = -EINVAL;
3412 goto out_free; 3411 goto out_free;
3413 } 3412 }
3413 request->ssids[i].ssid_len = nla_len(attr);
3414 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr)); 3414 memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
3415 i++; 3415 i++;
3416 } 3416 }
@@ -3572,12 +3572,11 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
3572 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) { 3572 if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
3573 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], 3573 nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
3574 tmp) { 3574 tmp) {
3575 request->ssids[i].ssid_len = nla_len(attr); 3575 if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
3576 if (request->ssids[i].ssid_len >
3577 IEEE80211_MAX_SSID_LEN) {
3578 err = -EINVAL; 3576 err = -EINVAL;
3579 goto out_free; 3577 goto out_free;
3580 } 3578 }
3579 request->ssids[i].ssid_len = nla_len(attr);
3581 memcpy(request->ssids[i].ssid, nla_data(attr), 3580 memcpy(request->ssids[i].ssid, nla_data(attr),
3582 nla_len(attr)); 3581 nla_len(attr));
3583 i++; 3582 i++;
diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c
index 47f1b8638df9..b11ea692bd7d 100644
--- a/net/xfrm/xfrm_replay.c
+++ b/net/xfrm/xfrm_replay.c
@@ -265,7 +265,7 @@ static void xfrm_replay_advance_bmp(struct xfrm_state *x, __be32 net_seq)
265 bitnr = bitnr & 0x1F; 265 bitnr = bitnr & 0x1F;
266 replay_esn->bmp[nr] |= (1U << bitnr); 266 replay_esn->bmp[nr] |= (1U << bitnr);
267 } else { 267 } else {
268 nr = replay_esn->replay_window >> 5; 268 nr = (replay_esn->replay_window - 1) >> 5;
269 for (i = 0; i <= nr; i++) 269 for (i = 0; i <= nr; i++)
270 replay_esn->bmp[i] = 0; 270 replay_esn->bmp[i] = 0;
271 271
@@ -471,7 +471,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
471 bitnr = bitnr & 0x1F; 471 bitnr = bitnr & 0x1F;
472 replay_esn->bmp[nr] |= (1U << bitnr); 472 replay_esn->bmp[nr] |= (1U << bitnr);
473 } else { 473 } else {
474 nr = replay_esn->replay_window >> 5; 474 nr = (replay_esn->replay_window - 1) >> 5;
475 for (i = 0; i <= nr; i++) 475 for (i = 0; i <= nr; i++)
476 replay_esn->bmp[i] = 0; 476 replay_esn->bmp[i] = 0;
477 477
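The xfrm_replay change above fixes an off-by-one when converting the window size in bits into the index of the last bitmap word. A worked example with illustrative numbers:

/* replay_window == 128 bits -> bmp[] holds 4 x 32-bit words (indices 0..3)
 *	128 >> 5        == 4	-> old code: the loop also clears bmp[4], out of bounds
 *	(128 - 1) >> 5  == 3	-> new code: the loop stops at the last valid word
 */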
diff --git a/scripts/Makefile.asm-generic b/scripts/Makefile.asm-generic
index 490122c3e2aa..40caf3c26cd5 100644
--- a/scripts/Makefile.asm-generic
+++ b/scripts/Makefile.asm-generic
@@ -17,6 +17,7 @@ quiet_cmd_wrap = WRAP $@
17cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@ 17cmd_wrap = echo "\#include <asm-generic/$*.h>" >$@
18 18
19all: $(patsubst %, $(obj)/%, $(generic-y)) 19all: $(patsubst %, $(obj)/%, $(generic-y))
20 @:
20 21
21$(obj)/%.h: 22$(obj)/%.h:
22 $(call cmd,wrap) 23 $(call cmd,wrap)
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8657f99bfb2b..b0aa2c680593 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -1943,6 +1943,11 @@ sub process {
1943 WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr); 1943 WARN("LINUX_VERSION_CODE should be avoided, code should be for the version to which it is merged\n" . $herecurr);
1944 } 1944 }
1945 1945
1946# check for uses of printk_ratelimit
1947 if ($line =~ /\bprintk_ratelimit\s*\(/) {
1948 WARN("Prefer printk_ratelimited or pr_<level>_ratelimited to printk_ratelimit\n" . $herecurr);
1949 }
1950
1946# printk should use KERN_* levels. Note that follow on printk's on the 1951# printk should use KERN_* levels. Note that follow on printk's on the
1947# same line do not need a level, so we use the current block context 1952# same line do not need a level, so we use the current block context
1948# to try and find and validate the current printk. In summary the current 1953# to try and find and validate the current printk. In summary the current
diff --git a/scripts/depmod.sh b/scripts/depmod.sh
new file mode 100755
index 000000000000..3b029cba2baf
--- /dev/null
+++ b/scripts/depmod.sh
@@ -0,0 +1,48 @@
1#!/bin/sh
2#
3# A depmod wrapper used by the toplevel Makefile
4
5if test $# -ne 2; then
6 echo "Usage: $0 /sbin/depmod <kernelrelease>" >&2
7 exit 1
8fi
9DEPMOD=$1
10KERNELRELEASE=$2
11
12if ! "$DEPMOD" -V 2>/dev/null | grep -q module-init-tools; then
13 echo "Warning: you may need to install module-init-tools" >&2
14 echo "See http://www.codemonkey.org.uk/docs/post-halloween-2.6.txt" >&2
15 sleep 1
16fi
17
18if ! test -r System.map -a -x "$DEPMOD"; then
19 exit 0
20fi
21# older versions of depmod require the version string to start with three
22# numbers, so we cheat with a symlink here
23depmod_hack_needed=true
24mkdir -p .tmp_depmod/lib/modules/$KERNELRELEASE
25if "$DEPMOD" -b .tmp_depmod $KERNELRELEASE 2>/dev/null; then
26 if test -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep -o \
27 -e .tmp_depmod/lib/modules/$KERNELRELEASE/modules.dep.bin; then
28 depmod_hack_needed=false
29 fi
30fi
31if $depmod_hack_needed; then
32 symlink="$INSTALL_MOD_PATH/lib/modules/99.98.$KERNELRELEASE"
33 ln -s "$KERNELRELEASE" "$symlink"
34 KERNELRELEASE=99.98.$KERNELRELEASE
35fi
36
37set -- -ae -F System.map
38if test -n "$INSTALL_MOD_PATH"; then
39 set -- "$@" -b "$INSTALL_MOD_PATH"
40fi
41"$DEPMOD" "$@" "$KERNELRELEASE"
42ret=$?
43
44if $depmod_hack_needed; then
45 rm -f "$symlink"
46fi
47
48exit $ret
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index ec1bcecf2cda..3d2fd141dff7 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -612,7 +612,7 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
612static int apparmor_task_setrlimit(struct task_struct *task, 612static int apparmor_task_setrlimit(struct task_struct *task,
613 unsigned int resource, struct rlimit *new_rlim) 613 unsigned int resource, struct rlimit *new_rlim)
614{ 614{
615 struct aa_profile *profile = aa_current_profile(); 615 struct aa_profile *profile = __aa_current_profile();
616 int error = 0; 616 int error = 0;
617 617
618 if (!unconfined(profile)) 618 if (!unconfined(profile))
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index cd1f779fa51d..1be68269e1c2 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -474,17 +474,11 @@ struct cgroup_subsys devices_subsys = {
474 .subsys_id = devices_subsys_id, 474 .subsys_id = devices_subsys_id,
475}; 475};
476 476
477int devcgroup_inode_permission(struct inode *inode, int mask) 477int __devcgroup_inode_permission(struct inode *inode, int mask)
478{ 478{
479 struct dev_cgroup *dev_cgroup; 479 struct dev_cgroup *dev_cgroup;
480 struct dev_whitelist_item *wh; 480 struct dev_whitelist_item *wh;
481 481
482 dev_t device = inode->i_rdev;
483 if (!device)
484 return 0;
485 if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
486 return 0;
487
488 rcu_read_lock(); 482 rcu_read_lock();
489 483
490 dev_cgroup = task_devcgroup(current); 484 dev_cgroup = task_devcgroup(current);
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index d31862e0aa1c..8e319a416eec 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -71,9 +71,8 @@ EXPORT_SYMBOL(complete_request_key);
71 * This is called in context of freshly forked kthread before kernel_execve(), 71 * This is called in context of freshly forked kthread before kernel_execve(),
72 * so we can simply install the desired session_keyring at this point. 72 * so we can simply install the desired session_keyring at this point.
73 */ 73 */
74static int umh_keys_init(struct subprocess_info *info) 74static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
75{ 75{
76 struct cred *cred = (struct cred*)current_cred();
77 struct key *keyring = info->data; 76 struct key *keyring = info->data;
78 77
79 return install_session_keyring_to_cred(cred, keyring); 78 return install_session_keyring_to_cred(cred, keyring);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index a0d38459d650..20219ef5439a 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -1476,7 +1476,6 @@ static int inode_has_perm(const struct cred *cred,
1476 unsigned flags) 1476 unsigned flags)
1477{ 1477{
1478 struct inode_security_struct *isec; 1478 struct inode_security_struct *isec;
1479 struct common_audit_data ad;
1480 u32 sid; 1479 u32 sid;
1481 1480
1482 validate_creds(cred); 1481 validate_creds(cred);
@@ -1487,15 +1486,21 @@ static int inode_has_perm(const struct cred *cred,
1487 sid = cred_sid(cred); 1486 sid = cred_sid(cred);
1488 isec = inode->i_security; 1487 isec = inode->i_security;
1489 1488
1490 if (!adp) {
1491 adp = &ad;
1492 COMMON_AUDIT_DATA_INIT(&ad, INODE);
1493 ad.u.inode = inode;
1494 }
1495
1496 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags); 1489 return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
1497} 1490}
1498 1491
1492static int inode_has_perm_noadp(const struct cred *cred,
1493 struct inode *inode,
1494 u32 perms,
1495 unsigned flags)
1496{
1497 struct common_audit_data ad;
1498
1499 COMMON_AUDIT_DATA_INIT(&ad, INODE);
1500 ad.u.inode = inode;
1501 return inode_has_perm(cred, inode, perms, &ad, flags);
1502}
1503
1499/* Same as inode_has_perm, but pass explicit audit data containing 1504/* Same as inode_has_perm, but pass explicit audit data containing
1500 the dentry to help the auditing code to more easily generate the 1505 the dentry to help the auditing code to more easily generate the
1501 pathname if needed. */ 1506 pathname if needed. */
@@ -2122,8 +2127,8 @@ static inline void flush_unauthorized_files(const struct cred *cred,
2122 struct tty_file_private, list); 2127 struct tty_file_private, list);
2123 file = file_priv->file; 2128 file = file_priv->file;
2124 inode = file->f_path.dentry->d_inode; 2129 inode = file->f_path.dentry->d_inode;
2125 if (inode_has_perm(cred, inode, 2130 if (inode_has_perm_noadp(cred, inode,
2126 FILE__READ | FILE__WRITE, NULL, 0)) { 2131 FILE__READ | FILE__WRITE, 0)) {
2127 drop_tty = 1; 2132 drop_tty = 1;
2128 } 2133 }
2129 } 2134 }
@@ -3228,7 +3233,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
3228 * new inode label or new policy. 3233 * new inode label or new policy.
3229 * This check is not redundant - do not remove. 3234 * This check is not redundant - do not remove.
3230 */ 3235 */
3231 return inode_has_perm(cred, inode, open_file_to_av(file), NULL, 0); 3236 return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
3232} 3237}
3233 3238
3234/* task security operations */ 3239/* task security operations */
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 77d44138864f..35459340019e 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -29,6 +29,7 @@
29#include <linux/audit.h> 29#include <linux/audit.h>
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/kobject.h> 31#include <linux/kobject.h>
32#include <linux/ctype.h>
32 33
33/* selinuxfs pseudo filesystem for exporting the security policy API. 34/* selinuxfs pseudo filesystem for exporting the security policy API.
34 Based on the proc code and the fs/nfsd/nfsctl.c code. */ 35 Based on the proc code and the fs/nfsd/nfsctl.c code. */
@@ -751,6 +752,14 @@ out:
751 return length; 752 return length;
752} 753}
753 754
755static inline int hexcode_to_int(int code) {
756 if (code == '\0' || !isxdigit(code))
757 return -1;
758 if (isdigit(code))
759 return code - '0';
760 return tolower(code) - 'a' + 10;
761}
762
754static ssize_t sel_write_create(struct file *file, char *buf, size_t size) 763static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
755{ 764{
756 char *scon = NULL, *tcon = NULL; 765 char *scon = NULL, *tcon = NULL;
@@ -785,8 +794,34 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
785 nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf); 794 nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
786 if (nargs < 3 || nargs > 4) 795 if (nargs < 3 || nargs > 4)
787 goto out; 796 goto out;
788 if (nargs == 4) 797 if (nargs == 4) {
798 /*
 799 * If the name of the new object to be queried contains
 800 * whitespace or multibyte characters, it must be encoded
 801 * according to the percent-encoding rule; otherwise the
 802 * sscanf logic above picks up only the left half of the
 803 * supplied name, split unexpectedly at the first whitespace.
804 */
805 char *r, *w;
806 int c1, c2;
807
808 r = w = namebuf;
809 do {
810 c1 = *r++;
811 if (c1 == '+')
812 c1 = ' ';
813 else if (c1 == '%') {
814 if ((c1 = hexcode_to_int(*r++)) < 0)
815 goto out;
816 if ((c2 = hexcode_to_int(*r++)) < 0)
817 goto out;
818 c1 = (c1 << 4) | c2;
819 }
820 *w++ = c1;
821 } while (c1 != '\0');
822
789 objname = namebuf; 823 objname = namebuf;
824 }
790 825
791 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); 826 length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
792 if (length) 827 if (length)
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index 102e9ec1b77a..d246aca3f4fb 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3222,6 +3222,9 @@ static int filename_trans_write(struct policydb *p, void *fp)
3222 __le32 buf[1]; 3222 __le32 buf[1];
3223 int rc; 3223 int rc;
3224 3224
3225 if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
3226 return 0;
3227
3225 nel = 0; 3228 nel = 0;
3226 rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel); 3229 rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
3227 if (rc) 3230 if (rc)
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index 162a864dba24..9fc2e15841c9 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -138,7 +138,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
138 } 138 }
139 if (need_dev) { 139 if (need_dev) {
140 /* Get mount point or device file. */ 140 /* Get mount point or device file. */
141 if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) { 141 if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
142 error = -ENOENT; 142 error = -ENOENT;
143 goto out; 143 goto out;
144 } 144 }
diff --git a/sound/core/misc.c b/sound/core/misc.c
index 2c41825c836e..eb9fe2e1d291 100644
--- a/sound/core/misc.c
+++ b/sound/core/misc.c
@@ -58,26 +58,6 @@ static const char *sanity_file_name(const char *path)
58 else 58 else
59 return path; 59 return path;
60} 60}
61
62/* print file and line with a certain printk prefix */
63static int print_snd_pfx(unsigned int level, const char *path, int line,
64 const char *format)
65{
66 const char *file = sanity_file_name(path);
67 char tmp[] = "<0>";
68 const char *pfx = level ? KERN_DEBUG : KERN_DEFAULT;
69 int ret = 0;
70
71 if (format[0] == '<' && format[2] == '>') {
72 tmp[1] = format[1];
73 pfx = tmp;
74 ret = 1;
75 }
76 printk("%sALSA %s:%d: ", pfx, file, line);
77 return ret;
78}
79#else
80#define print_snd_pfx(level, path, line, format) 0
81#endif 61#endif
82 62
83#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK) 63#if defined(CONFIG_SND_DEBUG) || defined(CONFIG_SND_VERBOSE_PRINTK)
@@ -85,15 +65,29 @@ void __snd_printk(unsigned int level, const char *path, int line,
85 const char *format, ...) 65 const char *format, ...)
86{ 66{
87 va_list args; 67 va_list args;
88 68#ifdef CONFIG_SND_VERBOSE_PRINTK
69 struct va_format vaf;
70 char verbose_fmt[] = KERN_DEFAULT "ALSA %s:%d %pV";
71#endif
72
89#ifdef CONFIG_SND_DEBUG 73#ifdef CONFIG_SND_DEBUG
90 if (debug < level) 74 if (debug < level)
91 return; 75 return;
92#endif 76#endif
77
93 va_start(args, format); 78 va_start(args, format);
94 if (print_snd_pfx(level, path, line, format)) 79#ifdef CONFIG_SND_VERBOSE_PRINTK
95 format += 3; /* skip the printk level-prefix */ 80 vaf.fmt = format;
81 vaf.va = &args;
82 if (format[0] == '<' && format[2] == '>') {
83 memcpy(verbose_fmt, format, 3);
84 vaf.fmt = format + 3;
85 } else if (level)
86 memcpy(verbose_fmt, KERN_DEBUG, 3);
87 printk(verbose_fmt, sanity_file_name(path), line, &vaf);
88#else
96 vprintk(format, args); 89 vprintk(format, args);
90#endif
97 va_end(args); 91 va_end(args);
98} 92}
99EXPORT_SYMBOL_GPL(__snd_printk); 93EXPORT_SYMBOL_GPL(__snd_printk);
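The rewritten __snd_printk() above leans on the kernel's %pV extension: a struct va_format bundles the caller's format string and va_list so that the "ALSA file:line" prefix and the message body are emitted by a single printk() call instead of two, and the caller's own "<n>" level prefix, when present, is copied into the prefix template. A userspace analogue of that wrapper shape, using vsnprintf() in place of %pV (names invented for illustration):

#include <stdarg.h>
#include <stdio.h>

/* Build the caller's message first, then emit prefix and message with
 * one write so concurrent loggers cannot interleave the two halves. */
static void log_with_prefix(const char *file, int line, const char *fmt, ...)
{
        char msg[256];
        va_list args;

        va_start(args, fmt);
        vsnprintf(msg, sizeof(msg), fmt, args);
        va_end(args);

        fprintf(stderr, "ALSA %s:%d %s", file, line, msg);
}

int main(void)
{
        log_with_prefix("misc.c", 42, "unsupported rate %d\n", 192000);
        return 0;
}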
diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
index 86ee16ca365e..440030818db7 100644
--- a/sound/firewire/isight.c
+++ b/sound/firewire/isight.c
@@ -209,6 +209,7 @@ static void isight_packet(struct fw_iso_context *context, u32 cycle,
209 isight->packet_index = -1; 209 isight->packet_index = -1;
210 return; 210 return;
211 } 211 }
212 fw_iso_context_queue_flush(isight->context);
212 213
213 if (++index >= QUEUE_LENGTH) 214 if (++index >= QUEUE_LENGTH)
214 index = 0; 215 index = 0;
diff --git a/sound/pci/asihpi/hpidspcd.c b/sound/pci/asihpi/hpidspcd.c
index fb311d8c05bf..5c6ea113d219 100644
--- a/sound/pci/asihpi/hpidspcd.c
+++ b/sound/pci/asihpi/hpidspcd.c
@@ -60,7 +60,7 @@ struct code_header {
60 HPI_VER_MINOR(HPI_VER) * 100 + HPI_VER_RELEASE(HPI_VER))) 60 HPI_VER_MINOR(HPI_VER) * 100 + HPI_VER_RELEASE(HPI_VER)))
61 61
62/***********************************************************************/ 62/***********************************************************************/
63#include "linux/pci.h" 63#include <linux/pci.h>
64/*-------------------------------------------------------------------*/ 64/*-------------------------------------------------------------------*/
65short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code, 65short hpi_dsp_code_open(u32 adapter, struct dsp_code *ps_dsp_code,
66 u32 *pos_error_code) 66 u32 *pos_error_code)
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 5e619a84da06..15f0161ce4a2 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -1440,6 +1440,14 @@ static struct snd_emu_chip_details emu_chip_details[] = {
1440 .ca0102_chip = 1, 1440 .ca0102_chip = 1,
1441 .spk71 = 1, 1441 .spk71 = 1,
1442 .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */ 1442 .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 */
1443 /* EMU0404 PCIe */
1444 {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40051102,
1445 .driver = "Audigy2", .name = "E-mu 0404 PCIe [MAEM8984]",
1446 .id = "EMU0404",
1447 .emu10k2_chip = 1,
1448 .ca0108_chip = 1,
1449 .spk71 = 1,
1450 .emu_model = EMU_MODEL_EMU0404}, /* EMU 0404 PCIe ver_03 */
1443 /* Note that all E-mu cards require kernel 2.6 or newer. */ 1451 /* Note that all E-mu cards require kernel 2.6 or newer. */
1444 {.vendor = 0x1102, .device = 0x0008, 1452 {.vendor = 0x1102, .device = 0x0008,
1445 .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]", 1453 .driver = "Audigy2", .name = "SB Audigy 2 Value [Unknown]",
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index eacd4901a308..a7ec7030cf87 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1234,9 +1234,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
1234 sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci)); 1234 sprintf(chip->tea.bus_info, "PCI:%s", pci_name(pci));
1235 if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 && 1235 if ((tea575x_tuner & TUNER_TYPE_MASK) > 0 &&
1236 (tea575x_tuner & TUNER_TYPE_MASK) < 4) { 1236 (tea575x_tuner & TUNER_TYPE_MASK) < 4) {
1237 if (snd_tea575x_init(&chip->tea)) 1237 if (snd_tea575x_init(&chip->tea)) {
1238 snd_printk(KERN_ERR "TEA575x radio not found\n"); 1238 snd_printk(KERN_ERR "TEA575x radio not found\n");
1239 } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) 1239 snd_fm801_free(chip);
1240 return -ENODEV;
1241 }
1242 } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) {
1240 /* autodetect tuner connection */ 1243 /* autodetect tuner connection */
1241 for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) { 1244 for (tea575x_tuner = 1; tea575x_tuner <= 3; tea575x_tuner++) {
1242 chip->tea575x_tuner = tea575x_tuner; 1245 chip->tea575x_tuner = tea575x_tuner;
@@ -1246,6 +1249,12 @@ static int __devinit snd_fm801_create(struct snd_card *card,
1246 break; 1249 break;
1247 } 1250 }
1248 } 1251 }
1252 if (tea575x_tuner == 4) {
1253 snd_printk(KERN_ERR "TEA575x radio not found\n");
1254 snd_fm801_free(chip);
1255 return -ENODEV;
1256 }
1257 }
1249 strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card)); 1258 strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card));
1250#endif 1259#endif
1251 1260
diff --git a/sound/pci/hda/hda_beep.h b/sound/pci/hda/hda_beep.h
index f1de1bac042c..55f0647458c7 100644
--- a/sound/pci/hda/hda_beep.h
+++ b/sound/pci/hda/hda_beep.h
@@ -50,7 +50,12 @@ int snd_hda_enable_beep_device(struct hda_codec *codec, int enable);
50int snd_hda_attach_beep_device(struct hda_codec *codec, int nid); 50int snd_hda_attach_beep_device(struct hda_codec *codec, int nid);
51void snd_hda_detach_beep_device(struct hda_codec *codec); 51void snd_hda_detach_beep_device(struct hda_codec *codec);
52#else 52#else
53#define snd_hda_attach_beep_device(...) 0 53static inline int snd_hda_attach_beep_device(struct hda_codec *codec, int nid)
54#define snd_hda_detach_beep_device(...) 54{
55 return 0;
56}
57static inline void snd_hda_detach_beep_device(struct hda_codec *codec)
58{
59}
55#endif 60#endif
56#endif 61#endif
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 696ac2590307..d694e9d4921d 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -506,9 +506,11 @@ static void ad198x_power_eapd_write(struct hda_codec *codec, hda_nid_t front,
506 hda_nid_t hp) 506 hda_nid_t hp)
507{ 507{
508 struct ad198x_spec *spec = codec->spec; 508 struct ad198x_spec *spec = codec->spec;
509 snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE, 509 if (snd_hda_query_pin_caps(codec, front) & AC_PINCAP_EAPD)
510 snd_hda_codec_write(codec, front, 0, AC_VERB_SET_EAPD_BTLENABLE,
510 !spec->inv_eapd ? 0x00 : 0x02); 511 !spec->inv_eapd ? 0x00 : 0x02);
511 snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE, 512 if (snd_hda_query_pin_caps(codec, hp) & AC_PINCAP_EAPD)
513 snd_hda_codec_write(codec, hp, 0, AC_VERB_SET_EAPD_BTLENABLE,
512 !spec->inv_eapd ? 0x00 : 0x02); 514 !spec->inv_eapd ? 0x00 : 0x02);
513} 515}
514 516
@@ -524,6 +526,10 @@ static void ad198x_power_eapd(struct hda_codec *codec)
524 case 0x11d4184a: 526 case 0x11d4184a:
525 case 0x11d4194a: 527 case 0x11d4194a:
526 case 0x11d4194b: 528 case 0x11d4194b:
529 case 0x11d41988:
530 case 0x11d4198b:
531 case 0x11d4989a:
532 case 0x11d4989b:
527 ad198x_power_eapd_write(codec, 0x12, 0x11); 533 ad198x_power_eapd_write(codec, 0x12, 0x11);
528 break; 534 break;
529 case 0x11d41981: 535 case 0x11d41981:
@@ -533,12 +539,6 @@ static void ad198x_power_eapd(struct hda_codec *codec)
533 case 0x11d41986: 539 case 0x11d41986:
534 ad198x_power_eapd_write(codec, 0x1b, 0x1a); 540 ad198x_power_eapd_write(codec, 0x1b, 0x1a);
535 break; 541 break;
536 case 0x11d41988:
537 case 0x11d4198b:
538 case 0x11d4989a:
539 case 0x11d4989b:
540 ad198x_power_eapd_write(codec, 0x29, 0x22);
541 break;
542 } 542 }
543} 543}
544 544
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3e6b9a8539c2..694b9daf691f 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3102,6 +3102,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3102 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), 3102 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS),
3103 SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO), 3103 SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO),
3104 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ 3104 SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */
3105 SND_PCI_QUIRK(0x1b0a, 0x2092, "CyberpowerPC Gamer Xplorer N57001", CXT5066_AUTO),
3105 {} 3106 {}
3106}; 3107};
3107 3108
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 7a4e10002f56..61a774b3d3cb 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1141,6 +1141,13 @@ static void update_speakers(struct hda_codec *codec)
1141 struct alc_spec *spec = codec->spec; 1141 struct alc_spec *spec = codec->spec;
1142 int on; 1142 int on;
1143 1143
1144 /* Control HP pins/amps depending on master_mute state;
1145 * in general, HP pins/amps control should be enabled in all cases,
 1146 * but for now it is enabled only for master_mute, just to be safe
1147 */
1148 do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
1149 spec->autocfg.hp_pins, spec->master_mute, true);
1150
1144 if (!spec->automute) 1151 if (!spec->automute)
1145 on = 0; 1152 on = 0;
1146 else 1153 else
@@ -6201,11 +6208,6 @@ static const struct snd_kcontrol_new alc260_input_mixer[] = {
6201/* update HP, line and mono out pins according to the master switch */ 6208/* update HP, line and mono out pins according to the master switch */
6202static void alc260_hp_master_update(struct hda_codec *codec) 6209static void alc260_hp_master_update(struct hda_codec *codec)
6203{ 6210{
6204 struct alc_spec *spec = codec->spec;
6205
6206 /* change HP pins */
6207 do_automute(codec, ARRAY_SIZE(spec->autocfg.hp_pins),
6208 spec->autocfg.hp_pins, spec->master_mute, true);
6209 update_speakers(codec); 6211 update_speakers(codec);
6210} 6212}
6211 6213
@@ -11924,7 +11926,7 @@ static const struct hda_verb alc262_nec_verbs[] = {
11924 * 0x1b = port replicator headphone out 11926 * 0x1b = port replicator headphone out
11925 */ 11927 */
11926 11928
11927#define ALC_HP_EVENT 0x37 11929#define ALC_HP_EVENT ALC880_HP_EVENT
11928 11930
11929static const struct hda_verb alc262_fujitsu_unsol_verbs[] = { 11931static const struct hda_verb alc262_fujitsu_unsol_verbs[] = {
11930 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC_HP_EVENT}, 11932 {0x14, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | ALC_HP_EVENT},
@@ -13314,9 +13316,8 @@ static void alc268_acer_lc_setup(struct hda_codec *codec)
13314 struct alc_spec *spec = codec->spec; 13316 struct alc_spec *spec = codec->spec;
13315 spec->autocfg.hp_pins[0] = 0x15; 13317 spec->autocfg.hp_pins[0] = 0x15;
13316 spec->autocfg.speaker_pins[0] = 0x14; 13318 spec->autocfg.speaker_pins[0] = 0x14;
13317 spec->automute_mixer_nid[0] = 0x0f;
13318 spec->automute = 1; 13319 spec->automute = 1;
13319 spec->automute_mode = ALC_AUTOMUTE_MIXER; 13320 spec->automute_mode = ALC_AUTOMUTE_AMP;
13320 spec->ext_mic.pin = 0x18; 13321 spec->ext_mic.pin = 0x18;
13321 spec->ext_mic.mux_idx = 0; 13322 spec->ext_mic.mux_idx = 0;
13322 spec->int_mic.pin = 0x12; 13323 spec->int_mic.pin = 0x12;
@@ -13860,6 +13861,7 @@ static const struct snd_pci_quirk alc268_cfg_tbl[] = {
13860 SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One", 13861 SND_PCI_QUIRK(0x1025, 0x015b, "Acer Aspire One",
13861 ALC268_ACER_ASPIRE_ONE), 13862 ALC268_ACER_ASPIRE_ONE),
13862 SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL), 13863 SND_PCI_QUIRK(0x1028, 0x0253, "Dell OEM", ALC268_DELL),
13864 SND_PCI_QUIRK(0x1028, 0x02b0, "Dell Inspiron 910", ALC268_AUTO),
13863 SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0, 13865 SND_PCI_QUIRK_MASK(0x1028, 0xfff0, 0x02b0,
13864 "Dell Inspiron Mini9/Vostro A90", ALC268_DELL), 13866 "Dell Inspiron Mini9/Vostro A90", ALC268_DELL),
13865 /* almost compatible with toshiba but with optional digital outs; 13867 /* almost compatible with toshiba but with optional digital outs;
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index 605c99e1e520..c952582fb218 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -832,10 +832,13 @@ static int via_hp_build(struct hda_codec *codec)
832 knew->subdevice = HDA_SUBDEV_NID_FLAG | nid; 832 knew->subdevice = HDA_SUBDEV_NID_FLAG | nid;
833 knew->private_value = nid; 833 knew->private_value = nid;
834 834
835 knew = via_clone_control(spec, &via_hp_mixer[1]); 835 nid = side_mute_channel(spec);
836 if (knew == NULL) 836 if (nid) {
837 return -ENOMEM; 837 knew = via_clone_control(spec, &via_hp_mixer[1]);
838 knew->subdevice = side_mute_channel(spec); 838 if (knew == NULL)
839 return -ENOMEM;
840 knew->subdevice = nid;
841 }
839 842
840 return 0; 843 return 0;
841} 844}
diff --git a/sound/pci/lola/lola.c b/sound/pci/lola/lola.c
index 34b24286d279..2692e5ae5f2d 100644
--- a/sound/pci/lola/lola.c
+++ b/sound/pci/lola/lola.c
@@ -445,7 +445,7 @@ static void lola_reset_setups(struct lola *chip)
445 lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */ 445 lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */
446} 446}
447 447
448static int lola_parse_tree(struct lola *chip) 448static int __devinit lola_parse_tree(struct lola *chip)
449{ 449{
450 unsigned int val; 450 unsigned int val;
451 int nid, err; 451 int nid, err;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 949691a876d3..3f08afc0f0d3 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -521,6 +521,7 @@ MODULE_SUPPORTED_DEVICE("{{RME HDSPM-MADI}}");
521#define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024) 521#define HDSPM_DMA_AREA_KILOBYTES (HDSPM_DMA_AREA_BYTES/1024)
522 522
523/* revisions >= 230 indicate AES32 card */ 523/* revisions >= 230 indicate AES32 card */
524#define HDSPM_MADI_OLD_REV 207
524#define HDSPM_MADI_REV 210 525#define HDSPM_MADI_REV 210
525#define HDSPM_RAYDAT_REV 211 526#define HDSPM_RAYDAT_REV 211
526#define HDSPM_AIO_REV 212 527#define HDSPM_AIO_REV 212
@@ -1143,7 +1144,7 @@ static int hdspm_external_sample_rate(struct hdspm *hdspm)
1143 1144
1144 /* if wordclock has synced freq and wordclock is valid */ 1145 /* if wordclock has synced freq and wordclock is valid */
1145 if ((status2 & HDSPM_wcLock) != 0 && 1146 if ((status2 & HDSPM_wcLock) != 0 &&
1146 (status & HDSPM_SelSyncRef0) == 0) { 1147 (status2 & HDSPM_SelSyncRef0) == 0) {
1147 1148
1148 rate_bits = status2 & HDSPM_wcFreqMask; 1149 rate_bits = status2 & HDSPM_wcFreqMask;
1149 1150
@@ -1639,12 +1640,14 @@ static int snd_hdspm_midi_input_read (struct hdspm_midi *hmidi)
1639 } 1640 }
1640 } 1641 }
1641 hmidi->pending = 0; 1642 hmidi->pending = 0;
1643 spin_unlock_irqrestore(&hmidi->lock, flags);
1642 1644
1645 spin_lock_irqsave(&hmidi->hdspm->lock, flags);
1643 hmidi->hdspm->control_register |= hmidi->ie; 1646 hmidi->hdspm->control_register |= hmidi->ie;
1644 hdspm_write(hmidi->hdspm, HDSPM_controlRegister, 1647 hdspm_write(hmidi->hdspm, HDSPM_controlRegister,
1645 hmidi->hdspm->control_register); 1648 hmidi->hdspm->control_register);
1649 spin_unlock_irqrestore(&hmidi->hdspm->lock, flags);
1646 1650
1647 spin_unlock_irqrestore (&hmidi->lock, flags);
1648 return snd_hdspm_midi_output_write (hmidi); 1651 return snd_hdspm_midi_output_write (hmidi);
1649} 1652}
1650 1653
@@ -6377,6 +6380,7 @@ static int __devinit snd_hdspm_create(struct snd_card *card,
6377 6380
6378 switch (hdspm->firmware_rev) { 6381 switch (hdspm->firmware_rev) {
6379 case HDSPM_MADI_REV: 6382 case HDSPM_MADI_REV:
6383 case HDSPM_MADI_OLD_REV:
6380 hdspm->io_type = MADI; 6384 hdspm->io_type = MADI;
6381 hdspm->card_name = "RME MADI"; 6385 hdspm->card_name = "RME MADI";
6382 hdspm->midiPorts = 3; 6386 hdspm->midiPorts = 3;
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 7fbfa051f6e1..eda955b15834 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -848,9 +848,10 @@ int atmel_ssc_set_audio(int ssc_id)
848 if (IS_ERR(ssc)) 848 if (IS_ERR(ssc))
849 pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n", 849 pr_warn("Unable to parent ASoC SSC DAI on SSC: %ld\n",
850 PTR_ERR(ssc)); 850 PTR_ERR(ssc));
851 else 851 else {
852 ssc_pdev->dev.parent = &(ssc->pdev->dev); 852 ssc_pdev->dev.parent = &(ssc->pdev->dev);
853 ssc_free(ssc); 853 ssc_free(ssc);
854 }
854 855
855 ret = platform_device_add(ssc_pdev); 856 ret = platform_device_add(ssc_pdev);
856 if (ret < 0) 857 if (ret < 0)
diff --git a/sound/soc/blackfin/bf5xx-ad1836.c b/sound/soc/blackfin/bf5xx-ad1836.c
index ea4951cf5526..f79d1655e035 100644
--- a/sound/soc/blackfin/bf5xx-ad1836.c
+++ b/sound/soc/blackfin/bf5xx-ad1836.c
@@ -75,7 +75,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
75 .cpu_dai_name = "bfin-tdm.0", 75 .cpu_dai_name = "bfin-tdm.0",
76 .codec_dai_name = "ad1836-hifi", 76 .codec_dai_name = "ad1836-hifi",
77 .platform_name = "bfin-tdm-pcm-audio", 77 .platform_name = "bfin-tdm-pcm-audio",
78 .codec_name = "ad1836.0", 78 .codec_name = "spi0.4",
79 .ops = &bf5xx_ad1836_ops, 79 .ops = &bf5xx_ad1836_ops,
80 }, 80 },
81 { 81 {
@@ -84,7 +84,7 @@ static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
84 .cpu_dai_name = "bfin-tdm.1", 84 .cpu_dai_name = "bfin-tdm.1",
85 .codec_dai_name = "ad1836-hifi", 85 .codec_dai_name = "ad1836-hifi",
86 .platform_name = "bfin-tdm-pcm-audio", 86 .platform_name = "bfin-tdm-pcm-audio",
87 .codec_name = "ad1836.0", 87 .codec_name = "spi0.4",
88 .ops = &bf5xx_ad1836_ops, 88 .ops = &bf5xx_ad1836_ops,
89 }, 89 },
90}; 90};
diff --git a/sound/soc/codecs/ad1836.c b/sound/soc/codecs/ad1836.c
index ab63d52e36e1..754c496412bd 100644
--- a/sound/soc/codecs/ad1836.c
+++ b/sound/soc/codecs/ad1836.c
@@ -145,22 +145,22 @@ static int ad1836_hw_params(struct snd_pcm_substream *substream,
145 /* bit size */ 145 /* bit size */
146 switch (params_format(params)) { 146 switch (params_format(params)) {
147 case SNDRV_PCM_FORMAT_S16_LE: 147 case SNDRV_PCM_FORMAT_S16_LE:
148 word_len = 3; 148 word_len = AD1836_WORD_LEN_16;
149 break; 149 break;
150 case SNDRV_PCM_FORMAT_S20_3LE: 150 case SNDRV_PCM_FORMAT_S20_3LE:
151 word_len = 1; 151 word_len = AD1836_WORD_LEN_20;
152 break; 152 break;
153 case SNDRV_PCM_FORMAT_S24_LE: 153 case SNDRV_PCM_FORMAT_S24_LE:
154 case SNDRV_PCM_FORMAT_S32_LE: 154 case SNDRV_PCM_FORMAT_S32_LE:
155 word_len = 0; 155 word_len = AD1836_WORD_LEN_24;
156 break; 156 break;
157 } 157 }
158 158
159 snd_soc_update_bits(codec, AD1836_DAC_CTRL1, 159 snd_soc_update_bits(codec, AD1836_DAC_CTRL1, AD1836_DAC_WORD_LEN_MASK,
160 AD1836_DAC_WORD_LEN_MASK, word_len); 160 word_len << AD1836_DAC_WORD_LEN_OFFSET);
161 161
162 snd_soc_update_bits(codec, AD1836_ADC_CTRL2, 162 snd_soc_update_bits(codec, AD1836_ADC_CTRL2, AD1836_ADC_WORD_LEN_MASK,
163 AD1836_ADC_WORD_LEN_MASK, word_len); 163 word_len << AD1836_ADC_WORD_OFFSET);
164 164
165 return 0; 165 return 0;
166} 166}
diff --git a/sound/soc/codecs/ad1836.h b/sound/soc/codecs/ad1836.h
index 845596717fdf..9d6a3f8f8aaf 100644
--- a/sound/soc/codecs/ad1836.h
+++ b/sound/soc/codecs/ad1836.h
@@ -25,6 +25,7 @@
25#define AD1836_DAC_SERFMT_PCK256 (0x4 << 5) 25#define AD1836_DAC_SERFMT_PCK256 (0x4 << 5)
26#define AD1836_DAC_SERFMT_PCK128 (0x5 << 5) 26#define AD1836_DAC_SERFMT_PCK128 (0x5 << 5)
27#define AD1836_DAC_WORD_LEN_MASK 0x18 27#define AD1836_DAC_WORD_LEN_MASK 0x18
28#define AD1836_DAC_WORD_LEN_OFFSET 3
28 29
29#define AD1836_DAC_CTRL2 1 30#define AD1836_DAC_CTRL2 1
30#define AD1836_DACL1_MUTE 0 31#define AD1836_DACL1_MUTE 0
@@ -51,6 +52,7 @@
51#define AD1836_ADCL2_MUTE 2 52#define AD1836_ADCL2_MUTE 2
52#define AD1836_ADCR2_MUTE 3 53#define AD1836_ADCR2_MUTE 3
53#define AD1836_ADC_WORD_LEN_MASK 0x30 54#define AD1836_ADC_WORD_LEN_MASK 0x30
55#define AD1836_ADC_WORD_OFFSET 5
54#define AD1836_ADC_SERFMT_MASK (7 << 6) 56#define AD1836_ADC_SERFMT_MASK (7 << 6)
55#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6) 57#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
56#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6) 58#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
@@ -60,4 +62,8 @@
60 62
61#define AD1836_NUM_REGS 16 63#define AD1836_NUM_REGS 16
62 64
65#define AD1836_WORD_LEN_24 0x0
66#define AD1836_WORD_LEN_20 0x1
67#define AD1836_WORD_LEN_16 0x2
68
63#endif 69#endif
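The ad1836 change above replaces pre-shifted magic numbers with a 2-bit word-length value plus an explicit shift: 0x0 selects 24-bit, 0x1 selects 20-bit and 0x2 selects 16-bit data, and for the DAC the field lives at bits 3-4 of DAC_CTRL1 (mask 0x18, offset 3). A standalone check of that arithmetic, reusing the constants from the header hunk (DAC side only):

#include <stdio.h>

#define AD1836_DAC_WORD_LEN_MASK        0x18
#define AD1836_DAC_WORD_LEN_OFFSET      3
#define AD1836_WORD_LEN_24              0x0
#define AD1836_WORD_LEN_20              0x1
#define AD1836_WORD_LEN_16              0x2

int main(void)
{
        int lens[] = { AD1836_WORD_LEN_24, AD1836_WORD_LEN_20, AD1836_WORD_LEN_16 };
        int i;

        for (i = 0; i < 3; i++) {
                int field = lens[i] << AD1836_DAC_WORD_LEN_OFFSET;

                /* each encoded value must fall inside the register mask */
                printf("word_len 0x%x -> field 0x%02x (%s)\n", lens[i], field,
                       (field & ~AD1836_DAC_WORD_LEN_MASK) ? "outside mask" : "ok");
        }
        return 0;
}

All three values land inside the 0x18 mask (0x00, 0x08 and 0x10), which is what snd_soc_update_bits() relies on when it applies the mask.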
diff --git a/sound/soc/codecs/wm8804.c b/sound/soc/codecs/wm8804.c
index 6785688f8806..9a5e67c5a6bd 100644
--- a/sound/soc/codecs/wm8804.c
+++ b/sound/soc/codecs/wm8804.c
@@ -680,20 +680,25 @@ static struct snd_soc_dai_ops wm8804_dai_ops = {
680#define WM8804_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ 680#define WM8804_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
681 SNDRV_PCM_FMTBIT_S24_LE) 681 SNDRV_PCM_FMTBIT_S24_LE)
682 682
683#define WM8804_RATES (SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
684 SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
685 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
686 SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
687
683static struct snd_soc_dai_driver wm8804_dai = { 688static struct snd_soc_dai_driver wm8804_dai = {
684 .name = "wm8804-spdif", 689 .name = "wm8804-spdif",
685 .playback = { 690 .playback = {
686 .stream_name = "Playback", 691 .stream_name = "Playback",
687 .channels_min = 2, 692 .channels_min = 2,
688 .channels_max = 2, 693 .channels_max = 2,
689 .rates = SNDRV_PCM_RATE_8000_192000, 694 .rates = WM8804_RATES,
690 .formats = WM8804_FORMATS, 695 .formats = WM8804_FORMATS,
691 }, 696 },
692 .capture = { 697 .capture = {
693 .stream_name = "Capture", 698 .stream_name = "Capture",
694 .channels_min = 2, 699 .channels_min = 2,
695 .channels_max = 2, 700 .channels_max = 2,
696 .rates = SNDRV_PCM_RATE_8000_192000, 701 .rates = WM8804_RATES,
697 .formats = WM8804_FORMATS, 702 .formats = WM8804_FORMATS,
698 }, 703 },
699 .ops = &wm8804_dai_ops, 704 .ops = &wm8804_dai_ops,
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c
index a0b1a7278284..e2ab4fac2819 100644
--- a/sound/soc/codecs/wm8915.c
+++ b/sound/soc/codecs/wm8915.c
@@ -1839,7 +1839,7 @@ static int wm8915_set_sysclk(struct snd_soc_dai *dai,
1839 int old; 1839 int old;
1840 1840
1841 /* Disable SYSCLK while we reconfigure */ 1841 /* Disable SYSCLK while we reconfigure */
1842 old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1); 1842 old = snd_soc_read(codec, WM8915_AIF_CLOCKING_1) & WM8915_SYSCLK_ENA;
1843 snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1, 1843 snd_soc_update_bits(codec, WM8915_AIF_CLOCKING_1,
1844 WM8915_SYSCLK_ENA, 0); 1844 WM8915_SYSCLK_ENA, 0);
1845 1845
@@ -2038,6 +2038,7 @@ static int wm8915_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
2038 break; 2038 break;
2039 case WM8915_FLL_MCLK2: 2039 case WM8915_FLL_MCLK2:
2040 reg = 1; 2040 reg = 1;
2041 break;
2041 case WM8915_FLL_DACLRCLK1: 2042 case WM8915_FLL_DACLRCLK1:
2042 reg = 2; 2043 reg = 2;
2043 break; 2044 break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index f90ae427242b..5e05eed96c38 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -1999,12 +1999,12 @@ static int wm8962_put_hp_sw(struct snd_kcontrol *kcontrol,
1999 return 0; 1999 return 0;
2000 2000
2001 /* If the left PGA is enabled hit that VU bit... */ 2001 /* If the left PGA is enabled hit that VU bit... */
2002 if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTL_PGA_ENA) 2002 if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTL_PGA_ENA)
2003 return snd_soc_write(codec, WM8962_HPOUTL_VOLUME, 2003 return snd_soc_write(codec, WM8962_HPOUTL_VOLUME,
2004 reg_cache[WM8962_HPOUTL_VOLUME]); 2004 reg_cache[WM8962_HPOUTL_VOLUME]);
2005 2005
2006 /* ...otherwise the right. The VU is stereo. */ 2006 /* ...otherwise the right. The VU is stereo. */
2007 if (reg_cache[WM8962_PWR_MGMT_2] & WM8962_HPOUTR_PGA_ENA) 2007 if (snd_soc_read(codec, WM8962_PWR_MGMT_2) & WM8962_HPOUTR_PGA_ENA)
2008 return snd_soc_write(codec, WM8962_HPOUTR_VOLUME, 2008 return snd_soc_write(codec, WM8962_HPOUTR_VOLUME,
2009 reg_cache[WM8962_HPOUTR_VOLUME]); 2009 reg_cache[WM8962_HPOUTR_VOLUME]);
2010 2010
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index e55b298c14a0..9e370d14ad88 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -215,23 +215,23 @@ static const struct snd_kcontrol_new analogue_snd_controls[] = {
215SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0, 215SOC_SINGLE_TLV("IN1L Volume", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
216 inpga_tlv), 216 inpga_tlv),
217SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1), 217SOC_SINGLE("IN1L Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
218SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 7, 1, 0), 218SOC_SINGLE("IN1L ZC Switch", WM8993_LEFT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
219 219
220SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0, 220SOC_SINGLE_TLV("IN1R Volume", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 0, 31, 0,
221 inpga_tlv), 221 inpga_tlv),
222SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1), 222SOC_SINGLE("IN1R Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 1),
223SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 7, 1, 0), 223SOC_SINGLE("IN1R ZC Switch", WM8993_RIGHT_LINE_INPUT_1_2_VOLUME, 6, 1, 0),
224 224
225 225
226SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0, 226SOC_SINGLE_TLV("IN2L Volume", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
227 inpga_tlv), 227 inpga_tlv),
228SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1), 228SOC_SINGLE("IN2L Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
229SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 7, 1, 0), 229SOC_SINGLE("IN2L ZC Switch", WM8993_LEFT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
230 230
231SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0, 231SOC_SINGLE_TLV("IN2R Volume", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 0, 31, 0,
232 inpga_tlv), 232 inpga_tlv),
233SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1), 233SOC_SINGLE("IN2R Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 1),
234SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 7, 1, 0), 234SOC_SINGLE("IN2R ZC Switch", WM8993_RIGHT_LINE_INPUT_3_4_VOLUME, 6, 1, 0),
235 235
236SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0, 236SOC_SINGLE_TLV("MIXINL IN2L Volume", WM8993_INPUT_MIXER3, 7, 1, 0,
237 inmix_sw_tlv), 237 inmix_sw_tlv),
diff --git a/sound/soc/fsl/fsl_dma.c b/sound/soc/fsl/fsl_dma.c
index 15dac0f20cd8..6680c0b4d203 100644
--- a/sound/soc/fsl/fsl_dma.c
+++ b/sound/soc/fsl/fsl_dma.c
@@ -310,7 +310,7 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
310 * should allocate a DMA buffer only for the streams that are valid. 310 * should allocate a DMA buffer only for the streams that are valid.
311 */ 311 */
312 312
313 if (dai->driver->playback.channels_min) { 313 if (pcm->streams[0].substream) {
314 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev, 314 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
315 fsl_dma_hardware.buffer_bytes_max, 315 fsl_dma_hardware.buffer_bytes_max,
316 &pcm->streams[0].substream->dma_buffer); 316 &pcm->streams[0].substream->dma_buffer);
@@ -320,13 +320,13 @@ static int fsl_dma_new(struct snd_card *card, struct snd_soc_dai *dai,
320 } 320 }
321 } 321 }
322 322
323 if (dai->driver->capture.channels_min) { 323 if (pcm->streams[1].substream) {
324 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev, 324 ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, card->dev,
325 fsl_dma_hardware.buffer_bytes_max, 325 fsl_dma_hardware.buffer_bytes_max,
326 &pcm->streams[1].substream->dma_buffer); 326 &pcm->streams[1].substream->dma_buffer);
327 if (ret) { 327 if (ret) {
328 snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
329 dev_err(card->dev, "can't alloc capture dma buffer\n"); 328 dev_err(card->dev, "can't alloc capture dma buffer\n");
329 snd_dma_free_pages(&pcm->streams[0].substream->dma_buffer);
330 return ret; 330 return ret;
331 } 331 }
332 } 332 }
@@ -449,7 +449,8 @@ static int fsl_dma_open(struct snd_pcm_substream *substream)
449 dma_private->ld_buf_phys = ld_buf_phys; 449 dma_private->ld_buf_phys = ld_buf_phys;
450 dma_private->dma_buf_phys = substream->dma_buffer.addr; 450 dma_private->dma_buf_phys = substream->dma_buffer.addr;
451 451
452 ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "DMA", dma_private); 452 ret = request_irq(dma_private->irq, fsl_dma_isr, 0, "fsldma-audio",
453 dma_private);
453 if (ret) { 454 if (ret) {
454 dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n", 455 dev_err(dev, "can't register ISR for IRQ %u (ret=%i)\n",
455 dma_private->irq, ret); 456 dma_private->irq, ret);
diff --git a/sound/soc/samsung/i2s.c b/sound/soc/samsung/i2s.c
index ffa09b3b2caa..992a732b5211 100644
--- a/sound/soc/samsung/i2s.c
+++ b/sound/soc/samsung/i2s.c
@@ -191,7 +191,7 @@ static inline bool tx_active(struct i2s_dai *i2s)
191 if (!i2s) 191 if (!i2s)
192 return false; 192 return false;
193 193
194 active = readl(i2s->addr + I2SMOD); 194 active = readl(i2s->addr + I2SCON);
195 195
196 if (is_secondary(i2s)) 196 if (is_secondary(i2s))
197 active &= CON_TXSDMA_ACTIVE; 197 active &= CON_TXSDMA_ACTIVE;
@@ -223,7 +223,7 @@ static inline bool rx_active(struct i2s_dai *i2s)
223 if (!i2s) 223 if (!i2s)
224 return false; 224 return false;
225 225
226 active = readl(i2s->addr + I2SMOD) & CON_RXDMA_ACTIVE; 226 active = readl(i2s->addr + I2SCON) & CON_RXDMA_ACTIVE;
227 227
228 return active ? true : false; 228 return active ? true : false;
229} 229}
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index 06b7b81a1601..c005ceb70c9d 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -466,6 +466,9 @@ static bool snd_soc_set_cache_val(void *base, unsigned int idx,
466static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx, 466static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
467 unsigned int word_size) 467 unsigned int word_size)
468{ 468{
469 if (!base)
470 return -1;
471
469 switch (word_size) { 472 switch (word_size) {
470 case 1: { 473 case 1: {
471 const u8 *cache = base; 474 const u8 *cache = base;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 999bb08cdfb1..32ab7fc4579a 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -325,6 +325,7 @@ static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
325} 325}
326 326
327static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm, 327static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
328 struct snd_soc_dapm_widget *kcontrolw,
328 const struct snd_kcontrol_new *kcontrol_new, 329 const struct snd_kcontrol_new *kcontrol_new,
329 struct snd_kcontrol **kcontrol) 330 struct snd_kcontrol **kcontrol)
330{ 331{
@@ -334,6 +335,8 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
334 *kcontrol = NULL; 335 *kcontrol = NULL;
335 336
336 list_for_each_entry(w, &dapm->card->widgets, list) { 337 list_for_each_entry(w, &dapm->card->widgets, list) {
338 if (w == kcontrolw || w->dapm != kcontrolw->dapm)
339 continue;
337 for (i = 0; i < w->num_kcontrols; i++) { 340 for (i = 0; i < w->num_kcontrols; i++) {
338 if (&w->kcontrol_news[i] == kcontrol_new) { 341 if (&w->kcontrol_news[i] == kcontrol_new) {
339 if (w->kcontrols) 342 if (w->kcontrols)
@@ -347,9 +350,9 @@ static int dapm_is_shared_kcontrol(struct snd_soc_dapm_context *dapm,
347} 350}
348 351
349/* create new dapm mixer control */ 352/* create new dapm mixer control */
350static int dapm_new_mixer(struct snd_soc_dapm_context *dapm, 353static int dapm_new_mixer(struct snd_soc_dapm_widget *w)
351 struct snd_soc_dapm_widget *w)
352{ 354{
355 struct snd_soc_dapm_context *dapm = w->dapm;
353 int i, ret = 0; 356 int i, ret = 0;
354 size_t name_len, prefix_len; 357 size_t name_len, prefix_len;
355 struct snd_soc_dapm_path *path; 358 struct snd_soc_dapm_path *path;
@@ -447,9 +450,9 @@ static int dapm_new_mixer(struct snd_soc_dapm_context *dapm,
447} 450}
448 451
449/* create new dapm mux control */ 452/* create new dapm mux control */
450static int dapm_new_mux(struct snd_soc_dapm_context *dapm, 453static int dapm_new_mux(struct snd_soc_dapm_widget *w)
451 struct snd_soc_dapm_widget *w)
452{ 454{
455 struct snd_soc_dapm_context *dapm = w->dapm;
453 struct snd_soc_dapm_path *path = NULL; 456 struct snd_soc_dapm_path *path = NULL;
454 struct snd_kcontrol *kcontrol; 457 struct snd_kcontrol *kcontrol;
455 struct snd_card *card = dapm->card->snd_card; 458 struct snd_card *card = dapm->card->snd_card;
@@ -468,7 +471,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
468 return -EINVAL; 471 return -EINVAL;
469 } 472 }
470 473
471 shared = dapm_is_shared_kcontrol(dapm, &w->kcontrol_news[0], 474 shared = dapm_is_shared_kcontrol(dapm, w, &w->kcontrol_news[0],
472 &kcontrol); 475 &kcontrol);
473 if (kcontrol) { 476 if (kcontrol) {
474 wlist = kcontrol->private_data; 477 wlist = kcontrol->private_data;
@@ -532,8 +535,7 @@ static int dapm_new_mux(struct snd_soc_dapm_context *dapm,
532} 535}
533 536
534/* create new dapm volume control */ 537/* create new dapm volume control */
535static int dapm_new_pga(struct snd_soc_dapm_context *dapm, 538static int dapm_new_pga(struct snd_soc_dapm_widget *w)
536 struct snd_soc_dapm_widget *w)
537{ 539{
538 if (w->num_kcontrols) 540 if (w->num_kcontrols)
539 dev_err(w->dapm->dev, 541 dev_err(w->dapm->dev,
@@ -1823,13 +1825,13 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
1823 case snd_soc_dapm_mixer: 1825 case snd_soc_dapm_mixer:
1824 case snd_soc_dapm_mixer_named_ctl: 1826 case snd_soc_dapm_mixer_named_ctl:
1825 w->power_check = dapm_generic_check_power; 1827 w->power_check = dapm_generic_check_power;
1826 dapm_new_mixer(dapm, w); 1828 dapm_new_mixer(w);
1827 break; 1829 break;
1828 case snd_soc_dapm_mux: 1830 case snd_soc_dapm_mux:
1829 case snd_soc_dapm_virt_mux: 1831 case snd_soc_dapm_virt_mux:
1830 case snd_soc_dapm_value_mux: 1832 case snd_soc_dapm_value_mux:
1831 w->power_check = dapm_generic_check_power; 1833 w->power_check = dapm_generic_check_power;
1832 dapm_new_mux(dapm, w); 1834 dapm_new_mux(w);
1833 break; 1835 break;
1834 case snd_soc_dapm_adc: 1836 case snd_soc_dapm_adc:
1835 case snd_soc_dapm_aif_out: 1837 case snd_soc_dapm_aif_out:
@@ -1842,7 +1844,7 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
1842 case snd_soc_dapm_pga: 1844 case snd_soc_dapm_pga:
1843 case snd_soc_dapm_out_drv: 1845 case snd_soc_dapm_out_drv:
1844 w->power_check = dapm_generic_check_power; 1846 w->power_check = dapm_generic_check_power;
1845 dapm_new_pga(dapm, w); 1847 dapm_new_pga(w);
1846 break; 1848 break;
1847 case snd_soc_dapm_input: 1849 case snd_soc_dapm_input:
1848 case snd_soc_dapm_output: 1850 case snd_soc_dapm_output:
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index d47beffedb0f..1e3ae3327dd3 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -227,6 +227,7 @@ static int usb6fire_fw_ezusb_upload(
227 ret = usb6fire_fw_ihex_init(fw, rec); 227 ret = usb6fire_fw_ihex_init(fw, rec);
228 if (ret < 0) { 228 if (ret < 0) {
229 kfree(rec); 229 kfree(rec);
230 release_firmware(fw);
230 snd_printk(KERN_ERR PREFIX "error validating ezusb " 231 snd_printk(KERN_ERR PREFIX "error validating ezusb "
231 "firmware %s.\n", fwname); 232 "firmware %s.\n", fwname);
232 return ret; 233 return ret;
@@ -269,7 +270,6 @@ static int usb6fire_fw_ezusb_upload(
269 data = 0x00; /* resume ezusb cpu */ 270 data = 0x00; /* resume ezusb cpu */
270 ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1); 271 ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
271 if (ret < 0) { 272 if (ret < 0) {
272 release_firmware(fw);
273 snd_printk(KERN_ERR PREFIX "unable to upload ezusb " 273 snd_printk(KERN_ERR PREFIX "unable to upload ezusb "
274 "firmware %s: end message.\n", fwname); 274 "firmware %s: end message.\n", fwname);
275 return ret; 275 return ret;
diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c
index b137b25865cc..d144cdb2f159 100644
--- a/sound/usb/6fire/pcm.c
+++ b/sound/usb/6fire/pcm.c
@@ -395,12 +395,12 @@ static int usb6fire_pcm_open(struct snd_pcm_substream *alsa_sub)
395 alsa_rt->hw = pcm_hw; 395 alsa_rt->hw = pcm_hw;
396 396
397 if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) { 397 if (alsa_sub->stream == SNDRV_PCM_STREAM_PLAYBACK) {
398 if (rt->rate >= 0) 398 if (rt->rate < ARRAY_SIZE(rates))
399 alsa_rt->hw.rates = rates_alsaid[rt->rate]; 399 alsa_rt->hw.rates = rates_alsaid[rt->rate];
400 alsa_rt->hw.channels_max = OUT_N_CHANNELS; 400 alsa_rt->hw.channels_max = OUT_N_CHANNELS;
401 sub = &rt->playback; 401 sub = &rt->playback;
402 } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) { 402 } else if (alsa_sub->stream == SNDRV_PCM_STREAM_CAPTURE) {
403 if (rt->rate >= 0) 403 if (rt->rate < ARRAY_SIZE(rates))
404 alsa_rt->hw.rates = rates_alsaid[rt->rate]; 404 alsa_rt->hw.rates = rates_alsaid[rt->rate];
405 alsa_rt->hw.channels_max = IN_N_CHANNELS; 405 alsa_rt->hw.channels_max = IN_N_CHANNELS;
406 sub = &rt->capture; 406 sub = &rt->capture;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 2e969cbb393b..090e1930dfdc 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -403,7 +403,7 @@ static int snd_usb_cm106_boot_quirk(struct usb_device *dev)
403static int snd_usb_cm6206_boot_quirk(struct usb_device *dev) 403static int snd_usb_cm6206_boot_quirk(struct usb_device *dev)
404{ 404{
405 int err, reg; 405 int err, reg;
406 int val[] = {0x200c, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000}; 406 int val[] = {0x2004, 0x3000, 0xf800, 0x143f, 0x0000, 0x3000};
407 407
408 for (reg = 0; reg < ARRAY_SIZE(val); reg++) { 408 for (reg = 0; reg < ARRAY_SIZE(val); reg++) {
409 err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]); 409 err = snd_usb_cm106_write_int_reg(dev, reg, val[reg]);
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 032ba6398a5c..940257b5774e 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -633,7 +633,7 @@ prefix_SQ = $(subst ','\'',$(prefix))
633 633
634SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH)) 634SHELL_PATH_SQ = $(subst ','\'',$(SHELL_PATH))
635 635
636LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive $(EXTLIBS) 636LIBS = -Wl,--whole-archive $(PERFLIBS) -Wl,--no-whole-archive -Wl,--start-group $(EXTLIBS) -Wl,--end-group
637 637
638ALL_CFLAGS += $(BASIC_CFLAGS) 638ALL_CFLAGS += $(BASIC_CFLAGS)
639ALL_CFLAGS += $(ARCH_CFLAGS) 639ALL_CFLAGS += $(ARCH_CFLAGS)
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index b67186228c89..2da9162262b0 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -474,7 +474,7 @@ static int test__basic_mmap(void)
474 unsigned int nr_events[nsyscalls], 474 unsigned int nr_events[nsyscalls],
475 expected_nr_events[nsyscalls], i, j; 475 expected_nr_events[nsyscalls], i, j;
476 struct perf_evsel *evsels[nsyscalls], *evsel; 476 struct perf_evsel *evsels[nsyscalls], *evsel;
477 int sample_size = perf_sample_size(attr.sample_type); 477 int sample_size = __perf_evsel__sample_size(attr.sample_type);
478 478
479 for (i = 0; i < nsyscalls; ++i) { 479 for (i = 0; i < nsyscalls; ++i) {
480 char name[64]; 480 char name[64];
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index 26d4d3fd6deb..ad73300f7bac 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -23,12 +23,7 @@ if test -d ../../.git -o -f ../../.git &&
23then 23then
24 VN=$(echo "$VN" | sed -e 's/-/./g'); 24 VN=$(echo "$VN" | sed -e 's/-/./g');
25else 25else
26 eval $(grep '^VERSION[[:space:]]*=' ../../Makefile|tr -d ' ') 26 VN=$(MAKEFLAGS= make -sC ../.. kernelversion)
27 eval $(grep '^PATCHLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
28 eval $(grep '^SUBLEVEL[[:space:]]*=' ../../Makefile|tr -d ' ')
29 eval $(grep '^EXTRAVERSION[[:space:]]*=' ../../Makefile|tr -d ' ')
30
31 VN="${VERSION}.${PATCHLEVEL}.${SUBLEVEL}${EXTRAVERSION}"
32fi 27fi
33 28
34VN=$(expr "$VN" : v*'\(.*\)') 29VN=$(expr "$VN" : v*'\(.*\)')
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 0fe9adf76379..3c1b8a632101 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -35,22 +35,6 @@ const char *perf_event__name(unsigned int id)
35 return perf_event__names[id]; 35 return perf_event__names[id];
36} 36}
37 37
38int perf_sample_size(u64 sample_type)
39{
40 u64 mask = sample_type & PERF_SAMPLE_MASK;
41 int size = 0;
42 int i;
43
44 for (i = 0; i < 64; i++) {
45 if (mask & (1ULL << i))
46 size++;
47 }
48
49 size *= sizeof(u64);
50
51 return size;
52}
53
54static struct perf_sample synth_sample = { 38static struct perf_sample synth_sample = {
55 .pid = -1, 39 .pid = -1,
56 .tid = -1, 40 .tid = -1,
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index c08332871408..1d7f66488a88 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -82,8 +82,6 @@ struct perf_sample {
82 struct ip_callchain *callchain; 82 struct ip_callchain *callchain;
83}; 83};
84 84
85int perf_sample_size(u64 sample_type);
86
87#define BUILD_ID_SIZE 20 85#define BUILD_ID_SIZE 20
88 86
89struct build_id_event { 87struct build_id_event {
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 50aa34879c33..b021ea9265c3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -12,7 +12,6 @@
12#include "evlist.h" 12#include "evlist.h"
13#include "evsel.h" 13#include "evsel.h"
14#include "util.h" 14#include "util.h"
15#include "debug.h"
16 15
17#include <sys/mman.h> 16#include <sys/mman.h>
18 17
@@ -257,19 +256,15 @@ int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
257 return evlist->mmap != NULL ? 0 : -ENOMEM; 256 return evlist->mmap != NULL ? 0 : -ENOMEM;
258} 257}
259 258
260static int __perf_evlist__mmap(struct perf_evlist *evlist, struct perf_evsel *evsel, 259static int __perf_evlist__mmap(struct perf_evlist *evlist,
261 int idx, int prot, int mask, int fd) 260 int idx, int prot, int mask, int fd)
262{ 261{
263 evlist->mmap[idx].prev = 0; 262 evlist->mmap[idx].prev = 0;
264 evlist->mmap[idx].mask = mask; 263 evlist->mmap[idx].mask = mask;
265 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot, 264 evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
266 MAP_SHARED, fd, 0); 265 MAP_SHARED, fd, 0);
267 if (evlist->mmap[idx].base == MAP_FAILED) { 266 if (evlist->mmap[idx].base == MAP_FAILED)
268 if (evlist->cpus->map[idx] == -1 && evsel->attr.inherit)
269 ui__warning("Inherit is not allowed on per-task "
270 "events using mmap.\n");
271 return -1; 267 return -1;
272 }
273 268
274 perf_evlist__add_pollfd(evlist, fd); 269 perf_evlist__add_pollfd(evlist, fd);
275 return 0; 270 return 0;
@@ -289,7 +284,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
289 284
290 if (output == -1) { 285 if (output == -1) {
291 output = fd; 286 output = fd;
292 if (__perf_evlist__mmap(evlist, evsel, cpu, 287 if (__perf_evlist__mmap(evlist, cpu,
293 prot, mask, output) < 0) 288 prot, mask, output) < 0)
294 goto out_unmap; 289 goto out_unmap;
295 } else { 290 } else {
@@ -329,7 +324,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
329 324
330 if (output == -1) { 325 if (output == -1) {
331 output = fd; 326 output = fd;
332 if (__perf_evlist__mmap(evlist, evsel, thread, 327 if (__perf_evlist__mmap(evlist, thread,
333 prot, mask, output) < 0) 328 prot, mask, output) < 0)
334 goto out_unmap; 329 goto out_unmap;
335 } else { 330 } else {
@@ -460,33 +455,46 @@ int perf_evlist__set_filters(struct perf_evlist *evlist)
460 return 0; 455 return 0;
461} 456}
462 457
463u64 perf_evlist__sample_type(struct perf_evlist *evlist) 458bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
464{ 459{
465 struct perf_evsel *pos; 460 struct perf_evsel *pos, *first;
466 u64 type = 0; 461
467 462 pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
468 list_for_each_entry(pos, &evlist->entries, node) { 463
469 if (!type) 464 list_for_each_entry_continue(pos, &evlist->entries, node) {
470 type = pos->attr.sample_type; 465 if (first->attr.sample_type != pos->attr.sample_type)
471 else if (type != pos->attr.sample_type) 466 return false;
472 die("non matching sample_type");
473 } 467 }
474 468
475 return type; 469 return true;
476} 470}
477 471
478bool perf_evlist__sample_id_all(const struct perf_evlist *evlist) 472u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
473{
474 struct perf_evsel *first;
475
476 first = list_entry(evlist->entries.next, struct perf_evsel, node);
477 return first->attr.sample_type;
478}
479
480bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
479{ 481{
480 bool value = false, first = true; 482 struct perf_evsel *pos, *first;
481 struct perf_evsel *pos; 483
482 484 pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);
483 list_for_each_entry(pos, &evlist->entries, node) { 485
484 if (first) { 486 list_for_each_entry_continue(pos, &evlist->entries, node) {
485 value = pos->attr.sample_id_all; 487 if (first->attr.sample_id_all != pos->attr.sample_id_all)
486 first = false; 488 return false;
487 } else if (value != pos->attr.sample_id_all)
488 die("non matching sample_id_all");
489 } 489 }
490 490
491 return value; 491 return true;
492}
493
494bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
495{
496 struct perf_evsel *first;
497
498 first = list_entry(evlist->entries.next, struct perf_evsel, node);
499 return first->attr.sample_id_all;
492} 500}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 0a1ef1f051f0..b2b862374f37 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -66,7 +66,9 @@ int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
66void perf_evlist__delete_maps(struct perf_evlist *evlist); 66void perf_evlist__delete_maps(struct perf_evlist *evlist);
67int perf_evlist__set_filters(struct perf_evlist *evlist); 67int perf_evlist__set_filters(struct perf_evlist *evlist);
68 68
69u64 perf_evlist__sample_type(struct perf_evlist *evlist); 69u64 perf_evlist__sample_type(const struct perf_evlist *evlist);
 70bool perf_evlist__sample_id_all(const struct perf_evlist *evlist); 70bool perf_evlist__sample_id_all(const struct perf_evlist *evlist);
71 71
72bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist);
73bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist);
72#endif /* __PERF_EVLIST_H */ 74#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index cca29ededb5b..0239eb87b232 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -15,6 +15,22 @@
15 15
16#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 16#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
17 17
18int __perf_evsel__sample_size(u64 sample_type)
19{
20 u64 mask = sample_type & PERF_SAMPLE_MASK;
21 int size = 0;
22 int i;
23
24 for (i = 0; i < 64; i++) {
25 if (mask & (1ULL << i))
26 size++;
27 }
28
29 size *= sizeof(u64);
30
31 return size;
32}
33
18void perf_evsel__init(struct perf_evsel *evsel, 34void perf_evsel__init(struct perf_evsel *evsel,
19 struct perf_event_attr *attr, int idx) 35 struct perf_event_attr *attr, int idx)
20{ 36{
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index f79bb2c09a6c..7e9366e4490b 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -149,4 +149,11 @@ static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
149 return __perf_evsel__read(evsel, ncpus, nthreads, true); 149 return __perf_evsel__read(evsel, ncpus, nthreads, true);
150} 150}
151 151
152int __perf_evsel__sample_size(u64 sample_type);
153
154static inline int perf_evsel__sample_size(struct perf_evsel *evsel)
155{
156 return __perf_evsel__sample_size(evsel->attr.sample_type);
157}
158
152#endif /* __PERF_EVSEL_H */ 159#endif /* __PERF_EVSEL_H */
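__perf_evsel__sample_size() above just counts how many of the fixed-size sample_type bits are set and multiplies by sizeof(u64), since each selected field occupies one 64-bit slot in the sample record: for PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME that is 3 * 8 = 24 bytes. A standalone sketch of the same arithmetic (the bit values match <linux/perf_event.h>, but the reduced mask here is only an illustration, not the real PERF_SAMPLE_MASK):

#include <stdint.h>
#include <stdio.h>

#define PERF_SAMPLE_IP          (1ULL << 0)
#define PERF_SAMPLE_TID         (1ULL << 1)
#define PERF_SAMPLE_TIME        (1ULL << 2)

/* reduced stand-in for PERF_SAMPLE_MASK: the fixed-size u64 fields */
#define SAMPLE_MASK     (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME)

static int sample_size(uint64_t sample_type)
{
        /* one u64 per selected field */
        return __builtin_popcountll(sample_type & SAMPLE_MASK) * sizeof(uint64_t);
}

int main(void)
{
        printf("%d\n", sample_size(PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                                   PERF_SAMPLE_TIME));  /* prints 24 */
        return 0;
}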
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 69436b3200a4..a9ac0504aabd 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -674,7 +674,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
674 struct perf_evlist *evlist = &pevlist->evlist; 674 struct perf_evlist *evlist = &pevlist->evlist;
675 union perf_event *event; 675 union perf_event *event;
676 int sample_id_all = 1, cpu; 676 int sample_id_all = 1, cpu;
677 static char *kwlist[] = {"sample_id_all", NULL, NULL}; 677 static char *kwlist[] = {"cpu", "sample_id_all", NULL, NULL};
678 int err; 678 int err;
679 679
680 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist, 680 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|i", kwlist,
@@ -692,16 +692,14 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
692 692
693 first = list_entry(evlist->entries.next, struct perf_evsel, node); 693 first = list_entry(evlist->entries.next, struct perf_evsel, node);
694 err = perf_event__parse_sample(event, first->attr.sample_type, 694 err = perf_event__parse_sample(event, first->attr.sample_type,
695 perf_sample_size(first->attr.sample_type), 695 perf_evsel__sample_size(first),
696 sample_id_all, &pevent->sample); 696 sample_id_all, &pevent->sample);
697 if (err) { 697 if (err)
698 pr_err("Can't parse sample, err = %d\n", err); 698 return PyErr_Format(PyExc_OSError,
699 goto end; 699 "perf: can't parse sample, err=%d", err);
700 }
701
702 return pyevent; 700 return pyevent;
703 } 701 }
704end: 702
705 Py_INCREF(Py_None); 703 Py_INCREF(Py_None);
706 return Py_None; 704 return Py_None;
707} 705}
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 64500fc78799..f5a8fbdd3f76 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -58,6 +58,16 @@ static int perf_session__open(struct perf_session *self, bool force)
58 goto out_close; 58 goto out_close;
59 } 59 }
60 60
61 if (!perf_evlist__valid_sample_type(self->evlist)) {
62 pr_err("non matching sample_type");
63 goto out_close;
64 }
65
66 if (!perf_evlist__valid_sample_id_all(self->evlist)) {
67 pr_err("non matching sample_id_all");
68 goto out_close;
69 }
70
61 self->size = input_stat.st_size; 71 self->size = input_stat.st_size;
62 return 0; 72 return 0;
63 73
@@ -97,7 +107,7 @@ out:
97void perf_session__update_sample_type(struct perf_session *self) 107void perf_session__update_sample_type(struct perf_session *self)
98{ 108{
99 self->sample_type = perf_evlist__sample_type(self->evlist); 109 self->sample_type = perf_evlist__sample_type(self->evlist);
100 self->sample_size = perf_sample_size(self->sample_type); 110 self->sample_size = __perf_evsel__sample_size(self->sample_type);
101 self->sample_id_all = perf_evlist__sample_id_all(self->evlist); 111 self->sample_id_all = perf_evlist__sample_id_all(self->evlist);
102 perf_session__id_header_size(self); 112 perf_session__id_header_size(self);
103} 113}
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 1e88485c16a0..0a7ed5b5e281 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -2187,6 +2187,7 @@ static const struct flag flags[] = {
2187 { "TASKLET_SOFTIRQ", 6 }, 2187 { "TASKLET_SOFTIRQ", 6 },
2188 { "SCHED_SOFTIRQ", 7 }, 2188 { "SCHED_SOFTIRQ", 7 },
2189 { "HRTIMER_SOFTIRQ", 8 }, 2189 { "HRTIMER_SOFTIRQ", 8 },
2190 { "RCU_SOFTIRQ", 9 },
2190 2191
2191 { "HRTIMER_NORESTART", 0 }, 2192 { "HRTIMER_NORESTART", 0 },
2192 { "HRTIMER_RESTART", 1 }, 2193 { "HRTIMER_RESTART", 1 },
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 22cdb960660a..96ebc0679415 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -467,12 +467,8 @@ static struct kvm *kvm_create_vm(void)
467 if (!kvm->buses[i]) 467 if (!kvm->buses[i])
468 goto out_err; 468 goto out_err;
469 } 469 }
470 spin_lock_init(&kvm->mmu_lock);
471
472 r = kvm_init_mmu_notifier(kvm);
473 if (r)
474 goto out_err;
475 470
471 spin_lock_init(&kvm->mmu_lock);
476 kvm->mm = current->mm; 472 kvm->mm = current->mm;
477 atomic_inc(&kvm->mm->mm_count); 473 atomic_inc(&kvm->mm->mm_count);
478 kvm_eventfd_init(kvm); 474 kvm_eventfd_init(kvm);
@@ -480,6 +476,11 @@ static struct kvm *kvm_create_vm(void)
480 mutex_init(&kvm->irq_lock); 476 mutex_init(&kvm->irq_lock);
481 mutex_init(&kvm->slots_lock); 477 mutex_init(&kvm->slots_lock);
482 atomic_set(&kvm->users_count, 1); 478 atomic_set(&kvm->users_count, 1);
479
480 r = kvm_init_mmu_notifier(kvm);
481 if (r)
482 goto out_err;
483
483 raw_spin_lock(&kvm_lock); 484 raw_spin_lock(&kvm_lock);
484 list_add(&kvm->vm_list, &vm_list); 485 list_add(&kvm->vm_list, &vm_list);
485 raw_spin_unlock(&kvm_lock); 486 raw_spin_unlock(&kvm_lock);
@@ -651,7 +652,9 @@ int __kvm_set_memory_region(struct kvm *kvm,
651 /* We can read the guest memory with __xxx_user() later on. */ 652 /* We can read the guest memory with __xxx_user() later on. */
652 if (user_alloc && 653 if (user_alloc &&
653 ((mem->userspace_addr & (PAGE_SIZE - 1)) || 654 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
654 !access_ok(VERIFY_WRITE, mem->userspace_addr, mem->memory_size))) 655 !access_ok(VERIFY_WRITE,
656 (void __user *)(unsigned long)mem->userspace_addr,
657 mem->memory_size)))
655 goto out; 658 goto out;
656 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS) 659 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
657 goto out; 660 goto out;