author    Ingo Molnar <mingo@elte.hu>  2011-03-15 03:29:44 -0400
committer Ingo Molnar <mingo@elte.hu>  2011-03-15 03:29:44 -0400
commit    8460b3e5bc64955aeefdd8357b3bf7b5ff79b3f2
tree      7e5f6d050b72ab08a4497e82a4a103fefb086e80
parent    56396e6823fe9b42fe9cf9403d6ed67756255f70
parent    521cb40b0c44418a4fd36dc633f575813d59a43d

Merge commit 'v2.6.38' into x86/mm

Conflicts:
	arch/x86/mm/numa_64.c

Merge reason: Resolve the conflict, update the branch to .38.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--.gitignore1
-rw-r--r--Documentation/DocBook/drm.tmpl6
-rw-r--r--Documentation/DocBook/filesystems.tmpl5
-rw-r--r--Documentation/arm/Booting33
-rw-r--r--Documentation/devicetree/booting-without-of.txt40
-rw-r--r--Documentation/hwmon/jc4221
-rw-r--r--Documentation/hwmon/k10temp8
-rw-r--r--Documentation/kernel-parameters.txt24
-rw-r--r--Documentation/networking/00-INDEX6
-rw-r--r--Documentation/networking/Makefile2
-rw-r--r--Documentation/networking/dns_resolver.txt9
-rw-r--r--Documentation/workqueue.txt4
-rw-r--r--MAINTAINERS35
-rw-r--r--Makefile2
-rw-r--r--arch/alpha/Kconfig1
-rw-r--r--arch/alpha/kernel/irq.c13
-rw-r--r--arch/alpha/kernel/irq_alpha.c11
-rw-r--r--arch/alpha/kernel/irq_i8259.c18
-rw-r--r--arch/alpha/kernel/irq_impl.h8
-rw-r--r--arch/alpha/kernel/irq_pyxis.c20
-rw-r--r--arch/alpha/kernel/irq_srm.c16
-rw-r--r--arch/alpha/kernel/sys_alcor.c28
-rw-r--r--arch/alpha/kernel/sys_cabriolet.c16
-rw-r--r--arch/alpha/kernel/sys_dp264.c52
-rw-r--r--arch/alpha/kernel/sys_eb64p.c18
-rw-r--r--arch/alpha/kernel/sys_eiger.c14
-rw-r--r--arch/alpha/kernel/sys_jensen.c24
-rw-r--r--arch/alpha/kernel/sys_marvel.c42
-rw-r--r--arch/alpha/kernel/sys_mikasa.c16
-rw-r--r--arch/alpha/kernel/sys_noritake.c16
-rw-r--r--arch/alpha/kernel/sys_rawhide.c17
-rw-r--r--arch/alpha/kernel/sys_rx164.c16
-rw-r--r--arch/alpha/kernel/sys_sable.c20
-rw-r--r--arch/alpha/kernel/sys_takara.c14
-rw-r--r--arch/alpha/kernel/sys_titan.c22
-rw-r--r--arch/alpha/kernel/sys_wildfire.c32
-rw-r--r--arch/arm/Kconfig27
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/compressed/.gitignore6
-rw-r--r--arch/arm/common/Kconfig2
-rw-r--r--arch/arm/include/asm/hardware/cache-l2x0.h1
-rw-r--r--arch/arm/include/asm/hardware/sp810.h3
-rw-r--r--arch/arm/include/asm/mach/arch.h4
-rw-r--r--arch/arm/include/asm/pgalloc.h2
-rw-r--r--arch/arm/include/asm/tlb.h105
-rw-r--r--arch/arm/include/asm/tlbflush.h7
-rw-r--r--arch/arm/kernel/head.S38
-rw-r--r--arch/arm/kernel/hw_breakpoint.c68
-rw-r--r--arch/arm/kernel/kprobes-decode.c2
-rw-r--r--arch/arm/kernel/module.c22
-rw-r--r--arch/arm/kernel/perf_event.c2
-rw-r--r--arch/arm/kernel/pmu.c22
-rw-r--r--arch/arm/kernel/ptrace.c6
-rw-r--r--arch/arm/kernel/setup.c4
-rw-r--r--arch/arm/kernel/signal.c4
-rw-r--r--arch/arm/kernel/vmlinux.lds.S11
-rw-r--r--arch/arm/mach-davinci/cpufreq.c2
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c7
-rw-r--r--arch/arm/mach-davinci/gpio-tnetv107x.c18
-rw-r--r--arch/arm/mach-davinci/include/mach/clkdev.h2
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c2
-rw-r--r--arch/arm/mach-omap2/mailbox.c12
-rw-r--r--arch/arm/mach-omap2/mux.c2
-rw-r--r--arch/arm/mach-omap2/pm-debug.c8
-rw-r--r--arch/arm/mach-omap2/prcm_mpu44xx.h4
-rw-r--r--arch/arm/mach-omap2/smartreflex.c37
-rw-r--r--arch/arm/mach-omap2/timer-gp.c13
-rw-r--r--arch/arm/mach-pxa/colibri-evalboard.c2
-rw-r--r--arch/arm/mach-pxa/colibri-pxa300.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/colibri.h2
-rw-r--r--arch/arm/mach-pxa/palm27x.c2
-rw-r--r--arch/arm/mach-pxa/pm.c4
-rw-r--r--arch/arm/mach-pxa/pxa25x.c1
-rw-r--r--arch/arm/mach-pxa/tosa-bt.c2
-rw-r--r--arch/arm/mach-pxa/tosa.c6
-rw-r--r--arch/arm/mach-s3c2440/Kconfig1
-rw-r--r--arch/arm/mach-s3c2440/include/mach/gta02.h26
-rw-r--r--arch/arm/mach-s3c64xx/clock.c6
-rw-r--r--arch/arm/mach-s3c64xx/dma.c11
-rw-r--r--arch/arm/mach-s3c64xx/gpiolib.c4
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c13
-rw-r--r--arch/arm/mach-s3c64xx/setup-keypad.c2
-rw-r--r--arch/arm/mach-s3c64xx/setup-sdhci.c2
-rw-r--r--arch/arm/mach-s5p6442/include/mach/map.h69
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/gpio.h4
-rw-r--r--arch/arm/mach-s5p64x0/include/mach/map.h83
-rw-r--r--arch/arm/mach-s5pc100/include/mach/map.h193
-rw-r--r--arch/arm/mach-s5pv210/include/mach/map.h168
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c15
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c15
-rw-r--r--arch/arm/mach-s5pv310/include/mach/map.h149
-rw-r--r--arch/arm/mach-sa1100/collie.c3
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c2
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c2
-rw-r--r--arch/arm/mach-shmobile/clock-sh73a0.c17
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-ap4evb.txt10
-rw-r--r--arch/arm/mach-shmobile/include/mach/head-mackerel.txt10
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear320.h2
-rw-r--r--arch/arm/mach-tegra/include/mach/kbc.h1
-rw-r--r--arch/arm/mm/Kconfig6
-rw-r--r--arch/arm/mm/cache-l2x0.c6
-rw-r--r--arch/arm/mm/proc-v7.S6
-rw-r--r--arch/arm/oprofile/common.c14
-rw-r--r--arch/arm/plat-omap/mailbox.c11
-rw-r--r--arch/arm/plat-pxa/mfp.c8
-rw-r--r--arch/arm/plat-s5p/dev-uart.c12
-rw-r--r--arch/arm/plat-samsung/dev-ts.c1
-rw-r--r--arch/arm/plat-samsung/dev-uart.c2
-rw-r--r--arch/arm/plat-spear/include/plat/uncompress.h4
-rw-r--r--arch/arm/plat-spear/include/plat/vmalloc.h2
-rw-r--r--arch/blackfin/lib/outs.S16
-rw-r--r--arch/blackfin/mach-common/cache.S2
-rw-r--r--arch/cris/kernel/vmlinux.lds.S5
-rw-r--r--arch/m68k/include/asm/string.h4
-rw-r--r--arch/m68k/lib/string.c11
-rw-r--r--arch/m68knommu/kernel/vmlinux.lds.S6
-rw-r--r--arch/m68knommu/lib/Makefile2
-rw-r--r--arch/m68knommu/lib/memmove.c105
-rw-r--r--arch/m68knommu/platform/5249/intc2.c4
-rw-r--r--arch/m68knommu/platform/68328/entry.S1
-rw-r--r--arch/m68knommu/platform/68360/commproc.c2
-rw-r--r--arch/m68knommu/platform/68360/config.c2
-rw-r--r--arch/m68knommu/platform/68360/entry.S1
-rw-r--r--arch/m68knommu/platform/68360/ints.c4
-rw-r--r--arch/m68knommu/platform/coldfire/entry.S1
-rw-r--r--arch/mips/Kconfig4
-rw-r--r--arch/mips/alchemy/mtx-1/board_setup.c4
-rw-r--r--arch/mips/alchemy/mtx-1/platform.c9
-rw-r--r--arch/mips/alchemy/xxs1500/board_setup.c4
-rw-r--r--arch/mips/include/asm/perf_event.h12
-rw-r--r--arch/mips/kernel/ftrace.c179
-rw-r--r--arch/mips/kernel/perf_event.c345
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c4
-rw-r--r--arch/mips/kernel/signal.c2
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp.c31
-rw-r--r--arch/mips/kernel/syscall.c5
-rw-r--r--arch/mips/kernel/vpe.c4
-rw-r--r--arch/mips/loongson/Kconfig5
-rw-r--r--arch/mips/loongson/common/cmdline.c5
-rw-r--r--arch/mips/loongson/common/machtype.c3
-rw-r--r--arch/mips/math-emu/ieee754int.h4
-rw-r--r--arch/mips/mm/init.c2
-rw-r--r--arch/mips/mm/tlbex.c2
-rw-r--r--arch/mips/pci/ops-pmcmsp.c4
-rw-r--r--arch/mips/pmc-sierra/Kconfig4
-rw-r--r--arch/mips/pmc-sierra/msp71xx/msp_time.c2
-rw-r--r--arch/mn10300/include/asm/atomic.h2
-rw-r--r--arch/mn10300/include/asm/uaccess.h5
-rw-r--r--arch/mn10300/mm/cache-inv-icache.c4
-rw-r--r--arch/powerpc/include/asm/lppaca.h16
-rw-r--r--arch/powerpc/include/asm/machdep.h6
-rw-r--r--arch/powerpc/kernel/machine_kexec.c5
-rw-r--r--arch/powerpc/kernel/paca.c14
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/mm/numa.c3
-rw-r--r--arch/powerpc/mm/tlb_hash64.c6
-rw-r--r--arch/powerpc/platforms/iseries/dt.c6
-rw-r--r--arch/powerpc/platforms/iseries/setup.c1
-rw-r--r--arch/s390/boot/compressed/misc.c5
-rw-r--r--arch/s390/crypto/sha_common.c1
-rw-r--r--arch/s390/include/asm/atomic.h26
-rw-r--r--arch/s390/include/asm/cache.h1
-rw-r--r--arch/s390/include/asm/processor.h5
-rw-r--r--arch/s390/kernel/traps.c37
-rw-r--r--arch/sh/include/asm/sections.h2
-rw-r--r--arch/sh/kernel/cpu/sh4/setup-sh7750.c13
-rw-r--r--arch/sh/lib/delay.c10
-rw-r--r--arch/sh/mm/cache.c3
-rw-r--r--arch/sparc/include/asm/pcr.h2
-rw-r--r--arch/sparc/kernel/iommu.c5
-rw-r--r--arch/sparc/kernel/pcr.c2
-rw-r--r--arch/sparc/kernel/smp_64.c2
-rw-r--r--arch/sparc/kernel/una_asm_32.S4
-rw-r--r--arch/sparc/lib/bitext.c5
-rw-r--r--arch/x86/boot/compressed/mkpiggy.c7
-rw-r--r--arch/x86/include/asm/acpi.h1
-rw-r--r--arch/x86/include/asm/apic.h1
-rw-r--r--arch/x86/include/asm/ce4100.h6
-rw-r--r--arch/x86/include/asm/cpu.h2
-rw-r--r--arch/x86/include/asm/msr-index.h5
-rw-r--r--arch/x86/include/asm/perf_event_p4.h1
-rw-r--r--arch/x86/include/asm/smpboot_hooks.h2
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h2
-rw-r--r--arch/x86/kernel/acpi/boot.c14
-rw-r--r--arch/x86/kernel/alternative.c2
-rw-r--r--arch/x86/kernel/apb_timer.c2
-rw-r--r--arch/x86/kernel/apic/apic.c9
-rw-r--r--arch/x86/kernel/apic/io_apic.c3
-rw-r--r--arch/x86/kernel/check.c8
-rw-r--r--arch/x86/kernel/cpu/cpufreq/p4-clockmod.c6
-rw-r--r--arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c2
-rw-r--r--arch/x86/kernel/cpu/cpufreq/powernow-k8.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event_p4.c11
-rw-r--r--arch/x86/kernel/early-quirks.c16
-rw-r--r--arch/x86/kernel/irq.c3
-rw-r--r--arch/x86/kernel/process.c24
-rw-r--r--arch/x86/kernel/reboot.c8
-rw-r--r--arch/x86/kernel/smpboot.c4
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/mm/fault.c14
-rw-r--r--arch/x86/mm/init_64.c6
-rw-r--r--arch/x86/mm/pageattr.c18
-rw-r--r--arch/x86/mm/pgtable.c11
-rw-r--r--arch/x86/pci/ce4100.c7
-rw-r--r--arch/x86/platform/ce4100/ce4100.c2
-rw-r--r--arch/x86/platform/olpc/olpc_dt.c3
-rw-r--r--arch/x86/platform/uv/tlb_uv.c4
-rw-r--r--arch/x86/xen/mmu.c10
-rw-r--r--block/blk-core.c18
-rw-r--r--block/blk-flush.c8
-rw-r--r--block/blk-lib.c21
-rw-r--r--block/blk-throttle.c29
-rw-r--r--block/cfq-iosched.c6
-rw-r--r--block/elevator.c4
-rw-r--r--block/genhd.c2
-rw-r--r--block/ioctl.c8
-rw-r--r--drivers/acpi/acpica/aclocal.h7
-rw-r--r--drivers/acpi/acpica/evgpe.c17
-rw-r--r--drivers/acpi/acpica/evxfgpe.c67
-rw-r--r--drivers/acpi/debugfs.c20
-rw-r--r--drivers/acpi/osl.c25
-rw-r--r--drivers/acpi/video_detect.c5
-rw-r--r--drivers/acpi/wakeup.c6
-rw-r--r--drivers/atm/solos-pci.c5
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/loop.c5
-rw-r--r--drivers/bluetooth/ath3k.c5
-rw-r--r--drivers/bluetooth/btusb.c12
-rw-r--r--drivers/char/agp/amd64-agp.c9
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/agp/intel-gtt.c56
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c8
-rw-r--r--drivers/char/pcmcia/cm4000_cs.c3
-rw-r--r--drivers/char/pcmcia/ipwireless/main.c52
-rw-r--r--drivers/char/tpm/tpm.c28
-rw-r--r--drivers/char/tpm/tpm.h2
-rw-r--r--drivers/char/tpm/tpm_tis.c4
-rw-r--r--drivers/char/virtio_console.c8
-rw-r--r--drivers/cpufreq/cpufreq.c27
-rw-r--r--drivers/dma/amba-pl08x.c53
-rw-r--r--drivers/dma/imx-dma.c26
-rw-r--r--drivers/dma/imx-sdma.c88
-rw-r--r--drivers/dma/ipu/ipu_idmac.c50
-rw-r--r--drivers/firmware/dmi_scan.c11
-rw-r--r--drivers/gpio/ml_ioh_gpio.c1
-rw-r--r--drivers/gpio/pch_gpio.c1
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c4
-rw-r--r--drivers/gpu/drm/drm_info.c9
-rw-r--r--drivers/gpu/drm/drm_irq.c29
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c4
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c22
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h25
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c4
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c21
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c6
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c196
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c36
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c39
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c12
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c9
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c26
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h13
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c66
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c43
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c1
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mem.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_mm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_pm.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c12
-rw-r--r--drivers/gpu/drm/nouveau/nv40_graph.c46
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv50_vm.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c100
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c25
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_kms.c27
-rw-r--r--drivers/gpu/drm/radeon/evergreen_blit_shaders.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h1
-rw-r--r--drivers/gpu/drm/radeon/mkregtable.c5
-rw-r--r--drivers/gpu/drm/radeon/r100.c87
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h13
-rw-r--r--drivers/gpu/drm/radeon/r200.c18
-rw-r--r--drivers/gpu/drm/radeon/r300.c44
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r600.c25
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c11
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c33
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c4
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c31
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c46
-rw-r--r--drivers/gpu/drm/radeon/r600d.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c48
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c47
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c24
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c18
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r3006
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r4207
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rs6006
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/rv5157
-rw-r--r--drivers/gpu/drm/radeon/rs600.c1
-rw-r--r--drivers/gpu/drm/radeon/rs690.c13
-rw-r--r--drivers/gpu/drm/radeon/rv770.c9
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h8
-rw-r--r--drivers/hwmon/Kconfig19
-rw-r--r--drivers/hwmon/ad7414.c1
-rw-r--r--drivers/hwmon/adt7411.c1
-rw-r--r--drivers/hwmon/f71882fg.c4
-rw-r--r--drivers/hwmon/jc42.c35
-rw-r--r--drivers/hwmon/k10temp.c5
-rw-r--r--drivers/hwmon/lm85.c23
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c1
-rw-r--r--drivers/i2c/busses/i2c-ocores.c2
-rw-r--r--drivers/i2c/busses/i2c-omap.c39
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/idle/intel_idle.c24
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c32
-rw-r--r--drivers/infiniband/hw/qib/qib_rc.c5
-rw-r--r--drivers/input/gameport/gameport.c2
-rw-r--r--drivers/input/input.c37
-rw-r--r--drivers/input/keyboard/tegra-kbc.c62
-rw-r--r--drivers/input/misc/rotary_encoder.c4
-rw-r--r--drivers/input/mouse/synaptics.h23
-rw-r--r--drivers/input/serio/serio.c13
-rw-r--r--drivers/input/tablet/wacom_sys.c2
-rw-r--r--drivers/input/touchscreen/ads7846.c38
-rw-r--r--drivers/input/touchscreen/wacom_w8001.c13
-rw-r--r--drivers/isdn/hardware/eicon/istream.c2
-rw-r--r--drivers/isdn/hisax/isdnl2.c28
-rw-r--r--drivers/md/linear.c1
-rw-r--r--drivers/md/md.c31
-rw-r--r--drivers/md/md.h2
-rw-r--r--drivers/md/multipath.c1
-rw-r--r--drivers/md/raid0.c2
-rw-r--r--drivers/md/raid1.c6
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/md/raid5.c1
-rw-r--r--drivers/media/common/tuners/tda8290.c14
-rw-r--r--drivers/media/dvb/dvb-usb/dib0700_devices.c21
-rw-r--r--drivers/media/dvb/dvb-usb/lmedm04.c6
-rw-r--r--drivers/media/dvb/frontends/dib7000m.c19
-rw-r--r--drivers/media/dvb/frontends/dib7000m.h15
-rw-r--r--drivers/media/dvb/mantis/mantis_pci.c1
-rw-r--r--drivers/media/rc/ir-raw.c3
-rw-r--r--drivers/media/rc/mceusb.c27
-rw-r--r--drivers/media/rc/nuvoton-cir.c5
-rw-r--r--drivers/media/rc/nuvoton-cir.h7
-rw-r--r--drivers/media/rc/rc-main.c2
-rw-r--r--drivers/media/video/au0828/au0828-video.c28
-rw-r--r--drivers/media/video/cx18/cx18-cards.c50
-rw-r--r--drivers/media/video/cx18/cx18-driver.c25
-rw-r--r--drivers/media/video/cx18/cx18-driver.h3
-rw-r--r--drivers/media/video/cx18/cx18-dvb.c38
-rw-r--r--drivers/media/video/cx23885/cx23885-i2c.c10
-rw-r--r--drivers/media/video/cx25840/cx25840-core.c3
-rw-r--r--drivers/media/video/ivtv/ivtv-irq.c58
-rw-r--r--drivers/media/video/mem2mem_testdev.c1
-rw-r--r--drivers/media/video/s2255drv.c10
-rw-r--r--drivers/memstick/core/memstick.c2
-rw-r--r--drivers/message/fusion/mptbase.h4
-rw-r--r--drivers/message/fusion/mptctl.c8
-rw-r--r--drivers/message/fusion/mptscsih.c7
-rw-r--r--drivers/mfd/asic3.c4
-rw-r--r--drivers/mfd/davinci_voicecodec.c4
-rw-r--r--drivers/mfd/tps6586x.c10
-rw-r--r--drivers/mfd/ucb1x00-ts.c12
-rw-r--r--drivers/mfd/wm8994-core.c18
-rw-r--r--drivers/misc/bmp085.c1
-rw-r--r--drivers/misc/tifm_core.c2
-rw-r--r--drivers/misc/vmw_balloon.c2
-rw-r--r--drivers/mmc/core/core.c2
-rw-r--r--drivers/mmc/core/sdio.c3
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c43
-rw-r--r--drivers/mtd/chips/jedec_probe.c35
-rw-r--r--drivers/mtd/maps/amd76xrom.c1
-rw-r--r--drivers/mtd/mtd_blkdevs.c1
-rw-r--r--drivers/mtd/nand/omap2.c2
-rw-r--r--drivers/mtd/nand/r852.c2
-rw-r--r--drivers/mtd/onenand/generic.c2
-rw-r--r--drivers/mtd/onenand/omap2.c2
-rw-r--r--drivers/mtd/sm_ftl.c2
-rw-r--r--drivers/net/ariadne.c5
-rw-r--r--drivers/net/bnx2x/bnx2x.h31
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c87
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h29
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c39
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h2
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c37
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c4
-rw-r--r--drivers/net/bonding/bond_3ad.c32
-rw-r--r--drivers/net/bonding/bond_3ad.h3
-rw-r--r--drivers/net/can/mcp251x.c2
-rw-r--r--drivers/net/can/mscan/Kconfig2
-rw-r--r--drivers/net/can/softing/Kconfig2
-rw-r--r--drivers/net/can/softing/softing_main.c1
-rw-r--r--drivers/net/cnic.c33
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c80
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c2
-rw-r--r--drivers/net/davinci_emac.c2
-rw-r--r--drivers/net/dm9000.c9
-rw-r--r--drivers/net/dnet.c3
-rw-r--r--drivers/net/e1000/e1000_osdep.h3
-rw-r--r--drivers/net/e1000e/netdev.c55
-rw-r--r--drivers/net/fec.c3
-rw-r--r--drivers/net/forcedeth.c2
-rw-r--r--drivers/net/igbvf/vf.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c51
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/macb.c2
-rw-r--r--drivers/net/macvtap.c3
-rw-r--r--drivers/net/pch_gbe/pch_gbe.h2
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c104
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c1
-rw-r--r--drivers/net/r6040.c115
-rw-r--r--drivers/net/r8169.c50
-rw-r--r--drivers/net/sfc/ethtool.c22
-rw-r--r--drivers/net/skge.c3
-rw-r--r--drivers/net/smsc911x.c5
-rw-r--r--drivers/net/stmmac/stmmac_main.c4
-rw-r--r--drivers/net/tg3.c8
-rw-r--r--drivers/net/usb/dm9601.c4
-rw-r--r--drivers/net/usb/hso.c12
-rw-r--r--drivers/net/usb/usbnet.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c143
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c9
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c8
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c67
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c2
-rw-r--r--drivers/net/wireless/p54/p54pci.c14
-rw-r--r--drivers/net/wireless/p54/p54usb.c1
-rw-r--r--drivers/net/wireless/rndis_wlan.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c8
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c6
-rw-r--r--drivers/nfc/Kconfig2
-rw-r--r--drivers/nfc/pn544.c4
-rw-r--r--drivers/of/pdt.c112
-rw-r--r--drivers/pci/pci-sysfs.c3
-rw-r--r--drivers/pcmcia/pcmcia_resource.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.h1
-rw-r--r--drivers/pcmcia/pxa2xx_colibri.c3
-rw-r--r--drivers/pcmcia/pxa2xx_lubbock.c1
-rw-r--r--drivers/platform/x86/Kconfig2
-rw-r--r--drivers/platform/x86/acer-wmi.c4
-rw-r--r--drivers/platform/x86/asus_acpi.c8
-rw-r--r--drivers/platform/x86/dell-laptop.c24
-rw-r--r--drivers/platform/x86/intel_pmic_gpio.c116
-rw-r--r--drivers/platform/x86/tc1100-wmi.c2
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c8
-rw-r--r--drivers/pps/generators/Kconfig2
-rw-r--r--drivers/pps/kapi.c2
-rw-r--r--drivers/rapidio/rio-sysfs.c12
-rw-r--r--drivers/regulator/mc13xxx-regulator-core.c2
-rw-r--r--drivers/regulator/wm831x-dcdc.c1
-rw-r--r--drivers/rtc/Kconfig12
-rw-r--r--drivers/rtc/interface.c23
-rw-r--r--drivers/rtc/rtc-at32ap700x.c19
-rw-r--r--drivers/rtc/rtc-at91rm9200.c20
-rw-r--r--drivers/rtc/rtc-at91sam9.c20
-rw-r--r--drivers/rtc/rtc-bfin.c21
-rw-r--r--drivers/rtc/rtc-dev.c125
-rw-r--r--drivers/rtc/rtc-ds1286.c41
-rw-r--r--drivers/rtc/rtc-ds1305.c43
-rw-r--r--drivers/rtc/rtc-ds1307.c49
-rw-r--r--drivers/rtc/rtc-ds1374.c37
-rw-r--r--drivers/rtc/rtc-ds3232.c14
-rw-r--r--drivers/rtc/rtc-m41t80.c30
-rw-r--r--drivers/rtc/rtc-m48t59.c21
-rw-r--r--drivers/rtc/rtc-mrst.c31
-rw-r--r--drivers/rtc/rtc-msm6242.c2
-rw-r--r--drivers/rtc/rtc-mv.c20
-rw-r--r--drivers/rtc/rtc-omap.c28
-rw-r--r--drivers/rtc/rtc-rp5c01.c2
-rw-r--r--drivers/rtc/rtc-rs5c372.c48
-rw-r--r--drivers/rtc/rtc-s3c.c12
-rw-r--r--drivers/rtc/rtc-sa1100.c22
-rw-r--r--drivers/rtc/rtc-sh.c11
-rw-r--r--drivers/rtc/rtc-test.c21
-rw-r--r--drivers/rtc/rtc-vr41xx.c38
-rw-r--r--drivers/s390/block/dasd_eckd.c2
-rw-r--r--drivers/s390/block/xpram.c4
-rw-r--r--drivers/s390/char/keyboard.c3
-rw-r--r--drivers/s390/char/tape.h8
-rw-r--r--drivers/s390/char/tape_34xx.c59
-rw-r--r--drivers/s390/char/tape_3590.c83
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c5
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c10
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c10
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/spi/pxa2xx_spi_pci.c61
-rw-r--r--drivers/target/Makefile3
-rw-r--r--drivers/target/target_core_configfs.c155
-rw-r--r--drivers/target/target_core_device.c13
-rw-r--r--drivers/target/target_core_fabric_configfs.c92
-rw-r--r--drivers/target/target_core_iblock.c8
-rw-r--r--drivers/target/target_core_mib.c1078
-rw-r--r--drivers/target/target_core_mib.h28
-rw-r--r--drivers/target/target_core_pscsi.c4
-rw-r--r--drivers/target/target_core_tmr.c5
-rw-r--r--drivers/target/target_core_tpg.c29
-rw-r--r--drivers/target/target_core_transport.c56
-rw-r--r--drivers/thermal/Kconfig1
-rw-r--r--drivers/thermal/thermal_sys.c40
-rw-r--r--drivers/tty/serial/68328serial.c29
-rw-r--r--drivers/tty/serial/max3100.c2
-rw-r--r--drivers/tty/serial/max3107.c2
-rw-r--r--drivers/tty/serial/serial_cs.c1
-rw-r--r--drivers/tty/sysrq.c17
-rw-r--r--drivers/usb/core/hub.c18
-rw-r--r--drivers/usb/core/quirks.c8
-rw-r--r--drivers/usb/gadget/f_phonet.c15
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c9
-rw-r--r--drivers/usb/host/xhci-mem.c10
-rw-r--r--drivers/usb/host/xhci-ring.c40
-rw-r--r--drivers/usb/host/xhci.c14
-rw-r--r--drivers/usb/host/xhci.h2
-rw-r--r--drivers/usb/musb/musb_core.c1
-rw-r--r--drivers/usb/musb/musb_core.h17
-rw-r--r--drivers/usb/musb/omap2430.c1
-rw-r--r--drivers/usb/serial/sierra.c3
-rw-r--r--drivers/usb/serial/usb_wwan.c15
-rw-r--r--drivers/usb/serial/visor.c12
-rw-r--r--drivers/video/backlight/ltv350qv.c9
-rw-r--r--drivers/watchdog/Kconfig6
-rw-r--r--drivers/watchdog/Makefile2
-rw-r--r--drivers/watchdog/cpwd.c2
-rw-r--r--drivers/watchdog/hpwdt.c4
-rw-r--r--drivers/watchdog/m54xx_wdt.c (renamed from drivers/watchdog/m548x_wdt.c)50
-rw-r--r--drivers/watchdog/sbc_fitpc2_wdt.c7
-rw-r--r--drivers/watchdog/sch311x_wdt.c2
-rw-r--r--drivers/watchdog/w83697ug_wdt.c2
-rw-r--r--drivers/xen/manage.c10
-rw-r--r--fs/afs/write.c1
-rw-r--r--fs/aio.c52
-rw-r--r--fs/block_dev.c30
-rw-r--r--fs/btrfs/ctree.h12
-rw-r--r--fs/btrfs/disk-io.c8
-rw-r--r--fs/btrfs/extent-tree.c46
-rw-r--r--fs/btrfs/extent_io.c213
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/extent_map.c4
-rw-r--r--fs/btrfs/file.c115
-rw-r--r--fs/btrfs/inode.c135
-rw-r--r--fs/btrfs/ioctl.c17
-rw-r--r--fs/btrfs/lzo.c21
-rw-r--r--fs/btrfs/relocation.c14
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/volumes.c15
-rw-r--r--fs/ceph/dir.c27
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/snap.c14
-rw-r--r--fs/cifs/cifsfs.h2
-rw-r--r--fs/cifs/netmisc.c8
-rw-r--r--fs/cifs/sess.c8
-rw-r--r--fs/compat.c8
-rw-r--r--fs/dcache.c26
-rw-r--r--fs/ecryptfs/dentry.c22
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h3
-rw-r--r--fs/ecryptfs/file.c1
-rw-r--r--fs/ecryptfs/inode.c138
-rw-r--r--fs/eventfd.c12
-rw-r--r--fs/eventpoll.c95
-rw-r--r--fs/exofs/namei.c8
-rw-r--r--fs/ext2/namei.c9
-rw-r--r--fs/fat/namei_vfat.c4
-rw-r--r--fs/fuse/dir.c9
-rw-r--r--fs/fuse/file.c52
-rw-r--r--fs/fuse/fuse_i.h6
-rw-r--r--fs/gfs2/dentry.c2
-rw-r--r--fs/gfs2/glock.c4
-rw-r--r--fs/gfs2/main.c11
-rw-r--r--fs/hfs/dir.c50
-rw-r--r--fs/inode.c31
-rw-r--r--fs/internal.h2
-rw-r--r--fs/jfs/namei.c2
-rw-r--r--fs/minix/namei.c8
-rw-r--r--fs/namei.c145
-rw-r--r--fs/namespace.c2
-rw-r--r--fs/nfs/inode.c7
-rw-r--r--fs/nfs/nfs4_fs.h10
-rw-r--r--fs/nfs/nfs4filelayoutdev.c4
-rw-r--r--fs/nfs/nfs4proc.c131
-rw-r--r--fs/nfs/nfs4state.c29
-rw-r--r--fs/nfs/nfs4xdr.c4
-rw-r--r--fs/nfs/nfsroot.c29
-rw-r--r--fs/nfs/unlink.c2
-rw-r--r--fs/nfs/write.c2
-rw-r--r--fs/nfsd/nfs4callback.c8
-rw-r--r--fs/nfsd/nfs4state.c187
-rw-r--r--fs/nfsd/nfs4xdr.c12
-rw-r--r--fs/nfsd/state.h5
-rw-r--r--fs/nfsd/vfs.c21
-rw-r--r--fs/nilfs2/btnode.c5
-rw-r--r--fs/nilfs2/btnode.h1
-rw-r--r--fs/nilfs2/mdt.c4
-rw-r--r--fs/nilfs2/namei.c8
-rw-r--r--fs/nilfs2/page.c13
-rw-r--r--fs/nilfs2/page.h1
-rw-r--r--fs/nilfs2/segment.c3
-rw-r--r--fs/nilfs2/super.c2
-rw-r--r--fs/ocfs2/dcache.c2
-rw-r--r--fs/ocfs2/journal.h6
-rw-r--r--fs/ocfs2/refcounttree.c7
-rw-r--r--fs/ocfs2/super.c28
-rw-r--r--fs/open.c8
-rw-r--r--fs/partitions/ldm.c5
-rw-r--r--fs/partitions/mac.c17
-rw-r--r--fs/partitions/osf.c12
-rw-r--r--fs/proc/array.c3
-rw-r--r--fs/proc/base.c30
-rw-r--r--fs/proc/inode.c8
-rw-r--r--fs/proc/proc_devtree.c2
-rw-r--r--fs/proc/proc_sysctl.c7
-rw-r--r--fs/reiserfs/namei.c2
-rw-r--r--fs/reiserfs/xattr.c2
-rw-r--r--fs/sysv/namei.c8
-rw-r--r--fs/udf/namei.c11
-rw-r--r--fs/ufs/namei.c9
-rw-r--r--fs/xfs/linux-2.6/xfs_discard.c2
-rw-r--r--fs/xfs/linux-2.6/xfs_ioctl.c11
-rw-r--r--fs/xfs/xfs_fsops.c3
-rw-r--r--include/asm-generic/pgtable.h2
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/keys/rxrpc-type.h1
-rw-r--r--include/linux/blkdev.h5
-rw-r--r--include/linux/blktrace_api.h1
-rw-r--r--include/linux/ceph/messenger.h2
-rw-r--r--include/linux/dcbnl.h2
-rw-r--r--include/linux/freezer.h2
-rw-r--r--include/linux/fs.h4
-rw-r--r--include/linux/gfp.h11
-rw-r--r--include/linux/huge_mm.h3
-rw-r--r--include/linux/input/matrix_keypad.h4
-rw-r--r--include/linux/list.h12
-rw-r--r--include/linux/mfd/wm8994/core.h1
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/netdevice.h3
-rw-r--r--include/linux/nfs_fs_sb.h10
-rw-r--r--include/linux/oprofile.h13
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_wakeup.h25
-rw-r--r--include/linux/ptrace.h3
-rw-r--r--include/linux/rio_regs.h4
-rw-r--r--include/linux/rtc.h15
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/sunrpc/sched.h1
-rw-r--r--include/linux/sysctl.h14
-rw-r--r--include/linux/thermal.h8
-rw-r--r--include/linux/workqueue.h8
-rw-r--r--include/net/ipv6.h12
-rw-r--r--include/net/netfilter/nf_tproxy_core.h12
-rw-r--r--include/net/sch_generic.h2
-rw-r--r--include/pcmcia/ds.h1
-rw-r--r--include/sound/wm8903.h10
-rw-r--r--include/target/target_core_base.h28
-rw-r--r--include/target/target_core_transport.h4
-rw-r--r--include/trace/events/block.h6
-rw-r--r--kernel/cpuset.c7
-rw-r--r--kernel/irq/internals.h6
-rw-r--r--kernel/irq/irqdesc.c11
-rw-r--r--kernel/irq/manage.c2
-rw-r--r--kernel/irq/resend.c2
-rw-r--r--kernel/perf_event.c19
-rw-r--r--kernel/power/main.c2
-rw-r--r--kernel/power/process.c6
-rw-r--r--kernel/power/snapshot.c7
-rw-r--r--kernel/ptrace.c6
-rw-r--r--kernel/sched.c1
-rw-r--r--kernel/sched_rt.c14
-rw-r--r--kernel/sysctl.c15
-rw-r--r--kernel/time/tick-broadcast.c10
-rw-r--r--kernel/time/tick-common.c6
-rw-r--r--kernel/time/tick-internal.h3
-rw-r--r--kernel/time/timer_list.c4
-rw-r--r--kernel/timer.c8
-rw-r--r--kernel/trace/blktrace.c16
-rw-r--r--kernel/watchdog.c10
-rw-r--r--kernel/workqueue.c37
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/list_debug.c39
-rw-r--r--lib/nlattr.c2
-rw-r--r--lib/swiotlb.c6
-rw-r--r--mm/huge_memory.c69
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempolicy.c16
-rw-r--r--mm/migrate.c6
-rw-r--r--mm/mremap.c4
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/rmap.c54
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/truncate.c2
-rw-r--r--mm/vmscan.c32
-rw-r--r--net/Makefile4
-rw-r--r--net/bluetooth/l2cap.c1
-rw-r--r--net/bluetooth/rfcomm/tty.c2
-rw-r--r--net/bridge/Kconfig1
-rw-r--r--net/bridge/br_input.c2
-rw-r--r--net/bridge/br_multicast.c42
-rw-r--r--net/bridge/br_private.h3
-rw-r--r--net/ceph/messenger.c133
-rw-r--r--net/ceph/pagevec.c18
-rw-r--r--net/core/dev.c21
-rw-r--r--net/core/dev_addr_lists.c2
-rw-r--r--net/core/pktgen.c2
-rw-r--r--net/dcb/dcbnl.c11
-rw-r--r--net/dccp/input.c7
-rw-r--r--net/dns_resolver/dns_key.c20
-rw-r--r--net/ipv4/devinet.c32
-rw-r--r--net/ipv4/inet_timewait_sock.c2
-rw-r--r--net/ipv4/ip_gre.c3
-rw-r--r--net/ipv4/ipip.c2
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp_input.c5
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv6/ip6_tunnel.c1
-rw-r--r--net/ipv6/netfilter/ip6t_LOG.c2
-rw-r--r--net/ipv6/route.c22
-rw-r--r--net/ipv6/sit.c2
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/mlme.c6
-rw-r--r--net/mac80211/util.c2
-rw-r--r--net/netfilter/core.c3
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c4
-rw-r--r--net/netfilter/nf_log.c4
-rw-r--r--net/netfilter/nf_tproxy_core.c27
-rw-r--r--net/netfilter/xt_TPROXY.c22
-rw-r--r--net/netfilter/xt_socket.c13
-rw-r--r--net/netlink/af_netlink.c18
-rw-r--r--net/rds/ib_send.c5
-rw-r--r--net/rds/loop.c11
-rw-r--r--net/rxrpc/ar-input.c1
-rw-r--r--net/rxrpc/ar-key.c8
-rw-r--r--net/sched/sch_generic.c1
-rw-r--r--net/sctp/sm_make_chunk.c10
-rw-r--r--net/sunrpc/sched.c75
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c1
-rw-r--r--net/sunrpc/xprtsock.c3
-rw-r--r--net/unix/af_unix.c17
-rw-r--r--net/wireless/wext-compat.c4
-rw-r--r--net/xfrm/xfrm_policy.c7
-rw-r--r--scripts/basic/fixdep.c21
-rw-r--r--scripts/mod/sumversion.c19
-rw-r--r--sound/core/jack.c1
-rw-r--r--sound/pci/au88x0/au88x0_core.c14
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_cirrus.c2
-rw-r--r--sound/pci/hda/patch_conexant.c68
-rw-r--r--sound/pci/hda/patch_hdmi.c5
-rw-r--r--sound/pci/hda/patch_realtek.c9
-rw-r--r--sound/pci/hda/patch_sigmatel.c15
-rw-r--r--sound/pci/hda/patch_via.c2
-rw-r--r--sound/soc/codecs/cx20442.c2
-rw-r--r--sound/soc/codecs/wm8903.c2
-rw-r--r--sound/soc/codecs/wm8903.h2
-rw-r--r--sound/soc/codecs/wm8978.c14
-rw-r--r--sound/soc/codecs/wm8994.c249
-rw-r--r--sound/soc/codecs/wm9081.c5
-rw-r--r--sound/soc/codecs/wm_hubs.c3
-rw-r--r--sound/soc/imx/eukrea-tlv320.c2
-rw-r--r--sound/soc/omap/am3517evm.c2
-rw-r--r--sound/soc/pxa/e740_wm9705.c4
-rw-r--r--sound/soc/pxa/e750_wm9705.c4
-rw-r--r--sound/soc/pxa/e800_wm9712.c4
-rw-r--r--sound/soc/pxa/em-x270.c4
-rw-r--r--sound/soc/pxa/mioa701_wm9713.c4
-rw-r--r--sound/soc/pxa/palm27x.c4
-rw-r--r--sound/soc/pxa/tosa.c4
-rw-r--r--sound/soc/pxa/zylonite.c4
-rw-r--r--sound/soc/soc-dapm.c25
-rw-r--r--sound/usb/caiaq/audio.c2
-rw-r--r--sound/usb/caiaq/midi.c2
-rw-r--r--sound/usb/card.c4
-rw-r--r--sound/usb/pcm.c7
-rw-r--r--sound/usb/usbaudio.h1
-rw-r--r--tools/perf/builtin-record.c4
-rw-r--r--tools/perf/builtin-timechart.c6
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/util/event.c18
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/header.c11
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/svghelper.c6
-rw-r--r--tools/perf/util/symbol.c2
-rw-r--r--tools/power/x86/turbostat/turbostat.c200
810 files changed, 8447 insertions, 6214 deletions
diff --git a/.gitignore b/.gitignore
index 8faa6c02b39e..5d56a3fd0de6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ modules.builtin
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
diff --git a/Documentation/DocBook/drm.tmpl b/Documentation/DocBook/drm.tmpl
index 2861055afd7a..c27915893974 100644
--- a/Documentation/DocBook/drm.tmpl
+++ b/Documentation/DocBook/drm.tmpl
@@ -73,8 +73,8 @@
  services.
  </para>
  <para>
- The core of every DRM driver is struct drm_device. Drivers
- will typically statically initialize a drm_device structure,
+ The core of every DRM driver is struct drm_driver. Drivers
+ will typically statically initialize a drm_driver structure,
  then pass it to drm_init() at load time.
  </para>
 
@@ -84,7 +84,7 @@
  <title>Driver initialization</title>
  <para>
  Before calling the DRM initialization routines, the driver must
- first create and fill out a struct drm_device structure.
+ first create and fill out a struct drm_driver structure.
  </para>
  <programlisting>
  static struct drm_driver driver = {
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5e87ad58c0b5..f51f28531b8d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -82,6 +82,11 @@
  </sect1>
  </chapter>
 
+ <chapter id="fs_events">
+ <title>Events based on file descriptors</title>
+!Efs/eventfd.c
+ </chapter>
+
  <chapter id="sysfs">
  <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
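
The new chapter pulls in the kernel-doc from fs/eventfd.c; from user space the interface is just an 8-byte counter behind a file descriptor. A minimal usage sketch (plain glibc, illustrative only and not part of this patch):

  #include <sys/eventfd.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <unistd.h>

  int main(void)
  {
      uint64_t val = 1;
      int efd = eventfd(0, 0);            /* counter starts at 0 */

      if (efd < 0)
          return 1;
      write(efd, &val, sizeof(val));      /* adds 1 to the counter */
      read(efd, &val, sizeof(val));       /* returns the count (1) and resets it */
      printf("counter read back: %llu\n", (unsigned long long)val);
      close(efd);
      return 0;
  }
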
diff --git a/Documentation/arm/Booting b/Documentation/arm/Booting
index 4e686a2ed91e..76850295af8f 100644
--- a/Documentation/arm/Booting
+++ b/Documentation/arm/Booting
@@ -65,19 +65,13 @@ looks at the connected hardware is beyond the scope of this document.
 The boot loader must ultimately be able to provide a MACH_TYPE_xxx
 value to the kernel. (see linux/arch/arm/tools/mach-types).
 
-4. Setup boot data
-------------------
+
+4. Setup the kernel tagged list
+-------------------------------
 
 Existing boot loaders: OPTIONAL, HIGHLY RECOMMENDED
 New boot loaders: MANDATORY
 
-The boot loader must provide either a tagged list or a dtb image for
-passing configuration data to the kernel. The physical address of the
-boot data is passed to the kernel in register r2.
-
-4a. Setup the kernel tagged list
---------------------------------
-
 The boot loader must create and initialise the kernel tagged list.
 A valid tagged list starts with ATAG_CORE and ends with ATAG_NONE.
 The ATAG_CORE tag may or may not be empty. An empty ATAG_CORE tag
@@ -107,24 +101,6 @@ The tagged list must be placed in a region of memory where neither
 the kernel decompressor nor initrd 'bootp' program will overwrite
 it. The recommended placement is in the first 16KiB of RAM.
 
-4b. Setup the device tree
--------------------------
-
-The boot loader must load a device tree image (dtb) into system ram
-at a 64bit aligned address and initialize it with the boot data. The
-dtb format is documented in Documentation/devicetree/booting-without-of.txt.
-The kernel will look for the dtb magic value of 0xd00dfeed at the dtb
-physical address to determine if a dtb has been passed instead of a
-tagged list.
-
-The boot loader must pass at a minimum the size and location of the
-system memory, and the root filesystem location. The dtb must be
-placed in a region of memory where the kernel decompressor will not
-overwrite it. The recommended placement is in the first 16KiB of RAM
-with the caveat that it may not be located at physical address 0 since
-the kernel interprets a value of 0 in r2 to mean neither a tagged list
-nor a dtb were passed.
-
 5. Calling the kernel image
 ---------------------------
 
@@ -149,8 +125,7 @@ In either case, the following conditions must be met:
 - CPU register settings
  r0 = 0,
  r1 = machine type number discovered in (3) above.
- r2 = physical address of tagged list in system RAM, or
- physical address of device tree block (dtb) in system RAM
+ r2 = physical address of tagged list in system RAM.
 
 - CPU mode
  All forms of interrupts must be disabled (IRQs and FIQs)
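
The register convention restated in the last hunk (r0 = 0, r1 = machine type, r2 = address of the tagged list) makes the boot loader's final hand-off very small. A hedged sketch in C, where MACH_TYPE_MYBOARD, kernel_base and atags are placeholders invented for the example:

  /* Illustrative only: jump into a kernel image following the ABI above. */
  void boot_linux(unsigned long kernel_base, unsigned long atags)
  {
      void (*kernel)(int zero, int arch, unsigned long params) =
          (void (*)(int, int, unsigned long))kernel_base;

      /* caches/MMU off and IRQs/FIQs masked, per the conditions listed above */
      kernel(0, MACH_TYPE_MYBOARD, atags);   /* r0 = 0, r1 = machine type, r2 = tag list */
  }
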
diff --git a/Documentation/devicetree/booting-without-of.txt b/Documentation/devicetree/booting-without-of.txt
index 9381a1481027..28b1c9d3d351 100644
--- a/Documentation/devicetree/booting-without-of.txt
+++ b/Documentation/devicetree/booting-without-of.txt
@@ -13,7 +13,6 @@ Table of Contents
 
  I - Introduction
  1) Entry point for arch/powerpc
- 2) Entry point for arch/arm
 
  II - The DT block format
  1) Header
@@ -226,45 +225,6 @@ it with special cases.
  cannot support both configurations with Book E and configurations
  with classic Powerpc architectures.
 
-2) Entry point for arch/arm
----------------------------
-
- There is one single entry point to the kernel, at the start
- of the kernel image. That entry point supports two calling
- conventions. A summary of the interface is described here. A full
- description of the boot requirements is documented in
- Documentation/arm/Booting
-
- a) ATAGS interface. Minimal information is passed from firmware
- to the kernel with a tagged list of predefined parameters.
-
- r0 : 0
-
- r1 : Machine type number
-
- r2 : Physical address of tagged list in system RAM
-
- b) Entry with a flattened device-tree block. Firmware loads the
- physical address of the flattened device tree block (dtb) into r2,
- r1 is not used, but it is considered good practise to use a valid
- machine number as described in Documentation/arm/Booting.
-
- r0 : 0
-
- r1 : Valid machine type number. When using a device tree,
- a single machine type number will often be assigned to
- represent a class or family of SoCs.
-
- r2 : physical pointer to the device-tree block
- (defined in chapter II) in RAM. Device tree can be located
- anywhere in system RAM, but it should be aligned on a 32 bit
- boundary.
-
- The kernel will differentiate between ATAGS and device tree booting by
- reading the memory pointed to by r1 and looking for either the flattened
- device tree block magic value (0xd00dfeed) or the ATAG_CORE value at
- offset 0x4 from r2 (0x54410001).
-
 
 II - The DT block format
 ========================
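
The arch/arm text removed above also explained how a kernel can tell the two hand-over formats apart: a flattened device tree starts with the big-endian magic 0xd00dfeed, while an ATAG list carries the ATAG_CORE value 0x54410001 in its second word. A small illustrative check (a sketch, not kernel code; __builtin_bswap32 assumes GCC):

  #include <stdint.h>

  #define FDT_MAGIC 0xd00dfeedU   /* stored big-endian at offset 0 of a dtb */
  #define ATAG_CORE 0x54410001U   /* tag word at offset 4 of a tagged list  */

  static int boot_data_is_dtb(const uint32_t *boot_data)
  {
      uint32_t first = boot_data[0];

      if (first == FDT_MAGIC || __builtin_bswap32(first) == FDT_MAGIC)
          return 1;                               /* flattened device tree */
      return boot_data[1] == ATAG_CORE ? 0 : -1;  /* 0 = ATAG list, -1 = unknown */
  }
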
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 0e76ef12e4c6..a22ecf48f255 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -51,7 +51,8 @@ Supported chips:
  * JEDEC JC 42.4 compliant temperature sensor chips
  Prefix: 'jc42'
  Addresses scanned: I2C 0x18 - 0x1f
- Datasheet: -
+ Datasheet:
+ http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
 
 Author:
  Guenter Roeck <guenter.roeck@ericsson.com>
@@ -60,7 +61,11 @@ Author:
 Description
 -----------
 
-This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
+This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
+which are used on many DDR3 memory modules for mobile devices and servers. Some
+systems use the sensor to prevent memory overheating by automatically throttling
+the memory controller.
+
 The driver auto-detects the chips listed above, but can be manually instantiated
 to support other JC 42.4 compliant chips.
 
@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
 which applies to all limits. This register can be written by writing into
 temp1_crit_hyst. Other hysteresis attributes are read-only.
 
+If the BIOS has configured the sensor for automatic temperature management, it
+is likely that it has locked the registers, i.e., that the temperature limits
+cannot be changed.
+
 Sysfs entries
 -------------
 
 temp1_input Temperature (RO)
-temp1_min Minimum temperature (RW)
-temp1_max Maximum temperature (RW)
-temp1_crit Critical high temperature (RW)
+temp1_min Minimum temperature (RO or RW)
+temp1_max Maximum temperature (RO or RW)
+temp1_crit Critical high temperature (RO or RW)
 
-temp1_crit_hyst Critical hysteresis temperature (RW)
+temp1_crit_hyst Critical hysteresis temperature (RO or RW)
 temp1_max_hyst Maximum hysteresis temperature (RO)
 
 temp1_min_alarm Temperature low alarm
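
As with other hwmon drivers, the attributes above report millidegrees Celsius. A rough user-space reader, with the caveat that the hwmon0 path is only an example (the instance number varies, and on kernels of this vintage the files may sit under a device/ subdirectory):

  #include <stdio.h>

  int main(void)
  {
      FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");  /* example path */
      long mdeg;

      if (!f) {
          perror("temp1_input");
          return 1;
      }
      if (fscanf(f, "%ld", &mdeg) != 1) {
          fclose(f);
          return 1;
      }
      fclose(f);
      printf("DIMM temperature: %ld.%03ld C\n", mdeg / 1000, mdeg % 1000);
      return 0;
  }
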
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 6526eee525a6..d2b56a4fd1f5 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,6 +9,8 @@ Supported chips:
  Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
  Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+* AMD Family 12h processors: "Llano"
+* AMD Family 14h processors: "Brazos" (C/E/G-Series)
 
  Prefix: 'k10temp'
  Addresses scanned: PCI space
@@ -17,10 +19,14 @@ Supported chips:
  http://support.amd.com/us/Processor_TechDocs/31116.pdf
  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
  http://support.amd.com/us/Processor_TechDocs/41256.pdf
+ BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+ http://support.amd.com/us/Processor_TechDocs/43170.pdf
  Revision Guide for AMD Family 10h Processors:
  http://support.amd.com/us/Processor_TechDocs/41322.pdf
  Revision Guide for AMD Family 11h Processors:
  http://support.amd.com/us/Processor_TechDocs/41788.pdf
+ Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+ http://support.amd.com/us/Processor_TechDocs/47534.pdf
  AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
  http://support.amd.com/us/Processor_TechDocs/43373.pdf
  AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -34,7 +40,7 @@ Description
 -----------
 
 This driver permits reading of the internal temperature sensor of AMD
-Family 10h and 11h processors.
+Family 10h/11h/12h/14h processors.
 
 All these processors have a sensor, but on those for Socket F or AM2+,
 the sensor may return inconsistent values (erratum 319). The driver
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 89835a4766a6..f4a04c0c7edc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
 and is between 256 and 4096 characters. It is defined in the file
 ./include/asm/setup.h as COMMAND_LINE_SIZE.
 
+Finally, the [KMG] suffix is commonly described after a number of kernel
+parameter values. These 'K', 'M', and 'G' letters represent the _binary_
+multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
+bytes respectively. Such letter suffixes can also be entirely omitted.
+
 
  acpi= [HW,ACPI,X86]
  Advanced Configuration and Power Interface
@@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
  Format:
  <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
 
- crashkernel=nn[KMG]@ss[KMG]
- [KNL] Reserve a chunk of physical memory to
- hold a kernel to switch to with kexec on panic.
+ crashkernel=size[KMG][@offset[KMG]]
+ [KNL] Using kexec, Linux can switch to a 'crash kernel'
+ upon panic. This parameter reserves the physical
+ memory region [offset, offset + size] for that kernel
+ image. If '@offset' is omitted, then a suitable offset
+ is selected automatically. Check
+ Documentation/kdump/kdump.txt for further details.
 
  crashkernel=range1:size1[,range2:size2,...][@offset]
  [KNL] Same as above, but depends on the memory
  in the running system. The syntax of range is
  start-[end] where start and end are both
  a memory unit (amount[KMG]). See also
- Documentation/kdump/kdump.txt for a example.
+ Documentation/kdump/kdump.txt for an example.
 
  cs89x0_dma= [HW,NET]
  Format: <dma>
@@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
  6 (KERN_INFO) informational
  7 (KERN_DEBUG) debug-level messages
 
- log_buf_len=n Sets the size of the printk ring buffer, in bytes.
- Format: { n | nk | nM }
- n must be a power of two. The default size
- is set in the kernel config file.
+ log_buf_len=n[KMG] Sets the size of the printk ring buffer,
+ in bytes. n must be a power of two. The default
+ size is set in the kernel config file.
 
  logo.nologo [FB] Disables display of the built-in Linux logo.
  This may be used to provide more screen space for
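
To illustrate the first crashkernel form above (example values, not taken from the patch): crashkernel=128M@16M reserves 128 MiB for the crash kernel at physical offset 16 MiB, while plain crashkernel=128M leaves the offset choice to the kernel, as described in Documentation/kdump/kdump.txt.
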
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099b8fc8..4edd78dfb362 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@ decnet.txt
  - info on using the DECnet networking layer in Linux.
 depca.txt
  - the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
- - the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
  - info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
  - info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
  - serial IP load balancing
-ethertap.txt
- - the Ethertap user space packet reception and transmission driver
 ewrk3.txt
  - the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
  - TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
  - info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
- - AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
  - general info on X.25 development.
 x25-iface.txt
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
index 5aba7a33aeeb..24c308dd3fd1 100644
--- a/Documentation/networking/Makefile
+++ b/Documentation/networking/Makefile
@@ -4,6 +4,8 @@ obj- := dummy.o
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index aefd1e681804..04ca06325b08 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
  create dns_resolver foo:* * /usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
  returned also.
 
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
+
 =========
 MECHANISM
 =========
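
From C the same read goes through libkeyutils; a hedged sketch (link with -lkeyutils, and foo.example.com is only a placeholder query):

  #include <keyutils.h>
  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
      void *payload;
      long len;
      key_serial_t key = request_key("dns_resolver", "foo.example.com", NULL,
                                     KEY_SPEC_SESSION_KEYRING);

      if (key < 0) {
          perror("request_key");
          return 1;
      }
      len = keyctl_read_alloc(key, &payload);   /* same payload "keyctl read" shows */
      if (len < 0) {
          perror("keyctl_read_alloc");
          return 1;
      }
      printf("%.*s\n", (int)len, (char *)payload);
      free(payload);
      return 0;
  }
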
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index 996a27d9b8db..01c513fac40e 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -190,9 +190,9 @@ resources, scheduled and executed.
  * Long running CPU intensive workloads which can be better
  managed by the system scheduler.
 
- WQ_FREEZEABLE
+ WQ_FREEZABLE
 
- A freezeable wq participates in the freeze phase of the system
+ A freezable wq participates in the freeze phase of the system
  suspend operations. Work items on the wq are drained and no
  new work item starts execution until thawed.
 
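
In driver code the renamed flag is simply passed to alloc_workqueue(); a minimal sketch with invented names (mydrv and its work item are placeholders for the example, not code from this commit):

  #include <linux/module.h>
  #include <linux/workqueue.h>

  static struct workqueue_struct *mydrv_wq;

  static void mydrv_work_fn(struct work_struct *work)
  {
      /* runs in process context; frozen and thawed along with the system */
  }
  static DECLARE_WORK(mydrv_work, mydrv_work_fn);

  static int __init mydrv_init(void)
  {
      mydrv_wq = alloc_workqueue("mydrv", WQ_FREEZABLE, 0);
      if (!mydrv_wq)
          return -ENOMEM;
      queue_work(mydrv_wq, &mydrv_work);
      return 0;
  }
  module_init(mydrv_init);

  static void __exit mydrv_exit(void)
  {
      destroy_workqueue(mydrv_wq);
  }
  module_exit(mydrv_exit);
  MODULE_LICENSE("GPL");
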
diff --git a/MAINTAINERS b/MAINTAINERS
index 4837907a4eda..f1bc3dc6b369 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -885,7 +885,7 @@ S: Supported
 
 ARM/QUALCOMM MSM MACHINE SUPPORT
 M: David Brown <davidb@codeaurora.org>
-M: Daniel Walker <dwalker@codeaurora.org>
+M: Daniel Walker <dwalker@fifo99.com>
 M: Bryan Huntsman <bryanh@codeaurora.org>
 L: linux-arm-msm@vger.kernel.org
 F: arch/arm/mach-msm/
@@ -1010,6 +1010,15 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-s5pv210/mach-aquila.c
+F: arch/arm/mach-s5pv210/mach-goni.c
+F: arch/arm/mach-exynos4/mach-universal_c210.c
+F: arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M: Kyungmin Park <kyungmin.park@samsung.com>
 M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F: include/net/bluetooth/
 
 BONDING DRIVER
 M: Jay Vosburgh <fubar@us.ibm.com>
+M: Andy Gospodarek <andy@greyhouse.net>
 L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/bonding/
 S: Supported
@@ -1692,6 +1702,13 @@ M: Andy Whitcroft <apw@canonical.com>
 S: Supported
 F: scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M: Harry Wei <harryxiyou@gmail.com>
+L: xiyoulinuxkernelgroup@googlegroups.com
+L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
 M: Vasanthy Kolluri <vkolluri@cisco.com>
 M: Roopa Prabhu <roprabhu@cisco.com>
@@ -2026,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
 F: drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L: dccp@vger.kernel.org
 W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S: Maintained
@@ -2126,6 +2143,7 @@ S: Supported
 F: fs/dlm/
 
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
+M: Vinod Koul <vinod.koul@intel.com>
 M: Dan Williams <dan.j.williams@intel.com>
 S: Supported
 F: drivers/dma/
@@ -2872,7 +2890,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com>
 L: lm-sensors@lm-sensors.org
 W: http://www.lm-sensors.org/
 T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
-T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S: Maintained
 F: Documentation/hwmon/
@@ -3512,7 +3529,7 @@ F: drivers/hwmon/jc42.c
3512F: Documentation/hwmon/jc42 3529F: Documentation/hwmon/jc42
3513 3530
3514JFS FILESYSTEM 3531JFS FILESYSTEM
3515M: Dave Kleikamp <shaggy@linux.vnet.ibm.com> 3532M: Dave Kleikamp <shaggy@kernel.org>
3516L: jfs-discussion@lists.sourceforge.net 3533L: jfs-discussion@lists.sourceforge.net
3517W: http://jfs.sourceforge.net/ 3534W: http://jfs.sourceforge.net/
3518T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git 3535T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4275,10 +4292,7 @@ S: Maintained
4275F: net/sched/sch_netem.c 4292F: net/sched/sch_netem.c
4276 4293
4277NETERION 10GbE DRIVERS (s2io/vxge) 4294NETERION 10GbE DRIVERS (s2io/vxge)
4278M: Ramkrishna Vepa <ramkrishna.vepa@exar.com> 4295M: Jon Mason <jdmason@kudzu.us>
4279M: Sivakumar Subramani <sivakumar.subramani@exar.com>
4280M: Sreenivasa Honnur <sreenivasa.honnur@exar.com>
4281M: Jon Mason <jon.mason@exar.com>
4282L: netdev@vger.kernel.org 4296L: netdev@vger.kernel.org
4283W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous 4297W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
4284W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous 4298W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -5164,6 +5178,7 @@ F: drivers/char/random.c
5164 5178
5165RAPIDIO SUBSYSTEM 5179RAPIDIO SUBSYSTEM
5166M: Matt Porter <mporter@kernel.crashing.org> 5180M: Matt Porter <mporter@kernel.crashing.org>
5181M: Alexandre Bounine <alexandre.bounine@idt.com>
5167S: Maintained 5182S: Maintained
5168F: drivers/rapidio/ 5183F: drivers/rapidio/
5169 5184
@@ -5266,7 +5281,7 @@ S: Maintained
5266F: drivers/net/wireless/rtl818x/rtl8180/ 5281F: drivers/net/wireless/rtl818x/rtl8180/
5267 5282
5268RTL8187 WIRELESS DRIVER 5283RTL8187 WIRELESS DRIVER
5269M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> 5284M: Herton Ronaldo Krzesinski <herton@canonical.com>
5270M: Hin-Tak Leung <htl10@users.sourceforge.net> 5285M: Hin-Tak Leung <htl10@users.sourceforge.net>
5271M: Larry Finger <Larry.Finger@lwfinger.net> 5286M: Larry Finger <Larry.Finger@lwfinger.net>
5272L: linux-wireless@vger.kernel.org 5287L: linux-wireless@vger.kernel.org
@@ -6104,7 +6119,7 @@ S: Maintained
6104F: security/tomoyo/ 6119F: security/tomoyo/
6105 6120
6106TOPSTAR LAPTOP EXTRAS DRIVER 6121TOPSTAR LAPTOP EXTRAS DRIVER
6107M: Herton Ronaldo Krzesinski <herton@mandriva.com.br> 6122M: Herton Ronaldo Krzesinski <herton@canonical.com>
6108L: platform-driver-x86@vger.kernel.org 6123L: platform-driver-x86@vger.kernel.org
6109S: Maintained 6124S: Maintained
6110F: drivers/platform/x86/topstar-laptop.c 6125F: drivers/platform/x86/topstar-laptop.c
diff --git a/Makefile b/Makefile
index c9c8c8fd2591..d6592b63c8cb 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 2 1VERSION = 2
2PATCHLEVEL = 6 2PATCHLEVEL = 6
3SUBLEVEL = 38 3SUBLEVEL = 38
4EXTRAVERSION = -rc4 4EXTRAVERSION =
5NAME = Flesh-Eating Bats with Fangs 5NAME = Flesh-Eating Bats with Fangs
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d480141..cc31bec2e316 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -11,6 +11,7 @@ config ALPHA
11 select HAVE_GENERIC_HARDIRQS 11 select HAVE_GENERIC_HARDIRQS
12 select GENERIC_IRQ_PROBE 12 select GENERIC_IRQ_PROBE
13 select AUTO_IRQ_AFFINITY if SMP 13 select AUTO_IRQ_AFFINITY if SMP
14 select GENERIC_HARDIRQS_NO_DEPRECATED
14 help 15 help
15 The Alpha is a 64-bit general-purpose processor designed and 16 The Alpha is a 64-bit general-purpose processor designed and
16 marketed by the Digital Equipment Corporation of blessed memory, 17 marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 9ab234f48dd8..a19d60082299 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
44 44
45int irq_select_affinity(unsigned int irq) 45int irq_select_affinity(unsigned int irq)
46{ 46{
47 struct irq_desc *desc = irq_to_desc[irq]; 47 struct irq_data *data = irq_get_irq_data(irq);
48 struct irq_chip *chip;
48 static int last_cpu; 49 static int last_cpu;
49 int cpu = last_cpu + 1; 50 int cpu = last_cpu + 1;
50 51
51 if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq]) 52 if (!data)
53 return 1;
54 chip = irq_data_get_irq_chip(data);
55
56 if (!chip->irq_set_affinity || irq_user_affinity[irq])
52 return 1; 57 return 1;
53 58
54 while (!cpu_possible(cpu) || 59 while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
56 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0); 61 cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
57 last_cpu = cpu; 62 last_cpu = cpu;
58 63
59 cpumask_copy(desc->affinity, cpumask_of(cpu)); 64 cpumask_copy(data->affinity, cpumask_of(cpu));
60 get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu)); 65 chip->irq_set_affinity(data, cpumask_of(cpu), false);
61 return 0; 66 return 0;
62} 67}
63#endif /* CONFIG_SMP */ 68#endif /* CONFIG_SMP */
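
The irq.c hunk above is the core of the genirq conversion carried by this merge: instead of dereferencing struct irq_desc and the old chip hooks, code looks up struct irq_data and calls the irq_*-prefixed callbacks. A minimal sketch of that lookup pattern, assuming the 2.6.38 genirq API; route_affinity() and its error codes are illustrative, not a function in this patch:

#include <linux/errno.h>
#include <linux/irq.h>

/* Sketch only: push an affinity request through the irq_data-based API. */
static int route_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct irq_chip *chip;

	if (!data)
		return -ENODEV;

	chip = irq_data_get_irq_chip(data);
	if (!chip || !chip->irq_set_affinity)
		return -ENOSYS;

	/* The third argument ('force') is new with the irq_data callbacks. */
	return chip->irq_set_affinity(data, mask, false);
}
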
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 2d0679b60939..411ca11d0a18 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
228void __init 228void __init
229init_rtc_irq(void) 229init_rtc_irq(void)
230{ 230{
231 struct irq_desc *desc = irq_to_desc(RTC_IRQ); 231 set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
232 232 handle_simple_irq, "RTC");
233 if (desc) { 233 setup_irq(RTC_IRQ, &timer_irqaction);
234 desc->status |= IRQ_DISABLED;
235 set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
236 handle_simple_irq, "RTC");
237 setup_irq(RTC_IRQ, &timer_irqaction);
238 }
239} 234}
240 235
241/* Dummy irqactions. */ 236/* Dummy irqactions. */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 956ea0ed1694..c7cc9813e45f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
33} 33}
34 34
35inline void 35inline void
36i8259a_enable_irq(unsigned int irq) 36i8259a_enable_irq(struct irq_data *d)
37{ 37{
38 spin_lock(&i8259_irq_lock); 38 spin_lock(&i8259_irq_lock);
39 i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 39 i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
40 spin_unlock(&i8259_irq_lock); 40 spin_unlock(&i8259_irq_lock);
41} 41}
42 42
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
47} 47}
48 48
49void 49void
50i8259a_disable_irq(unsigned int irq) 50i8259a_disable_irq(struct irq_data *d)
51{ 51{
52 spin_lock(&i8259_irq_lock); 52 spin_lock(&i8259_irq_lock);
53 __i8259a_disable_irq(irq); 53 __i8259a_disable_irq(d->irq);
54 spin_unlock(&i8259_irq_lock); 54 spin_unlock(&i8259_irq_lock);
55} 55}
56 56
57void 57void
58i8259a_mask_and_ack_irq(unsigned int irq) 58i8259a_mask_and_ack_irq(struct irq_data *d)
59{ 59{
60 unsigned int irq = d->irq;
61
60 spin_lock(&i8259_irq_lock); 62 spin_lock(&i8259_irq_lock);
61 __i8259a_disable_irq(irq); 63 __i8259a_disable_irq(irq);
62 64
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
71 73
72struct irq_chip i8259a_irq_type = { 74struct irq_chip i8259a_irq_type = {
73 .name = "XT-PIC", 75 .name = "XT-PIC",
74 .unmask = i8259a_enable_irq, 76 .irq_unmask = i8259a_enable_irq,
75 .mask = i8259a_disable_irq, 77 .irq_mask = i8259a_disable_irq,
76 .mask_ack = i8259a_mask_and_ack_irq, 78 .irq_mask_ack = i8259a_mask_and_ack_irq,
77}; 79};
78 80
79void __init 81void __init
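
Every Alpha controller touched below follows the same shape as this i8259a conversion: the deprecated .unmask/.mask/.mask_ack hooks become .irq_unmask/.irq_mask/.irq_mask_ack and receive a struct irq_data * from which the Linux irq number is read. A hedged template of that shape (the foo_* names are placeholders, not code from this patch):

#include <linux/irq.h>

/* Placeholder handlers: mask/unmask d->irq in the controller hardware. */
static void foo_mask_irq(struct irq_data *d)
{
	/* e.g. set the bit for d->irq in a cached mask and write it out */
}

static void foo_unmask_irq(struct irq_data *d)
{
	/* e.g. clear the bit for d->irq and write the mask out */
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_unmask	= foo_unmask_irq,
	.irq_mask	= foo_mask_irq,
	.irq_mask_ack	= foo_mask_irq,
};
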
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7386f1..d507a234b05d 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
31 31
32extern void common_init_isa_dma(void); 32extern void common_init_isa_dma(void);
33 33
34extern void i8259a_enable_irq(unsigned int); 34extern void i8259a_enable_irq(struct irq_data *d);
35extern void i8259a_disable_irq(unsigned int); 35extern void i8259a_disable_irq(struct irq_data *d);
36extern void i8259a_mask_and_ack_irq(unsigned int); 36extern void i8259a_mask_and_ack_irq(struct irq_data *d);
37extern unsigned int i8259a_startup_irq(unsigned int);
38extern void i8259a_end_irq(unsigned int);
39extern struct irq_chip i8259a_irq_type; 37extern struct irq_chip i8259a_irq_type;
40extern void init_i8259a_irqs(void); 38extern void init_i8259a_irqs(void);
41 39
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 2863458c853e..b30227fa7f5f 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
29} 29}
30 30
31static inline void 31static inline void
32pyxis_enable_irq(unsigned int irq) 32pyxis_enable_irq(struct irq_data *d)
33{ 33{
34 pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 34 pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
35} 35}
36 36
37static void 37static void
38pyxis_disable_irq(unsigned int irq) 38pyxis_disable_irq(struct irq_data *d)
39{ 39{
40 pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 40 pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
41} 41}
42 42
43static void 43static void
44pyxis_mask_and_ack_irq(unsigned int irq) 44pyxis_mask_and_ack_irq(struct irq_data *d)
45{ 45{
46 unsigned long bit = 1UL << (irq - 16); 46 unsigned long bit = 1UL << (d->irq - 16);
47 unsigned long mask = cached_irq_mask &= ~bit; 47 unsigned long mask = cached_irq_mask &= ~bit;
48 48
49 /* Disable the interrupt. */ 49 /* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
58 58
59static struct irq_chip pyxis_irq_type = { 59static struct irq_chip pyxis_irq_type = {
60 .name = "PYXIS", 60 .name = "PYXIS",
61 .mask_ack = pyxis_mask_and_ack_irq, 61 .irq_mask_ack = pyxis_mask_and_ack_irq,
62 .mask = pyxis_disable_irq, 62 .irq_mask = pyxis_disable_irq,
63 .unmask = pyxis_enable_irq, 63 .irq_unmask = pyxis_enable_irq,
64}; 64};
65 65
66void 66void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
103 if ((ignore_mask >> i) & 1) 103 if ((ignore_mask >> i) & 1)
104 continue; 104 continue;
105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq); 105 set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
106 irq_to_desc(i)->status |= IRQ_LEVEL; 106 irq_set_status_flags(i, IRQ_LEVEL);
107 } 107 }
108 108
109 setup_irq(16+7, &isa_cascade_irqaction); 109 setup_irq(16+7, &isa_cascade_irqaction);
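
The init_pyxis_irqs() hunk also shows the second recurring change: open-coded irq_to_desc(i)->status |= IRQ_LEVEL becomes irq_set_status_flags(i, IRQ_LEVEL), called after the chip and flow handler are installed. A sketch of the post-conversion registration loop, assuming a chip like the one sketched after the i8259a hunk (foo_irq_chip and the irq range are illustrative):

#include <linux/irq.h>

extern struct irq_chip foo_irq_chip;	/* hypothetical chip, defined elsewhere */

/* Sketch: register a range of level-triggered irqs without touching irq_desc. */
static void __init foo_init_irqs(unsigned int first, unsigned int last)
{
	unsigned int i;

	for (i = first; i <= last; ++i) {
		set_irq_chip_and_handler(i, &foo_irq_chip, handle_level_irq);
		irq_set_status_flags(i, IRQ_LEVEL);
	}
}
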
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0e57e828b413..82a47bba41c4 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -18,27 +18,27 @@
18DEFINE_SPINLOCK(srm_irq_lock); 18DEFINE_SPINLOCK(srm_irq_lock);
19 19
20static inline void 20static inline void
21srm_enable_irq(unsigned int irq) 21srm_enable_irq(struct irq_data *d)
22{ 22{
23 spin_lock(&srm_irq_lock); 23 spin_lock(&srm_irq_lock);
24 cserve_ena(irq - 16); 24 cserve_ena(d->irq - 16);
25 spin_unlock(&srm_irq_lock); 25 spin_unlock(&srm_irq_lock);
26} 26}
27 27
28static void 28static void
29srm_disable_irq(unsigned int irq) 29srm_disable_irq(struct irq_data *d)
30{ 30{
31 spin_lock(&srm_irq_lock); 31 spin_lock(&srm_irq_lock);
32 cserve_dis(irq - 16); 32 cserve_dis(d->irq - 16);
33 spin_unlock(&srm_irq_lock); 33 spin_unlock(&srm_irq_lock);
34} 34}
35 35
36/* Handle interrupts from the SRM, assuming no additional weirdness. */ 36/* Handle interrupts from the SRM, assuming no additional weirdness. */
37static struct irq_chip srm_irq_type = { 37static struct irq_chip srm_irq_type = {
38 .name = "SRM", 38 .name = "SRM",
39 .unmask = srm_enable_irq, 39 .irq_unmask = srm_enable_irq,
40 .mask = srm_disable_irq, 40 .irq_mask = srm_disable_irq,
41 .mask_ack = srm_disable_irq, 41 .irq_mask_ack = srm_disable_irq,
42}; 42};
43 43
44void __init 44void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
52 if (i < 64 && ((ignore_mask >> i) & 1)) 52 if (i < 64 && ((ignore_mask >> i) & 1))
53 continue; 53 continue;
54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq); 54 set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
55 irq_to_desc(i)->status |= IRQ_LEVEL; 55 irq_set_status_flags(i, IRQ_LEVEL);
56 } 56 }
57} 57}
58 58
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 7bef61768236..88d95e872f55 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
44} 44}
45 45
46static inline void 46static inline void
47alcor_enable_irq(unsigned int irq) 47alcor_enable_irq(struct irq_data *d)
48{ 48{
49 alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 49 alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
50} 50}
51 51
52static void 52static void
53alcor_disable_irq(unsigned int irq) 53alcor_disable_irq(struct irq_data *d)
54{ 54{
55 alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 55 alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
56} 56}
57 57
58static void 58static void
59alcor_mask_and_ack_irq(unsigned int irq) 59alcor_mask_and_ack_irq(struct irq_data *d)
60{ 60{
61 alcor_disable_irq(irq); 61 alcor_disable_irq(d);
62 62
63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 63 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
64 *(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb(); 64 *(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
65 *(vuip)GRU_INT_CLEAR = 0; mb(); 65 *(vuip)GRU_INT_CLEAR = 0; mb();
66} 66}
67 67
68static void 68static void
69alcor_isa_mask_and_ack_irq(unsigned int irq) 69alcor_isa_mask_and_ack_irq(struct irq_data *d)
70{ 70{
71 i8259a_mask_and_ack_irq(irq); 71 i8259a_mask_and_ack_irq(d);
72 72
73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */ 73 /* On ALCOR/XLT, need to dismiss interrupt via GRU. */
74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb(); 74 *(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
77 77
78static struct irq_chip alcor_irq_type = { 78static struct irq_chip alcor_irq_type = {
79 .name = "ALCOR", 79 .name = "ALCOR",
80 .unmask = alcor_enable_irq, 80 .irq_unmask = alcor_enable_irq,
81 .mask = alcor_disable_irq, 81 .irq_mask = alcor_disable_irq,
82 .mask_ack = alcor_mask_and_ack_irq, 82 .irq_mask_ack = alcor_mask_and_ack_irq,
83}; 83};
84 84
85static void 85static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
126 if (i >= 16+20 && i <= 16+30) 126 if (i >= 16+20 && i <= 16+30)
127 continue; 127 continue;
128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq); 128 set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
129 irq_to_desc(i)->status |= IRQ_LEVEL; 129 irq_set_status_flags(i, IRQ_LEVEL);
130 } 130 }
131 i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq; 131 i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
132 132
133 init_i8259a_irqs(); 133 init_i8259a_irqs();
134 common_init_isa_dma(); 134 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b0c916493aea..57eb6307bc27 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
46} 46}
47 47
48static inline void 48static inline void
49cabriolet_enable_irq(unsigned int irq) 49cabriolet_enable_irq(struct irq_data *d)
50{ 50{
51 cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq)); 51 cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
52} 52}
53 53
54static void 54static void
55cabriolet_disable_irq(unsigned int irq) 55cabriolet_disable_irq(struct irq_data *d)
56{ 56{
57 cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq); 57 cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
58} 58}
59 59
60static struct irq_chip cabriolet_irq_type = { 60static struct irq_chip cabriolet_irq_type = {
61 .name = "CABRIOLET", 61 .name = "CABRIOLET",
62 .unmask = cabriolet_enable_irq, 62 .irq_unmask = cabriolet_enable_irq,
63 .mask = cabriolet_disable_irq, 63 .irq_mask = cabriolet_disable_irq,
64 .mask_ack = cabriolet_disable_irq, 64 .irq_mask_ack = cabriolet_disable_irq,
65}; 65};
66 66
67static void 67static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
107 for (i = 16; i < 35; ++i) { 107 for (i = 16; i < 35; ++i) {
108 set_irq_chip_and_handler(i, &cabriolet_irq_type, 108 set_irq_chip_and_handler(i, &cabriolet_irq_type,
109 handle_level_irq); 109 handle_level_irq);
110 irq_to_desc(i)->status |= IRQ_LEVEL; 110 irq_set_status_flags(i, IRQ_LEVEL);
111 } 111 }
112 } 112 }
113 113
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index edad5f759ccd..481df4ecb651 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
98} 98}
99 99
100static void 100static void
101dp264_enable_irq(unsigned int irq) 101dp264_enable_irq(struct irq_data *d)
102{ 102{
103 spin_lock(&dp264_irq_lock); 103 spin_lock(&dp264_irq_lock);
104 cached_irq_mask |= 1UL << irq; 104 cached_irq_mask |= 1UL << d->irq;
105 tsunami_update_irq_hw(cached_irq_mask); 105 tsunami_update_irq_hw(cached_irq_mask);
106 spin_unlock(&dp264_irq_lock); 106 spin_unlock(&dp264_irq_lock);
107} 107}
108 108
109static void 109static void
110dp264_disable_irq(unsigned int irq) 110dp264_disable_irq(struct irq_data *d)
111{ 111{
112 spin_lock(&dp264_irq_lock); 112 spin_lock(&dp264_irq_lock);
113 cached_irq_mask &= ~(1UL << irq); 113 cached_irq_mask &= ~(1UL << d->irq);
114 tsunami_update_irq_hw(cached_irq_mask); 114 tsunami_update_irq_hw(cached_irq_mask);
115 spin_unlock(&dp264_irq_lock); 115 spin_unlock(&dp264_irq_lock);
116} 116}
117 117
118static void 118static void
119clipper_enable_irq(unsigned int irq) 119clipper_enable_irq(struct irq_data *d)
120{ 120{
121 spin_lock(&dp264_irq_lock); 121 spin_lock(&dp264_irq_lock);
122 cached_irq_mask |= 1UL << (irq - 16); 122 cached_irq_mask |= 1UL << (d->irq - 16);
123 tsunami_update_irq_hw(cached_irq_mask); 123 tsunami_update_irq_hw(cached_irq_mask);
124 spin_unlock(&dp264_irq_lock); 124 spin_unlock(&dp264_irq_lock);
125} 125}
126 126
127static void 127static void
128clipper_disable_irq(unsigned int irq) 128clipper_disable_irq(struct irq_data *d)
129{ 129{
130 spin_lock(&dp264_irq_lock); 130 spin_lock(&dp264_irq_lock);
131 cached_irq_mask &= ~(1UL << (irq - 16)); 131 cached_irq_mask &= ~(1UL << (d->irq - 16));
132 tsunami_update_irq_hw(cached_irq_mask); 132 tsunami_update_irq_hw(cached_irq_mask);
133 spin_unlock(&dp264_irq_lock); 133 spin_unlock(&dp264_irq_lock);
134} 134}
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
149} 149}
150 150
151static int 151static int
152dp264_set_affinity(unsigned int irq, const struct cpumask *affinity) 152dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
153{ 153 bool force)
154{
154 spin_lock(&dp264_irq_lock); 155 spin_lock(&dp264_irq_lock);
155 cpu_set_irq_affinity(irq, *affinity); 156 cpu_set_irq_affinity(d->irq, *affinity);
156 tsunami_update_irq_hw(cached_irq_mask); 157 tsunami_update_irq_hw(cached_irq_mask);
157 spin_unlock(&dp264_irq_lock); 158 spin_unlock(&dp264_irq_lock);
158 159
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
160} 161}
161 162
162static int 163static int
163clipper_set_affinity(unsigned int irq, const struct cpumask *affinity) 164clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
164{ 165 bool force)
166{
165 spin_lock(&dp264_irq_lock); 167 spin_lock(&dp264_irq_lock);
166 cpu_set_irq_affinity(irq - 16, *affinity); 168 cpu_set_irq_affinity(d->irq - 16, *affinity);
167 tsunami_update_irq_hw(cached_irq_mask); 169 tsunami_update_irq_hw(cached_irq_mask);
168 spin_unlock(&dp264_irq_lock); 170 spin_unlock(&dp264_irq_lock);
169 171
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
171} 173}
172 174
173static struct irq_chip dp264_irq_type = { 175static struct irq_chip dp264_irq_type = {
174 .name = "DP264", 176 .name = "DP264",
175 .unmask = dp264_enable_irq, 177 .irq_unmask = dp264_enable_irq,
176 .mask = dp264_disable_irq, 178 .irq_mask = dp264_disable_irq,
177 .mask_ack = dp264_disable_irq, 179 .irq_mask_ack = dp264_disable_irq,
178 .set_affinity = dp264_set_affinity, 180 .irq_set_affinity = dp264_set_affinity,
179}; 181};
180 182
181static struct irq_chip clipper_irq_type = { 183static struct irq_chip clipper_irq_type = {
182 .name = "CLIPPER", 184 .name = "CLIPPER",
183 .unmask = clipper_enable_irq, 185 .irq_unmask = clipper_enable_irq,
184 .mask = clipper_disable_irq, 186 .irq_mask = clipper_disable_irq,
185 .mask_ack = clipper_disable_irq, 187 .irq_mask_ack = clipper_disable_irq,
186 .set_affinity = clipper_set_affinity, 188 .irq_set_affinity = clipper_set_affinity,
187}; 189};
188 190
189static void 191static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
268{ 270{
269 long i; 271 long i;
270 for (i = imin; i <= imax; ++i) { 272 for (i = imin; i <= imax; ++i) {
271 irq_to_desc(i)->status |= IRQ_LEVEL;
272 set_irq_chip_and_handler(i, ops, handle_level_irq); 273 set_irq_chip_and_handler(i, ops, handle_level_irq);
274 irq_set_status_flags(i, IRQ_LEVEL);
273 } 275 }
274} 276}
275 277
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ae5f29d127b0..402e908ffb3e 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
44} 44}
45 45
46static inline void 46static inline void
47eb64p_enable_irq(unsigned int irq) 47eb64p_enable_irq(struct irq_data *d)
48{ 48{
49 eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq)); 49 eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
50} 50}
51 51
52static void 52static void
53eb64p_disable_irq(unsigned int irq) 53eb64p_disable_irq(struct irq_data *d)
54{ 54{
55 eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq); 55 eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
56} 56}
57 57
58static struct irq_chip eb64p_irq_type = { 58static struct irq_chip eb64p_irq_type = {
59 .name = "EB64P", 59 .name = "EB64P",
60 .unmask = eb64p_enable_irq, 60 .irq_unmask = eb64p_enable_irq,
61 .mask = eb64p_disable_irq, 61 .irq_mask = eb64p_disable_irq,
62 .mask_ack = eb64p_disable_irq, 62 .irq_mask_ack = eb64p_disable_irq,
63}; 63};
64 64
65static void 65static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
118 init_i8259a_irqs(); 118 init_i8259a_irqs();
119 119
120 for (i = 16; i < 32; ++i) { 120 for (i = 16; i < 32; ++i) {
121 irq_to_desc(i)->status |= IRQ_LEVEL;
122 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq); 121 set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
123 } 122 irq_set_status_flags(i, IRQ_LEVEL);
123 }
124 124
125 common_init_isa_dma(); 125 common_init_isa_dma();
126 setup_irq(16+5, &isa_cascade_irqaction); 126 setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 1121bc5c6c6c..0b44a54c1522 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
51} 51}
52 52
53static inline void 53static inline void
54eiger_enable_irq(unsigned int irq) 54eiger_enable_irq(struct irq_data *d)
55{ 55{
56 unsigned int irq = d->irq;
56 unsigned long mask; 57 unsigned long mask;
57 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 58 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
58 eiger_update_irq_hw(irq, mask); 59 eiger_update_irq_hw(irq, mask);
59} 60}
60 61
61static void 62static void
62eiger_disable_irq(unsigned int irq) 63eiger_disable_irq(struct irq_data *d)
63{ 64{
65 unsigned int irq = d->irq;
64 unsigned long mask; 66 unsigned long mask;
65 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 67 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
66 eiger_update_irq_hw(irq, mask); 68 eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
68 70
69static struct irq_chip eiger_irq_type = { 71static struct irq_chip eiger_irq_type = {
70 .name = "EIGER", 72 .name = "EIGER",
71 .unmask = eiger_enable_irq, 73 .irq_unmask = eiger_enable_irq,
72 .mask = eiger_disable_irq, 74 .irq_mask = eiger_disable_irq,
73 .mask_ack = eiger_disable_irq, 75 .irq_mask_ack = eiger_disable_irq,
74}; 76};
75 77
76static void 78static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
136 init_i8259a_irqs(); 138 init_i8259a_irqs();
137 139
138 for (i = 16; i < 128; ++i) { 140 for (i = 16; i < 128; ++i) {
139 irq_to_desc(i)->status |= IRQ_LEVEL;
140 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq); 141 set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
142 irq_set_status_flags(i, IRQ_LEVEL);
141 } 143 }
142} 144}
143 145
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 34f55e03d331..00341b75c8b2 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -63,34 +63,34 @@
63 */ 63 */
64 64
65static void 65static void
66jensen_local_enable(unsigned int irq) 66jensen_local_enable(struct irq_data *d)
67{ 67{
68 /* the parport is really hw IRQ 1, silly Jensen. */ 68 /* the parport is really hw IRQ 1, silly Jensen. */
69 if (irq == 7) 69 if (d->irq == 7)
70 i8259a_enable_irq(1); 70 i8259a_enable_irq(d);
71} 71}
72 72
73static void 73static void
74jensen_local_disable(unsigned int irq) 74jensen_local_disable(struct irq_data *d)
75{ 75{
76 /* the parport is really hw IRQ 1, silly Jensen. */ 76 /* the parport is really hw IRQ 1, silly Jensen. */
77 if (irq == 7) 77 if (d->irq == 7)
78 i8259a_disable_irq(1); 78 i8259a_disable_irq(d);
79} 79}
80 80
81static void 81static void
82jensen_local_mask_ack(unsigned int irq) 82jensen_local_mask_ack(struct irq_data *d)
83{ 83{
84 /* the parport is really hw IRQ 1, silly Jensen. */ 84 /* the parport is really hw IRQ 1, silly Jensen. */
85 if (irq == 7) 85 if (d->irq == 7)
86 i8259a_mask_and_ack_irq(1); 86 i8259a_mask_and_ack_irq(d);
87} 87}
88 88
89static struct irq_chip jensen_local_irq_type = { 89static struct irq_chip jensen_local_irq_type = {
90 .name = "LOCAL", 90 .name = "LOCAL",
91 .unmask = jensen_local_enable, 91 .irq_unmask = jensen_local_enable,
92 .mask = jensen_local_disable, 92 .irq_mask = jensen_local_disable,
93 .mask_ack = jensen_local_mask_ack, 93 .irq_mask_ack = jensen_local_mask_ack,
94}; 94};
95 95
96static void 96static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 2bfc9f1b1ddc..e61910734e41 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
104} 104}
105 105
106static void 106static void
107io7_enable_irq(unsigned int irq) 107io7_enable_irq(struct irq_data *d)
108{ 108{
109 volatile unsigned long *ctl; 109 volatile unsigned long *ctl;
110 unsigned int irq = d->irq;
110 struct io7 *io7; 111 struct io7 *io7;
111 112
112 ctl = io7_get_irq_ctl(irq, &io7); 113 ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
115 __func__, irq); 116 __func__, irq);
116 return; 117 return;
117 } 118 }
118 119
119 spin_lock(&io7->irq_lock); 120 spin_lock(&io7->irq_lock);
120 *ctl |= 1UL << 24; 121 *ctl |= 1UL << 24;
121 mb(); 122 mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
124} 125}
125 126
126static void 127static void
127io7_disable_irq(unsigned int irq) 128io7_disable_irq(struct irq_data *d)
128{ 129{
129 volatile unsigned long *ctl; 130 volatile unsigned long *ctl;
131 unsigned int irq = d->irq;
130 struct io7 *io7; 132 struct io7 *io7;
131 133
132 ctl = io7_get_irq_ctl(irq, &io7); 134 ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
135 __func__, irq); 137 __func__, irq);
136 return; 138 return;
137 } 139 }
138 140
139 spin_lock(&io7->irq_lock); 141 spin_lock(&io7->irq_lock);
140 *ctl &= ~(1UL << 24); 142 *ctl &= ~(1UL << 24);
141 mb(); 143 mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
144} 146}
145 147
146static void 148static void
147marvel_irq_noop(unsigned int irq) 149marvel_irq_noop(struct irq_data *d)
148{ 150{
149 return; 151 return;
150}
151
152static unsigned int
153marvel_irq_noop_return(unsigned int irq)
154{
155 return 0;
156} 152}
157 153
158static struct irq_chip marvel_legacy_irq_type = { 154static struct irq_chip marvel_legacy_irq_type = {
159 .name = "LEGACY", 155 .name = "LEGACY",
160 .mask = marvel_irq_noop, 156 .irq_mask = marvel_irq_noop,
161 .unmask = marvel_irq_noop, 157 .irq_unmask = marvel_irq_noop,
162}; 158};
163 159
164static struct irq_chip io7_lsi_irq_type = { 160static struct irq_chip io7_lsi_irq_type = {
165 .name = "LSI", 161 .name = "LSI",
166 .unmask = io7_enable_irq, 162 .irq_unmask = io7_enable_irq,
167 .mask = io7_disable_irq, 163 .irq_mask = io7_disable_irq,
168 .mask_ack = io7_disable_irq, 164 .irq_mask_ack = io7_disable_irq,
169}; 165};
170 166
171static struct irq_chip io7_msi_irq_type = { 167static struct irq_chip io7_msi_irq_type = {
172 .name = "MSI", 168 .name = "MSI",
173 .unmask = io7_enable_irq, 169 .irq_unmask = io7_enable_irq,
174 .mask = io7_disable_irq, 170 .irq_mask = io7_disable_irq,
175 .ack = marvel_irq_noop, 171 .irq_ack = marvel_irq_noop,
176}; 172};
177 173
178static void 174static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
280 276
281 /* Set up the lsi irqs. */ 277 /* Set up the lsi irqs. */
282 for (i = 0; i < 128; ++i) { 278 for (i = 0; i < 128; ++i) {
283 irq_to_desc(base + i)->status |= IRQ_LEVEL;
284 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq); 279 set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
280 irq_set_status_flags(i, IRQ_LEVEL);
 (note: the lsi loop installs the chip on base + i, so the flag should be set on base + i as well)
285 } 281 }
286 282
287 /* Disable the implemented irqs in hardware. */ 283 /* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
294 290
295 /* Set up the msi irqs. */ 291 /* Set up the msi irqs. */
296 for (i = 128; i < (128 + 512); ++i) { 292 for (i = 128; i < (128 + 512); ++i) {
297 irq_to_desc(base + i)->status |= IRQ_LEVEL;
298 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq); 293 set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
294 irq_set_status_flags(i, IRQ_LEVEL);
 (note: as in the lsi loop above, the msi irqs live at base + i, so the flag belongs on base + i)
299 } 295 }
300 296
301 for (i = 0; i < 16; ++i) 297 for (i = 0; i < 16; ++i)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index bcc1639e8efb..cf7f43dd3147 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
43} 43}
44 44
45static inline void 45static inline void
46mikasa_enable_irq(unsigned int irq) 46mikasa_enable_irq(struct irq_data *d)
47{ 47{
48 mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16)); 48 mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
49} 49}
50 50
51static void 51static void
52mikasa_disable_irq(unsigned int irq) 52mikasa_disable_irq(struct irq_data *d)
53{ 53{
54 mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16))); 54 mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
55} 55}
56 56
57static struct irq_chip mikasa_irq_type = { 57static struct irq_chip mikasa_irq_type = {
58 .name = "MIKASA", 58 .name = "MIKASA",
59 .unmask = mikasa_enable_irq, 59 .irq_unmask = mikasa_enable_irq,
60 .mask = mikasa_disable_irq, 60 .irq_mask = mikasa_disable_irq,
61 .mask_ack = mikasa_disable_irq, 61 .irq_mask_ack = mikasa_disable_irq,
62}; 62};
63 63
64static void 64static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
98 mikasa_update_irq_hw(0); 98 mikasa_update_irq_hw(0);
99 99
100 for (i = 16; i < 32; ++i) { 100 for (i = 16; i < 32; ++i) {
101 irq_to_desc(i)->status |= IRQ_LEVEL;
102 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq); 101 set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
102 irq_set_status_flags(i, IRQ_LEVEL);
103 } 103 }
104 104
105 init_i8259a_irqs(); 105 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index e88f4ae1260e..92bc188e94a9 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
48} 48}
49 49
50static void 50static void
51noritake_enable_irq(unsigned int irq) 51noritake_enable_irq(struct irq_data *d)
52{ 52{
53 noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16)); 53 noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
54} 54}
55 55
56static void 56static void
57noritake_disable_irq(unsigned int irq) 57noritake_disable_irq(struct irq_data *d)
58{ 58{
59 noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16))); 59 noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
60} 60}
61 61
62static struct irq_chip noritake_irq_type = { 62static struct irq_chip noritake_irq_type = {
63 .name = "NORITAKE", 63 .name = "NORITAKE",
64 .unmask = noritake_enable_irq, 64 .irq_unmask = noritake_enable_irq,
65 .mask = noritake_disable_irq, 65 .irq_mask = noritake_disable_irq,
66 .mask_ack = noritake_disable_irq, 66 .irq_mask_ack = noritake_disable_irq,
67}; 67};
68 68
69static void 69static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
127 outw(0, 0x54c); 127 outw(0, 0x54c);
128 128
129 for (i = 16; i < 48; ++i) { 129 for (i = 16; i < 48; ++i) {
130 irq_to_desc(i)->status |= IRQ_LEVEL;
131 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq); 130 set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
131 irq_set_status_flags(i, IRQ_LEVEL);
132 } 132 }
133 133
134 init_i8259a_irqs(); 134 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6a51364dd1cc..936d4140ed5f 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0)) 56 (((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
57 57
58static inline void 58static inline void
59rawhide_enable_irq(unsigned int irq) 59rawhide_enable_irq(struct irq_data *d)
60{ 60{
61 unsigned int mask, hose; 61 unsigned int mask, hose;
62 unsigned int irq = d->irq;
62 63
63 irq -= 16; 64 irq -= 16;
64 hose = irq / 24; 65 hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
76} 77}
77 78
78static void 79static void
79rawhide_disable_irq(unsigned int irq) 80rawhide_disable_irq(struct irq_data *d)
80{ 81{
81 unsigned int mask, hose; 82 unsigned int mask, hose;
83 unsigned int irq = d->irq;
82 84
83 irq -= 16; 85 irq -= 16;
84 hose = irq / 24; 86 hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
96} 98}
97 99
98static void 100static void
99rawhide_mask_and_ack_irq(unsigned int irq) 101rawhide_mask_and_ack_irq(struct irq_data *d)
100{ 102{
101 unsigned int mask, mask1, hose; 103 unsigned int mask, mask1, hose;
104 unsigned int irq = d->irq;
102 105
103 irq -= 16; 106 irq -= 16;
104 hose = irq / 24; 107 hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
123 126
124static struct irq_chip rawhide_irq_type = { 127static struct irq_chip rawhide_irq_type = {
125 .name = "RAWHIDE", 128 .name = "RAWHIDE",
126 .unmask = rawhide_enable_irq, 129 .irq_unmask = rawhide_enable_irq,
127 .mask = rawhide_disable_irq, 130 .irq_mask = rawhide_disable_irq,
128 .mask_ack = rawhide_mask_and_ack_irq, 131 .irq_mask_ack = rawhide_mask_and_ack_irq,
129}; 132};
130 133
131static void 134static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
177 } 180 }
178 181
179 for (i = 16; i < 128; ++i) { 182 for (i = 16; i < 128; ++i) {
180 irq_to_desc(i)->status |= IRQ_LEVEL;
181 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq); 183 set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
184 irq_set_status_flags(i, IRQ_LEVEL);
182 } 185 }
183 186
184 init_i8259a_irqs(); 187 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 89e7e37ec84c..cea22a62913b 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
47} 47}
48 48
49static inline void 49static inline void
50rx164_enable_irq(unsigned int irq) 50rx164_enable_irq(struct irq_data *d)
51{ 51{
52 rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16)); 52 rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
53} 53}
54 54
55static void 55static void
56rx164_disable_irq(unsigned int irq) 56rx164_disable_irq(struct irq_data *d)
57{ 57{
58 rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16))); 58 rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
59} 59}
60 60
61static struct irq_chip rx164_irq_type = { 61static struct irq_chip rx164_irq_type = {
62 .name = "RX164", 62 .name = "RX164",
63 .unmask = rx164_enable_irq, 63 .irq_unmask = rx164_enable_irq,
64 .mask = rx164_disable_irq, 64 .irq_mask = rx164_disable_irq,
65 .mask_ack = rx164_disable_irq, 65 .irq_mask_ack = rx164_disable_irq,
66}; 66};
67 67
68static void 68static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
99 99
100 rx164_update_irq_hw(0); 100 rx164_update_irq_hw(0);
101 for (i = 16; i < 40; ++i) { 101 for (i = 16; i < 40; ++i) {
102 irq_to_desc(i)->status |= IRQ_LEVEL;
103 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq); 102 set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
103 irq_set_status_flags(i, IRQ_LEVEL);
104 } 104 }
105 105
106 init_i8259a_irqs(); 106 init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 5c4423d1b06c..a349538aabc9 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
443/* GENERIC irq routines */ 443/* GENERIC irq routines */
444 444
445static inline void 445static inline void
446sable_lynx_enable_irq(unsigned int irq) 446sable_lynx_enable_irq(struct irq_data *d)
447{ 447{
448 unsigned long bit, mask; 448 unsigned long bit, mask;
449 449
450 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 450 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
451 spin_lock(&sable_lynx_irq_lock); 451 spin_lock(&sable_lynx_irq_lock);
452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit); 452 mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 453 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
459} 459}
460 460
461static void 461static void
462sable_lynx_disable_irq(unsigned int irq) 462sable_lynx_disable_irq(struct irq_data *d)
463{ 463{
464 unsigned long bit, mask; 464 unsigned long bit, mask;
465 465
466 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 466 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
467 spin_lock(&sable_lynx_irq_lock); 467 spin_lock(&sable_lynx_irq_lock);
468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 468 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 469 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
475} 475}
476 476
477static void 477static void
478sable_lynx_mask_and_ack_irq(unsigned int irq) 478sable_lynx_mask_and_ack_irq(struct irq_data *d)
479{ 479{
480 unsigned long bit, mask; 480 unsigned long bit, mask;
481 481
482 bit = sable_lynx_irq_swizzle->irq_to_mask[irq]; 482 bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
483 spin_lock(&sable_lynx_irq_lock); 483 spin_lock(&sable_lynx_irq_lock);
484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit; 484 mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask); 485 sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
489 489
490static struct irq_chip sable_lynx_irq_type = { 490static struct irq_chip sable_lynx_irq_type = {
491 .name = "SABLE/LYNX", 491 .name = "SABLE/LYNX",
492 .unmask = sable_lynx_enable_irq, 492 .irq_unmask = sable_lynx_enable_irq,
493 .mask = sable_lynx_disable_irq, 493 .irq_mask = sable_lynx_disable_irq,
494 .mask_ack = sable_lynx_mask_and_ack_irq, 494 .irq_mask_ack = sable_lynx_mask_and_ack_irq,
495}; 495};
496 496
497static void 497static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
518 long i; 518 long i;
519 519
520 for (i = 0; i < nr_of_irqs; ++i) { 520 for (i = 0; i < nr_of_irqs; ++i) {
521 irq_to_desc(i)->status |= IRQ_LEVEL;
522 set_irq_chip_and_handler(i, &sable_lynx_irq_type, 521 set_irq_chip_and_handler(i, &sable_lynx_irq_type,
523 handle_level_irq); 522 handle_level_irq);
523 irq_set_status_flags(i, IRQ_LEVEL);
524 } 524 }
525 525
526 common_init_isa_dma(); 526 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index f8a1e8a862fb..42a5331f13c4 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
45} 45}
46 46
47static inline void 47static inline void
48takara_enable_irq(unsigned int irq) 48takara_enable_irq(struct irq_data *d)
49{ 49{
50 unsigned int irq = d->irq;
50 unsigned long mask; 51 unsigned long mask;
51 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63))); 52 mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
52 takara_update_irq_hw(irq, mask); 53 takara_update_irq_hw(irq, mask);
53} 54}
54 55
55static void 56static void
56takara_disable_irq(unsigned int irq) 57takara_disable_irq(struct irq_data *d)
57{ 58{
59 unsigned int irq = d->irq;
58 unsigned long mask; 60 unsigned long mask;
59 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63)); 61 mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
60 takara_update_irq_hw(irq, mask); 62 takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
62 64
63static struct irq_chip takara_irq_type = { 65static struct irq_chip takara_irq_type = {
64 .name = "TAKARA", 66 .name = "TAKARA",
65 .unmask = takara_enable_irq, 67 .irq_unmask = takara_enable_irq,
66 .mask = takara_disable_irq, 68 .irq_mask = takara_disable_irq,
67 .mask_ack = takara_disable_irq, 69 .irq_mask_ack = takara_disable_irq,
68}; 70};
69 71
70static void 72static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
136 takara_update_irq_hw(i, -1); 138 takara_update_irq_hw(i, -1);
137 139
138 for (i = 16; i < 128; ++i) { 140 for (i = 16; i < 128; ++i) {
139 irq_to_desc(i)->status |= IRQ_LEVEL;
140 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq); 141 set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
142 irq_set_status_flags(i, IRQ_LEVEL);
141 } 143 }
142 144
143 common_init_isa_dma(); 145 common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index e02494bf5ef3..8c13a0c77830 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
112} 112}
113 113
114static inline void 114static inline void
115titan_enable_irq(unsigned int irq) 115titan_enable_irq(struct irq_data *d)
116{ 116{
117 unsigned int irq = d->irq;
117 spin_lock(&titan_irq_lock); 118 spin_lock(&titan_irq_lock);
118 titan_cached_irq_mask |= 1UL << (irq - 16); 119 titan_cached_irq_mask |= 1UL << (irq - 16);
119 titan_update_irq_hw(titan_cached_irq_mask); 120 titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
121} 122}
122 123
123static inline void 124static inline void
124titan_disable_irq(unsigned int irq) 125titan_disable_irq(struct irq_data *d)
125{ 126{
127 unsigned int irq = d->irq;
126 spin_lock(&titan_irq_lock); 128 spin_lock(&titan_irq_lock);
127 titan_cached_irq_mask &= ~(1UL << (irq - 16)); 129 titan_cached_irq_mask &= ~(1UL << (irq - 16));
128 titan_update_irq_hw(titan_cached_irq_mask); 130 titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
144} 146}
145 147
146static int 148static int
147titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) 149titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
150 bool force)
148{ 151{
152 unsigned int irq = d->irq;
149 spin_lock(&titan_irq_lock); 153 spin_lock(&titan_irq_lock);
150 titan_cpu_set_irq_affinity(irq - 16, *affinity); 154 titan_cpu_set_irq_affinity(irq - 16, *affinity);
151 titan_update_irq_hw(titan_cached_irq_mask); 155 titan_update_irq_hw(titan_cached_irq_mask);
@@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
175{ 179{
176 long i; 180 long i;
177 for (i = imin; i <= imax; ++i) { 181 for (i = imin; i <= imax; ++i) {
178 irq_to_desc(i)->status |= IRQ_LEVEL;
179 set_irq_chip_and_handler(i, ops, handle_level_irq); 182 set_irq_chip_and_handler(i, ops, handle_level_irq);
183 irq_set_status_flags(i, IRQ_LEVEL);
180 } 184 }
181} 185}
182 186
183static struct irq_chip titan_irq_type = { 187static struct irq_chip titan_irq_type = {
184 .name = "TITAN", 188 .name = "TITAN",
185 .unmask = titan_enable_irq, 189 .irq_unmask = titan_enable_irq,
186 .mask = titan_disable_irq, 190 .irq_mask = titan_disable_irq,
187 .mask_ack = titan_disable_irq, 191 .irq_mask_ack = titan_disable_irq,
188 .set_affinity = titan_set_irq_affinity, 192 .irq_set_affinity = titan_set_irq_affinity,
189}; 193};
190 194
191static irqreturn_t 195static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c
index eec52594d410..ca60a387ef0a 100644
--- a/arch/alpha/kernel/sys_wildfire.c
+++ b/arch/alpha/kernel/sys_wildfire.c
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void)
104} 104}
105 105
106static void 106static void
107wildfire_enable_irq(unsigned int irq) 107wildfire_enable_irq(struct irq_data *d)
108{ 108{
109 unsigned int irq = d->irq;
110
109 if (irq < 16) 111 if (irq < 16)
110 i8259a_enable_irq(irq); 112 i8259a_enable_irq(d);
111 113
112 spin_lock(&wildfire_irq_lock); 114 spin_lock(&wildfire_irq_lock);
113 set_bit(irq, &cached_irq_mask); 115 set_bit(irq, &cached_irq_mask);
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq)
116} 118}
117 119
118static void 120static void
119wildfire_disable_irq(unsigned int irq) 121wildfire_disable_irq(struct irq_data *d)
120{ 122{
123 unsigned int irq = d->irq;
124
121 if (irq < 16) 125 if (irq < 16)
122 i8259a_disable_irq(irq); 126 i8259a_disable_irq(d);
123 127
124 spin_lock(&wildfire_irq_lock); 128 spin_lock(&wildfire_irq_lock);
125 clear_bit(irq, &cached_irq_mask); 129 clear_bit(irq, &cached_irq_mask);
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq)
128} 132}
129 133
130static void 134static void
131wildfire_mask_and_ack_irq(unsigned int irq) 135wildfire_mask_and_ack_irq(struct irq_data *d)
132{ 136{
137 unsigned int irq = d->irq;
138
133 if (irq < 16) 139 if (irq < 16)
134 i8259a_mask_and_ack_irq(irq); 140 i8259a_mask_and_ack_irq(d);
135 141
136 spin_lock(&wildfire_irq_lock); 142 spin_lock(&wildfire_irq_lock);
137 clear_bit(irq, &cached_irq_mask); 143 clear_bit(irq, &cached_irq_mask);
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq)
141 147
142static struct irq_chip wildfire_irq_type = { 148static struct irq_chip wildfire_irq_type = {
143 .name = "WILDFIRE", 149 .name = "WILDFIRE",
144 .unmask = wildfire_enable_irq, 150 .irq_unmask = wildfire_enable_irq,
145 .mask = wildfire_disable_irq, 151 .irq_mask = wildfire_disable_irq,
146 .mask_ack = wildfire_mask_and_ack_irq, 152 .irq_mask_ack = wildfire_mask_and_ack_irq,
147}; 153};
148 154
149static void __init 155static void __init
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano)
177 for (i = 0; i < 16; ++i) { 183 for (i = 0; i < 16; ++i) {
178 if (i == 2) 184 if (i == 2)
179 continue; 185 continue;
180 irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
181 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 186 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
182 handle_level_irq); 187 handle_level_irq);
188 irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
183 } 189 }
184 190
185 irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL;
186 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, 191 set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type,
187 handle_level_irq); 192 handle_level_irq);
193 irq_set_status_flags(36 + irq_bias, IRQ_LEVEL);
188 for (i = 40; i < 64; ++i) { 194 for (i = 40; i < 64; ++i) {
189 irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL;
190 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, 195 set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type,
191 handle_level_irq); 196 handle_level_irq);
197 irq_set_status_flags(i + irq_bias, IRQ_LEVEL);
192 } 198 }
193 199
194 setup_irq(32+irq_bias, &isa_enable); 200 setup_irq(32+irq_bias, &isa_enable);
195} 201}
196 202
197static void __init 203static void __init
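
Boards such as wildfire that cascade through the i8259a show the last variant of the pattern: the handler pulls the Linux irq number out of irq_data for its own bookkeeping and passes the irq_data straight through to the parent chip helpers, which now also take struct irq_data * (see the irq_impl.h hunk above). A sketch, with foo_cascade_mask as a placeholder name:

#include <linux/irq.h>
#include "irq_impl.h"	/* arch/alpha/kernel: declares the i8259a helpers */

/* Sketch: converted handler that delegates ISA irqs to the i8259a helpers. */
static void foo_cascade_mask(struct irq_data *d)
{
	unsigned int irq = d->irq;

	if (irq < 16)
		i8259a_disable_irq(d);	/* parent helpers take irq_data now */

	/* mask 'irq' in the board-level controller here */
}
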
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5cff165b7eb0..166efa2a19cd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622
1177 visible impact on the overall performance or power consumption of the 1177 visible impact on the overall performance or power consumption of the
1178 processor. 1178 processor.
1179 1179
1180config ARM_ERRATA_751472
1181 bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
1182 depends on CPU_V7 && SMP
1183 help
1184 This option enables the workaround for the 751472 Cortex-A9 (prior
1185 to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
1186 completion of a following broadcasted operation if the second
1187 operation is received by a CPU before the ICIALLUIS has completed,
1188 potentially leading to corrupted entries in the cache or TLB.
1189
1190config ARM_ERRATA_753970
1191 bool "ARM errata: cache sync operation may be faulty"
1192 depends on CACHE_PL310
1193 help
1194 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
1195
1196 Under some condition the effect of cache sync operation on
1197 the store buffer still remains when the operation completes.
1198 This means that the store buffer is always asked to drain and
1199 this prevents it from merging any further writes. The workaround
1200 is to replace the normal offset of cache sync operation (0x730)
1201 by another offset targeting an unmapped PL310 register 0x740.
1202 This has the same effect as the cache sync operation: store buffer
1203 drain and waiting for all buffers empty.
1204
1180endmenu 1205endmenu
1181 1206
1182source "arch/arm/common/Kconfig" 1207source "arch/arm/common/Kconfig"
@@ -1391,7 +1416,7 @@ config AEABI
1391 1416
1392config OABI_COMPAT 1417config OABI_COMPAT
1393 bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)" 1418 bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
1394 depends on AEABI && EXPERIMENTAL 1419 depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
1395 default y 1420 default y
1396 help 1421 help
1397 This option preserves the old syscall interface along with the 1422 This option preserves the old syscall interface along with the
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1adfedd6..6f7b29294c80 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
15LDFLAGS_vmlinux += --be8 15LDFLAGS_vmlinux += --be8
16endif 16endif
17 17
18OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S 18OBJCOPYFLAGS :=-O binary -R .comment -S
19GZFLAGS :=-9 19GZFLAGS :=-9
20#KBUILD_CFLAGS +=-pipe 20#KBUILD_CFLAGS +=-pipe
21# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: 21# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index ab204db594d3..c6028967d336 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,7 @@
1font.c 1font.c
2piggy.gz 2lib1funcs.S
3piggy.gzip
4piggy.lzo
5piggy.lzma
6vmlinux
3vmlinux.lds 7vmlinux.lds
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig
index 778655f0257a..ea5ee4d067f3 100644
--- a/arch/arm/common/Kconfig
+++ b/arch/arm/common/Kconfig
@@ -6,6 +6,8 @@ config ARM_VIC
6 6
7config ARM_VIC_NR 7config ARM_VIC_NR
8 int 8 int
9 default 4 if ARCH_S5PV210
10 default 3 if ARCH_S5P6442 || ARCH_S5PC100
9 default 2 11 default 2
10 depends on ARM_VIC 12 depends on ARM_VIC
11 help 13 help
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e1735c..16bd48031583 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@
36#define L2X0_RAW_INTR_STAT 0x21C 36#define L2X0_RAW_INTR_STAT 0x21C
37#define L2X0_INTR_CLEAR 0x220 37#define L2X0_INTR_CLEAR 0x220
38#define L2X0_CACHE_SYNC 0x730 38#define L2X0_CACHE_SYNC 0x730
39#define L2X0_DUMMY_REG 0x740
39#define L2X0_INV_LINE_PA 0x770 40#define L2X0_INV_LINE_PA 0x770
40#define L2X0_INV_WAY 0x77C 41#define L2X0_INV_WAY 0x77C
41#define L2X0_CLEAN_LINE_PA 0x7B0 42#define L2X0_CLEAN_LINE_PA 0x7B0
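
The L2X0_DUMMY_REG offset added here is what the ARM_ERRATA_753970 option described in the Kconfig hunk above relies on: issuing the sync to the unmapped 0x740 offset gives the same drain-and-wait behaviour as the real cache sync register (0x730) without leaving the store buffer stuck in permanent drain. A hedged sketch of how such a sync helper can be gated, assuming the usual ioremap()ed PL310 base; this is an illustration, not necessarily the in-tree cache-l2x0.c code:

#include <linux/io.h>
#include <asm/hardware/cache-l2x0.h>

static void __iomem *l2x0_base;	/* assumed to be set up by the platform code */

static inline void l2x0_cache_sync_sketch(void)
{
#ifdef CONFIG_ARM_ERRATA_753970
	/* Erratum workaround: write to the unmapped dummy register instead. */
	writel_relaxed(0, l2x0_base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, l2x0_base + L2X0_CACHE_SYNC);
#endif
}
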
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index 721847dc68ab..e0d1c0cfa548 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -58,6 +58,9 @@
58 58
59static inline void sysctl_soft_reset(void __iomem *base) 59static inline void sysctl_soft_reset(void __iomem *base)
60{ 60{
61 /* switch to slow mode */
62 writel(0x2, base + SCCTRL);
63
61 /* writing any value to SCSYSSTAT reg will reset system */ 64 /* writing any value to SCSYSSTAT reg will reset system */
62 writel(0, base + SCSYSSTAT); 65 writel(0, base + SCSYSSTAT);
63} 66}
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 3a0893a76a3b..bf13b814c1b8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -15,10 +15,6 @@ struct meminfo;
15struct sys_timer; 15struct sys_timer;
16 16
17struct machine_desc { 17struct machine_desc {
18 /*
19 * Note! The first two elements are used
20 * by assembler code in head.S, head-common.S
21 */
22 unsigned int nr; /* architecture number */ 18 unsigned int nr; /* architecture number */
23 const char *name; /* architecture name */ 19 const char *name; /* architecture name */
24 unsigned long boot_params; /* tagged list */ 20 unsigned long boot_params; /* tagged list */
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 9763be04f77e..22de005f159c 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -10,6 +10,8 @@
10#ifndef _ASMARM_PGALLOC_H 10#ifndef _ASMARM_PGALLOC_H
11#define _ASMARM_PGALLOC_H 11#define _ASMARM_PGALLOC_H
12 12
13#include <linux/pagemap.h>
14
13#include <asm/domain.h> 15#include <asm/domain.h>
14#include <asm/pgtable-hwdef.h> 16#include <asm/pgtable-hwdef.h>
15#include <asm/processor.h> 17#include <asm/processor.h>
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd12..82dfe5d0c41e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
 #define __ASMARM_TLB_H
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
 
 #include <linux/pagemap.h>
+
+#define tlb_flush(tlb)	((void) tlb)
+
 #include <asm-generic/tlb.h>
 
 #else /* !CONFIG_MMU */
 
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb)	0
+#define FREE_PTE_NR		500
+#else
+#define tlb_fast_mode(tlb)	1
+#define FREE_PTE_NR		0
+#endif
 
 /*
  * TLB handling. This allows us to remove pages from the page
@@ -36,12 +54,58 @@
 struct mmu_gather {
 	struct mm_struct *mm;
 	unsigned int fullmm;
+	struct vm_area_struct *vma;
 	unsigned long range_start;
 	unsigned long range_end;
+	unsigned int nr;
+	struct page *pages[FREE_PTE_NR];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+/*
+ * This is unnecessarily complex. There's three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas. See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL. Additionally, page tables will be freed.
+ *  3. Unmapping argument pages. See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || !tlb->vma)
+		flush_tlb_mm(tlb->mm);
+	else if (tlb->range_end > 0) {
+		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+	if (!tlb->fullmm) {
+		if (addr < tlb->range_start)
+			tlb->range_start = addr;
+		if (addr + PAGE_SIZE > tlb->range_end)
+			tlb->range_end = addr + PAGE_SIZE;
+	}
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush(tlb);
+	if (!tlb_fast_mode(tlb)) {
+		free_pages_and_swap_cache(tlb->pages, tlb->nr);
+		tlb->nr = 0;
+	}
+}
+
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
+	tlb->vma = NULL;
+	tlb->nr = 0;
 
 	return tlb;
 }
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 {
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
+	tlb_add_flush(tlb, addr);
 }
 
 /*
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm) {
 		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+		tlb->vma = vma;
 		tlb->range_start = TASK_SIZE;
 		tlb->range_end = 0;
 	}
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 static inline void
 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+	if (!tlb->fullmm)
+		tlb_flush(tlb);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+	} else {
+		tlb->pages[tlb->nr++] = page;
+		if (tlb->nr >= FREE_PTE_NR)
+			tlb_flush_mmu(tlb);
+	}
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+	unsigned long addr)
+{
+	pgtable_page_dtor(pte);
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, pte);
 }
 
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
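
Note: the tlb.h hunks above are the deferred-freeing flavour of the mmu_gather pattern. On SMP (and on ARMv7, whose speculative prefetch can drag stale entries back into the TLB) unmapped pages are queued in tlb->pages[] and only released after the TLB flush, instead of being freed immediately. A minimal standalone C sketch of that batching idea follows; flush_all_tlbs() and release_page() are hypothetical stand-ins for the kernel's flush_tlb_mm()/flush_tlb_range() and free_page_and_swap_cache(), not real APIs.

	/* Illustrative sketch only -- not the kernel interface. */
	#define BATCH_MAX 500

	struct page;				/* opaque page descriptor */
	void flush_all_tlbs(void);		/* hypothetical TLB invalidation */
	void release_page(struct page *p);	/* hypothetical page free */

	struct gather_example {
		unsigned int nr;
		struct page *pages[BATCH_MAX];
	};

	static void gather_flush(struct gather_example *g)
	{
		flush_all_tlbs();			/* invalidate stale translations first */
		while (g->nr)
			release_page(g->pages[--g->nr]);	/* now safe to free */
	}

	static void gather_remove_page(struct gather_example *g, struct page *page)
	{
		g->pages[g->nr++] = page;		/* defer the free ... */
		if (g->nr >= BATCH_MAX)
			gather_flush(g);		/* ... until after the flush */
	}
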
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ce7378ea15a2..d2005de383b8 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
 #ifndef _ASMARM_TLBFLUSH_H
 #define _ASMARM_TLBFLUSH_H
 
-
-#ifndef CONFIG_MMU
-
-#define tlb_flush(tlb)	((void) tlb)
-
-#else /* CONFIG_MMU */
+#ifdef CONFIG_MMU
 
 #include <asm/glue.h>
 
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index c0225da3fb21..f06ff9feb0db 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,6 +391,7 @@ ENDPROC(__turn_mmu_on)
 
 
 #ifdef CONFIG_SMP_ON_UP
+	__INIT
 __fixup_smp:
 	and	r3, r9, #0x000f0000	@ architecture version
 	teq	r3, #0x000f0000		@ CPU ID supported?
@@ -415,18 +416,7 @@ __fixup_smp_on_up:
 	sub	r3, r0, r3
 	add	r4, r4, r3
 	add	r5, r5, r3
-2:	cmp	r4, r5
-	movhs	pc, lr
-	ldmia	r4!, {r0, r6}
- ARM(	str	r6, [r0, r3]	)
- THUMB(	add	r0, r0, r3	)
-#ifdef __ARMEB__
- THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
-#endif
- THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
- THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
- THUMB(	strh	r6, [r0]	)
-	b	2b
+	b	__do_fixup_smp_on_up
 ENDPROC(__fixup_smp)
 
 	.align
@@ -440,7 +430,31 @@ smp_on_up:
 	ALT_SMP(.long	1)
 	ALT_UP(.long	0)
 	.popsection
+#endif
 
+	.text
+__do_fixup_smp_on_up:
+	cmp	r4, r5
+	movhs	pc, lr
+	ldmia	r4!, {r0, r6}
+ ARM(	str	r6, [r0, r3]	)
+ THUMB(	add	r0, r0, r3	)
+#ifdef __ARMEB__
+ THUMB(	mov	r6, r6, ror #16	)	@ Convert word order for big-endian.
 #endif
+ THUMB(	strh	r6, [r0], #2	)	@ For Thumb-2, store as two halfwords
+ THUMB(	mov	r6, r6, lsr #16	)	@ to be robust against misaligned r3.
+ THUMB(	strh	r6, [r0]	)
+	b	__do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+	stmfd	sp!, {r4 - r6, lr}
+	mov	r4, r0
+	add	r5, r0, r1
+	mov	r3, #0
+	bl	__do_fixup_smp_on_up
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
 
 #include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f0467570..44b84fe6e1b0 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
 	u32 didr;
 
 	/* Do we implement the extended CPUID interface? */
-	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
-		pr_warning("CPUID feature registers not supported. "
-			   "Assuming v6 debug is present.\n");
+	if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+		"CPUID feature registers not supported. "
+		"Assuming v6 debug is present.\n"))
 		return ARM_DEBUG_ARCH_V6;
-	}
 
 	ARM_DBG_READ(c0, 0, didr);
 	return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
 	return debug_arch;
 }
 
+static int debug_arch_supported(void)
+{
+	u8 arch = get_debug_arch();
+	return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
 /* Determine number of BRP register available. */
 static int get_num_brp_resources(void)
 {
@@ -268,6 +273,9 @@ out:
 
 int hw_breakpoint_slots(int type)
 {
+	if (!debug_arch_supported())
+		return 0;
+
 	/*
 	 * We can be called early, so don't rely on
 	 * our static variables being initialised.
@@ -828,20 +836,33 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 /*
  * One-time initialisation.
  */
-static void reset_ctrl_regs(void *unused)
+static void reset_ctrl_regs(void *info)
 {
-	int i;
+	int i, cpu = smp_processor_id();
+	u32 dbg_power;
+	cpumask_t *cpumask = info;
 
 	/*
 	 * v7 debug contains save and restore registers so that debug state
-	 * can be maintained across low-power modes without leaving
-	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
-	 * we can write to the debug registers out of reset, so we must
-	 * unlock the OS Lock Access Register to avoid taking undefined
-	 * instruction exceptions later on.
+	 * can be maintained across low-power modes without leaving the debug
+	 * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+	 * the debug registers out of reset, so we must unlock the OS Lock
+	 * Access Register to avoid taking undefined instruction exceptions
+	 * later on.
 	 */
 	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
 		/*
+		 * Ensure sticky power-down is clear (i.e. debug logic is
+		 * powered up).
+		 */
+		asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power));
+		if ((dbg_power & 0x1) == 0) {
+			pr_warning("CPU %d debug is powered down!\n", cpu);
+			cpumask_or(cpumask, cpumask, cpumask_of(cpu));
+			return;
+		}
+
+		/*
 		 * Unconditionally clear the lock by writing a value
 		 * other than 0xC5ACCE55 to the access register.
 		 */
@@ -879,10 +900,11 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = {
 static int __init arch_hw_breakpoint_init(void)
 {
 	u32 dscr;
+	cpumask_t cpumask = { CPU_BITS_NONE };
 
 	debug_arch = get_debug_arch();
 
-	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+	if (!debug_arch_supported()) {
 		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
 		return 0;
 	}
@@ -899,18 +921,24 @@ static int __init arch_hw_breakpoint_init(void)
 	pr_info("%d breakpoint(s) reserved for watchpoint "
 		"single-step.\n", core_num_reserved_brps);
 
+	/*
+	 * Reset the breakpoint resources. We assume that a halting
+	 * debugger will leave the world in a nice state for us.
+	 */
+	on_each_cpu(reset_ctrl_regs, &cpumask, 1);
+	if (!cpumask_empty(&cpumask)) {
+		core_num_brps = 0;
+		core_num_reserved_brps = 0;
+		core_num_wrps = 0;
+		return 0;
+	}
+
 	ARM_DBG_READ(c1, 0, dscr);
 	if (dscr & ARM_DSCR_HDBGEN) {
+		max_watchpoint_len = 4;
 		pr_warning("halting debug mode enabled. Assuming maximum "
-			   "watchpoint size of 4 bytes.");
+			   "watchpoint size of %u bytes.", max_watchpoint_len);
 	} else {
-		/*
-		 * Reset the breakpoint resources. We assume that a halting
-		 * debugger will leave the world in a nice state for us.
-		 */
-		smp_call_function(reset_ctrl_regs, NULL, 1);
-		reset_ctrl_regs(NULL);
-
 		/* Work out the maximum supported watchpoint length. */
 		max_watchpoint_len = get_max_wp_len();
 		pr_info("maximum watchpoint size is %u bytes.\n",
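
Note: the hw_breakpoint.c rework probes the debug registers on every CPU via on_each_cpu() and records CPUs whose debug logic is powered down in a cpumask; breakpoint slots are only advertised if that mask comes back empty. A hedged sketch of the same collect-per-CPU-failures idea (feature_usable() is a made-up placeholder; the cpumask/on_each_cpu calls are the ones used in the patch):

	#include <linux/cpumask.h>
	#include <linux/smp.h>

	static int feature_usable(void);	/* hypothetical per-CPU check */

	static void probe_cpu(void *info)
	{
		cpumask_t *failed = info;

		if (!feature_usable())
			cpumask_or(failed, failed, cpumask_of(smp_processor_id()));
	}

	static int probe_all_cpus(void)
	{
		cpumask_t failed = { CPU_BITS_NONE };

		on_each_cpu(probe_cpu, &failed, 1);	/* run everywhere, wait for completion */
		return cpumask_empty(&failed) ? 0 : -ENODEV;
	}
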
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8f6ed43861f1 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
 		return space_cccc_1100_010x(insn, asi);
 
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
 
 		return space_cccc_110x(insn, asi);
 
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 2cfe8161b478..6d4105e6872f 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -22,6 +22,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/sections.h>
+#include <asm/smp_plat.h>
 #include <asm/unwind.h>
 
 #ifdef CONFIG_XIP_KERNEL
@@ -268,12 +269,28 @@ struct mod_unwind_map {
 	const Elf_Shdr *txt_sec;
 };
 
+static const Elf_Shdr *find_mod_section(const Elf32_Ehdr *hdr,
+	const Elf_Shdr *sechdrs, const char *name)
+{
+	const Elf_Shdr *s, *se;
+	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++)
+		if (strcmp(name, secstrs + s->sh_name) == 0)
+			return s;
+
+	return NULL;
+}
+
+extern void fixup_smp(const void *, unsigned long);
+
 int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 		    struct module *mod)
 {
+	const Elf_Shdr * __maybe_unused s = NULL;
 #ifdef CONFIG_ARM_UNWIND
 	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
-	const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
+	const Elf_Shdr *sechdrs_end = sechdrs + hdr->e_shnum;
 	struct mod_unwind_map maps[ARM_SEC_MAX];
 	int i;
 
@@ -315,6 +332,9 @@ int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
 			       maps[i].txt_sec->sh_addr,
 			       maps[i].txt_sec->sh_size);
 #endif
+	s = find_mod_section(hdr, sechdrs, ".alt.smp.init");
+	if (s && !is_smp())
+		fixup_smp((void *)s->sh_addr, s->sh_size);
 	return 0;
 }
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 5efa2647a2fb..d150ad1ccb5d 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -700,7 +700,7 @@ user_backtrace(struct frame_tail __user *tail,
 	 * Frame pointers should strictly progress back up the stack
 	 * (towards higher addresses).
 	 */
-	if (tail >= buftail.fp)
+	if (tail + 1 >= buftail.fp)
 		return NULL;
 
 	return buftail.fp - 1;
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e6..2c79eec19262 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
 		   irq, cpu);
 	return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
-out:
 	return err;
 }
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 19c6816db61e..b13e70f63d71 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num,
 		while (!(arch_ctrl.len & 0x1))
 			arch_ctrl.len >>= 1;
 
-		if (idx & 0x1)
-			reg = encode_ctrl_reg(arch_ctrl);
-		else
+		if (num & 0x1)
 			reg = bp->attr.bp_addr;
+		else
+			reg = encode_ctrl_reg(arch_ctrl);
 	}
 
 put:
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d6..5ea4fb718b97 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
 		asm("mrc	p15, 0, %0, c0, c1, 4"
 		    : "=r" (mmfr0));
-		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-		    (mmfr0 & 0x000000f0) == 0x00000030)
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 			 (mmfr0 & 0x000000f0) == 0x00000020)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bca..abaf8445ce25 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
 
 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f2031..61462790757f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -43,6 +49,7 @@ SECTIONS
 		_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 		_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
 
@@ -162,6 +170,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	INIT_DATA
+	ARM_EXIT_KEEP(EXIT_DATA)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 #endif
@@ -247,6 +256,8 @@ SECTIONS
 	}
 #endif
 
+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
 
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 343de73161fa..4a68c2b1ec11 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -132,7 +132,7 @@ out:
 	return ret;
 }
 
-static int __init davinci_cpu_init(struct cpufreq_policy *policy)
+static int davinci_cpu_init(struct cpufreq_policy *policy)
 {
 	int result = 0;
 	struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data;
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index 9eec63070e0c..beda8a4133a0 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = {
 	.resource	= da850_mcasp_resources,
 };
 
+struct platform_device davinci_pcm_device = {
+	.name		= "davinci-pcm-audio",
+	.id		= -1,
+};
+
 void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata)
 {
+	platform_device_register(&davinci_pcm_device);
+
 	/* DA830/OMAP-L137 has 3 instances of McASP */
 	if (cpu_is_davinci_da830() && id == 1) {
 		da830_mcasp1_device.dev.platform_data = pdata;
diff --git a/arch/arm/mach-davinci/gpio-tnetv107x.c b/arch/arm/mach-davinci/gpio-tnetv107x.c
index d10298620e2c..3fa3e2867e19 100644
--- a/arch/arm/mach-davinci/gpio-tnetv107x.c
+++ b/arch/arm/mach-davinci/gpio-tnetv107x.c
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset)
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 
-	gpio_reg_set_bit(&regs->enable, gpio);
+	gpio_reg_set_bit(regs->enable, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset)
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 
-	gpio_reg_clear_bit(&regs->enable, gpio);
+	gpio_reg_clear_bit(regs->enable, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 }
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
 
 	spin_lock_irqsave(&ctlr->lock, flags);
 
-	gpio_reg_set_bit(&regs->direction, gpio);
+	gpio_reg_set_bit(regs->direction, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip,
 	spin_lock_irqsave(&ctlr->lock, flags);
 
 	if (value)
-		gpio_reg_set_bit(&regs->data_out, gpio);
+		gpio_reg_set_bit(regs->data_out, gpio);
 	else
-		gpio_reg_clear_bit(&regs->data_out, gpio);
+		gpio_reg_clear_bit(regs->data_out, gpio);
 
-	gpio_reg_clear_bit(&regs->direction, gpio);
+	gpio_reg_clear_bit(regs->direction, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset)
 	unsigned gpio = chip->base + offset;
 	int ret;
 
-	ret = gpio_reg_get_bit(&regs->data_in, gpio);
+	ret = gpio_reg_get_bit(regs->data_in, gpio);
 
 	return ret ? 1 : 0;
 }
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip,
 	spin_lock_irqsave(&ctlr->lock, flags);
 
 	if (value)
-		gpio_reg_set_bit(&regs->data_out, gpio);
+		gpio_reg_set_bit(regs->data_out, gpio);
 	else
-		gpio_reg_clear_bit(&regs->data_out, gpio);
+		gpio_reg_clear_bit(regs->data_out, gpio);
 
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 }
diff --git a/arch/arm/mach-davinci/include/mach/clkdev.h b/arch/arm/mach-davinci/include/mach/clkdev.h
index 730c49d1ebd8..14a504887189 100644
--- a/arch/arm/mach-davinci/include/mach/clkdev.h
+++ b/arch/arm/mach-davinci/include/mach/clkdev.h
@@ -1,6 +1,8 @@
 #ifndef __MACH_CLKDEV_H
 #define __MACH_CLKDEV_H
 
+struct clk;
+
 static inline int __clk_get(struct clk *clk)
 {
 	return 1;
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 337392c3f549..acb7ae5b0a25 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
 	dd = clk->dpll_data;
 
 	/* DPLL divider must result in a valid jitter correction val */
-	fint = clk->parent->rate / (n + 1);
+	fint = clk->parent->rate / n;
 	if (fint < DPLL_FINT_BAND1_MIN) {
 
 		pr_debug("rejecting n=%d due to Fint failure, "
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c
index 394413dc7deb..24b88504df0f 100644
--- a/arch/arm/mach-omap2/mailbox.c
+++ b/arch/arm/mach-omap2/mailbox.c
@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox,
 				  omap_mbox_type_t irq)
 {
 	struct omap_mbox2_priv *p = mbox->priv;
-	u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
-	l = mbox_read_reg(p->irqdisable);
-	l &= ~bit;
-	mbox_write_reg(l, p->irqdisable);
+	u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
+
+	if (!cpu_is_omap44xx())
+		bit = mbox_read_reg(p->irqdisable) & ~bit;
+
+	mbox_write_reg(bit, p->irqdisable);
 }
 
 static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
@@ -334,7 +336,7 @@ static struct omap_mbox mbox_iva_info = {
 	.priv	= &omap2_mbox_iva_priv,
 };
 
-struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
+struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
 #endif
 
 #if defined(CONFIG_ARCH_OMAP4)
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 98148b6c36e9..6c84659cf846 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
 	list_for_each_entry(e, &partition->muxmodes, node) {
 		struct omap_mux *m = &e->mux;
 
-		(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
+		(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
 					  m, &omap_mux_dbg_signal_fops);
 	}
 }
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c
index 125f56591fb5..a5a83b358ddd 100644
--- a/arch/arm/mach-omap2/pm-debug.c
+++ b/arch/arm/mach-omap2/pm-debug.c
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
 
 	}
 
-	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
 				   &enable_off_mode, &pm_dbg_option_fops);
-	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
 				   &sleep_while_idle, &pm_dbg_option_fops);
-	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
+	(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
 				   &wakeup_timer_seconds, &pm_dbg_option_fops);
 	(void) debugfs_create_file("wakeup_timer_milliseconds",
-			S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
+			S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
 			&pm_dbg_option_fops);
 	pm_dbg_init_done = 1;
 
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h
index 729a644ce852..3300ff6e3cfe 100644
--- a/arch/arm/mach-omap2/prcm_mpu44xx.h
+++ b/arch/arm/mach-omap2/prcm_mpu44xx.h
@@ -38,8 +38,8 @@
 #define OMAP4430_PRCM_MPU_CPU1_INST		0x0800
 
 /* PRCM_MPU clockdomain register offsets (from instance start) */
-#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0000
-#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0000
+#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS	0x0018
+#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS	0x0018
 
 
 /*
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c
index c37e823266d3..1a777e34d0c2 100644
--- a/arch/arm/mach-omap2/smartreflex.c
+++ b/arch/arm/mach-omap2/smartreflex.c
@@ -282,6 +282,7 @@ error:
 		dev_err(&sr_info->pdev->dev, "%s: ERROR in registering"
 			"interrupt handler. Smartreflex will"
 			"not function as desired\n", __func__);
+		kfree(name);
 		kfree(sr_info);
 		return ret;
 }
@@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 		ret = sr_late_init(sr_info);
 		if (ret) {
 			pr_warning("%s: Error in SR late init\n", __func__);
-			return ret;
+			goto err_release_region;
 		}
 	}
 
@@ -890,17 +891,20 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 	 * not try to create rest of the debugfs entries.
 	 */
 	vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm);
-	if (!vdd_dbg_dir)
-		return -EINVAL;
+	if (!vdd_dbg_dir) {
+		ret = -EINVAL;
+		goto err_release_region;
+	}
 
 	dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir);
 	if (IS_ERR(dbg_dir)) {
 		dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n",
 			__func__);
-		return PTR_ERR(dbg_dir);
+		ret = PTR_ERR(dbg_dir);
+		goto err_release_region;
 	}
 
-	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
+	(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
 			(void *)sr_info, &pm_sr_fops);
 	(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
 			&sr_info->err_weight);
@@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 	if (IS_ERR(nvalue_dir)) {
 		dev_err(&pdev->dev, "%s: Unable to create debugfs directory"
 			"for n-values\n", __func__);
-		return PTR_ERR(nvalue_dir);
+		ret = PTR_ERR(nvalue_dir);
+		goto err_release_region;
 	}
 
 	omap_voltage_get_volttable(sr_info->voltdm, &volt_data);
@@ -922,24 +927,16 @@ static int __init omap_sr_probe(struct platform_device *pdev)
 			" corresponding vdd vdd_%s. Cannot create debugfs"
 			"entries for n-values\n",
 			__func__, sr_info->voltdm->name);
-		return -ENODATA;
+		ret = -ENODATA;
+		goto err_release_region;
 	}
 
 	for (i = 0; i < sr_info->nvalue_count; i++) {
-		char *name;
-		char volt_name[32];
-
-		name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL);
-		if (!name) {
-			dev_err(&pdev->dev, "%s: Unable to allocate memory"
-				" for n-value directory name\n", __func__);
-			return -ENOMEM;
-		}
+		char name[NVALUE_NAME_LEN + 1];
 
-		strcpy(name, "volt_");
-		sprintf(volt_name, "%d", volt_data[i].volt_nominal);
-		strcat(name, volt_name);
-		(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
+		snprintf(name, sizeof(name), "volt_%d",
+			 volt_data[i].volt_nominal);
+		(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
 				&(sr_info->nvalue_table[i].nvalue));
 	}
 
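
Note: the smartreflex.c conversion above replaces scattered early returns in omap_sr_probe() with ret = ...; goto err_release_region; so the memory region claimed earlier in the probe is released on every failure path. Reduced to its bare shape (the claim/unclaim helpers below are placeholders, not the driver's functions):

	static int claim_resource(void);	/* hypothetical, e.g. request_mem_region() */
	static void unclaim_resource(void);	/* hypothetical, e.g. release_mem_region() */
	static int do_setup(void);		/* hypothetical later probe step */

	static int example_probe(void)
	{
		int ret;

		if (!claim_resource())
			return -EBUSY;

		ret = do_setup();
		if (ret)
			goto err_release;	/* every later failure funnels here */

		return 0;

	err_release:
		unclaim_resource();
		return ret;
	}
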
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
index 7b7c2683ae7b..0fc550e7e482 100644
--- a/arch/arm/mach-omap2/timer-gp.c
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -39,6 +39,7 @@
 #include <asm/mach/time.h>
 #include <plat/dmtimer.h>
 #include <asm/localtimer.h>
+#include <asm/sched_clock.h>
 
 #include "timer-gp.h"
 
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
 /*
  * clocksource
  */
+static DEFINE_CLOCK_DATA(cd);
 static struct omap_dm_timer *gpt_clocksource;
 static cycle_t clocksource_read_cycles(struct clocksource *cs)
 {
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static void notrace dmtimer_update_sched_clock(void)
+{
+	u32 cyc;
+
+	cyc = omap_dm_timer_read_counter(gpt_clocksource);
+
+	update_sched_clock(&cd, cyc, (u32)~0);
+}
+
 /* Setup free-running counter for clocksource */
 static void __init omap2_gp_clocksource_init(void)
 {
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
 
 	omap_dm_timer_set_load_start(gpt, 1, 0);
 
+	init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
+
 	if (clocksource_register_hz(&clocksource_gpt, tick_rate))
 		printk(err2, clocksource_gpt.name);
 }
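
Note: timer-gp.c hooks the free-running dmtimer into the 2.6.38-era ARM sched_clock helpers: DEFINE_CLOCK_DATA() holds the state, init_sched_clock() registers the counter width and rate, and a notrace callback feeds raw counter values to update_sched_clock(). A condensed sketch of that wiring, with read_raw_counter() standing in for the real timer read:

	#include <asm/sched_clock.h>

	static DEFINE_CLOCK_DATA(cd);

	static u32 read_raw_counter(void);	/* hypothetical: raw 32-bit counter value */

	static void notrace example_update_sched_clock(void)
	{
		update_sched_clock(&cd, read_raw_counter(), (u32)~0);	/* 32-bit wrap mask */
	}

	static void __init example_sched_clock_init(unsigned long rate)
	{
		init_sched_clock(&cd, example_update_sched_clock, 32, rate);	/* 32 bits wide, rate in Hz */
	}
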
diff --git a/arch/arm/mach-pxa/colibri-evalboard.c b/arch/arm/mach-pxa/colibri-evalboard.c
index 6b2c800a1133..28f667e52ef9 100644
--- a/arch/arm/mach-pxa/colibri-evalboard.c
+++ b/arch/arm/mach-pxa/colibri-evalboard.c
@@ -50,7 +50,7 @@ static void __init colibri_mmc_init(void)
 			GPIO0_COLIBRI_PXA270_SD_DETECT;
 	if (machine_is_colibri300())	/* PXA300 Colibri */
 		colibri_mci_platform_data.gpio_card_detect =
-			GPIO39_COLIBRI_PXA300_SD_DETECT;
+			GPIO13_COLIBRI_PXA300_SD_DETECT;
 	else				/* PXA320 Colibri */
 		colibri_mci_platform_data.gpio_card_detect =
 			GPIO28_COLIBRI_PXA320_SD_DETECT;
diff --git a/arch/arm/mach-pxa/colibri-pxa300.c b/arch/arm/mach-pxa/colibri-pxa300.c
index fddb16d07eb0..66dd81cbc8a0 100644
--- a/arch/arm/mach-pxa/colibri-pxa300.c
+++ b/arch/arm/mach-pxa/colibri-pxa300.c
@@ -41,7 +41,7 @@ static mfp_cfg_t colibri_pxa300_evalboard_pin_config[] __initdata = {
 	GPIO4_MMC1_DAT1,
 	GPIO5_MMC1_DAT2,
 	GPIO6_MMC1_DAT3,
-	GPIO39_GPIO,	/* SD detect */
+	GPIO13_GPIO,	/* GPIO13_COLIBRI_PXA300_SD_DETECT */
 
 	/* UHC */
 	GPIO0_2_USBH_PEN,
diff --git a/arch/arm/mach-pxa/include/mach/colibri.h b/arch/arm/mach-pxa/include/mach/colibri.h
index 388a96f1ef93..cb4236e98a0f 100644
--- a/arch/arm/mach-pxa/include/mach/colibri.h
+++ b/arch/arm/mach-pxa/include/mach/colibri.h
@@ -60,7 +60,7 @@ static inline void colibri_pxa3xx_init_nand(void) {}
 #define GPIO113_COLIBRI_PXA270_TS_IRQ	113
 
 /* GPIO definitions for Colibri PXA300/310 */
-#define GPIO39_COLIBRI_PXA300_SD_DETECT	39
+#define GPIO13_COLIBRI_PXA300_SD_DETECT	13
 
 /* GPIO definitions for Colibri PXA320 */
 #define GPIO28_COLIBRI_PXA320_SD_DETECT	28
diff --git a/arch/arm/mach-pxa/palm27x.c b/arch/arm/mach-pxa/palm27x.c
index 405b92a29793..35572c427fa8 100644
--- a/arch/arm/mach-pxa/palm27x.c
+++ b/arch/arm/mach-pxa/palm27x.c
@@ -323,7 +323,7 @@ static struct platform_pwm_backlight_data palm27x_backlight_data = {
 	.pwm_id		= 0,
 	.max_brightness	= 0xfe,
 	.dft_brightness	= 0x7e,
-	.pwm_period_ns	= 3500,
+	.pwm_period_ns	= 3500 * 1024,
 	.init		= palm27x_backlight_init,
 	.notify		= palm27x_backlight_notify,
 	.exit		= palm27x_backlight_exit,
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 978e1b289544..1807c9abdde0 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -33,7 +33,7 @@ int pxa_pm_enter(suspend_state_t state)
 #endif
 
 	/* skip registers saving for standby */
-	if (state != PM_SUSPEND_STANDBY) {
+	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->save) {
 		pxa_cpu_pm_fns->save(sleep_save);
 		/* before sleeping, calculate and save a checksum */
 		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
@@ -44,7 +44,7 @@ int pxa_pm_enter(suspend_state_t state)
 	pxa_cpu_pm_fns->enter(state);
 	cpu_init();
 
-	if (state != PM_SUSPEND_STANDBY) {
+	if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
 		/* after sleeping, validate the checksum */
 		for (i = 0; i < pxa_cpu_pm_fns->save_count - 1; i++)
 			checksum += sleep_save[i];
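
Note: the pxa/pm.c hunks only call pxa_cpu_pm_fns->save()/restore() when the platform actually supplies those hooks, so pxa_pm_enter() no longer dereferences a NULL member on SoCs that register a partial ops structure. The generic shape of that guard (the struct below is illustrative, not the pxa one):

	struct pm_hooks {			/* illustrative only */
		void (*save)(unsigned long *buf);
		void (*restore)(unsigned long *buf);
	};

	static void enter_sleep(struct pm_hooks *ops, unsigned long *buf)
	{
		if (ops->save)			/* hook is optional: test before calling */
			ops->save(buf);

		/* ... enter the low-power state here ... */

		if (ops->restore)
			ops->restore(buf);
	}
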
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index fbc5b775f895..b166b1d845d7 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = {
 	&pxa25x_device_assp,
 	&pxa25x_device_pwm0,
 	&pxa25x_device_pwm1,
+	&pxa_device_asoc_platform,
 };
 
 static struct sys_device pxa25x_sysdev[] = {
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c
index c31e601eb49c..b9b1e5c2b290 100644
--- a/arch/arm/mach-pxa/tosa-bt.c
+++ b/arch/arm/mach-pxa/tosa-bt.c
@@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev)
 		goto err_rfk_alloc;
 	}
 
-	rfkill_set_led_trigger_name(rfk, "tosa-bt");
-
 	rc = rfkill_register(rfk);
 	if (rc)
 		goto err_rfkill;
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index af152e70cfcf..f2582ec300d9 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = {
 	.dev.platform_data	= &sharpsl_rom_data,
 };
 
+static struct platform_device wm9712_device = {
+	.name	= "wm9712-codec",
+	.id	= -1,
+};
+
 static struct platform_device *devices[] __initdata = {
 	&tosascoop_device,
 	&tosascoop_jc_device,
@@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = {
 	&tosaled_device,
 	&tosa_bt_device,
 	&sharpsl_rom_device,
+	&wm9712_device,
 };
 
 static void tosa_poweroff(void)
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig
index a0cb2581894f..50825a3f91cc 100644
--- a/arch/arm/mach-s3c2440/Kconfig
+++ b/arch/arm/mach-s3c2440/Kconfig
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02
 	select POWER_SUPPLY
 	select MACH_NEO1973
 	select S3C2410_PWM
+	select S3C_DEV_USB_HOST
 	help
 	  Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone
 
diff --git a/arch/arm/mach-s3c2440/include/mach/gta02.h b/arch/arm/mach-s3c2440/include/mach/gta02.h
index 953331d8d56a..3a56a229cac6 100644
--- a/arch/arm/mach-s3c2440/include/mach/gta02.h
+++ b/arch/arm/mach-s3c2440/include/mach/gta02.h
@@ -44,19 +44,19 @@
 #define GTA02v3_GPIO_nUSB_FLT	S3C2410_GPG(10)	/* v3 + v4 only */
 #define GTA02v3_GPIO_nGSM_OC	S3C2410_GPG(11)	/* v3 + v4 only */
 
-#define GTA02_GPIO_AMP_SHUT	S3C2440_GPJ1	/* v2 + v3 + v4 only */
-#define GTA02v1_GPIO_WLAN_GPIO10	S3C2440_GPJ2
-#define GTA02_GPIO_HP_IN	S3C2440_GPJ2	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_INT0		S3C2440_GPJ3	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_nGSM_EN	S3C2440_GPJ4
-#define GTA02_GPIO_3D_RESET	S3C2440_GPJ5
-#define GTA02_GPIO_nDL_GSM	S3C2440_GPJ6	/* v4 + v5 only */
-#define GTA02_GPIO_WLAN_GPIO0	S3C2440_GPJ7
-#define GTA02v1_GPIO_BAT_ID	S3C2440_GPJ8
-#define GTA02_GPIO_KEEPACT	S3C2440_GPJ8
-#define GTA02v1_GPIO_HP_IN	S3C2440_GPJ10
-#define GTA02_CHIP_PWD		S3C2440_GPJ11	/* v2 + v3 + v4 only */
-#define GTA02_GPIO_nWLAN_RESET	S3C2440_GPJ12	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_AMP_SHUT	S3C2410_GPJ(1)	/* v2 + v3 + v4 only */
+#define GTA02v1_GPIO_WLAN_GPIO10	S3C2410_GPJ(2)
+#define GTA02_GPIO_HP_IN	S3C2410_GPJ(2)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_INT0		S3C2410_GPJ(3)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_nGSM_EN	S3C2410_GPJ(4)
+#define GTA02_GPIO_3D_RESET	S3C2410_GPJ(5)
+#define GTA02_GPIO_nDL_GSM	S3C2410_GPJ(6)	/* v4 + v5 only */
+#define GTA02_GPIO_WLAN_GPIO0	S3C2410_GPJ(7)
+#define GTA02v1_GPIO_BAT_ID	S3C2410_GPJ(8)
+#define GTA02_GPIO_KEEPACT	S3C2410_GPJ(8)
+#define GTA02v1_GPIO_HP_IN	S3C2410_GPJ(10)
+#define GTA02_CHIP_PWD		S3C2410_GPJ(11)	/* v2 + v3 + v4 only */
+#define GTA02_GPIO_nWLAN_RESET	S3C2410_GPJ(12)	/* v2 + v3 + v4 only */
 
 #define GTA02_IRQ_GSENSOR_1	IRQ_EINT0
 #define GTA02_IRQ_MODEM	IRQ_EINT1
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c
index dd3782064508..fdfc4d5e37a1 100644
--- a/arch/arm/mach-s3c64xx/clock.c
+++ b/arch/arm/mach-s3c64xx/clock.c
@@ -151,6 +151,12 @@ static struct clk init_clocks_off[] = {
 		.enable		= s3c64xx_pclk_ctrl,
 		.ctrlbit	= S3C_CLKCON_PCLK_IIC,
 	}, {
+		.name		= "i2c",
+		.id		= 1,
+		.parent		= &clk_p,
+		.enable		= s3c64xx_pclk_ctrl,
+		.ctrlbit	= S3C6410_CLKCON_PCLK_I2C1,
+	}, {
 		.name		= "iis",
 		.id		= 0,
 		.parent		= &clk_p,
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index 135db1b41252..c35585cf8c4f 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
 
 	regptr = regs + PL080_Cx_BASE(0);
 
-	for (ch = 0; ch < 8; ch++, chno++, chptr++) {
-		printk(KERN_INFO "%s: registering DMA %d (%p)\n",
-		       __func__, chno, regptr);
+	for (ch = 0; ch < 8; ch++, chptr++) {
+		pr_debug("%s: registering DMA %d (%p)\n",
+			 __func__, chno + ch, regptr);
 
 		chptr->bit = 1 << ch;
-		chptr->number = chno;
+		chptr->number = chno + ch;
 		chptr->dmac = dmac;
 		chptr->regs = regptr;
 		regptr += PL080_Cx_STRIDE;
@@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase,
 	/* for the moment, permanently enable the controller */
 	writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG);
 
-	printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs);
+	printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n",
+	       irq, regs, chno, chno+8);
 
 	return 0;
 
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c
index fd99a82e82c4..92b09085caaa 100644
--- a/arch/arm/mach-s3c64xx/gpiolib.c
+++ b/arch/arm/mach-s3c64xx/gpiolib.c
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = {
 	.get_pull	= s3c_gpio_getpull_updown,
 };
 
-int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin)
 {
 	return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO;
 }
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = {
 	},
 };
 
-int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
+static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin)
 {
 	return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO;
 }
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index e85192a86fbe..a80a3163dd30 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/smsc911x.h>
 #include <linux/regulator/fixed.h>
+#include <linux/regulator/machine.h>
 
 #ifdef CONFIG_SMDK6410_WM1190_EV1
 #include <linux/mfd/wm8350/core.h>
@@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = {
 /* VDD_UH_MMC, LDO5 on J5 */
 static struct regulator_init_data smdk6410_vdduh_mmc = {
 	.constraints = {
-		.name = "PVDD_UH/PVDD_MMC",
+		.name = "PVDD_UH+PVDD_MMC",
 		.always_on = 1,
 	},
 };
@@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = {
 /* S3C64xx internal logic & PLL */
 static struct regulator_init_data wm8350_dcdc1_data = {
 	.constraints = {
-		.name = "PVDD_INT/PVDD_PLL",
+		.name = "PVDD_INT+PVDD_PLL",
 		.min_uV = 1200000,
 		.max_uV = 1200000,
 		.always_on = 1,
@@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = {
 
 static struct regulator_init_data wm8350_dcdc4_data = {
 	.constraints = {
-		.name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV",
+		.name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV",
 		.min_uV = 3000000,
 		.max_uV = 3000000,
 		.always_on = 1,
@@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = {
 /* OTGi/1190-EV1 HPVDD & AVDD */
 static struct regulator_init_data wm8350_ldo4_data = {
 	.constraints = {
-		.name = "PVDD_OTGI/HPVDD/AVDD",
+		.name = "PVDD_OTGI+HPVDD+AVDD",
 		.min_uV = 1200000,
 		.max_uV = 1200000,
 		.apply_uV = 1,
@@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = {
 
 static struct regulator_init_data wm1192_dcdc3 = {
 	.constraints = {
-		.name = "PVDD_MEM/PVDD_GPS",
+		.name = "PVDD_MEM+PVDD_GPS",
 		.always_on = 1,
 	},
 };
@@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = {
 
 static struct regulator_init_data wm1192_ldo1 = {
 	.constraints = {
-		.name = "PVDD_LCD/PVDD_EXT",
+		.name = "PVDD_LCD+PVDD_EXT",
 		.always_on = 1,
 	},
 	.consumer_supplies = wm1192_ldo1_consumers,
diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c
index f8ed0d22db70..1d4d0ee9e870 100644
--- a/arch/arm/mach-s3c64xx/setup-keypad.c
+++ b/arch/arm/mach-s3c64xx/setup-keypad.c
@@ -17,7 +17,7 @@
 void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols)
 {
 	/* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */
-	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3));
+	s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3));
 
 	/* Set all the necessary GPL pins to special-function 3: KP_COL[x] */
 	s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3));
diff --git a/arch/arm/mach-s3c64xx/setup-sdhci.c b/arch/arm/mach-s3c64xx/setup-sdhci.c
index 1a942037c4ef..f344a222bc84 100644
--- a/arch/arm/mach-s3c64xx/setup-sdhci.c
+++ b/arch/arm/mach-s3c64xx/setup-sdhci.c
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev,
 	else
 		ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
 
-	printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
+	pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3);
 	writel(ctrl2, r + S3C_SDHCI_CONTROL2);
 	writel(ctrl3, r + S3C_SDHCI_CONTROL3);
 }
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 203dd5a18bd5..058dab4482a1 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5p6442/include/mach/map.h 1/* linux/arch/arm/mach-s5p6442/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 4 * http://www.samsung.com/
5 * 5 *
6 * S5P6442 - Memory map definitions 6 * S5P6442 - Memory map definitions
@@ -16,56 +16,61 @@
16#include <plat/map-base.h> 16#include <plat/map-base.h>
17#include <plat/map-s5p.h> 17#include <plat/map-s5p.h>
18 18
19#define S5P6442_PA_CHIPID (0xE0000000) 19#define S5P6442_PA_SDRAM 0x20000000
20#define S5P_PA_CHIPID S5P6442_PA_CHIPID
21 20
22#define S5P6442_PA_SYSCON (0xE0100000) 21#define S5P6442_PA_I2S0 0xC0B00000
23#define S5P_PA_SYSCON S5P6442_PA_SYSCON 22#define S5P6442_PA_I2S1 0xF2200000
24 23
25#define S5P6442_PA_GPIO (0xE0200000) 24#define S5P6442_PA_CHIPID 0xE0000000
26 25
27#define S5P6442_PA_VIC0 (0xE4000000) 26#define S5P6442_PA_SYSCON 0xE0100000
28#define S5P6442_PA_VIC1 (0xE4100000)
29#define S5P6442_PA_VIC2 (0xE4200000)
30 27
31#define S5P6442_PA_SROMC (0xE7000000) 28#define S5P6442_PA_GPIO 0xE0200000
32#define S5P_PA_SROMC S5P6442_PA_SROMC
33 29
34#define S5P6442_PA_MDMA 0xE8000000 30#define S5P6442_PA_VIC0 0xE4000000
35#define S5P6442_PA_PDMA 0xE9000000 31#define S5P6442_PA_VIC1 0xE4100000
32#define S5P6442_PA_VIC2 0xE4200000
36 33
37#define S5P6442_PA_TIMER (0xEA000000) 34#define S5P6442_PA_SROMC 0xE7000000
38#define S5P_PA_TIMER S5P6442_PA_TIMER
39 35
40#define S5P6442_PA_SYSTIMER (0xEA100000) 36#define S5P6442_PA_MDMA 0xE8000000
37#define S5P6442_PA_PDMA 0xE9000000
41 38
42#define S5P6442_PA_WATCHDOG (0xEA200000) 39#define S5P6442_PA_TIMER 0xEA000000
43 40
44#define S5P6442_PA_UART (0xEC000000) 41#define S5P6442_PA_SYSTIMER 0xEA100000
45 42
46#define S5P_PA_UART0 (S5P6442_PA_UART + 0x0) 43#define S5P6442_PA_WATCHDOG 0xEA200000
47#define S5P_PA_UART1 (S5P6442_PA_UART + 0x400)
48#define S5P_PA_UART2 (S5P6442_PA_UART + 0x800)
49#define S5P_SZ_UART SZ_256
50 44
51#define S5P6442_PA_IIC0 (0xEC100000) 45#define S5P6442_PA_UART 0xEC000000
52 46
53#define S5P6442_PA_SDRAM (0x20000000) 47#define S5P6442_PA_IIC0 0xEC100000
54#define S5P_PA_SDRAM S5P6442_PA_SDRAM
55 48
56#define S5P6442_PA_SPI 0xEC300000 49#define S5P6442_PA_SPI 0xEC300000
57 50
58/* I2S */
59#define S5P6442_PA_I2S0 0xC0B00000
60#define S5P6442_PA_I2S1 0xF2200000
61
62/* PCM */
63#define S5P6442_PA_PCM0 0xF2400000 51#define S5P6442_PA_PCM0 0xF2400000
64#define S5P6442_PA_PCM1 0xF2500000 52#define S5P6442_PA_PCM1 0xF2500000
65 53
66/* compatibiltiy defines. */ 54/* Compatibiltiy Defines */
55
56#define S3C_PA_IIC S5P6442_PA_IIC0
67#define S3C_PA_WDT S5P6442_PA_WATCHDOG 57#define S3C_PA_WDT S5P6442_PA_WATCHDOG
58
59#define S5P_PA_CHIPID S5P6442_PA_CHIPID
60#define S5P_PA_SDRAM S5P6442_PA_SDRAM
61#define S5P_PA_SROMC S5P6442_PA_SROMC
62#define S5P_PA_SYSCON S5P6442_PA_SYSCON
63#define S5P_PA_TIMER S5P6442_PA_TIMER
64
65/* UART */
66
68#define S3C_PA_UART S5P6442_PA_UART 67#define S3C_PA_UART S5P6442_PA_UART
69#define S3C_PA_IIC S5P6442_PA_IIC0 68
69#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
70#define S5P_PA_UART0 S5P_PA_UART(0)
71#define S5P_PA_UART1 S5P_PA_UART(1)
72#define S5P_PA_UART2 S5P_PA_UART(2)
73
74#define S5P_SZ_UART SZ_256
70 75
71#endif /* __ASM_ARCH_MAP_H */ 76#endif /* __ASM_ARCH_MAP_H */
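Note on the UART block above: the per-port absolute defines are replaced by a single S5P_PA_UART(x) macro. A minimal sketch of the arithmetic, using the 0x400 spacing visible in the removed defines (old S5P_PA_UART1 = UART + 0x400, S5P_PA_UART2 = UART + 0x800), so S3C_UART_OFFSET is assumed to be 0x400 here:

	/* Sketch only: expands S5P_PA_UART(x) by hand, assuming
	 * S3C_UART_OFFSET == 0x400 as implied by the removed +0x400/+0x800 defines. */
	#define SKETCH_PA_UART_BASE	0xEC000000	/* S5P6442_PA_UART */
	#define SKETCH_UART_OFFSET	0x400		/* assumed S3C_UART_OFFSET */

	/* SKETCH_PA_UART(0) -> 0xEC000000  (old S5P_PA_UART0)
	 * SKETCH_PA_UART(1) -> 0xEC000400  (old S5P6442_PA_UART + 0x400)
	 * SKETCH_PA_UART(2) -> 0xEC000800  (old S5P6442_PA_UART + 0x800) */
	#define SKETCH_PA_UART(x)	(SKETCH_PA_UART_BASE + ((x) * SKETCH_UART_OFFSET))

The same pattern is applied to the other S5P map.h files further down in this series.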
diff --git a/arch/arm/mach-s5p64x0/include/mach/gpio.h b/arch/arm/mach-s5p64x0/include/mach/gpio.h
index 5486c8f01f1d..adb5f298ead8 100644
--- a/arch/arm/mach-s5p64x0/include/mach/gpio.h
+++ b/arch/arm/mach-s5p64x0/include/mach/gpio.h
@@ -23,7 +23,7 @@
23#define S5P6440_GPIO_A_NR (6) 23#define S5P6440_GPIO_A_NR (6)
24#define S5P6440_GPIO_B_NR (7) 24#define S5P6440_GPIO_B_NR (7)
25#define S5P6440_GPIO_C_NR (8) 25#define S5P6440_GPIO_C_NR (8)
26#define S5P6440_GPIO_F_NR (2) 26#define S5P6440_GPIO_F_NR (16)
27#define S5P6440_GPIO_G_NR (7) 27#define S5P6440_GPIO_G_NR (7)
28#define S5P6440_GPIO_H_NR (10) 28#define S5P6440_GPIO_H_NR (10)
29#define S5P6440_GPIO_I_NR (16) 29#define S5P6440_GPIO_I_NR (16)
@@ -36,7 +36,7 @@
36#define S5P6450_GPIO_B_NR (7) 36#define S5P6450_GPIO_B_NR (7)
37#define S5P6450_GPIO_C_NR (8) 37#define S5P6450_GPIO_C_NR (8)
38#define S5P6450_GPIO_D_NR (8) 38#define S5P6450_GPIO_D_NR (8)
39#define S5P6450_GPIO_F_NR (2) 39#define S5P6450_GPIO_F_NR (16)
40#define S5P6450_GPIO_G_NR (14) 40#define S5P6450_GPIO_G_NR (14)
41#define S5P6450_GPIO_H_NR (10) 41#define S5P6450_GPIO_H_NR (10)
42#define S5P6450_GPIO_I_NR (16) 42#define S5P6450_GPIO_I_NR (16)
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index a9365e5ba614..95c91257c7ca 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5p64x0/include/mach/map.h 1/* linux/arch/arm/mach-s5p64x0/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 4 * http://www.samsung.com
5 * 5 *
6 * S5P64X0 - Memory map definitions 6 * S5P64X0 - Memory map definitions
@@ -16,64 +16,46 @@
16#include <plat/map-base.h> 16#include <plat/map-base.h>
17#include <plat/map-s5p.h> 17#include <plat/map-s5p.h>
18 18
19#define S5P64X0_PA_SDRAM (0x20000000) 19#define S5P64X0_PA_SDRAM 0x20000000
20 20
21#define S5P64X0_PA_CHIPID (0xE0000000) 21#define S5P64X0_PA_CHIPID 0xE0000000
22#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
23
24#define S5P64X0_PA_SYSCON (0xE0100000)
25#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
26
27#define S5P64X0_PA_GPIO (0xE0308000)
28
29#define S5P64X0_PA_VIC0 (0xE4000000)
30#define S5P64X0_PA_VIC1 (0xE4100000)
31 22
32#define S5P64X0_PA_SROMC (0xE7000000) 23#define S5P64X0_PA_SYSCON 0xE0100000
33#define S5P_PA_SROMC S5P64X0_PA_SROMC
34
35#define S5P64X0_PA_PDMA (0xE9000000)
36
37#define S5P64X0_PA_TIMER (0xEA000000)
38#define S5P_PA_TIMER S5P64X0_PA_TIMER
39 24
40#define S5P64X0_PA_RTC (0xEA100000) 25#define S5P64X0_PA_GPIO 0xE0308000
41 26
42#define S5P64X0_PA_WDT (0xEA200000) 27#define S5P64X0_PA_VIC0 0xE4000000
28#define S5P64X0_PA_VIC1 0xE4100000
43 29
44#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET)) 30#define S5P64X0_PA_SROMC 0xE7000000
45#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
46 31
47#define S5P_PA_UART0 S5P6450_PA_UART(0) 32#define S5P64X0_PA_PDMA 0xE9000000
48#define S5P_PA_UART1 S5P6450_PA_UART(1)
49#define S5P_PA_UART2 S5P6450_PA_UART(2)
50#define S5P_PA_UART3 S5P6450_PA_UART(3)
51#define S5P_PA_UART4 S5P6450_PA_UART(4)
52#define S5P_PA_UART5 S5P6450_PA_UART(5)
53 33
54#define S5P_SZ_UART SZ_256 34#define S5P64X0_PA_TIMER 0xEA000000
35#define S5P64X0_PA_RTC 0xEA100000
36#define S5P64X0_PA_WDT 0xEA200000
55 37
56#define S5P6440_PA_IIC0 (0xEC104000) 38#define S5P6440_PA_IIC0 0xEC104000
57#define S5P6440_PA_IIC1 (0xEC20F000) 39#define S5P6440_PA_IIC1 0xEC20F000
58#define S5P6450_PA_IIC0 (0xEC100000) 40#define S5P6450_PA_IIC0 0xEC100000
59#define S5P6450_PA_IIC1 (0xEC200000) 41#define S5P6450_PA_IIC1 0xEC200000
60 42
61#define S5P64X0_PA_SPI0 (0xEC400000) 43#define S5P64X0_PA_SPI0 0xEC400000
62#define S5P64X0_PA_SPI1 (0xEC500000) 44#define S5P64X0_PA_SPI1 0xEC500000
63 45
64#define S5P64X0_PA_HSOTG (0xED100000) 46#define S5P64X0_PA_HSOTG 0xED100000
65 47
66#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) 48#define S5P64X0_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
67 49
68#define S5P64X0_PA_I2S (0xF2000000) 50#define S5P64X0_PA_I2S 0xF2000000
69#define S5P6450_PA_I2S1 0xF2800000 51#define S5P6450_PA_I2S1 0xF2800000
70#define S5P6450_PA_I2S2 0xF2900000 52#define S5P6450_PA_I2S2 0xF2900000
71 53
72#define S5P64X0_PA_PCM (0xF2100000) 54#define S5P64X0_PA_PCM 0xF2100000
73 55
74#define S5P64X0_PA_ADC (0xF3000000) 56#define S5P64X0_PA_ADC 0xF3000000
75 57
76/* compatibiltiy defines. */ 58/* Compatibiltiy Defines */
77 59
78#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0) 60#define S3C_PA_HSMMC0 S5P64X0_PA_HSMMC(0)
79#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1) 61#define S3C_PA_HSMMC1 S5P64X0_PA_HSMMC(1)
@@ -83,6 +65,25 @@
83#define S3C_PA_RTC S5P64X0_PA_RTC 65#define S3C_PA_RTC S5P64X0_PA_RTC
84#define S3C_PA_WDT S5P64X0_PA_WDT 66#define S3C_PA_WDT S5P64X0_PA_WDT
85 67
68#define S5P_PA_CHIPID S5P64X0_PA_CHIPID
69#define S5P_PA_SROMC S5P64X0_PA_SROMC
70#define S5P_PA_SYSCON S5P64X0_PA_SYSCON
71#define S5P_PA_TIMER S5P64X0_PA_TIMER
72
86#define SAMSUNG_PA_ADC S5P64X0_PA_ADC 73#define SAMSUNG_PA_ADC S5P64X0_PA_ADC
87 74
75/* UART */
76
77#define S5P6440_PA_UART(x) (0xEC000000 + ((x) * S3C_UART_OFFSET))
78#define S5P6450_PA_UART(x) ((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
79
80#define S5P_PA_UART0 S5P6450_PA_UART(0)
81#define S5P_PA_UART1 S5P6450_PA_UART(1)
82#define S5P_PA_UART2 S5P6450_PA_UART(2)
83#define S5P_PA_UART3 S5P6450_PA_UART(3)
84#define S5P_PA_UART4 S5P6450_PA_UART(4)
85#define S5P_PA_UART5 S5P6450_PA_UART(5)
86
87#define S5P_SZ_UART SZ_256
88
88#endif /* __ASM_ARCH_MAP_H */ 89#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 328467b346aa..ccbe6b767f7d 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -1,5 +1,8 @@
1/* linux/arch/arm/mach-s5pc100/include/mach/map.h 1/* linux/arch/arm/mach-s5pc100/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/
5 *
3 * Copyright 2009 Samsung Electronics Co. 6 * Copyright 2009 Samsung Electronics Co.
4 * Byungho Min <bhmin@samsung.com> 7 * Byungho Min <bhmin@samsung.com>
5 * 8 *
@@ -16,145 +19,115 @@
16#include <plat/map-base.h> 19#include <plat/map-base.h>
17#include <plat/map-s5p.h> 20#include <plat/map-s5p.h>
18 21
19/* 22#define S5PC100_PA_SDRAM 0x20000000
20 * map-base.h has already defined virtual memory address 23
21 * S3C_VA_IRQ S3C_ADDR(0x00000000) irq controller(s) 24#define S5PC100_PA_ONENAND 0xE7100000
22 * S3C_VA_SYS S3C_ADDR(0x00100000) system control 25#define S5PC100_PA_ONENAND_BUF 0xB0000000
23 * S3C_VA_MEM S3C_ADDR(0x00200000) system control (not used) 26
24 * S3C_VA_TIMER S3C_ADDR(0x00300000) timer block 27#define S5PC100_PA_CHIPID 0xE0000000
25 * S3C_VA_WATCHDOG S3C_ADDR(0x00400000) watchdog
26 * S3C_VA_UART S3C_ADDR(0x01000000) UART
27 *
28 * S5PC100 specific virtual memory address can be defined here
29 * S5PC1XX_VA_GPIO S3C_ADDR(0x00500000) GPIO
30 *
31 */
32 28
33#define S5PC100_PA_ONENAND_BUF (0xB0000000) 29#define S5PC100_PA_SYSCON 0xE0100000
34#define S5PC100_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
35 30
36/* Chip ID */ 31#define S5PC100_PA_OTHERS 0xE0200000
37 32
38#define S5PC100_PA_CHIPID (0xE0000000) 33#define S5PC100_PA_GPIO 0xE0300000
39#define S5P_PA_CHIPID S5PC100_PA_CHIPID
40 34
41#define S5PC100_PA_SYSCON (0xE0100000) 35#define S5PC100_PA_VIC0 0xE4000000
42#define S5P_PA_SYSCON S5PC100_PA_SYSCON 36#define S5PC100_PA_VIC1 0xE4100000
37#define S5PC100_PA_VIC2 0xE4200000
43 38
44#define S5PC100_PA_OTHERS (0xE0200000) 39#define S5PC100_PA_SROMC 0xE7000000
45#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
46 40
47#define S5PC100_PA_GPIO (0xE0300000) 41#define S5PC100_PA_CFCON 0xE7800000
48#define S5PC1XX_VA_GPIO S3C_ADDR(0x00500000)
49 42
50/* Interrupt */ 43#define S5PC100_PA_MDMA 0xE8100000
51#define S5PC100_PA_VIC0 (0xE4000000) 44#define S5PC100_PA_PDMA0 0xE9000000
52#define S5PC100_PA_VIC1 (0xE4100000) 45#define S5PC100_PA_PDMA1 0xE9200000
53#define S5PC100_PA_VIC2 (0xE4200000)
54#define S5PC100_VA_VIC S3C_VA_IRQ
55#define S5PC100_VA_VIC_OFFSET 0x10000
56#define S5PC1XX_VA_VIC(x) (S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
57 46
58#define S5PC100_PA_SROMC (0xE7000000) 47#define S5PC100_PA_TIMER 0xEA000000
59#define S5P_PA_SROMC S5PC100_PA_SROMC 48#define S5PC100_PA_SYSTIMER 0xEA100000
49#define S5PC100_PA_WATCHDOG 0xEA200000
50#define S5PC100_PA_RTC 0xEA300000
60 51
61#define S5PC100_PA_ONENAND (0xE7100000) 52#define S5PC100_PA_UART 0xEC000000
62 53
63#define S5PC100_PA_CFCON (0xE7800000) 54#define S5PC100_PA_IIC0 0xEC100000
55#define S5PC100_PA_IIC1 0xEC200000
64 56
65/* DMA */ 57#define S5PC100_PA_SPI0 0xEC300000
66#define S5PC100_PA_MDMA (0xE8100000) 58#define S5PC100_PA_SPI1 0xEC400000
67#define S5PC100_PA_PDMA0 (0xE9000000) 59#define S5PC100_PA_SPI2 0xEC500000
68#define S5PC100_PA_PDMA1 (0xE9200000)
69 60
70/* Timer */ 61#define S5PC100_PA_USB_HSOTG 0xED200000
71#define S5PC100_PA_TIMER (0xEA000000) 62#define S5PC100_PA_USB_HSPHY 0xED300000
72#define S5P_PA_TIMER S5PC100_PA_TIMER
73 63
74#define S5PC100_PA_SYSTIMER (0xEA100000) 64#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000))
75 65
76#define S5PC100_PA_WATCHDOG (0xEA200000) 66#define S5PC100_PA_FB 0xEE000000
77#define S5PC100_PA_RTC (0xEA300000)
78 67
79#define S5PC100_PA_UART (0xEC000000) 68#define S5PC100_PA_FIMC0 0xEE200000
69#define S5PC100_PA_FIMC1 0xEE300000
70#define S5PC100_PA_FIMC2 0xEE400000
80 71
81#define S5P_PA_UART0 (S5PC100_PA_UART + 0x0) 72#define S5PC100_PA_I2S0 0xF2000000
82#define S5P_PA_UART1 (S5PC100_PA_UART + 0x400) 73#define S5PC100_PA_I2S1 0xF2100000
83#define S5P_PA_UART2 (S5PC100_PA_UART + 0x800) 74#define S5PC100_PA_I2S2 0xF2200000
84#define S5P_PA_UART3 (S5PC100_PA_UART + 0xC00)
85#define S5P_SZ_UART SZ_256
86 75
87#define S5PC100_PA_IIC0 (0xEC100000) 76#define S5PC100_PA_AC97 0xF2300000
88#define S5PC100_PA_IIC1 (0xEC200000)
89 77
90/* SPI */ 78#define S5PC100_PA_PCM0 0xF2400000
91#define S5PC100_PA_SPI0 0xEC300000 79#define S5PC100_PA_PCM1 0xF2500000
92#define S5PC100_PA_SPI1 0xEC400000
93#define S5PC100_PA_SPI2 0xEC500000
94 80
95/* USB HS OTG */ 81#define S5PC100_PA_SPDIF 0xF2600000
96#define S5PC100_PA_USB_HSOTG (0xED200000)
97#define S5PC100_PA_USB_HSPHY (0xED300000)
98 82
99#define S5PC100_PA_FB (0xEE000000) 83#define S5PC100_PA_TSADC 0xF3000000
100 84
101#define S5PC100_PA_FIMC0 (0xEE200000) 85#define S5PC100_PA_KEYPAD 0xF3100000
102#define S5PC100_PA_FIMC1 (0xEE300000)
103#define S5PC100_PA_FIMC2 (0xEE400000)
104 86
105#define S5PC100_PA_I2S0 (0xF2000000) 87/* Compatibiltiy Defines */
106#define S5PC100_PA_I2S1 (0xF2100000)
107#define S5PC100_PA_I2S2 (0xF2200000)
108 88
109#define S5PC100_PA_AC97 0xF2300000 89#define S3C_PA_FB S5PC100_PA_FB
90#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
91#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
92#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
93#define S3C_PA_IIC S5PC100_PA_IIC0
94#define S3C_PA_IIC1 S5PC100_PA_IIC1
95#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
96#define S3C_PA_ONENAND S5PC100_PA_ONENAND
97#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
98#define S3C_PA_RTC S5PC100_PA_RTC
99#define S3C_PA_TSADC S5PC100_PA_TSADC
100#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
101#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
102#define S3C_PA_WDT S5PC100_PA_WATCHDOG
110 103
111/* PCM */ 104#define S5P_PA_CHIPID S5PC100_PA_CHIPID
112#define S5PC100_PA_PCM0 0xF2400000 105#define S5P_PA_FIMC0 S5PC100_PA_FIMC0
113#define S5PC100_PA_PCM1 0xF2500000 106#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
107#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
108#define S5P_PA_SDRAM S5PC100_PA_SDRAM
109#define S5P_PA_SROMC S5PC100_PA_SROMC
110#define S5P_PA_SYSCON S5PC100_PA_SYSCON
111#define S5P_PA_TIMER S5PC100_PA_TIMER
114 112
115#define S5PC100_PA_SPDIF 0xF2600000 113#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
114#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
115#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
116 116
117#define S5PC100_PA_TSADC (0xF3000000) 117#define S5PC100_VA_OTHERS (S3C_VA_SYS + 0x10000)
118 118
119/* KEYPAD */ 119#define S3C_SZ_ONENAND_BUF (SZ_256M - SZ_32M)
120#define S5PC100_PA_KEYPAD (0xF3100000)
121 120
122#define S5PC100_PA_HSMMC(x) (0xED800000 + ((x) * 0x100000)) 121/* UART */
123 122
124#define S5PC100_PA_SDRAM (0x20000000) 123#define S3C_PA_UART S5PC100_PA_UART
125#define S5P_PA_SDRAM S5PC100_PA_SDRAM
126 124
127/* compatibiltiy defines. */ 125#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
128#define S3C_PA_UART S5PC100_PA_UART 126#define S5P_PA_UART0 S5P_PA_UART(0)
129#define S3C_PA_IIC S5PC100_PA_IIC0 127#define S5P_PA_UART1 S5P_PA_UART(1)
130#define S3C_PA_IIC1 S5PC100_PA_IIC1 128#define S5P_PA_UART2 S5P_PA_UART(2)
131#define S3C_PA_FB S5PC100_PA_FB 129#define S5P_PA_UART3 S5P_PA_UART(3)
132#define S3C_PA_G2D S5PC100_PA_G2D
133#define S3C_PA_G3D S5PC100_PA_G3D
134#define S3C_PA_JPEG S5PC100_PA_JPEG
135#define S3C_PA_ROTATOR S5PC100_PA_ROTATOR
136#define S5P_VA_VIC0 S5PC1XX_VA_VIC(0)
137#define S5P_VA_VIC1 S5PC1XX_VA_VIC(1)
138#define S5P_VA_VIC2 S5PC1XX_VA_VIC(2)
139#define S3C_PA_USB_HSOTG S5PC100_PA_USB_HSOTG
140#define S3C_PA_USB_HSPHY S5PC100_PA_USB_HSPHY
141#define S3C_PA_HSMMC0 S5PC100_PA_HSMMC(0)
142#define S3C_PA_HSMMC1 S5PC100_PA_HSMMC(1)
143#define S3C_PA_HSMMC2 S5PC100_PA_HSMMC(2)
144#define S3C_PA_KEYPAD S5PC100_PA_KEYPAD
145#define S3C_PA_WDT S5PC100_PA_WATCHDOG
146#define S3C_PA_TSADC S5PC100_PA_TSADC
147#define S3C_PA_ONENAND S5PC100_PA_ONENAND
148#define S3C_PA_ONENAND_BUF S5PC100_PA_ONENAND_BUF
149#define S3C_SZ_ONENAND_BUF S5PC100_SZ_ONENAND_BUF
150#define S3C_PA_RTC S5PC100_PA_RTC
151
152#define SAMSUNG_PA_ADC S5PC100_PA_TSADC
153#define SAMSUNG_PA_CFCON S5PC100_PA_CFCON
154#define SAMSUNG_PA_KEYPAD S5PC100_PA_KEYPAD
155 130
156#define S5P_PA_FIMC0 S5PC100_PA_FIMC0 131#define S5P_SZ_UART SZ_256
157#define S5P_PA_FIMC1 S5PC100_PA_FIMC1
158#define S5P_PA_FIMC2 S5PC100_PA_FIMC2
159 132
160#endif /* __ASM_ARCH_C100_MAP_H */ 133#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h
index 3611492ad681..1dd58836fd4f 100644
--- a/arch/arm/mach-s5pv210/include/mach/map.h
+++ b/arch/arm/mach-s5pv210/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5pv210/include/mach/map.h 1/* linux/arch/arm/mach-s5pv210/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 4 * http://www.samsung.com/
5 * 5 *
6 * S5PV210 - Memory map definitions 6 * S5PV210 - Memory map definitions
@@ -16,122 +16,120 @@
16#include <plat/map-base.h> 16#include <plat/map-base.h>
17#include <plat/map-s5p.h> 17#include <plat/map-s5p.h>
18 18
19#define S5PV210_PA_SROM_BANK5 (0xA8000000) 19#define S5PV210_PA_SDRAM 0x20000000
20 20
21#define S5PC110_PA_ONENAND (0xB0000000) 21#define S5PV210_PA_SROM_BANK5 0xA8000000
22#define S5P_PA_ONENAND S5PC110_PA_ONENAND
23 22
24#define S5PC110_PA_ONENAND_DMA (0xB0600000) 23#define S5PC110_PA_ONENAND 0xB0000000
25#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA 24#define S5PC110_PA_ONENAND_DMA 0xB0600000
26 25
27#define S5PV210_PA_CHIPID (0xE0000000) 26#define S5PV210_PA_CHIPID 0xE0000000
28#define S5P_PA_CHIPID S5PV210_PA_CHIPID
29 27
30#define S5PV210_PA_SYSCON (0xE0100000) 28#define S5PV210_PA_SYSCON 0xE0100000
31#define S5P_PA_SYSCON S5PV210_PA_SYSCON
32 29
33#define S5PV210_PA_GPIO (0xE0200000) 30#define S5PV210_PA_GPIO 0xE0200000
34 31
35/* SPI */ 32#define S5PV210_PA_SPDIF 0xE1100000
36#define S5PV210_PA_SPI0 0xE1300000
37#define S5PV210_PA_SPI1 0xE1400000
38 33
39#define S5PV210_PA_KEYPAD (0xE1600000) 34#define S5PV210_PA_SPI0 0xE1300000
35#define S5PV210_PA_SPI1 0xE1400000
40 36
41#define S5PV210_PA_IIC0 (0xE1800000) 37#define S5PV210_PA_KEYPAD 0xE1600000
42#define S5PV210_PA_IIC1 (0xFAB00000)
43#define S5PV210_PA_IIC2 (0xE1A00000)
44 38
45#define S5PV210_PA_TIMER (0xE2500000) 39#define S5PV210_PA_ADC 0xE1700000
46#define S5P_PA_TIMER S5PV210_PA_TIMER
47 40
48#define S5PV210_PA_SYSTIMER (0xE2600000) 41#define S5PV210_PA_IIC0 0xE1800000
42#define S5PV210_PA_IIC1 0xFAB00000
43#define S5PV210_PA_IIC2 0xE1A00000
49 44
50#define S5PV210_PA_WATCHDOG (0xE2700000) 45#define S5PV210_PA_AC97 0xE2200000
51 46
52#define S5PV210_PA_RTC (0xE2800000) 47#define S5PV210_PA_PCM0 0xE2300000
53#define S5PV210_PA_UART (0xE2900000) 48#define S5PV210_PA_PCM1 0xE1200000
49#define S5PV210_PA_PCM2 0xE2B00000
54 50
55#define S5P_PA_UART0 (S5PV210_PA_UART + 0x0) 51#define S5PV210_PA_TIMER 0xE2500000
56#define S5P_PA_UART1 (S5PV210_PA_UART + 0x400) 52#define S5PV210_PA_SYSTIMER 0xE2600000
57#define S5P_PA_UART2 (S5PV210_PA_UART + 0x800) 53#define S5PV210_PA_WATCHDOG 0xE2700000
58#define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00) 54#define S5PV210_PA_RTC 0xE2800000
59 55
60#define S5P_SZ_UART SZ_256 56#define S5PV210_PA_UART 0xE2900000
61 57
62#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) 58#define S5PV210_PA_SROMC 0xE8000000
63 59
64#define S5PV210_PA_SROMC (0xE8000000) 60#define S5PV210_PA_CFCON 0xE8200000
65#define S5P_PA_SROMC S5PV210_PA_SROMC
66 61
67#define S5PV210_PA_CFCON (0xE8200000) 62#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000))
68 63
69#define S5PV210_PA_MDMA 0xFA200000 64#define S5PV210_PA_HSOTG 0xEC000000
70#define S5PV210_PA_PDMA0 0xE0900000 65#define S5PV210_PA_HSPHY 0xEC100000
71#define S5PV210_PA_PDMA1 0xE0A00000
72 66
73#define S5PV210_PA_FB (0xF8000000) 67#define S5PV210_PA_IIS0 0xEEE30000
68#define S5PV210_PA_IIS1 0xE2100000
69#define S5PV210_PA_IIS2 0xE2A00000
74 70
75#define S5PV210_PA_FIMC0 (0xFB200000) 71#define S5PV210_PA_DMC0 0xF0000000
76#define S5PV210_PA_FIMC1 (0xFB300000) 72#define S5PV210_PA_DMC1 0xF1400000
77#define S5PV210_PA_FIMC2 (0xFB400000)
78 73
79#define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) 74#define S5PV210_PA_VIC0 0xF2000000
75#define S5PV210_PA_VIC1 0xF2100000
76#define S5PV210_PA_VIC2 0xF2200000
77#define S5PV210_PA_VIC3 0xF2300000
80 78
81#define S5PV210_PA_HSOTG (0xEC000000) 79#define S5PV210_PA_FB 0xF8000000
82#define S5PV210_PA_HSPHY (0xEC100000)
83 80
84#define S5PV210_PA_VIC0 (0xF2000000) 81#define S5PV210_PA_MDMA 0xFA200000
85#define S5PV210_PA_VIC1 (0xF2100000) 82#define S5PV210_PA_PDMA0 0xE0900000
86#define S5PV210_PA_VIC2 (0xF2200000) 83#define S5PV210_PA_PDMA1 0xE0A00000
87#define S5PV210_PA_VIC3 (0xF2300000)
88 84
89#define S5PV210_PA_SDRAM (0x20000000) 85#define S5PV210_PA_MIPI_CSIS 0xFA600000
90#define S5P_PA_SDRAM S5PV210_PA_SDRAM
91 86
92/* S/PDIF */ 87#define S5PV210_PA_FIMC0 0xFB200000
93#define S5PV210_PA_SPDIF 0xE1100000 88#define S5PV210_PA_FIMC1 0xFB300000
89#define S5PV210_PA_FIMC2 0xFB400000
94 90
95/* I2S */ 91/* Compatibiltiy Defines */
96#define S5PV210_PA_IIS0 0xEEE30000
97#define S5PV210_PA_IIS1 0xE2100000
98#define S5PV210_PA_IIS2 0xE2A00000
99 92
100/* PCM */ 93#define S3C_PA_FB S5PV210_PA_FB
101#define S5PV210_PA_PCM0 0xE2300000 94#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
102#define S5PV210_PA_PCM1 0xE1200000 95#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
103#define S5PV210_PA_PCM2 0xE2B00000 96#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
97#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
98#define S3C_PA_IIC S5PV210_PA_IIC0
99#define S3C_PA_IIC1 S5PV210_PA_IIC1
100#define S3C_PA_IIC2 S5PV210_PA_IIC2
101#define S3C_PA_RTC S5PV210_PA_RTC
102#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
103#define S3C_PA_WDT S5PV210_PA_WATCHDOG
104 104
105/* AC97 */ 105#define S5P_PA_CHIPID S5PV210_PA_CHIPID
106#define S5PV210_PA_AC97 0xE2200000 106#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
107#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
108#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
109#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
110#define S5P_PA_ONENAND S5PC110_PA_ONENAND
111#define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA
112#define S5P_PA_SDRAM S5PV210_PA_SDRAM
113#define S5P_PA_SROMC S5PV210_PA_SROMC
114#define S5P_PA_SYSCON S5PV210_PA_SYSCON
115#define S5P_PA_TIMER S5PV210_PA_TIMER
107 116
108#define S5PV210_PA_ADC (0xE1700000) 117#define SAMSUNG_PA_ADC S5PV210_PA_ADC
118#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON
119#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD
109 120
110#define S5PV210_PA_DMC0 (0xF0000000) 121/* UART */
111#define S5PV210_PA_DMC1 (0xF1400000)
112 122
113#define S5PV210_PA_MIPI_CSIS 0xFA600000 123#define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET))
114 124
115/* compatibiltiy defines. */ 125#define S3C_PA_UART S5PV210_PA_UART
116#define S3C_PA_UART S5PV210_PA_UART
117#define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0)
118#define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1)
119#define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2)
120#define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3)
121#define S3C_PA_IIC S5PV210_PA_IIC0
122#define S3C_PA_IIC1 S5PV210_PA_IIC1
123#define S3C_PA_IIC2 S5PV210_PA_IIC2
124#define S3C_PA_FB S5PV210_PA_FB
125#define S3C_PA_RTC S5PV210_PA_RTC
126#define S3C_PA_WDT S5PV210_PA_WATCHDOG
127#define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG
128#define S5P_PA_FIMC0 S5PV210_PA_FIMC0
129#define S5P_PA_FIMC1 S5PV210_PA_FIMC1
130#define S5P_PA_FIMC2 S5PV210_PA_FIMC2
131#define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS
132 126
133#define SAMSUNG_PA_ADC S5PV210_PA_ADC 127#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
134#define SAMSUNG_PA_CFCON S5PV210_PA_CFCON 128#define S5P_PA_UART0 S5P_PA_UART(0)
135#define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD 129#define S5P_PA_UART1 S5P_PA_UART(1)
130#define S5P_PA_UART2 S5P_PA_UART(2)
131#define S5P_PA_UART3 S5P_PA_UART(3)
132
133#define S5P_SZ_UART SZ_256
136 134
137#endif /* __ASM_ARCH_MAP_H */ 135#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 461aa035afc0..557add4fc56c 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = {
149 149
150static struct regulator_init_data aquila_ldo3_data = { 150static struct regulator_init_data aquila_ldo3_data = {
151 .constraints = { 151 .constraints = {
152 .name = "VUSB/MIPI_1.1V", 152 .name = "VUSB+MIPI_1.1V",
153 .min_uV = 1100000, 153 .min_uV = 1100000,
154 .max_uV = 1100000, 154 .max_uV = 1100000,
155 .apply_uV = 1, 155 .apply_uV = 1,
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = {
197 197
198static struct regulator_init_data aquila_ldo8_data = { 198static struct regulator_init_data aquila_ldo8_data = {
199 .constraints = { 199 .constraints = {
200 .name = "VUSB/VADC_3.3V", 200 .name = "VUSB+VADC_3.3V",
201 .min_uV = 3300000, 201 .min_uV = 3300000,
202 .max_uV = 3300000, 202 .max_uV = 3300000,
203 .apply_uV = 1, 203 .apply_uV = 1,
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = {
207 207
208static struct regulator_init_data aquila_ldo9_data = { 208static struct regulator_init_data aquila_ldo9_data = {
209 .constraints = { 209 .constraints = {
210 .name = "VCC/VCAM_2.8V", 210 .name = "VCC+VCAM_2.8V",
211 .min_uV = 2800000, 211 .min_uV = 2800000,
212 .max_uV = 2800000, 212 .max_uV = 2800000,
213 .apply_uV = 1, 213 .apply_uV = 1,
@@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = {
381 .buck1_set1 = S5PV210_GPH0(3), 381 .buck1_set1 = S5PV210_GPH0(3),
382 .buck1_set2 = S5PV210_GPH0(4), 382 .buck1_set2 = S5PV210_GPH0(4),
383 .buck2_set3 = S5PV210_GPH0(5), 383 .buck2_set3 = S5PV210_GPH0(5),
384 .buck1_max_voltage1 = 1200000, 384 .buck1_voltage1 = 1200000,
385 .buck1_max_voltage2 = 1200000, 385 .buck1_voltage2 = 1200000,
386 .buck2_max_voltage = 1200000, 386 .buck1_voltage3 = 1200000,
387 .buck1_voltage4 = 1200000,
388 .buck2_voltage1 = 1200000,
389 .buck2_voltage2 = 1200000,
387}; 390};
388#endif 391#endif
389 392
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index e22d5112fd44..056f5c769b0a 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = {
288 288
289static struct regulator_init_data goni_ldo3_data = { 289static struct regulator_init_data goni_ldo3_data = {
290 .constraints = { 290 .constraints = {
291 .name = "VUSB/MIPI_1.1V", 291 .name = "VUSB+MIPI_1.1V",
292 .min_uV = 1100000, 292 .min_uV = 1100000,
293 .max_uV = 1100000, 293 .max_uV = 1100000,
294 .apply_uV = 1, 294 .apply_uV = 1,
@@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = {
337 337
338static struct regulator_init_data goni_ldo8_data = { 338static struct regulator_init_data goni_ldo8_data = {
339 .constraints = { 339 .constraints = {
340 .name = "VUSB/VADC_3.3V", 340 .name = "VUSB+VADC_3.3V",
341 .min_uV = 3300000, 341 .min_uV = 3300000,
342 .max_uV = 3300000, 342 .max_uV = 3300000,
343 .apply_uV = 1, 343 .apply_uV = 1,
@@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = {
347 347
348static struct regulator_init_data goni_ldo9_data = { 348static struct regulator_init_data goni_ldo9_data = {
349 .constraints = { 349 .constraints = {
350 .name = "VCC/VCAM_2.8V", 350 .name = "VCC+VCAM_2.8V",
351 .min_uV = 2800000, 351 .min_uV = 2800000,
352 .max_uV = 2800000, 352 .max_uV = 2800000,
353 .apply_uV = 1, 353 .apply_uV = 1,
@@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = {
521 .buck1_set1 = S5PV210_GPH0(3), 521 .buck1_set1 = S5PV210_GPH0(3),
522 .buck1_set2 = S5PV210_GPH0(4), 522 .buck1_set2 = S5PV210_GPH0(4),
523 .buck2_set3 = S5PV210_GPH0(5), 523 .buck2_set3 = S5PV210_GPH0(5),
524 .buck1_max_voltage1 = 1200000, 524 .buck1_voltage1 = 1200000,
525 .buck1_max_voltage2 = 1200000, 525 .buck1_voltage2 = 1200000,
526 .buck2_max_voltage = 1200000, 526 .buck1_voltage3 = 1200000,
527 .buck1_voltage4 = 1200000,
528 .buck2_voltage1 = 1200000,
529 .buck2_voltage2 = 1200000,
527}; 530};
528#endif 531#endif
529 532
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h
index 3060f78e12ab..901657fa7a12 100644
--- a/arch/arm/mach-s5pv310/include/mach/map.h
+++ b/arch/arm/mach-s5pv310/include/mach/map.h
@@ -1,6 +1,6 @@
1/* linux/arch/arm/mach-s5pv310/include/mach/map.h 1/* linux/arch/arm/mach-s5pv310/include/mach/map.h
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 4 * http://www.samsung.com/
5 * 5 *
6 * S5PV310 - Memory map definitions 6 * S5PV310 - Memory map definitions
@@ -23,90 +23,43 @@
23 23
24#include <plat/map-s5p.h> 24#include <plat/map-s5p.h>
25 25
26#define S5PV310_PA_SYSRAM (0x02025000) 26#define S5PV310_PA_SYSRAM 0x02025000
27 27
28#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) 28#define S5PV310_PA_I2S0 0x03830000
29 29#define S5PV310_PA_I2S1 0xE3100000
30#define S5PC210_PA_ONENAND (0x0C000000) 30#define S5PV310_PA_I2S2 0xE2A00000
31#define S5P_PA_ONENAND S5PC210_PA_ONENAND
32
33#define S5PC210_PA_ONENAND_DMA (0x0C600000)
34#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
35
36#define S5PV310_PA_CHIPID (0x10000000)
37#define S5P_PA_CHIPID S5PV310_PA_CHIPID
38
39#define S5PV310_PA_SYSCON (0x10010000)
40#define S5P_PA_SYSCON S5PV310_PA_SYSCON
41 31
42#define S5PV310_PA_PMU (0x10020000) 32#define S5PV310_PA_PCM0 0x03840000
33#define S5PV310_PA_PCM1 0x13980000
34#define S5PV310_PA_PCM2 0x13990000
43 35
44#define S5PV310_PA_CMU (0x10030000) 36#define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000))
45
46#define S5PV310_PA_WATCHDOG (0x10060000)
47#define S5PV310_PA_RTC (0x10070000)
48
49#define S5PV310_PA_DMC0 (0x10400000)
50
51#define S5PV310_PA_COMBINER (0x10448000)
52
53#define S5PV310_PA_COREPERI (0x10500000)
54#define S5PV310_PA_GIC_CPU (0x10500100)
55#define S5PV310_PA_TWD (0x10500600)
56#define S5PV310_PA_GIC_DIST (0x10501000)
57#define S5PV310_PA_L2CC (0x10502000)
58
59/* DMA */
60#define S5PV310_PA_MDMA 0x10810000
61#define S5PV310_PA_PDMA0 0x12680000
62#define S5PV310_PA_PDMA1 0x12690000
63
64#define S5PV310_PA_GPIO1 (0x11400000)
65#define S5PV310_PA_GPIO2 (0x11000000)
66#define S5PV310_PA_GPIO3 (0x03860000)
67
68#define S5PV310_PA_MIPI_CSIS0 0x11880000
69#define S5PV310_PA_MIPI_CSIS1 0x11890000
70 37
71#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) 38#define S5PC210_PA_ONENAND 0x0C000000
39#define S5PC210_PA_ONENAND_DMA 0x0C600000
72 40
73#define S5PV310_PA_SROMC (0x12570000) 41#define S5PV310_PA_CHIPID 0x10000000
74#define S5P_PA_SROMC S5PV310_PA_SROMC
75 42
76/* S/PDIF */ 43#define S5PV310_PA_SYSCON 0x10010000
77#define S5PV310_PA_SPDIF 0xE1100000 44#define S5PV310_PA_PMU 0x10020000
45#define S5PV310_PA_CMU 0x10030000
78 46
79/* I2S */ 47#define S5PV310_PA_WATCHDOG 0x10060000
80#define S5PV310_PA_I2S0 0x03830000 48#define S5PV310_PA_RTC 0x10070000
81#define S5PV310_PA_I2S1 0xE3100000
82#define S5PV310_PA_I2S2 0xE2A00000
83 49
84/* PCM */ 50#define S5PV310_PA_DMC0 0x10400000
85#define S5PV310_PA_PCM0 0x03840000
86#define S5PV310_PA_PCM1 0x13980000
87#define S5PV310_PA_PCM2 0x13990000
88 51
89/* AC97 */ 52#define S5PV310_PA_COMBINER 0x10448000
90#define S5PV310_PA_AC97 0x139A0000
91 53
92#define S5PV310_PA_UART (0x13800000) 54#define S5PV310_PA_COREPERI 0x10500000
55#define S5PV310_PA_GIC_CPU 0x10500100
56#define S5PV310_PA_TWD 0x10500600
57#define S5PV310_PA_GIC_DIST 0x10501000
58#define S5PV310_PA_L2CC 0x10502000
93 59
94#define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET)) 60#define S5PV310_PA_MDMA 0x10810000
95#define S5P_PA_UART0 S5P_PA_UART(0) 61#define S5PV310_PA_PDMA0 0x12680000
96#define S5P_PA_UART1 S5P_PA_UART(1) 62#define S5PV310_PA_PDMA1 0x12690000
97#define S5P_PA_UART2 S5P_PA_UART(2)
98#define S5P_PA_UART3 S5P_PA_UART(3)
99#define S5P_PA_UART4 S5P_PA_UART(4)
100
101#define S5P_SZ_UART SZ_256
102
103#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
104
105#define S5PV310_PA_TIMER (0x139D0000)
106#define S5P_PA_TIMER S5PV310_PA_TIMER
107
108#define S5PV310_PA_SDRAM (0x40000000)
109#define S5P_PA_SDRAM S5PV310_PA_SDRAM
110 63
111#define S5PV310_PA_SYSMMU_MDMA 0x10A40000 64#define S5PV310_PA_SYSMMU_MDMA 0x10A40000
112#define S5PV310_PA_SYSMMU_SSS 0x10A50000 65#define S5PV310_PA_SYSMMU_SSS 0x10A50000
@@ -125,8 +78,31 @@
125#define S5PV310_PA_SYSMMU_MFC_L 0x13620000 78#define S5PV310_PA_SYSMMU_MFC_L 0x13620000
126#define S5PV310_PA_SYSMMU_MFC_R 0x13630000 79#define S5PV310_PA_SYSMMU_MFC_R 0x13630000
127 80
128/* compatibiltiy defines. */ 81#define S5PV310_PA_GPIO1 0x11400000
129#define S3C_PA_UART S5PV310_PA_UART 82#define S5PV310_PA_GPIO2 0x11000000
83#define S5PV310_PA_GPIO3 0x03860000
84
85#define S5PV310_PA_MIPI_CSIS0 0x11880000
86#define S5PV310_PA_MIPI_CSIS1 0x11890000
87
88#define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000))
89
90#define S5PV310_PA_SROMC 0x12570000
91
92#define S5PV310_PA_UART 0x13800000
93
94#define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000))
95
96#define S5PV310_PA_AC97 0x139A0000
97
98#define S5PV310_PA_TIMER 0x139D0000
99
100#define S5PV310_PA_SDRAM 0x40000000
101
102#define S5PV310_PA_SPDIF 0xE1100000
103
104/* Compatibiltiy Defines */
105
130#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) 106#define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0)
131#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) 107#define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1)
132#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) 108#define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2)
@@ -141,7 +117,28 @@
141#define S3C_PA_IIC7 S5PV310_PA_IIC(7) 117#define S3C_PA_IIC7 S5PV310_PA_IIC(7)
142#define S3C_PA_RTC S5PV310_PA_RTC 118#define S3C_PA_RTC S5PV310_PA_RTC
143#define S3C_PA_WDT S5PV310_PA_WATCHDOG 119#define S3C_PA_WDT S5PV310_PA_WATCHDOG
120
121#define S5P_PA_CHIPID S5PV310_PA_CHIPID
144#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 122#define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0
145#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 123#define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1
124#define S5P_PA_ONENAND S5PC210_PA_ONENAND
125#define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA
126#define S5P_PA_SDRAM S5PV310_PA_SDRAM
127#define S5P_PA_SROMC S5PV310_PA_SROMC
128#define S5P_PA_SYSCON S5PV310_PA_SYSCON
129#define S5P_PA_TIMER S5PV310_PA_TIMER
130
131/* UART */
132
133#define S3C_PA_UART S5PV310_PA_UART
134
135#define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET))
136#define S5P_PA_UART0 S5P_PA_UART(0)
137#define S5P_PA_UART1 S5P_PA_UART(1)
138#define S5P_PA_UART2 S5P_PA_UART(2)
139#define S5P_PA_UART3 S5P_PA_UART(3)
140#define S5P_PA_UART4 S5P_PA_UART(4)
141
142#define S5P_SZ_UART SZ_256
146 143
147#endif /* __ASM_ARCH_MAP_H */ 144#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index d43c5ef58eb6..bd3e1bfdd6aa 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -241,6 +241,9 @@ static struct locomo_platform_data locomo_info = {
241struct platform_device collie_locomo_device = { 241struct platform_device collie_locomo_device = {
242 .name = "locomo", 242 .name = "locomo",
243 .id = 0, 243 .id = 0,
244 .dev = {
245 .platform_data = &locomo_info,
246 },
244 .num_resources = ARRAY_SIZE(locomo_resources), 247 .num_resources = ARRAY_SIZE(locomo_resources),
245 .resource = locomo_resources, 248 .resource = locomo_resources,
246}; 249};
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 2123b96b5638..4303a86e6e38 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -454,6 +454,7 @@ static void __init ag5evm_init(void)
454 gpio_direction_output(GPIO_PORT217, 0); 454 gpio_direction_output(GPIO_PORT217, 0);
455 mdelay(1); 455 mdelay(1);
456 gpio_set_value(GPIO_PORT217, 1); 456 gpio_set_value(GPIO_PORT217, 1);
457 mdelay(100);
457 458
458 /* LCD backlight controller */ 459 /* LCD backlight controller */
459 gpio_request(GPIO_PORT235, NULL); /* RESET */ 460 gpio_request(GPIO_PORT235, NULL); /* RESET */
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 3cf0951caa2d..81d6536552a9 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void)
1303 1303
1304 lcdc_info.clock_source = LCDC_CLK_BUS; 1304 lcdc_info.clock_source = LCDC_CLK_BUS;
1305 lcdc_info.ch[0].interface_type = RGB18; 1305 lcdc_info.ch[0].interface_type = RGB18;
1306 lcdc_info.ch[0].clock_divider = 2; 1306 lcdc_info.ch[0].clock_divider = 3;
1307 lcdc_info.ch[0].flags = 0; 1307 lcdc_info.ch[0].flags = 0;
1308 lcdc_info.ch[0].lcd_size_cfg.width = 152; 1308 lcdc_info.ch[0].lcd_size_cfg.width = 152;
1309 lcdc_info.ch[0].lcd_size_cfg.height = 91; 1309 lcdc_info.ch[0].lcd_size_cfg.height = 91;
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index fb4213a4e15a..1657eac5dde2 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
303 .lcd_cfg = mackerel_lcdc_modes, 303 .lcd_cfg = mackerel_lcdc_modes,
304 .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), 304 .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
305 .interface_type = RGB24, 305 .interface_type = RGB24,
306 .clock_divider = 2, 306 .clock_divider = 3,
307 .flags = 0, 307 .flags = 0,
308 .lcd_size_cfg.width = 152, 308 .lcd_size_cfg.width = 152,
309 .lcd_size_cfg.height = 91, 309 .lcd_size_cfg.height = 91,
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c
index ddd4a1b775f0..7e58904c1c8c 100644
--- a/arch/arm/mach-shmobile/clock-sh73a0.c
+++ b/arch/arm/mach-shmobile/clock-sh73a0.c
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = {
263}; 263};
264 264
265enum { MSTP001, 265enum { MSTP001,
266 MSTP125, MSTP118, MSTP116, MSTP100, 266 MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100,
267 MSTP219, 267 MSTP219,
268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 268 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, 269 MSTP331, MSTP329, MSTP325, MSTP323, MSTP312,
@@ -275,6 +275,10 @@ enum { MSTP001,
275 275
276static struct clk mstp_clks[MSTP_NR] = { 276static struct clk mstp_clks[MSTP_NR] = {
277 [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ 277 [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */
278 [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */
279 [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */
280 [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */
281 [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */
278 [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ 282 [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
279 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ 283 [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */
280 [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ 284 [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */
@@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = {
306 CLKDEV_CON_ID("r_clk", &r_clk), 310 CLKDEV_CON_ID("r_clk", &r_clk),
307 311
308 /* DIV6 clocks */ 312 /* DIV6 clocks */
313 CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]),
314 CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]),
315 CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]),
309 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), 316 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
310 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), 317 CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
311 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), 318 CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
@@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = {
313 320
314 /* MSTP32 clocks */ 321 /* MSTP32 clocks */
315 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ 322 CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
316 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ 323 CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */
324 CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */
325 CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */
326 CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */
317 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ 327 CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */
318 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ 328 CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */
319 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
320 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ 329 CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */
330 CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */
331 CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
321 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ 332 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */
322 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ 333 CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
323 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ 334 CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */
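The new MSTP entries and clkdev lookups above gate the CEU/CSI2 blocks via SMSTPCR1 bits 26-29 and let the matching platform devices find their clocks by device name. A hedged sketch of how a driver instance would pick one up (device name taken from the lookup table above; error handling trimmed):

	/* Sketch: in the probe() of the "sh_mobile_ceu.0" platform device,
	 * a NULL con_id matches the CLKDEV_DEV_ID() entry added above. */
	struct clk *clk = clk_get(&pdev->dev, NULL);

	if (!IS_ERR(clk))
		clk_enable(clk);	/* ungates CEU0 (MSTP127) */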
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
index efd3687ba190..3029aba38688 100644
--- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt
@@ -6,13 +6,10 @@ LIST "RWT Setting"
6EW 0xE6020004, 0xA500 6EW 0xE6020004, 0xA500
7EW 0xE6030004, 0xA500 7EW 0xE6030004, 0xA500
8 8
9DD 0x01001000, 0x01001000
10
11LIST "GPIO Setting" 9LIST "GPIO Setting"
12EB 0xE6051013, 0xA2 10EB 0xE6051013, 0xA2
13 11
14LIST "CPG" 12LIST "CPG"
15ED 0xE6150080, 0x00000180
16ED 0xE61500C0, 0x00000002 13ED 0xE61500C0, 0x00000002
17 14
18WAIT 1, 0xFE40009C 15WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
37 34
38WAIT 1, 0xFE40009C 35WAIT 1, 0xFE40009C
39 36
37LIST "SUB/USBClk"
38ED 0xE6150080, 0x00000180
39
40LIST "BSC" 40LIST "BSC"
41ED 0xFEC10000, 0x00E0001B 41ED 0xFEC10000, 0x00E0001B
42 42
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
53ED 0xFE40004C, 0x00110209 53ED 0xFE40004C, 0x00110209
54ED 0xFE400010, 0x00000087 54ED 0xFE400010, 0x00000087
55 55
56WAIT 10, 0xFE40009C 56WAIT 30, 0xFE40009C
57 57
58ED 0xFE400084, 0x0000003F 58ED 0xFE400084, 0x0000003F
59EB 0xFE500000, 0x00 59EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
84 84
85WAIT 1, 0xFE40009C 85WAIT 1, 0xFE40009C
86 86
87ED 0xE6150354, 0x00000002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11 90EB 0xE6053098, 0x11
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
index efd3687ba190..3029aba38688 100644
--- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
+++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt
@@ -6,13 +6,10 @@ LIST "RWT Setting"
6EW 0xE6020004, 0xA500 6EW 0xE6020004, 0xA500
7EW 0xE6030004, 0xA500 7EW 0xE6030004, 0xA500
8 8
9DD 0x01001000, 0x01001000
10
11LIST "GPIO Setting" 9LIST "GPIO Setting"
12EB 0xE6051013, 0xA2 10EB 0xE6051013, 0xA2
13 11
14LIST "CPG" 12LIST "CPG"
15ED 0xE6150080, 0x00000180
16ED 0xE61500C0, 0x00000002 13ED 0xE61500C0, 0x00000002
17 14
18WAIT 1, 0xFE40009C 15WAIT 1, 0xFE40009C
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040
37 34
38WAIT 1, 0xFE40009C 35WAIT 1, 0xFE40009C
39 36
37LIST "SUB/USBClk"
38ED 0xE6150080, 0x00000180
39
40LIST "BSC" 40LIST "BSC"
41ED 0xFEC10000, 0x00E0001B 41ED 0xFEC10000, 0x00E0001B
42 42
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505
53ED 0xFE40004C, 0x00110209 53ED 0xFE40004C, 0x00110209
54ED 0xFE400010, 0x00000087 54ED 0xFE400010, 0x00000087
55 55
56WAIT 10, 0xFE40009C 56WAIT 30, 0xFE40009C
57 57
58ED 0xFE400084, 0x0000003F 58ED 0xFE400084, 0x0000003F
59EB 0xFE500000, 0x00 59EB 0xFE500000, 0x00
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050
84 84
85WAIT 1, 0xFE40009C 85WAIT 1, 0xFE40009C
86 86
87ED 0xE6150354, 0x00000002 87ED 0xFE400354, 0x01AD8002
88 88
89LIST "SCIF0 - Serial port for earlyprintk" 89LIST "SCIF0 - Serial port for earlyprintk"
90EB 0xE6053098, 0x11 90EB 0xE6053098, 0x11
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h
index cacf17a958cd..53677e464d4b 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear320.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear320.h
@@ -62,7 +62,7 @@
62#define SPEAR320_SMII1_BASE 0xAB000000 62#define SPEAR320_SMII1_BASE 0xAB000000
63#define SPEAR320_SMII1_SIZE 0x01000000 63#define SPEAR320_SMII1_SIZE 0x01000000
64 64
65#define SPEAR320_SOC_CONFIG_BASE 0xB4000000 65#define SPEAR320_SOC_CONFIG_BASE 0xB3000000
66#define SPEAR320_SOC_CONFIG_SIZE 0x00000070 66#define SPEAR320_SOC_CONFIG_SIZE 0x00000070
67/* Interrupt registers offsets and masks */ 67/* Interrupt registers offsets and masks */
68#define INT_STS_MASK_REG 0x04 68#define INT_STS_MASK_REG 0x04
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h
index 66ad2760c621..04c779832c78 100644
--- a/arch/arm/mach-tegra/include/mach/kbc.h
+++ b/arch/arm/mach-tegra/include/mach/kbc.h
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data {
57 const struct matrix_keymap_data *keymap_data; 57 const struct matrix_keymap_data *keymap_data;
58 58
59 bool wakeup; 59 bool wakeup;
60 bool use_fn_map;
60}; 61};
61#endif 62#endif
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9d30c6f804b9..e4509bae8fc4 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -405,7 +405,7 @@ config CPU_V6
405config CPU_32v6K 405config CPU_32v6K
406 bool "Support ARM V6K processor extensions" if !SMP 406 bool "Support ARM V6K processor extensions" if !SMP
407 depends on CPU_V6 || CPU_V7 407 depends on CPU_V6 || CPU_V7
408 default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) 408 default y if SMP
409 help 409 help
410 Say Y here if your ARMv6 processor supports the 'K' extension. 410 Say Y here if your ARMv6 processor supports the 'K' extension.
411 This enables the kernel to use some instructions not present 411 This enables the kernel to use some instructions not present
@@ -416,7 +416,7 @@ config CPU_32v6K
416# ARMv7 416# ARMv7
417config CPU_V7 417config CPU_V7
418 bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX 418 bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
419 select CPU_32v6K if !ARCH_OMAP2 419 select CPU_32v6K
420 select CPU_32v7 420 select CPU_32v7
421 select CPU_ABRT_EV7 421 select CPU_ABRT_EV7
422 select CPU_PABRT_V7 422 select CPU_PABRT_V7
@@ -644,7 +644,7 @@ config ARM_THUMBEE
644 644
645config SWP_EMULATE 645config SWP_EMULATE
646 bool "Emulate SWP/SWPB instructions" 646 bool "Emulate SWP/SWPB instructions"
647 depends on CPU_V7 && !CPU_V6 647 depends on !CPU_USE_DOMAINS && CPU_V7 && !CPU_V6
648 select HAVE_PROC_CPU if PROC_FS 648 select HAVE_PROC_CPU if PROC_FS
649 default y if SMP 649 default y if SMP
650 help 650 help
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 170c9bb95866..f2ce38e085d2 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask)
49static inline void cache_sync(void) 49static inline void cache_sync(void)
50{ 50{
51 void __iomem *base = l2x0_base; 51 void __iomem *base = l2x0_base;
52
53#ifdef CONFIG_ARM_ERRATA_753970
54 /* write to an unmmapped register */
55 writel_relaxed(0, base + L2X0_DUMMY_REG);
56#else
52 writel_relaxed(0, base + L2X0_CACHE_SYNC); 57 writel_relaxed(0, base + L2X0_CACHE_SYNC);
58#endif
53 cache_wait(base + L2X0_CACHE_SYNC, 1); 59 cache_wait(base + L2X0_CACHE_SYNC, 1);
54} 60}
55 61
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 0c1172b56b4e..8e3356239136 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -264,6 +264,12 @@ __v7_setup:
264 orreq r10, r10, #1 << 6 @ set bit #6 264 orreq r10, r10, #1 << 6 @ set bit #6
265 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register 265 mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register
266#endif 266#endif
267#ifdef CONFIG_ARM_ERRATA_751472
268 cmp r6, #0x30 @ present prior to r3p0
269 mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register
270 orrlt r10, r10, #1 << 11 @ set bit #11
271 mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register
272#endif
267 273
2683: mov r10, #0 2743: mov r10, #0
269#ifdef HARVARD_CACHE 275#ifdef HARVARD_CACHE
diff --git a/arch/arm/oprofile/common.c b/arch/arm/oprofile/common.c
index 8aa974491dfc..c074e66ad224 100644
--- a/arch/arm/oprofile/common.c
+++ b/arch/arm/oprofile/common.c
@@ -10,8 +10,6 @@
10 */ 10 */
11 11
12#include <linux/cpumask.h> 12#include <linux/cpumask.h>
13#include <linux/err.h>
14#include <linux/errno.h>
15#include <linux/init.h> 13#include <linux/init.h>
16#include <linux/mutex.h> 14#include <linux/mutex.h>
17#include <linux/oprofile.h> 15#include <linux/oprofile.h>
@@ -46,6 +44,7 @@ char *op_name_from_perf_id(void)
46 return NULL; 44 return NULL;
47 } 45 }
48} 46}
47#endif
49 48
50static int report_trace(struct stackframe *frame, void *d) 49static int report_trace(struct stackframe *frame, void *d)
51{ 50{
@@ -85,7 +84,7 @@ static struct frame_tail* user_backtrace(struct frame_tail *tail)
85 84
86 /* frame pointers should strictly progress back up the stack 85 /* frame pointers should strictly progress back up the stack
87 * (towards higher addresses) */ 86 * (towards higher addresses) */
88 if (tail >= buftail[0].fp) 87 if (tail + 1 >= buftail[0].fp)
89 return NULL; 88 return NULL;
90 89
91 return buftail[0].fp-1; 90 return buftail[0].fp-1;
@@ -111,6 +110,7 @@ static void arm_backtrace(struct pt_regs * const regs, unsigned int depth)
111 110
112int __init oprofile_arch_init(struct oprofile_operations *ops) 111int __init oprofile_arch_init(struct oprofile_operations *ops)
113{ 112{
113 /* provide backtrace support also in timer mode: */
114 ops->backtrace = arm_backtrace; 114 ops->backtrace = arm_backtrace;
115 115
116 return oprofile_perf_init(ops); 116 return oprofile_perf_init(ops);
@@ -120,11 +120,3 @@ void __exit oprofile_arch_exit(void)
120{ 120{
121 oprofile_perf_exit(); 121 oprofile_perf_exit();
122} 122}
123#else
124int __init oprofile_arch_init(struct oprofile_operations *ops)
125{
126 pr_info("oprofile: hardware counters not available\n");
127 return -ENODEV;
128}
129void __exit oprofile_arch_exit(void) {}
130#endif /* CONFIG_HW_PERF_EVENTS */
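Two things change above: the init path keeps backtrace support even when hardware counters are absent (timer mode), and user_backtrace() now requires the saved frame pointer to point past the record just read, not merely past its start. A hedged C sketch of that progress check on a generic frame walk (the frame_tail layout here is illustrative, not the kernel's exact struct):

	struct frame {
		struct frame *fp;	/* caller's frame record (illustrative layout) */
		unsigned long lr;
	};

	static struct frame *next_frame(struct frame *tail, const struct frame *cur)
	{
		/*
		 * Frames must strictly move up the stack: the next record has to
		 * start above the one just read (tail + 1); otherwise a bogus or
		 * self-referencing fp would make the walk loop forever.
		 */
		if (tail + 1 >= cur->fp)
			return NULL;
		return cur->fp - 1;
	}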
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c
index 459b319a9fad..49d3208793e5 100644
--- a/arch/arm/plat-omap/mailbox.c
+++ b/arch/arm/plat-omap/mailbox.c
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
322 322
323struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) 323struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
324{ 324{
325 struct omap_mbox *mbox; 325 struct omap_mbox *_mbox, *mbox = NULL;
326 int ret; 326 int i, ret;
327 327
328 if (!mboxes) 328 if (!mboxes)
329 return ERR_PTR(-EINVAL); 329 return ERR_PTR(-EINVAL);
330 330
331 for (mbox = *mboxes; mbox; mbox++) 331 for (i = 0; (_mbox = mboxes[i]); i++) {
332 if (!strcmp(mbox->name, name)) 332 if (!strcmp(_mbox->name, name)) {
333 mbox = _mbox;
333 break; 334 break;
335 }
336 }
334 337
335 if (!mbox) 338 if (!mbox)
336 return ERR_PTR(-ENOENT); 339 return ERR_PTR(-ENOENT);
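The lookup above changes because mboxes is a NULL-terminated array of pointers: walking it with mbox++ stepped through omap_mbox structures rather than pointer slots. A minimal, self-contained sketch of the corrected pattern (names are illustrative):

	#include <stddef.h>
	#include <string.h>

	struct box { const char *name; };

	/* Walk a NULL-terminated array of pointers and return the named entry. */
	static struct box *find_box(struct box **boxes, const char *name)
	{
		struct box *b;
		int i;

		for (i = 0; (b = boxes[i]); i++)
			if (!strcmp(b->name, name))
				return b;

		return NULL;	/* hit the terminating NULL slot */
	}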
diff --git a/arch/arm/plat-pxa/mfp.c b/arch/arm/plat-pxa/mfp.c
index b77e018d36c1..a9aa5ad3f4eb 100644
--- a/arch/arm/plat-pxa/mfp.c
+++ b/arch/arm/plat-pxa/mfp.c
@@ -139,10 +139,11 @@ static const unsigned long mfpr_edge[] = {
139#define mfp_configured(p) ((p)->config != -1) 139#define mfp_configured(p) ((p)->config != -1)
140 140
141/* 141/*
142 * perform a read-back of any MFPR register to make sure the 142 * perform a read-back of any valid MFPR register to make sure the
143 * previous writings are finished 143 * previous writings are finished
144 */ 144 */
145#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + 0) 145static unsigned long mfpr_off_readback;
146#define mfpr_sync() (void)__raw_readl(mfpr_mmio_base + mfpr_off_readback)
146 147
147static inline void __mfp_config_run(struct mfp_pin *p) 148static inline void __mfp_config_run(struct mfp_pin *p)
148{ 149{
@@ -248,6 +249,9 @@ void __init mfp_init_addr(struct mfp_addr_map *map)
248 249
249 spin_lock_irqsave(&mfp_spin_lock, flags); 250 spin_lock_irqsave(&mfp_spin_lock, flags);
250 251
252 /* mfp offset for readback */
253 mfpr_off_readback = map[0].offset;
254
251 for (p = map; p->start != MFP_PIN_INVALID; p++) { 255 for (p = map; p->start != MFP_PIN_INVALID; p++) {
252 offset = p->offset; 256 offset = p->offset;
253 i = p->start; 257 i = p->start;
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c
index 6a7342886171..afaf87fdb93e 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-s5p/dev-uart.c
@@ -28,7 +28,7 @@
28static struct resource s5p_uart0_resource[] = { 28static struct resource s5p_uart0_resource[] = {
29 [0] = { 29 [0] = {
30 .start = S5P_PA_UART0, 30 .start = S5P_PA_UART0,
31 .end = S5P_PA_UART0 + S5P_SZ_UART, 31 .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
32 .flags = IORESOURCE_MEM, 32 .flags = IORESOURCE_MEM,
33 }, 33 },
34 [1] = { 34 [1] = {
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = {
51static struct resource s5p_uart1_resource[] = { 51static struct resource s5p_uart1_resource[] = {
52 [0] = { 52 [0] = {
53 .start = S5P_PA_UART1, 53 .start = S5P_PA_UART1,
54 .end = S5P_PA_UART1 + S5P_SZ_UART, 54 .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
55 .flags = IORESOURCE_MEM, 55 .flags = IORESOURCE_MEM,
56 }, 56 },
57 [1] = { 57 [1] = {
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = {
74static struct resource s5p_uart2_resource[] = { 74static struct resource s5p_uart2_resource[] = {
75 [0] = { 75 [0] = {
76 .start = S5P_PA_UART2, 76 .start = S5P_PA_UART2,
77 .end = S5P_PA_UART2 + S5P_SZ_UART, 77 .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
78 .flags = IORESOURCE_MEM, 78 .flags = IORESOURCE_MEM,
79 }, 79 },
80 [1] = { 80 [1] = {
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = {
98#if CONFIG_SERIAL_SAMSUNG_UARTS > 3 98#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
99 [0] = { 99 [0] = {
100 .start = S5P_PA_UART3, 100 .start = S5P_PA_UART3,
101 .end = S5P_PA_UART3 + S5P_SZ_UART, 101 .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
102 .flags = IORESOURCE_MEM, 102 .flags = IORESOURCE_MEM,
103 }, 103 },
104 [1] = { 104 [1] = {
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = {
123#if CONFIG_SERIAL_SAMSUNG_UARTS > 4 123#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
124 [0] = { 124 [0] = {
125 .start = S5P_PA_UART4, 125 .start = S5P_PA_UART4,
126 .end = S5P_PA_UART4 + S5P_SZ_UART, 126 .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
127 .flags = IORESOURCE_MEM, 127 .flags = IORESOURCE_MEM,
128 }, 128 },
129 [1] = { 129 [1] = {
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = {
148#if CONFIG_SERIAL_SAMSUNG_UARTS > 5 148#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
149 [0] = { 149 [0] = {
150 .start = S5P_PA_UART5, 150 .start = S5P_PA_UART5,
151 .end = S5P_PA_UART5 + S5P_SZ_UART, 151 .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
152 .flags = IORESOURCE_MEM, 152 .flags = IORESOURCE_MEM,
153 }, 153 },
154 [1] = { 154 [1] = {
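The resource fix above is the usual off-by-one: struct resource describes an inclusive [start, end] range, so a SZ_256 window must end at start + 256 - 1; the old ".end = start + S5P_SZ_UART" claimed 257 bytes. A tiny check of the convention (the base address is just an example value):

	/* Sketch: sanity check of the inclusive-end resource convention. */
	#include <assert.h>

	#define SKETCH_SZ_256	0x100UL

	int main(void)
	{
		unsigned long start = 0xEC000000UL;		/* any UART base */
		unsigned long end   = start + SKETCH_SZ_256 - 1; /* 0xEC0000FF */

		/* resource_size() computes end - start + 1 */
		assert(end - start + 1 == SKETCH_SZ_256);
		return 0;
	}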
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c
index 236ef8427d7d..3e4bd8147bf4 100644
--- a/arch/arm/plat-samsung/dev-ts.c
+++ b/arch/arm/plat-samsung/dev-ts.c
@@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd)
58 58
59 s3c_device_ts.dev.platform_data = npd; 59 s3c_device_ts.dev.platform_data = npd;
60} 60}
61EXPORT_SYMBOL(s3c24xx_ts_set_platdata);
diff --git a/arch/arm/plat-samsung/dev-uart.c b/arch/arm/plat-samsung/dev-uart.c
index 3776cd952450..5928105490fa 100644
--- a/arch/arm/plat-samsung/dev-uart.c
+++ b/arch/arm/plat-samsung/dev-uart.c
@@ -15,6 +15,8 @@
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16#include <linux/platform_device.h> 16#include <linux/platform_device.h>
17 17
18#include <plat/devs.h>
19
18/* uart devices */ 20/* uart devices */
19 21
20static struct platform_device s3c24xx_uart_device0 = { 22static struct platform_device s3c24xx_uart_device0 = {
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 99ba6789cc97..6dd455bafdfd 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -24,10 +24,10 @@ static inline void putc(int c)
24{ 24{
25 void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; 25 void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE;
26 26
27 while (readl(base + UART01x_FR) & UART01x_FR_TXFF) 27 while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF)
28 barrier(); 28 barrier();
29 29
30 writel(c, base + UART01x_DR); 30 writel_relaxed(c, base + UART01x_DR);
31} 31}
32 32
33static inline void flush(void) 33static inline void flush(void)
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h
index 09e9372aea21..8c8b24d07046 100644
--- a/arch/arm/plat-spear/include/plat/vmalloc.h
+++ b/arch/arm/plat-spear/include/plat/vmalloc.h
@@ -14,6 +14,6 @@
14#ifndef __PLAT_VMALLOC_H 14#ifndef __PLAT_VMALLOC_H
15#define __PLAT_VMALLOC_H 15#define __PLAT_VMALLOC_H
16 16
17#define VMALLOC_END 0xF0000000 17#define VMALLOC_END 0xF0000000UL
18 18
19#endif /* __PLAT_VMALLOC_H */ 19#endif /* __PLAT_VMALLOC_H */
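Adding the UL suffix makes VMALLOC_END an unsigned long, matching every other virtual address the mm code compares it against; without it the constant's type follows the integer promotion rules and can trigger type-mismatch warnings in address arithmetic. A small sketch of the kind of comparison this keeps clean (the helper name is made up):

#define VMALLOC_END	0xF0000000UL

/* Hypothetical helper, mirroring how mm code compares addresses. */
static inline int example_addr_in_vmalloc(unsigned long addr,
					  unsigned long vmalloc_start)
{
	/* Both operands are unsigned long on 32-bit and 64-bit builds. */
	return addr >= vmalloc_start && addr < VMALLOC_END;
}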
diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S
index 250f4d4b9436..06a5e674401f 100644
--- a/arch/blackfin/lib/outs.S
+++ b/arch/blackfin/lib/outs.S
@@ -13,6 +13,8 @@
13.align 2 13.align 2
14 14
15ENTRY(_outsl) 15ENTRY(_outsl)
16 CC = R2 == 0;
17 IF CC JUMP 1f;
16 P0 = R0; /* P0 = port */ 18 P0 = R0; /* P0 = port */
17 P1 = R1; /* P1 = address */ 19 P1 = R1; /* P1 = address */
18 P2 = R2; /* P2 = count */ 20 P2 = R2; /* P2 = count */
@@ -20,10 +22,12 @@ ENTRY(_outsl)
20 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; 22 LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
21.Llong_loop_s: R0 = [P1++]; 23.Llong_loop_s: R0 = [P1++];
22.Llong_loop_e: [P0] = R0; 24.Llong_loop_e: [P0] = R0;
23 RTS; 251: RTS;
24ENDPROC(_outsl) 26ENDPROC(_outsl)
25 27
26ENTRY(_outsw) 28ENTRY(_outsw)
29 CC = R2 == 0;
30 IF CC JUMP 1f;
27 P0 = R0; /* P0 = port */ 31 P0 = R0; /* P0 = port */
28 P1 = R1; /* P1 = address */ 32 P1 = R1; /* P1 = address */
29 P2 = R2; /* P2 = count */ 33 P2 = R2; /* P2 = count */
@@ -31,10 +35,12 @@ ENTRY(_outsw)
31 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; 35 LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
32.Lword_loop_s: R0 = W[P1++]; 36.Lword_loop_s: R0 = W[P1++];
33.Lword_loop_e: W[P0] = R0; 37.Lword_loop_e: W[P0] = R0;
34 RTS; 381: RTS;
35ENDPROC(_outsw) 39ENDPROC(_outsw)
36 40
37ENTRY(_outsb) 41ENTRY(_outsb)
42 CC = R2 == 0;
43 IF CC JUMP 1f;
38 P0 = R0; /* P0 = port */ 44 P0 = R0; /* P0 = port */
39 P1 = R1; /* P1 = address */ 45 P1 = R1; /* P1 = address */
40 P2 = R2; /* P2 = count */ 46 P2 = R2; /* P2 = count */
@@ -42,10 +48,12 @@ ENTRY(_outsb)
42 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; 48 LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
43.Lbyte_loop_s: R0 = B[P1++]; 49.Lbyte_loop_s: R0 = B[P1++];
44.Lbyte_loop_e: B[P0] = R0; 50.Lbyte_loop_e: B[P0] = R0;
45 RTS; 511: RTS;
46ENDPROC(_outsb) 52ENDPROC(_outsb)
47 53
48ENTRY(_outsw_8) 54ENTRY(_outsw_8)
55 CC = R2 == 0;
56 IF CC JUMP 1f;
49 P0 = R0; /* P0 = port */ 57 P0 = R0; /* P0 = port */
50 P1 = R1; /* P1 = address */ 58 P1 = R1; /* P1 = address */
51 P2 = R2; /* P2 = count */ 59 P2 = R2; /* P2 = count */
@@ -56,5 +64,5 @@ ENTRY(_outsw_8)
56 R0 = R0 << 8; 64 R0 = R0 << 8;
57 R0 = R0 + R1; 65 R0 = R0 + R1;
58.Lword8_loop_e: W[P0] = R0; 66.Lword8_loop_e: W[P0] = R0;
59 RTS; 671: RTS;
60ENDPROC(_outsw_8) 68ENDPROC(_outsw_8)
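Each hunk above adds the same guard: test R2 (the count) and branch to the final RTS when it is zero, because the hardware loop set up by LSETUP does not degrade to a no-op for a zero count (my reading of why the guard is needed). The C analogue is a do/while copy loop that must bail out before its first iteration:

#include <stddef.h>
#include <stdint.h>

/* Userspace analogue of _outsl with the added zero-count guard. */
static void outsl_example(volatile uint32_t *port, const uint32_t *buf,
			  size_t count)
{
	if (count == 0)
		return;			/* mirrors "CC = R2 == 0; IF CC JUMP 1f;" */

	do {
		*port = *buf++;		/* one 32-bit write per iteration */
	} while (--count);
}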
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 790c767ca95a..ab4a925a443e 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -58,6 +58,8 @@
581: 581:
59.ifeqs "\flushins", BROK_FLUSH_INST 59.ifeqs "\flushins", BROK_FLUSH_INST
60 \flushins [P0++]; 60 \flushins [P0++];
61 nop;
62 nop;
612: nop; 632: nop;
62.else 64.else
632: \flushins [P0++]; 652: \flushins [P0++];
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index 442218980db0..c49be845f96a 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -72,11 +72,6 @@ SECTIONS
72 INIT_TEXT_SECTION(PAGE_SIZE) 72 INIT_TEXT_SECTION(PAGE_SIZE)
73 .init.data : { INIT_DATA } 73 .init.data : { INIT_DATA }
74 .init.setup : { INIT_SETUP(16) } 74 .init.setup : { INIT_SETUP(16) }
75#ifdef CONFIG_ETRAX_ARCH_V32
76 __start___param = .;
77 __param : { *(__param) }
78 __stop___param = .;
79#endif
80 .initcall.init : { 75 .initcall.init : {
81 INIT_CALLS 76 INIT_CALLS
82 } 77 }
diff --git a/arch/m68k/include/asm/string.h b/arch/m68k/include/asm/string.h
index 65b131282837..32198454da70 100644
--- a/arch/m68k/include/asm/string.h
+++ b/arch/m68k/include/asm/string.h
@@ -99,14 +99,12 @@ static inline int strcmp(const char *cs, const char *ct)
99 : "+a" (cs), "+a" (ct), "=d" (res)); 99 : "+a" (cs), "+a" (ct), "=d" (res));
100 return res; 100 return res;
101} 101}
102#endif /* CONFIG_COLDFIRE */
102 103
103#define __HAVE_ARCH_MEMMOVE 104#define __HAVE_ARCH_MEMMOVE
104extern void *memmove(void *, const void *, __kernel_size_t); 105extern void *memmove(void *, const void *, __kernel_size_t);
105 106
106#define __HAVE_ARCH_MEMCMP
107extern int memcmp(const void *, const void *, __kernel_size_t);
108#define memcmp(d, s, n) __builtin_memcmp(d, s, n) 107#define memcmp(d, s, n) __builtin_memcmp(d, s, n)
109#endif /* CONFIG_COLDFIRE */
110 108
111#define __HAVE_ARCH_MEMSET 109#define __HAVE_ARCH_MEMSET
112extern void *memset(void *, int, __kernel_size_t); 110extern void *memset(void *, int, __kernel_size_t);
diff --git a/arch/m68k/lib/string.c b/arch/m68k/lib/string.c
index 4253f870e54f..d399c5f25636 100644
--- a/arch/m68k/lib/string.c
+++ b/arch/m68k/lib/string.c
@@ -243,14 +243,3 @@ void *memmove(void *dest, const void *src, size_t n)
243 return xdest; 243 return xdest;
244} 244}
245EXPORT_SYMBOL(memmove); 245EXPORT_SYMBOL(memmove);
246
247int memcmp(const void *cs, const void *ct, size_t count)
248{
249 const unsigned char *su1, *su2;
250
251 for (su1 = cs, su2 = ct; count > 0; ++su1, ++su2, count--)
252 if (*su1 != *su2)
253 return *su1 < *su2 ? -1 : +1;
254 return 0;
255}
256EXPORT_SYMBOL(memcmp);
diff --git a/arch/m68knommu/kernel/vmlinux.lds.S b/arch/m68knommu/kernel/vmlinux.lds.S
index ef332136f96d..47e15ebfd893 100644
--- a/arch/m68knommu/kernel/vmlinux.lds.S
+++ b/arch/m68knommu/kernel/vmlinux.lds.S
@@ -141,6 +141,12 @@ SECTIONS {
141 *(__param) 141 *(__param)
142 __stop___param = .; 142 __stop___param = .;
143 143
144 /* Built-in module versions */
145 . = ALIGN(4) ;
146 __start___modver = .;
147 *(__modver)
148 __stop___modver = .;
149
144 . = ALIGN(4) ; 150 . = ALIGN(4) ;
145 _etext = . ; 151 _etext = . ;
146 } > TEXT 152 } > TEXT
diff --git a/arch/m68knommu/lib/Makefile b/arch/m68knommu/lib/Makefile
index d94d709665aa..32d852e586d7 100644
--- a/arch/m68knommu/lib/Makefile
+++ b/arch/m68knommu/lib/Makefile
@@ -4,4 +4,4 @@
4 4
5lib-y := ashldi3.o ashrdi3.o lshrdi3.o \ 5lib-y := ashldi3.o ashrdi3.o lshrdi3.o \
6 muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \ 6 muldi3.o mulsi3.o divsi3.o udivsi3.o modsi3.o umodsi3.o \
7 checksum.o memcpy.o memset.o delay.o 7 checksum.o memcpy.o memmove.o memset.o delay.o
diff --git a/arch/m68knommu/lib/memmove.c b/arch/m68knommu/lib/memmove.c
new file mode 100644
index 000000000000..b3dcfe9dab7e
--- /dev/null
+++ b/arch/m68knommu/lib/memmove.c
@@ -0,0 +1,105 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file COPYING in the main directory of this archive
4 * for more details.
5 */
6
7#define __IN_STRING_C
8
9#include <linux/module.h>
10#include <linux/string.h>
11
12void *memmove(void *dest, const void *src, size_t n)
13{
14 void *xdest = dest;
15 size_t temp;
16
17 if (!n)
18 return xdest;
19
20 if (dest < src) {
21 if ((long)dest & 1) {
22 char *cdest = dest;
23 const char *csrc = src;
24 *cdest++ = *csrc++;
25 dest = cdest;
26 src = csrc;
27 n--;
28 }
29 if (n > 2 && (long)dest & 2) {
30 short *sdest = dest;
31 const short *ssrc = src;
32 *sdest++ = *ssrc++;
33 dest = sdest;
34 src = ssrc;
35 n -= 2;
36 }
37 temp = n >> 2;
38 if (temp) {
39 long *ldest = dest;
40 const long *lsrc = src;
41 temp--;
42 do
43 *ldest++ = *lsrc++;
44 while (temp--);
45 dest = ldest;
46 src = lsrc;
47 }
48 if (n & 2) {
49 short *sdest = dest;
50 const short *ssrc = src;
51 *sdest++ = *ssrc++;
52 dest = sdest;
53 src = ssrc;
54 }
55 if (n & 1) {
56 char *cdest = dest;
57 const char *csrc = src;
58 *cdest = *csrc;
59 }
60 } else {
61 dest = (char *)dest + n;
62 src = (const char *)src + n;
63 if ((long)dest & 1) {
64 char *cdest = dest;
65 const char *csrc = src;
66 *--cdest = *--csrc;
67 dest = cdest;
68 src = csrc;
69 n--;
70 }
71 if (n > 2 && (long)dest & 2) {
72 short *sdest = dest;
73 const short *ssrc = src;
74 *--sdest = *--ssrc;
75 dest = sdest;
76 src = ssrc;
77 n -= 2;
78 }
79 temp = n >> 2;
80 if (temp) {
81 long *ldest = dest;
82 const long *lsrc = src;
83 temp--;
84 do
85 *--ldest = *--lsrc;
86 while (temp--);
87 dest = ldest;
88 src = lsrc;
89 }
90 if (n & 2) {
91 short *sdest = dest;
92 const short *ssrc = src;
93 *--sdest = *--ssrc;
94 dest = sdest;
95 src = ssrc;
96 }
97 if (n & 1) {
98 char *cdest = dest;
99 const char *csrc = src;
100 *--cdest = *--csrc;
101 }
102 }
103 return xdest;
104}
105EXPORT_SYMBOL(memmove);
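The new file gives m68knommu its own memmove(): when dest lies below src it copies forward, otherwise it steps both pointers to the end and copies backward, widening to 16- and 32-bit accesses once the destination is aligned, so overlapping regions are always handled safely. A short userspace illustration of the overlap guarantee (not the kernel code itself):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "abcdef";

	/*
	 * Shift "abcdef" right by two bytes inside the same buffer.
	 * Because dest > src, memmove() copies from the end backwards,
	 * so no source byte is overwritten before it is read.
	 */
	memmove(buf + 2, buf, 6);
	buf[8] = '\0';
	printf("%s\n", buf);	/* prints "ababcdef" */
	return 0;
}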
diff --git a/arch/m68knommu/platform/5249/intc2.c b/arch/m68knommu/platform/5249/intc2.c
index d09d9da04537..c5151f846591 100644
--- a/arch/m68knommu/platform/5249/intc2.c
+++ b/arch/m68knommu/platform/5249/intc2.c
@@ -50,8 +50,10 @@ static int __init mcf_intc2_init(void)
50 int irq; 50 int irq;
51 51
52 /* GPIO interrupt sources */ 52 /* GPIO interrupt sources */
53 for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) 53 for (irq = MCFINTC2_GPIOIRQ0; (irq <= MCFINTC2_GPIOIRQ7); irq++) {
54 irq_desc[irq].chip = &intc2_irq_gpio_chip; 54 irq_desc[irq].chip = &intc2_irq_gpio_chip;
55 set_irq_handler(irq, handle_edge_irq);
56 }
55 57
56 return 0; 58 return 0;
57} 59}
diff --git a/arch/m68knommu/platform/68328/entry.S b/arch/m68knommu/platform/68328/entry.S
index 240a7a6e25c8..676960cf022a 100644
--- a/arch/m68knommu/platform/68328/entry.S
+++ b/arch/m68knommu/platform/68328/entry.S
@@ -108,7 +108,6 @@ Luser_return:
108 movel %d1,%a2 108 movel %d1,%a2
1091: 1091:
110 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ 110 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
111 andl #_TIF_WORK_MASK,%d1
112 jne Lwork_to_do 111 jne Lwork_to_do
113 RESTORE_ALL 112 RESTORE_ALL
114 113
diff --git a/arch/m68knommu/platform/68360/commproc.c b/arch/m68knommu/platform/68360/commproc.c
index f27e688c404e..8e4e10cc0080 100644
--- a/arch/m68knommu/platform/68360/commproc.c
+++ b/arch/m68knommu/platform/68360/commproc.c
@@ -210,7 +210,7 @@ void
210cpm_install_handler(int vec, void (*handler)(), void *dev_id) 210cpm_install_handler(int vec, void (*handler)(), void *dev_id)
211{ 211{
212 212
213 request_irq(vec, handler, IRQ_FLG_LOCK, "timer", dev_id); 213 request_irq(vec, handler, 0, "timer", dev_id);
214 214
215/* if (cpm_vecs[vec].handler != 0) */ 215/* if (cpm_vecs[vec].handler != 0) */
216/* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */ 216/* printk(KERN_INFO "CPM interrupt %x replacing %x\n", */
diff --git a/arch/m68knommu/platform/68360/config.c b/arch/m68knommu/platform/68360/config.c
index ac629fa30099..9dd5bca38749 100644
--- a/arch/m68knommu/platform/68360/config.c
+++ b/arch/m68knommu/platform/68360/config.c
@@ -75,7 +75,7 @@ void hw_timer_init(void)
75 /* Set compare register 32Khz / 32 / 10 = 100 */ 75 /* Set compare register 32Khz / 32 / 10 = 100 */
76 TCMP = 10; 76 TCMP = 10;
77 77
78 request_irq(IRQ_MACHSPEC | 1, timer_routine, IRQ_FLG_LOCK, "timer", NULL); 78 request_irq(IRQ_MACHSPEC | 1, timer_routine, 0, "timer", NULL);
79#endif 79#endif
80 80
81 /* General purpose quicc timers: MC68360UM p7-20 */ 81 /* General purpose quicc timers: MC68360UM p7-20 */
diff --git a/arch/m68knommu/platform/68360/entry.S b/arch/m68knommu/platform/68360/entry.S
index 8a28788c0eea..46c1b18c9dcb 100644
--- a/arch/m68knommu/platform/68360/entry.S
+++ b/arch/m68knommu/platform/68360/entry.S
@@ -104,7 +104,6 @@ Luser_return:
104 movel %d1,%a2 104 movel %d1,%a2
1051: 1051:
106 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */ 106 move %a2@(TI_FLAGS),%d1 /* thread_info->flags */
107 andl #_TIF_WORK_MASK,%d1
108 jne Lwork_to_do 107 jne Lwork_to_do
109 RESTORE_ALL 108 RESTORE_ALL
110 109
diff --git a/arch/m68knommu/platform/68360/ints.c b/arch/m68knommu/platform/68360/ints.c
index ad96ab1051f0..a29041c1a8a0 100644
--- a/arch/m68knommu/platform/68360/ints.c
+++ b/arch/m68knommu/platform/68360/ints.c
@@ -132,8 +132,8 @@ void init_IRQ(void)
132 pquicc->intr_cimr = 0x00000000; 132 pquicc->intr_cimr = 0x00000000;
133 133
134 for (i = 0; (i < NR_IRQS); i++) { 134 for (i = 0; (i < NR_IRQS); i++) {
135 set_irq_chip(irq, &intc_irq_chip); 135 set_irq_chip(i, &intc_irq_chip);
136 set_irq_handler(irq, handle_level_irq); 136 set_irq_handler(i, handle_level_irq);
137 } 137 }
138} 138}
139 139
diff --git a/arch/m68knommu/platform/coldfire/entry.S b/arch/m68knommu/platform/coldfire/entry.S
index 4ddfc3da70d8..5837cf080b6d 100644
--- a/arch/m68knommu/platform/coldfire/entry.S
+++ b/arch/m68knommu/platform/coldfire/entry.S
@@ -138,7 +138,6 @@ Luser_return:
138 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */ 138 andl #-THREAD_SIZE,%d1 /* at base of kernel stack */
139 movel %d1,%a0 139 movel %d1,%a0
140 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */ 140 movel %a0@(TI_FLAGS),%d1 /* get thread_info->flags */
141 andl #0xefff,%d1
142 jne Lwork_to_do /* still work to do */ 141 jne Lwork_to_do /* still work to do */
143 142
144Lreturn: 143Lreturn:
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index f5ecc0566bc2..d88983516e26 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -4,6 +4,7 @@ config MIPS
4 select HAVE_GENERIC_DMA_COHERENT 4 select HAVE_GENERIC_DMA_COHERENT
5 select HAVE_IDE 5 select HAVE_IDE
6 select HAVE_OPROFILE 6 select HAVE_OPROFILE
7 select HAVE_IRQ_WORK
7 select HAVE_PERF_EVENTS 8 select HAVE_PERF_EVENTS
8 select PERF_USE_VMALLOC 9 select PERF_USE_VMALLOC
9 select HAVE_ARCH_KGDB 10 select HAVE_ARCH_KGDB
@@ -208,6 +209,7 @@ config MACH_JZ4740
208 select ARCH_REQUIRE_GPIOLIB 209 select ARCH_REQUIRE_GPIOLIB
209 select SYS_HAS_EARLY_PRINTK 210 select SYS_HAS_EARLY_PRINTK
210 select HAVE_PWM 211 select HAVE_PWM
212 select HAVE_CLK
211 213
212config LASAT 214config LASAT
213 bool "LASAT Networks platforms" 215 bool "LASAT Networks platforms"
@@ -333,6 +335,8 @@ config PNX8550_STB810
333config PMC_MSP 335config PMC_MSP
334 bool "PMC-Sierra MSP chipsets" 336 bool "PMC-Sierra MSP chipsets"
335 depends on EXPERIMENTAL 337 depends on EXPERIMENTAL
338 select CEVT_R4K
339 select CSRC_R4K
336 select DMA_NONCOHERENT 340 select DMA_NONCOHERENT
337 select SWAP_IO_SPACE 341 select SWAP_IO_SPACE
338 select NO_EXCEPT_FILL 342 select NO_EXCEPT_FILL
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c
index 6398fa95905c..40b84b991191 100644
--- a/arch/mips/alchemy/mtx-1/board_setup.c
+++ b/arch/mips/alchemy/mtx-1/board_setup.c
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert);
54 54
55static void mtx1_reset(char *c) 55static void mtx1_reset(char *c)
56{ 56{
57 /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ 57 /* Jump to the reset vector */
58 au_writel(0x00000000, 0xAE00001C); 58 __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
59} 59}
60 60
61static void mtx1_power_off(void) 61static void mtx1_power_off(void)
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c
index e30e42add697..956f946218c5 100644
--- a/arch/mips/alchemy/mtx-1/platform.c
+++ b/arch/mips/alchemy/mtx-1/platform.c
@@ -28,6 +28,8 @@
28#include <linux/mtd/physmap.h> 28#include <linux/mtd/physmap.h>
29#include <mtd/mtd-abi.h> 29#include <mtd/mtd-abi.h>
30 30
31#include <asm/mach-au1x00/au1xxx_eth.h>
32
31static struct gpio_keys_button mtx1_gpio_button[] = { 33static struct gpio_keys_button mtx1_gpio_button[] = {
32 { 34 {
33 .gpio = 207, 35 .gpio = 207,
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = {
140 &mtx1_mtd, 142 &mtx1_mtd,
141}; 143};
142 144
145static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = {
146 .phy_search_highest_addr = 1,
147 .phy1_search_mac0 = 1,
148};
149
143static int __init mtx1_register_devices(void) 150static int __init mtx1_register_devices(void)
144{ 151{
145 int rc; 152 int rc;
146 153
154 au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata);
155
147 rc = gpio_request(mtx1_gpio_button[0].gpio, 156 rc = gpio_request(mtx1_gpio_button[0].gpio,
148 mtx1_gpio_button[0].desc); 157 mtx1_gpio_button[0].desc);
149 if (rc < 0) { 158 if (rc < 0) {
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c
index b43c918925d3..80c521e5290d 100644
--- a/arch/mips/alchemy/xxs1500/board_setup.c
+++ b/arch/mips/alchemy/xxs1500/board_setup.c
@@ -36,8 +36,8 @@
36 36
37static void xxs1500_reset(char *c) 37static void xxs1500_reset(char *c)
38{ 38{
39 /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ 39 /* Jump to the reset vector */
40 au_writel(0x00000000, 0xAE00001C); 40 __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
41} 41}
42 42
43static void xxs1500_power_off(void) 43static void xxs1500_power_off(void)
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h
index e00007cf8162..d0c77496c728 100644
--- a/arch/mips/include/asm/perf_event.h
+++ b/arch/mips/include/asm/perf_event.h
@@ -11,15 +11,5 @@
11 11
12#ifndef __MIPS_PERF_EVENT_H__ 12#ifndef __MIPS_PERF_EVENT_H__
13#define __MIPS_PERF_EVENT_H__ 13#define __MIPS_PERF_EVENT_H__
14 14/* Leave it empty here. The file is required by linux/perf_event.h */
15/*
16 * MIPS performance counters do not raise NMI upon overflow, a regular
17 * interrupt will be signaled. Hence we can do the pending perf event
18 * work at the tail of the irq handler.
19 */
20static inline void
21set_perf_event_pending(void)
22{
23}
24
25#endif /* __MIPS_PERF_EVENT_H__ */ 15#endif /* __MIPS_PERF_EVENT_H__ */
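set_perf_event_pending() can go because MIPS now selects HAVE_IRQ_WORK (the Kconfig hunk above) and the counter-overflow path calls irq_work_run() further down; pending perf work is queued through the generic irq_work layer instead of an arch-specific hook. A sketch of that generic pattern as I read the 2.6.38-era API, with illustrative names:

#include <linux/irq_work.h>

static void example_deferred(struct irq_work *work)
{
	/* Runs from irq_work_run(), i.e. ordinary interrupt context,
	 * where it is safe to deliver the pending events.
	 */
}

static struct irq_work example_work;

static void example_setup(void)
{
	init_irq_work(&example_work, example_deferred);
}

static void example_overflow_handler(void)
{
	/* From the constrained overflow path, only queue the work... */
	irq_work_queue(&example_work);

	/* ...the architecture's regular interrupt path then calls
	 * irq_work_run(), which is what the mipsxx handler below was
	 * changed to do.
	 */
}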
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 5a84a1f11231..94ca2b018af7 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -17,29 +17,13 @@
17#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
18#include <asm/uasm.h> 18#include <asm/uasm.h>
19 19
20/* 20#include <asm-generic/sections.h>
21 * If the Instruction Pointer is in module space (0xc0000000), return true;
22 * otherwise, it is in kernel space (0x80000000), return false.
23 *
24 * FIXME: This will not work when the kernel space and module space are the
25 * same. If they are the same, we need to modify scripts/recordmcount.pl,
26 * ftrace_make_nop/call() and the other related parts to ensure the
27 * enabling/disabling of the calling site to _mcount is right for both kernel
28 * and module.
29 */
30
31static inline int in_module(unsigned long ip)
32{
33 return ip & 0x40000000;
34}
35 21
36#ifdef CONFIG_DYNAMIC_FTRACE 22#ifdef CONFIG_DYNAMIC_FTRACE
37 23
38#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ 24#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
39#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ 25#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
40 26
41#define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */
42#define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */
43#define INSN_NOP 0x00000000 /* nop */ 27#define INSN_NOP 0x00000000 /* nop */
44#define INSN_JAL(addr) \ 28#define INSN_JAL(addr) \
45 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) 29 ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void)
69#endif 53#endif
70} 54}
71 55
56/*
57 * Check if the address is in kernel space
58 *
59 * Clone core_kernel_text() from kernel/extable.c, but doesn't call
60 * init_kernel_text() for Ftrace doesn't trace functions in init sections.
61 */
62static inline int in_kernel_space(unsigned long ip)
63{
64 if (ip >= (unsigned long)_stext &&
65 ip <= (unsigned long)_etext)
66 return 1;
67 return 0;
68}
69
72static int ftrace_modify_code(unsigned long ip, unsigned int new_code) 70static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
73{ 71{
74 int faulted; 72 int faulted;
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
84 return 0; 82 return 0;
85} 83}
86 84
85/*
86 * The details about the calling site of mcount on MIPS
87 *
88 * 1. For kernel:
89 *
90 * move at, ra
91 * jal _mcount --> nop
92 *
93 * 2. For modules:
94 *
95 * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
96 *
97 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
98 * addiu v1, v1, low_16bit_of_mcount
99 * move at, ra
100 * move $12, ra_address
101 * jalr v1
102 * sub sp, sp, 8
103 * 1: offset = 5 instructions
104 * 2.2 For the Other situations
105 *
106 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
107 * addiu v1, v1, low_16bit_of_mcount
108 * move at, ra
109 * jalr v1
110 * nop | move $12, ra_address | sub sp, sp, 8
111 * 1: offset = 4 instructions
112 */
113
114#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
115#define MCOUNT_OFFSET_INSNS 5
116#else
117#define MCOUNT_OFFSET_INSNS 4
118#endif
119#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
120
87int ftrace_make_nop(struct module *mod, 121int ftrace_make_nop(struct module *mod,
88 struct dyn_ftrace *rec, unsigned long addr) 122 struct dyn_ftrace *rec, unsigned long addr)
89{ 123{
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod,
91 unsigned long ip = rec->ip; 125 unsigned long ip = rec->ip;
92 126
93 /* 127 /*
94 * We have compiled module with -mlong-calls, but compiled the kernel 128 * If ip is in kernel space, no long call, otherwise, long call is
95 * without it, we need to cope with them respectively. 129 * needed.
96 */ 130 */
97 if (in_module(ip)) { 131 new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
98#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) 132
99 /*
100 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
101 * addiu v1, v1, low_16bit_of_mcount
102 * move at, ra
103 * move $12, ra_address
104 * jalr v1
105 * sub sp, sp, 8
106 * 1: offset = 5 instructions
107 */
108 new = INSN_B_1F_5;
109#else
110 /*
111 * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
112 * addiu v1, v1, low_16bit_of_mcount
113 * move at, ra
114 * jalr v1
115 * nop | move $12, ra_address | sub sp, sp, 8
116 * 1: offset = 4 instructions
117 */
118 new = INSN_B_1F_4;
119#endif
120 } else {
121 /*
122 * move at, ra
123 * jal _mcount --> nop
124 */
125 new = INSN_NOP;
126 }
127 return ftrace_modify_code(ip, new); 133 return ftrace_modify_code(ip, new);
128} 134}
129 135
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
132 unsigned int new; 138 unsigned int new;
133 unsigned long ip = rec->ip; 139 unsigned long ip = rec->ip;
134 140
135 /* ip, module: 0xc0000000, kernel: 0x80000000 */ 141 new = in_kernel_space(ip) ? insn_jal_ftrace_caller :
136 new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; 142 insn_lui_v1_hi16_mcount;
137 143
138 return ftrace_modify_code(ip, new); 144 return ftrace_modify_code(ip, new);
139} 145}
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void)
190#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ 196#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
191#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ 197#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
192 198
193unsigned long ftrace_get_parent_addr(unsigned long self_addr, 199unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
194 unsigned long parent, 200 old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
195 unsigned long parent_addr,
196 unsigned long fp)
197{ 201{
198 unsigned long sp, ip, ra; 202 unsigned long sp, ip, tmp;
199 unsigned int code; 203 unsigned int code;
200 int faulted; 204 int faulted;
201 205
202 /* 206 /*
203 * For module, move the ip from calling site of mcount to the 207 * For module, move the ip from the return address after the
204 * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for 208 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
205 * kernel, move to the instruction "move ra, at"(offset is 12) 209 * kernel, move after the instruction "move ra, at"(offset is 16)
206 */ 210 */
207 ip = self_addr - (in_module(self_addr) ? 20 : 12); 211 ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
208 212
209 /* 213 /*
210 * search the text until finding the non-store instruction or "s{d,w} 214 * search the text until finding the non-store instruction or "s{d,w}
211 * ra, offset(sp)" instruction 215 * ra, offset(sp)" instruction
212 */ 216 */
213 do { 217 do {
214 ip -= 4;
215
216 /* get the code at "ip": code = *(unsigned int *)ip; */ 218 /* get the code at "ip": code = *(unsigned int *)ip; */
217 safe_load_code(code, ip, faulted); 219 safe_load_code(code, ip, faulted);
218 220
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
224 * store the ra on the stack 226 * store the ra on the stack
225 */ 227 */
226 if ((code & S_R_SP) != S_R_SP) 228 if ((code & S_R_SP) != S_R_SP)
227 return parent_addr; 229 return parent_ra_addr;
228 230
229 } while (((code & S_RA_SP) != S_RA_SP)); 231 /* Move to the next instruction */
232 ip -= 4;
233 } while ((code & S_RA_SP) != S_RA_SP);
230 234
231 sp = fp + (code & OFFSET_MASK); 235 sp = fp + (code & OFFSET_MASK);
232 236
233 /* ra = *(unsigned long *)sp; */ 237 /* tmp = *(unsigned long *)sp; */
234 safe_load_stack(ra, sp, faulted); 238 safe_load_stack(tmp, sp, faulted);
235 if (unlikely(faulted)) 239 if (unlikely(faulted))
236 return 0; 240 return 0;
237 241
238 if (ra == parent) 242 if (tmp == old_parent_ra)
239 return sp; 243 return sp;
240 return 0; 244 return 0;
241} 245}
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr,
246 * Hook the return address and push it in the stack of return addrs 250 * Hook the return address and push it in the stack of return addrs
247 * in current thread info. 251 * in current thread info.
248 */ 252 */
249void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, 253void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
250 unsigned long fp) 254 unsigned long fp)
251{ 255{
252 unsigned long old; 256 unsigned long old_parent_ra;
253 struct ftrace_graph_ent trace; 257 struct ftrace_graph_ent trace;
254 unsigned long return_hooker = (unsigned long) 258 unsigned long return_hooker = (unsigned long)
255 &return_to_handler; 259 &return_to_handler;
256 int faulted; 260 int faulted, insns;
257 261
258 if (unlikely(atomic_read(&current->tracing_graph_pause))) 262 if (unlikely(atomic_read(&current->tracing_graph_pause)))
259 return; 263 return;
260 264
261 /* 265 /*
262 * "parent" is the stack address saved the return address of the caller 266 * "parent_ra_addr" is the stack address saved the return address of
263 * of _mcount. 267 * the caller of _mcount.
264 * 268 *
265 * if the gcc < 4.5, a leaf function does not save the return address 269 * if the gcc < 4.5, a leaf function does not save the return address
266 * in the stack address, so, we "emulate" one in _mcount's stack space, 270 * in the stack address, so, we "emulate" one in _mcount's stack space,
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
275 * do it in ftrace_graph_caller of mcount.S. 279 * do it in ftrace_graph_caller of mcount.S.
276 */ 280 */
277 281
278 /* old = *parent; */ 282 /* old_parent_ra = *parent_ra_addr; */
279 safe_load_stack(old, parent, faulted); 283 safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
280 if (unlikely(faulted)) 284 if (unlikely(faulted))
281 goto out; 285 goto out;
282#ifndef KBUILD_MCOUNT_RA_ADDRESS 286#ifndef KBUILD_MCOUNT_RA_ADDRESS
283 parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, 287 parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
284 (unsigned long)parent, fp); 288 old_parent_ra, (unsigned long)parent_ra_addr, fp);
285 /* 289 /*
286 * If fails when getting the stack address of the non-leaf function's 290 * If fails when getting the stack address of the non-leaf function's
287 * ra, stop function graph tracer and return 291 * ra, stop function graph tracer and return
288 */ 292 */
289 if (parent == 0) 293 if (parent_ra_addr == 0)
290 goto out; 294 goto out;
291#endif 295#endif
292 /* *parent = return_hooker; */ 296 /* *parent_ra_addr = return_hooker; */
293 safe_store_stack(return_hooker, parent, faulted); 297 safe_store_stack(return_hooker, parent_ra_addr, faulted);
294 if (unlikely(faulted)) 298 if (unlikely(faulted))
295 goto out; 299 goto out;
296 300
297 if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == 301 if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp)
298 -EBUSY) { 302 == -EBUSY) {
299 *parent = old; 303 *parent_ra_addr = old_parent_ra;
300 return; 304 return;
301 } 305 }
302 306
303 trace.func = self_addr; 307 /*
308 * Get the recorded ip of the current mcount calling site in the
309 * __mcount_loc section, which will be used to filter the function
310 * entries configured through the tracing/set_graph_function interface.
311 */
312
313 insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
314 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
304 315
305 /* Only trace if the calling function expects to */ 316 /* Only trace if the calling function expects to */
306 if (!ftrace_graph_entry(&trace)) { 317 if (!ftrace_graph_entry(&trace)) {
307 current->curr_ret_stack--; 318 current->curr_ret_stack--;
308 *parent = old; 319 *parent_ra_addr = old_parent_ra;
309 } 320 }
310 return; 321 return;
311out: 322out:
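This rewrite replaces the 0x40000000 address test with in_kernel_space() (a _stext/_etext range check) and folds the two hard-coded branch words into a single INSN_B_1F built from MCOUNT_OFFSET_INSNS, since a module call site compiled with -mlong-calls spans 4 or 5 instructions that the patched-in branch must skip. A small arithmetic sketch of the encodings, mirroring the macros above:

#include <stdio.h>

#define JAL		0x0c000000	/* jump & link */
#define ADDR_MASK	0x03ffffff

/* "b 1f" is beq $zero, $zero with the instruction offset in the low
 * 16 bits, so ORing in the skip count reproduces the old constants.
 */
static unsigned int insn_b_1f(unsigned int insns_to_skip)
{
	return 0x10000000 | insns_to_skip;
}

static unsigned int insn_jal(unsigned long target)
{
	return JAL | ((target >> 2) & ADDR_MASK);
}

int main(void)
{
	printf("skip 4 insns: 0x%08x\n", insn_b_1f(4));	/* 0x10000004 */
	printf("skip 5 insns: 0x%08x\n", insn_b_1f(5));	/* 0x10000005 */
	printf("jal example:  0x%08x\n", insn_jal(0x80123450UL));
	return 0;
}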
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index 2b7f3f703b83..a8244854d3dc 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event,
161 return ret; 161 return ret;
162} 162}
163 163
164static int mipspmu_enable(struct perf_event *event)
165{
166 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
167 struct hw_perf_event *hwc = &event->hw;
168 int idx;
169 int err = 0;
170
171 /* To look for a free counter for this event. */
172 idx = mipspmu->alloc_counter(cpuc, hwc);
173 if (idx < 0) {
174 err = idx;
175 goto out;
176 }
177
178 /*
179 * If there is an event in the counter we are going to use then
180 * make sure it is disabled.
181 */
182 event->hw.idx = idx;
183 mipspmu->disable_event(idx);
184 cpuc->events[idx] = event;
185
186 /* Set the period for the event. */
187 mipspmu_event_set_period(event, hwc, idx);
188
189 /* Enable the event. */
190 mipspmu->enable_event(hwc, idx);
191
192 /* Propagate our changes to the userspace mapping. */
193 perf_event_update_userpage(event);
194
195out:
196 return err;
197}
198
199static void mipspmu_event_update(struct perf_event *event, 164static void mipspmu_event_update(struct perf_event *event,
200 struct hw_perf_event *hwc, 165 struct hw_perf_event *hwc,
201 int idx) 166 int idx)
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event,
204 unsigned long flags; 169 unsigned long flags;
205 int shift = 64 - TOTAL_BITS; 170 int shift = 64 - TOTAL_BITS;
206 s64 prev_raw_count, new_raw_count; 171 s64 prev_raw_count, new_raw_count;
207 s64 delta; 172 u64 delta;
208 173
209again: 174again:
210 prev_raw_count = local64_read(&hwc->prev_count); 175 prev_raw_count = local64_read(&hwc->prev_count);
@@ -231,32 +196,90 @@ again:
231 return; 196 return;
232} 197}
233 198
234static void mipspmu_disable(struct perf_event *event) 199static void mipspmu_start(struct perf_event *event, int flags)
200{
201 struct hw_perf_event *hwc = &event->hw;
202
203 if (!mipspmu)
204 return;
205
206 if (flags & PERF_EF_RELOAD)
207 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
208
209 hwc->state = 0;
210
211 /* Set the period for the event. */
212 mipspmu_event_set_period(event, hwc, hwc->idx);
213
214 /* Enable the event. */
215 mipspmu->enable_event(hwc, hwc->idx);
216}
217
218static void mipspmu_stop(struct perf_event *event, int flags)
219{
220 struct hw_perf_event *hwc = &event->hw;
221
222 if (!mipspmu)
223 return;
224
225 if (!(hwc->state & PERF_HES_STOPPED)) {
226 /* We are working on a local event. */
227 mipspmu->disable_event(hwc->idx);
228 barrier();
229 mipspmu_event_update(event, hwc, hwc->idx);
230 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
231 }
232}
233
234static int mipspmu_add(struct perf_event *event, int flags)
235{ 235{
236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
237 struct hw_perf_event *hwc = &event->hw; 237 struct hw_perf_event *hwc = &event->hw;
238 int idx = hwc->idx; 238 int idx;
239 int err = 0;
239 240
241 perf_pmu_disable(event->pmu);
240 242
241 WARN_ON(idx < 0 || idx >= mipspmu->num_counters); 243 /* To look for a free counter for this event. */
244 idx = mipspmu->alloc_counter(cpuc, hwc);
245 if (idx < 0) {
246 err = idx;
247 goto out;
248 }
242 249
243 /* We are working on a local event. */ 250 /*
251 * If there is an event in the counter we are going to use then
252 * make sure it is disabled.
253 */
254 event->hw.idx = idx;
244 mipspmu->disable_event(idx); 255 mipspmu->disable_event(idx);
256 cpuc->events[idx] = event;
245 257
246 barrier(); 258 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
247 259 if (flags & PERF_EF_START)
248 mipspmu_event_update(event, hwc, idx); 260 mipspmu_start(event, PERF_EF_RELOAD);
249 cpuc->events[idx] = NULL;
250 clear_bit(idx, cpuc->used_mask);
251 261
262 /* Propagate our changes to the userspace mapping. */
252 perf_event_update_userpage(event); 263 perf_event_update_userpage(event);
264
265out:
266 perf_pmu_enable(event->pmu);
267 return err;
253} 268}
254 269
255static void mipspmu_unthrottle(struct perf_event *event) 270static void mipspmu_del(struct perf_event *event, int flags)
256{ 271{
272 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
257 struct hw_perf_event *hwc = &event->hw; 273 struct hw_perf_event *hwc = &event->hw;
274 int idx = hwc->idx;
258 275
259 mipspmu->enable_event(hwc, hwc->idx); 276 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
277
278 mipspmu_stop(event, PERF_EF_UPDATE);
279 cpuc->events[idx] = NULL;
280 clear_bit(idx, cpuc->used_mask);
281
282 perf_event_update_userpage(event);
260} 283}
261 284
262static void mipspmu_read(struct perf_event *event) 285static void mipspmu_read(struct perf_event *event)
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event)
270 mipspmu_event_update(event, hwc, hwc->idx); 293 mipspmu_event_update(event, hwc, hwc->idx);
271} 294}
272 295
273static struct pmu pmu = { 296static void mipspmu_enable(struct pmu *pmu)
274 .enable = mipspmu_enable, 297{
275 .disable = mipspmu_disable, 298 if (mipspmu)
276 .unthrottle = mipspmu_unthrottle, 299 mipspmu->start();
277 .read = mipspmu_read, 300}
278}; 301
302static void mipspmu_disable(struct pmu *pmu)
303{
304 if (mipspmu)
305 mipspmu->stop();
306}
279 307
280static atomic_t active_events = ATOMIC_INIT(0); 308static atomic_t active_events = ATOMIC_INIT(0);
281static DEFINE_MUTEX(pmu_reserve_mutex); 309static DEFINE_MUTEX(pmu_reserve_mutex);
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void)
318 perf_irq = save_perf_irq; 346 perf_irq = save_perf_irq;
319} 347}
320 348
349/*
350 * mipsxx/rm9000/loongson2 have different performance counters, they have
351 * specific low-level init routines.
352 */
353static void reset_counters(void *arg);
354static int __hw_perf_event_init(struct perf_event *event);
355
356static void hw_perf_event_destroy(struct perf_event *event)
357{
358 if (atomic_dec_and_mutex_lock(&active_events,
359 &pmu_reserve_mutex)) {
360 /*
361 * We must not call the destroy function with interrupts
362 * disabled.
363 */
364 on_each_cpu(reset_counters,
365 (void *)(long)mipspmu->num_counters, 1);
366 mipspmu_free_irq();
367 mutex_unlock(&pmu_reserve_mutex);
368 }
369}
370
371static int mipspmu_event_init(struct perf_event *event)
372{
373 int err = 0;
374
375 switch (event->attr.type) {
376 case PERF_TYPE_RAW:
377 case PERF_TYPE_HARDWARE:
378 case PERF_TYPE_HW_CACHE:
379 break;
380
381 default:
382 return -ENOENT;
383 }
384
385 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
386 (event->cpu >= 0 && !cpu_online(event->cpu)))
387 return -ENODEV;
388
389 if (!atomic_inc_not_zero(&active_events)) {
390 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
391 atomic_dec(&active_events);
392 return -ENOSPC;
393 }
394
395 mutex_lock(&pmu_reserve_mutex);
396 if (atomic_read(&active_events) == 0)
397 err = mipspmu_get_irq();
398
399 if (!err)
400 atomic_inc(&active_events);
401 mutex_unlock(&pmu_reserve_mutex);
402 }
403
404 if (err)
405 return err;
406
407 err = __hw_perf_event_init(event);
408 if (err)
409 hw_perf_event_destroy(event);
410
411 return err;
412}
413
414static struct pmu pmu = {
415 .pmu_enable = mipspmu_enable,
416 .pmu_disable = mipspmu_disable,
417 .event_init = mipspmu_event_init,
418 .add = mipspmu_add,
419 .del = mipspmu_del,
420 .start = mipspmu_start,
421 .stop = mipspmu_stop,
422 .read = mipspmu_read,
423};
424
321static inline unsigned int 425static inline unsigned int
322mipspmu_perf_event_encode(const struct mips_perf_event *pev) 426mipspmu_perf_event_encode(const struct mips_perf_event *pev)
323{ 427{
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc,
382{ 486{
383 struct hw_perf_event fake_hwc = event->hw; 487 struct hw_perf_event fake_hwc = event->hw;
384 488
385 if (event->pmu && event->pmu != &pmu) 489 /* Allow mixed event group. So return 1 to pass validation. */
386 return 0; 490 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
491 return 1;
387 492
388 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; 493 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
389} 494}
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event)
409 return 0; 514 return 0;
410} 515}
411 516
412/*
413 * mipsxx/rm9000/loongson2 have different performance counters, they have
414 * specific low-level init routines.
415 */
416static void reset_counters(void *arg);
417static int __hw_perf_event_init(struct perf_event *event);
418
419static void hw_perf_event_destroy(struct perf_event *event)
420{
421 if (atomic_dec_and_mutex_lock(&active_events,
422 &pmu_reserve_mutex)) {
423 /*
424 * We must not call the destroy function with interrupts
425 * disabled.
426 */
427 on_each_cpu(reset_counters,
428 (void *)(long)mipspmu->num_counters, 1);
429 mipspmu_free_irq();
430 mutex_unlock(&pmu_reserve_mutex);
431 }
432}
433
434const struct pmu *hw_perf_event_init(struct perf_event *event)
435{
436 int err = 0;
437
438 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
439 (event->cpu >= 0 && !cpu_online(event->cpu)))
440 return ERR_PTR(-ENODEV);
441
442 if (!atomic_inc_not_zero(&active_events)) {
443 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
444 atomic_dec(&active_events);
445 return ERR_PTR(-ENOSPC);
446 }
447
448 mutex_lock(&pmu_reserve_mutex);
449 if (atomic_read(&active_events) == 0)
450 err = mipspmu_get_irq();
451
452 if (!err)
453 atomic_inc(&active_events);
454 mutex_unlock(&pmu_reserve_mutex);
455 }
456
457 if (err)
458 return ERR_PTR(err);
459
460 err = __hw_perf_event_init(event);
461 if (err)
462 hw_perf_event_destroy(event);
463
464 return err ? ERR_PTR(err) : &pmu;
465}
466
467void hw_perf_enable(void)
468{
469 if (mipspmu)
470 mipspmu->start();
471}
472
473void hw_perf_disable(void)
474{
475 if (mipspmu)
476 mipspmu->stop();
477}
478
479/* This is needed by specific irq handlers in perf_event_*.c */ 517/* This is needed by specific irq handlers in perf_event_*.c */
480static void 518static void
481handle_associated_event(struct cpu_hw_events *cpuc, 519handle_associated_event(struct cpu_hw_events *cpuc,
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc,
496#include "perf_event_mipsxx.c" 534#include "perf_event_mipsxx.c"
497 535
498/* Callchain handling code. */ 536/* Callchain handling code. */
499static inline void
500callchain_store(struct perf_callchain_entry *entry,
501 u64 ip)
502{
503 if (entry->nr < PERF_MAX_STACK_DEPTH)
504 entry->ip[entry->nr++] = ip;
505}
506 537
507/* 538/*
508 * Leave userspace callchain empty for now. When we find a way to trace 539 * Leave userspace callchain empty for now. When we find a way to trace
509 * the user stack callchains, we add here. 540 * the user stack callchains, we add here.
510 */ 541 */
511static void 542void perf_callchain_user(struct perf_callchain_entry *entry,
512perf_callchain_user(struct pt_regs *regs, 543 struct pt_regs *regs)
513 struct perf_callchain_entry *entry)
514{ 544{
515} 545}
516 546
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
523 while (!kstack_end(sp)) { 553 while (!kstack_end(sp)) {
524 addr = *sp++; 554 addr = *sp++;
525 if (__kernel_text_address(addr)) { 555 if (__kernel_text_address(addr)) {
526 callchain_store(entry, addr); 556 perf_callchain_store(entry, addr);
527 if (entry->nr >= PERF_MAX_STACK_DEPTH) 557 if (entry->nr >= PERF_MAX_STACK_DEPTH)
528 break; 558 break;
529 } 559 }
530 } 560 }
531} 561}
532 562
533static void 563void perf_callchain_kernel(struct perf_callchain_entry *entry,
534perf_callchain_kernel(struct pt_regs *regs, 564 struct pt_regs *regs)
535 struct perf_callchain_entry *entry)
536{ 565{
537 unsigned long sp = regs->regs[29]; 566 unsigned long sp = regs->regs[29];
538#ifdef CONFIG_KALLSYMS 567#ifdef CONFIG_KALLSYMS
539 unsigned long ra = regs->regs[31]; 568 unsigned long ra = regs->regs[31];
540 unsigned long pc = regs->cp0_epc; 569 unsigned long pc = regs->cp0_epc;
541 570
542 callchain_store(entry, PERF_CONTEXT_KERNEL);
543 if (raw_show_trace || !__kernel_text_address(pc)) { 571 if (raw_show_trace || !__kernel_text_address(pc)) {
544 unsigned long stack_page = 572 unsigned long stack_page =
545 (unsigned long)task_stack_page(current); 573 (unsigned long)task_stack_page(current);
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs,
549 return; 577 return;
550 } 578 }
551 do { 579 do {
552 callchain_store(entry, pc); 580 perf_callchain_store(entry, pc);
553 if (entry->nr >= PERF_MAX_STACK_DEPTH) 581 if (entry->nr >= PERF_MAX_STACK_DEPTH)
554 break; 582 break;
555 pc = unwind_stack(current, &sp, pc, &ra); 583 pc = unwind_stack(current, &sp, pc, &ra);
556 } while (pc); 584 } while (pc);
557#else 585#else
558 callchain_store(entry, PERF_CONTEXT_KERNEL);
559 save_raw_perf_callchain(entry, sp); 586 save_raw_perf_callchain(entry, sp);
560#endif 587#endif
561} 588}
562
563static void
564perf_do_callchain(struct pt_regs *regs,
565 struct perf_callchain_entry *entry)
566{
567 int is_user;
568
569 if (!regs)
570 return;
571
572 is_user = user_mode(regs);
573
574 if (!current || !current->pid)
575 return;
576
577 if (is_user && current->state != TASK_RUNNING)
578 return;
579
580 if (!is_user) {
581 perf_callchain_kernel(regs, entry);
582 if (current->mm)
583 regs = task_pt_regs(current);
584 else
585 regs = NULL;
586 }
587 if (regs)
588 perf_callchain_user(regs, entry);
589}
590
591static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
592
593struct perf_callchain_entry *
594perf_callchain(struct pt_regs *regs)
595{
596 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
597
598 entry->nr = 0;
599 perf_do_callchain(regs, entry);
600 return entry;
601}
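The hunk ports the MIPS PMU from the old per-event enable/disable/unthrottle callbacks to the struct pmu interface: pmu_enable/pmu_disable gate the whole PMU, event_init claims an event, and add/del/start/stop manage one counter, with PERF_EF_START, PERF_EF_RELOAD and PERF_EF_UPDATE as the flag bits passed by the core. A bare skeleton of that shape, with made-up names standing in for the mipspmu_* functions above:

#include <linux/errno.h>
#include <linux/perf_event.h>

static int  example_event_init(struct perf_event *event)	{ return -ENOENT; }
static int  example_add(struct perf_event *event, int flags)	{ return 0; }
static void example_del(struct perf_event *event, int flags)	{ }
static void example_start(struct perf_event *event, int flags)	{ }
static void example_stop(struct perf_event *event, int flags)	{ }
static void example_read(struct perf_event *event)		{ }
static void example_pmu_enable(struct pmu *pmu)			{ }
static void example_pmu_disable(struct pmu *pmu)		{ }

static struct pmu example_pmu = {
	.pmu_enable	= example_pmu_enable,
	.pmu_disable	= example_pmu_disable,
	.event_init	= example_event_init,
	.add		= example_add,
	.del		= example_del,
	.start		= example_start,
	.stop		= example_stop,
	.read		= example_read,
};

/* Registered once at init time, as the mipsxx hunk below now does:
 *	perf_pmu_register(&example_pmu, "cpu", PERF_TYPE_RAW);
 */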
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 183e0d226669..d9a7db78ed62 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
696 * interrupt, not NMI. 696 * interrupt, not NMI.
697 */ 697 */
698 if (handled == IRQ_HANDLED) 698 if (handled == IRQ_HANDLED)
699 perf_event_do_pending(); 699 irq_work_run();
700 700
701#ifdef CONFIG_MIPS_MT_SMP 701#ifdef CONFIG_MIPS_MT_SMP
702 read_unlock(&pmuint_rwlock); 702 read_unlock(&pmuint_rwlock);
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void)
1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq, 1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1046 irq < 0 ? " (share with timer interrupt)" : ""); 1046 irq < 0 ? " (share with timer interrupt)" : "");
1047 1047
1048 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1049
1048 return 0; 1050 return 0;
1049} 1051}
1050early_initcall(init_hw_perf_events); 1052early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 5922342bca39..dbbe0ce48d89 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc)
84 84
85static int protected_restore_fp_context(struct sigcontext __user *sc) 85static int protected_restore_fp_context(struct sigcontext __user *sc)
86{ 86{
87 int err, tmp; 87 int err, tmp __maybe_unused;
88 while (1) { 88 while (1) {
89 lock_fpu_owner(); 89 lock_fpu_owner();
90 own_fpu_inatomic(0); 90 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index a0ed0e052b2e..aae986613795 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc)
115 115
116static int protected_restore_fp_context32(struct sigcontext32 __user *sc) 116static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
117{ 117{
118 int err, tmp; 118 int err, tmp __maybe_unused;
119 while (1) { 119 while (1) {
120 lock_fpu_owner(); 120 lock_fpu_owner();
121 own_fpu_inatomic(0); 121 own_fpu_inatomic(0);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 383aeb95cb49..32a256101082 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void)
193 */ 193 */
194static struct task_struct *cpu_idle_thread[NR_CPUS]; 194static struct task_struct *cpu_idle_thread[NR_CPUS];
195 195
196struct create_idle {
197 struct work_struct work;
198 struct task_struct *idle;
199 struct completion done;
200 int cpu;
201};
202
203static void __cpuinit do_fork_idle(struct work_struct *work)
204{
205 struct create_idle *c_idle =
206 container_of(work, struct create_idle, work);
207
208 c_idle->idle = fork_idle(c_idle->cpu);
209 complete(&c_idle->done);
210}
211
196int __cpuinit __cpu_up(unsigned int cpu) 212int __cpuinit __cpu_up(unsigned int cpu)
197{ 213{
198 struct task_struct *idle; 214 struct task_struct *idle;
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu)
203 * Linux can schedule processes on this slave. 219 * Linux can schedule processes on this slave.
204 */ 220 */
205 if (!cpu_idle_thread[cpu]) { 221 if (!cpu_idle_thread[cpu]) {
206 idle = fork_idle(cpu); 222 /*
207 cpu_idle_thread[cpu] = idle; 223 * Schedule work item to avoid forking user task
224 * Ported from arch/x86/kernel/smpboot.c
225 */
226 struct create_idle c_idle = {
227 .cpu = cpu,
228 .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
229 };
230
231 INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle);
232 schedule_work(&c_idle.work);
233 wait_for_completion(&c_idle.done);
234 idle = cpu_idle_thread[cpu] = c_idle.idle;
208 235
209 if (IS_ERR(idle)) 236 if (IS_ERR(idle))
210 panic(KERN_ERR "Fork failed for CPU %d", cpu); 237 panic(KERN_ERR "Fork failed for CPU %d", cpu);
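__cpu_up() used to call fork_idle() directly; since it can run from a user process's context (e.g. CPU hotplug via sysfs), the idle thread would inherit that task's state, so the change defers the fork to a workqueue worker and waits on an on-stack completion, the same trick x86's smpboot uses. A minimal sketch of just that defer-and-wait pattern, with a placeholder payload instead of fork_idle():

#include <linux/workqueue.h>
#include <linux/completion.h>

struct deferred_job {
	struct work_struct	work;
	struct completion	done;
	int			result;
};

static void deferred_job_fn(struct work_struct *work)
{
	struct deferred_job *job = container_of(work, struct deferred_job, work);

	job->result = 42;		/* stand-in for fork_idle(cpu) */
	complete(&job->done);
}

static int run_in_worker_context(void)
{
	struct deferred_job job = {
		.done = COMPLETION_INITIALIZER_ONSTACK(job.done),
	};

	INIT_WORK_ONSTACK(&job.work, deferred_job_fn);
	schedule_work(&job.work);
	wait_for_completion(&job.done);	/* job lives on our stack, so wait */
	return job.result;
}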
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 1dc6edff45e0..58beabf50b3c 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips);
383static int __used noinline 383static int __used noinline
384_sys_sysmips(nabi_no_regargs struct pt_regs regs) 384_sys_sysmips(nabi_no_regargs struct pt_regs regs)
385{ 385{
386 long cmd, arg1, arg2, arg3; 386 long cmd, arg1, arg2;
387 387
388 cmd = regs.regs[4]; 388 cmd = regs.regs[4];
389 arg1 = regs.regs[5]; 389 arg1 = regs.regs[5];
390 arg2 = regs.regs[6]; 390 arg2 = regs.regs[6];
391 arg3 = regs.regs[7];
392 391
393 switch (cmd) { 392 switch (cmd) {
394 case MIPS_ATOMIC_SET: 393 case MIPS_ATOMIC_SET:
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs)
405 if (arg1 & 2) 404 if (arg1 & 2)
406 set_thread_flag(TIF_LOGADE); 405 set_thread_flag(TIF_LOGADE);
407 else 406 else
408 clear_thread_flag(TIF_FIXADE); 407 clear_thread_flag(TIF_LOGADE);
409 408
410 return 0; 409 return 0;
411 410
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6a1fdfef8fde..ab52b7cf3b6b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -148,9 +148,9 @@ struct {
148 spinlock_t tc_list_lock; 148 spinlock_t tc_list_lock;
149 struct list_head tc_list; /* Thread contexts */ 149 struct list_head tc_list; /* Thread contexts */
150} vpecontrol = { 150} vpecontrol = {
151 .vpe_list_lock = SPIN_LOCK_UNLOCKED, 151 .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), 152 .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
153 .tc_list_lock = SPIN_LOCK_UNLOCKED, 153 .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) 154 .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
155}; 155};
156 156
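SPIN_LOCK_UNLOCKED was deprecated because it cannot give the debug/lockdep variants a per-lock name and class; __SPIN_LOCK_UNLOCKED(name) (or DEFINE_SPINLOCK for a standalone lock) can. A short sketch of both forms:

#include <linux/spinlock.h>
#include <linux/list.h>

/* Standalone lock: */
static DEFINE_SPINLOCK(example_lock);

/* Lock embedded in a static struct initializer, as in vpecontrol above:
 * pass the field so lockdep gets a usable name.
 */
struct example_ctrl {
	spinlock_t		list_lock;
	struct list_head	list;
};

static struct example_ctrl example_ctrl = {
	.list_lock	= __SPIN_LOCK_UNLOCKED(example_ctrl.list_lock),
	.list		= LIST_HEAD_INIT(example_ctrl.list),
};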
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 6e1b77fec7ea..aca93eed8779 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -1,6 +1,7 @@
1if MACH_LOONGSON
2
1choice 3choice
2 prompt "Machine Type" 4 prompt "Machine Type"
3 depends on MACH_LOONGSON
4 5
5config LEMOTE_FULOONG2E 6config LEMOTE_FULOONG2E
6 bool "Lemote Fuloong(2e) mini-PC" 7 bool "Lemote Fuloong(2e) mini-PC"
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE
87config LOONGSON_MC146818 88config LOONGSON_MC146818
88 bool 89 bool
89 default n 90 default n
91
92endif # MACH_LOONGSON
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c
index 1a06defc4f7f..353e1d2e41a5 100644
--- a/arch/mips/loongson/common/cmdline.c
+++ b/arch/mips/loongson/common/cmdline.c
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void)
44 strcat(arcs_cmdline, " "); 44 strcat(arcs_cmdline, " ");
45 } 45 }
46 46
47 if ((strstr(arcs_cmdline, "console=")) == NULL)
48 strcat(arcs_cmdline, " console=ttyS0,115200");
49 if ((strstr(arcs_cmdline, "root=")) == NULL)
50 strcat(arcs_cmdline, " root=/dev/hda1");
51
52 prom_init_machtype(); 47 prom_init_machtype();
53} 48}
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 81fbe6b73f91..2efd5d9dee27 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void)
41 41
42void __init prom_init_machtype(void) 42void __init prom_init_machtype(void)
43{ 43{
44 char *p, str[MACHTYPE_LEN]; 44 char *p, str[MACHTYPE_LEN + 1];
45 int machtype = MACH_LEMOTE_FL2E; 45 int machtype = MACH_LEMOTE_FL2E;
46 46
47 mips_machtype = LOONGSON_MACHTYPE; 47 mips_machtype = LOONGSON_MACHTYPE;
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void)
53 } 53 }
54 p += strlen("machtype="); 54 p += strlen("machtype=");
55 strncpy(str, p, MACHTYPE_LEN); 55 strncpy(str, p, MACHTYPE_LEN);
56 str[MACHTYPE_LEN] = '\0';
56 p = strstr(str, " "); 57 p = strstr(str, " ");
57 if (p) 58 if (p)
58 *p = '\0'; 59 *p = '\0';
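strncpy() does not NUL-terminate when the source is at least MACHTYPE_LEN characters long, so the following strstr() could run past the end of str; the fix grows the buffer by one byte and terminates it explicitly. A userspace illustration (MACHTYPE_LEN's real value lives in the Loongson headers; 50 here is illustrative):

#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 50		/* illustrative */

int main(void)
{
	const char *cmdline =
		"machtype=lemote-yeeloong-2f-8.9inches console=ttyS0,115200 "
		"plus more boot arguments than fit in the buffer";
	char str[MACHTYPE_LEN + 1];

	strncpy(str, cmdline, MACHTYPE_LEN);
	str[MACHTYPE_LEN] = '\0';	/* the added termination */

	printf("%s\n", str);		/* safe to treat as a C string now */
	return 0;
}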
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index 2701d9500959..2a7d43f4f161 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -70,7 +70,7 @@
70 70
71 71
72#define COMPXSP \ 72#define COMPXSP \
73 unsigned xm; int xe; int xs; int xc 73 unsigned xm; int xe; int xs __maybe_unused; int xc
74 74
75#define COMPYSP \ 75#define COMPYSP \
76 unsigned ym; int ye; int ys; int yc 76 unsigned ym; int ye; int ys; int yc
@@ -104,7 +104,7 @@
104 104
105 105
106#define COMPXDP \ 106#define COMPXDP \
107u64 xm; int xe; int xs; int xc 107u64 xm; int xe; int xs __maybe_unused; int xc
108 108
109#define COMPYDP \ 109#define COMPYDP \
110u64 ym; int ye; int ys; int yc 110u64 ym; int ye; int ys; int yc
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 2efcbd24c82f..279599e9a779 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr)
324void __init paging_init(void) 324void __init paging_init(void)
325{ 325{
326 unsigned long max_zone_pfns[MAX_NR_ZONES]; 326 unsigned long max_zone_pfns[MAX_NR_ZONES];
327 unsigned long lastpfn; 327 unsigned long lastpfn __maybe_unused;
328 328
329 pagetable_init(); 329 pagetable_init();
330 330
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 083d3412d0bc..04f9e17db9d0 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -109,6 +109,8 @@ static bool scratchpad_available(void)
109static int scratchpad_offset(int i) 109static int scratchpad_offset(int i)
110{ 110{
111 BUG(); 111 BUG();
112 /* Really unreachable, but evidently some GCC want this. */
113 return 0;
112} 114}
113#endif 115#endif
114/* 116/*
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index b7c03d80c88c..68798f869c0f 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = {
308 * RETURNS: PCIBIOS_SUCCESSFUL - success 308 * RETURNS: PCIBIOS_SUCCESSFUL - success
309 * 309 *
310 ****************************************************************************/ 310 ****************************************************************************/
311static int bpci_interrupt(int irq, void *dev_id) 311static irqreturn_t bpci_interrupt(int irq, void *dev_id)
312{ 312{
313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG; 313 struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
314 unsigned int stat = preg->if_status; 314 unsigned int stat = preg->if_status;
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id)
326 /* write to clear all asserted interrupts */ 326 /* write to clear all asserted interrupts */
327 preg->if_status = stat; 327 preg->if_status = stat;
328 328
329 return PCIBIOS_SUCCESSFUL; 329 return IRQ_HANDLED;
330} 330}
331 331
332/***************************************************************************** 332/*****************************************************************************
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig
index c139988bb85d..8d798497c614 100644
--- a/arch/mips/pmc-sierra/Kconfig
+++ b/arch/mips/pmc-sierra/Kconfig
@@ -4,15 +4,11 @@ choice
4 4
5config PMC_MSP4200_EVAL 5config PMC_MSP4200_EVAL
6 bool "PMC-Sierra MSP4200 Eval Board" 6 bool "PMC-Sierra MSP4200 Eval Board"
7 select CEVT_R4K
8 select CSRC_R4K
9 select IRQ_MSP_SLP 7 select IRQ_MSP_SLP
10 select HW_HAS_PCI 8 select HW_HAS_PCI
11 9
12config PMC_MSP4200_GW 10config PMC_MSP4200_GW
13 bool "PMC-Sierra MSP4200 VoIP Gateway" 11 bool "PMC-Sierra MSP4200 VoIP Gateway"
14 select CEVT_R4K
15 select CSRC_R4K
16 select IRQ_MSP_SLP 12 select IRQ_MSP_SLP
17 select HW_HAS_PCI 13 select HW_HAS_PCI
18 14
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index cca64e15f57f..01df84ce31e2 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -81,7 +81,7 @@ void __init plat_time_init(void)
81 mips_hpt_frequency = cpu_rate/2; 81 mips_hpt_frequency = cpu_rate/2;
82} 82}
83 83
84unsigned int __init get_c0_compare_int(void) 84unsigned int __cpuinit get_c0_compare_int(void)
85{ 85{
86 return MSP_INT_VPE0_TIMER; 86 return MSP_INT_VPE0_TIMER;
87} 87}
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 92d2f9298e38..9d773a639513 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
139 * Atomically reads the value of @v. Note that the guaranteed 139 * Atomically reads the value of @v. Note that the guaranteed
140 * useful range of an atomic_t is only 24 bits. 140 * useful range of an atomic_t is only 24 bits.
141 */ 141 */
142#define atomic_read(v) ((v)->counter) 142#define atomic_read(v) (ACCESS_ONCE((v)->counter))
143 143
144/** 144/**
145 * atomic_set - set atomic variable 145 * atomic_set - set atomic variable
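Routing atomic_read() through ACCESS_ONCE() forces one volatile load per call, so the compiler can neither cache v->counter across a polling loop nor reload it unexpectedly, which is what lockless readers of an atomic_t rely on. A self-contained sketch of the idea (MY_ACCESS_ONCE imitates the kernel macro rather than being it):

/* Local imitation of the kernel's ACCESS_ONCE() for illustration. */
#define MY_ACCESS_ONCE(x)	(*(volatile __typeof__(x) *)&(x))

struct my_atomic { int counter; };

static int my_atomic_read(struct my_atomic *v)
{
	return MY_ACCESS_ONCE(v->counter);
}

static void wait_for_nonzero(struct my_atomic *v)
{
	/*
	 * Without the volatile access the compiler may hoist the load
	 * out of the loop and spin forever on a stale value.
	 */
	while (my_atomic_read(v) == 0)
		;
}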
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 679dee0bbd08..3d6e60dad9d9 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; };
160 160
161#define __get_user_check(x, ptr, size) \ 161#define __get_user_check(x, ptr, size) \
162({ \ 162({ \
163 const __typeof__(ptr) __guc_ptr = (ptr); \
163 int _e; \ 164 int _e; \
164 if (likely(__access_ok((unsigned long) (ptr), (size)))) \ 165 if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
165 _e = __get_user_nocheck((x), (ptr), (size)); \ 166 _e = __get_user_nocheck((x), __guc_ptr, (size)); \
166 else { \ 167 else { \
167 _e = -EFAULT; \ 168 _e = -EFAULT; \
168 (x) = (__typeof__(x))0; \ 169 (x) = (__typeof__(x))0; \
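The point of __guc_ptr above is that a macro argument with side effects (say, __get_user(x, p++)) must be evaluated exactly once. A simplified sketch of the single-evaluation idiom, skipping the real exception-table machinery (access_ok_sketch is a hypothetical stand-in for __access_ok()):

#include <linux/errno.h>

extern int access_ok_sketch(const void *ptr, unsigned long size);	/* hypothetical */

#define GET_USER_SKETCH(x, ptr) ({					\
	const __typeof__(ptr) __p = (ptr);	/* evaluate ptr once */	\
	int __err = 0;							\
	if (access_ok_sketch(__p, sizeof(*__p)))			\
		(x) = *__p;	/* real code would go through a fixup */\
	else {								\
		(x) = (__typeof__(x))0;					\
		__err = -EFAULT;					\
	}								\
	__err;								\
})
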
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
index a8933a60b2d4..a6b63dde603d 100644
--- a/arch/mn10300/mm/cache-inv-icache.c
+++ b/arch/mn10300/mm/cache-inv-icache.c
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end)
69 69
70 /* invalidate the icache coverage on that region */ 70 /* invalidate the icache coverage on that region */
71 mn10300_local_icache_inv_range2(addr + off, size); 71 mn10300_local_icache_inv_range2(addr + off, size);
72 smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); 72 smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
73} 73}
74 74
75/** 75/**
@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
101 * directly */ 101 * directly */
102 start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; 102 start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
103 mn10300_icache_inv_range(start_page, end); 103 mn10300_icache_inv_range(start_page, end);
104 smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); 104 smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
105 if (start_page == start) 105 if (start_page == start)
106 goto done; 106 goto done;
107 end = start_page; 107 end = start_page;
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 380d48bacd16..26b8c807f8f1 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -33,9 +33,25 @@
33// 33//
34//---------------------------------------------------------------------------- 34//----------------------------------------------------------------------------
35#include <linux/cache.h> 35#include <linux/cache.h>
36#include <linux/threads.h>
36#include <asm/types.h> 37#include <asm/types.h>
37#include <asm/mmu.h> 38#include <asm/mmu.h>
38 39
40/*
41 * We only have to have statically allocated lppaca structs on
42 * legacy iSeries, which supports at most 64 cpus.
43 */
44#ifdef CONFIG_PPC_ISERIES
45#if NR_CPUS < 64
46#define NR_LPPACAS NR_CPUS
47#else
48#define NR_LPPACAS 64
49#endif
50#else /* not iSeries */
51#define NR_LPPACAS 1
52#endif
53
54
39/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 55/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
40 * alignment is sufficient to prevent this */ 56 * alignment is sufficient to prevent this */
41struct lppaca { 57struct lppaca {
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 991d5998d6be..fe56a23e1ff0 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -240,6 +240,12 @@ struct machdep_calls {
240 * claims to support kexec. 240 * claims to support kexec.
241 */ 241 */
242 int (*machine_kexec_prepare)(struct kimage *image); 242 int (*machine_kexec_prepare)(struct kimage *image);
243
244 /* Called to perform the _real_ kexec.
245 * Do NOT allocate memory or fail here. We are past the point of
246 * no return.
247 */
248 void (*machine_kexec)(struct kimage *image);
243#endif /* CONFIG_KEXEC */ 249#endif /* CONFIG_KEXEC */
244 250
245#ifdef CONFIG_SUSPEND 251#ifdef CONFIG_SUSPEND
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 49a170af8145..a5f8672eeff3 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image)
87 87
88 save_ftrace_enabled = __ftrace_enabled_save(); 88 save_ftrace_enabled = __ftrace_enabled_save();
89 89
90 default_machine_kexec(image); 90 if (ppc_md.machine_kexec)
91 ppc_md.machine_kexec(image);
92 else
93 default_machine_kexec(image);
91 94
92 __ftrace_enabled_restore(save_ftrace_enabled); 95 __ftrace_enabled_restore(save_ftrace_enabled);
93 96
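The two powerpc hunks above introduce the usual optional-hook pattern: a machdep function pointer that a platform may fill in, with the generic code falling back to the default when it is NULL. A minimal sketch of that pattern (field names mirror the hunks; the surrounding types are kept opaque here):

struct kimage;						/* opaque for the sketch */

extern void default_machine_kexec(struct kimage *image);	/* generic path */

struct machdep_calls_sketch {
	void (*machine_kexec)(struct kimage *image);	/* optional platform override */
};

static void do_machine_kexec(struct machdep_calls_sketch *md, struct kimage *image)
{
	if (md->machine_kexec)
		md->machine_kexec(image);	/* platform-specific implementation */
	else
		default_machine_kexec(image);	/* fallback, as before the patch */
}
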
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ebf9846f3c3b..f4adf89d7614 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -27,20 +27,6 @@ extern unsigned long __toc_start;
27#ifdef CONFIG_PPC_BOOK3S 27#ifdef CONFIG_PPC_BOOK3S
28 28
29/* 29/*
30 * We only have to have statically allocated lppaca structs on
31 * legacy iSeries, which supports at most 64 cpus.
32 */
33#ifdef CONFIG_PPC_ISERIES
34#if NR_CPUS < 64
35#define NR_LPPACAS NR_CPUS
36#else
37#define NR_LPPACAS 64
38#endif
39#else /* not iSeries */
40#define NR_LPPACAS 1
41#endif
42
43/*
44 * The structure which the hypervisor knows about - this structure 30 * The structure which the hypervisor knows about - this structure
45 * should not cross a page boundary. The vpa_init/register_vpa call 31 * should not cross a page boundary. The vpa_init/register_vpa call
46 * is now known to fail if the lppaca structure crosses a page 32 * is now known to fail if the lppaca structure crosses a page
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7a1d5cb76932..8303a6c65ef7 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread)
353 prime_debug_regs(new_thread); 353 prime_debug_regs(new_thread);
354} 354}
355#else /* !CONFIG_PPC_ADV_DEBUG_REGS */ 355#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
356#ifndef CONFIG_HAVE_HW_BREAKPOINT
356static void set_debug_reg_defaults(struct thread_struct *thread) 357static void set_debug_reg_defaults(struct thread_struct *thread)
357{ 358{
358 if (thread->dabr) { 359 if (thread->dabr) {
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
360 set_dabr(0); 361 set_dabr(0);
361 } 362 }
362} 363}
364#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
363#endif /* CONFIG_PPC_ADV_DEBUG_REGS */ 365#endif /* CONFIG_PPC_ADV_DEBUG_REGS */
364 366
365int set_dabr(unsigned long dabr) 367int set_dabr(unsigned long dabr)
@@ -670,11 +672,11 @@ void flush_thread(void)
670{ 672{
671 discard_lazy_cpu_state(); 673 discard_lazy_cpu_state();
672 674
673#ifdef CONFIG_HAVE_HW_BREAKPOINTS 675#ifdef CONFIG_HAVE_HW_BREAKPOINT
674 flush_ptrace_hw_breakpoint(current); 676 flush_ptrace_hw_breakpoint(current);
675#else /* CONFIG_HAVE_HW_BREAKPOINTS */ 677#else /* CONFIG_HAVE_HW_BREAKPOINT */
676 set_debug_reg_defaults(&current->thread); 678 set_debug_reg_defaults(&current->thread);
677#endif /* CONFIG_HAVE_HW_BREAKPOINTS */ 679#endif /* CONFIG_HAVE_HW_BREAKPOINT */
678} 680}
679 681
680void 682void
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index fd4812329570..0dc95c0aa3be 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1516,7 +1516,8 @@ int start_topology_update(void)
1516{ 1516{
1517 int rc = 0; 1517 int rc = 0;
1518 1518
1519 if (firmware_has_feature(FW_FEATURE_VPHN) && 1519 /* Disabled until races with load balancing are fixed */
1520 if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
1520 get_lppaca()->shared_proc) { 1521 get_lppaca()->shared_proc) {
1521 vphn_enabled = 1; 1522 vphn_enabled = 1;
1522 setup_cpu_associativity_change_counters(); 1523 setup_cpu_associativity_change_counters();
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index 1ec06576f619..c14d09f614f3 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch);
38 * neesd to be flushed. This function will either perform the flush 38 * neesd to be flushed. This function will either perform the flush
39 * immediately or will batch it up if the current CPU has an active 39 * immediately or will batch it up if the current CPU has an active
40 * batch on it. 40 * batch on it.
41 *
42 * Must be called from within some kind of spinlock/non-preempt region...
43 */ 41 */
44void hpte_need_flush(struct mm_struct *mm, unsigned long addr, 42void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
45 pte_t *ptep, unsigned long pte, int huge) 43 pte_t *ptep, unsigned long pte, int huge)
46{ 44{
47 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 45 struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch);
48 unsigned long vsid, vaddr; 46 unsigned long vsid, vaddr;
49 unsigned int psize; 47 unsigned int psize;
50 int ssize; 48 int ssize;
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
99 */ 97 */
100 if (!batch->active) { 98 if (!batch->active) {
101 flush_hash_page(vaddr, rpte, psize, ssize, 0); 99 flush_hash_page(vaddr, rpte, psize, ssize, 0);
100 put_cpu_var(ppc64_tlb_batch);
102 return; 101 return;
103 } 102 }
104 103
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
127 batch->index = ++i; 126 batch->index = ++i;
128 if (i >= PPC64_TLB_BATCH_NR) 127 if (i >= PPC64_TLB_BATCH_NR)
129 __flush_tlb_pending(batch); 128 __flush_tlb_pending(batch);
129 put_cpu_var(ppc64_tlb_batch);
130} 130}
131 131
132/* 132/*
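The switch from __get_cpu_var() to get_cpu_var() above disables preemption while the per-CPU batch is in use, so every exit path now needs a matching put_cpu_var() — hence the two added calls, including the one on the early-return path. A minimal sketch of the balanced pattern (the struct here is illustrative, not the real ppc64_tlb_batch):

#include <linux/percpu.h>

struct batch_sketch { int active; int index; };		/* illustrative */
static DEFINE_PER_CPU(struct batch_sketch, batch_sketch);

static void touch_batch(void)
{
	/* get_cpu_var() disables preemption so the per-CPU pointer stays
	 * valid; each return path must pair it with put_cpu_var(). */
	struct batch_sketch *b = &get_cpu_var(batch_sketch);

	if (!b->active) {
		put_cpu_var(batch_sketch);	/* early-return path */
		return;
	}

	b->index++;
	put_cpu_var(batch_sketch);		/* normal path */
}
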
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
index fdb7384c0c4f..f0491cc28900 100644
--- a/arch/powerpc/platforms/iseries/dt.c
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ 242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); 243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
244 244
245 for (i = 0; i < NR_CPUS; i++) { 245 for (i = 0; i < NR_LPPACAS; i++) {
246 if (lppaca_of(i).dyn_proc_status >= 2) 246 if (lppaca[i].dyn_proc_status >= 2)
247 continue; 247 continue;
248 248
249 snprintf(p, 32 - (p - buf), "@%d", i); 249 snprintf(p, 32 - (p - buf), "@%d", i);
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt)
251 251
252 dt_prop_str(dt, "device_type", device_type_cpu); 252 dt_prop_str(dt, "device_type", device_type_cpu);
253 253
254 index = lppaca_of(i).dyn_hv_phys_proc_index; 254 index = lppaca[i].dyn_hv_phys_proc_index;
255 d = &xIoHriProcessorVpd[index]; 255 d = &xIoHriProcessorVpd[index];
256 256
257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); 257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index b0863410517f..2946ae10fbfd 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void)
680 * on but calling this function multiple times is fine. 680 * on but calling this function multiple times is fine.
681 */ 681 */
682 identify_cpu(0, mfspr(SPRN_PVR)); 682 identify_cpu(0, mfspr(SPRN_PVR));
683 initialise_paca(&boot_paca, 0);
683 684
684 powerpc_firmware_features |= FW_FEATURE_ISERIES; 685 powerpc_firmware_features |= FW_FEATURE_ISERIES;
685 powerpc_firmware_features |= FW_FEATURE_LPAR; 686 powerpc_firmware_features |= FW_FEATURE_LPAR;
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c
index 0851eb1e919e..2751b3a8a66f 100644
--- a/arch/s390/boot/compressed/misc.c
+++ b/arch/s390/boot/compressed/misc.c
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void)
133 unsigned long output_addr; 133 unsigned long output_addr;
134 unsigned char *output; 134 unsigned char *output;
135 135
136 check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); 136 output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL;
137 check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start);
137 memset(&_bss, 0, &_ebss - &_bss); 138 memset(&_bss, 0, &_ebss - &_bss);
138 free_mem_ptr = (unsigned long)&_end; 139 free_mem_ptr = (unsigned long)&_end;
139 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; 140 free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
140 output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); 141 output = (unsigned char *) output_addr;
141 142
142#ifdef CONFIG_BLK_DEV_INITRD 143#ifdef CONFIG_BLK_DEV_INITRD
143 /* 144 /*
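The s390 hunk above computes output_addr before its first use; the expression (addr + 4095UL) & -4096UL rounds up to the next 4 KiB boundary, since -4096 is ~4095 in two's complement. A tiny worked sketch of the rounding:

/* Round addr up to the next 4 KiB boundary: adding 4095 carries any
 * non-aligned value past the boundary; the mask clears the low 12 bits. */
static unsigned long align_up_4k(unsigned long addr)
{
	return (addr + 4095UL) & ~4095UL;	/* same as & -4096UL */
}

/* e.g. align_up_4k(0x1001) == 0x2000; align_up_4k(0x2000) == 0x2000 */
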
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c
index f42dbabc0d30..48884f89ab92 100644
--- a/arch/s390/crypto/sha_common.c
+++ b/arch/s390/crypto/sha_common.c
@@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len)
38 BUG_ON(ret != bsize); 38 BUG_ON(ret != bsize);
39 data += bsize - index; 39 data += bsize - index;
40 len -= bsize - index; 40 len -= bsize - index;
41 index = 0;
41 } 42 }
42 43
43 /* process as many blocks as possible */ 44 /* process as many blocks as possible */
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
index 76daea117181..5c5ba10384c2 100644
--- a/arch/s390/include/asm/atomic.h
+++ b/arch/s390/include/asm/atomic.h
@@ -36,14 +36,19 @@
36 36
37static inline int atomic_read(const atomic_t *v) 37static inline int atomic_read(const atomic_t *v)
38{ 38{
39 barrier(); 39 int c;
40 return v->counter; 40
41 asm volatile(
42 " l %0,%1\n"
43 : "=d" (c) : "Q" (v->counter));
44 return c;
41} 45}
42 46
43static inline void atomic_set(atomic_t *v, int i) 47static inline void atomic_set(atomic_t *v, int i)
44{ 48{
45 v->counter = i; 49 asm volatile(
46 barrier(); 50 " st %1,%0\n"
51 : "=Q" (v->counter) : "d" (i));
47} 52}
48 53
49static inline int atomic_add_return(int i, atomic_t *v) 54static inline int atomic_add_return(int i, atomic_t *v)
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
128 133
129static inline long long atomic64_read(const atomic64_t *v) 134static inline long long atomic64_read(const atomic64_t *v)
130{ 135{
131 barrier(); 136 long long c;
132 return v->counter; 137
138 asm volatile(
139 " lg %0,%1\n"
140 : "=d" (c) : "Q" (v->counter));
141 return c;
133} 142}
134 143
135static inline void atomic64_set(atomic64_t *v, long long i) 144static inline void atomic64_set(atomic64_t *v, long long i)
136{ 145{
137 v->counter = i; 146 asm volatile(
138 barrier(); 147 " stg %1,%0\n"
148 : "=Q" (v->counter) : "d" (i));
139} 149}
140 150
141static inline long long atomic64_add_return(long long i, atomic64_t *v) 151static inline long long atomic64_add_return(long long i, atomic64_t *v)
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
index 24aafa68b643..2a30d5ac0667 100644
--- a/arch/s390/include/asm/cache.h
+++ b/arch/s390/include/asm/cache.h
@@ -13,6 +13,7 @@
13 13
14#define L1_CACHE_BYTES 256 14#define L1_CACHE_BYTES 256
15#define L1_CACHE_SHIFT 8 15#define L1_CACHE_SHIFT 8
16#define NET_SKB_PAD 32
16 17
17#define __read_mostly __attribute__((__section__(".data..read_mostly"))) 18#define __read_mostly __attribute__((__section__(".data..read_mostly")))
18 19
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index bf3de04170a7..2c79b6416271 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -148,11 +148,6 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
148 */ 148 */
149extern unsigned long thread_saved_pc(struct task_struct *t); 149extern unsigned long thread_saved_pc(struct task_struct *t);
150 150
151/*
152 * Print register of task into buffer. Used in fs/proc/array.c.
153 */
154extern void task_show_regs(struct seq_file *m, struct task_struct *task);
155
156extern void show_code(struct pt_regs *regs); 151extern void show_code(struct pt_regs *regs);
157 152
158unsigned long get_wchan(struct task_struct *p); 153unsigned long get_wchan(struct task_struct *p);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 5eb78dd584ce..b5a4a739b477 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -237,43 +237,6 @@ void show_regs(struct pt_regs *regs)
237 show_last_breaking_event(regs); 237 show_last_breaking_event(regs);
238} 238}
239 239
240/* This is called from fs/proc/array.c */
241void task_show_regs(struct seq_file *m, struct task_struct *task)
242{
243 struct pt_regs *regs;
244
245 regs = task_pt_regs(task);
246 seq_printf(m, "task: %p, ksp: %p\n",
247 task, (void *)task->thread.ksp);
248 seq_printf(m, "User PSW : %p %p\n",
249 (void *) regs->psw.mask, (void *)regs->psw.addr);
250
251 seq_printf(m, "User GPRS: " FOURLONG,
252 regs->gprs[0], regs->gprs[1],
253 regs->gprs[2], regs->gprs[3]);
254 seq_printf(m, " " FOURLONG,
255 regs->gprs[4], regs->gprs[5],
256 regs->gprs[6], regs->gprs[7]);
257 seq_printf(m, " " FOURLONG,
258 regs->gprs[8], regs->gprs[9],
259 regs->gprs[10], regs->gprs[11]);
260 seq_printf(m, " " FOURLONG,
261 regs->gprs[12], regs->gprs[13],
262 regs->gprs[14], regs->gprs[15]);
263 seq_printf(m, "User ACRS: %08x %08x %08x %08x\n",
264 task->thread.acrs[0], task->thread.acrs[1],
265 task->thread.acrs[2], task->thread.acrs[3]);
266 seq_printf(m, " %08x %08x %08x %08x\n",
267 task->thread.acrs[4], task->thread.acrs[5],
268 task->thread.acrs[6], task->thread.acrs[7]);
269 seq_printf(m, " %08x %08x %08x %08x\n",
270 task->thread.acrs[8], task->thread.acrs[9],
271 task->thread.acrs[10], task->thread.acrs[11]);
272 seq_printf(m, " %08x %08x %08x %08x\n",
273 task->thread.acrs[12], task->thread.acrs[13],
274 task->thread.acrs[14], task->thread.acrs[15]);
275}
276
277static DEFINE_SPINLOCK(die_lock); 240static DEFINE_SPINLOCK(die_lock);
278 241
279void die(const char * str, struct pt_regs * regs, long err) 242void die(const char * str, struct pt_regs * regs, long err)
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h
index a78701da775b..4a5350037c8f 100644
--- a/arch/sh/include/asm/sections.h
+++ b/arch/sh/include/asm/sections.h
@@ -3,7 +3,7 @@
3 3
4#include <asm-generic/sections.h> 4#include <asm-generic/sections.h>
5 5
6extern void __nosave_begin, __nosave_end; 6extern long __nosave_begin, __nosave_end;
7extern long __machvec_start, __machvec_end; 7extern long __machvec_start, __machvec_end;
8extern char __uncached_start, __uncached_end; 8extern char __uncached_start, __uncached_end;
9extern char _ebss[]; 9extern char _ebss[];
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
index 672944f5b19c..e53b4b38bd11 100644
--- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c
+++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c
@@ -14,7 +14,7 @@
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/sh_timer.h> 15#include <linux/sh_timer.h>
16#include <linux/serial_sci.h> 16#include <linux/serial_sci.h>
17#include <asm/machtypes.h> 17#include <generated/machtypes.h>
18 18
19static struct resource rtc_resources[] = { 19static struct resource rtc_resources[] = {
20 [0] = { 20 [0] = {
@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = {
255 255
256void __init plat_early_device_setup(void) 256void __init plat_early_device_setup(void)
257{ 257{
258 struct platform_device *dev[1];
259
258 if (mach_is_rts7751r2d()) { 260 if (mach_is_rts7751r2d()) {
259 scif_platform_data.scscr |= SCSCR_CKE1; 261 scif_platform_data.scscr |= SCSCR_CKE1;
260 early_platform_add_devices(&scif_device, 1); 262 dev[0] = &scif_device;
263 early_platform_add_devices(dev, 1);
261 } else { 264 } else {
262 early_platform_add_devices(&sci_device, 1); 265 dev[0] = &sci_device;
263 early_platform_add_devices(&scif_device, 1); 266 early_platform_add_devices(dev, 1);
267 dev[0] = &scif_device;
268 early_platform_add_devices(dev, 1);
264 } 269 }
265 270
266 early_platform_add_devices(sh7750_early_devices, 271 early_platform_add_devices(sh7750_early_devices,
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index faa8f86c0db4..0901b2f14e15 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -10,6 +10,16 @@
10void __delay(unsigned long loops) 10void __delay(unsigned long loops)
11{ 11{
12 __asm__ __volatile__( 12 __asm__ __volatile__(
13 /*
14 * ST40-300 appears to have an issue with this code,
15 * normally taking two cycles each loop, as with all
16 * other SH variants. If however the branch and the
17 * delay slot straddle an 8 byte boundary, this increases
18 * to 3 cycles.
19 * This align directive ensures this doesn't occur.
20 */
21 ".balign 8\n\t"
22
13 "tst %0, %0\n\t" 23 "tst %0, %0\n\t"
14 "1:\t" 24 "1:\t"
15 "bf/s 1b\n\t" 25 "bf/s 1b\n\t"
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 88d3dc3d30d5..5a580ea04429 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from,
108 kunmap_atomic(vfrom, KM_USER0); 108 kunmap_atomic(vfrom, KM_USER0);
109 } 109 }
110 110
111 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) 111 if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
112 (vma->vm_flags & VM_EXEC))
112 __flush_purge_region(vto, PAGE_SIZE); 113 __flush_purge_region(vto, PAGE_SIZE);
113 114
114 kunmap_atomic(vto, KM_USER1); 115 kunmap_atomic(vto, KM_USER1);
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h
index a2f5c61f924e..843e4faf6a50 100644
--- a/arch/sparc/include/asm/pcr.h
+++ b/arch/sparc/include/asm/pcr.h
@@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz)
43 43
44extern u64 pcr_enable; 44extern u64 pcr_enable;
45 45
46extern int pcr_arch_init(void);
47
46#endif /* __PCR_H */ 48#endif /* __PCR_H */
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 47977a77f6c6..72509d0e34be 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
255static int iommu_alloc_ctx(struct iommu *iommu) 255static int iommu_alloc_ctx(struct iommu *iommu)
256{ 256{
257 int lowest = iommu->ctx_lowest_free; 257 int lowest = iommu->ctx_lowest_free;
258 int sz = IOMMU_NUM_CTXS - lowest; 258 int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259 int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
260 259
261 if (unlikely(n == sz)) { 260 if (unlikely(n == IOMMU_NUM_CTXS)) {
262 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); 261 n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
263 if (unlikely(n == lowest)) { 262 if (unlikely(n == lowest)) {
264 printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); 263 printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
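The sparc fix above matches the find_next_zero_bit() contract: the second argument is the total bitmap size in bits (the search still starts at the offset passed last), and the function returns that size when no zero bit exists. A minimal sketch of the corrected usage (CTX_BITS is an illustrative size):

#include <linux/bitops.h>

#define CTX_BITS 64					/* illustrative bitmap size */

static int alloc_free_slot(unsigned long *bitmap, unsigned int lowest)
{
	unsigned long n = find_next_zero_bit(bitmap, CTX_BITS, lowest);

	if (n == CTX_BITS)
		return -1;		/* nothing free at or above "lowest" */
	__set_bit(n, bitmap);
	return n;
}
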
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c
index ae96cf52a955..7c2ced612b8f 100644
--- a/arch/sparc/kernel/pcr.c
+++ b/arch/sparc/kernel/pcr.c
@@ -167,5 +167,3 @@ out_unregister:
167 unregister_perf_hsvc(); 167 unregister_perf_hsvc();
168 return err; 168 return err;
169} 169}
170
171early_initcall(pcr_arch_init);
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b6a2b8f47040..555a76d1f4a1 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -49,6 +49,7 @@
49#include <asm/mdesc.h> 49#include <asm/mdesc.h>
50#include <asm/ldc.h> 50#include <asm/ldc.h>
51#include <asm/hypervisor.h> 51#include <asm/hypervisor.h>
52#include <asm/pcr.h>
52 53
53#include "cpumap.h" 54#include "cpumap.h"
54 55
@@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu)
1358 1359
1359void __init smp_cpus_done(unsigned int max_cpus) 1360void __init smp_cpus_done(unsigned int max_cpus)
1360{ 1361{
1362 pcr_arch_init();
1361} 1363}
1362 1364
1363void smp_send_reschedule(int cpu) 1365void smp_send_reschedule(int cpu)
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S
index 8cc03458eb7e..8f096e84a937 100644
--- a/arch/sparc/kernel/una_asm_32.S
+++ b/arch/sparc/kernel/una_asm_32.S
@@ -24,9 +24,9 @@ retl_efault:
24 .globl __do_int_store 24 .globl __do_int_store
25__do_int_store: 25__do_int_store:
26 ld [%o2], %g1 26 ld [%o2], %g1
27 cmp %1, 2 27 cmp %o1, 2
28 be 2f 28 be 2f
29 cmp %1, 4 29 cmp %o1, 4
30 be 1f 30 be 1f
31 srl %g1, 24, %g2 31 srl %g1, 24, %g2
32 srl %g1, 16, %g7 32 srl %g1, 16, %g7
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c
index 764b3eb7b604..48d00e72ce15 100644
--- a/arch/sparc/lib/bitext.c
+++ b/arch/sparc/lib/bitext.c
@@ -10,7 +10,7 @@
10 */ 10 */
11 11
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/bitops.h> 13#include <linux/bitmap.h>
14 14
15#include <asm/bitext.h> 15#include <asm/bitext.h>
16 16
@@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align)
80 while (test_bit(offset + i, t->map) == 0) { 80 while (test_bit(offset + i, t->map) == 0) {
81 i++; 81 i++;
82 if (i == len) { 82 if (i == len) {
83 for (i = 0; i < len; i++) 83 bitmap_set(t->map, offset, len);
84 __set_bit(offset + i, t->map);
85 if (offset == t->first_free) 84 if (offset == t->first_free)
86 t->first_free = find_next_zero_bit 85 t->first_free = find_next_zero_bit
87 (t->map, t->size, 86 (t->map, t->size,
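bitmap_set(), pulled in above via linux/bitmap.h, marks a whole run of bits in one call and replaces the open-coded __set_bit() loop. A minimal usage sketch:

#include <linux/bitmap.h>

static void reserve_run(unsigned long *map, int offset, int len)
{
	/* Equivalent to: for (i = 0; i < len; i++) __set_bit(offset + i, map); */
	bitmap_set(map, offset, len);
}
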
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index 646aa78ba5fd..46a823882437 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -62,7 +62,12 @@ int main(int argc, char *argv[])
62 if (fseek(f, -4L, SEEK_END)) { 62 if (fseek(f, -4L, SEEK_END)) {
63 perror(argv[1]); 63 perror(argv[1]);
64 } 64 }
65 fread(&olen, sizeof olen, 1, f); 65
66 if (fread(&olen, sizeof(olen), 1, f) != 1) {
67 perror(argv[1]);
68 return 1;
69 }
70
66 ilen = ftell(f); 71 ilen = ftell(f);
67 olen = getle32(&olen); 72 olen = getle32(&olen);
68 fclose(f); 73 fclose(f);
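mkpiggy is host-side C, and fread() returns the number of complete items read, so comparing against 1 catches both short reads and I/O errors. A small sketch of the checked-read pattern (the file path is illustrative):

#include <stdio.h>
#include <stdint.h>

static int read_trailing_len(const char *path, uint32_t *out)
{
	FILE *f = fopen(path, "rb");

	if (!f)
		return -1;
	if (fseek(f, -4L, SEEK_END) != 0 ||
	    fread(out, sizeof(*out), 1, f) != 1) {	/* one 4-byte item */
		fclose(f);
		return -1;
	}
	fclose(f);
	return 0;
}
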
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index a37da6df07f9..b964ec457546 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -88,6 +88,7 @@ extern int acpi_disabled;
88extern int acpi_pci_disabled; 88extern int acpi_pci_disabled;
89extern int acpi_skip_timer_override; 89extern int acpi_skip_timer_override;
90extern int acpi_use_timer_override; 90extern int acpi_use_timer_override;
91extern int acpi_fix_pin2_polarity;
91 92
92extern u8 acpi_sci_flags; 93extern u8 acpi_sci_flags;
93extern int acpi_sci_override_gsi; 94extern int acpi_sci_override_gsi;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index ad30ca4b6fe9..b8a3484d69e9 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -233,6 +233,7 @@ extern void sync_Arb_IDs(void);
233extern void init_bsp_APIC(void); 233extern void init_bsp_APIC(void);
234extern void setup_local_APIC(void); 234extern void setup_local_APIC(void);
235extern void end_local_APIC_setup(void); 235extern void end_local_APIC_setup(void);
236extern void bsp_end_local_APIC_setup(void);
236extern void init_apic_mappings(void); 237extern void init_apic_mappings(void);
237void register_lapic_address(unsigned long address); 238void register_lapic_address(unsigned long address);
238extern void setup_boot_APIC_clock(void); 239extern void setup_boot_APIC_clock(void);
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
new file mode 100644
index 000000000000..e656ad8c0a2e
--- /dev/null
+++ b/arch/x86/include/asm/ce4100.h
@@ -0,0 +1,6 @@
1#ifndef _ASM_CE4100_H_
2#define _ASM_CE4100_H_
3
4int ce4100_pci_init(void);
5
6#endif
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h
index 6e6e7558e702..4564c8e28a33 100644
--- a/arch/x86/include/asm/cpu.h
+++ b/arch/x86/include/asm/cpu.h
@@ -32,6 +32,6 @@ extern void arch_unregister_cpu(int);
32 32
33DECLARE_PER_CPU(int, cpu_state); 33DECLARE_PER_CPU(int, cpu_state);
34 34
35int __cpuinit mwait_usable(const struct cpuinfo_x86 *); 35int mwait_usable(const struct cpuinfo_x86 *);
36 36
37#endif /* _ASM_X86_CPU_H */ 37#endif /* _ASM_X86_CPU_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 4d0dfa0d998e..43a18c77676d 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -36,6 +36,11 @@
36#define MSR_IA32_PERFCTR1 0x000000c2 36#define MSR_IA32_PERFCTR1 0x000000c2
37#define MSR_FSB_FREQ 0x000000cd 37#define MSR_FSB_FREQ 0x000000cd
38 38
39#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
40#define NHM_C3_AUTO_DEMOTE (1UL << 25)
41#define NHM_C1_AUTO_DEMOTE (1UL << 26)
42#define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25)
43
39#define MSR_MTRRcap 0x000000fe 44#define MSR_MTRRcap 0x000000fe
40#define MSR_IA32_BBL_CR_CTL 0x00000119 45#define MSR_IA32_BBL_CR_CTL 0x00000119
41 46
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h
index e2f6a99f14ab..cc29086e30cd 100644
--- a/arch/x86/include/asm/perf_event_p4.h
+++ b/arch/x86/include/asm/perf_event_p4.h
@@ -22,6 +22,7 @@
22 22
23#define ARCH_P4_CNTRVAL_BITS (40) 23#define ARCH_P4_CNTRVAL_BITS (40)
24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) 24#define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1)
25#define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1))
25 26
26#define P4_ESCR_EVENT_MASK 0x7e000000U 27#define P4_ESCR_EVENT_MASK 0x7e000000U
27#define P4_ESCR_EVENT_SHIFT 25 28#define P4_ESCR_EVENT_SHIFT 25
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h
index 6c22bf353f26..725b77831993 100644
--- a/arch/x86/include/asm/smpboot_hooks.h
+++ b/arch/x86/include/asm/smpboot_hooks.h
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
34 */ 34 */
35 CMOS_WRITE(0, 0xf); 35 CMOS_WRITE(0, 0xf);
36 36
37 *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; 37 *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
38} 38}
39 39
40static inline void __init smpboot_setup_io_apic(void) 40static inline void __init smpboot_setup_io_apic(void)
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index ce1d54c8a433..3e094af443c3 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -176,7 +176,7 @@ struct bau_msg_payload {
176struct bau_msg_header { 176struct bau_msg_header {
177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ 177 unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */
178 /* bits 5:0 */ 178 /* bits 5:0 */
179 unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ 179 unsigned int base_dest_nodeid:15; /* nasid of the */
180 /* bits 20:6 */ /* first bit in uvhub map */ 180 /* bits 20:6 */ /* first bit in uvhub map */
181 unsigned int command:8; /* message type */ 181 unsigned int command:8; /* message type */
182 /* bits 28:21 */ 182 /* bits 28:21 */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index a2c512175395..9a966c579af5 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
72int acpi_sci_override_gsi __initdata; 72int acpi_sci_override_gsi __initdata;
73int acpi_skip_timer_override __initdata; 73int acpi_skip_timer_override __initdata;
74int acpi_use_timer_override __initdata; 74int acpi_use_timer_override __initdata;
75int acpi_fix_pin2_polarity __initdata;
75 76
76#ifdef CONFIG_X86_LOCAL_APIC 77#ifdef CONFIG_X86_LOCAL_APIC
77static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; 78static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
415 return 0; 416 return 0;
416 } 417 }
417 418
418 if (acpi_skip_timer_override && 419 if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
419 intsrc->source_irq == 0 && intsrc->global_irq == 2) { 420 if (acpi_skip_timer_override) {
420 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); 421 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
421 return 0; 422 return 0;
423 }
424 if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
425 intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
426 printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
427 }
422 } 428 }
423 429
424 mp_override_legacy_irq(intsrc->source_irq, 430 mp_override_legacy_irq(intsrc->source_irq,
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 123608531c8f..7038b95d363f 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -671,7 +671,7 @@ void __kprobes text_poke_smp_batch(struct text_poke_param *params, int n)
671 671
672 atomic_set(&stop_machine_first, 1); 672 atomic_set(&stop_machine_first, 1);
673 wrote_text = 0; 673 wrote_text = 0;
674 stop_machine(stop_machine_text_poke, (void *)&tpp, NULL); 674 __stop_machine(stop_machine_text_poke, (void *)&tpp, NULL);
675} 675}
676 676
677#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL) 677#if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 51ef31a89be9..51d4e1663066 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
284 memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); 284 memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
285 285
286 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { 286 if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
287 apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; 287 adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
288 global_clock_event = &adev->evt; 288 global_clock_event = &adev->evt;
289 printk(KERN_DEBUG "%s clockevent registered as global\n", 289 printk(KERN_DEBUG "%s clockevent registered as global\n",
290 global_clock_event->name); 290 global_clock_event->name);
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 1390cf985afd..306386fbc105 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1403,12 +1403,17 @@ void __cpuinit end_local_APIC_setup(void)
1403#endif 1403#endif
1404 1404
1405 apic_pm_activate(); 1405 apic_pm_activate();
1406}
1407
1408void __init bsp_end_local_APIC_setup(void)
1409{
1410 end_local_APIC_setup();
1406 1411
1407 /* 1412 /*
1408 * Now that local APIC setup is completed for BP, configure the fault 1413 * Now that local APIC setup is completed for BP, configure the fault
1409 * handling for interrupt remapping. 1414 * handling for interrupt remapping.
1410 */ 1415 */
1411 if (!smp_processor_id() && intr_remapping_enabled) 1416 if (intr_remapping_enabled)
1412 enable_drhd_fault_handling(); 1417 enable_drhd_fault_handling();
1413 1418
1414} 1419}
@@ -1778,7 +1783,7 @@ int __init APIC_init_uniprocessor(void)
1778 enable_IO_APIC(); 1783 enable_IO_APIC();
1779#endif 1784#endif
1780 1785
1781 end_local_APIC_setup(); 1786 bsp_end_local_APIC_setup();
1782 1787
1783#ifdef CONFIG_X86_IO_APIC 1788#ifdef CONFIG_X86_IO_APIC
1784 if (smp_found_config && !skip_ioapic_setup && nr_ioapics) 1789 if (smp_found_config && !skip_ioapic_setup && nr_ioapics)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 697dc34b7b87..ca9e2a3545a9 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -4002,6 +4002,9 @@ int mp_find_ioapic(u32 gsi)
4002{ 4002{
4003 int i = 0; 4003 int i = 0;
4004 4004
4005 if (nr_ioapics == 0)
4006 return -1;
4007
4005 /* Find the IOAPIC that manages this GSI. */ 4008 /* Find the IOAPIC that manages this GSI. */
4006 for (i = 0; i < nr_ioapics; i++) { 4009 for (i = 0; i < nr_ioapics; i++) {
4007 if ((gsi >= mp_gsi_routing[i].gsi_base) 4010 if ((gsi >= mp_gsi_routing[i].gsi_base)
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c
index 13a389179514..452932d34730 100644
--- a/arch/x86/kernel/check.c
+++ b/arch/x86/kernel/check.c
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void)
106 addr += size; 106 addr += size;
107 } 107 }
108 108
109 printk(KERN_INFO "Scanning %d areas for low memory corruption\n", 109 if (num_scan_areas)
110 num_scan_areas); 110 printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas);
111} 111}
112 112
113 113
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy)
143{ 143{
144 check_for_bios_corruption(); 144 check_for_bios_corruption();
145 schedule_delayed_work(&bios_check_work, 145 schedule_delayed_work(&bios_check_work,
146 round_jiffies_relative(corruption_check_period*HZ)); 146 round_jiffies_relative(corruption_check_period*HZ));
147} 147}
148 148
149static int start_periodic_check_for_corruption(void) 149static int start_periodic_check_for_corruption(void)
150{ 150{
151 if (!memory_corruption_check || corruption_check_period == 0) 151 if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0)
152 return 0; 152 return 0;
153 153
154 printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", 154 printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n",
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index bd1cac747f67..52c93648e492 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
158{ 158{
159 if (c->x86 == 0x06) { 159 if (c->x86 == 0x06) {
160 if (cpu_has(c, X86_FEATURE_EST)) 160 if (cpu_has(c, X86_FEATURE_EST))
161 printk(KERN_WARNING PFX "Warning: EST-capable CPU " 161 printk_once(KERN_WARNING PFX "Warning: EST-capable "
162 "detected. The acpi-cpufreq module offers " 162 "CPU detected. The acpi-cpufreq module offers "
163 "voltage scaling in addition of frequency " 163 "voltage scaling in addition to frequency "
164 "scaling. You should use that instead of " 164 "scaling. You should use that instead of "
165 "p4-clockmod, if possible.\n"); 165 "p4-clockmod, if possible.\n");
166 switch (c->x86_model) { 166 switch (c->x86_model) {
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4f6f679f2799..4a5a42b842ad 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu)
195cmd_incomplete: 195cmd_incomplete:
196 iowrite16(0, &pcch_hdr->status); 196 iowrite16(0, &pcch_hdr->status);
197 spin_unlock(&pcc_lock); 197 spin_unlock(&pcc_lock);
198 return -EINVAL; 198 return 0;
199} 199}
200 200
201static int pcc_cpufreq_target(struct cpufreq_policy *policy, 201static int pcc_cpufreq_target(struct cpufreq_policy *policy,
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 35c7e65e59be..c567dec854f6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = {
1537static int __cpuinit powernowk8_init(void) 1537static int __cpuinit powernowk8_init(void)
1538{ 1538{
1539 unsigned int i, supported_cpus = 0, cpu; 1539 unsigned int i, supported_cpus = 0, cpu;
1540 int rv;
1540 1541
1541 for_each_online_cpu(i) { 1542 for_each_online_cpu(i) {
1542 int rc; 1543 int rc;
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void)
1555 1556
1556 cpb_capable = true; 1557 cpb_capable = true;
1557 1558
1558 register_cpu_notifier(&cpb_nb);
1559
1560 msrs = msrs_alloc(); 1559 msrs = msrs_alloc();
1561 if (!msrs) { 1560 if (!msrs) {
1562 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); 1561 printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
1563 return -ENOMEM; 1562 return -ENOMEM;
1564 } 1563 }
1565 1564
1565 register_cpu_notifier(&cpb_nb);
1566
1566 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); 1567 rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
1567 1568
1568 for_each_cpu(cpu, cpu_online_mask) { 1569 for_each_cpu(cpu, cpu_online_mask) {
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void)
1574 (cpb_enabled ? "on" : "off")); 1575 (cpb_enabled ? "on" : "off"));
1575 } 1576 }
1576 1577
1577 return cpufreq_register_driver(&cpufreq_amd64_driver); 1578 rv = cpufreq_register_driver(&cpufreq_amd64_driver);
1579 if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
1580 unregister_cpu_notifier(&cpb_nb);
1581 msrs_free(msrs);
1582 msrs = NULL;
1583 }
1584 return rv;
1578} 1585}
1579 1586
1580/* driver entry point for term */ 1587/* driver entry point for term */
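The powernow-k8 hunk reorders setup so the CPU notifier is registered only after the MSR buffer allocation has succeeded, and unwinds both if cpufreq_register_driver() fails. The general shape is acquire-in-order, release-in-reverse-order on failure; a sketch with placeholder helpers standing in for msrs_alloc()/register_cpu_notifier()/cpufreq_register_driver():

#include <linux/errno.h>

extern void *acquire_a(void);		/* placeholder, e.g. msrs_alloc() */
extern void release_a(void *a);
extern int register_b(void);		/* placeholder, e.g. register_cpu_notifier() */
extern void unregister_b(void);
extern int register_c(void);		/* placeholder, e.g. cpufreq_register_driver() */

static int sketch_init(void)
{
	void *a;
	int err;

	a = acquire_a();
	if (!a)
		return -ENOMEM;

	err = register_b();
	if (err)
		goto out_release_a;

	err = register_c();		/* last step: makes the driver visible */
	if (err)
		goto out_unregister_b;

	return 0;

out_unregister_b:
	unregister_b();			/* unwind in reverse order */
out_release_a:
	release_a(a);
	return err;
}
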
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c
index f7a0993c1e7c..ff751a9f182b 100644
--- a/arch/x86/kernel/cpu/perf_event_p4.c
+++ b/arch/x86/kernel/cpu/perf_event_p4.c
@@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc)
770 return 1; 770 return 1;
771 } 771 }
772 772
773 /* it might be unflagged overflow */ 773 /*
774 rdmsrl(hwc->event_base + hwc->idx, v); 774 * In some circumstances the overflow might issue an NMI but did
775 if (!(v & ARCH_P4_CNTRVAL_MASK)) 775 * not set P4_CCCR_OVF bit. Because a counter holds a negative value
776 * we simply check for high bit being set, if it's cleared it means
777 * the counter has reached zero value and continued counting before
778 * real NMI signal was received:
779 */
780 if (!(v & ARCH_P4_UNFLAGGED_BIT))
776 return 1; 781 return 1;
777 782
778 return 0; 783 return 0;
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 76b8cd953dee..9efbdcc56425 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
143 143
144static u32 __init ati_sbx00_rev(int num, int slot, int func) 144static u32 __init ati_sbx00_rev(int num, int slot, int func)
145{ 145{
146 u32 old, d; 146 u32 d;
147 147
148 d = read_pci_config(num, slot, func, 0x70);
149 old = d;
150 d &= ~(1<<8);
151 write_pci_config(num, slot, func, 0x70, d);
152 d = read_pci_config(num, slot, func, 0x8); 148 d = read_pci_config(num, slot, func, 0x8);
153 d &= 0xff; 149 d &= 0xff;
154 write_pci_config(num, slot, func, 0x70, old);
155 150
156 return d; 151 return d;
157} 152}
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func)
160{ 155{
161 u32 d, rev; 156 u32 d, rev;
162 157
163 if (acpi_use_timer_override)
164 return;
165
166 rev = ati_sbx00_rev(num, slot, func); 158 rev = ati_sbx00_rev(num, slot, func);
159 if (rev >= 0x40)
160 acpi_fix_pin2_polarity = 1;
161
167 if (rev > 0x13) 162 if (rev > 0x13)
168 return; 163 return;
169 164
165 if (acpi_use_timer_override)
166 return;
167
170 /* check for IRQ0 interrupt swap */ 168 /* check for IRQ0 interrupt swap */
171 d = read_pci_config(num, slot, func, 0x64); 169 d = read_pci_config(num, slot, func, 0x64);
172 if (!(d & (1<<14))) 170 if (!(d & (1<<14)))
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 52945da52a94..387b6a0c9e81 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -367,7 +367,8 @@ void fixup_irqs(void)
367 if (irr & (1 << (vector % 32))) { 367 if (irr & (1 << (vector % 32))) {
368 irq = __this_cpu_read(vector_irq[vector]); 368 irq = __this_cpu_read(vector_irq[vector]);
369 369
370 data = irq_get_irq_data(irq); 370 desc = irq_to_desc(irq);
371 data = &desc->irq_data;
371 raw_spin_lock(&desc->lock); 372 raw_spin_lock(&desc->lock);
372 if (data->chip->irq_retrigger) 373 if (data->chip->irq_retrigger)
373 data->chip->irq_retrigger(data); 374 data->chip->irq_retrigger(data);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index e764fc05d700..ff4554198981 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -92,21 +92,31 @@ void show_regs(struct pt_regs *regs)
92 92
93void show_regs_common(void) 93void show_regs_common(void)
94{ 94{
95 const char *board, *product; 95 const char *vendor, *product, *board;
96 96
97 board = dmi_get_system_info(DMI_BOARD_NAME); 97 vendor = dmi_get_system_info(DMI_SYS_VENDOR);
98 if (!board) 98 if (!vendor)
99 board = ""; 99 vendor = "";
100 product = dmi_get_system_info(DMI_PRODUCT_NAME); 100 product = dmi_get_system_info(DMI_PRODUCT_NAME);
101 if (!product) 101 if (!product)
102 product = ""; 102 product = "";
103 103
104 /* Board Name is optional */
105 board = dmi_get_system_info(DMI_BOARD_NAME);
106
104 printk(KERN_CONT "\n"); 107 printk(KERN_CONT "\n");
105 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", 108 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
106 current->pid, current->comm, print_tainted(), 109 current->pid, current->comm, print_tainted(),
107 init_utsname()->release, 110 init_utsname()->release,
108 (int)strcspn(init_utsname()->version, " "), 111 (int)strcspn(init_utsname()->version, " "),
109 init_utsname()->version, board, product); 112 init_utsname()->version);
113 printk(KERN_CONT " ");
114 printk(KERN_CONT "%s %s", vendor, product);
115 if (board) {
116 printk(KERN_CONT "/");
117 printk(KERN_CONT "%s", board);
118 }
119 printk(KERN_CONT "\n");
110} 120}
111 121
112void flush_thread(void) 122void flush_thread(void)
@@ -506,7 +516,7 @@ static void poll_idle(void)
506#define MWAIT_ECX_EXTENDED_INFO 0x01 516#define MWAIT_ECX_EXTENDED_INFO 0x01
507#define MWAIT_EDX_C1 0xf0 517#define MWAIT_EDX_C1 0xf0
508 518
509int __cpuinit mwait_usable(const struct cpuinfo_x86 *c) 519int mwait_usable(const struct cpuinfo_x86 *c)
510{ 520{
511 u32 eax, ebx, ecx, edx; 521 u32 eax, ebx, ecx, edx;
512 522
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index fc7aae1e2bc7..715037caeb43 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
285 DMI_MATCH(DMI_BOARD_NAME, "P4S800"), 285 DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
286 }, 286 },
287 }, 287 },
288 { /* Handle problems with rebooting on VersaLogic Menlow boards */
289 .callback = set_bios_reboot,
290 .ident = "VersaLogic Menlow based board",
291 .matches = {
292 DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
293 DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
294 },
295 },
288 { } 296 { }
289}; 297};
290 298
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe4837136e02..a9d805f3920c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -999,7 +999,7 @@ static int __init smp_sanity_check(unsigned max_cpus)
999 999
1000 connect_bsp_APIC(); 1000 connect_bsp_APIC();
1001 setup_local_APIC(); 1001 setup_local_APIC();
1002 end_local_APIC_setup(); 1002 bsp_end_local_APIC_setup();
1003 return -1; 1003 return -1;
1004 } 1004 }
1005 1005
@@ -1074,7 +1074,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1074 if (!skip_ioapic_setup && nr_ioapics) 1074 if (!skip_ioapic_setup && nr_ioapics)
1075 enable_IO_APIC(); 1075 enable_IO_APIC();
1076 1076
1077 end_local_APIC_setup(); 1077 bsp_end_local_APIC_setup();
1078 1078
1079 if (apic->setup_portio_remap) 1079 if (apic->setup_portio_remap)
1080 apic->setup_portio_remap(); 1080 apic->setup_portio_remap();
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 54ce246a383e..63fec1531e89 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
2777 kvm_register_write(&svm->vcpu, reg, val); 2777 kvm_register_write(&svm->vcpu, reg, val);
2778 } 2778 }
2779 2779
2780 skip_emulated_instruction(&svm->vcpu);
2781
2780 return 1; 2782 return 1;
2781} 2783}
2782 2784
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 7d90ceb882a4..20e3f8702d1e 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void)
229 for (address = VMALLOC_START & PMD_MASK; 229 for (address = VMALLOC_START & PMD_MASK;
230 address >= TASK_SIZE && address < FIXADDR_TOP; 230 address >= TASK_SIZE && address < FIXADDR_TOP;
231 address += PMD_SIZE) { 231 address += PMD_SIZE) {
232
233 unsigned long flags;
234 struct page *page; 232 struct page *page;
235 233
236 spin_lock_irqsave(&pgd_lock, flags); 234 spin_lock(&pgd_lock);
237 list_for_each_entry(page, &pgd_list, lru) { 235 list_for_each_entry(page, &pgd_list, lru) {
238 spinlock_t *pgt_lock; 236 spinlock_t *pgt_lock;
239 pmd_t *ret; 237 pmd_t *ret;
240 238
239 /* the pgt_lock only for Xen */
241 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 240 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
242 241
243 spin_lock(pgt_lock); 242 spin_lock(pgt_lock);
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void)
247 if (!ret) 246 if (!ret)
248 break; 247 break;
249 } 248 }
250 spin_unlock_irqrestore(&pgd_lock, flags); 249 spin_unlock(&pgd_lock);
251 } 250 }
252} 251}
253 252
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
828 unsigned long address, unsigned int fault) 827 unsigned long address, unsigned int fault)
829{ 828{
830 if (fault & VM_FAULT_OOM) { 829 if (fault & VM_FAULT_OOM) {
830 /* Kernel mode? Handle exceptions or die: */
831 if (!(error_code & PF_USER)) {
832 up_read(&current->mm->mmap_sem);
833 no_context(regs, error_code, address);
834 return;
835 }
836
831 out_of_memory(regs, error_code, address); 837 out_of_memory(regs, error_code, address);
832 } else { 838 } else {
833 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| 839 if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index c8813aa39740..a08a62cb136e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end)
105 105
106 for (address = start; address <= end; address += PGDIR_SIZE) { 106 for (address = start; address <= end; address += PGDIR_SIZE) {
107 const pgd_t *pgd_ref = pgd_offset_k(address); 107 const pgd_t *pgd_ref = pgd_offset_k(address);
108 unsigned long flags;
109 struct page *page; 108 struct page *page;
110 109
111 if (pgd_none(*pgd_ref)) 110 if (pgd_none(*pgd_ref))
112 continue; 111 continue;
113 112
114 spin_lock_irqsave(&pgd_lock, flags); 113 spin_lock(&pgd_lock);
115 list_for_each_entry(page, &pgd_list, lru) { 114 list_for_each_entry(page, &pgd_list, lru) {
116 pgd_t *pgd; 115 pgd_t *pgd;
117 spinlock_t *pgt_lock; 116 spinlock_t *pgt_lock;
118 117
119 pgd = (pgd_t *)page_address(page) + pgd_index(address); 118 pgd = (pgd_t *)page_address(page) + pgd_index(address);
119 /* the pgt_lock only for Xen */
120 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; 120 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
121 spin_lock(pgt_lock); 121 spin_lock(pgt_lock);
122 122
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
128 128
129 spin_unlock(pgt_lock); 129 spin_unlock(pgt_lock);
130 } 130 }
131 spin_unlock_irqrestore(&pgd_lock, flags); 131 spin_unlock(&pgd_lock);
132 } 132 }
133} 133}
134 134
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index d343b3c81f3c..90825f2eb0f4 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM];
57 57
58void update_page_count(int level, unsigned long pages) 58void update_page_count(int level, unsigned long pages)
59{ 59{
60 unsigned long flags;
61
62 /* Protect against CPA */ 60 /* Protect against CPA */
63 spin_lock_irqsave(&pgd_lock, flags); 61 spin_lock(&pgd_lock);
64 direct_pages_count[level] += pages; 62 direct_pages_count[level] += pages;
65 spin_unlock_irqrestore(&pgd_lock, flags); 63 spin_unlock(&pgd_lock);
66} 64}
67 65
68static void split_page_count(int level) 66static void split_page_count(int level)
@@ -394,7 +392,7 @@ static int
394try_preserve_large_page(pte_t *kpte, unsigned long address, 392try_preserve_large_page(pte_t *kpte, unsigned long address,
395 struct cpa_data *cpa) 393 struct cpa_data *cpa)
396{ 394{
397 unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; 395 unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
398 pte_t new_pte, old_pte, *tmp; 396 pte_t new_pte, old_pte, *tmp;
399 pgprot_t old_prot, new_prot, req_prot; 397 pgprot_t old_prot, new_prot, req_prot;
400 int i, do_split = 1; 398 int i, do_split = 1;
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
403 if (cpa->force_split) 401 if (cpa->force_split)
404 return 1; 402 return 1;
405 403
406 spin_lock_irqsave(&pgd_lock, flags); 404 spin_lock(&pgd_lock);
407 /* 405 /*
408 * Check for races, another CPU might have split this page 406 * Check for races, another CPU might have split this page
409 * up already: 407 * up already:
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
498 } 496 }
499 497
500out_unlock: 498out_unlock:
501 spin_unlock_irqrestore(&pgd_lock, flags); 499 spin_unlock(&pgd_lock);
502 500
503 return do_split; 501 return do_split;
504} 502}
505 503
506static int split_large_page(pte_t *kpte, unsigned long address) 504static int split_large_page(pte_t *kpte, unsigned long address)
507{ 505{
508 unsigned long flags, pfn, pfninc = 1; 506 unsigned long pfn, pfninc = 1;
509 unsigned int i, level; 507 unsigned int i, level;
510 pte_t *pbase, *tmp; 508 pte_t *pbase, *tmp;
511 pgprot_t ref_prot; 509 pgprot_t ref_prot;
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
519 if (!base) 517 if (!base)
520 return -ENOMEM; 518 return -ENOMEM;
521 519
522 spin_lock_irqsave(&pgd_lock, flags); 520 spin_lock(&pgd_lock);
523 /* 521 /*
524 * Check for races, another CPU might have split this page 522 * Check for races, another CPU might have split this page
525 * up for us already: 523 * up for us already:
@@ -591,7 +589,7 @@ out_unlock:
591 */ 589 */
592 if (base) 590 if (base)
593 __free_page(base); 591 __free_page(base);
594 spin_unlock_irqrestore(&pgd_lock, flags); 592 spin_unlock(&pgd_lock);
595 593
596 return 0; 594 return 0;
597} 595}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 500242d3c96d..0113d19c8aa6 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
121 121
122static void pgd_dtor(pgd_t *pgd) 122static void pgd_dtor(pgd_t *pgd)
123{ 123{
124 unsigned long flags; /* can be called from interrupt context */
125
126 if (SHARED_KERNEL_PMD) 124 if (SHARED_KERNEL_PMD)
127 return; 125 return;
128 126
129 spin_lock_irqsave(&pgd_lock, flags); 127 spin_lock(&pgd_lock);
130 pgd_list_del(pgd); 128 pgd_list_del(pgd);
131 spin_unlock_irqrestore(&pgd_lock, flags); 129 spin_unlock(&pgd_lock);
132} 130}
133 131
134/* 132/*
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
260{ 258{
261 pgd_t *pgd; 259 pgd_t *pgd;
262 pmd_t *pmds[PREALLOCATED_PMDS]; 260 pmd_t *pmds[PREALLOCATED_PMDS];
263 unsigned long flags;
264 261
265 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); 262 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
266 263
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
280 * respect to anything walking the pgd_list, so that they 277 * respect to anything walking the pgd_list, so that they
281 * never see a partially populated pgd. 278 * never see a partially populated pgd.
282 */ 279 */
283 spin_lock_irqsave(&pgd_lock, flags); 280 spin_lock(&pgd_lock);
284 281
285 pgd_ctor(mm, pgd); 282 pgd_ctor(mm, pgd);
286 pgd_prepopulate_pmd(mm, pgd, pmds); 283 pgd_prepopulate_pmd(mm, pgd, pmds);
287 284
288 spin_unlock_irqrestore(&pgd_lock, flags); 285 spin_unlock(&pgd_lock);
289 286
290 return pgd; 287 return pgd;
291 288
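This is one of several hunks in the commit (fault.c, init_64.c, pageattr.c, pgtable.c, and the Xen mmu.c changes below) that convert pgd_lock from spin_lock_irqsave() to plain spin_lock(); the removed "can be called from interrupt context" comment suggests the lock is no longer taken from IRQ context, which is what makes the plain form safe. For reference, a minimal sketch of the two locking forms:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sketch_lock);

/* Never taken from interrupt context: plain lock/unlock is sufficient. */
static void touch_plain(void)
{
	spin_lock(&sketch_lock);
	/* ... critical section ... */
	spin_unlock(&sketch_lock);
}

/* Also taken from an IRQ handler: interrupts must be disabled locally,
 * otherwise the handler could deadlock against the holder on this CPU. */
static void touch_irqsafe(void)
{
	unsigned long flags;

	spin_lock_irqsave(&sketch_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&sketch_lock, flags);
}
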
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c
index 85b68ef5e809..9260b3eb18d4 100644
--- a/arch/x86/pci/ce4100.c
+++ b/arch/x86/pci/ce4100.c
@@ -34,6 +34,7 @@
34#include <linux/pci.h> 34#include <linux/pci.h>
35#include <linux/init.h> 35#include <linux/init.h>
36 36
37#include <asm/ce4100.h>
37#include <asm/pci_x86.h> 38#include <asm/pci_x86.h>
38 39
39struct sim_reg { 40struct sim_reg {
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = {
306 .write = ce4100_conf_write, 307 .write = ce4100_conf_write,
307}; 308};
308 309
309static int __init ce4100_pci_init(void) 310int __init ce4100_pci_init(void)
310{ 311{
311 init_sim_regs(); 312 init_sim_regs();
312 raw_pci_ops = &ce4100_pci_conf; 313 raw_pci_ops = &ce4100_pci_conf;
313 return 0; 314 /* Indicate caller that it should invoke pci_legacy_init() */
315 return 1;
314} 316}
315subsys_initcall(ce4100_pci_init);
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index d2c0d51a7178..cd6f184c3b3f 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -15,6 +15,7 @@
 #include <linux/serial_reg.h>
 #include <linux/serial_8250.h>
 
+#include <asm/ce4100.h>
 #include <asm/setup.h>
 #include <asm/io.h>
 
@@ -129,4 +130,5 @@ void __init x86_ce4100_early_setup(void)
 	x86_init.resources.probe_roms = x86_init_noop;
 	x86_init.mpparse.get_smp_config = x86_init_uint_noop;
 	x86_init.mpparse.find_smp_config = sdv_find_smp_config;
+	x86_init.pci.init = ce4100_pci_init;
 }
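
Taken together, the two CE4100 hunks replace a subsys_initcall with a hook in the x86_init.pci.init slot, whose return value tells the caller whether the legacy PCI init should still run (per the comment added above, nonzero means "also invoke pci_legacy_init()"). A hedged sketch of that dispatch convention; the structure and function names below are simplified stand-ins, not the x86 code:

#include <stddef.h>

struct pci_init_hooks {
	/* Returns nonzero when the generic/legacy init should run as well. */
	int (*init)(void);
};

static int platform_pci_init(void)
{
	/* install platform-specific config-space accessors here */
	return 1;	/* ask the caller to run the legacy path too */
}

static void legacy_pci_init(void)
{
	/* generic fallback initialization */
}

static struct pci_init_hooks hooks = { .init = platform_pci_init };

static void pci_init_dispatch(void)
{
	if (hooks.init == NULL || hooks.init())
		legacy_pci_init();
}
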
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
index dab874647530..044bda5b3174 100644
--- a/arch/x86/platform/olpc/olpc_dt.c
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size)
 	 * wasted bootmem) and hand off chunks of it to callers.
 	 */
 	res = alloc_bootmem(chunk_size);
-	if (!res)
-		return NULL;
+	BUG_ON(!res);
 	prom_early_allocated += chunk_size;
 	memset(res, 0, chunk_size);
 	free_mem = chunk_size;
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index df58e9cad96a..a7b38d35c29a 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode)
 		memset(bd2, 0, sizeof(struct bau_desc));
 		bd2->header.sw_ack_flag = 1;
 		/*
-		 * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub
+		 * base_dest_nodeid is the nasid of the first uvhub
 		 * in the partition. The bit map will indicate uvhub numbers,
 		 * which are 0-N in a partition. Pnodes are unique system-wide.
 		 */
-		bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1;
+		bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
 		bd2->header.dest_subnodeid = 0x10;	/* the LB */
 		bd2->header.command = UV_NET_ENDPOINT_INTD;
 		bd2->header.int_both = 1;
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 13783a18c6f1..77f3228cfc8d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -986,10 +986,9 @@ static void xen_pgd_pin(struct mm_struct *mm)
  */
 void xen_mm_pin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (!PagePinned(page)) {
@@ -998,7 +997,7 @@ void xen_mm_pin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 /*
@@ -1099,10 +1098,9 @@ static void xen_pgd_unpin(struct mm_struct *mm)
  */
 void xen_mm_unpin_all(void)
 {
-	unsigned long flags;
 	struct page *page;
 
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock(&pgd_lock);
 
 	list_for_each_entry(page, &pgd_list, lru) {
 		if (PageSavePinned(page)) {
@@ -1112,7 +1110,7 @@ void xen_mm_unpin_all(void)
 		}
 	}
 
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock(&pgd_lock);
 }
 
 void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
diff --git a/block/blk-core.c b/block/blk-core.c
index 2f4002f79a24..518dd423a5fe 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q)
 	WARN_ON(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 }
 EXPORT_SYMBOL(blk_start_queue);
 
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue);
 /**
  * __blk_run_queue - run a single device queue
  * @q:	The queue to run
+ * @force_kblockd: Don't run @q->request_fn directly.  Use kblockd.
  *
  * Description:
  *    See @blk_run_queue. This variant must be called with the queue lock
  *    held and interrupts disabled.
  *
  */
-void __blk_run_queue(struct request_queue *q)
+void __blk_run_queue(struct request_queue *q, bool force_kblockd)
 {
 	blk_remove_plug(q);
 
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q)
 	 * Only recurse once to avoid overrunning the stack, let the unplug
 	 * handling reinvoke the handler shortly if we already got there.
 	 */
-	if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
+	if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) {
 		q->request_fn(q);
 		queue_flag_clear(QUEUE_FLAG_REENTER, q);
 	} else {
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q)
 	unsigned long flags;
 
 	spin_lock_irqsave(q->queue_lock, flags);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_run_queue);
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq,
 
 	drive_stat_acct(rq, 1);
 	__elv_add_request(q, rq, where, 0);
-	__blk_run_queue(q);
+	__blk_run_queue(q, false);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 }
 EXPORT_SYMBOL(blk_insert_request);
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-			struct delayed_work *dwork, unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, dwork, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
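
The signature change above threads a force_kblockd flag through every caller: submission-path callers pass false and keep the direct ->request_fn() dispatch, while completion-path callers (see the blk-flush.c hunk below) pass true so the work is always bounced to the kblockd workqueue. A small self-contained model of that decision; the names are illustrative, not the block-layer internals:

#include <stdbool.h>
#include <stdio.h>

static bool reentered;

static void request_fn(void)   { puts("request_fn: issue I/O to the driver"); }
static void kick_kblockd(void) { puts("kblockd: run request_fn from a worker"); }

static void run_queue(bool force_kblockd)
{
	/* Call into the driver only when allowed and not already inside it. */
	if (!force_kblockd && !reentered) {
		reentered = true;
		request_fn();
		reentered = false;
	} else {
		kick_kblockd();
	}
}

int main(void)
{
	run_queue(false);	/* submission path: direct dispatch */
	run_queue(true);	/* completion path: never call the driver directly */
	return 0;
}
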
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 54b123d6563e..b27d0208611b 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q,
 
 	/*
 	 * Moving a request silently to empty queue_head may stall the
-	 * queue.  Kick the queue in those cases.
+	 * queue.  Kick the queue in those cases.  This function is called
+	 * from request completion path and calling directly into
+	 * request_fn may confuse the driver.  Always use kblockd.
 	 */
 	if (was_empty && next_rq)
-		__blk_run_queue(q);
+		__blk_run_queue(q, true);
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q)
 		BUG();
 	}
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 	return rq;
 }
 
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 1a320d2406b0..bd3e8df4d5e2 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -109,7 +109,6 @@ struct bio_batch
 	atomic_t		done;
 	unsigned long		flags;
 	struct completion	*wait;
-	bio_end_io_t		*end_io;
 };
 
 static void bio_batch_end_io(struct bio *bio, int err)
@@ -122,17 +121,14 @@ static void bio_batch_end_io(struct bio *bio, int err)
 		else
 			clear_bit(BIO_UPTODATE, &bb->flags);
 	}
-	if (bb) {
-		if (bb->end_io)
-			bb->end_io(bio, err);
-		atomic_inc(&bb->done);
-		complete(bb->wait);
-	}
+	if (bb)
+		if (atomic_dec_and_test(&bb->done))
+			complete(bb->wait);
 	bio_put(bio);
 }
 
 /**
- * blkdev_issue_zeroout generate number of zero filed write bios
+ * blkdev_issue_zeroout - generate number of zero filed write bios
  * @bdev:	blockdev to issue
  * @sector:	start sector
  * @nr_sects:	number of sectors to write
@@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
-	unsigned int sz, issued = 0;
+	unsigned int sz;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
-	atomic_set(&bb.done, 0);
+	atomic_set(&bb.done, 1);
 	bb.flags = 1 << BIO_UPTODATE;
 	bb.wait = &wait;
-	bb.end_io = NULL;
 
 submit:
 	ret = 0;
@@ -185,12 +180,12 @@ submit:
 			break;
 		}
 		ret = 0;
-		issued++;
+		atomic_inc(&bb.done);
 		submit_bio(WRITE, bio);
 	}
 
 	/* Wait for bios in-flight */
-	while (issued != atomic_read(&bb.done))
+	if (!atomic_dec_and_test(&bb.done))
 		wait_for_completion(&wait);
 
 	if (!test_bit(BIO_UPTODATE, &bb.flags))
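
The rewritten blkdev_issue_zeroout() uses a common kernel idiom: initialize an atomic counter to 1, bump it once per submitted unit of work, drop one reference per completion, and have the submitter drop its own reference before deciding whether to sleep. Whichever decrement reaches zero fires the completion. A hedged sketch of the idiom with the actual bio submission elided and illustrative names:

#include <linux/atomic.h>
#include <linux/completion.h>

struct batch {
	atomic_t		pending;	/* 1 for the submitter + 1 per unit of work */
	struct completion	*done;
};

/* Called once per finished unit of work (e.g. from a bio end_io handler). */
static void batch_end_one(struct batch *b)
{
	if (atomic_dec_and_test(&b->pending))
		complete(b->done);
}

static void batch_run(unsigned int nr_units)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct batch b;
	unsigned int i;

	atomic_set(&b.pending, 1);	/* the submitter's own reference */
	b.done = &done;

	for (i = 0; i < nr_units; i++) {
		atomic_inc(&b.pending);
		/* submit unit i; its completion callback calls batch_end_one(&b) */
	}

	/* Drop the submitter's reference; sleep only if work is still in flight. */
	if (!atomic_dec_and_test(&b.pending))
		wait_for_completion(&done);
}
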
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a89043a3caa4..e36cc10a346c 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -20,6 +20,11 @@ static int throtl_quantum = 32;
 /* Throttling is performed over 100ms slice and after that slice is renewed */
 static unsigned long throtl_slice = HZ/10;	/* 100 ms */
 
+/* A workqueue to queue throttle related work */
+static struct workqueue_struct *kthrotld_workqueue;
+static void throtl_schedule_delayed_work(struct throtl_data *td,
+				unsigned long delay);
+
 struct throtl_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td)
345 update_min_dispatch_time(st); 350 update_min_dispatch_time(st);
346 351
347 if (time_before_eq(st->min_disptime, jiffies)) 352 if (time_before_eq(st->min_disptime, jiffies))
348 throtl_schedule_delayed_work(td->queue, 0); 353 throtl_schedule_delayed_work(td, 0);
349 else 354 else
350 throtl_schedule_delayed_work(td->queue, 355 throtl_schedule_delayed_work(td, (st->min_disptime - jiffies));
351 (st->min_disptime - jiffies));
352} 356}
353 357
354static inline void 358static inline void
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work)
815} 819}
816 820
817/* Call with queue lock held */ 821/* Call with queue lock held */
818void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) 822static void
823throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay)
819{ 824{
820 825
821 struct throtl_data *td = q->td;
822 struct delayed_work *dwork = &td->throtl_work; 826 struct delayed_work *dwork = &td->throtl_work;
823 827
824 if (total_nr_queued(td) > 0) { 828 if (total_nr_queued(td) > 0) {
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay)
827 * Cancel that and schedule a new one. 831 * Cancel that and schedule a new one.
828 */ 832 */
829 __cancel_delayed_work(dwork); 833 __cancel_delayed_work(dwork);
830 kblockd_schedule_delayed_work(q, dwork, delay); 834 queue_delayed_work(kthrotld_workqueue, dwork, delay);
831 throtl_log(td, "schedule work. delay=%lu jiffies=%lu", 835 throtl_log(td, "schedule work. delay=%lu jiffies=%lu",
832 delay, jiffies); 836 delay, jiffies);
833 } 837 }
834} 838}
835EXPORT_SYMBOL(throtl_schedule_delayed_work);
836 839
837static void 840static void
838throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) 841throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg)
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key,
920 smp_mb__after_atomic_inc(); 923 smp_mb__after_atomic_inc();
921 924
922 /* Schedule a work now to process the limit change */ 925 /* Schedule a work now to process the limit change */
923 throtl_schedule_delayed_work(td->queue, 0); 926 throtl_schedule_delayed_work(td, 0);
924} 927}
925 928
926static void throtl_update_blkio_group_write_bps(void *key, 929static void throtl_update_blkio_group_write_bps(void *key,
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key,
934 smp_mb__before_atomic_inc(); 937 smp_mb__before_atomic_inc();
935 atomic_inc(&td->limits_changed); 938 atomic_inc(&td->limits_changed);
936 smp_mb__after_atomic_inc(); 939 smp_mb__after_atomic_inc();
937 throtl_schedule_delayed_work(td->queue, 0); 940 throtl_schedule_delayed_work(td, 0);
938} 941}
939 942
940static void throtl_update_blkio_group_read_iops(void *key, 943static void throtl_update_blkio_group_read_iops(void *key,
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key,
948 smp_mb__before_atomic_inc(); 951 smp_mb__before_atomic_inc();
949 atomic_inc(&td->limits_changed); 952 atomic_inc(&td->limits_changed);
950 smp_mb__after_atomic_inc(); 953 smp_mb__after_atomic_inc();
951 throtl_schedule_delayed_work(td->queue, 0); 954 throtl_schedule_delayed_work(td, 0);
952} 955}
953 956
954static void throtl_update_blkio_group_write_iops(void *key, 957static void throtl_update_blkio_group_write_iops(void *key,
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key,
962 smp_mb__before_atomic_inc(); 965 smp_mb__before_atomic_inc();
963 atomic_inc(&td->limits_changed); 966 atomic_inc(&td->limits_changed);
964 smp_mb__after_atomic_inc(); 967 smp_mb__after_atomic_inc();
965 throtl_schedule_delayed_work(td->queue, 0); 968 throtl_schedule_delayed_work(td, 0);
966} 969}
967 970
968void throtl_shutdown_timer_wq(struct request_queue *q) 971void throtl_shutdown_timer_wq(struct request_queue *q)
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q)
 
 static int __init throtl_init(void)
 {
+	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
+	if (!kthrotld_workqueue)
+		panic("Failed to create kthrotld\n");
+
 	blkio_policy_register(&blkio_policy_throtl);
 	return 0;
 }
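
With kblockd_schedule_delayed_work() removed from blk-core.c, the throttling code now owns its deferral path: a dedicated kthrotld workqueue created with WQ_MEM_RECLAIM so it can make forward progress under memory pressure. A minimal sketch of that setup with illustrative names:

#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/init.h>

static struct workqueue_struct *throttle_wq;
static struct delayed_work throttle_work;

static void throttle_work_fn(struct work_struct *work)
{
	/* dispatch the throttled requests here */
}

static int __init throttle_demo_init(void)
{
	/* WQ_MEM_RECLAIM guarantees at least one rescuer thread for this queue. */
	throttle_wq = alloc_workqueue("throttle_demo", WQ_MEM_RECLAIM, 0);
	if (!throttle_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&throttle_work, throttle_work_fn);
	/* later: queue_delayed_work(throttle_wq, &throttle_work, delay); */
	return 0;
}
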
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7be4c7959625..ea83a4f0c27d 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3355 cfqd->busy_queues > 1) { 3355 cfqd->busy_queues > 1) {
3356 cfq_del_timer(cfqd, cfqq); 3356 cfq_del_timer(cfqd, cfqq);
3357 cfq_clear_cfqq_wait_request(cfqq); 3357 cfq_clear_cfqq_wait_request(cfqq);
3358 __blk_run_queue(cfqd->queue); 3358 __blk_run_queue(cfqd->queue, false);
3359 } else { 3359 } else {
3360 cfq_blkiocg_update_idle_time_stats( 3360 cfq_blkiocg_update_idle_time_stats(
3361 &cfqq->cfqg->blkg); 3361 &cfqq->cfqg->blkg);
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
3370 * this new queue is RT and the current one is BE 3370 * this new queue is RT and the current one is BE
3371 */ 3371 */
3372 cfq_preempt_queue(cfqd, cfqq); 3372 cfq_preempt_queue(cfqd, cfqq);
3373 __blk_run_queue(cfqd->queue); 3373 __blk_run_queue(cfqd->queue, false);
3374 } 3374 }
3375} 3375}
3376 3376
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work)
3731 struct request_queue *q = cfqd->queue; 3731 struct request_queue *q = cfqd->queue;
3732 3732
3733 spin_lock_irq(q->queue_lock); 3733 spin_lock_irq(q->queue_lock);
3734 __blk_run_queue(cfqd->queue); 3734 __blk_run_queue(cfqd->queue, false);
3735 spin_unlock_irq(q->queue_lock); 3735 spin_unlock_irq(q->queue_lock);
3736} 3736}
3737 3737
diff --git a/block/elevator.c b/block/elevator.c
index 2569512830d3..236e93c1f46c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q)
602 */ 602 */
603 elv_drain_elevator(q); 603 elv_drain_elevator(q);
604 while (q->rq.elvpriv) { 604 while (q->rq.elvpriv) {
605 __blk_run_queue(q); 605 __blk_run_queue(q, false);
606 spin_unlock_irq(q->queue_lock); 606 spin_unlock_irq(q->queue_lock);
607 msleep(10); 607 msleep(10);
608 spin_lock_irq(q->queue_lock); 608 spin_lock_irq(q->queue_lock);
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
651 * with anything. There's no point in delaying queue 651 * with anything. There's no point in delaying queue
652 * processing. 652 * processing.
653 */ 653 */
654 __blk_run_queue(q); 654 __blk_run_queue(q, false);
655 break; 655 break;
656 656
657 case ELEVATOR_INSERT_SORT: 657 case ELEVATOR_INSERT_SORT:
diff --git a/block/genhd.c b/block/genhd.c
index 6a5b772aa201..cbf1112a885c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
 	struct block_device *bdev = bdget_disk(disk, partno);
 	if (bdev) {
 		fsync_bdev(bdev);
-		res = __invalidate_device(bdev);
+		res = __invalidate_device(bdev, true);
 		bdput(bdev);
 	}
 	return res;
diff --git a/block/ioctl.c b/block/ioctl.c
index 9049d460fa89..1124cd297263 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
294 return -EINVAL; 294 return -EINVAL;
295 if (get_user(n, (int __user *) arg)) 295 if (get_user(n, (int __user *) arg))
296 return -EFAULT; 296 return -EFAULT;
297 if (!(mode & FMODE_EXCL) && 297 if (!(mode & FMODE_EXCL)) {
298 blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) 298 bdgrab(bdev);
299 return -EBUSY; 299 if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
300 return -EBUSY;
301 }
300 ret = set_blocksize(bdev, n); 302 ret = set_blocksize(bdev, n);
301 if (!(mode & FMODE_EXCL)) 303 if (!(mode & FMODE_EXCL))
302 blkdev_put(bdev, mode | FMODE_EXCL); 304 blkdev_put(bdev, mode | FMODE_EXCL);
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 54784bb42cec..edc25867ad9d 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info {
416 u8 originally_enabled; /* True if GPE was originally enabled */ 416 u8 originally_enabled; /* True if GPE was originally enabled */
417}; 417};
418 418
419struct acpi_gpe_notify_object {
420 struct acpi_namespace_node *node;
421 struct acpi_gpe_notify_object *next;
422};
423
419union acpi_gpe_dispatch_info { 424union acpi_gpe_dispatch_info {
420 struct acpi_namespace_node *method_node; /* Method node for this GPE level */ 425 struct acpi_namespace_node *method_node; /* Method node for this GPE level */
421 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ 426 struct acpi_gpe_handler_info *handler; /* Installed GPE handler */
422 struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ 427 struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */
423}; 428};
424 429
425/* 430/*
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 14988a86066f..f4725212eb48 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
457 acpi_status status; 457 acpi_status status;
458 struct acpi_gpe_event_info *local_gpe_event_info; 458 struct acpi_gpe_event_info *local_gpe_event_info;
459 struct acpi_evaluate_info *info; 459 struct acpi_evaluate_info *info;
460 struct acpi_gpe_notify_object *notify_object;
460 461
461 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); 462 ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);
462 463
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
508 * from this thread -- because handlers may in turn run other 509 * from this thread -- because handlers may in turn run other
509 * control methods. 510 * control methods.
510 */ 511 */
511 status = 512 status = acpi_ev_queue_notify_request(
512 acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. 513 local_gpe_event_info->dispatch.device.node,
513 device_node, 514 ACPI_NOTIFY_DEVICE_WAKE);
514 ACPI_NOTIFY_DEVICE_WAKE); 515
516 notify_object = local_gpe_event_info->dispatch.device.next;
517 while (ACPI_SUCCESS(status) && notify_object) {
518 status = acpi_ev_queue_notify_request(
519 notify_object->node,
520 ACPI_NOTIFY_DEVICE_WAKE);
521 notify_object = notify_object->next;
522 }
523
515 break; 524 break;
516 525
517 case ACPI_GPE_DISPATCH_METHOD: 526 case ACPI_GPE_DISPATCH_METHOD:
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index e9562a7cb2f9..52aaff3df562 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
198 acpi_status status = AE_BAD_PARAMETER; 198 acpi_status status = AE_BAD_PARAMETER;
199 struct acpi_gpe_event_info *gpe_event_info; 199 struct acpi_gpe_event_info *gpe_event_info;
200 struct acpi_namespace_node *device_node; 200 struct acpi_namespace_node *device_node;
201 struct acpi_gpe_notify_object *notify_object;
201 acpi_cpu_flags flags; 202 acpi_cpu_flags flags;
203 u8 gpe_dispatch_mask;
202 204
203 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); 205 ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake);
204 206
@@ -212,37 +214,62 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device,
212 return_ACPI_STATUS(AE_BAD_PARAMETER); 214 return_ACPI_STATUS(AE_BAD_PARAMETER);
213 } 215 }
214 216
217 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
218
219 /* Ensure that we have a valid GPE number */
220
221 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
222 if (!gpe_event_info) {
223 goto unlock_and_exit;
224 }
225
226 if (wake_device == ACPI_ROOT_OBJECT) {
227 goto out;
228 }
229
230 /*
231 * If there is no method or handler for this GPE, then the
232 * wake_device will be notified whenever this GPE fires (aka
233 * "implicit notify") Note: The GPE is assumed to be
234 * level-triggered (for windows compatibility).
235 */
236 gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK;
237 if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE
238 && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) {
239 goto out;
240 }
241
215 /* Validate wake_device is of type Device */ 242 /* Validate wake_device is of type Device */
216 243
217 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); 244 device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device);
218 if (device_node->type != ACPI_TYPE_DEVICE) { 245 if (device_node->type != ACPI_TYPE_DEVICE) {
219 return_ACPI_STATUS(AE_BAD_PARAMETER); 246 goto unlock_and_exit;
220 } 247 }
221 248
222 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 249 if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) {
250 gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY |
251 ACPI_GPE_LEVEL_TRIGGERED);
252 gpe_event_info->dispatch.device.node = device_node;
253 gpe_event_info->dispatch.device.next = NULL;
254 } else {
255 /* There are multiple devices to notify implicitly. */
223 256
224 /* Ensure that we have a valid GPE number */ 257 notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object));
225 258 if (!notify_object) {
226 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 259 status = AE_NO_MEMORY;
227 if (gpe_event_info) { 260 goto unlock_and_exit;
228 /*
229 * If there is no method or handler for this GPE, then the
230 * wake_device will be notified whenever this GPE fires (aka
231 * "implicit notify") Note: The GPE is assumed to be
232 * level-triggered (for windows compatibility).
233 */
234 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
235 ACPI_GPE_DISPATCH_NONE) {
236 gpe_event_info->flags =
237 (ACPI_GPE_DISPATCH_NOTIFY |
238 ACPI_GPE_LEVEL_TRIGGERED);
239 gpe_event_info->dispatch.device_node = device_node;
240 } 261 }
241 262
242 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; 263 notify_object->node = device_node;
243 status = AE_OK; 264 notify_object->next = gpe_event_info->dispatch.device.next;
265 gpe_event_info->dispatch.device.next = notify_object;
244 } 266 }
245 267
268 out:
269 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
270 status = AE_OK;
271
272 unlock_and_exit:
246 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 273 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
247 return_ACPI_STATUS(status); 274 return_ACPI_STATUS(status);
248} 275}
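
The evxfgpe.c change above turns the single implicit-notify target into a singly linked list so one GPE can wake several _PRW devices: the first caller stores its node in the embedded head entry, later callers allocate an acpi_gpe_notify_object and prepend it, and the dispatcher in evgpe.c walks dispatch.device.next notifying each entry. A generic sketch of that prepend-and-walk pattern; all names below are illustrative:

#include <linux/slab.h>
#include <linux/errno.h>

struct notify_target {
	void			*node;	/* opaque device handle */
	struct notify_target	*next;
};

/* Prepend an extra target; the head entry itself is embedded in its owner. */
static int add_notify_target(struct notify_target *head, void *node)
{
	struct notify_target *nt = kzalloc(sizeof(*nt), GFP_KERNEL);

	if (!nt)
		return -ENOMEM;
	nt->node = node;
	nt->next = head->next;
	head->next = nt;
	return 0;
}

/* Walk the whole chain, starting with the embedded head entry. */
static void notify_all(struct notify_target *head, void (*notify)(void *node))
{
	struct notify_target *nt;

	for (nt = head; nt; nt = nt->next)
		notify(nt->node);
}
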
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c
index 5df67f1d6c61..384f7abcff77 100644
--- a/drivers/acpi/debugfs.c
+++ b/drivers/acpi/debugfs.c
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
26 size_t count, loff_t *ppos) 26 size_t count, loff_t *ppos)
27{ 27{
28 static char *buf; 28 static char *buf;
29 static int uncopied_bytes; 29 static u32 max_size;
30 static u32 uncopied_bytes;
31
30 struct acpi_table_header table; 32 struct acpi_table_header table;
31 acpi_status status; 33 acpi_status status;
32 34
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
37 if (copy_from_user(&table, user_buf, 39 if (copy_from_user(&table, user_buf,
38 sizeof(struct acpi_table_header))) 40 sizeof(struct acpi_table_header)))
39 return -EFAULT; 41 return -EFAULT;
40 uncopied_bytes = table.length; 42 uncopied_bytes = max_size = table.length;
41 buf = kzalloc(uncopied_bytes, GFP_KERNEL); 43 buf = kzalloc(max_size, GFP_KERNEL);
42 if (!buf) 44 if (!buf)
43 return -ENOMEM; 45 return -ENOMEM;
44 } 46 }
45 47
46 if (uncopied_bytes < count) { 48 if (buf == NULL)
47 kfree(buf); 49 return -EINVAL;
50
51 if ((*ppos > max_size) ||
52 (*ppos + count > max_size) ||
53 (*ppos + count < count) ||
54 (count > uncopied_bytes))
48 return -EINVAL; 55 return -EINVAL;
49 }
50 56
51 if (copy_from_user(buf + (*ppos), user_buf, count)) { 57 if (copy_from_user(buf + (*ppos), user_buf, count)) {
52 kfree(buf); 58 kfree(buf);
59 buf = NULL;
53 return -EFAULT; 60 return -EFAULT;
54 } 61 }
55 62
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
59 if (!uncopied_bytes) { 66 if (!uncopied_bytes) {
60 status = acpi_install_method(buf); 67 status = acpi_install_method(buf);
61 kfree(buf); 68 kfree(buf);
69 buf = NULL;
62 if (ACPI_FAILURE(status)) 70 if (ACPI_FAILURE(status))
63 return -EINVAL; 71 return -EINVAL;
64 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); 72 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE);
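
The cm_write() fix replaces a single size check with the full set of guards a chunked debugfs write needs: reject offsets past the announced table length, reject offset/count arithmetic that would wrap, and never accept more than the bytes still outstanding. A condensed sketch of those checks, with the buffer management omitted:

#include <linux/types.h>
#include <linux/errno.h>

/*
 * Validate one chunk of a multi-part write: 'pos' and 'count' come from the
 * caller, 'max_size' is the total expected length and 'remaining' the bytes
 * not yet received.
 */
static int check_write_chunk(loff_t pos, size_t count,
			     u32 max_size, u32 remaining)
{
	if (pos > max_size ||			/* starts past the end */
	    pos + count > max_size ||		/* runs past the end */
	    pos + count < count ||		/* offset + count wrapped around */
	    count > remaining)			/* more than is still expected */
		return -EINVAL;
	return 0;
}
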
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index b0931818cf98..c90c76aa7f8b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -636,17 +636,21 @@ EXPORT_SYMBOL(acpi_os_write_port);
636acpi_status 636acpi_status
637acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) 637acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
638{ 638{
639 u32 dummy;
640 void __iomem *virt_addr; 639 void __iomem *virt_addr;
641 int size = width / 8, unmap = 0; 640 unsigned int size = width / 8;
641 bool unmap = false;
642 u32 dummy;
642 643
643 rcu_read_lock(); 644 rcu_read_lock();
644 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 645 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
645 rcu_read_unlock();
646 if (!virt_addr) { 646 if (!virt_addr) {
647 rcu_read_unlock();
647 virt_addr = acpi_os_ioremap(phys_addr, size); 648 virt_addr = acpi_os_ioremap(phys_addr, size);
648 unmap = 1; 649 if (!virt_addr)
650 return AE_BAD_ADDRESS;
651 unmap = true;
649 } 652 }
653
650 if (!value) 654 if (!value)
651 value = &dummy; 655 value = &dummy;
652 656
@@ -666,6 +670,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
666 670
667 if (unmap) 671 if (unmap)
668 iounmap(virt_addr); 672 iounmap(virt_addr);
673 else
674 rcu_read_unlock();
669 675
670 return AE_OK; 676 return AE_OK;
671} 677}
@@ -674,14 +680,17 @@ acpi_status
674acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) 680acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
675{ 681{
676 void __iomem *virt_addr; 682 void __iomem *virt_addr;
677 int size = width / 8, unmap = 0; 683 unsigned int size = width / 8;
684 bool unmap = false;
678 685
679 rcu_read_lock(); 686 rcu_read_lock();
680 virt_addr = acpi_map_vaddr_lookup(phys_addr, size); 687 virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
681 rcu_read_unlock();
682 if (!virt_addr) { 688 if (!virt_addr) {
689 rcu_read_unlock();
683 virt_addr = acpi_os_ioremap(phys_addr, size); 690 virt_addr = acpi_os_ioremap(phys_addr, size);
684 unmap = 1; 691 if (!virt_addr)
692 return AE_BAD_ADDRESS;
693 unmap = true;
685 } 694 }
686 695
687 switch (width) { 696 switch (width) {
@@ -700,6 +709,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
700 709
701 if (unmap) 710 if (unmap)
702 iounmap(virt_addr); 711 iounmap(virt_addr);
712 else
713 rcu_read_unlock();
703 714
704 return AE_OK; 715 return AE_OK;
705} 716}
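
Both acpi_os_read_memory() and acpi_os_write_memory() now hold the RCU read lock for the whole access when the cached mapping is used, and drop it before falling back to a temporary mapping. The pairing is the point: whichever path created the mapping is the one that tears it down. A hedged sketch of that shape; the lookup helper is a stand-in, and plain ioremap() stands in for acpi_os_ioremap():

#include <linux/io.h>
#include <linux/rcupdate.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Stand-in for the RCU-protected lookup of a pre-existing mapping. */
void __iomem *cached_mapping_lookup(u64 phys, unsigned int size);

static int read_mapped_u32(u64 phys, u32 *value)
{
	void __iomem *virt;
	bool unmap = false;

	rcu_read_lock();
	virt = cached_mapping_lookup(phys, sizeof(u32));
	if (!virt) {
		rcu_read_unlock();	/* nothing cached: create a throwaway mapping */
		virt = ioremap(phys, sizeof(u32));
		if (!virt)
			return -ENOMEM;
		unmap = true;
	}

	*value = readl(virt);

	if (unmap)
		iounmap(virt);		/* we created the mapping, so we tear it down */
	else
		rcu_read_unlock();	/* cached mapping stays; just leave the RCU section */
	return 0;
}
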
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 42d3d72dae85..5af3479714f6 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -82,6 +82,11 @@ long acpi_is_video_device(struct acpi_device *device)
 	if (!device)
 		return 0;
 
+	/* Is this device able to support video switching ? */
+	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
+	    ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
+		video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
+
 	/* Is this device able to retrieve a video ROM ? */
 	if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_ROM", &h_dummy)))
 		video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
diff --git a/drivers/acpi/wakeup.c b/drivers/acpi/wakeup.c
index ed6501452507..7bfbe40bc43b 100644
--- a/drivers/acpi/wakeup.c
+++ b/drivers/acpi/wakeup.c
@@ -86,8 +86,12 @@ int __init acpi_wakeup_device_init(void)
 		struct acpi_device *dev = container_of(node,
 						       struct acpi_device,
 						       wakeup_list);
-		if (device_can_wakeup(&dev->dev))
+		if (device_can_wakeup(&dev->dev)) {
+			/* Button GPEs are supposed to be always enabled. */
+			acpi_enable_gpe(dev->wakeup.gpe_device,
+					dev->wakeup.gpe_number);
 			device_set_wakeup_enable(&dev->dev, true);
+		}
 	}
 	mutex_unlock(&acpi_device_lock);
 	return 0;
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 73fb1c4f4cd4..25ef1a4556e6 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc)
 	}
 
 	skb = alloc_skb(sizeof(*header), GFP_ATOMIC);
-	if (!skb && net_ratelimit()) {
-		dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
+	if (!skb) {
+		if (net_ratelimit())
+			dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n");
 		return -ENOMEM;
 	}
 	header = (void *)skb_put(skb, sizeof(*header));
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
 		struct block_device *bdev = opened_bdev[cnt];
 		if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
 			continue;
-		__invalidate_device(bdev);
+		__invalidate_device(bdev, true);
 	}
 	mutex_unlock(&open_lock);
 } else {
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 49e6a545eb63..dbf31ec9114d 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -78,7 +78,6 @@
78 78
79#include <asm/uaccess.h> 79#include <asm/uaccess.h>
80 80
81static DEFINE_MUTEX(loop_mutex);
82static LIST_HEAD(loop_devices); 81static LIST_HEAD(loop_devices);
83static DEFINE_MUTEX(loop_devices_mutex); 82static DEFINE_MUTEX(loop_devices_mutex);
84 83
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
1501{ 1500{
1502 struct loop_device *lo = bdev->bd_disk->private_data; 1501 struct loop_device *lo = bdev->bd_disk->private_data;
1503 1502
1504 mutex_lock(&loop_mutex);
1505 mutex_lock(&lo->lo_ctl_mutex); 1503 mutex_lock(&lo->lo_ctl_mutex);
1506 lo->lo_refcnt++; 1504 lo->lo_refcnt++;
1507 mutex_unlock(&lo->lo_ctl_mutex); 1505 mutex_unlock(&lo->lo_ctl_mutex);
1508 mutex_unlock(&loop_mutex);
1509 1506
1510 return 0; 1507 return 0;
1511} 1508}
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1515 struct loop_device *lo = disk->private_data; 1512 struct loop_device *lo = disk->private_data;
1516 int err; 1513 int err;
1517 1514
1518 mutex_lock(&loop_mutex);
1519 mutex_lock(&lo->lo_ctl_mutex); 1515 mutex_lock(&lo->lo_ctl_mutex);
1520 1516
1521 if (--lo->lo_refcnt) 1517 if (--lo->lo_refcnt)
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
1540out: 1536out:
1541 mutex_unlock(&lo->lo_ctl_mutex); 1537 mutex_unlock(&lo->lo_ctl_mutex);
1542out_unlocked: 1538out_unlocked:
1543 mutex_unlock(&loop_mutex);
1544 return 0; 1539 return 0;
1545} 1540}
1546 1541
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index a126e614601f..6dcd55a74c0a 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -39,6 +39,11 @@ static struct usb_device_id ath3k_table[] = {
 	/* Atheros AR3011 with sflash firmware*/
 	{ USB_DEVICE(0x0CF3, 0x3002) },
 
+	/* Atheros AR9285 Malbec with sflash firmware */
+	{ USB_DEVICE(0x03F0, 0x311D) },
+
+	/* Atheros AR5BBU12 with sflash firmware */
+	{ USB_DEVICE(0x0489, 0xE02C) },
 	{ }	/* Terminating entry */
 };
 
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 1da773f899a2..700a3840fddc 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -102,6 +102,12 @@ static struct usb_device_id blacklist_table[] = {
102 /* Atheros 3011 with sflash firmware */ 102 /* Atheros 3011 with sflash firmware */
103 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, 103 { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE },
104 104
105 /* Atheros AR9285 Malbec with sflash firmware */
106 { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE },
107
108 /* Atheros AR5BBU12 with sflash firmware */
109 { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE },
110
105 /* Broadcom BCM2035 */ 111 /* Broadcom BCM2035 */
106 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, 112 { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU },
107 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, 113 { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU },
@@ -826,7 +832,7 @@ static void btusb_work(struct work_struct *work)
826 832
827 if (hdev->conn_hash.sco_num > 0) { 833 if (hdev->conn_hash.sco_num > 0) {
828 if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { 834 if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) {
829 err = usb_autopm_get_interface(data->isoc); 835 err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf);
830 if (err < 0) { 836 if (err < 0) {
831 clear_bit(BTUSB_ISOC_RUNNING, &data->flags); 837 clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
832 usb_kill_anchored_urbs(&data->isoc_anchor); 838 usb_kill_anchored_urbs(&data->isoc_anchor);
@@ -855,7 +861,7 @@ static void btusb_work(struct work_struct *work)
855 861
856 __set_isoc_interface(hdev, 0); 862 __set_isoc_interface(hdev, 0);
857 if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) 863 if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags))
858 usb_autopm_put_interface(data->isoc); 864 usb_autopm_put_interface(data->isoc ? data->isoc : data->intf);
859 } 865 }
860} 866}
861 867
@@ -1038,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf,
1038 1044
1039 usb_set_intfdata(intf, data); 1045 usb_set_intfdata(intf, data);
1040 1046
1041 usb_enable_autosuspend(interface_to_usbdev(intf));
1042
1043 return 0; 1047 return 0;
1044} 1048}
1045 1049
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 9252e85706ef..780498d76581 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void)
773#else 773#else
774 printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); 774 printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n");
775#endif 775#endif
776 pci_unregister_driver(&agp_amd64_pci_driver);
776 return -ENODEV; 777 return -ENODEV;
777 } 778 }
778 779
779 /* First check that we have at least one AMD64 NB */ 780 /* First check that we have at least one AMD64 NB */
780 if (!pci_dev_present(amd_nb_misc_ids)) 781 if (!pci_dev_present(amd_nb_misc_ids)) {
782 pci_unregister_driver(&agp_amd64_pci_driver);
781 return -ENODEV; 783 return -ENODEV;
784 }
782 785
783 /* Look for any AGP bridge */ 786 /* Look for any AGP bridge */
784 agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; 787 agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table;
785 err = driver_attach(&agp_amd64_pci_driver.driver); 788 err = driver_attach(&agp_amd64_pci_driver.driver);
786 if (err == 0 && agp_bridges_found == 0) 789 if (err == 0 && agp_bridges_found == 0) {
790 pci_unregister_driver(&agp_amd64_pci_driver);
787 err = -ENODEV; 791 err = -ENODEV;
792 }
788 } 793 }
789 return err; 794 return err;
790} 795}
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
 #define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
 
 #define I915_IFPADDR    0x60
+#define I830_HIC        0x70
 
 /* Intel 965G registers */
 #define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/agp_backend.h> 23#include <linux/agp_backend.h>
24#include <linux/delay.h>
24#include <asm/smp.h> 25#include <asm/smp.h>
25#include "agp.h" 26#include "agp.h"
26#include "intel-agp.h" 27#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
70 u32 __iomem *gtt; /* I915G */ 71 u32 __iomem *gtt; /* I915G */
71 bool clear_fake_agp; /* on first access via agp, fill with scratch */ 72 bool clear_fake_agp; /* on first access via agp, fill with scratch */
72 int num_dcache_entries; 73 int num_dcache_entries;
73 union { 74 void __iomem *i9xx_flush_page;
74 void __iomem *i9xx_flush_page;
75 void *i8xx_flush_page;
76 };
77 char *i81x_gtt_table; 75 char *i81x_gtt_table;
78 struct page *i8xx_page;
79 struct resource ifp_resource; 76 struct resource ifp_resource;
80 int resource_valid; 77 int resource_valid;
81 struct page *scratch_page; 78 struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
722 719
723static void i830_cleanup(void) 720static void i830_cleanup(void)
724{ 721{
725 if (intel_private.i8xx_flush_page) {
726 kunmap(intel_private.i8xx_flush_page);
727 intel_private.i8xx_flush_page = NULL;
728 }
729
730 __free_page(intel_private.i8xx_page);
731 intel_private.i8xx_page = NULL;
732}
733
734static void intel_i830_setup_flush(void)
735{
736 /* return if we've already set the flush mechanism up */
737 if (intel_private.i8xx_page)
738 return;
739
740 intel_private.i8xx_page = alloc_page(GFP_KERNEL);
741 if (!intel_private.i8xx_page)
742 return;
743
744 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
745 if (!intel_private.i8xx_flush_page)
746 i830_cleanup();
747} 722}
748 723
749/* The chipset_flush interface needs to get data that has already been 724/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
758 */ 733 */
759static void i830_chipset_flush(void) 734static void i830_chipset_flush(void)
760{ 735{
761 unsigned int *pg = intel_private.i8xx_flush_page; 736 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
737
738 /* Forcibly evict everything from the CPU write buffers.
739 * clflush appears to be insufficient.
740 */
741 wbinvd_on_all_cpus();
742
743 /* Now we've only seen documents for this magic bit on 855GM,
744 * we hope it exists for the other gen2 chipsets...
745 *
746 * Also works as advertised on my 845G.
747 */
748 writel(readl(intel_private.registers+I830_HIC) | (1<<31),
749 intel_private.registers+I830_HIC);
762 750
763 memset(pg, 0, 1024); 751 while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
752 if (time_after(jiffies, timeout))
753 break;
764 754
765 if (cpu_has_clflush) 755 udelay(50);
766 clflush_cache_range(pg, 1024); 756 }
767 else if (wbinvd_on_all_cpus() != 0)
768 printk(KERN_ERR "Timed out waiting for cache flush.\n");
769} 757}
770 758
771static void i830_write_entry(dma_addr_t addr, unsigned int entry, 759static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
849 837
850 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; 838 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
851 839
852 intel_i830_setup_flush();
853
854 return 0; 840 return 0;
855} 841}
856 842
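
The new i830_chipset_flush() replaces the kmap'd flush page with a write to the I830_HIC register followed by a bounded poll: set the trigger bit, then spin until the hardware clears it or a timeout expires, sleeping briefly between reads. A generic sketch of that poll-with-timeout idiom; the register offset and mapping below are placeholders, not real device registers:

#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/errno.h>

#define DEMO_FLUSH_REG	0x70	/* placeholder offset, not a real hardware register */

static int trigger_and_wait(void __iomem *regs)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	/* Set the self-clearing trigger bit. */
	writel(readl(regs + DEMO_FLUSH_REG) | (1 << 31), regs + DEMO_FLUSH_REG);

	/* Poll until the hardware clears it, but never spin longer than ~1s. */
	while (readl(regs + DEMO_FLUSH_REG) & (1 << 31)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		udelay(50);
	}
	return 0;
}
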
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 7855f9f45b8e..62787e30d508 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -900,6 +900,14 @@ static void sender(void *send_info,
 		printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
 #endif
 
+	/*
+	 * last_timeout_jiffies is updated here to avoid
+	 * smi_timeout() handler passing very large time_diff
+	 * value to smi_event_handler() that causes
+	 * the send command to abort.
+	 */
+	smi_info->last_timeout_jiffies = jiffies;
+
 	mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
 
 	if (smi_info->thread)
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 777181a2e603..bcbbc71febb7 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p)
 		    test_bit(IS_ANY_T1, &dev->flags))) {
 			DEBUGP(4, dev, "Perform AUTOPPS\n");
 			set_bit(IS_AUTOPPS_ACT, &dev->flags);
-			ptsreq.protocol = ptsreq.protocol =
-			    (0x01 << dev->proto);
+			ptsreq.protocol = (0x01 << dev->proto);
 			ptsreq.flags = 0x01;
 			ptsreq.pts1 = 0x00;
 			ptsreq.pts2 = 0x00;
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c
index 94b8eb4d691d..444155a305ae 100644
--- a/drivers/char/pcmcia/ipwireless/main.c
+++ b/drivers/char/pcmcia/ipwireless/main.c
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data)
78static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) 78static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
79{ 79{
80 struct ipw_dev *ipw = priv_data; 80 struct ipw_dev *ipw = priv_data;
81 struct resource *io_resource;
82 int ret; 81 int ret;
83 82
84 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 83 p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
92 if (ret) 91 if (ret)
93 return ret; 92 return ret;
94 93
95 io_resource = request_region(p_dev->resource[0]->start, 94 if (!request_region(p_dev->resource[0]->start,
96 resource_size(p_dev->resource[0]), 95 resource_size(p_dev->resource[0]),
97 IPWIRELESS_PCCARD_NAME); 96 IPWIRELESS_PCCARD_NAME)) {
97 ret = -EBUSY;
98 goto exit;
99 }
98 100
99 p_dev->resource[2]->flags |= 101 p_dev->resource[2]->flags |=
100 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; 102 WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE;
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
105 107
106 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); 108 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr);
107 if (ret != 0) 109 if (ret != 0)
108 goto exit2; 110 goto exit1;
109 111
110 ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; 112 ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100;
111 113
112 ipw->attr_memory = ioremap(p_dev->resource[2]->start, 114 ipw->common_memory = ioremap(p_dev->resource[2]->start,
113 resource_size(p_dev->resource[2])); 115 resource_size(p_dev->resource[2]));
114 request_mem_region(p_dev->resource[2]->start, 116 if (!request_mem_region(p_dev->resource[2]->start,
115 resource_size(p_dev->resource[2]), 117 resource_size(p_dev->resource[2]),
116 IPWIRELESS_PCCARD_NAME); 118 IPWIRELESS_PCCARD_NAME)) {
119 ret = -EBUSY;
120 goto exit2;
121 }
117 122
118 p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | 123 p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM |
119 WIN_ENABLE; 124 WIN_ENABLE;
120 p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ 125 p_dev->resource[3]->end = 0; /* this used to be 0x1000 */
121 ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); 126 ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0);
122 if (ret != 0) 127 if (ret != 0)
123 goto exit2; 128 goto exit3;
124 129
125 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); 130 ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0);
126 if (ret != 0) 131 if (ret != 0)
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
128 133
129 ipw->attr_memory = ioremap(p_dev->resource[3]->start, 134 ipw->attr_memory = ioremap(p_dev->resource[3]->start,
130 resource_size(p_dev->resource[3])); 135 resource_size(p_dev->resource[3]));
131 request_mem_region(p_dev->resource[3]->start, 136 if (!request_mem_region(p_dev->resource[3]->start,
132 resource_size(p_dev->resource[3]), 137 resource_size(p_dev->resource[3]),
133 IPWIRELESS_PCCARD_NAME); 138 IPWIRELESS_PCCARD_NAME)) {
139 ret = -EBUSY;
140 goto exit4;
141 }
134 142
135 return 0; 143 return 0;
136 144
145exit4:
146 iounmap(ipw->attr_memory);
137exit3: 147exit3:
148 release_mem_region(p_dev->resource[2]->start,
149 resource_size(p_dev->resource[2]));
138exit2: 150exit2:
139 if (ipw->common_memory) { 151 iounmap(ipw->common_memory);
140 release_mem_region(p_dev->resource[2]->start,
141 resource_size(p_dev->resource[2]));
142 iounmap(ipw->common_memory);
143 }
144exit1: 152exit1:
145 release_resource(io_resource); 153 release_region(p_dev->resource[0]->start,
154 resource_size(p_dev->resource[0]));
155exit:
146 pcmcia_disable_device(p_dev); 156 pcmcia_disable_device(p_dev);
147 return -1; 157 return ret;
148} 158}
149 159
150static int config_ipwireless(struct ipw_dev *ipw) 160static int config_ipwireless(struct ipw_dev *ipw)
@@ -219,6 +229,8 @@ exit:
219 229
220static void release_ipwireless(struct ipw_dev *ipw) 230static void release_ipwireless(struct ipw_dev *ipw)
221{ 231{
232 release_region(ipw->link->resource[0]->start,
233 resource_size(ipw->link->resource[0]));
222 if (ipw->common_memory) { 234 if (ipw->common_memory) {
223 release_mem_region(ipw->link->resource[2]->start, 235 release_mem_region(ipw->link->resource[2]->start,
224 resource_size(ipw->link->resource[2])); 236 resource_size(ipw->link->resource[2]));
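
The ipwireless_probe() rework is a textbook example of acquire-in-order, release-in-reverse error handling: each successfully claimed resource gets a matching label, and a failure jumps to the label that undoes everything acquired so far. A stripped-down sketch of the structure, reduced to two resources and with illustrative names:

#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/errno.h>

static void __iomem *demo_regs;

static int demo_probe(unsigned long io_start, resource_size_t io_len,
		      unsigned long mem_start, resource_size_t mem_len)
{
	int ret;

	if (!request_region(io_start, io_len, "demo")) {
		ret = -EBUSY;
		goto err;
	}

	if (!request_mem_region(mem_start, mem_len, "demo")) {
		ret = -EBUSY;
		goto err_io;
	}

	demo_regs = ioremap(mem_start, mem_len);
	if (!demo_regs) {
		ret = -ENOMEM;
		goto err_mem;
	}

	return 0;

err_mem:
	release_mem_region(mem_start, mem_len);
err_io:
	release_region(io_start, io_len);
err:
	return ret;
}
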
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index faf5a2c65926..1f46f1cd9225 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
364 tpm_protected_ordinal_duration[ordinal & 364 tpm_protected_ordinal_duration[ordinal &
365 TPM_PROTECTED_ORDINAL_MASK]; 365 TPM_PROTECTED_ORDINAL_MASK];
366 366
367 if (duration_idx != TPM_UNDEFINED) { 367 if (duration_idx != TPM_UNDEFINED)
368 duration = chip->vendor.duration[duration_idx]; 368 duration = chip->vendor.duration[duration_idx];
369 /* if duration is 0, it's because chip->vendor.duration wasn't */ 369 if (duration <= 0)
370 /* filled yet, so we set the lowest timeout just to give enough */
371 /* time for tpm_get_timeouts() to succeed */
372 return (duration <= 0 ? HZ : duration);
373 } else
374 return 2 * 60 * HZ; 370 return 2 * 60 * HZ;
371 else
372 return duration;
375} 373}
376EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); 374EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
377 375
@@ -577,11 +575,9 @@ duration:
577 if (rc) 575 if (rc)
578 return; 576 return;
579 577
580 if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || 578 if (be32_to_cpu(tpm_cmd.header.out.return_code)
581 be32_to_cpu(tpm_cmd.header.out.length) 579 != 3 * sizeof(u32))
582 != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32))
583 return; 580 return;
584
585 duration_cap = &tpm_cmd.params.getcap_out.cap.duration; 581 duration_cap = &tpm_cmd.params.getcap_out.cap.duration;
586 chip->vendor.duration[TPM_SHORT] = 582 chip->vendor.duration[TPM_SHORT] =
587 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); 583 usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short));
@@ -941,18 +937,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev,
941} 937}
942EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); 938EXPORT_SYMBOL_GPL(tpm_show_caps_1_2);
943 939
944ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr,
945 char *buf)
946{
947 struct tpm_chip *chip = dev_get_drvdata(dev);
948
949 return sprintf(buf, "%d %d %d\n",
950 jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]),
951 jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]),
952 jiffies_to_usecs(chip->vendor.duration[TPM_LONG]));
953}
954EXPORT_SYMBOL_GPL(tpm_show_timeouts);
955
956ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, 940ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr,
957 const char *buf, size_t count) 941 const char *buf, size_t count)
958{ 942{
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h
index d84ff772c26f..72ddb031b69a 100644
--- a/drivers/char/tpm/tpm.h
+++ b/drivers/char/tpm/tpm.h
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr,
 			     char *);
 extern ssize_t tpm_show_temp_deactivated(struct device *,
 					 struct device_attribute *attr, char *);
-extern ssize_t tpm_show_timeouts(struct device *,
-				 struct device_attribute *attr, char *);
 
 struct tpm_chip;
 
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c
index 0d1d38e5f266..dd21df55689d 100644
--- a/drivers/char/tpm/tpm_tis.c
+++ b/drivers/char/tpm/tpm_tis.c
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
376 NULL); 376 NULL);
377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); 377static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL);
378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); 378static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel);
379static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL);
380 379
381static struct attribute *tis_attrs[] = { 380static struct attribute *tis_attrs[] = {
382 &dev_attr_pubek.attr, 381 &dev_attr_pubek.attr,
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = {
386 &dev_attr_owned.attr, 385 &dev_attr_owned.attr,
387 &dev_attr_temp_deactivated.attr, 386 &dev_attr_temp_deactivated.attr,
388 &dev_attr_caps.attr, 387 &dev_attr_caps.attr,
389 &dev_attr_cancel.attr, 388 &dev_attr_cancel.attr, NULL,
390 &dev_attr_timeouts.attr, NULL,
391}; 389};
392 390
393static struct attribute_group tis_attr_grp = { 391static struct attribute_group tis_attr_grp = {
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 490393186338..84b164d1eb2b 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -388,6 +388,10 @@ static void discard_port_data(struct port *port)
 	unsigned int len;
 	int ret;
 
+	if (!port->portdev) {
+		/* Device has been unplugged.  vqs are already gone. */
+		return;
+	}
 	vq = port->in_vq;
 	if (port->inbuf)
 		buf = port->inbuf;
@@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port)
 	void *buf;
 	unsigned int len;
 
+	if (!port->portdev) {
+		/* Device has been unplugged.  vqs are already gone. */
+		return;
+	}
 	while ((buf = virtqueue_get_buf(port->out_vq, &len))) {
 		kfree(buf);
 		port->outvq_full = false;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1109f6848a43..5cb4d09919d6 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1919 1919
1920 ret = sysdev_driver_register(&cpu_sysdev_class, 1920 ret = sysdev_driver_register(&cpu_sysdev_class,
1921 &cpufreq_sysdev_driver); 1921 &cpufreq_sysdev_driver);
1922 if (ret)
1923 goto err_null_driver;
1922 1924
1923 if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { 1925 if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) {
1924 int i; 1926 int i;
1925 ret = -ENODEV; 1927 ret = -ENODEV;
1926 1928
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
1935 if (ret) { 1937 if (ret) {
1936 dprintk("no CPU initialized for driver %s\n", 1938 dprintk("no CPU initialized for driver %s\n",
1937 driver_data->name); 1939 driver_data->name);
1938 sysdev_driver_unregister(&cpu_sysdev_class, 1940 goto err_sysdev_unreg;
1939 &cpufreq_sysdev_driver);
1940
1941 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1942 cpufreq_driver = NULL;
1943 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1944 } 1941 }
1945 } 1942 }
1946 1943
1947 if (!ret) { 1944 register_hotcpu_notifier(&cpufreq_cpu_notifier);
1948 register_hotcpu_notifier(&cpufreq_cpu_notifier); 1945 dprintk("driver %s up and running\n", driver_data->name);
1949 dprintk("driver %s up and running\n", driver_data->name); 1946 cpufreq_debug_enable_ratelimit();
1950 cpufreq_debug_enable_ratelimit();
1951 }
1952 1947
1948 return 0;
1949err_sysdev_unreg:
1950 sysdev_driver_unregister(&cpu_sysdev_class,
1951 &cpufreq_sysdev_driver);
1952err_null_driver:
1953 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1954 cpufreq_driver = NULL;
1955 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1953 return ret; 1956 return ret;
1954} 1957}
1955EXPORT_SYMBOL_GPL(cpufreq_register_driver); 1958EXPORT_SYMBOL_GPL(cpufreq_register_driver);
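The cpufreq change above converts nested "undo on failure" code into the usual goto-unwind error path, with labels in reverse order of setup. A compact sketch of that idiom in plain C; register_a/register_b are placeholders, not cpufreq functions:

#include <stdio.h>

static int register_a(void) { return 0; }	/* pretend these can fail */
static int register_b(void) { return -1; }
static void unregister_a(void) { puts("undo a"); }

static int do_register(void)
{
	int ret;

	ret = register_a();
	if (ret)
		goto err_out;		/* nothing to undo yet */

	ret = register_b();
	if (ret)
		goto err_unreg_a;	/* undo only what already succeeded */

	return 0;

err_unreg_a:
	unregister_a();
err_out:
	return ret;			/* labels unwind in reverse order of setup */
}

int main(void)
{
	printf("do_register() = %d\n", do_register());
	return 0;
}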
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 297f48b0cba9..07bca4970e50 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -79,6 +79,7 @@
79#include <linux/module.h> 79#include <linux/module.h>
80#include <linux/interrupt.h> 80#include <linux/interrupt.h>
81#include <linux/slab.h> 81#include <linux/slab.h>
82#include <linux/delay.h>
82#include <linux/dmapool.h> 83#include <linux/dmapool.h>
83#include <linux/dmaengine.h> 84#include <linux/dmaengine.h>
84#include <linux/amba/bus.h> 85#include <linux/amba/bus.h>
@@ -235,16 +236,19 @@ static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
235} 236}
236 237
237/* 238/*
238 * Overall DMAC remains enabled always. 239 * Pause the channel by setting the HALT bit.
239 * 240 *
240 * Disabling individual channels could lose data. 241 * For M->P transfers, pause the DMAC first and then stop the peripheral -
242 * the FIFO can only drain if the peripheral is still requesting data.
243 * (note: this can still timeout if the DMAC FIFO never drains of data.)
241 * 244 *
242 * Disable the peripheral DMA after disabling the DMAC in order to allow 245 * For P->M transfers, disable the peripheral first to stop it filling
243 * the DMAC FIFO to drain, and hence allow the channel to show inactive 246 * the DMAC FIFO, and then pause the DMAC.
244 */ 247 */
245static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 248static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
246{ 249{
247 u32 val; 250 u32 val;
251 int timeout;
248 252
249 /* Set the HALT bit and wait for the FIFO to drain */ 253 /* Set the HALT bit and wait for the FIFO to drain */
250 val = readl(ch->base + PL080_CH_CONFIG); 254 val = readl(ch->base + PL080_CH_CONFIG);
@@ -252,8 +256,13 @@ static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
252 writel(val, ch->base + PL080_CH_CONFIG); 256 writel(val, ch->base + PL080_CH_CONFIG);
253 257
254 /* Wait for channel inactive */ 258 /* Wait for channel inactive */
255 while (pl08x_phy_channel_busy(ch)) 259 for (timeout = 1000; timeout; timeout--) {
256 cpu_relax(); 260 if (!pl08x_phy_channel_busy(ch))
261 break;
262 udelay(1);
263 }
264 if (pl08x_phy_channel_busy(ch))
265 pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
257} 266}
258 267
259static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 268static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
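The pause path above replaces an unbounded cpu_relax() spin with a bounded poll, a short delay per iteration, and an error message on timeout. A user-space sketch of that pattern; channel_busy() and the 1000 x 1us budget are illustrative stand-ins for the driver's register poll:

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

/* Stand-in for reading a hardware busy flag. */
static bool channel_busy(void)
{
	static int countdown = 5;	/* pretend the FIFO drains after a few polls */
	return countdown-- > 0;
}

static int pause_channel(void)
{
	int timeout;

	/* Poll with an upper bound instead of spinning forever. */
	for (timeout = 1000; timeout; timeout--) {
		if (!channel_busy())
			return 0;
		usleep(1);		/* analogous to udelay(1) in the driver */
	}

	fprintf(stderr, "timeout waiting for channel to pause\n");
	return -1;
}

int main(void)
{
	return pause_channel() ? 1 : 0;
}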
@@ -267,19 +276,24 @@ static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
267} 276}
268 277
269 278
270/* Stops the channel */ 279/*
271static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) 280 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
281 * clears any pending interrupt status. This should not be used for
282 * an on-going transfer, but as a method of shutting down a channel
283 * (eg, when it's no longer used) or terminating a transfer.
284 */
285static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
286 struct pl08x_phy_chan *ch)
272{ 287{
273 u32 val; 288 u32 val = readl(ch->base + PL080_CH_CONFIG);
274 289
275 pl08x_pause_phy_chan(ch); 290 val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
291 PL080_CONFIG_TC_IRQ_MASK);
276 292
277 /* Disable channel */
278 val = readl(ch->base + PL080_CH_CONFIG);
279 val &= ~PL080_CONFIG_ENABLE;
280 val &= ~PL080_CONFIG_ERR_IRQ_MASK;
281 val &= ~PL080_CONFIG_TC_IRQ_MASK;
282 writel(val, ch->base + PL080_CH_CONFIG); 293 writel(val, ch->base + PL080_CH_CONFIG);
294
295 writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
296 writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
283} 297}
284 298
285static inline u32 get_bytes_in_cctl(u32 cctl) 299static inline u32 get_bytes_in_cctl(u32 cctl)
@@ -404,13 +418,12 @@ static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
404{ 418{
405 unsigned long flags; 419 unsigned long flags;
406 420
421 spin_lock_irqsave(&ch->lock, flags);
422
407 /* Stop the channel and clear its interrupts */ 423 /* Stop the channel and clear its interrupts */
408 pl08x_stop_phy_chan(ch); 424 pl08x_terminate_phy_chan(pl08x, ch);
409 writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR);
410 writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR);
411 425
412 /* Mark it as free */ 426 /* Mark it as free */
413 spin_lock_irqsave(&ch->lock, flags);
414 ch->serving = NULL; 427 ch->serving = NULL;
415 spin_unlock_irqrestore(&ch->lock, flags); 428 spin_unlock_irqrestore(&ch->lock, flags);
416} 429}
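The hunk above widens the critical section: the channel lock is now taken before the hardware is terminated, not just around the "mark it free" step. A pthread sketch of why that matters, under the assumption (illustrative names only) that the lock protects the channel's ownership field:

#include <pthread.h>
#include <stdio.h>

struct phys_chan {
	pthread_mutex_t lock;
	void *serving;			/* owner, or NULL when the channel is free */
};

static void terminate_hw(struct phys_chan *ch)
{
	/* Stand-in for stopping the engine and clearing its interrupts. */
	(void)ch;
}

static void put_phys_chan(struct phys_chan *ch)
{
	/*
	 * Hold the lock across both steps so another thread cannot observe
	 * the channel as free (serving == NULL) while the hardware is still
	 * being shut down.
	 */
	pthread_mutex_lock(&ch->lock);
	terminate_hw(ch);
	ch->serving = NULL;
	pthread_mutex_unlock(&ch->lock);
}

int main(void)
{
	struct phys_chan ch = { PTHREAD_MUTEX_INITIALIZER, (void *)1 };

	put_phys_chan(&ch);
	printf("channel free: %s\n", ch.serving ? "no" : "yes");
	return 0;
}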
@@ -1449,7 +1462,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1449 plchan->state = PL08X_CHAN_IDLE; 1462 plchan->state = PL08X_CHAN_IDLE;
1450 1463
1451 if (plchan->phychan) { 1464 if (plchan->phychan) {
1452 pl08x_stop_phy_chan(plchan->phychan); 1465 pl08x_terminate_phy_chan(pl08x, plchan->phychan);
1453 1466
1454 /* 1467 /*
1455 * Mark physical channel as free and free any slave 1468 * Mark physical channel as free and free any slave
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index e53d438142bb..e18eaabe92b9 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -49,6 +49,7 @@ struct imxdma_channel {
49 49
50struct imxdma_engine { 50struct imxdma_engine {
51 struct device *dev; 51 struct device *dev;
52 struct device_dma_parameters dma_parms;
52 struct dma_device dma_device; 53 struct dma_device dma_device;
53 struct imxdma_channel channel[MAX_DMA_CHANNELS]; 54 struct imxdma_channel channel[MAX_DMA_CHANNELS];
54}; 55};
@@ -242,6 +243,21 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
242 else 243 else
243 dmamode = DMA_MODE_WRITE; 244 dmamode = DMA_MODE_WRITE;
244 245
246 switch (imxdmac->word_size) {
247 case DMA_SLAVE_BUSWIDTH_4_BYTES:
248 if (sgl->length & 3 || sgl->dma_address & 3)
249 return NULL;
250 break;
251 case DMA_SLAVE_BUSWIDTH_2_BYTES:
252 if (sgl->length & 1 || sgl->dma_address & 1)
253 return NULL;
254 break;
255 case DMA_SLAVE_BUSWIDTH_1_BYTE:
256 break;
257 default:
258 return NULL;
259 }
260
245 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, 261 ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
246 dma_length, imxdmac->per_address, dmamode); 262 dma_length, imxdmac->per_address, dmamode);
247 if (ret) 263 if (ret)
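The imx-dma hunk above rejects scatterlist entries whose length or DMA address is not aligned to the configured bus width. A stand-alone sketch of that check; the parameter names and return convention are generic, not the driver's:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Returns 0 if a transfer of 'len' bytes at 'addr' fits the bus width,
 * -1 otherwise.  Widths of 1, 2 and 4 bytes are the supported cases. */
static int check_alignment(uint64_t addr, size_t len, unsigned bus_width)
{
	switch (bus_width) {
	case 4:
		if ((len & 3) || (addr & 3))
			return -1;
		break;
	case 2:
		if ((len & 1) || (addr & 1))
			return -1;
		break;
	case 1:
		break;			/* byte transfers are always aligned */
	default:
		return -1;		/* unknown width: refuse the transfer */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_alignment(0x1000, 64, 4));	/* ok: 0 */
	printf("%d\n", check_alignment(0x1002, 64, 4));	/* misaligned: -1 */
	return 0;
}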
@@ -329,6 +345,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
329 345
330 INIT_LIST_HEAD(&imxdma->dma_device.channels); 346 INIT_LIST_HEAD(&imxdma->dma_device.channels);
331 347
348 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
349 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
350
332 /* Initialize channel parameters */ 351 /* Initialize channel parameters */
333 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 352 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
334 struct imxdma_channel *imxdmac = &imxdma->channel[i]; 353 struct imxdma_channel *imxdmac = &imxdma->channel[i];
@@ -346,11 +365,7 @@ static int __init imxdma_probe(struct platform_device *pdev)
346 imxdmac->imxdma = imxdma; 365 imxdmac->imxdma = imxdma;
347 spin_lock_init(&imxdmac->lock); 366 spin_lock_init(&imxdmac->lock);
348 367
349 dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
350 dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
351
352 imxdmac->chan.device = &imxdma->dma_device; 368 imxdmac->chan.device = &imxdma->dma_device;
353 imxdmac->chan.chan_id = i;
354 imxdmac->channel = i; 369 imxdmac->channel = i;
355 370
356 /* Add the channel to the DMAC list */ 371 /* Add the channel to the DMAC list */
@@ -370,6 +385,9 @@ static int __init imxdma_probe(struct platform_device *pdev)
370 385
371 platform_set_drvdata(pdev, imxdma); 386 platform_set_drvdata(pdev, imxdma);
372 387
388 imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
389 dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);
390
373 ret = dma_async_device_register(&imxdma->dma_device); 391 ret = dma_async_device_register(&imxdma->dma_device);
374 if (ret) { 392 if (ret) {
375 dev_err(&pdev->dev, "unable to register\n"); 393 dev_err(&pdev->dev, "unable to register\n");
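The probe hunks above advertise a maximum DMA segment size (0xffffff here) via dma_set_max_seg_size(). A user-space sketch of the consumer side of such a limit: splitting a buffer into chunks no larger than the advertised maximum. The splitting helper itself is illustrative, not part of the DMA API:

#include <stdio.h>
#include <stddef.h>

#define MAX_SEG_SIZE 0xffffffu		/* limit advertised by the device */

/* Walk a buffer in segments no larger than MAX_SEG_SIZE and report them. */
static unsigned split_into_segments(size_t total)
{
	size_t off = 0;
	unsigned nsegs = 0;

	while (off < total) {
		size_t chunk = total - off;

		if (chunk > MAX_SEG_SIZE)
			chunk = MAX_SEG_SIZE;
		printf("segment %u: offset %zu, length %zu\n", nsegs, off, chunk);
		off += chunk;
		nsegs++;
	}
	return nsegs;
}

int main(void)
{
	printf("total segments: %u\n", split_into_segments((size_t)40 << 20));
	return 0;
}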
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d5a5d4d9c19b..b6d1455fa936 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -230,7 +230,7 @@ struct sdma_engine;
230 * struct sdma_channel - housekeeping for a SDMA channel 230 * struct sdma_channel - housekeeping for a SDMA channel
231 * 231 *
232 * @sdma pointer to the SDMA engine for this channel 232 * @sdma pointer to the SDMA engine for this channel
233 * @channel the channel number, matches dmaengine chan_id 233 * @channel the channel number, matches dmaengine chan_id + 1
234 * @direction transfer type. Needed for setting SDMA script 234 * @direction transfer type. Needed for setting SDMA script
235 * @peripheral_type Peripheral type. Needed for setting SDMA script 235 * @peripheral_type Peripheral type. Needed for setting SDMA script
236 * @event_id0 aka dma request line 236 * @event_id0 aka dma request line
@@ -301,6 +301,7 @@ struct sdma_firmware_header {
301 301
302struct sdma_engine { 302struct sdma_engine {
303 struct device *dev; 303 struct device *dev;
304 struct device_dma_parameters dma_parms;
304 struct sdma_channel channel[MAX_DMA_CHANNELS]; 305 struct sdma_channel channel[MAX_DMA_CHANNELS];
305 struct sdma_channel_control *channel_control; 306 struct sdma_channel_control *channel_control;
306 void __iomem *regs; 307 void __iomem *regs;
@@ -449,7 +450,7 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
449 if (bd->mode.status & BD_RROR) 450 if (bd->mode.status & BD_RROR)
450 sdmac->status = DMA_ERROR; 451 sdmac->status = DMA_ERROR;
451 else 452 else
452 sdmac->status = DMA_SUCCESS; 453 sdmac->status = DMA_IN_PROGRESS;
453 454
454 bd->mode.status |= BD_DONE; 455 bd->mode.status |= BD_DONE;
455 sdmac->buf_tail++; 456 sdmac->buf_tail++;
@@ -770,15 +771,15 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
770 __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 771 __raw_writel(1 << channel, sdma->regs + SDMA_H_START);
771} 772}
772 773
773static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) 774static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdmac)
774{ 775{
775 dma_cookie_t cookie = sdma->chan.cookie; 776 dma_cookie_t cookie = sdmac->chan.cookie;
776 777
777 if (++cookie < 0) 778 if (++cookie < 0)
778 cookie = 1; 779 cookie = 1;
779 780
780 sdma->chan.cookie = cookie; 781 sdmac->chan.cookie = cookie;
781 sdma->desc.cookie = cookie; 782 sdmac->desc.cookie = cookie;
782 783
783 return cookie; 784 return cookie;
784} 785}
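sdma_assign_cookie() above hands out a monotonically increasing completion cookie and wraps it back to 1 rather than letting it go non-positive (0 and negative values are reserved for "no cookie"/error states). A minimal sketch of that allocator; it wraps explicitly at INT_MAX to stay within standard C, whereas the kernel relies on its signed-wraparound build flags:

#include <stdio.h>
#include <limits.h>

/* Cookies stay in [1, INT_MAX]; the counter never passes through 0 or
 * negative values. */
static int assign_cookie(int *last)
{
	int cookie = *last;

	if (cookie >= INT_MAX)
		cookie = 1;		/* wrap before overflowing */
	else
		cookie++;

	*last = cookie;
	return cookie;
}

int main(void)
{
	int last = INT_MAX - 1;		/* near the limit to show the wrap */

	printf("%d\n", assign_cookie(&last));	/* INT_MAX */
	printf("%d\n", assign_cookie(&last));	/* wraps to 1 */
	return 0;
}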
@@ -798,7 +799,7 @@ static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
798 799
799 cookie = sdma_assign_cookie(sdmac); 800 cookie = sdma_assign_cookie(sdmac);
800 801
801 sdma_enable_channel(sdma, tx->chan->chan_id); 802 sdma_enable_channel(sdma, sdmac->channel);
802 803
803 spin_unlock_irq(&sdmac->lock); 804 spin_unlock_irq(&sdmac->lock);
804 805
@@ -811,10 +812,6 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
811 struct imx_dma_data *data = chan->private; 812 struct imx_dma_data *data = chan->private;
812 int prio, ret; 813 int prio, ret;
813 814
814 /* No need to execute this for internal channel 0 */
815 if (chan->chan_id == 0)
816 return 0;
817
818 if (!data) 815 if (!data)
819 return -EINVAL; 816 return -EINVAL;
820 817
@@ -879,7 +876,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
879 struct sdma_channel *sdmac = to_sdma_chan(chan); 876 struct sdma_channel *sdmac = to_sdma_chan(chan);
880 struct sdma_engine *sdma = sdmac->sdma; 877 struct sdma_engine *sdma = sdmac->sdma;
881 int ret, i, count; 878 int ret, i, count;
882 int channel = chan->chan_id; 879 int channel = sdmac->channel;
883 struct scatterlist *sg; 880 struct scatterlist *sg;
884 881
885 if (sdmac->status == DMA_IN_PROGRESS) 882 if (sdmac->status == DMA_IN_PROGRESS)
@@ -924,22 +921,33 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
924 ret = -EINVAL; 921 ret = -EINVAL;
925 goto err_out; 922 goto err_out;
926 } 923 }
927 if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 924
925 switch (sdmac->word_size) {
926 case DMA_SLAVE_BUSWIDTH_4_BYTES:
928 bd->mode.command = 0; 927 bd->mode.command = 0;
929 else 928 if (count & 3 || sg->dma_address & 3)
930 bd->mode.command = sdmac->word_size; 929 return NULL;
930 break;
931 case DMA_SLAVE_BUSWIDTH_2_BYTES:
932 bd->mode.command = 2;
933 if (count & 1 || sg->dma_address & 1)
934 return NULL;
935 break;
936 case DMA_SLAVE_BUSWIDTH_1_BYTE:
937 bd->mode.command = 1;
938 break;
939 default:
940 return NULL;
941 }
931 942
932 param = BD_DONE | BD_EXTD | BD_CONT; 943 param = BD_DONE | BD_EXTD | BD_CONT;
933 944
934 if (sdmac->flags & IMX_DMA_SG_LOOP) { 945 if (i + 1 == sg_len) {
935 param |= BD_INTR; 946 param |= BD_INTR;
936 if (i + 1 == sg_len) 947 param |= BD_LAST;
937 param |= BD_WRAP; 948 param &= ~BD_CONT;
938 } 949 }
939 950
940 if (i + 1 == sg_len)
941 param |= BD_INTR;
942
943 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 951 dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n",
944 i, count, sg->dma_address, 952 i, count, sg->dma_address,
945 param & BD_WRAP ? "wrap" : "", 953 param & BD_WRAP ? "wrap" : "",
@@ -953,6 +961,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
953 961
954 return &sdmac->desc; 962 return &sdmac->desc;
955err_out: 963err_out:
964 sdmac->status = DMA_ERROR;
956 return NULL; 965 return NULL;
957} 966}
958 967
@@ -963,7 +972,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
963 struct sdma_channel *sdmac = to_sdma_chan(chan); 972 struct sdma_channel *sdmac = to_sdma_chan(chan);
964 struct sdma_engine *sdma = sdmac->sdma; 973 struct sdma_engine *sdma = sdmac->sdma;
965 int num_periods = buf_len / period_len; 974 int num_periods = buf_len / period_len;
966 int channel = chan->chan_id; 975 int channel = sdmac->channel;
967 int ret, i = 0, buf = 0; 976 int ret, i = 0, buf = 0;
968 977
969 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 978 dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
@@ -1066,14 +1075,12 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1066{ 1075{
1067 struct sdma_channel *sdmac = to_sdma_chan(chan); 1076 struct sdma_channel *sdmac = to_sdma_chan(chan);
1068 dma_cookie_t last_used; 1077 dma_cookie_t last_used;
1069 enum dma_status ret;
1070 1078
1071 last_used = chan->cookie; 1079 last_used = chan->cookie;
1072 1080
1073 ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used);
1074 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); 1081 dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
1075 1082
1076 return ret; 1083 return sdmac->status;
1077} 1084}
1078 1085
1079static void sdma_issue_pending(struct dma_chan *chan) 1086static void sdma_issue_pending(struct dma_chan *chan)
@@ -1135,7 +1142,7 @@ static int __init sdma_get_firmware(struct sdma_engine *sdma,
1135 /* download the RAM image for SDMA */ 1142 /* download the RAM image for SDMA */
1136 sdma_load_script(sdma, ram_code, 1143 sdma_load_script(sdma, ram_code,
1137 header->ram_code_size, 1144 header->ram_code_size,
1138 sdma->script_addrs->ram_code_start_addr); 1145 addr->ram_code_start_addr);
1139 clk_disable(sdma->clk); 1146 clk_disable(sdma->clk);
1140 1147
1141 sdma_add_scripts(sdma, addr); 1148 sdma_add_scripts(sdma, addr);
@@ -1237,7 +1244,6 @@ static int __init sdma_probe(struct platform_device *pdev)
1237 struct resource *iores; 1244 struct resource *iores;
1238 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1245 struct sdma_platform_data *pdata = pdev->dev.platform_data;
1239 int i; 1246 int i;
1240 dma_cap_mask_t mask;
1241 struct sdma_engine *sdma; 1247 struct sdma_engine *sdma;
1242 1248
1243 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1249 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL);
@@ -1280,6 +1286,9 @@ static int __init sdma_probe(struct platform_device *pdev)
1280 1286
1281 sdma->version = pdata->sdma_version; 1287 sdma->version = pdata->sdma_version;
1282 1288
1289 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1290 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1291
1283 INIT_LIST_HEAD(&sdma->dma_device.channels); 1292 INIT_LIST_HEAD(&sdma->dma_device.channels);
1284 /* Initialize channel parameters */ 1293 /* Initialize channel parameters */
1285 for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1294 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
@@ -1288,15 +1297,17 @@ static int __init sdma_probe(struct platform_device *pdev)
1288 sdmac->sdma = sdma; 1297 sdmac->sdma = sdma;
1289 spin_lock_init(&sdmac->lock); 1298 spin_lock_init(&sdmac->lock);
1290 1299
1291 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
1292 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
1293
1294 sdmac->chan.device = &sdma->dma_device; 1300 sdmac->chan.device = &sdma->dma_device;
1295 sdmac->chan.chan_id = i;
1296 sdmac->channel = i; 1301 sdmac->channel = i;
1297 1302
1298 /* Add the channel to the DMAC list */ 1303 /*
1299 list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); 1304 * Add the channel to the DMAC list. Do not add channel 0 though
1305 * because we need it internally in the SDMA driver. This also means
1306 * that channel 0 in dmaengine counting matches sdma channel 1.
1307 */
1308 if (i)
1309 list_add_tail(&sdmac->chan.device_node,
1310 &sdma->dma_device.channels);
1300 } 1311 }
1301 1312
1302 ret = sdma_init(sdma); 1313 ret = sdma_init(sdma);
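The probe hunk above stops exposing SDMA channel 0 to dmaengine because the driver needs it internally, so client-visible channel N maps to hardware channel N+1 (as the kerneldoc hunk earlier notes). A small sketch of reserving entry 0 of a table for internal use while exporting the rest; the names are illustrative:

#include <stdio.h>

#define NUM_HW_CHANNELS 8

struct hw_chan {
	int id;
	int exported;			/* visible to clients? */
};

int main(void)
{
	struct hw_chan chans[NUM_HW_CHANNELS];
	int public_index = 0;

	for (int i = 0; i < NUM_HW_CHANNELS; i++) {
		chans[i].id = i;
		/* Channel 0 stays internal; everything else is exported, so
		 * the first client-visible channel is hardware channel 1. */
		chans[i].exported = (i != 0);
	}

	for (int i = 0; i < NUM_HW_CHANNELS; i++) {
		if (!chans[i].exported)
			continue;
		printf("client channel %d -> hw channel %d\n",
		       public_index++, chans[i].id);
	}
	return 0;
}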
@@ -1317,6 +1328,8 @@ static int __init sdma_probe(struct platform_device *pdev)
1317 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1328 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
1318 sdma->dma_device.device_control = sdma_control; 1329 sdma->dma_device.device_control = sdma_control;
1319 sdma->dma_device.device_issue_pending = sdma_issue_pending; 1330 sdma->dma_device.device_issue_pending = sdma_issue_pending;
1331 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
1332 dma_set_max_seg_size(sdma->dma_device.dev, 65535);
1320 1333
1321 ret = dma_async_device_register(&sdma->dma_device); 1334 ret = dma_async_device_register(&sdma->dma_device);
1322 if (ret) { 1335 if (ret) {
@@ -1324,13 +1337,6 @@ static int __init sdma_probe(struct platform_device *pdev)
1324 goto err_init; 1337 goto err_init;
1325 } 1338 }
1326 1339
1327 /* request channel 0. This is an internal control channel
1328 * to the SDMA engine and not available to clients.
1329 */
1330 dma_cap_zero(mask);
1331 dma_cap_set(DMA_SLAVE, mask);
1332 dma_request_channel(mask, NULL, NULL);
1333
1334 dev_info(sdma->dev, "initialized\n"); 1340 dev_info(sdma->dev, "initialized\n");
1335 1341
1336 return 0; 1342 return 0;
@@ -1348,7 +1354,7 @@ err_clk:
1348err_request_region: 1354err_request_region:
1349err_irq: 1355err_irq:
1350 kfree(sdma); 1356 kfree(sdma);
1351 return 0; 1357 return ret;
1352} 1358}
1353 1359
1354static int __exit sdma_remove(struct platform_device *pdev) 1360static int __exit sdma_remove(struct platform_device *pdev)
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index cb26ee9773d6..c1a125e7d1df 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1145,29 +1145,6 @@ static int ipu_disable_channel(struct idmac *idmac, struct idmac_channel *ichan,
1145 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN); 1145 reg = idmac_read_icreg(ipu, IDMAC_CHA_EN);
1146 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN); 1146 idmac_write_icreg(ipu, reg & ~chan_mask, IDMAC_CHA_EN);
1147 1147
1148 /*
1149 * Problem (observed with channel DMAIC_7): after enabling the channel
1150 * and initialising buffers, there comes an interrupt with current still
1151 * pointing at buffer 0, whereas it should use buffer 0 first and only
1152 * generate an interrupt when it is done, then current should already
1153 * point to buffer 1. This spurious interrupt also comes on channel
1154 * DMASDC_0. With DMAIC_7 normally, is we just leave the ISR after the
1155 * first interrupt, there comes the second with current correctly
1156 * pointing to buffer 1 this time. But sometimes this second interrupt
1157 * doesn't come and the channel hangs. Clearing BUFx_RDY when disabling
1158 * the channel seems to prevent the channel from hanging, but it doesn't
1159 * prevent the spurious interrupt. This might also be unsafe. Think
1160 * about the IDMAC controller trying to switch to a buffer, when we
1161 * clear the ready bit, and re-enable it a moment later.
1162 */
1163 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF0_RDY);
1164 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF0_RDY);
1165 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF0_RDY);
1166
1167 reg = idmac_read_ipureg(ipu, IPU_CHA_BUF1_RDY);
1168 idmac_write_ipureg(ipu, 0, IPU_CHA_BUF1_RDY);
1169 idmac_write_ipureg(ipu, reg & ~(1UL << channel), IPU_CHA_BUF1_RDY);
1170
1171 spin_unlock_irqrestore(&ipu->lock, flags); 1148 spin_unlock_irqrestore(&ipu->lock, flags);
1172 1149
1173 return 0; 1150 return 0;
@@ -1246,33 +1223,6 @@ static irqreturn_t idmac_interrupt(int irq, void *dev_id)
1246 1223
1247 /* Other interrupts do not interfere with this channel */ 1224 /* Other interrupts do not interfere with this channel */
1248 spin_lock(&ichan->lock); 1225 spin_lock(&ichan->lock);
1249 if (unlikely(chan_id != IDMAC_SDC_0 && chan_id != IDMAC_SDC_1 &&
1250 ((curbuf >> chan_id) & 1) == ichan->active_buffer &&
1251 !list_is_last(ichan->queue.next, &ichan->queue))) {
1252 int i = 100;
1253
1254 /* This doesn't help. See comment in ipu_disable_channel() */
1255 while (--i) {
1256 curbuf = idmac_read_ipureg(&ipu_data, IPU_CHA_CUR_BUF);
1257 if (((curbuf >> chan_id) & 1) != ichan->active_buffer)
1258 break;
1259 cpu_relax();
1260 }
1261
1262 if (!i) {
1263 spin_unlock(&ichan->lock);
1264 dev_dbg(dev,
1265 "IRQ on active buffer on channel %x, active "
1266 "%d, ready %x, %x, current %x!\n", chan_id,
1267 ichan->active_buffer, ready0, ready1, curbuf);
1268 return IRQ_NONE;
1269 } else
1270 dev_dbg(dev,
1271 "Buffer deactivated on channel %x, active "
1272 "%d, ready %x, %x, current %x, rest %d!\n", chan_id,
1273 ichan->active_buffer, ready0, ready1, curbuf, i);
1274 }
1275
1276 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) || 1226 if (unlikely((ichan->active_buffer && (ready1 >> chan_id) & 1) ||
1277 (!ichan->active_buffer && (ready0 >> chan_id) & 1) 1227 (!ichan->active_buffer && (ready0 >> chan_id) & 1)
1278 )) { 1228 )) {
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index e28e41668177..bcb1126e3d00 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -378,10 +378,17 @@ static void __init print_filtered(const char *info)
378 378
379static void __init dmi_dump_ids(void) 379static void __init dmi_dump_ids(void)
380{ 380{
381 const char *board; /* Board Name is optional */
382
381 printk(KERN_DEBUG "DMI: "); 383 printk(KERN_DEBUG "DMI: ");
382 print_filtered(dmi_get_system_info(DMI_BOARD_NAME)); 384 print_filtered(dmi_get_system_info(DMI_SYS_VENDOR));
383 printk(KERN_CONT "/"); 385 printk(KERN_CONT " ");
384 print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME)); 386 print_filtered(dmi_get_system_info(DMI_PRODUCT_NAME));
387 board = dmi_get_system_info(DMI_BOARD_NAME);
388 if (board) {
389 printk(KERN_CONT "/");
390 print_filtered(board);
391 }
385 printk(KERN_CONT ", BIOS "); 392 printk(KERN_CONT ", BIOS ");
386 print_filtered(dmi_get_system_info(DMI_BIOS_VERSION)); 393 print_filtered(dmi_get_system_info(DMI_BIOS_VERSION));
387 printk(KERN_CONT " "); 394 printk(KERN_CONT " ");
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c
index cead8e6ff345..7f6f01a4b145 100644
--- a/drivers/gpio/ml_ioh_gpio.c
+++ b/drivers/gpio/ml_ioh_gpio.c
@@ -326,6 +326,7 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = {
326 { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, 326 { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) },
327 { 0, } 327 { 0, }
328}; 328};
329MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id);
329 330
330static struct pci_driver ioh_gpio_driver = { 331static struct pci_driver ioh_gpio_driver = {
331 .name = "ml_ioh_gpio", 332 .name = "ml_ioh_gpio",
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c
index 0eba0a75c804..2c6af8705103 100644
--- a/drivers/gpio/pch_gpio.c
+++ b/drivers/gpio/pch_gpio.c
@@ -286,6 +286,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = {
286 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, 286 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) },
287 { 0, } 287 { 0, }
288}; 288};
289MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id);
289 290
290static struct pci_driver pch_gpio_driver = { 291static struct pci_driver pch_gpio_driver = {
291 .name = "pch_gpio", 292 .name = "pch_gpio",
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 6977a1ce9d98..f73ef4390db6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
672 struct drm_crtc_helper_funcs *crtc_funcs; 672 struct drm_crtc_helper_funcs *crtc_funcs;
673 u16 *red, *green, *blue, *transp; 673 u16 *red, *green, *blue, *transp;
674 struct drm_crtc *crtc; 674 struct drm_crtc *crtc;
675 int i, rc = 0; 675 int i, j, rc = 0;
676 int start; 676 int start;
677 677
678 for (i = 0; i < fb_helper->crtc_count; i++) { 678 for (i = 0; i < fb_helper->crtc_count; i++) {
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
685 transp = cmap->transp; 685 transp = cmap->transp;
686 start = cmap->start; 686 start = cmap->start;
687 687
688 for (i = 0; i < cmap->len; i++) { 688 for (j = 0; j < cmap->len; j++) {
689 u16 hred, hgreen, hblue, htransp = 0xffff; 689 u16 hred, hgreen, hblue, htransp = 0xffff;
690 690
691 hred = *red++; 691 hred = *red++;
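The setcmap fix above is the classic "reused loop counter" bug: the inner per-entry loop was advancing i, the index of the outer per-CRTC loop, so the outer loop terminated early. A minimal plain-C illustration of keeping the two iterations on separate counters, which is what the fix does:

#include <stdio.h>

#define NCRTC   2
#define NCOLORS 3

int main(void)
{
	int entries = 0;

	/* If the inner loop also used i, it would drive the outer loop's
	 * termination and only one CRTC would ever be processed.  A
	 * separate j keeps the iterations independent. */
	for (int i = 0; i < NCRTC; i++) {
		for (int j = 0; j < NCOLORS; j++)
			entries++;	/* one gamma entry per (crtc, slot) */
	}

	printf("processed %d entries (expected %d)\n", entries, NCRTC * NCOLORS);
	return 0;
}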
diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
index 3cdbaf379bb5..be9a9c07d152 100644
--- a/drivers/gpu/drm/drm_info.c
+++ b/drivers/gpu/drm/drm_info.c
@@ -283,17 +283,18 @@ int drm_vma_info(struct seq_file *m, void *data)
283#endif 283#endif
284 284
285 mutex_lock(&dev->struct_mutex); 285 mutex_lock(&dev->struct_mutex);
286 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n", 286 seq_printf(m, "vma use count: %d, high_memory = %pK, 0x%pK\n",
287 atomic_read(&dev->vma_count), 287 atomic_read(&dev->vma_count),
288 high_memory, (u64)virt_to_phys(high_memory)); 288 high_memory, (void *)virt_to_phys(high_memory));
289 289
290 list_for_each_entry(pt, &dev->vmalist, head) { 290 list_for_each_entry(pt, &dev->vmalist, head) {
291 vma = pt->vma; 291 vma = pt->vma;
292 if (!vma) 292 if (!vma)
293 continue; 293 continue;
294 seq_printf(m, 294 seq_printf(m,
295 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000", 295 "\n%5d 0x%pK-0x%pK %c%c%c%c%c%c 0x%08lx000",
296 pt->pid, vma->vm_start, vma->vm_end, 296 pt->pid,
297 (void *)vma->vm_start, (void *)vma->vm_end,
297 vma->vm_flags & VM_READ ? 'r' : '-', 298 vma->vm_flags & VM_READ ? 'r' : '-',
298 vma->vm_flags & VM_WRITE ? 'w' : '-', 299 vma->vm_flags & VM_WRITE ? 'w' : '-',
299 vma->vm_flags & VM_EXEC ? 'x' : '-', 300 vma->vm_flags & VM_EXEC ? 'x' : '-',
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 3dadfa2a8528..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
164 * available. In that case we can't account for this and just 164 * available. In that case we can't account for this and just
165 * hope for the best. 165 * hope for the best.
166 */ 166 */
167 if ((vblrc > 0) && (abs(diff_ns) > 1000000)) 167 if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
168 atomic_inc(&dev->_vblank_count[crtc]); 168 atomic_inc(&dev->_vblank_count[crtc]);
169 smp_mb__after_atomic_inc();
170 }
169 171
170 /* Invalidate all timestamps while vblank irq's are off. */ 172 /* Invalidate all timestamps while vblank irq's are off. */
171 clear_vblank_timestamps(dev, crtc); 173 clear_vblank_timestamps(dev, crtc);
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc)
491 /* Dot clock in Hz: */ 493 /* Dot clock in Hz: */
492 dotclock = (u64) crtc->hwmode.clock * 1000; 494 dotclock = (u64) crtc->hwmode.clock * 1000;
493 495
 496 /* Fields of interlaced scanout modes are only half a frame duration.
 497 * Double the dotclock to get half the frame-/line-/pixel duration.
498 */
499 if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE)
500 dotclock *= 2;
501
494 /* Valid dotclock? */ 502 /* Valid dotclock? */
495 if (dotclock > 0) { 503 if (dotclock > 0) {
496 /* Convert scanline length in pixels and video dot clock to 504 /* Convert scanline length in pixels and video dot clock to
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
603 return -EAGAIN; 611 return -EAGAIN;
604 } 612 }
605 613
606 /* Don't know yet how to handle interlaced or
607 * double scan modes. Just no-op for now.
608 */
609 if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) {
610 DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc);
611 return -ENOTSUPP;
612 }
613
614 /* Get current scanout position with system timestamp. 614 /* Get current scanout position with system timestamp.
615 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times 615 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
616 * if single query takes longer than max_error nanoseconds. 616 * if single query takes longer than max_error nanoseconds.
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
858 if (rc) { 858 if (rc) {
859 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; 859 tslot = atomic_read(&dev->_vblank_count[crtc]) + diff;
860 vblanktimestamp(dev, crtc, tslot) = t_vblank; 860 vblanktimestamp(dev, crtc, tslot) = t_vblank;
861 smp_wmb();
862 } 861 }
863 862
863 smp_mb__before_atomic_inc();
864 atomic_add(diff, &dev->_vblank_count[crtc]); 864 atomic_add(diff, &dev->_vblank_count[crtc]);
865 smp_mb__after_atomic_inc();
865} 866}
866 867
867/** 868/**
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1011 struct drm_file *file_priv) 1012 struct drm_file *file_priv)
1012{ 1013{
1013 struct drm_modeset_ctl *modeset = data; 1014 struct drm_modeset_ctl *modeset = data;
1014 int crtc, ret = 0; 1015 int ret = 0;
1016 unsigned int crtc;
1015 1017
1016 /* If drm_vblank_init() hasn't been called yet, just no-op */ 1018 /* If drm_vblank_init() hasn't been called yet, just no-op */
1017 if (!dev->num_crtcs) 1019 if (!dev->num_crtcs)
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
1293 * e.g., due to spurious vblank interrupts. We need to 1295 * e.g., due to spurious vblank interrupts. We need to
1294 * ignore those for accounting. 1296 * ignore those for accounting.
1295 */ 1297 */
1296 if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { 1298 if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) {
1297 /* Store new timestamp in ringbuffer. */ 1299 /* Store new timestamp in ringbuffer. */
1298 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; 1300 vblanktimestamp(dev, crtc, vblcount + 1) = tvblank;
1299 smp_wmb();
1300 1301
1301 /* Increment cooked vblank count. This also atomically commits 1302 /* Increment cooked vblank count. This also atomically commits
1302 * the timestamp computed above. 1303 * the timestamp computed above.
1303 */ 1304 */
1305 smp_mb__before_atomic_inc();
1304 atomic_inc(&dev->_vblank_count[crtc]); 1306 atomic_inc(&dev->_vblank_count[crtc]);
1307 smp_mb__after_atomic_inc();
1305 } else { 1308 } else {
1306 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", 1309 DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n",
1307 crtc, (int) diff_ns); 1310 crtc, (int) diff_ns);
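The drm_irq hunks above pair the vblank timestamp store with memory barriers around the atomic counter increment, so a reader that observes the new count also observes the timestamp written for that slot. A user-space sketch of the same publish/consume ordering using C11 release/acquire atomics; the ringbuffer here is illustrative, not the DRM one:

#include <stdatomic.h>
#include <stdio.h>

#define SLOTS 4

static long timestamps[SLOTS];
static atomic_uint vblank_count;

/* Writer: fill the slot for the next count, then publish the count with
 * release ordering (the kernel uses smp_mb__before/after_atomic_inc). */
static void record_vblank(long ts)
{
	unsigned next = atomic_load_explicit(&vblank_count,
					     memory_order_relaxed) + 1;

	timestamps[next % SLOTS] = ts;
	atomic_store_explicit(&vblank_count, next, memory_order_release);
}

/* Reader: acquire the count; the matching timestamp is then guaranteed
 * to be visible. */
static long latest_timestamp(void)
{
	unsigned count = atomic_load_explicit(&vblank_count,
					      memory_order_acquire);

	return timestamps[count % SLOTS];
}

int main(void)
{
	record_vblank(1000);
	record_vblank(1016);
	printf("count sees timestamp %ld\n", latest_timestamp());
	return 0;
}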
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 3601466c5502..4ff9b6cc973f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
865 int max_freq; 865 int max_freq;
866 866
867 /* RPSTAT1 is in the GT power well */ 867 /* RPSTAT1 is in the GT power well */
868 __gen6_force_wake_get(dev_priv); 868 __gen6_gt_force_wake_get(dev_priv);
869 869
870 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); 870 seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
871 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); 871 seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", 888 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
889 max_freq * 100); 889 max_freq * 100);
890 890
891 __gen6_force_wake_put(dev_priv); 891 __gen6_gt_force_wake_put(dev_priv);
892 } else { 892 } else {
893 seq_printf(m, "no P-state info available\n"); 893 seq_printf(m, "no P-state info available\n");
894 } 894 }
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 17bd766f2081..e33d9be7df3b 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1895 if (IS_GEN2(dev)) 1895 if (IS_GEN2(dev))
1896 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); 1896 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
1897 1897
1898 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1899 * using 32bit addressing, overwriting memory if HWS is located
1900 * above 4GB.
1901 *
1902 * The documentation also mentions an issue with undefined
1903 * behaviour if any general state is accessed within a page above 4GB,
1904 * which also needs to be handled carefully.
1905 */
1906 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1907 dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
1908
1898 mmio_bar = IS_GEN2(dev) ? 1 : 0; 1909 mmio_bar = IS_GEN2(dev) ? 1 : 0;
1899 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); 1910 dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
1900 if (!dev_priv->regs) { 1911 if (!dev_priv->regs) {
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cfb56d0ff367..22ec066adae6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -46,6 +46,12 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
46unsigned int i915_powersave = 1; 46unsigned int i915_powersave = 1;
47module_param_named(powersave, i915_powersave, int, 0600); 47module_param_named(powersave, i915_powersave, int, 0600);
48 48
49unsigned int i915_semaphores = 0;
50module_param_named(semaphores, i915_semaphores, int, 0600);
51
52unsigned int i915_enable_rc6 = 0;
53module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
54
49unsigned int i915_lvds_downclock = 0; 55unsigned int i915_lvds_downclock = 0;
50module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); 56module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
51 57
@@ -251,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev)
251 } 257 }
252} 258}
253 259
254void __gen6_force_wake_get(struct drm_i915_private *dev_priv) 260void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
255{ 261{
256 int count; 262 int count;
257 263
@@ -267,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
267 udelay(10); 273 udelay(10);
268} 274}
269 275
270void __gen6_force_wake_put(struct drm_i915_private *dev_priv) 276void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
271{ 277{
272 I915_WRITE_NOTRACE(FORCEWAKE, 0); 278 I915_WRITE_NOTRACE(FORCEWAKE, 0);
273 POSTING_READ(FORCEWAKE); 279 POSTING_READ(FORCEWAKE);
274} 280}
275 281
282void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
283{
284 int loop = 500;
285 u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
286 while (fifo < 20 && loop--) {
287 udelay(10);
288 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
289 }
290}
291
276static int i915_drm_freeze(struct drm_device *dev) 292static int i915_drm_freeze(struct drm_device *dev)
277{ 293{
278 struct drm_i915_private *dev_priv = dev->dev_private; 294 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -360,7 +376,7 @@ static int i915_drm_thaw(struct drm_device *dev)
360 /* Resume the modeset for every activated CRTC */ 376 /* Resume the modeset for every activated CRTC */
361 drm_helper_resume_force_mode(dev); 377 drm_helper_resume_force_mode(dev);
362 378
363 if (dev_priv->renderctx && dev_priv->pwrctx) 379 if (IS_IRONLAKE_M(dev))
364 ironlake_enable_rc6(dev); 380 ironlake_enable_rc6(dev);
365 } 381 }
366 382
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a0149c619cdd..456f40484838 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -956,8 +956,10 @@ extern struct drm_ioctl_desc i915_ioctls[];
956extern int i915_max_ioctl; 956extern int i915_max_ioctl;
957extern unsigned int i915_fbpercrtc; 957extern unsigned int i915_fbpercrtc;
958extern unsigned int i915_powersave; 958extern unsigned int i915_powersave;
959extern unsigned int i915_semaphores;
959extern unsigned int i915_lvds_downclock; 960extern unsigned int i915_lvds_downclock;
960extern unsigned int i915_panel_use_ssc; 961extern unsigned int i915_panel_use_ssc;
962extern unsigned int i915_enable_rc6;
961 963
962extern int i915_suspend(struct drm_device *dev, pm_message_t state); 964extern int i915_suspend(struct drm_device *dev, pm_message_t state);
963extern int i915_resume(struct drm_device *dev); 965extern int i915_resume(struct drm_device *dev);
@@ -1176,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
1176void i915_gem_free_all_phys_object(struct drm_device *dev); 1178void i915_gem_free_all_phys_object(struct drm_device *dev);
1177void i915_gem_release(struct drm_device *dev, struct drm_file *file); 1179void i915_gem_release(struct drm_device *dev, struct drm_file *file);
1178 1180
1181uint32_t
1182i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
1183
1179/* i915_gem_gtt.c */ 1184/* i915_gem_gtt.c */
1180void i915_gem_restore_gtt_mappings(struct drm_device *dev); 1185void i915_gem_restore_gtt_mappings(struct drm_device *dev);
1181int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); 1186int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
@@ -1352,22 +1357,32 @@ __i915_write(64, q)
1352 * must be set to prevent GT core from power down and stale values being 1357 * must be set to prevent GT core from power down and stale values being
1353 * returned. 1358 * returned.
1354 */ 1359 */
1355void __gen6_force_wake_get(struct drm_i915_private *dev_priv); 1360void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
1356void __gen6_force_wake_put (struct drm_i915_private *dev_priv); 1361void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
1357static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) 1362void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
1363
1364static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg)
1358{ 1365{
1359 u32 val; 1366 u32 val;
1360 1367
1361 if (dev_priv->info->gen >= 6) { 1368 if (dev_priv->info->gen >= 6) {
1362 __gen6_force_wake_get(dev_priv); 1369 __gen6_gt_force_wake_get(dev_priv);
1363 val = I915_READ(reg); 1370 val = I915_READ(reg);
1364 __gen6_force_wake_put(dev_priv); 1371 __gen6_gt_force_wake_put(dev_priv);
1365 } else 1372 } else
1366 val = I915_READ(reg); 1373 val = I915_READ(reg);
1367 1374
1368 return val; 1375 return val;
1369} 1376}
1370 1377
1378static inline void i915_gt_write(struct drm_i915_private *dev_priv,
1379 u32 reg, u32 val)
1380{
1381 if (dev_priv->info->gen >= 6)
1382 __gen6_gt_wait_for_fifo(dev_priv);
1383 I915_WRITE(reg, val);
1384}
1385
1371static inline void 1386static inline void
1372i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) 1387i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
1373{ 1388{
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index cf4f74c7c6fb..36e66cc5225e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
1398 * Return the required GTT alignment for an object, only taking into account 1398 * Return the required GTT alignment for an object, only taking into account
1399 * unfenced tiled surface requirements. 1399 * unfenced tiled surface requirements.
1400 */ 1400 */
1401static uint32_t 1401uint32_t
1402i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) 1402i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
1403{ 1403{
1404 struct drm_device *dev = obj->base.dev; 1404 struct drm_device *dev = obj->base.dev;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2f445e825f2..50ab1614571c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
772 if (from == NULL || to == from) 772 if (from == NULL || to == from)
773 return 0; 773 return 0;
774 774
775 /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ 775 /* XXX gpu semaphores are implicated in various hard hangs on SNB */
776 if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) 776 if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
777 return i915_gem_object_wait_rendering(obj, true); 777 return i915_gem_object_wait_rendering(obj, true);
778 778
779 idx = intel_ring_sync_index(from, to); 779 idx = intel_ring_sync_index(from, to);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..d64843e18df2 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
349 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && 349 (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
350 i915_gem_object_fence_ok(obj, args->tiling_mode)); 350 i915_gem_object_fence_ok(obj, args->tiling_mode));
351 351
352 obj->tiling_changed = true; 352 /* Rebind if we need a change of alignment */
353 obj->tiling_mode = args->tiling_mode; 353 if (!obj->map_and_fenceable) {
354 obj->stride = args->stride; 354 u32 unfenced_alignment =
355 i915_gem_get_unfenced_gtt_alignment(obj);
356 if (obj->gtt_offset & (unfenced_alignment - 1))
357 ret = i915_gem_object_unbind(obj);
358 }
359
360 if (ret == 0) {
361 obj->tiling_changed = true;
362 obj->tiling_mode = args->tiling_mode;
363 obj->stride = args->stride;
364 }
355 } 365 }
366 /* we have to maintain this existing ABI... */
367 args->stride = obj->stride;
368 args->tiling_mode = obj->tiling_mode;
356 drm_gem_object_unreference(&obj->base); 369 drm_gem_object_unreference(&obj->base);
357 mutex_unlock(&dev->struct_mutex); 370 mutex_unlock(&dev->struct_mutex);
358 371
359 return 0; 372 return ret;
360} 373}
361 374
362/** 375/**
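The set_tiling hunk above writes the tiling mode and stride that were actually applied back into the ioctl arguments ("we have to maintain this existing ABI"), so userspace learns the effective values even when its request was adjusted or refused. A sketch of that request/echo pattern; the struct and the 4096 limit are made up for illustration:

#include <stdio.h>

struct tiling_args {
	unsigned mode;			/* requested on entry, applied on exit */
	unsigned stride;
};

struct object {
	unsigned mode;
	unsigned stride;
};

static int set_tiling(struct object *obj, struct tiling_args *args)
{
	int ret = 0;

	/* Accept only strides up to an arbitrary limit for the sketch. */
	if (args->stride <= 4096) {
		obj->mode = args->mode;
		obj->stride = args->stride;
	} else {
		ret = -1;		/* request rejected, object unchanged */
	}

	/* Always echo the values that are actually in effect. */
	args->mode = obj->mode;
	args->stride = obj->stride;
	return ret;
}

int main(void)
{
	struct object obj = { 0, 512 };
	struct tiling_args req = { 1, 8192 };
	int ret = set_tiling(&obj, &req);

	printf("ret=%d, effective mode=%u stride=%u\n", ret, req.mode, req.stride);
	return 0;
}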
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
316 struct drm_mode_config *mode_config = &dev->mode_config; 316 struct drm_mode_config *mode_config = &dev->mode_config;
317 struct intel_encoder *encoder; 317 struct intel_encoder *encoder;
318 318
319 DRM_DEBUG_KMS("running encoder hotplug functions\n");
320
319 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 321 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
320 if (encoder->hot_plug) 322 if (encoder->hot_plug)
321 encoder->hot_plug(encoder); 323 encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1649 } else { 1651 } else {
1650 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1652 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1651 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1653 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1652 hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; 1654 hotplug_mask |= SDE_AUX_MASK;
1653 I915_WRITE(FDI_RXA_IMR, 0);
1654 I915_WRITE(FDI_RXB_IMR, 0);
1655 } 1655 }
1656 1656
1657 dev_priv->pch_irq_mask = ~hotplug_mask; 1657 dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5cfc68940f17..2abe240dae58 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -174,7 +174,9 @@
174 * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! 174 * address/value pairs. Don't overdue it, though, x <= 2^4 must hold!
175 */ 175 */
176#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 176#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
177#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */ 177#define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */
178#define MI_INVALIDATE_TLB (1<<18)
179#define MI_INVALIDATE_BSD (1<<7)
178#define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 180#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
179#define MI_BATCH_NON_SECURE (1) 181#define MI_BATCH_NON_SECURE (1)
180#define MI_BATCH_NON_SECURE_I965 (1<<8) 182#define MI_BATCH_NON_SECURE_I965 (1<<8)
@@ -3269,6 +3271,8 @@
3269#define FORCEWAKE 0xA18C 3271#define FORCEWAKE 0xA18C
3270#define FORCEWAKE_ACK 0x130090 3272#define FORCEWAKE_ACK 0x130090
3271 3273
3274#define GT_FIFO_FREE_ENTRIES 0x120008
3275
3272#define GEN6_RPNSWREQ 0xA008 3276#define GEN6_RPNSWREQ 0xA008
3273#define GEN6_TURBO_DISABLE (1<<31) 3277#define GEN6_TURBO_DISABLE (1<<31)
3274#define GEN6_FREQUENCY(x) ((x)<<25) 3278#define GEN6_FREQUENCY(x) ((x)<<25)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 7e42aa586504..49fb54fd9a18 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1219 u32 blt_ecoskpd; 1219 u32 blt_ecoskpd;
1220 1220
1221 /* Make sure blitter notifies FBC of writes */ 1221 /* Make sure blitter notifies FBC of writes */
1222 __gen6_force_wake_get(dev_priv); 1222 __gen6_gt_force_wake_get(dev_priv);
1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); 1223 blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD);
1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << 1224 blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY <<
1225 GEN6_BLITTER_LOCK_SHIFT; 1225 GEN6_BLITTER_LOCK_SHIFT;
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev)
1230 GEN6_BLITTER_LOCK_SHIFT); 1230 GEN6_BLITTER_LOCK_SHIFT);
1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); 1231 I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd);
1232 POSTING_READ(GEN6_BLITTER_ECOSKPD); 1232 POSTING_READ(GEN6_BLITTER_ECOSKPD);
1233 __gen6_force_wake_put(dev_priv); 1233 __gen6_gt_force_wake_put(dev_priv);
1234} 1234}
1235 1235
1236static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) 1236static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1631 1631
1632 wait_event(dev_priv->pending_flip_queue, 1632 wait_event(dev_priv->pending_flip_queue,
1633 atomic_read(&dev_priv->mm.wedged) ||
1633 atomic_read(&obj->pending_flip) == 0); 1634 atomic_read(&obj->pending_flip) == 0);
1634 1635
1635 /* Big Hammer, we also need to ensure that any pending 1636 /* Big Hammer, we also need to ensure that any pending
1636 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 1637 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1637 * current scanout is retired before unpinning the old 1638 * current scanout is retired before unpinning the old
1638 * framebuffer. 1639 * framebuffer.
1640 *
1641 * This should only fail upon a hung GPU, in which case we
1642 * can safely continue.
1639 */ 1643 */
1640 ret = i915_gem_object_flush_gpu(obj, false); 1644 ret = i915_gem_object_flush_gpu(obj, false);
1641 if (ret) { 1645 (void) ret;
1642 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1643 mutex_unlock(&dev->struct_mutex);
1644 return ret;
1645 }
1646 } 1646 }
1647 1647
1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2045 atomic_read(&obj->pending_flip) == 0); 2045 atomic_read(&obj->pending_flip) == 0);
2046} 2046}
2047 2047
2048static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2049{
2050 struct drm_device *dev = crtc->dev;
2051 struct drm_mode_config *mode_config = &dev->mode_config;
2052 struct intel_encoder *encoder;
2053
2054 /*
2055 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2056 * must be driven by its own crtc; no sharing is possible.
2057 */
2058 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2059 if (encoder->base.crtc != crtc)
2060 continue;
2061
2062 switch (encoder->type) {
2063 case INTEL_OUTPUT_EDP:
2064 if (!intel_encoder_is_pch_edp(&encoder->base))
2065 return false;
2066 continue;
2067 }
2068 }
2069
2070 return true;
2071}
2072
2048static void ironlake_crtc_enable(struct drm_crtc *crtc) 2073static void ironlake_crtc_enable(struct drm_crtc *crtc)
2049{ 2074{
2050 struct drm_device *dev = crtc->dev; 2075 struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2053 int pipe = intel_crtc->pipe; 2078 int pipe = intel_crtc->pipe;
2054 int plane = intel_crtc->plane; 2079 int plane = intel_crtc->plane;
2055 u32 reg, temp; 2080 u32 reg, temp;
2081 bool is_pch_port = false;
2056 2082
2057 if (intel_crtc->active) 2083 if (intel_crtc->active)
2058 return; 2084 return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2066 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2092 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2067 } 2093 }
2068 2094
2069 ironlake_fdi_enable(crtc); 2095 is_pch_port = intel_crtc_driving_pch(crtc);
2096
2097 if (is_pch_port)
2098 ironlake_fdi_enable(crtc);
2099 else {
2100 /* disable CPU FDI tx and PCH FDI rx */
2101 reg = FDI_TX_CTL(pipe);
2102 temp = I915_READ(reg);
2103 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2104 POSTING_READ(reg);
2105
2106 reg = FDI_RX_CTL(pipe);
2107 temp = I915_READ(reg);
2108 temp &= ~(0x7 << 16);
2109 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2110 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2111
2112 POSTING_READ(reg);
2113 udelay(100);
2114
2115 /* Ironlake workaround, disable clock pointer after downing FDI */
2116 if (HAS_PCH_IBX(dev))
2117 I915_WRITE(FDI_RX_CHICKEN(pipe),
2118 I915_READ(FDI_RX_CHICKEN(pipe) &
2119 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2120
2121 /* still set train pattern 1 */
2122 reg = FDI_TX_CTL(pipe);
2123 temp = I915_READ(reg);
2124 temp &= ~FDI_LINK_TRAIN_NONE;
2125 temp |= FDI_LINK_TRAIN_PATTERN_1;
2126 I915_WRITE(reg, temp);
2127
2128 reg = FDI_RX_CTL(pipe);
2129 temp = I915_READ(reg);
2130 if (HAS_PCH_CPT(dev)) {
2131 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2132 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2133 } else {
2134 temp &= ~FDI_LINK_TRAIN_NONE;
2135 temp |= FDI_LINK_TRAIN_PATTERN_1;
2136 }
2137 /* BPC in FDI rx is consistent with that in PIPECONF */
2138 temp &= ~(0x07 << 16);
2139 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2140 I915_WRITE(reg, temp);
2141
2142 POSTING_READ(reg);
2143 udelay(100);
2144 }
2070 2145
2071 /* Enable panel fitting for LVDS */ 2146 /* Enable panel fitting for LVDS */
2072 if (dev_priv->pch_pf_size && 2147 if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2100 intel_flush_display_plane(dev, plane); 2175 intel_flush_display_plane(dev, plane);
2101 } 2176 }
2102 2177
2178 /* Skip the PCH stuff if possible */
2179 if (!is_pch_port)
2180 goto done;
2181
2103 /* For PCH output, training FDI link */ 2182 /* For PCH output, training FDI link */
2104 if (IS_GEN6(dev)) 2183 if (IS_GEN6(dev))
2105 gen6_fdi_link_train(crtc); 2184 gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2184 I915_WRITE(reg, temp | TRANS_ENABLE); 2263 I915_WRITE(reg, temp | TRANS_ENABLE);
2185 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2264 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2186 DRM_ERROR("failed to enable transcoder %d\n", pipe); 2265 DRM_ERROR("failed to enable transcoder %d\n", pipe);
2187 2266done:
2188 intel_crtc_load_lut(crtc); 2267 intel_crtc_load_lut(crtc);
2189 intel_update_fbc(dev); 2268 intel_update_fbc(dev);
2190 intel_crtc_update_cursor(crtc, true); 2269 intel_crtc_update_cursor(crtc, true);
@@ -5558,9 +5637,7 @@ static void intel_crtc_reset(struct drm_crtc *crtc)
5558 /* Reset flags back to the 'unknown' status so that they 5637 /* Reset flags back to the 'unknown' status so that they
5559 * will be correctly set on the initial modeset. 5638 * will be correctly set on the initial modeset.
5560 */ 5639 */
5561 intel_crtc->cursor_addr = 0;
5562 intel_crtc->dpms_mode = -1; 5640 intel_crtc->dpms_mode = -1;
5563 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5564} 5641}
5565 5642
5566static struct drm_crtc_helper_funcs intel_helper_funcs = { 5643static struct drm_crtc_helper_funcs intel_helper_funcs = {
@@ -5666,6 +5743,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5666 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; 5743 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
5667 5744
5668 intel_crtc_reset(&intel_crtc->base); 5745 intel_crtc_reset(&intel_crtc->base);
5746 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5669 5747
5670 if (HAS_PCH_SPLIT(dev)) { 5748 if (HAS_PCH_SPLIT(dev)) {
5671 intel_helper_funcs.prepare = ironlake_crtc_prepare; 5749 intel_helper_funcs.prepare = ironlake_crtc_prepare;
@@ -6204,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6204 * userspace... 6282 * userspace...
6205 */ 6283 */
6206 I915_WRITE(GEN6_RC_STATE, 0); 6284 I915_WRITE(GEN6_RC_STATE, 0);
6207 __gen6_force_wake_get(dev_priv); 6285 __gen6_gt_force_wake_get(dev_priv);
6208 6286
6209 /* disable the counters and set deterministic thresholds */ 6287 /* disable the counters and set deterministic thresholds */
6210 I915_WRITE(GEN6_RC_CONTROL, 0); 6288 I915_WRITE(GEN6_RC_CONTROL, 0);
@@ -6302,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
6302 /* enable all PM interrupts */ 6380 /* enable all PM interrupts */
6303 I915_WRITE(GEN6_PMINTRMSK, 0); 6381 I915_WRITE(GEN6_PMINTRMSK, 0);
6304 6382
6305 __gen6_force_wake_put(dev_priv); 6383 __gen6_gt_force_wake_put(dev_priv);
6306} 6384}
6307 6385
6308void intel_enable_clock_gating(struct drm_device *dev) 6386void intel_enable_clock_gating(struct drm_device *dev)
@@ -6463,52 +6541,60 @@ void intel_enable_clock_gating(struct drm_device *dev)
6463 } 6541 }
6464} 6542}
6465 6543
6466void intel_disable_clock_gating(struct drm_device *dev) 6544static void ironlake_teardown_rc6(struct drm_device *dev)
6467{ 6545{
6468 struct drm_i915_private *dev_priv = dev->dev_private; 6546 struct drm_i915_private *dev_priv = dev->dev_private;
6469 6547
6470 if (dev_priv->renderctx) { 6548 if (dev_priv->renderctx) {
6471 struct drm_i915_gem_object *obj = dev_priv->renderctx; 6549 i915_gem_object_unpin(dev_priv->renderctx);
6472 6550 drm_gem_object_unreference(&dev_priv->renderctx->base);
6473 I915_WRITE(CCID, 0);
6474 POSTING_READ(CCID);
6475
6476 i915_gem_object_unpin(obj);
6477 drm_gem_object_unreference(&obj->base);
6478 dev_priv->renderctx = NULL; 6551 dev_priv->renderctx = NULL;
6479 } 6552 }
6480 6553
6481 if (dev_priv->pwrctx) { 6554 if (dev_priv->pwrctx) {
6482 struct drm_i915_gem_object *obj = dev_priv->pwrctx; 6555 i915_gem_object_unpin(dev_priv->pwrctx);
6556 drm_gem_object_unreference(&dev_priv->pwrctx->base);
6557 dev_priv->pwrctx = NULL;
6558 }
6559}
6560
6561static void ironlake_disable_rc6(struct drm_device *dev)
6562{
6563 struct drm_i915_private *dev_priv = dev->dev_private;
6564
6565 if (I915_READ(PWRCTXA)) {
6566 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */
6567 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT);
6568 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON),
6569 50);
6483 6570
6484 I915_WRITE(PWRCTXA, 0); 6571 I915_WRITE(PWRCTXA, 0);
6485 POSTING_READ(PWRCTXA); 6572 POSTING_READ(PWRCTXA);
6486 6573
6487 i915_gem_object_unpin(obj); 6574 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
6488 drm_gem_object_unreference(&obj->base); 6575 POSTING_READ(RSTDBYCTL);
6489 dev_priv->pwrctx = NULL;
6490 } 6576 }
6577
6578 ironlake_teardown_rc6(dev);
6491} 6579}
6492 6580
6493static void ironlake_disable_rc6(struct drm_device *dev) 6581static int ironlake_setup_rc6(struct drm_device *dev)
6494{ 6582{
6495 struct drm_i915_private *dev_priv = dev->dev_private; 6583 struct drm_i915_private *dev_priv = dev->dev_private;
6496 6584
6497 /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ 6585 if (dev_priv->renderctx == NULL)
6498 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); 6586 dev_priv->renderctx = intel_alloc_context_page(dev);
6499 wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), 6587 if (!dev_priv->renderctx)
6500 10); 6588 return -ENOMEM;
6501 POSTING_READ(CCID); 6589
6502 I915_WRITE(PWRCTXA, 0); 6590 if (dev_priv->pwrctx == NULL)
6503 POSTING_READ(PWRCTXA); 6591 dev_priv->pwrctx = intel_alloc_context_page(dev);
6504 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 6592 if (!dev_priv->pwrctx) {
6505 POSTING_READ(RSTDBYCTL); 6593 ironlake_teardown_rc6(dev);
6506 i915_gem_object_unpin(dev_priv->renderctx); 6594 return -ENOMEM;
6507 drm_gem_object_unreference(&dev_priv->renderctx->base); 6595 }
6508 dev_priv->renderctx = NULL; 6596
6509 i915_gem_object_unpin(dev_priv->pwrctx); 6597 return 0;
6510 drm_gem_object_unreference(&dev_priv->pwrctx->base);
6511 dev_priv->pwrctx = NULL;
6512} 6598}
6513 6599
6514void ironlake_enable_rc6(struct drm_device *dev) 6600void ironlake_enable_rc6(struct drm_device *dev)
@@ -6516,15 +6602,26 @@ void ironlake_enable_rc6(struct drm_device *dev)
6516 struct drm_i915_private *dev_priv = dev->dev_private; 6602 struct drm_i915_private *dev_priv = dev->dev_private;
6517 int ret; 6603 int ret;
6518 6604
6605 /* rc6 disabled by default due to repeated reports of hanging during
6606 * boot and resume.
6607 */
6608 if (!i915_enable_rc6)
6609 return;
6610
6611 ret = ironlake_setup_rc6(dev);
6612 if (ret)
6613 return;
6614
6519 /* 6615 /*
6520 * GPU can automatically power down the render unit if given a page 6616 * GPU can automatically power down the render unit if given a page
6521 * to save state. 6617 * to save state.
6522 */ 6618 */
6523 ret = BEGIN_LP_RING(6); 6619 ret = BEGIN_LP_RING(6);
6524 if (ret) { 6620 if (ret) {
6525 ironlake_disable_rc6(dev); 6621 ironlake_teardown_rc6(dev);
6526 return; 6622 return;
6527 } 6623 }
6624
6528 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); 6625 OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN);
6529 OUT_RING(MI_SET_CONTEXT); 6626 OUT_RING(MI_SET_CONTEXT);
6530 OUT_RING(dev_priv->renderctx->gtt_offset | 6627 OUT_RING(dev_priv->renderctx->gtt_offset |
@@ -6541,6 +6638,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
6541 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); 6638 I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT);
6542} 6639}
6543 6640
6641
6544/* Set up chip specific display functions */ 6642/* Set up chip specific display functions */
6545static void intel_init_display(struct drm_device *dev) 6643static void intel_init_display(struct drm_device *dev)
6546{ 6644{
@@ -6783,21 +6881,9 @@ void intel_modeset_init(struct drm_device *dev)
6783 if (IS_GEN6(dev)) 6881 if (IS_GEN6(dev))
6784 gen6_enable_rps(dev_priv); 6882 gen6_enable_rps(dev_priv);
6785 6883
6786 if (IS_IRONLAKE_M(dev)) { 6884 if (IS_IRONLAKE_M(dev))
6787 dev_priv->renderctx = intel_alloc_context_page(dev);
6788 if (!dev_priv->renderctx)
6789 goto skip_rc6;
6790 dev_priv->pwrctx = intel_alloc_context_page(dev);
6791 if (!dev_priv->pwrctx) {
6792 i915_gem_object_unpin(dev_priv->renderctx);
6793 drm_gem_object_unreference(&dev_priv->renderctx->base);
6794 dev_priv->renderctx = NULL;
6795 goto skip_rc6;
6796 }
6797 ironlake_enable_rc6(dev); 6885 ironlake_enable_rc6(dev);
6798 }
6799 6886
6800skip_rc6:
6801 INIT_WORK(&dev_priv->idle_work, intel_idle_update); 6887 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
6802 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, 6888 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
6803 (unsigned long)dev); 6889 (unsigned long)dev);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 1f4242b682c8..51cb4e36997f 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1639,6 +1639,24 @@ static int intel_dp_get_modes(struct drm_connector *connector)
1639 return 0; 1639 return 0;
1640} 1640}
1641 1641
1642static bool
1643intel_dp_detect_audio(struct drm_connector *connector)
1644{
1645 struct intel_dp *intel_dp = intel_attached_dp(connector);
1646 struct edid *edid;
1647 bool has_audio = false;
1648
1649 edid = drm_get_edid(connector, &intel_dp->adapter);
1650 if (edid) {
1651 has_audio = drm_detect_monitor_audio(edid);
1652
1653 connector->display_info.raw_edid = NULL;
1654 kfree(edid);
1655 }
1656
1657 return has_audio;
1658}
1659
1642static int 1660static int
1643intel_dp_set_property(struct drm_connector *connector, 1661intel_dp_set_property(struct drm_connector *connector,
1644 struct drm_property *property, 1662 struct drm_property *property,
@@ -1652,17 +1670,23 @@ intel_dp_set_property(struct drm_connector *connector,
1652 return ret; 1670 return ret;
1653 1671
1654 if (property == intel_dp->force_audio_property) { 1672 if (property == intel_dp->force_audio_property) {
1655 if (val == intel_dp->force_audio) 1673 int i = val;
1674 bool has_audio;
1675
1676 if (i == intel_dp->force_audio)
1656 return 0; 1677 return 0;
1657 1678
1658 intel_dp->force_audio = val; 1679 intel_dp->force_audio = i;
1659 1680
1660 if (val > 0 && intel_dp->has_audio) 1681 if (i == 0)
1661 return 0; 1682 has_audio = intel_dp_detect_audio(connector);
1662 if (val < 0 && !intel_dp->has_audio) 1683 else
1684 has_audio = i > 0;
1685
1686 if (has_audio == intel_dp->has_audio)
1663 return 0; 1687 return 0;
1664 1688
1665 intel_dp->has_audio = val > 0; 1689 intel_dp->has_audio = has_audio;
1666 goto done; 1690 goto done;
1667 } 1691 }
1668 1692
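The property handler above gives force_audio three meanings: negative forces audio off, positive forces it on, and zero now re-probes the sink's EDID via the new intel_dp_detect_audio() instead of leaving the last forced state in place. The HDMI and SDVO hunks later in this diff apply the same logic. A hedged, self-contained sketch of that decision; detect_audio() and the struct are stand-ins, not the driver's types:

#include <stdbool.h>
#include <stdio.h>

static bool detect_audio(void)
{
	return true;                    /* pretend the EDID advertises audio */
}

struct fake_connector {
	int force_audio;                /* -1 off, 0 auto, 1 on */
	bool has_audio;                 /* current effective state */
};

/* Returns true when a mode set is needed because the effective state changed. */
static bool set_force_audio(struct fake_connector *c, int val)
{
	bool has_audio;

	if (val == c->force_audio)
		return false;
	c->force_audio = val;

	has_audio = (val == 0) ? detect_audio() : (val > 0);
	if (has_audio == c->has_audio)
		return false;               /* nothing actually changed */

	c->has_audio = has_audio;
	return true;
}

int main(void)
{
	struct fake_connector c = { .force_audio = 0, .has_audio = false };
	printf("force on:     modeset=%d\n", set_force_audio(&c, 1));
	printf("back to auto: modeset=%d\n", set_force_audio(&c, 0));  /* EDID says audio, so no change */
	return 0;
}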
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 74db2557d644..2c431049963c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -298,7 +298,6 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
298extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, 298extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
299 u16 *blue, int regno); 299 u16 *blue, int regno);
300extern void intel_enable_clock_gating(struct drm_device *dev); 300extern void intel_enable_clock_gating(struct drm_device *dev);
301extern void intel_disable_clock_gating(struct drm_device *dev);
302extern void ironlake_enable_drps(struct drm_device *dev); 301extern void ironlake_enable_drps(struct drm_device *dev);
303extern void ironlake_disable_drps(struct drm_device *dev); 302extern void ironlake_disable_drps(struct drm_device *dev);
304extern void gen6_enable_rps(struct drm_i915_private *dev_priv); 303extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0d0273e7b029..c635c9e357b9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -251,6 +251,27 @@ static int intel_hdmi_get_modes(struct drm_connector *connector)
251 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter); 251 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
252} 252}
253 253
254static bool
255intel_hdmi_detect_audio(struct drm_connector *connector)
256{
257 struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
258 struct drm_i915_private *dev_priv = connector->dev->dev_private;
259 struct edid *edid;
260 bool has_audio = false;
261
262 edid = drm_get_edid(connector,
263 &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
264 if (edid) {
265 if (edid->input & DRM_EDID_INPUT_DIGITAL)
266 has_audio = drm_detect_monitor_audio(edid);
267
268 connector->display_info.raw_edid = NULL;
269 kfree(edid);
270 }
271
272 return has_audio;
273}
274
254static int 275static int
255intel_hdmi_set_property(struct drm_connector *connector, 276intel_hdmi_set_property(struct drm_connector *connector,
256 struct drm_property *property, 277 struct drm_property *property,
@@ -264,17 +285,23 @@ intel_hdmi_set_property(struct drm_connector *connector,
264 return ret; 285 return ret;
265 286
266 if (property == intel_hdmi->force_audio_property) { 287 if (property == intel_hdmi->force_audio_property) {
267 if (val == intel_hdmi->force_audio) 288 int i = val;
289 bool has_audio;
290
291 if (i == intel_hdmi->force_audio)
268 return 0; 292 return 0;
269 293
270 intel_hdmi->force_audio = val; 294 intel_hdmi->force_audio = i;
271 295
272 if (val > 0 && intel_hdmi->has_audio) 296 if (i == 0)
273 return 0; 297 has_audio = intel_hdmi_detect_audio(connector);
274 if (val < 0 && !intel_hdmi->has_audio) 298 else
299 has_audio = i > 0;
300
301 if (has_audio == intel_hdmi->has_audio)
275 return 0; 302 return 0;
276 303
277 intel_hdmi->has_audio = val > 0; 304 intel_hdmi->has_audio = has_audio;
278 goto done; 305 goto done;
279 } 306 }
280 307
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index ace8d5d30dd2..bcdba7bd5cfa 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -261,12 +261,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
261 return true; 261 return true;
262 } 262 }
263 263
264 /* Make sure pre-965s set dither correctly */
265 if (INTEL_INFO(dev)->gen < 4) {
266 if (dev_priv->lvds_dither)
267 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
268 }
269
270 /* Native modes don't need fitting */ 264 /* Native modes don't need fitting */
271 if (adjusted_mode->hdisplay == mode->hdisplay && 265 if (adjusted_mode->hdisplay == mode->hdisplay &&
272 adjusted_mode->vdisplay == mode->vdisplay) 266 adjusted_mode->vdisplay == mode->vdisplay)
@@ -374,10 +368,16 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
374 } 368 }
375 369
376out: 370out:
371 /* If not enabling scaling, be consistent and always use 0. */
377 if ((pfit_control & PFIT_ENABLE) == 0) { 372 if ((pfit_control & PFIT_ENABLE) == 0) {
378 pfit_control = 0; 373 pfit_control = 0;
379 pfit_pgm_ratios = 0; 374 pfit_pgm_ratios = 0;
380 } 375 }
376
377 /* Make sure pre-965 set dither correctly */
378 if (INTEL_INFO(dev)->gen < 4 && dev_priv->lvds_dither)
379 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
380
381 if (pfit_control != intel_lvds->pfit_control || 381 if (pfit_control != intel_lvds->pfit_control ||
382 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) { 382 pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
383 intel_lvds->pfit_control = pfit_control; 383 intel_lvds->pfit_control = pfit_control;
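The LVDS change above moves the pre-965 dither decision after the "no scaling, zero pfit_control" clamp, so dithering is no longer thrown away when the panel fitter is unused. A small sketch of why the ordering matters; the two bit values are placeholders rather than the real register layout:

#include <stdio.h>

#define PFIT_ENABLE              (1u << 31)  /* placeholder, not the real layout */
#define PANEL_8TO6_DITHER_ENABLE (1u << 3)   /* placeholder */

static unsigned compute_pfit(int gen, int lvds_dither, int need_scaling)
{
	unsigned pfit_control = need_scaling ? PFIT_ENABLE : 0;

	/* If not enabling scaling, be consistent and always use 0. */
	if ((pfit_control & PFIT_ENABLE) == 0)
		pfit_control = 0;

	/* Dither is decided after the clamp, so pre-965 panels keep it
	 * even for native modes that need no fitting. */
	if (gen < 4 && lvds_dither)
		pfit_control |= PANEL_8TO6_DITHER_ENABLE;

	return pfit_control;
}

int main(void)
{
	printf("gen3, native mode: pfit_control=%#x\n", compute_pfit(3, 1, 0));
	return 0;
}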
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index c65992df458d..f8f86e57df22 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -208,7 +208,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
208 val &= ~1; 208 val &= ~1;
209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); 209 pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
210 val *= lbpc; 210 val *= lbpc;
211 val >>= 1;
212 } 211 }
213 } 212 }
214 213
@@ -235,11 +234,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
235 234
236 if (is_backlight_combination_mode(dev)){ 235 if (is_backlight_combination_mode(dev)){
237 u32 max = intel_panel_get_max_backlight(dev); 236 u32 max = intel_panel_get_max_backlight(dev);
238 u8 lpbc; 237 u8 lbpc;
239 238
240 lpbc = level * 0xfe / max + 1; 239 lbpc = level * 0xfe / max + 1;
241 level /= lpbc; 240 level /= lbpc;
242 pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); 241 pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc);
243 } 242 }
244 243
245 tmp = I915_READ(BLC_PWM_CTL); 244 tmp = I915_READ(BLC_PWM_CTL);
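The backlight hunks above fix the lpbc/lbpc spelling and drop the extra halving on the read path; in combination mode the requested level is split into a coarse PCI LBPC byte and the remaining PWM duty. Purely illustrative arithmetic, with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned max = 20000;             /* pretend max PWM duty from BLC_PWM_CTL */
	unsigned level = 12345;           /* requested brightness */

	unsigned char lbpc = level * 0xfe / max + 1;   /* coarse scale, 1..0xff */
	unsigned pwm = level / lbpc;                   /* what remains goes to the PWM field */

	printf("lbpc=%u pwm=%u (reconstructed level ~%u)\n",
	       lbpc, pwm, pwm * lbpc);
	return 0;
}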
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6218fa97aa1e..445f27efe677 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1059,22 +1059,25 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
1059} 1059}
1060 1060
1061static int gen6_ring_flush(struct intel_ring_buffer *ring, 1061static int gen6_ring_flush(struct intel_ring_buffer *ring,
1062 u32 invalidate_domains, 1062 u32 invalidate, u32 flush)
1063 u32 flush_domains)
1064{ 1063{
1064 uint32_t cmd;
1065 int ret; 1065 int ret;
1066 1066
1067 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1067 if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1068 return 0; 1068 return 0;
1069 1069
1070 ret = intel_ring_begin(ring, 4); 1070 ret = intel_ring_begin(ring, 4);
1071 if (ret) 1071 if (ret)
1072 return ret; 1072 return ret;
1073 1073
1074 intel_ring_emit(ring, MI_FLUSH_DW); 1074 cmd = MI_FLUSH_DW;
1075 intel_ring_emit(ring, 0); 1075 if (invalidate & I915_GEM_GPU_DOMAINS)
1076 cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
1077 intel_ring_emit(ring, cmd);
1076 intel_ring_emit(ring, 0); 1078 intel_ring_emit(ring, 0);
1077 intel_ring_emit(ring, 0); 1079 intel_ring_emit(ring, 0);
1080 intel_ring_emit(ring, MI_NOOP);
1078 intel_ring_advance(ring); 1081 intel_ring_advance(ring);
1079 return 0; 1082 return 0;
1080} 1083}
@@ -1230,22 +1233,25 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
1230} 1233}
1231 1234
1232static int blt_ring_flush(struct intel_ring_buffer *ring, 1235static int blt_ring_flush(struct intel_ring_buffer *ring,
1233 u32 invalidate_domains, 1236 u32 invalidate, u32 flush)
1234 u32 flush_domains)
1235{ 1237{
1238 uint32_t cmd;
1236 int ret; 1239 int ret;
1237 1240
1238 if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0) 1241 if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1239 return 0; 1242 return 0;
1240 1243
1241 ret = blt_ring_begin(ring, 4); 1244 ret = blt_ring_begin(ring, 4);
1242 if (ret) 1245 if (ret)
1243 return ret; 1246 return ret;
1244 1247
1245 intel_ring_emit(ring, MI_FLUSH_DW); 1248 cmd = MI_FLUSH_DW;
1246 intel_ring_emit(ring, 0); 1249 if (invalidate & I915_GEM_DOMAIN_RENDER)
1250 cmd |= MI_INVALIDATE_TLB;
1251 intel_ring_emit(ring, cmd);
1247 intel_ring_emit(ring, 0); 1252 intel_ring_emit(ring, 0);
1248 intel_ring_emit(ring, 0); 1253 intel_ring_emit(ring, 0);
1254 intel_ring_emit(ring, MI_NOOP);
1249 intel_ring_advance(ring); 1255 intel_ring_advance(ring);
1250 return 0; 1256 return 0;
1251} 1257}
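Both flush hooks above now build the MI_FLUSH_DW command word up front, OR in the invalidate bits only when an invalidate was actually requested, and pad the emission with MI_NOOP. A standalone sketch of that assembly; the opcode and bit values below are placeholders, not the real encodings from i915_reg.h:

#include <stdio.h>

#define MI_NOOP            0x00000000u
#define MI_FLUSH_DW        0x26000000u   /* placeholder encoding */
#define MI_INVALIDATE_TLB  0x00040000u   /* placeholder */
#define MI_INVALIDATE_BSD  0x00000080u   /* placeholder */

static void emit_flush(unsigned ring[4], int want_invalidate)
{
	unsigned cmd = MI_FLUSH_DW;

	if (want_invalidate)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;

	ring[0] = cmd;
	ring[1] = 0;        /* address/qword fields unused in this sketch */
	ring[2] = 0;
	ring[3] = MI_NOOP;  /* keep the emission an even four dwords */
}

int main(void)
{
	unsigned ring[4];

	emit_flush(ring, 1);
	printf("%#x %#x %#x %#x\n", ring[0], ring[1], ring[2], ring[3]);
	return 0;
}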
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6d6fde85a636..34306865a5df 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -14,22 +14,23 @@ struct intel_hw_status_page {
14 struct drm_i915_gem_object *obj; 14 struct drm_i915_gem_object *obj;
15}; 15};
16 16
17#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) 17#define I915_RING_READ(reg) i915_gt_read(dev_priv, reg)
18#define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val)
18 19
19#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) 20#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base))
20#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) 21#define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val)
21 22
22#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) 23#define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base))
23#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) 24#define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val)
24 25
25#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) 26#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base))
26#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) 27#define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val)
27 28
28#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) 29#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base))
29#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) 30#define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val)
30 31
31#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) 32#define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base))
33#define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val)
33 34
34#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) 35#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base))
35#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) 36#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base))
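The macro block above routes every per-ring register access through i915_gt_read()/i915_gt_write() and derives the register address from the ring's mmio_base. A toy version of the same pattern, with a fake register file and invented offsets:

#include <stdio.h>

static unsigned regs[0x10000 / 4];           /* fake MMIO space */

static unsigned gt_read(unsigned reg)               { return regs[reg / 4]; }
static void     gt_write(unsigned reg, unsigned v)  { regs[reg / 4] = v;    }

#define RING_TAIL(base) ((base) + 0x30)      /* placeholder offsets */
#define RING_HEAD(base) ((base) + 0x34)

#define READ_TAIL(ring)        gt_read(RING_TAIL((ring)->mmio_base))
#define WRITE_TAIL(ring, val)  gt_write(RING_TAIL((ring)->mmio_base), (val))

struct fake_ring { unsigned mmio_base; };

int main(void)
{
	struct fake_ring render = { .mmio_base = 0x2000 };

	WRITE_TAIL(&render, 0x80);
	printf("tail = %#x\n", READ_TAIL(&render));
	return 0;
}

Funnelling everything through one accessor pair is what makes it possible to hang forcewake or tracing logic on ring register traffic in a single place.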
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 6a09c1413d60..7c50cdce84f0 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -46,6 +46,7 @@
46 SDVO_TV_MASK) 46 SDVO_TV_MASK)
47 47
48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK) 48#define IS_TV(c) (c->output_flag & SDVO_TV_MASK)
49#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
49#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK) 50#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
50#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK)) 51#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
51 52
@@ -1359,7 +1360,8 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1359 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); 1360 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1360 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); 1361 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1361 } 1362 }
1362 } 1363 } else
1364 status = connector_status_disconnected;
1363 connector->display_info.raw_edid = NULL; 1365 connector->display_info.raw_edid = NULL;
1364 kfree(edid); 1366 kfree(edid);
1365 } 1367 }
@@ -1407,10 +1409,25 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1407 1409
1408 if ((intel_sdvo_connector->output_flag & response) == 0) 1410 if ((intel_sdvo_connector->output_flag & response) == 0)
1409 ret = connector_status_disconnected; 1411 ret = connector_status_disconnected;
1410 else if (response & SDVO_TMDS_MASK) 1412 else if (IS_TMDS(intel_sdvo_connector))
1411 ret = intel_sdvo_hdmi_sink_detect(connector); 1413 ret = intel_sdvo_hdmi_sink_detect(connector);
1412 else 1414 else {
1413 ret = connector_status_connected; 1415 struct edid *edid;
1416
1417 /* if we have an edid check it matches the connection */
1418 edid = intel_sdvo_get_edid(connector);
1419 if (edid == NULL)
1420 edid = intel_sdvo_get_analog_edid(connector);
1421 if (edid != NULL) {
1422 if (edid->input & DRM_EDID_INPUT_DIGITAL)
1423 ret = connector_status_disconnected;
1424 else
1425 ret = connector_status_connected;
1426 connector->display_info.raw_edid = NULL;
1427 kfree(edid);
1428 } else
1429 ret = connector_status_connected;
1430 }
1414 1431
1415 /* May update encoder flag for like clock for SDVO TV, etc.*/ 1432 /* May update encoder flag for like clock for SDVO TV, etc.*/
1416 if (ret == connector_status_connected) { 1433 if (ret == connector_status_connected) {
@@ -1446,10 +1463,15 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1446 edid = intel_sdvo_get_analog_edid(connector); 1463 edid = intel_sdvo_get_analog_edid(connector);
1447 1464
1448 if (edid != NULL) { 1465 if (edid != NULL) {
1449 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1466 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1467 bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
1468 bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
1469
1470 if (connector_is_digital == monitor_is_digital) {
1450 drm_mode_connector_update_edid_property(connector, edid); 1471 drm_mode_connector_update_edid_property(connector, edid);
1451 drm_add_edid_modes(connector, edid); 1472 drm_add_edid_modes(connector, edid);
1452 } 1473 }
1474
1453 connector->display_info.raw_edid = NULL; 1475 connector->display_info.raw_edid = NULL;
1454 kfree(edid); 1476 kfree(edid);
1455 } 1477 }
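The detect and get_ddc_modes changes above only accept an EDID when its digital/analog input bit agrees with the kind of SDVO connector being probed (IS_TMDS). A small standalone check of that rule; the bit position used here is the standard EDID digital-input flag, but treat the constant as illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Bit 7 of the EDID "video input definition" byte means digital input. */
#define EDID_INPUT_DIGITAL 0x80

static bool edid_matches_connector(unsigned char video_input, bool connector_is_digital)
{
	bool monitor_is_digital = (video_input & EDID_INPUT_DIGITAL) != 0;

	return connector_is_digital == monitor_is_digital;
}

int main(void)
{
	printf("digital EDID on analog SDVO output: %s\n",
	       edid_matches_connector(0x80, false) ? "use it" : "ignore it");
	printf("analog EDID on analog SDVO output:  %s\n",
	       edid_matches_connector(0x00, false) ? "use it" : "ignore it");
	return 0;
}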
@@ -1668,6 +1690,22 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1668 kfree(connector); 1690 kfree(connector);
1669} 1691}
1670 1692
1693static bool intel_sdvo_detect_hdmi_audio(struct drm_connector *connector)
1694{
1695 struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
1696 struct edid *edid;
1697 bool has_audio = false;
1698
1699 if (!intel_sdvo->is_hdmi)
1700 return false;
1701
1702 edid = intel_sdvo_get_edid(connector);
1703 if (edid != NULL && edid->input & DRM_EDID_INPUT_DIGITAL)
1704 has_audio = drm_detect_monitor_audio(edid);
1705
1706 return has_audio;
1707}
1708
1671static int 1709static int
1672intel_sdvo_set_property(struct drm_connector *connector, 1710intel_sdvo_set_property(struct drm_connector *connector,
1673 struct drm_property *property, 1711 struct drm_property *property,
@@ -1684,17 +1722,23 @@ intel_sdvo_set_property(struct drm_connector *connector,
1684 return ret; 1722 return ret;
1685 1723
1686 if (property == intel_sdvo_connector->force_audio_property) { 1724 if (property == intel_sdvo_connector->force_audio_property) {
1687 if (val == intel_sdvo_connector->force_audio) 1725 int i = val;
1726 bool has_audio;
1727
1728 if (i == intel_sdvo_connector->force_audio)
1688 return 0; 1729 return 0;
1689 1730
1690 intel_sdvo_connector->force_audio = val; 1731 intel_sdvo_connector->force_audio = i;
1691 1732
1692 if (val > 0 && intel_sdvo->has_hdmi_audio) 1733 if (i == 0)
1693 return 0; 1734 has_audio = intel_sdvo_detect_hdmi_audio(connector);
1694 if (val < 0 && !intel_sdvo->has_hdmi_audio) 1735 else
1736 has_audio = i > 0;
1737
1738 if (has_audio == intel_sdvo->has_hdmi_audio)
1695 return 0; 1739 return 0;
1696 1740
1697 intel_sdvo->has_hdmi_audio = val > 0; 1741 intel_sdvo->has_hdmi_audio = has_audio;
1698 goto done; 1742 goto done;
1699 } 1743 }
1700 1744
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 93206e4eaa6f..fe4a53a50b83 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1234,7 +1234,8 @@ static const struct drm_display_mode reported_modes[] = {
1234 * \return false if TV is disconnected. 1234 * \return false if TV is disconnected.
1235 */ 1235 */
1236static int 1236static int
1237intel_tv_detect_type (struct intel_tv *intel_tv) 1237intel_tv_detect_type (struct intel_tv *intel_tv,
1238 struct drm_connector *connector)
1238{ 1239{
1239 struct drm_encoder *encoder = &intel_tv->base.base; 1240 struct drm_encoder *encoder = &intel_tv->base.base;
1240 struct drm_device *dev = encoder->dev; 1241 struct drm_device *dev = encoder->dev;
@@ -1245,11 +1246,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1245 int type; 1246 int type;
1246 1247
1247 /* Disable TV interrupts around load detect or we'll recurse */ 1248 /* Disable TV interrupts around load detect or we'll recurse */
1248 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1249 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1249 i915_disable_pipestat(dev_priv, 0, 1250 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1250 PIPE_HOTPLUG_INTERRUPT_ENABLE | 1251 i915_disable_pipestat(dev_priv, 0,
1251 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1252 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1252 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1253 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1254 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1255 }
1253 1256
1254 save_tv_dac = tv_dac = I915_READ(TV_DAC); 1257 save_tv_dac = tv_dac = I915_READ(TV_DAC);
1255 save_tv_ctl = tv_ctl = I915_READ(TV_CTL); 1258 save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1302,11 +1305,13 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
1302 I915_WRITE(TV_CTL, save_tv_ctl); 1305 I915_WRITE(TV_CTL, save_tv_ctl);
1303 1306
1304 /* Restore interrupt config */ 1307 /* Restore interrupt config */
1305 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 1308 if (connector->polled & DRM_CONNECTOR_POLL_HPD) {
1306 i915_enable_pipestat(dev_priv, 0, 1309 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
1307 PIPE_HOTPLUG_INTERRUPT_ENABLE | 1310 i915_enable_pipestat(dev_priv, 0,
1308 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE); 1311 PIPE_HOTPLUG_INTERRUPT_ENABLE |
1309 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 1312 PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
1313 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
1314 }
1310 1315
1311 return type; 1316 return type;
1312} 1317}
@@ -1356,7 +1361,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1356 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V); 1361 drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
1357 1362
1358 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) { 1363 if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
1359 type = intel_tv_detect_type(intel_tv); 1364 type = intel_tv_detect_type(intel_tv, connector);
1360 } else if (force) { 1365 } else if (force) {
1361 struct drm_crtc *crtc; 1366 struct drm_crtc *crtc;
1362 int dpms_mode; 1367 int dpms_mode;
@@ -1364,7 +1369,7 @@ intel_tv_detect(struct drm_connector *connector, bool force)
1364 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector, 1369 crtc = intel_get_load_detect_pipe(&intel_tv->base, connector,
1365 &mode, &dpms_mode); 1370 &mode, &dpms_mode);
1366 if (crtc) { 1371 if (crtc) {
1367 type = intel_tv_detect_type(intel_tv); 1372 type = intel_tv_detect_type(intel_tv, connector);
1368 intel_release_load_detect_pipe(&intel_tv->base, connector, 1373 intel_release_load_detect_pipe(&intel_tv->base, connector,
1369 dpms_mode); 1374 dpms_mode);
1370 } else 1375 } else
@@ -1658,6 +1663,18 @@ intel_tv_init(struct drm_device *dev)
1658 intel_encoder = &intel_tv->base; 1663 intel_encoder = &intel_tv->base;
1659 connector = &intel_connector->base; 1664 connector = &intel_connector->base;
1660 1665
1666 /* The documentation, for the older chipsets at least, recommend
1667 * using a polling method rather than hotplug detection for TVs.
1668 * This is because in order to perform the hotplug detection, the PLLs
1669 * for the TV must be kept alive increasing power drain and starving
1670 * bandwidth from other encoders. Notably for instance, it causes
1671 * pipe underruns on Crestline when this encoder is supposedly idle.
1672 *
1673 * More recent chipsets favour HDMI rather than integrated S-Video.
1674 */
1675 connector->polled =
1676 DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
1677
1661 drm_connector_init(dev, connector, &intel_tv_connector_funcs, 1678 drm_connector_init(dev, connector, &intel_tv_connector_funcs,
1662 DRM_MODE_CONNECTOR_SVIDEO); 1679 DRM_MODE_CONNECTOR_SVIDEO);
1663 1680
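intel_tv_detect_type() above now only masks and restores the hotplug pipestat bits when the connector actually uses hotplug, and the init path registers the TV connector for timer-based polling instead, for the reasons given in the new comment. A hedged sketch of that guard; the flag values and helpers are invented for the example:

#include <stdio.h>

#define POLL_CONNECT    (1 << 0)
#define POLL_DISCONNECT (1 << 1)
#define POLL_HPD        (1 << 2)

static void disable_hotplug_irqs(void) { printf("hotplug irqs off\n"); }
static void enable_hotplug_irqs(void)  { printf("hotplug irqs on\n");  }

static void load_detect(unsigned polled)
{
	if (polled & POLL_HPD)
		disable_hotplug_irqs();     /* avoid recursing into detect */

	printf("run TV load detection\n");

	if (polled & POLL_HPD)
		enable_hotplug_irqs();
}

int main(void)
{
	/* The TV connector is now registered for timer-based polling only,
	 * so the interrupt plumbing is left untouched. */
	load_detect(POLL_CONNECT | POLL_DISCONNECT);
	return 0;
}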
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 49e5e99917e2..6bdab891c64e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
6228 entry->tvconf.has_component_output = false; 6228 entry->tvconf.has_component_output = false;
6229 break; 6229 break;
6230 case OUTPUT_LVDS: 6230 case OUTPUT_LVDS:
6231 if ((conn & 0x00003f00) != 0x10) 6231 if ((conn & 0x00003f00) >> 8 != 0x10)
6232 entry->lvdsconf.use_straps_for_mode = true; 6232 entry->lvdsconf.use_straps_for_mode = true;
6233 entry->lvdsconf.use_power_scripts = true; 6233 entry->lvdsconf.use_power_scripts = true;
6234 break; 6234 break;
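The nouveau_bios.c one-liner above is a mask-without-shift fix: the LVDS config field sits in bits 8..13 of conn, so the unshifted comparison against 0x10 could never be true. Standalone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned conn = 0x00001000;                 /* field value 0x10 in bits 8..13 */

	int wrong = (conn & 0x00003f00) != 0x10;           /* always true: the mask keeps bits 8..13 only */
	int fixed = ((conn & 0x00003f00) >> 8) != 0x10;    /* false: the field really is 0x10 */

	printf("old test says use_straps_for_mode=%d, fixed test says %d\n",
	       wrong, fixed);
	return 0;
}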
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index a7fae26f4654..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
49 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
50 50
51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL); 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
52 nouveau_vm_put(&nvbo->vma); 52 if (nvbo->vma.node) {
53 nouveau_vm_unmap(&nvbo->vma);
54 nouveau_vm_put(&nvbo->vma);
55 }
53 kfree(nvbo); 56 kfree(nvbo);
54} 57}
55 58
@@ -128,6 +131,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
128 } 131 }
129 } 132 }
130 133
134 nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
131 nouveau_bo_placement_set(nvbo, flags, 0); 135 nouveau_bo_placement_set(nvbo, flags, 0);
132 136
133 nvbo->channel = chan; 137 nvbo->channel = chan;
@@ -166,17 +170,17 @@ static void
166set_placement_range(struct nouveau_bo *nvbo, uint32_t type) 170set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
167{ 171{
168 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); 172 struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
173 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
169 174
170 if (dev_priv->card_type == NV_10 && 175 if (dev_priv->card_type == NV_10 &&
171 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { 176 nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
177 nvbo->bo.mem.num_pages < vram_pages / 2) {
172 /* 178 /*
173 * Make sure that the color and depth buffers are handled 179 * Make sure that the color and depth buffers are handled
174 * by independent memory controller units. Up to a 9x 180 * by independent memory controller units. Up to a 9x
175 * speed up when alpha-blending and depth-test are enabled 181 * speed up when alpha-blending and depth-test are enabled
176 * at the same time. 182 * at the same time.
177 */ 183 */
178 int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
179
180 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { 184 if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
181 nvbo->placement.fpfn = vram_pages / 2; 185 nvbo->placement.fpfn = vram_pages / 2;
182 nvbo->placement.lpfn = ~0; 186 nvbo->placement.lpfn = ~0;
@@ -785,7 +789,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
785 if (ret) 789 if (ret)
786 goto out; 790 goto out;
787 791
788 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); 792 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
789out: 793out:
790 ttm_bo_mem_put(bo, &tmp_mem); 794 ttm_bo_mem_put(bo, &tmp_mem);
791 return ret; 795 return ret;
@@ -811,11 +815,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
811 if (ret) 815 if (ret)
812 return ret; 816 return ret;
813 817
814 ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); 818 ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
815 if (ret) 819 if (ret)
816 goto out; 820 goto out;
817 821
818 ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); 822 ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
819 if (ret) 823 if (ret)
820 goto out; 824 goto out;
821 825
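The placement change above keeps the NV1x colour/zeta split between the two halves of VRAM, but only applies it when the buffer is smaller than half of VRAM (which is why nouveau_bo_new() now fills in bo.mem.num_pages early). A simplified sketch of the range selection; the names and the exact boundary handling are illustrative:

#include <stdio.h>

struct range { unsigned long fpfn, lpfn; };   /* first/last page frame numbers */

static struct range pick_range(unsigned long vram_pages,
			       unsigned long bo_pages, int is_zeta)
{
	/* default: anywhere in VRAM (the driver uses ~0 for "no upper limit") */
	struct range r = { 0, vram_pages };

	if (bo_pages >= vram_pages / 2)
		return r;                         /* too big to constrain to one half */

	if (is_zeta)
		r.fpfn = vram_pages / 2;          /* depth buffers in the upper half */
	else
		r.lpfn = vram_pages / 2;          /* colour buffers in the lower half */
	return r;
}

int main(void)
{
	unsigned long vram_pages = 256 * 1024;    /* 1 GiB of 4 KiB pages */
	struct range z = pick_range(vram_pages, 2048, 1);   /* 8 MiB depth buffer */
	struct range c = pick_range(vram_pages, 2048, 0);   /* 8 MiB colour buffer */

	printf("zeta  : fpfn=%lu lpfn=%lu\n", z.fpfn, z.lpfn);
	printf("colour: fpfn=%lu lpfn=%lu\n", c.fpfn, c.lpfn);
	return 0;
}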
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index a21e00076839..390d82c3c4b0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector)
507 int high_w = 0, high_h = 0, high_v = 0; 507 int high_w = 0, high_h = 0, high_v = 0;
508 508
509 list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { 509 list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
510 mode->vrefresh = drm_mode_vrefresh(mode);
510 if (helper->mode_valid(connector, mode) != MODE_OK || 511 if (helper->mode_valid(connector, mode) != MODE_OK ||
511 (mode->flags & DRM_MODE_FLAG_INTERLACE)) 512 (mode->flags & DRM_MODE_FLAG_INTERLACE))
512 continue; 513 continue;
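nouveau_connector_native_mode() above now fills in mode->vrefresh with drm_mode_vrefresh() before comparing candidate modes. A simplified version of that computation, ignoring the interlace, doublescan and vscan corrections the real helper applies:

#include <stdio.h>

static int simple_vrefresh(int clock_khz, int htotal, int vtotal)
{
	if (htotal <= 0 || vtotal <= 0)
		return 0;
	/* pixel clock is in kHz, so scale by 1000 to get frames per second */
	return clock_khz * 1000 / (htotal * vtotal);
}

int main(void)
{
	/* roughly a 1920x1200 reduced-blanking mode; timings are illustrative */
	printf("vrefresh ~= %d Hz\n", simple_vrefresh(154000, 2080, 1235));
	return 0;
}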
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 65699bfaaaea..b368ed74aad7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan)
83 return ret; 83 return ret;
84 84
85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ 85 /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
86 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); 86 ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000,
87 &chan->m2mf_ntfy);
87 if (ret) 88 if (ret)
88 return ret; 89 return ret;
89 90
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 9821fcacc3d2..982d70b12722 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager;
852extern int nouveau_notifier_init_channel(struct nouveau_channel *); 852extern int nouveau_notifier_init_channel(struct nouveau_channel *);
853extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); 853extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
854extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, 854extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
855 int cout, uint32_t *offset); 855 int cout, uint32_t start, uint32_t end,
856 uint32_t *offset);
856extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); 857extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *);
857extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, 858extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
858 struct drm_file *); 859 struct drm_file *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 26347b7cd872..b0fb9bdcddb7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, 725 ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
726 mem->page_alignment << PAGE_SHIFT, size_nc, 726 mem->page_alignment << PAGE_SHIFT, size_nc,
727 (nvbo->tile_flags >> 8) & 0xff, &node); 727 (nvbo->tile_flags >> 8) & 0xff, &node);
728 if (ret) 728 if (ret) {
729 return ret; 729 mem->mm_node = NULL;
730 return (ret == -ENOSPC) ? 0 : ret;
731 }
730 732
731 node->page_shift = 12; 733 node->page_shift = 12;
732 if (nvbo->vma.node) 734 if (nvbo->vma.node)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c
index 8844b50c3e54..7609756b6faf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mm.c
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
123 return 0; 123 return 0;
124 } 124 }
125 125
126 return -ENOMEM; 126 return -ENOSPC;
127} 127}
128 128
129int 129int
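Taken together, the nouveau_mem.c and nouveau_mm.c hunks above make the low-level allocator report -ENOSPC for "full" and have the TTM manager hook translate that into "no node, no error", which tells TTM to evict and retry rather than fail outright. A hedged sketch of that convention with heavily simplified types:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int low_level_get(int have_room, void **node)
{
	if (!have_room)
		return -ENOSPC;              /* full, but nothing is broken */
	*node = malloc(16);
	return *node ? 0 : -ENOMEM;
}

/* Mirrors the manager hook: 0 with *node == NULL means "try eviction". */
static int manager_new(int have_room, void **node)
{
	int ret = low_level_get(have_room, node);

	if (ret) {
		*node = NULL;
		return ret == -ENOSPC ? 0 : ret;
	}
	return 0;
}

int main(void)
{
	void *node = NULL;
	int ret = manager_new(0, &node);

	printf("ret=%d node=%p -> %s\n", ret, node,
	       (!ret && !node) ? "evict something and retry" : "done or failed");
	free(node);
	return 0;
}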
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index fe29d604b820..5ea167623a82 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
96 96
97int 97int
98nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, 98nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
99 int size, uint32_t *b_offset) 99 int size, uint32_t start, uint32_t end,
100 uint32_t *b_offset)
100{ 101{
101 struct drm_device *dev = chan->dev; 102 struct drm_device *dev = chan->dev;
102 struct nouveau_gpuobj *nobj = NULL; 103 struct nouveau_gpuobj *nobj = NULL;
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
104 uint32_t offset; 105 uint32_t offset;
105 int target, ret; 106 int target, ret;
106 107
107 mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); 108 mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
109 start, end, 0);
108 if (mem) 110 if (mem)
109 mem = drm_mm_get_block(mem, size, 0); 111 mem = drm_mm_get_block_range(mem, size, 0, start, end);
110 if (!mem) { 112 if (!mem) {
111 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); 113 NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
112 return -ENOMEM; 114 return -ENOMEM;
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
177 if (IS_ERR(chan)) 179 if (IS_ERR(chan))
178 return PTR_ERR(chan); 180 return PTR_ERR(chan);
179 181
180 ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); 182 ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000,
183 &na->offset);
181 nouveau_channel_put(&chan); 184 nouveau_channel_put(&chan);
182 return ret; 185 return ret;
183} 186}
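nouveau_notifier_alloc() above now takes a start/end window and uses drm_mm_search_free_in_range()/drm_mm_get_block_range(), so the m2mf notifier can be confined to [0xfd0, 0x1000) while ioctl allocations stay within [0, 0x1000). A toy range-restricted first-fit to illustrate the idea; this is not the drm_mm API:

#include <stdio.h>
#include <string.h>

static unsigned char used[0x1000];      /* one byte per byte of notifier space */

static long alloc_range(size_t size, size_t start, size_t end)
{
	for (size_t off = start; off + size <= end; off++) {
		size_t i;
		for (i = 0; i < size; i++)
			if (used[off + i])
				break;
		if (i == size) {
			memset(used + off, 1, size);
			return (long)off;
		}
	}
	return -1;                          /* block full within [start, end) */
}

int main(void)
{
	long m2mf = alloc_range(32, 0xfd0, 0x1000);   /* pinned near the top */
	long user = alloc_range(32, 0, 0x1000);       /* anywhere in the block */

	printf("m2mf notifier at %#lx, user notifier at %#lx\n",
	       (unsigned long)m2mf, (unsigned long)user);
	return 0;
}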
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c
index f05c0cddfeca..4399e2f34db4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_pm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_pm.c
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev)
543 struct nouveau_pm_engine *pm = &dev_priv->engine.pm; 543 struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
544 struct nouveau_pm_level *perflvl; 544 struct nouveau_pm_level *perflvl;
545 545
546 if (pm->cur == &pm->boot) 546 if (!pm->cur || pm->cur == &pm->boot)
547 return; 547 return;
548 548
549 perflvl = pm->cur; 549 perflvl = pm->cur;
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index ef23550407b5..c82db37d9f41 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
342 if (nv_encoder->dcb->type == OUTPUT_LVDS) { 342 if (nv_encoder->dcb->type == OUTPUT_LVDS) {
343 bool duallink, dummy; 343 bool duallink, dummy;
344 344
345 nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> 345 nouveau_bios_parse_lvds_table(dev, output_mode->clock,
346 clock, &duallink, &dummy); 346 &duallink, &dummy);
347 if (duallink) 347 if (duallink)
348 regp->fp_control |= (8 << 28); 348 regp->fp_control |= (8 << 28);
349 } else 349 } else
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
518 return; 518 return;
519 519
520 if (nv_encoder->dcb->lvdsconf.use_power_scripts) { 520 if (nv_encoder->dcb->lvdsconf.use_power_scripts) {
521 struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder);
522
523 /* when removing an output, crtc may not be set, but PANEL_OFF 521 /* when removing an output, crtc may not be set, but PANEL_OFF
524 * must still be run 522 * must still be run
525 */ 523 */
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode)
527 nv04_dfp_get_bound_head(dev, nv_encoder->dcb); 525 nv04_dfp_get_bound_head(dev, nv_encoder->dcb);
528 526
529 if (mode == DRM_MODE_DPMS_ON) { 527 if (mode == DRM_MODE_DPMS_ON) {
530 if (!nv_connector->native_mode) {
531 NV_ERROR(dev, "Not turning on LVDS without native mode\n");
532 return;
533 }
534 call_lvds_script(dev, nv_encoder->dcb, head, 528 call_lvds_script(dev, nv_encoder->dcb, head,
535 LVDS_PANEL_ON, nv_connector->native_mode->clock); 529 LVDS_PANEL_ON, nv_encoder->mode.clock);
536 } else 530 } else
537 /* pxclk of 0 is fine for PANEL_OFF, and for a 531 /* pxclk of 0 is fine for PANEL_OFF, and for a
538 * disconnected LVDS encoder there is no native_mode 532 * disconnected LVDS encoder there is no native_mode
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c
index 8870d72388c8..18d30c2c1aa6 100644
--- a/drivers/gpu/drm/nouveau/nv40_graph.c
+++ b/drivers/gpu/drm/nouveau/nv40_graph.c
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; 211 struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
212 212
213 switch (dev_priv->chipset) { 213 switch (dev_priv->chipset) {
214 case 0x40:
215 case 0x41: /* guess */
216 case 0x42:
217 case 0x43:
218 case 0x45: /* guess */
219 case 0x4e:
220 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
221 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
222 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
223 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
224 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
225 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
226 break;
214 case 0x44: 227 case 0x44:
215 case 0x4a: 228 case 0x4a:
216 case 0x4e:
217 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); 229 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
218 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); 230 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
219 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); 231 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
220 break; 232 break;
221
222 case 0x46: 233 case 0x46:
223 case 0x47: 234 case 0x47:
224 case 0x49: 235 case 0x49:
225 case 0x4b: 236 case 0x4b:
237 case 0x4c:
238 case 0x67:
239 default:
226 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); 240 nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch);
227 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); 241 nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit);
228 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); 242 nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr);
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i)
230 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); 244 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
231 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); 245 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
232 break; 246 break;
233
234 default:
235 nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch);
236 nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit);
237 nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr);
238 nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch);
239 nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit);
240 nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr);
241 break;
242 } 247 }
243} 248}
244 249
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev)
396 break; 401 break;
397 default: 402 default:
398 switch (dev_priv->chipset) { 403 switch (dev_priv->chipset) {
399 case 0x46: 404 case 0x41:
400 case 0x47: 405 case 0x42:
401 case 0x49: 406 case 0x43:
402 case 0x4b: 407 case 0x45:
403 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); 408 case 0x4e:
404 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); 409 case 0x44:
405 break; 410 case 0x4a:
406 default:
407 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); 411 nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0));
408 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); 412 nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1));
409 break; 413 break;
414 default:
415 nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0));
416 nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1));
417 break;
410 } 418 }
411 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); 419 nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0));
412 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); 420 nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1));
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index ea0041810ae3..e57caa2a00e3 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
403void 403void
404nv50_instmem_flush(struct drm_device *dev) 404nv50_instmem_flush(struct drm_device *dev)
405{ 405{
406 struct drm_nouveau_private *dev_priv = dev->dev_private;
407
408 spin_lock(&dev_priv->ramin_lock);
406 nv_wr32(dev, 0x00330c, 0x00000001); 409 nv_wr32(dev, 0x00330c, 0x00000001);
407 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) 410 if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000))
408 NV_ERROR(dev, "PRAMIN flush timeout\n"); 411 NV_ERROR(dev, "PRAMIN flush timeout\n");
412 spin_unlock(&dev_priv->ramin_lock);
409} 413}
410 414
411void 415void
412nv84_instmem_flush(struct drm_device *dev) 416nv84_instmem_flush(struct drm_device *dev)
413{ 417{
418 struct drm_nouveau_private *dev_priv = dev->dev_private;
419
420 spin_lock(&dev_priv->ramin_lock);
414 nv_wr32(dev, 0x070000, 0x00000001); 421 nv_wr32(dev, 0x070000, 0x00000001);
415 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) 422 if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
416 NV_ERROR(dev, "PRAMIN flush timeout\n"); 423 NV_ERROR(dev, "PRAMIN flush timeout\n");
424 spin_unlock(&dev_priv->ramin_lock);
417} 425}
418 426
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c
index 459ff08241e5..6144156f255a 100644
--- a/drivers/gpu/drm/nouveau/nv50_vm.c
+++ b/drivers/gpu/drm/nouveau/nv50_vm.c
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm)
169void 169void
170nv50_vm_flush_engine(struct drm_device *dev, int engine) 170nv50_vm_flush_engine(struct drm_device *dev, int engine)
171{ 171{
172 struct drm_nouveau_private *dev_priv = dev->dev_private;
173
174 spin_lock(&dev_priv->ramin_lock);
172 nv_wr32(dev, 0x100c80, (engine << 16) | 1); 175 nv_wr32(dev, 0x100c80, (engine << 16) | 1);
173 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) 176 if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000))
174 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); 177 NV_ERROR(dev, "vm flush timeout: engine %d\n", engine);
178 spin_unlock(&dev_priv->ramin_lock);
175} 179}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index b1537000a104..a4e5e53e0a62 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -48,29 +48,29 @@ static void atombios_overscan_setup(struct drm_crtc *crtc,
48 48
49 switch (radeon_crtc->rmx_type) { 49 switch (radeon_crtc->rmx_type) {
50 case RMX_CENTER: 50 case RMX_CENTER:
51 args.usOverscanTop = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; 51 args.usOverscanTop = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
52 args.usOverscanBottom = (adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2; 52 args.usOverscanBottom = cpu_to_le16((adjusted_mode->crtc_vdisplay - mode->crtc_vdisplay) / 2);
53 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; 53 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
54 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2; 54 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - mode->crtc_hdisplay) / 2);
55 break; 55 break;
56 case RMX_ASPECT: 56 case RMX_ASPECT:
57 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay; 57 a1 = mode->crtc_vdisplay * adjusted_mode->crtc_hdisplay;
58 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay; 58 a2 = adjusted_mode->crtc_vdisplay * mode->crtc_hdisplay;
59 59
60 if (a1 > a2) { 60 if (a1 > a2) {
61 args.usOverscanLeft = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; 61 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
62 args.usOverscanRight = (adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2; 62 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_hdisplay - (a2 / mode->crtc_vdisplay)) / 2);
63 } else if (a2 > a1) { 63 } else if (a2 > a1) {
64 args.usOverscanLeft = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; 64 args.usOverscanLeft = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
65 args.usOverscanRight = (adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2; 65 args.usOverscanRight = cpu_to_le16((adjusted_mode->crtc_vdisplay - (a1 / mode->crtc_hdisplay)) / 2);
66 } 66 }
67 break; 67 break;
68 case RMX_FULL: 68 case RMX_FULL:
69 default: 69 default:
70 args.usOverscanRight = radeon_crtc->h_border; 70 args.usOverscanRight = cpu_to_le16(radeon_crtc->h_border);
71 args.usOverscanLeft = radeon_crtc->h_border; 71 args.usOverscanLeft = cpu_to_le16(radeon_crtc->h_border);
72 args.usOverscanBottom = radeon_crtc->v_border; 72 args.usOverscanBottom = cpu_to_le16(radeon_crtc->v_border);
73 args.usOverscanTop = radeon_crtc->v_border; 73 args.usOverscanTop = cpu_to_le16(radeon_crtc->v_border);
74 break; 74 break;
75 } 75 }
76 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 76 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
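The overscan hunk above (and the spread-spectrum and pixel-clock hunks that follow) wrap every value stored into the ATOM BIOS argument structs in cpu_to_le16()/cpu_to_le32(), because those tables are little-endian regardless of host byte order. A standalone sketch of the idea with a hand-rolled conversion in place of the kernel helpers; the struct here is a simplified stand-in:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint16_t my_cpu_to_le16(uint16_t v)
{
	uint8_t b[2] = { (uint8_t)(v & 0xff), (uint8_t)(v >> 8) };
	uint16_t out;

	memcpy(&out, b, sizeof(out));   /* stored bytes are low-first on any host */
	return out;
}

struct atom_overscan_args {             /* simplified stand-in for the real struct */
	uint16_t usOverscanRight;
	uint16_t usOverscanLeft;
	uint16_t usOverscanBottom;
	uint16_t usOverscanTop;
};

int main(void)
{
	struct atom_overscan_args args;
	unsigned h_border = 8, v_border = 4;

	args.usOverscanRight  = my_cpu_to_le16(h_border);
	args.usOverscanLeft   = my_cpu_to_le16(h_border);
	args.usOverscanBottom = my_cpu_to_le16(v_border);
	args.usOverscanTop    = my_cpu_to_le16(v_border);

	const uint8_t *raw = (const uint8_t *)&args;
	printf("first field bytes: %02x %02x (little-endian on any host)\n",
	       raw[0], raw[1]);
	return 0;
}

On little-endian hosts the conversion is a no-op, which is why the missing annotations went unnoticed; the table layout only breaks on big-endian machines.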
@@ -419,23 +419,23 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
419 memset(&args, 0, sizeof(args)); 419 memset(&args, 0, sizeof(args));
420 420
421 if (ASIC_IS_DCE5(rdev)) { 421 if (ASIC_IS_DCE5(rdev)) {
422 args.v3.usSpreadSpectrumAmountFrac = 0; 422 args.v3.usSpreadSpectrumAmountFrac = cpu_to_le16(0);
423 args.v3.ucSpreadSpectrumType = ss->type; 423 args.v3.ucSpreadSpectrumType = ss->type;
424 switch (pll_id) { 424 switch (pll_id) {
425 case ATOM_PPLL1: 425 case ATOM_PPLL1:
426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL; 426 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P1PLL;
427 args.v3.usSpreadSpectrumAmount = ss->amount; 427 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
428 args.v3.usSpreadSpectrumStep = ss->step; 428 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
429 break; 429 break;
430 case ATOM_PPLL2: 430 case ATOM_PPLL2:
431 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL; 431 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_P2PLL;
432 args.v3.usSpreadSpectrumAmount = ss->amount; 432 args.v3.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
433 args.v3.usSpreadSpectrumStep = ss->step; 433 args.v3.usSpreadSpectrumStep = cpu_to_le16(ss->step);
434 break; 434 break;
435 case ATOM_DCPLL: 435 case ATOM_DCPLL:
436 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL; 436 args.v3.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V3_DCPLL;
437 args.v3.usSpreadSpectrumAmount = 0; 437 args.v3.usSpreadSpectrumAmount = cpu_to_le16(0);
438 args.v3.usSpreadSpectrumStep = 0; 438 args.v3.usSpreadSpectrumStep = cpu_to_le16(0);
439 break; 439 break;
440 case ATOM_PPLL_INVALID: 440 case ATOM_PPLL_INVALID:
441 return; 441 return;
@@ -447,18 +447,18 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
447 switch (pll_id) { 447 switch (pll_id) {
448 case ATOM_PPLL1: 448 case ATOM_PPLL1:
449 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL; 449 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P1PLL;
450 args.v2.usSpreadSpectrumAmount = ss->amount; 450 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
451 args.v2.usSpreadSpectrumStep = ss->step; 451 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
452 break; 452 break;
453 case ATOM_PPLL2: 453 case ATOM_PPLL2:
454 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL; 454 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_P2PLL;
455 args.v2.usSpreadSpectrumAmount = ss->amount; 455 args.v2.usSpreadSpectrumAmount = cpu_to_le16(ss->amount);
456 args.v2.usSpreadSpectrumStep = ss->step; 456 args.v2.usSpreadSpectrumStep = cpu_to_le16(ss->step);
457 break; 457 break;
458 case ATOM_DCPLL: 458 case ATOM_DCPLL:
459 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL; 459 args.v2.ucSpreadSpectrumType |= ATOM_PPLL_SS_TYPE_V2_DCPLL;
460 args.v2.usSpreadSpectrumAmount = 0; 460 args.v2.usSpreadSpectrumAmount = cpu_to_le16(0);
461 args.v2.usSpreadSpectrumStep = 0; 461 args.v2.usSpreadSpectrumStep = cpu_to_le16(0);
462 break; 462 break;
463 case ATOM_PPLL_INVALID: 463 case ATOM_PPLL_INVALID:
464 return; 464 return;
@@ -538,7 +538,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
538 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV; 538 pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
539 else 539 else
540 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV; 540 pll->flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
541
542 } 541 }
543 542
544 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { 543 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -555,29 +554,28 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
555 dp_clock = dig_connector->dp_clock; 554 dp_clock = dig_connector->dp_clock;
556 } 555 }
557 } 556 }
558/* this might work properly with the new pll algo */ 557
559#if 0 /* doesn't work properly on some laptops */
560 /* use recommended ref_div for ss */ 558 /* use recommended ref_div for ss */
561 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { 559 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
562 if (ss_enabled) { 560 if (ss_enabled) {
563 if (ss->refdiv) { 561 if (ss->refdiv) {
562 pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
564 pll->flags |= RADEON_PLL_USE_REF_DIV; 563 pll->flags |= RADEON_PLL_USE_REF_DIV;
565 pll->reference_div = ss->refdiv; 564 pll->reference_div = ss->refdiv;
565 if (ASIC_IS_AVIVO(rdev))
566 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
566 } 567 }
567 } 568 }
568 } 569 }
569#endif 570
570 if (ASIC_IS_AVIVO(rdev)) { 571 if (ASIC_IS_AVIVO(rdev)) {
571 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */ 572 /* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
572 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1) 573 if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
573 adjusted_clock = mode->clock * 2; 574 adjusted_clock = mode->clock * 2;
574 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) 575 if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
575 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER; 576 pll->flags |= RADEON_PLL_PREFER_CLOSEST_LOWER;
576 /* rv515 needs more testing with this option */ 577 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
577 if (rdev->family != CHIP_RV515) { 578 pll->flags |= RADEON_PLL_IS_LCD;
578 if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT))
579 pll->flags |= RADEON_PLL_IS_LCD;
580 }
581 } else { 579 } else {
582 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC) 580 if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
583 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV; 581 pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -664,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
664 index, (uint32_t *)&args); 662 index, (uint32_t *)&args);
665 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; 663 adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
666 if (args.v3.sOutput.ucRefDiv) { 664 if (args.v3.sOutput.ucRefDiv) {
665 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
667 pll->flags |= RADEON_PLL_USE_REF_DIV; 666 pll->flags |= RADEON_PLL_USE_REF_DIV;
668 pll->reference_div = args.v3.sOutput.ucRefDiv; 667 pll->reference_div = args.v3.sOutput.ucRefDiv;
669 } 668 }
670 if (args.v3.sOutput.ucPostDiv) { 669 if (args.v3.sOutput.ucPostDiv) {
670 pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV;
671 pll->flags |= RADEON_PLL_USE_POST_DIV; 671 pll->flags |= RADEON_PLL_USE_POST_DIV;
672 pll->post_div = args.v3.sOutput.ucPostDiv; 672 pll->post_div = args.v3.sOutput.ucPostDiv;
673 } 673 }
@@ -721,14 +721,14 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
721 * SetPixelClock provides the dividers 721 * SetPixelClock provides the dividers
722 */ 722 */
723 args.v5.ucCRTC = ATOM_CRTC_INVALID; 723 args.v5.ucCRTC = ATOM_CRTC_INVALID;
724 args.v5.usPixelClock = dispclk; 724 args.v5.usPixelClock = cpu_to_le16(dispclk);
725 args.v5.ucPpll = ATOM_DCPLL; 725 args.v5.ucPpll = ATOM_DCPLL;
726 break; 726 break;
727 case 6: 727 case 6:
728 /* if the default dcpll clock is specified, 728 /* if the default dcpll clock is specified,
729 * SetPixelClock provides the dividers 729 * SetPixelClock provides the dividers
730 */ 730 */
731 args.v6.ulDispEngClkFreq = dispclk; 731 args.v6.ulDispEngClkFreq = cpu_to_le32(dispclk);
732 args.v6.ucPpll = ATOM_DCPLL; 732 args.v6.ucPpll = ATOM_DCPLL;
733 break; 733 break;
734 default: 734 default:
@@ -957,11 +957,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
957 /* adjust pixel clock as needed */ 957 /* adjust pixel clock as needed */
958 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss); 958 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
959 959
960 /* rv515 seems happier with the old algo */ 960 if (ASIC_IS_AVIVO(rdev))
961 if (rdev->family == CHIP_RV515)
962 radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
963 &ref_div, &post_div);
964 else if (ASIC_IS_AVIVO(rdev))
965 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div, 961 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
966 &ref_div, &post_div); 962 &ref_div, &post_div);
967 else 963 else
@@ -995,9 +991,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
995 } 991 }
996} 992}
997 993
998static int evergreen_crtc_do_set_base(struct drm_crtc *crtc, 994static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
999 struct drm_framebuffer *fb, 995 struct drm_framebuffer *fb,
1000 int x, int y, int atomic) 996 int x, int y, int atomic)
1001{ 997{
1002 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc); 998 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1003 struct drm_device *dev = crtc->dev; 999 struct drm_device *dev = crtc->dev;
@@ -1137,12 +1133,6 @@ static int evergreen_crtc_do_set_base(struct drm_crtc *crtc,
1137 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1133 WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1138 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1134 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
1139 1135
1140 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
1141 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
1142 EVERGREEN_INTERLEAVE_EN);
1143 else
1144 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1145
1146 if (!atomic && fb && fb != crtc->fb) { 1136 if (!atomic && fb && fb != crtc->fb) {
1147 radeon_fb = to_radeon_framebuffer(fb); 1137 radeon_fb = to_radeon_framebuffer(fb);
1148 rbo = radeon_fb->obj->driver_private; 1138 rbo = radeon_fb->obj->driver_private;
@@ -1300,12 +1290,6 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1300 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset, 1290 WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
1301 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay); 1291 (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
1302 1292
1303 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
1304 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
1305 AVIVO_D1MODE_INTERLEAVE_EN);
1306 else
1307 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1308
1309 if (!atomic && fb && fb != crtc->fb) { 1293 if (!atomic && fb && fb != crtc->fb) {
1310 radeon_fb = to_radeon_framebuffer(fb); 1294 radeon_fb = to_radeon_framebuffer(fb);
1311 rbo = radeon_fb->obj->driver_private; 1295 rbo = radeon_fb->obj->driver_private;
@@ -1329,7 +1313,7 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
1329 struct radeon_device *rdev = dev->dev_private; 1313 struct radeon_device *rdev = dev->dev_private;
1330 1314
1331 if (ASIC_IS_DCE4(rdev)) 1315 if (ASIC_IS_DCE4(rdev))
1332 return evergreen_crtc_do_set_base(crtc, old_fb, x, y, 0); 1316 return dce4_crtc_do_set_base(crtc, old_fb, x, y, 0);
1333 else if (ASIC_IS_AVIVO(rdev)) 1317 else if (ASIC_IS_AVIVO(rdev))
1334 return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0); 1318 return avivo_crtc_do_set_base(crtc, old_fb, x, y, 0);
1335 else 1319 else
@@ -1344,7 +1328,7 @@ int atombios_crtc_set_base_atomic(struct drm_crtc *crtc,
1344 struct radeon_device *rdev = dev->dev_private; 1328 struct radeon_device *rdev = dev->dev_private;
1345 1329
1346 if (ASIC_IS_DCE4(rdev)) 1330 if (ASIC_IS_DCE4(rdev))
1347 return evergreen_crtc_do_set_base(crtc, fb, x, y, 1); 1331 return dce4_crtc_do_set_base(crtc, fb, x, y, 1);
1348 else if (ASIC_IS_AVIVO(rdev)) 1332 else if (ASIC_IS_AVIVO(rdev))
1349 return avivo_crtc_do_set_base(crtc, fb, x, y, 1); 1333 return avivo_crtc_do_set_base(crtc, fb, x, y, 1);
1350 else 1334 else
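The atombios_crtc.c hunks above do three things: they re-enable the recommended spread-spectrum reference divider (now paired with RADEON_PLL_PREFER_MINM_OVER_MAXP and a fractional feedback divider on AVIVO parts), they route RV515 through the common AVIVO PLL computation instead of the legacy fallback, and they byte-swap the display clock with cpu_to_le16()/cpu_to_le32() before it is handed to the SetPixelClock table, since AtomBIOS argument blocks are little-endian regardless of host byte order. A minimal sketch of that convention follows; the struct and field names are illustrative, not the real AtomBIOS headers.

#include <linux/types.h>
#include <asm/byteorder.h>

/* hypothetical argument block -- only the byte-order handling matters */
struct example_atom_args {
	__le16 usPixelClock;
	__le32 ulDispEngClkFreq;
};

static void example_fill_args(struct example_atom_args *args,
			      u16 pixel_clock, u32 dispclk)
{
	/* convert from CPU byte order before the table interpreter runs;
	 * values read back from a table go through le16/le32_to_cpu() */
	args->usPixelClock = cpu_to_le16(pixel_clock);
	args->ulDispEngClkFreq = cpu_to_le32(dispclk);
}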
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index ffdc8332b76e..6140ea1de45a 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1192,7 +1192,11 @@ void evergreen_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
1192 radeon_ring_write(rdev, 1); 1192 radeon_ring_write(rdev, 1);
1193 /* FIXME: implement */ 1193 /* FIXME: implement */
1194 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 1194 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
1195 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); 1195 radeon_ring_write(rdev,
1196#ifdef __BIG_ENDIAN
1197 (2 << 0) |
1198#endif
1199 (ib->gpu_addr & 0xFFFFFFFC));
1196 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); 1200 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
1197 radeon_ring_write(rdev, ib->length_dw); 1201 radeon_ring_write(rdev, ib->length_dw);
1198} 1202}
@@ -1207,7 +1211,11 @@ static int evergreen_cp_load_microcode(struct radeon_device *rdev)
1207 return -EINVAL; 1211 return -EINVAL;
1208 1212
1209 r700_cp_stop(rdev); 1213 r700_cp_stop(rdev);
1210 WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0)); 1214 WREG32(CP_RB_CNTL,
1215#ifdef __BIG_ENDIAN
1216 BUF_SWAP_32BIT |
1217#endif
1218 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
1211 1219
1212 fw_data = (const __be32 *)rdev->pfp_fw->data; 1220 fw_data = (const __be32 *)rdev->pfp_fw->data;
1213 WREG32(CP_PFP_UCODE_ADDR, 0); 1221 WREG32(CP_PFP_UCODE_ADDR, 0);
@@ -1326,7 +1334,11 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1326 WREG32(CP_RB_WPTR, 0); 1334 WREG32(CP_RB_WPTR, 0);
1327 1335
1328 /* set the wb address wether it's enabled or not */ 1336 /* set the wb address wether it's enabled or not */
1329 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 1337 WREG32(CP_RB_RPTR_ADDR,
1338#ifdef __BIG_ENDIAN
1339 RB_RPTR_SWAP(2) |
1340#endif
1341 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
1330 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 1342 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
1331 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 1343 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
1332 1344
@@ -2182,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev)
2182 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 2194 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
2183 } 2195 }
2184 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2196 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2185 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2186 r700_vram_gtt_location(rdev, &rdev->mc); 2197 r700_vram_gtt_location(rdev, &rdev->mc);
2187 radeon_update_bandwidth_info(rdev); 2198 radeon_update_bandwidth_info(rdev);
2188 2199
@@ -2627,8 +2638,8 @@ restart_ih:
2627 while (rptr != wptr) { 2638 while (rptr != wptr) {
2628 /* wptr/rptr are in bytes! */ 2639 /* wptr/rptr are in bytes! */
2629 ring_index = rptr / 4; 2640 ring_index = rptr / 4;
2630 src_id = rdev->ih.ring[ring_index] & 0xff; 2641 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
2631 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; 2642 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
2632 2643
2633 switch (src_id) { 2644 switch (src_id) {
2634 case 1: /* D1 vblank/vline */ 2645 case 1: /* D1 vblank/vline */
@@ -2922,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev)
2922 /* XXX: ontario has problems blitting to gart at the moment */ 2933 /* XXX: ontario has problems blitting to gart at the moment */
2923 if (rdev->family == CHIP_PALM) { 2934 if (rdev->family == CHIP_PALM) {
2924 rdev->asic->copy = NULL; 2935 rdev->asic->copy = NULL;
2925 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 2936 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
2926 } 2937 }
2927 2938
2928 /* allocate wb buffer */ 2939 /* allocate wb buffer */
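The evergreen.c changes are part of the big-endian enablement: the CP ring control gains BUF_SWAP_32BIT, the read-pointer address gains RB_RPTR_SWAP(2), indirect-buffer packets carry a swap mode in their low bits, and interrupt ring entries are read through le32_to_cpu() because the GPU writes them little-endian. A small sketch of the last point, with made-up names (the masks mirror the hunk above):

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_decode_ih(const __le32 *ring, u32 idx,
			      u32 *src_id, u32 *src_data)
{
	/* the IH ring is written little-endian by the GPU, so swap
	 * before extracting the fields on big-endian hosts */
	*src_id = le32_to_cpu(ring[idx]) & 0xff;
	*src_data = le32_to_cpu(ring[idx + 1]) & 0xfffffff;
}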
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
index a1ba4b3053d0..2be698e78ff2 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c
@@ -55,7 +55,7 @@ set_render_target(struct radeon_device *rdev, int format,
55 if (h < 8) 55 if (h < 8)
56 h = 8; 56 h = 8;
57 57
58 cb_color_info = ((format << 2) | (1 << 24)); 58 cb_color_info = ((format << 2) | (1 << 24) | (1 << 8));
59 pitch = (w / 8) - 1; 59 pitch = (w / 8) - 1;
60 slice = ((w * h) / 64) - 1; 60 slice = ((w * h) / 64) - 1;
61 61
@@ -133,6 +133,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
133 133
134 /* high addr, stride */ 134 /* high addr, stride */
135 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); 135 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
136#ifdef __BIG_ENDIAN
137 sq_vtx_constant_word2 |= (2 << 30);
138#endif
136 /* xyzw swizzles */ 139 /* xyzw swizzles */
137 sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12); 140 sq_vtx_constant_word3 = (0 << 3) | (1 << 6) | (2 << 9) | (3 << 12);
138 141
@@ -173,7 +176,7 @@ set_tex_resource(struct radeon_device *rdev,
173 sq_tex_resource_word0 = (1 << 0); /* 2D */ 176 sq_tex_resource_word0 = (1 << 0); /* 2D */
174 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) | 177 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 6) |
175 ((w - 1) << 18)); 178 ((w - 1) << 18));
176 sq_tex_resource_word1 = ((h - 1) << 0); 179 sq_tex_resource_word1 = ((h - 1) << 0) | (1 << 28);
177 /* xyzw swizzles */ 180 /* xyzw swizzles */
178 sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25); 181 sq_tex_resource_word4 = (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25);
179 182
@@ -221,7 +224,11 @@ draw_auto(struct radeon_device *rdev)
221 radeon_ring_write(rdev, DI_PT_RECTLIST); 224 radeon_ring_write(rdev, DI_PT_RECTLIST);
222 225
223 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); 226 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
224 radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); 227 radeon_ring_write(rdev,
228#ifdef __BIG_ENDIAN
229 (2 << 2) |
230#endif
231 DI_INDEX_SIZE_16_BIT);
225 232
226 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); 233 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
227 radeon_ring_write(rdev, 1); 234 radeon_ring_write(rdev, 1);
@@ -541,7 +548,7 @@ static inline uint32_t i2f(uint32_t input)
541int evergreen_blit_init(struct radeon_device *rdev) 548int evergreen_blit_init(struct radeon_device *rdev)
542{ 549{
543 u32 obj_size; 550 u32 obj_size;
544 int r, dwords; 551 int i, r, dwords;
545 void *ptr; 552 void *ptr;
546 u32 packet2s[16]; 553 u32 packet2s[16];
547 int num_packet2s = 0; 554 int num_packet2s = 0;
@@ -557,7 +564,7 @@ int evergreen_blit_init(struct radeon_device *rdev)
557 564
558 dwords = rdev->r600_blit.state_len; 565 dwords = rdev->r600_blit.state_len;
559 while (dwords & 0xf) { 566 while (dwords & 0xf) {
560 packet2s[num_packet2s++] = PACKET2(0); 567 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
561 dwords++; 568 dwords++;
562 } 569 }
563 570
@@ -598,8 +605,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
598 if (num_packet2s) 605 if (num_packet2s)
599 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 606 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
600 packet2s, num_packet2s * 4); 607 packet2s, num_packet2s * 4);
601 memcpy(ptr + rdev->r600_blit.vs_offset, evergreen_vs, evergreen_vs_size * 4); 608 for (i = 0; i < evergreen_vs_size; i++)
602 memcpy(ptr + rdev->r600_blit.ps_offset, evergreen_ps, evergreen_ps_size * 4); 609 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
610 for (i = 0; i < evergreen_ps_size; i++)
611 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
603 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 612 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
604 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 613 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
605 614
@@ -614,7 +623,7 @@ done:
614 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 623 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
615 return r; 624 return r;
616 } 625 }
617 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 626 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
618 return 0; 627 return 0;
619} 628}
620 629
@@ -622,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev)
622{ 631{
623 int r; 632 int r;
624 633
625 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 634 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
626 if (rdev->r600_blit.shader_obj == NULL) 635 if (rdev->r600_blit.shader_obj == NULL)
627 return; 636 return;
628 /* If we can't reserve the bo, unref should be enough to destroy 637 /* If we can't reserve the bo, unref should be enough to destroy
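In evergreen_blit_kms.c the blit shaders are no longer memcpy()'d into the shader BO; each dword is converted with cpu_to_le32() because the CP fetches shader code as little-endian dwords. A sketch of that per-dword copy, assuming a hypothetical helper name:

#include <linux/types.h>
#include <asm/byteorder.h>

static void example_copy_shader(void *dst, const u32 *src, unsigned int ndw)
{
	unsigned int i;

	/* swap each instruction word; a plain memcpy() would leave the
	 * stream in host byte order on big-endian machines */
	for (i = 0; i < ndw; i++)
		*((u32 *)dst + i) = cpu_to_le32(src[i]);
}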
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
index ef1d28c07fbf..3a10399e0066 100644
--- a/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/evergreen_blit_shaders.c
@@ -311,11 +311,19 @@ const u32 evergreen_vs[] =
311 0x00000000, 311 0x00000000,
312 0x3c000000, 312 0x3c000000,
313 0x67961001, 313 0x67961001,
314#ifdef __BIG_ENDIAN
315 0x000a0000,
316#else
314 0x00080000, 317 0x00080000,
318#endif
315 0x00000000, 319 0x00000000,
316 0x1c000000, 320 0x1c000000,
317 0x67961000, 321 0x67961000,
322#ifdef __BIG_ENDIAN
323 0x00020008,
324#else
318 0x00000008, 325 0x00000008,
326#endif
319 0x00000000, 327 0x00000000,
320}; 328};
321 329
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index afec1aca2a73..eb4acf4528ff 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -98,6 +98,7 @@
98#define BUF_SWAP_32BIT (2 << 16) 98#define BUF_SWAP_32BIT (2 << 16)
99#define CP_RB_RPTR 0x8700 99#define CP_RB_RPTR 0x8700
100#define CP_RB_RPTR_ADDR 0xC10C 100#define CP_RB_RPTR_ADDR 0xC10C
101#define RB_RPTR_SWAP(x) ((x) << 0)
101#define CP_RB_RPTR_ADDR_HI 0xC110 102#define CP_RB_RPTR_ADDR_HI 0xC110
102#define CP_RB_RPTR_WR 0xC108 103#define CP_RB_RPTR_WR 0xC108
103#define CP_RB_WPTR 0xC114 104#define CP_RB_WPTR 0xC114
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 607241c6a8a9..5a82b6b75849 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -673,8 +673,10 @@ static int parser_auth(struct table *t, const char *filename)
673 last_reg = strtol(last_reg_s, NULL, 16); 673 last_reg = strtol(last_reg_s, NULL, 16);
674 674
675 do { 675 do {
676 if (fgets(buf, 1024, file) == NULL) 676 if (fgets(buf, 1024, file) == NULL) {
677 fclose(file);
677 return -1; 678 return -1;
679 }
678 len = strlen(buf); 680 len = strlen(buf);
679 if (ftell(file) == end) 681 if (ftell(file) == end)
680 done = 1; 682 done = 1;
@@ -685,6 +687,7 @@ static int parser_auth(struct table *t, const char *filename)
685 fprintf(stderr, 687 fprintf(stderr,
686 "Error matching regular expression %d in %s\n", 688 "Error matching regular expression %d in %s\n",
687 r, filename); 689 r, filename);
690 fclose(file);
688 return -1; 691 return -1;
689 } else { 692 } else {
690 buf[match[0].rm_eo] = 0; 693 buf[match[0].rm_eo] = 0;
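The mkregtable.c change closes the input file on the two early-return error paths in parser_auth(), which previously leaked the FILE handle. The same leak can also be avoided with a single exit point; a rough userspace sketch (the function name is illustrative, not the tool's actual code):

#include <stdio.h>

static int example_scan_file(const char *filename)
{
	FILE *file = fopen(filename, "r");
	char buf[1024];
	int ret = -1;

	if (file == NULL)
		return -1;

	while (fgets(buf, sizeof(buf), file) != NULL) {
		/* ... match and record register lines here ... */
		ret = 0;
	}

	fclose(file);	/* runs on every path, so the stream never leaks */
	return ret;
}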
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 5f15820efe12..e372f9e1e5ce 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520);
70 70
71void r100_pre_page_flip(struct radeon_device *rdev, int crtc) 71void r100_pre_page_flip(struct radeon_device *rdev, int crtc)
72{ 72{
73 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc];
74 u32 tmp;
75
76 /* make sure flip is at vb rather than hb */
77 tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset);
78 tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL;
79 /* make sure pending bit is asserted */
80 tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
81 WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp);
82
83 /* set pageflip to happen as late as possible in the vblank interval.
84 * same field for crtc1/2
85 */
86 tmp = RREG32(RADEON_CRTC_GEN_CNTL);
87 tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK;
88 WREG32(RADEON_CRTC_GEN_CNTL, tmp);
89
90 /* enable the pflip int */ 73 /* enable the pflip int */
91 radeon_irq_kms_pflip_irq_get(rdev, crtc); 74 radeon_irq_kms_pflip_irq_get(rdev, crtc);
92} 75}
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
1041 return r; 1024 return r;
1042 } 1025 }
1043 rdev->cp.ready = true; 1026 rdev->cp.ready = true;
1044 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 1027 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
1045 return 0; 1028 return 0;
1046} 1029}
1047 1030
@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev)
1059void r100_cp_disable(struct radeon_device *rdev) 1042void r100_cp_disable(struct radeon_device *rdev)
1060{ 1043{
1061 /* Disable ring */ 1044 /* Disable ring */
1062 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1045 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1063 rdev->cp.ready = false; 1046 rdev->cp.ready = false;
1064 WREG32(RADEON_CP_CSQ_MODE, 0); 1047 WREG32(RADEON_CP_CSQ_MODE, 0);
1065 WREG32(RADEON_CP_CSQ_CNTL, 0); 1048 WREG32(RADEON_CP_CSQ_CNTL, 0);
@@ -1427,6 +1410,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1427 } 1410 }
1428 track->zb.robj = reloc->robj; 1411 track->zb.robj = reloc->robj;
1429 track->zb.offset = idx_value; 1412 track->zb.offset = idx_value;
1413 track->zb_dirty = true;
1430 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1414 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1431 break; 1415 break;
1432 case RADEON_RB3D_COLOROFFSET: 1416 case RADEON_RB3D_COLOROFFSET:
@@ -1439,6 +1423,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1439 } 1423 }
1440 track->cb[0].robj = reloc->robj; 1424 track->cb[0].robj = reloc->robj;
1441 track->cb[0].offset = idx_value; 1425 track->cb[0].offset = idx_value;
1426 track->cb_dirty = true;
1442 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1427 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1443 break; 1428 break;
1444 case RADEON_PP_TXOFFSET_0: 1429 case RADEON_PP_TXOFFSET_0:
@@ -1454,6 +1439,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1454 } 1439 }
1455 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1440 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1456 track->textures[i].robj = reloc->robj; 1441 track->textures[i].robj = reloc->robj;
1442 track->tex_dirty = true;
1457 break; 1443 break;
1458 case RADEON_PP_CUBIC_OFFSET_T0_0: 1444 case RADEON_PP_CUBIC_OFFSET_T0_0:
1459 case RADEON_PP_CUBIC_OFFSET_T0_1: 1445 case RADEON_PP_CUBIC_OFFSET_T0_1:
@@ -1471,6 +1457,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1471 track->textures[0].cube_info[i].offset = idx_value; 1457 track->textures[0].cube_info[i].offset = idx_value;
1472 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1458 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1473 track->textures[0].cube_info[i].robj = reloc->robj; 1459 track->textures[0].cube_info[i].robj = reloc->robj;
1460 track->tex_dirty = true;
1474 break; 1461 break;
1475 case RADEON_PP_CUBIC_OFFSET_T1_0: 1462 case RADEON_PP_CUBIC_OFFSET_T1_0:
1476 case RADEON_PP_CUBIC_OFFSET_T1_1: 1463 case RADEON_PP_CUBIC_OFFSET_T1_1:
@@ -1488,6 +1475,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1488 track->textures[1].cube_info[i].offset = idx_value; 1475 track->textures[1].cube_info[i].offset = idx_value;
1489 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1476 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1490 track->textures[1].cube_info[i].robj = reloc->robj; 1477 track->textures[1].cube_info[i].robj = reloc->robj;
1478 track->tex_dirty = true;
1491 break; 1479 break;
1492 case RADEON_PP_CUBIC_OFFSET_T2_0: 1480 case RADEON_PP_CUBIC_OFFSET_T2_0:
1493 case RADEON_PP_CUBIC_OFFSET_T2_1: 1481 case RADEON_PP_CUBIC_OFFSET_T2_1:
@@ -1505,9 +1493,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1505 track->textures[2].cube_info[i].offset = idx_value; 1493 track->textures[2].cube_info[i].offset = idx_value;
1506 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 1494 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1507 track->textures[2].cube_info[i].robj = reloc->robj; 1495 track->textures[2].cube_info[i].robj = reloc->robj;
1496 track->tex_dirty = true;
1508 break; 1497 break;
1509 case RADEON_RE_WIDTH_HEIGHT: 1498 case RADEON_RE_WIDTH_HEIGHT:
1510 track->maxy = ((idx_value >> 16) & 0x7FF); 1499 track->maxy = ((idx_value >> 16) & 0x7FF);
1500 track->cb_dirty = true;
1501 track->zb_dirty = true;
1511 break; 1502 break;
1512 case RADEON_RB3D_COLORPITCH: 1503 case RADEON_RB3D_COLORPITCH:
1513 r = r100_cs_packet_next_reloc(p, &reloc); 1504 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1528,9 +1519,11 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1528 ib[idx] = tmp; 1519 ib[idx] = tmp;
1529 1520
1530 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 1521 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1522 track->cb_dirty = true;
1531 break; 1523 break;
1532 case RADEON_RB3D_DEPTHPITCH: 1524 case RADEON_RB3D_DEPTHPITCH:
1533 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 1525 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1526 track->zb_dirty = true;
1534 break; 1527 break;
1535 case RADEON_RB3D_CNTL: 1528 case RADEON_RB3D_CNTL:
1536 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1529 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -1555,6 +1548,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1555 return -EINVAL; 1548 return -EINVAL;
1556 } 1549 }
1557 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 1550 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1551 track->cb_dirty = true;
1552 track->zb_dirty = true;
1558 break; 1553 break;
1559 case RADEON_RB3D_ZSTENCILCNTL: 1554 case RADEON_RB3D_ZSTENCILCNTL:
1560 switch (idx_value & 0xf) { 1555 switch (idx_value & 0xf) {
@@ -1572,6 +1567,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1572 default: 1567 default:
1573 break; 1568 break;
1574 } 1569 }
1570 track->zb_dirty = true;
1575 break; 1571 break;
1576 case RADEON_RB3D_ZPASS_ADDR: 1572 case RADEON_RB3D_ZPASS_ADDR:
1577 r = r100_cs_packet_next_reloc(p, &reloc); 1573 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1588,6 +1584,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1588 uint32_t temp = idx_value >> 4; 1584 uint32_t temp = idx_value >> 4;
1589 for (i = 0; i < track->num_texture; i++) 1585 for (i = 0; i < track->num_texture; i++)
1590 track->textures[i].enabled = !!(temp & (1 << i)); 1586 track->textures[i].enabled = !!(temp & (1 << i));
1587 track->tex_dirty = true;
1591 } 1588 }
1592 break; 1589 break;
1593 case RADEON_SE_VF_CNTL: 1590 case RADEON_SE_VF_CNTL:
@@ -1602,12 +1599,14 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1602 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1599 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1603 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 1600 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1604 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1601 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1602 track->tex_dirty = true;
1605 break; 1603 break;
1606 case RADEON_PP_TEX_PITCH_0: 1604 case RADEON_PP_TEX_PITCH_0:
1607 case RADEON_PP_TEX_PITCH_1: 1605 case RADEON_PP_TEX_PITCH_1:
1608 case RADEON_PP_TEX_PITCH_2: 1606 case RADEON_PP_TEX_PITCH_2:
1609 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1607 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1610 track->textures[i].pitch = idx_value + 32; 1608 track->textures[i].pitch = idx_value + 32;
1609 track->tex_dirty = true;
1611 break; 1610 break;
1612 case RADEON_PP_TXFILTER_0: 1611 case RADEON_PP_TXFILTER_0:
1613 case RADEON_PP_TXFILTER_1: 1612 case RADEON_PP_TXFILTER_1:
@@ -1621,6 +1620,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1621 tmp = (idx_value >> 27) & 0x7; 1620 tmp = (idx_value >> 27) & 0x7;
1622 if (tmp == 2 || tmp == 6) 1621 if (tmp == 2 || tmp == 6)
1623 track->textures[i].roundup_h = false; 1622 track->textures[i].roundup_h = false;
1623 track->tex_dirty = true;
1624 break; 1624 break;
1625 case RADEON_PP_TXFORMAT_0: 1625 case RADEON_PP_TXFORMAT_0:
1626 case RADEON_PP_TXFORMAT_1: 1626 case RADEON_PP_TXFORMAT_1:
@@ -1673,6 +1673,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1673 } 1673 }
1674 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 1674 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1675 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 1675 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1676 track->tex_dirty = true;
1676 break; 1677 break;
1677 case RADEON_PP_CUBIC_FACES_0: 1678 case RADEON_PP_CUBIC_FACES_0:
1678 case RADEON_PP_CUBIC_FACES_1: 1679 case RADEON_PP_CUBIC_FACES_1:
@@ -1683,6 +1684,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1683 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1684 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
1684 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 1685 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
1685 } 1686 }
1687 track->tex_dirty = true;
1686 break; 1688 break;
1687 default: 1689 default:
1688 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 1690 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -2310,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
2310 /* FIXME we don't use the second aperture yet when we could use it */ 2312 /* FIXME we don't use the second aperture yet when we could use it */
2311 if (rdev->mc.visible_vram_size > rdev->mc.aper_size) 2313 if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
2312 rdev->mc.visible_vram_size = rdev->mc.aper_size; 2314 rdev->mc.visible_vram_size = rdev->mc.aper_size;
2313 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
2314 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); 2315 config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
2315 if (rdev->flags & RADEON_IS_IGP) { 2316 if (rdev->flags & RADEON_IS_IGP) {
2316 uint32_t tom; 2317 uint32_t tom;
@@ -3318,9 +3319,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3318 unsigned long size; 3319 unsigned long size;
3319 unsigned prim_walk; 3320 unsigned prim_walk;
3320 unsigned nverts; 3321 unsigned nverts;
3321 unsigned num_cb = track->num_cb; 3322 unsigned num_cb = track->cb_dirty ? track->num_cb : 0;
3322 3323
3323 if (!track->zb_cb_clear && !track->color_channel_mask && 3324 if (num_cb && !track->zb_cb_clear && !track->color_channel_mask &&
3324 !track->blend_read_enable) 3325 !track->blend_read_enable)
3325 num_cb = 0; 3326 num_cb = 0;
3326 3327
@@ -3341,7 +3342,9 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3341 return -EINVAL; 3342 return -EINVAL;
3342 } 3343 }
3343 } 3344 }
3344 if (track->z_enabled) { 3345 track->cb_dirty = false;
3346
3347 if (track->zb_dirty && track->z_enabled) {
3345 if (track->zb.robj == NULL) { 3348 if (track->zb.robj == NULL) {
3346 DRM_ERROR("[drm] No buffer for z buffer !\n"); 3349 DRM_ERROR("[drm] No buffer for z buffer !\n");
3347 return -EINVAL; 3350 return -EINVAL;
@@ -3358,6 +3361,28 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3358 return -EINVAL; 3361 return -EINVAL;
3359 } 3362 }
3360 } 3363 }
3364 track->zb_dirty = false;
3365
3366 if (track->aa_dirty && track->aaresolve) {
3367 if (track->aa.robj == NULL) {
3368 DRM_ERROR("[drm] No buffer for AA resolve buffer %d !\n", i);
3369 return -EINVAL;
3370 }
3371 /* I believe the format comes from colorbuffer0. */
3372 size = track->aa.pitch * track->cb[0].cpp * track->maxy;
3373 size += track->aa.offset;
3374 if (size > radeon_bo_size(track->aa.robj)) {
3375 DRM_ERROR("[drm] Buffer too small for AA resolve buffer %d "
3376 "(need %lu have %lu) !\n", i, size,
3377 radeon_bo_size(track->aa.robj));
3378 DRM_ERROR("[drm] AA resolve buffer %d (%u %u %u %u)\n",
3379 i, track->aa.pitch, track->cb[0].cpp,
3380 track->aa.offset, track->maxy);
3381 return -EINVAL;
3382 }
3383 }
3384 track->aa_dirty = false;
3385
3361 prim_walk = (track->vap_vf_cntl >> 4) & 0x3; 3386 prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
3362 if (track->vap_vf_cntl & (1 << 14)) { 3387 if (track->vap_vf_cntl & (1 << 14)) {
3363 nverts = track->vap_alt_nverts; 3388 nverts = track->vap_alt_nverts;
@@ -3417,13 +3442,23 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
3417 prim_walk); 3442 prim_walk);
3418 return -EINVAL; 3443 return -EINVAL;
3419 } 3444 }
3420 return r100_cs_track_texture_check(rdev, track); 3445
3446 if (track->tex_dirty) {
3447 track->tex_dirty = false;
3448 return r100_cs_track_texture_check(rdev, track);
3449 }
3450 return 0;
3421} 3451}
3422 3452
3423void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track) 3453void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track)
3424{ 3454{
3425 unsigned i, face; 3455 unsigned i, face;
3426 3456
3457 track->cb_dirty = true;
3458 track->zb_dirty = true;
3459 track->tex_dirty = true;
3460 track->aa_dirty = true;
3461
3427 if (rdev->family < CHIP_R300) { 3462 if (rdev->family < CHIP_R300) {
3428 track->num_cb = 1; 3463 track->num_cb = 1;
3429 if (rdev->family <= CHIP_RS200) 3464 if (rdev->family <= CHIP_RS200)
@@ -3437,6 +3472,8 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track
3437 track->num_texture = 16; 3472 track->num_texture = 16;
3438 track->maxy = 4096; 3473 track->maxy = 4096;
3439 track->separate_cube = 0; 3474 track->separate_cube = 0;
3475 track->aaresolve = false;
3476 track->aa.robj = NULL;
3440 } 3477 }
3441 3478
3442 for (i = 0; i < track->num_cb; i++) { 3479 for (i = 0; i < track->num_cb; i++) {
@@ -3746,8 +3783,6 @@ static int r100_startup(struct radeon_device *rdev)
3746 r100_mc_program(rdev); 3783 r100_mc_program(rdev);
3747 /* Resume clock */ 3784 /* Resume clock */
3748 r100_clock_startup(rdev); 3785 r100_clock_startup(rdev);
3749 /* Initialize GPU configuration (# pipes, ...) */
3750// r100_gpu_init(rdev);
3751 /* Initialize GART (initialize after TTM so we can allocate 3786 /* Initialize GART (initialize after TTM so we can allocate
3752 * memory through TTM but finalize after TTM) */ 3787 * memory through TTM but finalize after TTM) */
3753 r100_enable_bm(rdev); 3788 r100_enable_bm(rdev);
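The bulk of the r100.c change introduces dirty-flag tracking for the command-stream checker: registers that affect the color buffer, depth buffer, textures or AA resolve buffer set cb_dirty, zb_dirty, tex_dirty or aa_dirty, and r100_cs_track_check() only revalidates the pieces whose state actually changed, clearing each flag afterwards. The general shape, with illustrative names:

#include <linux/types.h>

struct example_track {
	bool cb_dirty;
	bool zb_dirty;
	bool tex_dirty;
};

static int example_track_check(struct example_track *t)
{
	if (t->cb_dirty) {
		/* ... validate color buffer offsets and pitches ... */
		t->cb_dirty = false;
	}
	if (t->zb_dirty) {
		/* ... validate the depth buffer ... */
		t->zb_dirty = false;
	}
	if (t->tex_dirty) {
		t->tex_dirty = false;
		/* ... validate the bound textures ... */
	}
	return 0;
}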
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index af65600e6564..2fef9de7f363 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -52,14 +52,7 @@ struct r100_cs_track_texture {
52 unsigned compress_format; 52 unsigned compress_format;
53}; 53};
54 54
55struct r100_cs_track_limits {
56 unsigned num_cb;
57 unsigned num_texture;
58 unsigned max_levels;
59};
60
61struct r100_cs_track { 55struct r100_cs_track {
62 struct radeon_device *rdev;
63 unsigned num_cb; 56 unsigned num_cb;
64 unsigned num_texture; 57 unsigned num_texture;
65 unsigned maxy; 58 unsigned maxy;
@@ -73,11 +66,17 @@ struct r100_cs_track {
73 struct r100_cs_track_array arrays[11]; 66 struct r100_cs_track_array arrays[11];
74 struct r100_cs_track_cb cb[R300_MAX_CB]; 67 struct r100_cs_track_cb cb[R300_MAX_CB];
75 struct r100_cs_track_cb zb; 68 struct r100_cs_track_cb zb;
69 struct r100_cs_track_cb aa;
76 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE]; 70 struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
77 bool z_enabled; 71 bool z_enabled;
78 bool separate_cube; 72 bool separate_cube;
79 bool zb_cb_clear; 73 bool zb_cb_clear;
80 bool blend_read_enable; 74 bool blend_read_enable;
75 bool cb_dirty;
76 bool zb_dirty;
77 bool tex_dirty;
78 bool aa_dirty;
79 bool aaresolve;
81}; 80};
82 81
83int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track); 82int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index d2408c395619..f24058300413 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -184,6 +184,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
184 } 184 }
185 track->zb.robj = reloc->robj; 185 track->zb.robj = reloc->robj;
186 track->zb.offset = idx_value; 186 track->zb.offset = idx_value;
187 track->zb_dirty = true;
187 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 188 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
188 break; 189 break;
189 case RADEON_RB3D_COLOROFFSET: 190 case RADEON_RB3D_COLOROFFSET:
@@ -196,6 +197,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
196 } 197 }
197 track->cb[0].robj = reloc->robj; 198 track->cb[0].robj = reloc->robj;
198 track->cb[0].offset = idx_value; 199 track->cb[0].offset = idx_value;
200 track->cb_dirty = true;
199 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 201 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
200 break; 202 break;
201 case R200_PP_TXOFFSET_0: 203 case R200_PP_TXOFFSET_0:
@@ -214,6 +216,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
214 } 216 }
215 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 217 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
216 track->textures[i].robj = reloc->robj; 218 track->textures[i].robj = reloc->robj;
219 track->tex_dirty = true;
217 break; 220 break;
218 case R200_PP_CUBIC_OFFSET_F1_0: 221 case R200_PP_CUBIC_OFFSET_F1_0:
219 case R200_PP_CUBIC_OFFSET_F2_0: 222 case R200_PP_CUBIC_OFFSET_F2_0:
@@ -257,9 +260,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
257 track->textures[i].cube_info[face - 1].offset = idx_value; 260 track->textures[i].cube_info[face - 1].offset = idx_value;
258 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 261 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
259 track->textures[i].cube_info[face - 1].robj = reloc->robj; 262 track->textures[i].cube_info[face - 1].robj = reloc->robj;
263 track->tex_dirty = true;
260 break; 264 break;
261 case RADEON_RE_WIDTH_HEIGHT: 265 case RADEON_RE_WIDTH_HEIGHT:
262 track->maxy = ((idx_value >> 16) & 0x7FF); 266 track->maxy = ((idx_value >> 16) & 0x7FF);
267 track->cb_dirty = true;
268 track->zb_dirty = true;
263 break; 269 break;
264 case RADEON_RB3D_COLORPITCH: 270 case RADEON_RB3D_COLORPITCH:
265 r = r100_cs_packet_next_reloc(p, &reloc); 271 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -280,9 +286,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
280 ib[idx] = tmp; 286 ib[idx] = tmp;
281 287
282 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK; 288 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
289 track->cb_dirty = true;
283 break; 290 break;
284 case RADEON_RB3D_DEPTHPITCH: 291 case RADEON_RB3D_DEPTHPITCH:
285 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK; 292 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
293 track->zb_dirty = true;
286 break; 294 break;
287 case RADEON_RB3D_CNTL: 295 case RADEON_RB3D_CNTL:
288 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 296 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
@@ -312,6 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
312 } 320 }
313 321
314 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE); 322 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
323 track->cb_dirty = true;
324 track->zb_dirty = true;
315 break; 325 break;
316 case RADEON_RB3D_ZSTENCILCNTL: 326 case RADEON_RB3D_ZSTENCILCNTL:
317 switch (idx_value & 0xf) { 327 switch (idx_value & 0xf) {
@@ -329,6 +339,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
329 default: 339 default:
330 break; 340 break;
331 } 341 }
342 track->zb_dirty = true;
332 break; 343 break;
333 case RADEON_RB3D_ZPASS_ADDR: 344 case RADEON_RB3D_ZPASS_ADDR:
334 r = r100_cs_packet_next_reloc(p, &reloc); 345 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -345,6 +356,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
345 uint32_t temp = idx_value >> 4; 356 uint32_t temp = idx_value >> 4;
346 for (i = 0; i < track->num_texture; i++) 357 for (i = 0; i < track->num_texture; i++)
347 track->textures[i].enabled = !!(temp & (1 << i)); 358 track->textures[i].enabled = !!(temp & (1 << i));
359 track->tex_dirty = true;
348 } 360 }
349 break; 361 break;
350 case RADEON_SE_VF_CNTL: 362 case RADEON_SE_VF_CNTL:
@@ -369,6 +381,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
369 i = (reg - R200_PP_TXSIZE_0) / 32; 381 i = (reg - R200_PP_TXSIZE_0) / 32;
370 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1; 382 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
371 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 383 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
384 track->tex_dirty = true;
372 break; 385 break;
373 case R200_PP_TXPITCH_0: 386 case R200_PP_TXPITCH_0:
374 case R200_PP_TXPITCH_1: 387 case R200_PP_TXPITCH_1:
@@ -378,6 +391,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
378 case R200_PP_TXPITCH_5: 391 case R200_PP_TXPITCH_5:
379 i = (reg - R200_PP_TXPITCH_0) / 32; 392 i = (reg - R200_PP_TXPITCH_0) / 32;
380 track->textures[i].pitch = idx_value + 32; 393 track->textures[i].pitch = idx_value + 32;
394 track->tex_dirty = true;
381 break; 395 break;
382 case R200_PP_TXFILTER_0: 396 case R200_PP_TXFILTER_0:
383 case R200_PP_TXFILTER_1: 397 case R200_PP_TXFILTER_1:
@@ -394,6 +408,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
394 tmp = (idx_value >> 27) & 0x7; 408 tmp = (idx_value >> 27) & 0x7;
395 if (tmp == 2 || tmp == 6) 409 if (tmp == 2 || tmp == 6)
396 track->textures[i].roundup_h = false; 410 track->textures[i].roundup_h = false;
411 track->tex_dirty = true;
397 break; 412 break;
398 case R200_PP_TXMULTI_CTL_0: 413 case R200_PP_TXMULTI_CTL_0:
399 case R200_PP_TXMULTI_CTL_1: 414 case R200_PP_TXMULTI_CTL_1:
@@ -432,6 +447,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
432 track->textures[i].tex_coord_type = 1; 447 track->textures[i].tex_coord_type = 1;
433 break; 448 break;
434 } 449 }
450 track->tex_dirty = true;
435 break; 451 break;
436 case R200_PP_TXFORMAT_0: 452 case R200_PP_TXFORMAT_0:
437 case R200_PP_TXFORMAT_1: 453 case R200_PP_TXFORMAT_1:
@@ -488,6 +504,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
488 } 504 }
489 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf); 505 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
490 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf); 506 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
507 track->tex_dirty = true;
491 break; 508 break;
492 case R200_PP_CUBIC_FACES_0: 509 case R200_PP_CUBIC_FACES_0:
493 case R200_PP_CUBIC_FACES_1: 510 case R200_PP_CUBIC_FACES_1:
@@ -501,6 +518,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
501 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 518 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
502 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf); 519 track->textures[i].cube_info[face].height = 1 << ((tmp >> ((face * 8) + 4)) & 0xf);
503 } 520 }
521 track->tex_dirty = true;
504 break; 522 break;
505 default: 523 default:
506 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 524 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 55fe5ba7def3..069efa8c8ecf 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -667,6 +667,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
667 } 667 }
668 track->cb[i].robj = reloc->robj; 668 track->cb[i].robj = reloc->robj;
669 track->cb[i].offset = idx_value; 669 track->cb[i].offset = idx_value;
670 track->cb_dirty = true;
670 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 671 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
671 break; 672 break;
672 case R300_ZB_DEPTHOFFSET: 673 case R300_ZB_DEPTHOFFSET:
@@ -679,6 +680,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
679 } 680 }
680 track->zb.robj = reloc->robj; 681 track->zb.robj = reloc->robj;
681 track->zb.offset = idx_value; 682 track->zb.offset = idx_value;
683 track->zb_dirty = true;
682 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset); 684 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
683 break; 685 break;
684 case R300_TX_OFFSET_0: 686 case R300_TX_OFFSET_0:
@@ -717,6 +719,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
717 tmp |= tile_flags; 719 tmp |= tile_flags;
718 ib[idx] = tmp; 720 ib[idx] = tmp;
719 track->textures[i].robj = reloc->robj; 721 track->textures[i].robj = reloc->robj;
722 track->tex_dirty = true;
720 break; 723 break;
721 /* Tracked registers */ 724 /* Tracked registers */
722 case 0x2084: 725 case 0x2084:
@@ -743,6 +746,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
743 if (p->rdev->family < CHIP_RV515) { 746 if (p->rdev->family < CHIP_RV515) {
744 track->maxy -= 1440; 747 track->maxy -= 1440;
745 } 748 }
749 track->cb_dirty = true;
750 track->zb_dirty = true;
746 break; 751 break;
747 case 0x4E00: 752 case 0x4E00:
748 /* RB3D_CCTL */ 753 /* RB3D_CCTL */
@@ -752,6 +757,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
752 return -EINVAL; 757 return -EINVAL;
753 } 758 }
754 track->num_cb = ((idx_value >> 5) & 0x3) + 1; 759 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
760 track->cb_dirty = true;
755 break; 761 break;
756 case 0x4E38: 762 case 0x4E38:
757 case 0x4E3C: 763 case 0x4E3C:
@@ -814,6 +820,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
814 ((idx_value >> 21) & 0xF)); 820 ((idx_value >> 21) & 0xF));
815 return -EINVAL; 821 return -EINVAL;
816 } 822 }
823 track->cb_dirty = true;
817 break; 824 break;
818 case 0x4F00: 825 case 0x4F00:
819 /* ZB_CNTL */ 826 /* ZB_CNTL */
@@ -822,6 +829,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
822 } else { 829 } else {
823 track->z_enabled = false; 830 track->z_enabled = false;
824 } 831 }
832 track->zb_dirty = true;
825 break; 833 break;
826 case 0x4F10: 834 case 0x4F10:
827 /* ZB_FORMAT */ 835 /* ZB_FORMAT */
@@ -838,6 +846,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
838 (idx_value & 0xF)); 846 (idx_value & 0xF));
839 return -EINVAL; 847 return -EINVAL;
840 } 848 }
849 track->zb_dirty = true;
841 break; 850 break;
842 case 0x4F24: 851 case 0x4F24:
843 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
@@ -861,14 +870,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
861 ib[idx] = tmp; 870 ib[idx] = tmp;
862 871
863 track->zb.pitch = idx_value & 0x3FFC; 872 track->zb.pitch = idx_value & 0x3FFC;
873 track->zb_dirty = true;
864 break; 874 break;
865 case 0x4104: 875 case 0x4104:
876 /* TX_ENABLE */
866 for (i = 0; i < 16; i++) { 877 for (i = 0; i < 16; i++) {
867 bool enabled; 878 bool enabled;
868 879
869 enabled = !!(idx_value & (1 << i)); 880 enabled = !!(idx_value & (1 << i));
870 track->textures[i].enabled = enabled; 881 track->textures[i].enabled = enabled;
871 } 882 }
883 track->tex_dirty = true;
872 break; 884 break;
873 case 0x44C0: 885 case 0x44C0:
874 case 0x44C4: 886 case 0x44C4:
@@ -898,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
898 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 910 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
899 break; 911 break;
900 case R300_TX_FORMAT_X16: 912 case R300_TX_FORMAT_X16:
913 case R300_TX_FORMAT_FL_I16:
901 case R300_TX_FORMAT_Y8X8: 914 case R300_TX_FORMAT_Y8X8:
902 case R300_TX_FORMAT_Z5Y6X5: 915 case R300_TX_FORMAT_Z5Y6X5:
903 case R300_TX_FORMAT_Z6Y5X5: 916 case R300_TX_FORMAT_Z6Y5X5:
@@ -910,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
910 track->textures[i].compress_format = R100_TRACK_COMP_NONE; 923 track->textures[i].compress_format = R100_TRACK_COMP_NONE;
911 break; 924 break;
912 case R300_TX_FORMAT_Y16X16: 925 case R300_TX_FORMAT_Y16X16:
926 case R300_TX_FORMAT_FL_I16A16:
913 case R300_TX_FORMAT_Z11Y11X10: 927 case R300_TX_FORMAT_Z11Y11X10:
914 case R300_TX_FORMAT_Z10Y11X11: 928 case R300_TX_FORMAT_Z10Y11X11:
915 case R300_TX_FORMAT_W8Z8Y8X8: 929 case R300_TX_FORMAT_W8Z8Y8X8:
@@ -951,8 +965,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
951 DRM_ERROR("Invalid texture format %u\n", 965 DRM_ERROR("Invalid texture format %u\n",
952 (idx_value & 0x1F)); 966 (idx_value & 0x1F));
953 return -EINVAL; 967 return -EINVAL;
954 break;
955 } 968 }
969 track->tex_dirty = true;
956 break; 970 break;
957 case 0x4400: 971 case 0x4400:
958 case 0x4404: 972 case 0x4404:
@@ -980,6 +994,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
980 if (tmp == 2 || tmp == 4 || tmp == 6) { 994 if (tmp == 2 || tmp == 4 || tmp == 6) {
981 track->textures[i].roundup_h = false; 995 track->textures[i].roundup_h = false;
982 } 996 }
997 track->tex_dirty = true;
983 break; 998 break;
984 case 0x4500: 999 case 0x4500:
985 case 0x4504: 1000 case 0x4504:
@@ -1017,6 +1032,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1017 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n"); 1032 DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1018 return -EINVAL; 1033 return -EINVAL;
1019 } 1034 }
1035 track->tex_dirty = true;
1020 break; 1036 break;
1021 case 0x4480: 1037 case 0x4480:
1022 case 0x4484: 1038 case 0x4484:
@@ -1046,6 +1062,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1046 track->textures[i].use_pitch = !!tmp; 1062 track->textures[i].use_pitch = !!tmp;
1047 tmp = (idx_value >> 22) & 0xF; 1063 tmp = (idx_value >> 22) & 0xF;
1048 track->textures[i].txdepth = tmp; 1064 track->textures[i].txdepth = tmp;
1065 track->tex_dirty = true;
1049 break; 1066 break;
1050 case R300_ZB_ZPASS_ADDR: 1067 case R300_ZB_ZPASS_ADDR:
1051 r = r100_cs_packet_next_reloc(p, &reloc); 1068 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1060,6 +1077,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1060 case 0x4e0c: 1077 case 0x4e0c:
1061 /* RB3D_COLOR_CHANNEL_MASK */ 1078 /* RB3D_COLOR_CHANNEL_MASK */
1062 track->color_channel_mask = idx_value; 1079 track->color_channel_mask = idx_value;
1080 track->cb_dirty = true;
1063 break; 1081 break;
1064 case 0x43a4: 1082 case 0x43a4:
1065 /* SC_HYPERZ_EN */ 1083 /* SC_HYPERZ_EN */
@@ -1073,6 +1091,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1073 case 0x4f1c: 1091 case 0x4f1c:
1074 /* ZB_BW_CNTL */ 1092 /* ZB_BW_CNTL */
1075 track->zb_cb_clear = !!(idx_value & (1 << 5)); 1093 track->zb_cb_clear = !!(idx_value & (1 << 5));
1094 track->cb_dirty = true;
1095 track->zb_dirty = true;
1076 if (p->rdev->hyperz_filp != p->filp) { 1096 if (p->rdev->hyperz_filp != p->filp) {
1077 if (idx_value & (R300_HIZ_ENABLE | 1097 if (idx_value & (R300_HIZ_ENABLE |
1078 R300_RD_COMP_ENABLE | 1098 R300_RD_COMP_ENABLE |
@@ -1084,8 +1104,28 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1084 case 0x4e04: 1104 case 0x4e04:
1085 /* RB3D_BLENDCNTL */ 1105 /* RB3D_BLENDCNTL */
1086 track->blend_read_enable = !!(idx_value & (1 << 2)); 1106 track->blend_read_enable = !!(idx_value & (1 << 2));
1107 track->cb_dirty = true;
1108 break;
1109 case R300_RB3D_AARESOLVE_OFFSET:
1110 r = r100_cs_packet_next_reloc(p, &reloc);
1111 if (r) {
1112 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1113 idx, reg);
1114 r100_cs_dump_packet(p, pkt);
1115 return r;
1116 }
1117 track->aa.robj = reloc->robj;
1118 track->aa.offset = idx_value;
1119 track->aa_dirty = true;
1120 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1121 break;
1122 case R300_RB3D_AARESOLVE_PITCH:
1123 track->aa.pitch = idx_value & 0x3FFE;
1124 track->aa_dirty = true;
1087 break; 1125 break;
1088 case 0x4f28: /* ZB_DEPTHCLEARVALUE */ 1126 case R300_RB3D_AARESOLVE_CTL:
1127 track->aaresolve = idx_value & 0x1;
1128 track->aa_dirty = true;
1089 break; 1129 break;
1090 case 0x4f30: /* ZB_MASK_OFFSET */ 1130 case 0x4f30: /* ZB_MASK_OFFSET */
1091 case 0x4f34: /* ZB_ZMASK_PITCH */ 1131 case 0x4f34: /* ZB_ZMASK_PITCH */
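The r300.c hunk stops silently accepting the AA resolve registers: R300_RB3D_AARESOLVE_OFFSET now takes a relocation, the pitch and enable bit are recorded, and the tracker (see the r100.c check added earlier) verifies the resolve buffer is large enough before the command stream is accepted. A rough sketch of that size check, with a made-up helper name and the formula taken from the hunk above:

#include <linux/errno.h>

static int example_check_aa_buffer(unsigned int pitch, unsigned int cpp,
				   unsigned int maxy, unsigned int offset,
				   unsigned long bo_size)
{
	unsigned long size = (unsigned long)pitch * cpp * maxy + offset;

	/* reject the CS if the resolve target cannot hold the surface */
	return (size > bo_size) ? -EINVAL : 0;
}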
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1a0d5362cd79..f0bce399c9f3 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -1371,6 +1371,8 @@
1371#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */ 1371#define R300_RB3D_COLORPITCH2 0x4E40 /* GUESS */
1372#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */ 1372#define R300_RB3D_COLORPITCH3 0x4E44 /* GUESS */
1373 1373
1374#define R300_RB3D_AARESOLVE_OFFSET 0x4E80
1375#define R300_RB3D_AARESOLVE_PITCH 0x4E84
1374#define R300_RB3D_AARESOLVE_CTL 0x4E88 1376#define R300_RB3D_AARESOLVE_CTL 0x4E88
1375/* gap */ 1377/* gap */
1376 1378
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 650672a0f5ad..9b3fad23b76c 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev)
1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 1255 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 1256 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
1257 rdev->mc.visible_vram_size = rdev->mc.aper_size; 1257 rdev->mc.visible_vram_size = rdev->mc.aper_size;
1258 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
1259 r600_vram_gtt_location(rdev, &rdev->mc); 1258 r600_vram_gtt_location(rdev, &rdev->mc);
1260 1259
1261 if (rdev->flags & RADEON_IS_IGP) { 1260 if (rdev->flags & RADEON_IS_IGP) {
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
1937 */ 1936 */
1938void r600_cp_stop(struct radeon_device *rdev) 1937void r600_cp_stop(struct radeon_device *rdev)
1939{ 1938{
1940 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 1939 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
1941 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); 1940 WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
1942 WREG32(SCRATCH_UMSK, 0); 1941 WREG32(SCRATCH_UMSK, 0);
1943} 1942}
@@ -2105,7 +2104,11 @@ static int r600_cp_load_microcode(struct radeon_device *rdev)
2105 2104
2106 r600_cp_stop(rdev); 2105 r600_cp_stop(rdev);
2107 2106
2108 WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3)); 2107 WREG32(CP_RB_CNTL,
2108#ifdef __BIG_ENDIAN
2109 BUF_SWAP_32BIT |
2110#endif
2111 RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
2109 2112
2110 /* Reset cp */ 2113 /* Reset cp */
2111 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP); 2114 WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -2192,7 +2195,11 @@ int r600_cp_resume(struct radeon_device *rdev)
2192 WREG32(CP_RB_WPTR, 0); 2195 WREG32(CP_RB_WPTR, 0);
2193 2196
2194 /* set the wb address whether it's enabled or not */ 2197 /* set the wb address whether it's enabled or not */
2195 WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC); 2198 WREG32(CP_RB_RPTR_ADDR,
2199#ifdef __BIG_ENDIAN
2200 RB_RPTR_SWAP(2) |
2201#endif
2202 ((rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC));
2196 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF); 2203 WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);
2197 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF); 2204 WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);
2198 2205
@@ -2628,7 +2635,11 @@ void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
2628{ 2635{
2629 /* FIXME: implement */ 2636 /* FIXME: implement */
2630 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 2637 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
2631 radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC); 2638 radeon_ring_write(rdev,
2639#ifdef __BIG_ENDIAN
2640 (2 << 0) |
2641#endif
2642 (ib->gpu_addr & 0xFFFFFFFC));
2632 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF); 2643 radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
2633 radeon_ring_write(rdev, ib->length_dw); 2644 radeon_ring_write(rdev, ib->length_dw);
2634} 2645}
@@ -3297,8 +3308,8 @@ restart_ih:
3297 while (rptr != wptr) { 3308 while (rptr != wptr) {
3298 /* wptr/rptr are in bytes! */ 3309 /* wptr/rptr are in bytes! */
3299 ring_index = rptr / 4; 3310 ring_index = rptr / 4;
3300 src_id = rdev->ih.ring[ring_index] & 0xff; 3311 src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
3301 src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff; 3312 src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
3302 3313
3303 switch (src_id) { 3314 switch (src_id) {
3304 case 1: /* D1 vblank/vline */ 3315 case 1: /* D1 vblank/vline */
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index ca5c29f70779..7f1043448d25 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -137,9 +137,9 @@ set_shaders(struct drm_device *dev)
137 ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256); 137 ps = (u32 *) ((char *)dev->agp_buffer_map->handle + dev_priv->blit_vb->offset + 256);
138 138
139 for (i = 0; i < r6xx_vs_size; i++) 139 for (i = 0; i < r6xx_vs_size; i++)
140 vs[i] = r6xx_vs[i]; 140 vs[i] = cpu_to_le32(r6xx_vs[i]);
141 for (i = 0; i < r6xx_ps_size; i++) 141 for (i = 0; i < r6xx_ps_size; i++)
142 ps[i] = r6xx_ps[i]; 142 ps[i] = cpu_to_le32(r6xx_ps[i]);
143 143
144 dev_priv->blit_vb->used = 512; 144 dev_priv->blit_vb->used = 512;
145 145
@@ -192,6 +192,9 @@ set_vtx_resource(drm_radeon_private_t *dev_priv, u64 gpu_addr)
192 DRM_DEBUG("\n"); 192 DRM_DEBUG("\n");
193 193
194 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8)); 194 sq_vtx_constant_word2 = (((gpu_addr >> 32) & 0xff) | (16 << 8));
195#ifdef __BIG_ENDIAN
196 sq_vtx_constant_word2 |= (2 << 30);
197#endif
195 198
196 BEGIN_RING(9); 199 BEGIN_RING(9);
197 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7)); 200 OUT_RING(CP_PACKET3(R600_IT_SET_RESOURCE, 7));
@@ -291,7 +294,11 @@ draw_auto(drm_radeon_private_t *dev_priv)
291 OUT_RING(DI_PT_RECTLIST); 294 OUT_RING(DI_PT_RECTLIST);
292 295
293 OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0)); 296 OUT_RING(CP_PACKET3(R600_IT_INDEX_TYPE, 0));
297#ifdef __BIG_ENDIAN
298 OUT_RING((2 << 2) | DI_INDEX_SIZE_16_BIT);
299#else
294 OUT_RING(DI_INDEX_SIZE_16_BIT); 300 OUT_RING(DI_INDEX_SIZE_16_BIT);
301#endif
295 302
296 OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0)); 303 OUT_RING(CP_PACKET3(R600_IT_NUM_INSTANCES, 0));
297 OUT_RING(1); 304 OUT_RING(1);
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 86e5aa07f0db..df68d91e8190 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -54,7 +54,7 @@ set_render_target(struct radeon_device *rdev, int format,
54 if (h < 8) 54 if (h < 8)
55 h = 8; 55 h = 8;
56 56
57 cb_color_info = ((format << 2) | (1 << 27)); 57 cb_color_info = ((format << 2) | (1 << 27) | (1 << 8));
58 pitch = (w / 8) - 1; 58 pitch = (w / 8) - 1;
59 slice = ((w * h) / 64) - 1; 59 slice = ((w * h) / 64) - 1;
60 60
@@ -165,6 +165,9 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
165 u32 sq_vtx_constant_word2; 165 u32 sq_vtx_constant_word2;
166 166
167 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); 167 sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8));
168#ifdef __BIG_ENDIAN
169 sq_vtx_constant_word2 |= (2 << 30);
170#endif
168 171
169 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); 172 radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7));
170 radeon_ring_write(rdev, 0x460); 173 radeon_ring_write(rdev, 0x460);
@@ -199,7 +202,7 @@ set_tex_resource(struct radeon_device *rdev,
199 if (h < 1) 202 if (h < 1)
200 h = 1; 203 h = 1;
201 204
202 sq_tex_resource_word0 = (1 << 0); 205 sq_tex_resource_word0 = (1 << 0) | (1 << 3);
203 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | 206 sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) |
204 ((w - 1) << 19)); 207 ((w - 1) << 19));
205 208
@@ -253,7 +256,11 @@ draw_auto(struct radeon_device *rdev)
253 radeon_ring_write(rdev, DI_PT_RECTLIST); 256 radeon_ring_write(rdev, DI_PT_RECTLIST);
254 257
255 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); 258 radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0));
256 radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); 259 radeon_ring_write(rdev,
260#ifdef __BIG_ENDIAN
261 (2 << 2) |
262#endif
263 DI_INDEX_SIZE_16_BIT);
257 264
258 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); 265 radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0));
259 radeon_ring_write(rdev, 1); 266 radeon_ring_write(rdev, 1);
@@ -424,7 +431,11 @@ set_default_state(struct radeon_device *rdev)
424 dwords = ALIGN(rdev->r600_blit.state_len, 0x10); 431 dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
425 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; 432 gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
426 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); 433 radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
427 radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); 434 radeon_ring_write(rdev,
435#ifdef __BIG_ENDIAN
436 (2 << 0) |
437#endif
438 (gpu_addr & 0xFFFFFFFC));
428 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); 439 radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
429 radeon_ring_write(rdev, dwords); 440 radeon_ring_write(rdev, dwords);
430 441
@@ -467,7 +478,7 @@ static inline uint32_t i2f(uint32_t input)
467int r600_blit_init(struct radeon_device *rdev) 478int r600_blit_init(struct radeon_device *rdev)
468{ 479{
469 u32 obj_size; 480 u32 obj_size;
470 int r, dwords; 481 int i, r, dwords;
471 void *ptr; 482 void *ptr;
472 u32 packet2s[16]; 483 u32 packet2s[16];
473 int num_packet2s = 0; 484 int num_packet2s = 0;
@@ -486,7 +497,7 @@ int r600_blit_init(struct radeon_device *rdev)
486 497
487 dwords = rdev->r600_blit.state_len; 498 dwords = rdev->r600_blit.state_len;
488 while (dwords & 0xf) { 499 while (dwords & 0xf) {
489 packet2s[num_packet2s++] = PACKET2(0); 500 packet2s[num_packet2s++] = cpu_to_le32(PACKET2(0));
490 dwords++; 501 dwords++;
491 } 502 }
492 503
@@ -529,8 +540,10 @@ int r600_blit_init(struct radeon_device *rdev)
529 if (num_packet2s) 540 if (num_packet2s)
530 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4), 541 memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
531 packet2s, num_packet2s * 4); 542 packet2s, num_packet2s * 4);
532 memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4); 543 for (i = 0; i < r6xx_vs_size; i++)
533 memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4); 544 *(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(r6xx_vs[i]);
545 for (i = 0; i < r6xx_ps_size; i++)
546 *(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(r6xx_ps[i]);
534 radeon_bo_kunmap(rdev->r600_blit.shader_obj); 547 radeon_bo_kunmap(rdev->r600_blit.shader_obj);
535 radeon_bo_unreserve(rdev->r600_blit.shader_obj); 548 radeon_bo_unreserve(rdev->r600_blit.shader_obj);
536 549
@@ -545,7 +558,7 @@ done:
545 dev_err(rdev->dev, "(%d) pin blit object failed\n", r); 558 dev_err(rdev->dev, "(%d) pin blit object failed\n", r);
546 return r; 559 return r;
547 } 560 }
548 rdev->mc.active_vram_size = rdev->mc.real_vram_size; 561 radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
549 return 0; 562 return 0;
550} 563}
551 564
@@ -553,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev)
553{ 566{
554 int r; 567 int r;
555 568
556 rdev->mc.active_vram_size = rdev->mc.visible_vram_size; 569 radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
557 if (rdev->r600_blit.shader_obj == NULL) 570 if (rdev->r600_blit.shader_obj == NULL)
558 return; 571 return;
559 /* If we can't reserve the bo, unref should be enough to destroy 572 /* If we can't reserve the bo, unref should be enough to destroy
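The switch from memcpy() to per-dword loops in r600_blit_kms.c above is the core of that fix: r6xx_vs[]/r6xx_ps[] hold host-order 32-bit words, but the GPU fetches the shader as little-endian dwords, so each word must be swapped individually on big-endian machines (a straight memcpy only happens to work on little-endian hosts). A minimal sketch of that copy pattern follows; copy_words_le() is a hypothetical helper name and htole32() stands in for the kernel's cpu_to_le32().

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <endian.h>   /* htole32() */

/* copy_words_le(): mirrors the loops the patch adds over r6xx_vs[]/r6xx_ps[];
 * dst is GPU-visible little-endian memory, src holds host-order shader words. */
static void copy_words_le(uint32_t *dst, const uint32_t *src, size_t count)
{
    for (size_t i = 0; i < count; i++)
        dst[i] = htole32(src[i]);   /* no-op on LE hosts, byte swap on BE */
}

int main(void)
{
    uint32_t shader[2] = { 0x00000000, 0x3c000000 };   /* sample words */
    uint32_t vram[2];

    copy_words_le(vram, shader, 2);
    printf("first dword as stored: 0x%08x\n", (unsigned)vram[0]);
    return 0;
}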
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index e8151c1d55b2..2d1f6c5ee2a7 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -684,7 +684,11 @@ const u32 r6xx_vs[] =
684 0x00000000, 684 0x00000000,
685 0x3c000000, 685 0x3c000000,
686 0x68cd1000, 686 0x68cd1000,
687#ifdef __BIG_ENDIAN
688 0x000a0000,
689#else
687 0x00080000, 690 0x00080000,
691#endif
688 0x00000000, 692 0x00000000,
689}; 693};
690 694
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 4f4cd8b286d5..c3ab959bdc7c 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -396,6 +396,9 @@ static void r600_cp_load_microcode(drm_radeon_private_t *dev_priv)
396 r600_do_cp_stop(dev_priv); 396 r600_do_cp_stop(dev_priv);
397 397
398 RADEON_WRITE(R600_CP_RB_CNTL, 398 RADEON_WRITE(R600_CP_RB_CNTL,
399#ifdef __BIG_ENDIAN
400 R600_BUF_SWAP_32BIT |
401#endif
399 R600_RB_NO_UPDATE | 402 R600_RB_NO_UPDATE |
400 R600_RB_BLKSZ(15) | 403 R600_RB_BLKSZ(15) |
401 R600_RB_BUFSZ(3)); 404 R600_RB_BUFSZ(3));
@@ -486,9 +489,12 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
486 r600_do_cp_stop(dev_priv); 489 r600_do_cp_stop(dev_priv);
487 490
488 RADEON_WRITE(R600_CP_RB_CNTL, 491 RADEON_WRITE(R600_CP_RB_CNTL,
492#ifdef __BIG_ENDIAN
493 R600_BUF_SWAP_32BIT |
494#endif
489 R600_RB_NO_UPDATE | 495 R600_RB_NO_UPDATE |
490 (15 << 8) | 496 R600_RB_BLKSZ(15) |
491 (3 << 0)); 497 R600_RB_BUFSZ(3));
492 498
493 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP); 499 RADEON_WRITE(R600_GRBM_SOFT_RESET, R600_SOFT_RESET_CP);
494 RADEON_READ(R600_GRBM_SOFT_RESET); 500 RADEON_READ(R600_GRBM_SOFT_RESET);
@@ -550,8 +556,12 @@ static void r600_test_writeback(drm_radeon_private_t *dev_priv)
550 556
551 if (!dev_priv->writeback_works) { 557 if (!dev_priv->writeback_works) {
552 /* Disable writeback to avoid unnecessary bus master transfer */ 558 /* Disable writeback to avoid unnecessary bus master transfer */
553 RADEON_WRITE(R600_CP_RB_CNTL, RADEON_READ(R600_CP_RB_CNTL) | 559 RADEON_WRITE(R600_CP_RB_CNTL,
554 RADEON_RB_NO_UPDATE); 560#ifdef __BIG_ENDIAN
561 R600_BUF_SWAP_32BIT |
562#endif
563 RADEON_READ(R600_CP_RB_CNTL) |
564 R600_RB_NO_UPDATE);
555 RADEON_WRITE(R600_SCRATCH_UMSK, 0); 565 RADEON_WRITE(R600_SCRATCH_UMSK, 0);
556 } 566 }
557} 567}
@@ -575,7 +585,11 @@ int r600_do_engine_reset(struct drm_device *dev)
575 585
576 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0); 586 RADEON_WRITE(R600_CP_RB_WPTR_DELAY, 0);
577 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL); 587 cp_rb_cntl = RADEON_READ(R600_CP_RB_CNTL);
578 RADEON_WRITE(R600_CP_RB_CNTL, R600_RB_RPTR_WR_ENA); 588 RADEON_WRITE(R600_CP_RB_CNTL,
589#ifdef __BIG_ENDIAN
590 R600_BUF_SWAP_32BIT |
591#endif
592 R600_RB_RPTR_WR_ENA);
579 593
580 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr); 594 RADEON_WRITE(R600_CP_RB_RPTR_WR, cp_ptr);
581 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr); 595 RADEON_WRITE(R600_CP_RB_WPTR, cp_ptr);
@@ -1838,7 +1852,10 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1838 + dev_priv->gart_vm_start; 1852 + dev_priv->gart_vm_start;
1839 } 1853 }
1840 RADEON_WRITE(R600_CP_RB_RPTR_ADDR, 1854 RADEON_WRITE(R600_CP_RB_RPTR_ADDR,
1841 rptr_addr & 0xffffffff); 1855#ifdef __BIG_ENDIAN
1856 (2 << 0) |
1857#endif
1858 (rptr_addr & 0xfffffffc));
1842 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI, 1859 RADEON_WRITE(R600_CP_RB_RPTR_ADDR_HI,
1843 upper_32_bits(rptr_addr)); 1860 upper_32_bits(rptr_addr));
1844 1861
@@ -1889,7 +1906,7 @@ static void r600_cp_init_ring_buffer(struct drm_device *dev,
1889 { 1906 {
1890 u64 scratch_addr; 1907 u64 scratch_addr;
1891 1908
1892 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR); 1909 scratch_addr = RADEON_READ(R600_CP_RB_RPTR_ADDR) & 0xFFFFFFFC;
1893 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32; 1910 scratch_addr |= ((u64)RADEON_READ(R600_CP_RB_RPTR_ADDR_HI)) << 32;
1894 scratch_addr += R600_SCRATCH_REG_OFFSET; 1911 scratch_addr += R600_SCRATCH_REG_OFFSET;
1895 scratch_addr >>= 8; 1912 scratch_addr >>= 8;
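Several r600_cp.c hunks above (and the rv770.c hunk further down) follow one pattern: a compile-time OR of the BUF_SWAP_32BIT control bit into CP_RB_CNTL-style register writes, so the command processor byte-swaps its 32-bit ring fetches on big-endian hosts. The sketch below shows just that pattern, with the bit values taken from the r600d.h hunk in this diff; reg_write() is a hypothetical stand-in for the MMIO write. In the kernel the __BIG_ENDIAN test comes from <asm/byteorder.h>; a standalone build would key on __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ instead.

#include <stdint.h>
#include <stdio.h>

/* Bit definitions mirroring the R600_*/RB_* macros shown in this diff. */
#define RB_NO_UPDATE      (1u << 27)
#define RB_BLKSZ(x)       ((uint32_t)(x) << 8)
#define RB_BUFSZ(x)       ((uint32_t)(x) << 0)
#define BUF_SWAP_32BIT    (2u << 16)

/* Hypothetical stand-in for the MMIO write; prints instead of touching HW. */
static void reg_write(uint32_t reg, uint32_t val)
{
    printf("WREG32(0x%04x) = 0x%08x\n", (unsigned)reg, (unsigned)val);
}

int main(void)
{
    uint32_t cntl = RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3);

#ifdef __BIG_ENDIAN
    /* compile-time choice: have the CP byte-swap 32-bit ring fetches */
    cntl |= BUF_SWAP_32BIT;
#endif
    reg_write(0xc104 /* CP_RB_CNTL */, cntl);
    return 0;
}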
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7831e0890210..153095fba62f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -295,17 +295,18 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
295 } 295 }
296 296
297 if (!IS_ALIGNED(pitch, pitch_align)) { 297 if (!IS_ALIGNED(pitch, pitch_align)) {
298 dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n", 298 dev_warn(p->dev, "%s:%d cb pitch (%d, 0x%x, %d) invalid\n",
299 __func__, __LINE__, pitch); 299 __func__, __LINE__, pitch, pitch_align, array_mode);
300 return -EINVAL; 300 return -EINVAL;
301 } 301 }
302 if (!IS_ALIGNED(height, height_align)) { 302 if (!IS_ALIGNED(height, height_align)) {
303 dev_warn(p->dev, "%s:%d cb height (%d) invalid\n", 303 dev_warn(p->dev, "%s:%d cb height (%d, 0x%x, %d) invalid\n",
304 __func__, __LINE__, height); 304 __func__, __LINE__, height, height_align, array_mode);
305 return -EINVAL; 305 return -EINVAL;
306 } 306 }
307 if (!IS_ALIGNED(base_offset, base_align)) { 307 if (!IS_ALIGNED(base_offset, base_align)) {
308 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 308 dev_warn(p->dev, "%s offset[%d] 0x%llx 0x%llx, %d not aligned\n", __func__, i,
309 base_offset, base_align, array_mode);
309 return -EINVAL; 310 return -EINVAL;
310 } 311 }
311 312
@@ -320,7 +321,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
320 * broken userspace. 321 * broken userspace.
321 */ 322 */
322 } else { 323 } else {
323 dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i])); 324 dev_warn(p->dev, "%s offset[%d] %d %d %d %lu too big\n", __func__, i,
325 array_mode,
326 track->cb_color_bo_offset[i], tmp,
327 radeon_bo_size(track->cb_color_bo[i]));
324 return -EINVAL; 328 return -EINVAL;
325 } 329 }
326 } 330 }
@@ -455,17 +459,18 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
455 } 459 }
456 460
457 if (!IS_ALIGNED(pitch, pitch_align)) { 461 if (!IS_ALIGNED(pitch, pitch_align)) {
458 dev_warn(p->dev, "%s:%d db pitch (%d) invalid\n", 462 dev_warn(p->dev, "%s:%d db pitch (%d, 0x%x, %d) invalid\n",
459 __func__, __LINE__, pitch); 463 __func__, __LINE__, pitch, pitch_align, array_mode);
460 return -EINVAL; 464 return -EINVAL;
461 } 465 }
462 if (!IS_ALIGNED(height, height_align)) { 466 if (!IS_ALIGNED(height, height_align)) {
463 dev_warn(p->dev, "%s:%d db height (%d) invalid\n", 467 dev_warn(p->dev, "%s:%d db height (%d, 0x%x, %d) invalid\n",
464 __func__, __LINE__, height); 468 __func__, __LINE__, height, height_align, array_mode);
465 return -EINVAL; 469 return -EINVAL;
466 } 470 }
467 if (!IS_ALIGNED(base_offset, base_align)) { 471 if (!IS_ALIGNED(base_offset, base_align)) {
468 dev_warn(p->dev, "%s offset[%d] 0x%llx not aligned\n", __func__, i, base_offset); 472 dev_warn(p->dev, "%s offset[%d] 0x%llx, 0x%llx, %d not aligned\n", __func__, i,
473 base_offset, base_align, array_mode);
469 return -EINVAL; 474 return -EINVAL;
470 } 475 }
471 476
@@ -473,9 +478,10 @@ static int r600_cs_track_check(struct radeon_cs_parser *p)
473 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1; 478 nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
474 tmp = ntiles * bpe * 64 * nviews; 479 tmp = ntiles * bpe * 64 * nviews;
475 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) { 480 if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
476 dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %u have %lu)\n", 481 dev_warn(p->dev, "z/stencil buffer (%d) too small (0x%08X %d %d %d -> %u have %lu)\n",
477 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset, 482 array_mode,
478 radeon_bo_size(track->db_bo)); 483 track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
484 radeon_bo_size(track->db_bo));
479 return -EINVAL; 485 return -EINVAL;
480 } 486 }
481 } 487 }
@@ -1227,18 +1233,18 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
1227 /* XXX check height as well... */ 1233 /* XXX check height as well... */
1228 1234
1229 if (!IS_ALIGNED(pitch, pitch_align)) { 1235 if (!IS_ALIGNED(pitch, pitch_align)) {
1230 dev_warn(p->dev, "%s:%d tex pitch (%d) invalid\n", 1236 dev_warn(p->dev, "%s:%d tex pitch (%d, 0x%x, %d) invalid\n",
1231 __func__, __LINE__, pitch); 1237 __func__, __LINE__, pitch, pitch_align, G_038000_TILE_MODE(word0));
1232 return -EINVAL; 1238 return -EINVAL;
1233 } 1239 }
1234 if (!IS_ALIGNED(base_offset, base_align)) { 1240 if (!IS_ALIGNED(base_offset, base_align)) {
1235 dev_warn(p->dev, "%s:%d tex base offset (0x%llx) invalid\n", 1241 dev_warn(p->dev, "%s:%d tex base offset (0x%llx, 0x%llx, %d) invalid\n",
1236 __func__, __LINE__, base_offset); 1242 __func__, __LINE__, base_offset, base_align, G_038000_TILE_MODE(word0));
1237 return -EINVAL; 1243 return -EINVAL;
1238 } 1244 }
1239 if (!IS_ALIGNED(mip_offset, base_align)) { 1245 if (!IS_ALIGNED(mip_offset, base_align)) {
1240 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx) invalid\n", 1246 dev_warn(p->dev, "%s:%d tex mip offset (0x%llx, 0x%llx, %d) invalid\n",
1241 __func__, __LINE__, mip_offset); 1247 __func__, __LINE__, mip_offset, base_align, G_038000_TILE_MODE(word0));
1242 return -EINVAL; 1248 return -EINVAL;
1243 } 1249 }
1244 1250
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index a5d898b4bad2..04bac0bbd3ec 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -154,13 +154,14 @@
154#define ROQ_IB2_START(x) ((x) << 8) 154#define ROQ_IB2_START(x) ((x) << 8)
155#define CP_RB_BASE 0xC100 155#define CP_RB_BASE 0xC100
156#define CP_RB_CNTL 0xC104 156#define CP_RB_CNTL 0xC104
157#define RB_BUFSZ(x) ((x)<<0) 157#define RB_BUFSZ(x) ((x) << 0)
158#define RB_BLKSZ(x) ((x)<<8) 158#define RB_BLKSZ(x) ((x) << 8)
159#define RB_NO_UPDATE (1<<27) 159#define RB_NO_UPDATE (1 << 27)
160#define RB_RPTR_WR_ENA (1<<31) 160#define RB_RPTR_WR_ENA (1 << 31)
161#define BUF_SWAP_32BIT (2 << 16) 161#define BUF_SWAP_32BIT (2 << 16)
162#define CP_RB_RPTR 0x8700 162#define CP_RB_RPTR 0x8700
163#define CP_RB_RPTR_ADDR 0xC10C 163#define CP_RB_RPTR_ADDR 0xC10C
164#define RB_RPTR_SWAP(x) ((x) << 0)
164#define CP_RB_RPTR_ADDR_HI 0xC110 165#define CP_RB_RPTR_ADDR_HI 0xC110
165#define CP_RB_RPTR_WR 0xC108 166#define CP_RB_RPTR_WR 0xC108
166#define CP_RB_WPTR 0xC114 167#define CP_RB_WPTR 0xC114
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56c48b67ef3d..6b3429495118 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -345,7 +345,6 @@ struct radeon_mc {
345 * about vram size near mc fb location */ 345 * about vram size near mc fb location */
346 u64 mc_vram_size; 346 u64 mc_vram_size;
347 u64 visible_vram_size; 347 u64 visible_vram_size;
348 u64 active_vram_size;
349 u64 gtt_size; 348 u64 gtt_size;
350 u64 gtt_start; 349 u64 gtt_start;
351 u64 gtt_end; 350 u64 gtt_end;
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m
1448extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); 1447extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
1449extern int radeon_resume_kms(struct drm_device *dev); 1448extern int radeon_resume_kms(struct drm_device *dev);
1450extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); 1449extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
1450extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size);
1451 1451
1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ 1452/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
1453extern bool r600_card_posted(struct radeon_device *rdev); 1453extern bool r600_card_posted(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e75d63b8e21d..793c5e6026ad 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = {
834 .pm_finish = &evergreen_pm_finish, 834 .pm_finish = &evergreen_pm_finish,
835 .pm_init_profile = &rs780_pm_init_profile, 835 .pm_init_profile = &rs780_pm_init_profile,
836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 836 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
837 .pre_page_flip = &evergreen_pre_page_flip,
838 .page_flip = &evergreen_page_flip,
839 .post_page_flip = &evergreen_post_page_flip,
837}; 840};
838 841
839static struct radeon_asic btc_asic = { 842static struct radeon_asic btc_asic = {
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 5c1cc7ad9a15..02d5c415f499 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -88,7 +88,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
88 /* some evergreen boards have bad data for this entry */ 88 /* some evergreen boards have bad data for this entry */
89 if (ASIC_IS_DCE4(rdev)) { 89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) && 90 if ((i == 7) &&
91 (gpio->usClkMaskRegisterIndex == 0x1936) && 91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) { 92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97; 93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8; 94 gpio->ucDataMaskShift = 8;
@@ -101,7 +101,7 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
101 /* some DCE3 boards have bad data for this entry */ 101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) { 102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) && 103 if ((i == 4) &&
104 (gpio->usClkMaskRegisterIndex == 0x1fda) && 104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94)) 105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14; 106 gpio->sucI2cId.ucAccess = 0x14;
107 } 107 }
@@ -172,7 +172,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
172 /* some evergreen boards have bad data for this entry */ 172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) { 173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) && 174 if ((i == 7) &&
175 (gpio->usClkMaskRegisterIndex == 0x1936) && 175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) { 176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97; 177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8; 178 gpio->ucDataMaskShift = 8;
@@ -185,7 +185,7 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
185 /* some DCE3 boards have bad data for this entry */ 185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) { 186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) && 187 if ((i == 4) &&
188 (gpio->usClkMaskRegisterIndex == 0x1fda) && 188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94)) 189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14; 190 gpio->sucI2cId.ucAccess = 0x14;
191 } 191 }
@@ -252,7 +252,7 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
252 pin = &gpio_info->asGPIO_Pin[i]; 252 pin = &gpio_info->asGPIO_Pin[i];
253 if (id == pin->ucGPIO_ID) { 253 if (id == pin->ucGPIO_ID) {
254 gpio.id = pin->ucGPIO_ID; 254 gpio.id = pin->ucGPIO_ID;
255 gpio.reg = pin->usGpioPin_AIndex * 4; 255 gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4;
256 gpio.mask = (1 << pin->ucGpioPinBitShift); 256 gpio.mask = (1 << pin->ucGpioPinBitShift);
257 gpio.valid = true; 257 gpio.valid = true;
258 break; 258 break;
@@ -1274,11 +1274,11 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
1274 data_offset); 1274 data_offset);
1275 switch (crev) { 1275 switch (crev) {
1276 case 1: 1276 case 1:
1277 if (igp_info->info.ulBootUpMemoryClock) 1277 if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
1278 return true; 1278 return true;
1279 break; 1279 break;
1280 case 2: 1280 case 2:
1281 if (igp_info->info_2.ulBootUpSidePortClock) 1281 if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
1282 return true; 1282 return true;
1283 break; 1283 break;
1284 default: 1284 default:
@@ -1442,7 +1442,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1442 1442
1443 for (i = 0; i < num_indices; i++) { 1443 for (i = 0; i < num_indices; i++) {
1444 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) && 1444 if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
1445 (clock <= ss_info->info.asSpreadSpectrum[i].ulTargetClockRange)) { 1445 (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
1446 ss->percentage = 1446 ss->percentage =
1447 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1447 le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1448 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1448 ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1456,7 +1456,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1456 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2); 1456 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
1457 for (i = 0; i < num_indices; i++) { 1457 for (i = 0; i < num_indices; i++) {
1458 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) && 1458 if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
1459 (clock <= ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange)) { 1459 (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
1460 ss->percentage = 1460 ss->percentage =
1461 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1461 le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1462 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1462 ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1470,7 +1470,7 @@ bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
1470 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3); 1470 sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
1471 for (i = 0; i < num_indices; i++) { 1471 for (i = 0; i < num_indices; i++) {
1472 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) && 1472 if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
1473 (clock <= ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange)) { 1473 (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
1474 ss->percentage = 1474 ss->percentage =
1475 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage); 1475 le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
1476 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode; 1476 ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
@@ -1553,8 +1553,8 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1553 if (misc & ATOM_DOUBLE_CLOCK_MODE) 1553 if (misc & ATOM_DOUBLE_CLOCK_MODE)
1554 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; 1554 lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
1555 1555
1556 lvds->native_mode.width_mm = lvds_info->info.sLCDTiming.usImageHSize; 1556 lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize);
1557 lvds->native_mode.height_mm = lvds_info->info.sLCDTiming.usImageVSize; 1557 lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize);
1558 1558
1559 /* set crtc values */ 1559 /* set crtc values */
1560 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); 1560 drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
@@ -1569,13 +1569,13 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
1569 lvds->linkb = false; 1569 lvds->linkb = false;
1570 1570
1571 /* parse the lcd record table */ 1571 /* parse the lcd record table */
1572 if (lvds_info->info.usModePatchTableOffset) { 1572 if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) {
1573 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; 1573 ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
1574 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; 1574 ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
1575 bool bad_record = false; 1575 bool bad_record = false;
1576 u8 *record = (u8 *)(mode_info->atom_context->bios + 1576 u8 *record = (u8 *)(mode_info->atom_context->bios +
1577 data_offset + 1577 data_offset +
1578 lvds_info->info.usModePatchTableOffset); 1578 le16_to_cpu(lvds_info->info.usModePatchTableOffset));
1579 while (*record != ATOM_RECORD_END_TYPE) { 1579 while (*record != ATOM_RECORD_END_TYPE) {
1580 switch (*record) { 1580 switch (*record) {
1581 case LCD_MODE_PATCH_RECORD_MODE_TYPE: 1581 case LCD_MODE_PATCH_RECORD_MODE_TYPE:
@@ -2189,7 +2189,7 @@ static u16 radeon_atombios_get_default_vddc(struct radeon_device *rdev)
2189 firmware_info = 2189 firmware_info =
2190 (union firmware_info *)(mode_info->atom_context->bios + 2190 (union firmware_info *)(mode_info->atom_context->bios +
2191 data_offset); 2191 data_offset);
2192 vddc = firmware_info->info_14.usBootUpVDDCVoltage; 2192 vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
2193 } 2193 }
2194 2194
2195 return vddc; 2195 return vddc;
@@ -2284,7 +2284,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2284 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2284 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2285 VOLTAGE_SW; 2285 VOLTAGE_SW;
2286 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2286 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2287 clock_info->evergreen.usVDDC; 2287 le16_to_cpu(clock_info->evergreen.usVDDC);
2288 } else { 2288 } else {
2289 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow); 2289 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
2290 sclk |= clock_info->r600.ucEngineClockHigh << 16; 2290 sclk |= clock_info->r600.ucEngineClockHigh << 16;
@@ -2295,7 +2295,7 @@ static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
2295 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type = 2295 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
2296 VOLTAGE_SW; 2296 VOLTAGE_SW;
2297 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = 2297 rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
2298 clock_info->r600.usVDDC; 2298 le16_to_cpu(clock_info->r600.usVDDC);
2299 } 2299 }
2300 2300
2301 if (rdev->flags & RADEON_IS_IGP) { 2301 if (rdev->flags & RADEON_IS_IGP) {
@@ -2408,13 +2408,13 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
2408 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); 2408 radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
2409 state_array = (struct StateArray *) 2409 state_array = (struct StateArray *)
2410 (mode_info->atom_context->bios + data_offset + 2410 (mode_info->atom_context->bios + data_offset +
2411 power_info->pplib.usStateArrayOffset); 2411 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2412 clock_info_array = (struct ClockInfoArray *) 2412 clock_info_array = (struct ClockInfoArray *)
2413 (mode_info->atom_context->bios + data_offset + 2413 (mode_info->atom_context->bios + data_offset +
2414 power_info->pplib.usClockInfoArrayOffset); 2414 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2415 non_clock_info_array = (struct NonClockInfoArray *) 2415 non_clock_info_array = (struct NonClockInfoArray *)
2416 (mode_info->atom_context->bios + data_offset + 2416 (mode_info->atom_context->bios + data_offset +
2417 power_info->pplib.usNonClockInfoArrayOffset); 2417 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2418 rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
2419 state_array->ucNumEntries, GFP_KERNEL); 2419 state_array->ucNumEntries, GFP_KERNEL);
2420 if (!rdev->pm.power_state) 2420 if (!rdev->pm.power_state)
@@ -2533,7 +2533,7 @@ uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev)
2533 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); 2533 int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);
2534 2534
2535 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2535 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2536 return args.ulReturnEngineClock; 2536 return le32_to_cpu(args.ulReturnEngineClock);
2537} 2537}
2538 2538
2539uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) 2539uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
@@ -2542,7 +2542,7 @@ uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev)
2542 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); 2542 int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);
2543 2543
2544 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2544 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2545 return args.ulReturnMemoryClock; 2545 return le32_to_cpu(args.ulReturnMemoryClock);
2546} 2546}
2547 2547
2548void radeon_atom_set_engine_clock(struct radeon_device *rdev, 2548void radeon_atom_set_engine_clock(struct radeon_device *rdev,
@@ -2551,7 +2551,7 @@ void radeon_atom_set_engine_clock(struct radeon_device *rdev,
2551 SET_ENGINE_CLOCK_PS_ALLOCATION args; 2551 SET_ENGINE_CLOCK_PS_ALLOCATION args;
2552 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); 2552 int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
2553 2553
2554 args.ulTargetEngineClock = eng_clock; /* 10 khz */ 2554 args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */
2555 2555
2556 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2556 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2557} 2557}
@@ -2565,7 +2565,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev,
2565 if (rdev->flags & RADEON_IS_IGP) 2565 if (rdev->flags & RADEON_IS_IGP)
2566 return; 2566 return;
2567 2567
2568 args.ulTargetMemoryClock = mem_clock; /* 10 khz */ 2568 args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */
2569 2569
2570 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 2570 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
2571} 2571}
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index d27ef74590cd..cf7c8d5b4ec2 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1504,6 +1504,11 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
1504 (rdev->pdev->subsystem_device == 0x4a48)) { 1504 (rdev->pdev->subsystem_device == 0x4a48)) {
1505 /* Mac X800 */ 1505 /* Mac X800 */
1506 rdev->mode_info.connector_table = CT_MAC_X800; 1506 rdev->mode_info.connector_table = CT_MAC_X800;
1507 } else if ((rdev->pdev->device == 0x4150) &&
1508 (rdev->pdev->subsystem_vendor == 0x1002) &&
1509 (rdev->pdev->subsystem_device == 0x4150)) {
1510 /* Mac G5 9600 */
1511 rdev->mode_info.connector_table = CT_MAC_G5_9600;
1507 } else 1512 } else
1508#endif /* CONFIG_PPC_PMAC */ 1513#endif /* CONFIG_PPC_PMAC */
1509#ifdef CONFIG_PPC64 1514#ifdef CONFIG_PPC64
@@ -2022,6 +2027,48 @@ bool radeon_get_legacy_connector_info_from_table(struct drm_device *dev)
2022 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, 2027 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I,
2023 &hpd); 2028 &hpd);
2024 break; 2029 break;
2030 case CT_MAC_G5_9600:
2031 DRM_INFO("Connector Table: %d (mac g5 9600)\n",
2032 rdev->mode_info.connector_table);
2033 /* DVI - tv dac, dvo */
2034 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_DVI, 0, 0);
2035 hpd.hpd = RADEON_HPD_1; /* ??? */
2036 radeon_add_legacy_encoder(dev,
2037 radeon_get_encoder_enum(dev,
2038 ATOM_DEVICE_DFP2_SUPPORT,
2039 0),
2040 ATOM_DEVICE_DFP2_SUPPORT);
2041 radeon_add_legacy_encoder(dev,
2042 radeon_get_encoder_enum(dev,
2043 ATOM_DEVICE_CRT2_SUPPORT,
2044 2),
2045 ATOM_DEVICE_CRT2_SUPPORT);
2046 radeon_add_legacy_connector(dev, 0,
2047 ATOM_DEVICE_DFP2_SUPPORT |
2048 ATOM_DEVICE_CRT2_SUPPORT,
2049 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2050 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2051 &hpd);
2052 /* ADC - primary dac, internal tmds */
2053 ddc_i2c = combios_setup_i2c_bus(rdev, DDC_VGA, 0, 0);
2054 hpd.hpd = RADEON_HPD_2; /* ??? */
2055 radeon_add_legacy_encoder(dev,
2056 radeon_get_encoder_enum(dev,
2057 ATOM_DEVICE_DFP1_SUPPORT,
2058 0),
2059 ATOM_DEVICE_DFP1_SUPPORT);
2060 radeon_add_legacy_encoder(dev,
2061 radeon_get_encoder_enum(dev,
2062 ATOM_DEVICE_CRT1_SUPPORT,
2063 1),
2064 ATOM_DEVICE_CRT1_SUPPORT);
2065 radeon_add_legacy_connector(dev, 1,
2066 ATOM_DEVICE_DFP1_SUPPORT |
2067 ATOM_DEVICE_CRT1_SUPPORT,
2068 DRM_MODE_CONNECTOR_DVII, &ddc_i2c,
2069 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I,
2070 &hpd);
2071 break;
2025 default: 2072 default:
2026 DRM_INFO("Connector table: %d (invalid)\n", 2073 DRM_INFO("Connector table: %d (invalid)\n",
2027 rdev->mode_info.connector_table); 2074 rdev->mode_info.connector_table);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0d478932b1a9..4954e2d6ffa2 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -936,8 +936,11 @@ int radeon_resume_kms(struct drm_device *dev)
936int radeon_gpu_reset(struct radeon_device *rdev) 936int radeon_gpu_reset(struct radeon_device *rdev)
937{ 937{
938 int r; 938 int r;
939 int resched;
939 940
940 radeon_save_bios_scratch_regs(rdev); 941 radeon_save_bios_scratch_regs(rdev);
942 /* block TTM */
943 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
941 radeon_suspend(rdev); 944 radeon_suspend(rdev);
942 945
943 r = radeon_asic_reset(rdev); 946 r = radeon_asic_reset(rdev);
@@ -946,6 +949,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
946 radeon_resume(rdev); 949 radeon_resume(rdev);
947 radeon_restore_bios_scratch_regs(rdev); 950 radeon_restore_bios_scratch_regs(rdev);
948 drm_helper_resume_force_mode(rdev->ddev); 951 drm_helper_resume_force_mode(rdev->ddev);
952 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
949 return 0; 953 return 0;
950 } 954 }
951 /* bad news, how to tell it to userspace ? */ 955 /* bad news, how to tell it to userspace ? */
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 2eff98cfd728..3e7e7f9eb781 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -793,6 +793,11 @@ static void avivo_get_fb_div(struct radeon_pll *pll,
793 tmp *= target_clock; 793 tmp *= target_clock;
794 *fb_div = tmp / pll->reference_freq; 794 *fb_div = tmp / pll->reference_freq;
795 *frac_fb_div = tmp % pll->reference_freq; 795 *frac_fb_div = tmp % pll->reference_freq;
796
797 if (*fb_div > pll->max_feedback_div)
798 *fb_div = pll->max_feedback_div;
799 else if (*fb_div < pll->min_feedback_div)
800 *fb_div = pll->min_feedback_div;
796} 801}
797 802
798static u32 avivo_get_post_div(struct radeon_pll *pll, 803static u32 avivo_get_post_div(struct radeon_pll *pll,
@@ -826,6 +831,11 @@ static u32 avivo_get_post_div(struct radeon_pll *pll,
826 post_div--; 831 post_div--;
827 } 832 }
828 833
834 if (post_div > pll->max_post_div)
835 post_div = pll->max_post_div;
836 else if (post_div < pll->min_post_div)
837 post_div = pll->min_post_div;
838
829 return post_div; 839 return post_div;
830} 840}
831 841
@@ -961,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll,
961 max_fractional_feed_div = pll->max_frac_feedback_div; 971 max_fractional_feed_div = pll->max_frac_feedback_div;
962 } 972 }
963 973
964 for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { 974 for (post_div = max_post_div; post_div >= min_post_div; --post_div) {
965 uint32_t ref_div; 975 uint32_t ref_div;
966 976
967 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) 977 if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
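The radeon_display.c hunks clamp the computed feedback and post dividers to the PLL's advertised limits (and walk post dividers from largest to smallest). A trivial sketch of the clamping step; clamp_div() and the example limits are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* Illustrative clamp matching the new avivo_get_fb_div()/avivo_get_post_div()
 * behaviour: a computed divider is forced into the PLL's legal range. */
static uint32_t clamp_div(uint32_t div, uint32_t min, uint32_t max)
{
    if (div > max)
        return max;
    if (div < min)
        return min;
    return div;
}

int main(void)
{
    /* a feedback divider that overshoots the limit gets pinned to it */
    printf("%u\n", (unsigned)clamp_div(260, 4, 255));   /* prints 255 */
    return 0;
}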
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 448eba89d1e6..5cba46b9779a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -1524,6 +1524,7 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
1524#define R600_CP_RB_CNTL 0xc104 1524#define R600_CP_RB_CNTL 0xc104
1525# define R600_RB_BUFSZ(x) ((x) << 0) 1525# define R600_RB_BUFSZ(x) ((x) << 0)
1526# define R600_RB_BLKSZ(x) ((x) << 8) 1526# define R600_RB_BLKSZ(x) ((x) << 8)
1527# define R600_BUF_SWAP_32BIT (2 << 16)
1527# define R600_RB_NO_UPDATE (1 << 27) 1528# define R600_RB_NO_UPDATE (1 << 27)
1528# define R600_RB_RPTR_WR_ENA (1 << 31) 1529# define R600_RB_RPTR_WR_ENA (1 << 31)
1529#define R600_CP_RB_RPTR_WR 0xc108 1530#define R600_CP_RB_RPTR_WR 0xc108
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index d4a542247618..b4274883227f 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -910,7 +910,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
910 910
911 args.v1.ucAction = action; 911 args.v1.ucAction = action;
912 if (action == ATOM_TRANSMITTER_ACTION_INIT) { 912 if (action == ATOM_TRANSMITTER_ACTION_INIT) {
913 args.v1.usInitInfo = connector_object_id; 913 args.v1.usInitInfo = cpu_to_le16(connector_object_id);
914 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) { 914 } else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
915 args.v1.asMode.ucLaneSel = lane_num; 915 args.v1.asMode.ucLaneSel = lane_num;
916 args.v1.asMode.ucLaneSet = lane_set; 916 args.v1.asMode.ucLaneSet = lane_set;
@@ -1140,7 +1140,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
1140 case 3: 1140 case 3:
1141 args.v3.sExtEncoder.ucAction = action; 1141 args.v3.sExtEncoder.ucAction = action;
1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT) 1142 if (action == EXTERNAL_ENCODER_ACTION_V3_ENCODER_INIT)
1143 args.v3.sExtEncoder.usConnectorId = connector_object_id; 1143 args.v3.sExtEncoder.usConnectorId = cpu_to_le16(connector_object_id);
1144 else 1144 else
1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10); 1145 args.v3.sExtEncoder.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder); 1146 args.v3.sExtEncoder.ucEncoderMode = atombios_get_encoder_mode(encoder);
@@ -1570,11 +1570,21 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
1570 } 1570 }
1571 1571
1572 /* set scaler clears this on some chips */ 1572 /* set scaler clears this on some chips */
1573 /* XXX check DCE4 */ 1573 if (ASIC_IS_AVIVO(rdev) &&
1574 if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) { 1574 (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)))) {
1575 if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE)) 1575 if (ASIC_IS_DCE4(rdev)) {
1576 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 1576 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1577 AVIVO_D1MODE_INTERLEAVE_EN); 1577 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
1578 EVERGREEN_INTERLEAVE_EN);
1579 else
1580 WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1581 } else {
1582 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
1583 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
1584 AVIVO_D1MODE_INTERLEAVE_EN);
1585 else
1586 WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
1587 }
1578 } 1588 }
1579} 1589}
1580 1590
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 66324b5bb5ba..cc44bdfec80f 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev,
113 u32 tiling_flags = 0; 113 u32 tiling_flags = 0;
114 int ret; 114 int ret;
115 int aligned_size, size; 115 int aligned_size, size;
116 int height = mode_cmd->height;
116 117
117 /* need to align pitch with crtc limits */ 118 /* need to align pitch with crtc limits */
118 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); 119 mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8);
119 120
120 size = mode_cmd->pitch * mode_cmd->height; 121 if (rdev->family >= CHIP_R600)
122 height = ALIGN(mode_cmd->height, 8);
123 size = mode_cmd->pitch * height;
121 aligned_size = ALIGN(size, PAGE_SIZE); 124 aligned_size = ALIGN(size, PAGE_SIZE);
122 ret = radeon_gem_object_create(rdev, aligned_size, 0, 125 ret = radeon_gem_object_create(rdev, aligned_size, 0,
123 RADEON_GEM_DOMAIN_VRAM, 126 RADEON_GEM_DOMAIN_VRAM,
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index df95eb83dac6..1fe95dfe48c9 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
156{ 156{
157 struct radeon_device *rdev = dev->dev_private; 157 struct radeon_device *rdev = dev->dev_private;
158 struct drm_radeon_gem_info *args = data; 158 struct drm_radeon_gem_info *args = data;
159 struct ttm_mem_type_manager *man;
160
161 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
159 162
160 args->vram_size = rdev->mc.real_vram_size; 163 args->vram_size = rdev->mc.real_vram_size;
161 args->vram_visible = rdev->mc.real_vram_size; 164 args->vram_visible = (u64)man->size << PAGE_SHIFT;
162 if (rdev->stollen_vga_memory) 165 if (rdev->stollen_vga_memory)
163 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); 166 args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
164 args->vram_visible -= radeon_fbdev_total_size(rdev); 167 args->vram_visible -= radeon_fbdev_total_size(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index cf0638c3b7c7..78968b738e88 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
443 (target_fb->bits_per_pixel * 8)); 443 (target_fb->bits_per_pixel * 8));
444 crtc_pitch |= crtc_pitch << 16; 444 crtc_pitch |= crtc_pitch << 16;
445 445
446 446 crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN;
447 if (tiling_flags & RADEON_TILING_MACRO) { 447 if (tiling_flags & RADEON_TILING_MACRO) {
448 if (ASIC_IS_R300(rdev)) 448 if (ASIC_IS_R300(rdev))
449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | 449 crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc,
502 gen_cntl_val = RREG32(gen_cntl_reg); 502 gen_cntl_val = RREG32(gen_cntl_reg);
503 gen_cntl_val &= ~(0xf << 8); 503 gen_cntl_val &= ~(0xf << 8);
504 gen_cntl_val |= (format << 8); 504 gen_cntl_val |= (format << 8);
505 gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK;
505 WREG32(gen_cntl_reg, gen_cntl_val); 506 WREG32(gen_cntl_reg, gen_cntl_val);
506 507
507 crtc_offset = (u32)base; 508 crtc_offset = (u32)base;
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 6794cdf91f28..a670caaee29e 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -209,6 +209,7 @@ enum radeon_connector_table {
209 CT_EMAC, 209 CT_EMAC,
210 CT_RN50_POWER, 210 CT_RN50_POWER,
211 CT_MAC_X800, 211 CT_MAC_X800,
212 CT_MAC_G5_9600,
212}; 213};
213 214
214enum radeon_dvo_chip { 215enum radeon_dvo_chip {
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 1272e4b6a1d4..8389b4c63d12 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev)
589 DRM_INFO("radeon: ttm finalized\n"); 589 DRM_INFO("radeon: ttm finalized\n");
590} 590}
591 591
592/* this should only be called at bootup or when userspace
593 * isn't running */
594void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
595{
596 struct ttm_mem_type_manager *man;
597
598 if (!rdev->mman.initialized)
599 return;
600
601 man = &rdev->mman.bdev.man[TTM_PL_VRAM];
602 /* this just adjusts TTM size idea, which sets lpfn to the correct value */
603 man->size = size >> PAGE_SHIFT;
604}
605
592static struct vm_operations_struct radeon_ttm_vm_ops; 606static struct vm_operations_struct radeon_ttm_vm_ops;
593static const struct vm_operations_struct *ttm_vm_ops = NULL; 607static const struct vm_operations_struct *ttm_vm_ops = NULL;
594 608
@@ -787,9 +801,9 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
787 radeon_mem_types_list[i].show = &radeon_mm_dump_table; 801 radeon_mem_types_list[i].show = &radeon_mm_dump_table;
788 radeon_mem_types_list[i].driver_features = 0; 802 radeon_mem_types_list[i].driver_features = 0;
789 if (i == 0) 803 if (i == 0)
790 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].priv; 804 radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
791 else 805 else
792 radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].priv; 806 radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
793 807
794 } 808 }
795 /* Add ttm page pool to debugfs */ 809 /* Add ttm page pool to debugfs */
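The new radeon_ttm_set_active_vram_size() above replaces the old rdev->mc.active_vram_size bookkeeping: callers hand TTM a byte size and the helper turns it into a page count for the VRAM manager. A tiny sketch of that conversion; PAGE_SHIFT = 12 is an assumption (4 KiB pages) and vram_size_to_pages() is a made-up name.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4 KiB pages */

/* Mirrors what radeon_ttm_set_active_vram_size() does to the TTM manager:
 * the byte size handed in by the CP/blit code becomes a page count. */
static uint64_t vram_size_to_pages(uint64_t size_bytes)
{
    return size_bytes >> PAGE_SHIFT;
}

int main(void)
{
    uint64_t visible_vram = 256ull << 20;   /* e.g. 256 MiB visible VRAM */

    printf("manager size = %llu pages\n",
           (unsigned long long)vram_size_to_pages(visible_vram));
    return 0;
}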
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r300 b/drivers/gpu/drm/radeon/reg_srcs/r300
index b506ec1cab4b..e8a1786b6426 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r300
+++ b/drivers/gpu/drm/radeon/reg_srcs/r300
@@ -683,9 +683,7 @@ r300 0x4f60
6830x4DF4 US_ALU_CONST_G_31 6830x4DF4 US_ALU_CONST_G_31
6840x4DF8 US_ALU_CONST_B_31 6840x4DF8 US_ALU_CONST_B_31
6850x4DFC US_ALU_CONST_A_31 6850x4DFC US_ALU_CONST_A_31
6860x4E04 RB3D_BLENDCNTL_R3
6870x4E08 RB3D_ABLENDCNTL_R3 6860x4E08 RB3D_ABLENDCNTL_R3
6880x4E0C RB3D_COLOR_CHANNEL_MASK
6890x4E10 RB3D_CONSTANT_COLOR 6870x4E10 RB3D_CONSTANT_COLOR
6900x4E14 RB3D_COLOR_CLEAR_VALUE 6880x4E14 RB3D_COLOR_CLEAR_VALUE
6910x4E18 RB3D_ROPCNTL_R3 6890x4E18 RB3D_ROPCNTL_R3
@@ -706,13 +704,11 @@ r300 0x4f60
7060x4E74 RB3D_CMASK_WRINDEX 7040x4E74 RB3D_CMASK_WRINDEX
7070x4E78 RB3D_CMASK_DWORD 7050x4E78 RB3D_CMASK_DWORD
7080x4E7C RB3D_CMASK_RDINDEX 7060x4E7C RB3D_CMASK_RDINDEX
7090x4E80 RB3D_AARESOLVE_OFFSET
7100x4E84 RB3D_AARESOLVE_PITCH
7110x4E88 RB3D_AARESOLVE_CTL
7120x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7070x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7130x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7080x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7140x4F04 ZB_ZSTENCILCNTL 7090x4F04 ZB_ZSTENCILCNTL
7150x4F08 ZB_STENCILREFMASK 7100x4F08 ZB_STENCILREFMASK
7160x4F14 ZB_ZTOP 7110x4F14 ZB_ZTOP
7170x4F18 ZB_ZCACHE_CTLSTAT 7120x4F18 ZB_ZCACHE_CTLSTAT
7130x4F28 ZB_DEPTHCLEARVALUE
7180x4F58 ZB_ZPASS_DATA 7140x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r420 b/drivers/gpu/drm/radeon/reg_srcs/r420
index 8c1214c2390f..722074e21e2f 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r420
+++ b/drivers/gpu/drm/radeon/reg_srcs/r420
@@ -130,7 +130,6 @@ r420 0x4f60
1300x401C GB_SELECT 1300x401C GB_SELECT
1310x4020 GB_AA_CONFIG 1310x4020 GB_AA_CONFIG
1320x4024 GB_FIFO_SIZE 1320x4024 GB_FIFO_SIZE
1330x4028 GB_Z_PEQ_CONFIG
1340x4100 TX_INVALTAGS 1330x4100 TX_INVALTAGS
1350x4200 GA_POINT_S0 1340x4200 GA_POINT_S0
1360x4204 GA_POINT_T0 1350x4204 GA_POINT_T0
@@ -750,9 +749,7 @@ r420 0x4f60
7500x4DF4 US_ALU_CONST_G_31 7490x4DF4 US_ALU_CONST_G_31
7510x4DF8 US_ALU_CONST_B_31 7500x4DF8 US_ALU_CONST_B_31
7520x4DFC US_ALU_CONST_A_31 7510x4DFC US_ALU_CONST_A_31
7530x4E04 RB3D_BLENDCNTL_R3
7540x4E08 RB3D_ABLENDCNTL_R3 7520x4E08 RB3D_ABLENDCNTL_R3
7550x4E0C RB3D_COLOR_CHANNEL_MASK
7560x4E10 RB3D_CONSTANT_COLOR 7530x4E10 RB3D_CONSTANT_COLOR
7570x4E14 RB3D_COLOR_CLEAR_VALUE 7540x4E14 RB3D_COLOR_CLEAR_VALUE
7580x4E18 RB3D_ROPCNTL_R3 7550x4E18 RB3D_ROPCNTL_R3
@@ -773,13 +770,11 @@ r420 0x4f60
7730x4E74 RB3D_CMASK_WRINDEX 7700x4E74 RB3D_CMASK_WRINDEX
7740x4E78 RB3D_CMASK_DWORD 7710x4E78 RB3D_CMASK_DWORD
7750x4E7C RB3D_CMASK_RDINDEX 7720x4E7C RB3D_CMASK_RDINDEX
7760x4E80 RB3D_AARESOLVE_OFFSET
7770x4E84 RB3D_AARESOLVE_PITCH
7780x4E88 RB3D_AARESOLVE_CTL
7790x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7730x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7800x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7740x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7810x4F04 ZB_ZSTENCILCNTL 7750x4F04 ZB_ZSTENCILCNTL
7820x4F08 ZB_STENCILREFMASK 7760x4F08 ZB_STENCILREFMASK
7830x4F14 ZB_ZTOP 7770x4F14 ZB_ZTOP
7840x4F18 ZB_ZCACHE_CTLSTAT 7780x4F18 ZB_ZCACHE_CTLSTAT
7790x4F28 ZB_DEPTHCLEARVALUE
7850x4F58 ZB_ZPASS_DATA 7800x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rs600 b/drivers/gpu/drm/radeon/reg_srcs/rs600
index 0828d80396f2..d9f62866bbc1 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rs600
+++ b/drivers/gpu/drm/radeon/reg_srcs/rs600
@@ -749,9 +749,7 @@ rs600 0x6d40
7490x4DF4 US_ALU_CONST_G_31 7490x4DF4 US_ALU_CONST_G_31
7500x4DF8 US_ALU_CONST_B_31 7500x4DF8 US_ALU_CONST_B_31
7510x4DFC US_ALU_CONST_A_31 7510x4DFC US_ALU_CONST_A_31
7520x4E04 RB3D_BLENDCNTL_R3
7530x4E08 RB3D_ABLENDCNTL_R3 7520x4E08 RB3D_ABLENDCNTL_R3
7540x4E0C RB3D_COLOR_CHANNEL_MASK
7550x4E10 RB3D_CONSTANT_COLOR 7530x4E10 RB3D_CONSTANT_COLOR
7560x4E14 RB3D_COLOR_CLEAR_VALUE 7540x4E14 RB3D_COLOR_CLEAR_VALUE
7570x4E18 RB3D_ROPCNTL_R3 7550x4E18 RB3D_ROPCNTL_R3
@@ -772,13 +770,11 @@ rs600 0x6d40
7720x4E74 RB3D_CMASK_WRINDEX 7700x4E74 RB3D_CMASK_WRINDEX
7730x4E78 RB3D_CMASK_DWORD 7710x4E78 RB3D_CMASK_DWORD
7740x4E7C RB3D_CMASK_RDINDEX 7720x4E7C RB3D_CMASK_RDINDEX
7750x4E80 RB3D_AARESOLVE_OFFSET
7760x4E84 RB3D_AARESOLVE_PITCH
7770x4E88 RB3D_AARESOLVE_CTL
7780x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 7730x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
7790x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 7740x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
7800x4F04 ZB_ZSTENCILCNTL 7750x4F04 ZB_ZSTENCILCNTL
7810x4F08 ZB_STENCILREFMASK 7760x4F08 ZB_STENCILREFMASK
7820x4F14 ZB_ZTOP 7770x4F14 ZB_ZTOP
7830x4F18 ZB_ZCACHE_CTLSTAT 7780x4F18 ZB_ZCACHE_CTLSTAT
7790x4F28 ZB_DEPTHCLEARVALUE
7840x4F58 ZB_ZPASS_DATA 7800x4F58 ZB_ZPASS_DATA
diff --git a/drivers/gpu/drm/radeon/reg_srcs/rv515 b/drivers/gpu/drm/radeon/reg_srcs/rv515
index ef422bbacfc1..911a8fbd32bb 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/rv515
+++ b/drivers/gpu/drm/radeon/reg_srcs/rv515
@@ -164,7 +164,6 @@ rv515 0x6d40
1640x401C GB_SELECT 1640x401C GB_SELECT
1650x4020 GB_AA_CONFIG 1650x4020 GB_AA_CONFIG
1660x4024 GB_FIFO_SIZE 1660x4024 GB_FIFO_SIZE
1670x4028 GB_Z_PEQ_CONFIG
1680x4100 TX_INVALTAGS 1670x4100 TX_INVALTAGS
1690x4114 SU_TEX_WRAP_PS3 1680x4114 SU_TEX_WRAP_PS3
1700x4118 PS3_ENABLE 1690x4118 PS3_ENABLE
@@ -461,9 +460,7 @@ rv515 0x6d40
4610x4DF4 US_ALU_CONST_G_31 4600x4DF4 US_ALU_CONST_G_31
4620x4DF8 US_ALU_CONST_B_31 4610x4DF8 US_ALU_CONST_B_31
4630x4DFC US_ALU_CONST_A_31 4620x4DFC US_ALU_CONST_A_31
4640x4E04 RB3D_BLENDCNTL_R3
4650x4E08 RB3D_ABLENDCNTL_R3 4630x4E08 RB3D_ABLENDCNTL_R3
4660x4E0C RB3D_COLOR_CHANNEL_MASK
4670x4E10 RB3D_CONSTANT_COLOR 4640x4E10 RB3D_CONSTANT_COLOR
4680x4E14 RB3D_COLOR_CLEAR_VALUE 4650x4E14 RB3D_COLOR_CLEAR_VALUE
4690x4E18 RB3D_ROPCNTL_R3 4660x4E18 RB3D_ROPCNTL_R3
@@ -484,9 +481,6 @@ rv515 0x6d40
4840x4E74 RB3D_CMASK_WRINDEX 4810x4E74 RB3D_CMASK_WRINDEX
4850x4E78 RB3D_CMASK_DWORD 4820x4E78 RB3D_CMASK_DWORD
4860x4E7C RB3D_CMASK_RDINDEX 4830x4E7C RB3D_CMASK_RDINDEX
4870x4E80 RB3D_AARESOLVE_OFFSET
4880x4E84 RB3D_AARESOLVE_PITCH
4890x4E88 RB3D_AARESOLVE_CTL
4900x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD 4840x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
4910x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD 4850x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
4920x4EF8 RB3D_CONSTANT_COLOR_AR 4860x4EF8 RB3D_CONSTANT_COLOR_AR
@@ -496,4 +490,5 @@ rv515 0x6d40
4960x4F14 ZB_ZTOP 4900x4F14 ZB_ZTOP
4970x4F18 ZB_ZCACHE_CTLSTAT 4910x4F18 ZB_ZCACHE_CTLSTAT
4980x4F58 ZB_ZPASS_DATA 4920x4F58 ZB_ZPASS_DATA
4930x4F28 ZB_DEPTHCLEARVALUE
4990x4FD4 ZB_STENCILREFMASK_BF 4940x4FD4 ZB_STENCILREFMASK_BF
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5afe294ed51f..8af4679db23e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev)
751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); 751 rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size; 752 rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
753 rdev->mc.visible_vram_size = rdev->mc.aper_size; 753 rdev->mc.visible_vram_size = rdev->mc.aper_size;
754 rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
755 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); 754 rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
756 base = RREG32_MC(R_000004_MC_FB_LOCATION); 755 base = RREG32_MC(R_000004_MC_FB_LOCATION);
757 base = G_000004_MC_FB_START(base) << 16; 756 base = G_000004_MC_FB_START(base) << 16;
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0137d3e3728d..66c949b7c18c 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -77,9 +77,9 @@ void rs690_pm_info(struct radeon_device *rdev)
77 switch (crev) { 77 switch (crev) {
78 case 1: 78 case 1:
79 tmp.full = dfixed_const(100); 79 tmp.full = dfixed_const(100);
80 rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info.ulBootUpMemoryClock); 80 rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info.ulBootUpMemoryClock));
81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp); 81 rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
82 if (info->info.usK8MemoryClock) 82 if (le16_to_cpu(info->info.usK8MemoryClock))
83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock)); 83 rdev->pm.igp_system_mclk.full = dfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
84 else if (rdev->clock.default_mclk) { 84 else if (rdev->clock.default_mclk) {
85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk); 85 rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
@@ -91,16 +91,16 @@ void rs690_pm_info(struct radeon_device *rdev)
 		break;
 	case 2:
 		tmp.full = dfixed_const(100);
-		rdev->pm.igp_sideport_mclk.full = dfixed_const(info->info_v2.ulBootUpSidePortClock);
+		rdev->pm.igp_sideport_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpSidePortClock));
 		rdev->pm.igp_sideport_mclk.full = dfixed_div(rdev->pm.igp_sideport_mclk, tmp);
-		if (info->info_v2.ulBootUpUMAClock)
-			rdev->pm.igp_system_mclk.full = dfixed_const(info->info_v2.ulBootUpUMAClock);
+		if (le32_to_cpu(info->info_v2.ulBootUpUMAClock))
+			rdev->pm.igp_system_mclk.full = dfixed_const(le32_to_cpu(info->info_v2.ulBootUpUMAClock));
 		else if (rdev->clock.default_mclk)
 			rdev->pm.igp_system_mclk.full = dfixed_const(rdev->clock.default_mclk);
 		else
 			rdev->pm.igp_system_mclk.full = dfixed_const(66700);
 		rdev->pm.igp_system_mclk.full = dfixed_div(rdev->pm.igp_system_mclk, tmp);
-		rdev->pm.igp_ht_link_clk.full = dfixed_const(info->info_v2.ulHTLinkFreq);
+		rdev->pm.igp_ht_link_clk.full = dfixed_const(le32_to_cpu(info->info_v2.ulHTLinkFreq));
 		rdev->pm.igp_ht_link_clk.full = dfixed_div(rdev->pm.igp_ht_link_clk, tmp);
 		rdev->pm.igp_ht_link_width.full = dfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
 		break;
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev)
 	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
 	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
 	base = G_000100_MC_FB_START(base) << 16;
 	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 2211a323db41..714ad45757d0 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev)
  */
 void r700_cp_stop(struct radeon_device *rdev)
 {
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
+	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
 	WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT));
 	WREG32(SCRATCH_UMSK, 0);
 }
@@ -321,7 +321,11 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
 		return -EINVAL;
 
 	r700_cp_stop(rdev);
-	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (15 << 8) | (3 << 0));
+	WREG32(CP_RB_CNTL,
+#ifdef __BIG_ENDIAN
+	       BUF_SWAP_32BIT |
+#endif
+	       RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));
 
 	/* Reset cp */
 	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
@@ -1119,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev)
 	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
 	rdev->mc.visible_vram_size = rdev->mc.aper_size;
-	rdev->mc.active_vram_size = rdev->mc.visible_vram_size;
 	r700_vram_gtt_location(rdev, &rdev->mc);
 	radeon_update_bandwidth_info(rdev);
 
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index abc8cf5a3672..79fa588e9ed5 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -76,10 +76,10 @@
 #define ROQ_IB1_START(x) ((x) << 0)
 #define ROQ_IB2_START(x) ((x) << 8)
 #define CP_RB_CNTL 0xC104
-#define RB_BUFSZ(x) ((x)<<0)
-#define RB_BLKSZ(x) ((x)<<8)
-#define RB_NO_UPDATE (1<<27)
-#define RB_RPTR_WR_ENA (1<<31)
+#define RB_BUFSZ(x) ((x) << 0)
+#define RB_BLKSZ(x) ((x) << 8)
+#define RB_NO_UPDATE (1 << 27)
+#define RB_RPTR_WR_ENA (1 << 31)
 #define BUF_SWAP_32BIT (2 << 16)
 #define CP_RB_RPTR 0x8700
 #define CP_RB_RPTR_ADDR 0xC10C
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 773e484f1646..297bc9a7d6e6 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP
 	  will be called k8temp.
 
 config SENSORS_K10TEMP
-	tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor"
+	tristate "AMD Family 10h/11h/12h/14h temperature sensor"
 	depends on X86 && PCI
 	help
 	  If you say yes here you get support for the temperature
 	  sensor(s) inside your CPU. Supported are later revisions of
-	  the AMD Family 10h and all revisions of the AMD Family 11h
-	  microarchitectures.
+	  the AMD Family 10h and all revisions of the AMD Family 11h,
+	  12h (Llano), and 14h (Brazos) microarchitectures.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called k10temp.
@@ -455,13 +455,14 @@ config SENSORS_JZ4740
 	  called jz4740-hwmon.
 
 config SENSORS_JC42
-	tristate "JEDEC JC42.4 compliant temperature sensors"
+	tristate "JEDEC JC42.4 compliant memory module temperature sensors"
 	depends on I2C
 	help
-	  If you say yes here you get support for Jedec JC42.4 compliant
-	  temperature sensors. Support will include, but not be limited to,
-	  ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
-	  MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3.
+	  If you say yes here, you get support for JEDEC JC42.4 compliant
+	  temperature sensors, which are used on many DDR3 memory modules for
+	  mobile devices and servers. Support will include, but not be limited
+	  to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243,
+	  MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called jc42.
@@ -574,7 +575,7 @@ config SENSORS_LM85
 	help
 	  If you say yes here you get support for National Semiconductor LM85
 	  sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100,
-	  EMC6D101 and EMC6D102.
+	  EMC6D101, EMC6D102, and EMC6D103.
 
 	  This driver can also be built as a module. If so, the module
 	  will be called lm85.
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
 	{ "ad7414", 0 },
 	{}
 };
+MODULE_DEVICE_TABLE(i2c, ad7414_id);
 
 static struct i2c_driver ad7414_driver = {
 	.driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
 	{ "adt7411", 0 },
 	{ }
 };
+MODULE_DEVICE_TABLE(i2c, adt7411_id);
 
 static struct i2c_driver adt7411_driver = {
 	.driver = {
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c
index 3f49dd376f02..6e06019015a5 100644
--- a/drivers/hwmon/f71882fg.c
+++ b/drivers/hwmon/f71882fg.c
@@ -37,7 +37,7 @@
 #define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */
 #define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */
 #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */
-#define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */
+#define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */
 
 #define SIO_REG_LDSEL 0x07 /* Logical device select */
 #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */
@@ -2111,7 +2111,6 @@ static int f71882fg_remove(struct platform_device *pdev)
 	int nr_fans = (data->type == f71882fg) ? 4 : 3;
 	u8 start_reg = f71882fg_read8(data, F71882FG_REG_START);
 
-	platform_set_drvdata(pdev, NULL);
 	if (data->hwmon_dev)
 		hwmon_device_unregister(data->hwmon_dev);
 
@@ -2178,6 +2177,7 @@ static int f71882fg_remove(struct platform_device *pdev)
 		}
 	}
 
+	platform_set_drvdata(pdev, NULL);
 	kfree(data);
 
 	return 0;
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c
index 340fc78c8dde..934991237061 100644
--- a/drivers/hwmon/jc42.c
+++ b/drivers/hwmon/jc42.c
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = {
 
 /* Configuration register defines */
 #define JC42_CFG_CRIT_ONLY (1 << 2)
+#define JC42_CFG_TCRIT_LOCK (1 << 6)
+#define JC42_CFG_EVENT_LOCK (1 << 7)
 #define JC42_CFG_SHUTDOWN (1 << 8)
 #define JC42_CFG_HYST_SHIFT 9
 #define JC42_CFG_HYST_MASK 0x03
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev,
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct jc42_data *data = i2c_get_clientdata(client);
-	long val;
+	unsigned long val;
 	int diff, hyst;
 	int err;
 	int ret = count;
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev,
 
 static DEVICE_ATTR(temp1_input, S_IRUGO,
 		   show_temp_input, NULL);
-static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit, S_IRUGO,
 		   show_temp_crit, set_temp_crit);
-static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_min, S_IRUGO,
 		   show_temp_min, set_temp_min);
-static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_max, S_IRUGO,
 		   show_temp_max, set_temp_max);
 
-static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO,
+static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO,
 		   show_temp_crit_hyst, set_temp_crit_hyst);
 static DEVICE_ATTR(temp1_max_hyst, S_IRUGO,
 		   show_temp_max_hyst, NULL);
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = {
 	NULL
 };
 
+static mode_t jc42_attribute_mode(struct kobject *kobj,
+				  struct attribute *attr, int index)
+{
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct i2c_client *client = to_i2c_client(dev);
+	struct jc42_data *data = i2c_get_clientdata(client);
+	unsigned int config = data->config;
+	bool readonly;
+
+	if (attr == &dev_attr_temp1_crit.attr)
+		readonly = config & JC42_CFG_TCRIT_LOCK;
+	else if (attr == &dev_attr_temp1_min.attr ||
+		 attr == &dev_attr_temp1_max.attr)
+		readonly = config & JC42_CFG_EVENT_LOCK;
+	else if (attr == &dev_attr_temp1_crit_hyst.attr)
+		readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK);
+	else
+		readonly = true;
+
+	return S_IRUGO | (readonly ? 0 : S_IWUSR);
+}
+
 static const struct attribute_group jc42_group = {
 	.attrs = jc42_attributes,
+	.is_visible = jc42_attribute_mode,
 };
 
 /* Return 0 if detection is successful, -ENODEV otherwise */
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c
index da5a2404cd3e..82bf65aa2968 100644
--- a/drivers/hwmon/k10temp.c
+++ b/drivers/hwmon/k10temp.c
@@ -1,5 +1,5 @@
 /*
- * k10temp.c - AMD Family 10h/11h processor hardware monitoring
+ * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring
  *
  * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de>
  *
@@ -25,7 +25,7 @@
 #include <linux/pci.h>
 #include <asm/processor.h>
 
-MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor");
+MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor");
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_LICENSE("GPL");
 
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev)
 static const struct pci_device_id k10temp_id_table[] = {
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
 	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) },
+	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
 	{}
 };
 MODULE_DEVICE_TABLE(pci, k10temp_id_table);
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c
index 1e229847f37a..d2cc28660816 100644
--- a/drivers/hwmon/lm85.c
+++ b/drivers/hwmon/lm85.c
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END };
 enum chips {
 	any_chip, lm85b, lm85c,
 	adm1027, adt7463, adt7468,
-	emc6d100, emc6d102
+	emc6d100, emc6d102, emc6d103
 };
 
 /* The LM85 registers */
@@ -90,6 +90,9 @@ enum chips {
 #define LM85_VERSTEP_EMC6D100_A0 0x60
 #define LM85_VERSTEP_EMC6D100_A1 0x61
 #define LM85_VERSTEP_EMC6D102 0x65
+#define LM85_VERSTEP_EMC6D103_A0 0x68
+#define LM85_VERSTEP_EMC6D103_A1 0x69
+#define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */
 
 #define LM85_REG_CONFIG 0x40
 
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = {
 	{ "emc6d100", emc6d100 },
 	{ "emc6d101", emc6d100 },
 	{ "emc6d102", emc6d102 },
+	{ "emc6d103", emc6d103 },
 	{ }
 };
 MODULE_DEVICE_TABLE(i2c, lm85_id);
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info)
 		case LM85_VERSTEP_EMC6D102:
 			type_name = "emc6d102";
 			break;
+		case LM85_VERSTEP_EMC6D103_A0:
+		case LM85_VERSTEP_EMC6D103_A1:
+			type_name = "emc6d103";
+			break;
+		/*
+		 * Registers apparently missing in EMC6D103S/EMC6D103:A2
+		 * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102
+		 * (according to the data sheets), but used unconditionally
+		 * in the driver: 62[5:7], 6D[0:7], and 6E[0:7].
+		 * So skip EMC6D103S for now.
+		case LM85_VERSTEP_EMC6D103S:
+			type_name = "emc6d103s";
+			break;
+		 */
 		}
 	} else {
 		dev_dbg(&adapter->dev,
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client,
 	case adt7468:
 	case emc6d100:
 	case emc6d102:
+	case emc6d103:
 		data->freq_map = adm1027_freq_map;
 		break;
 	default:
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev)
 		/* More alarm bits */
 		data->alarms |= lm85_read_value(client,
 						EMC6D100_REG_ALARM3) << 16;
-	} else if (data->type == emc6d102) {
+	} else if (data->type == emc6d102 || data->type == emc6d103) {
 		/* Have to read LSB bits after the MSB ones because
 		   the reading of the MSB bits has frozen the
 		   LSBs (backward from the ADM1027).
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index 2e067dd2ee51..50ea1f43bdc1 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/mutex.h>
 #include <linux/ktime.h>
+#include <linux/slab.h>
 
 #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */
 #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index ef3bcb1ce864..61653f079671 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = {
 static int ocores_i2c_of_probe(struct platform_device* pdev,
 			struct ocores_i2c* i2c)
 {
-	__be32* val;
+	const __be32* val;
 
 	val = of_get_property(pdev->dev.of_node, "regstep", NULL);
 	if (!val) {
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index b605ff3a1fa0..58a58c7eaa17 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 			 * REVISIT: Some wkup sources might not be needed.
 			 */
 			dev->westate = OMAP_I2C_WE_ALL;
-			if (dev->rev < OMAP_I2C_REV_ON_4430)
-				omap_i2c_write_reg(dev, OMAP_I2C_WE_REG,
-						dev->westate);
+			omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
 		}
 	}
 	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
@@ -847,11 +845,15 @@ complete:
 			dev_err(dev->dev, "Arbitration lost\n");
 			err |= OMAP_I2C_STAT_AL;
 		}
+		/*
+		 * ProDB0017052: Clear ARDY bit twice
+		 */
 		if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK |
 					OMAP_I2C_STAT_AL)) {
 			omap_i2c_ack_stat(dev, stat &
 				(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
-				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
+				OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR |
+				OMAP_I2C_STAT_ARDY));
 			omap_i2c_complete_cmd(dev, err);
 			return IRQ_HANDLED;
 		}
@@ -1137,12 +1139,41 @@ omap_i2c_remove(struct platform_device *pdev)
 	return 0;
 }
 
+#ifdef CONFIG_SUSPEND
+static int omap_i2c_suspend(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
+			dev->bus->pm->runtime_suspend(dev);
+
+	return 0;
+}
+
+static int omap_i2c_resume(struct device *dev)
+{
+	if (!pm_runtime_suspended(dev))
+		if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
+			dev->bus->pm->runtime_resume(dev);
+
+	return 0;
+}
+
+static struct dev_pm_ops omap_i2c_pm_ops = {
+	.suspend = omap_i2c_suspend,
+	.resume = omap_i2c_resume,
+};
+#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
+#else
+#define OMAP_I2C_PM_OPS NULL
+#endif
+
 static struct platform_driver omap_i2c_driver = {
 	.probe = omap_i2c_probe,
 	.remove = omap_i2c_remove,
 	.driver = {
 		.name = "omap_i2c",
 		.owner = THIS_MODULE,
+		.pm = OMAP_I2C_PM_OPS,
 	},
 };
 
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index 495be451d326..266135ddf7fa 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev)
 	adap->owner = THIS_MODULE;
 	/* DDC class but actually often used for more generic I2C */
 	adap->class = I2C_CLASS_DDC;
-	strncpy(adap->name, "ST Microelectronics DDC I2C adapter",
+	strlcpy(adap->name, "ST Microelectronics DDC I2C adapter",
 		sizeof(adap->name));
 	adap->nr = bus_nr;
 	adap->algo = &stu300_algo;
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 1fa091e05690..4a5c4a44ffb1 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -62,6 +62,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <asm/mwait.h>
+#include <asm/msr.h>
 
 #define INTEL_IDLE_VERSION "0.4"
 #define PREFIX "intel_idle: "
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state);
 static struct cpuidle_state *cpuidle_state_table;
 
 /*
+ * Hardware C-state auto-demotion may not always be optimal.
+ * Indicate which enable bits to clear here.
+ */
+static unsigned long long auto_demotion_disable_flags;
+
+/*
  * Set this flag for states where the HW flushes the TLB for us
  * and so we don't need cross-calls to keep it consistent.
  * If this flag is set, SW flushes the TLB, so even if the
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = {
 	.notifier_call = setup_broadcast_cpuhp_notify,
 };
 
+static void auto_demotion_disable(void *dummy)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+	msr_bits &= ~auto_demotion_disable_flags;
+	wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
+}
+
 /*
  * intel_idle_probe()
  */
@@ -324,11 +340,17 @@ static int intel_idle_probe(void)
 	case 0x25: /* Westmere */
 	case 0x2C: /* Westmere */
 		cpuidle_state_table = nehalem_cstates;
+		auto_demotion_disable_flags =
+			(NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE);
 		break;
 
 	case 0x1C: /* 28 - Atom Processor */
+		cpuidle_state_table = atom_cstates;
+		break;
+
 	case 0x26: /* 38 - Lincroft Atom Processor */
 		cpuidle_state_table = atom_cstates;
+		auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE;
 		break;
 
 	case 0x2A: /* SNB */
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void)
 			return -EIO;
 		}
 	}
+	if (auto_demotion_disable_flags)
+		smp_call_function(auto_demotion_disable, NULL, 1);
 
 	return 0;
 }
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index 8b606fd64022..08c194861af5 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 				netif_carrier_on(nesvnic->netdev);
 
 				spin_lock(&nesvnic->port_ibevent_lock);
-				if (nesdev->iw_status == 0) {
-					nesdev->iw_status = 1;
-					nes_port_ibevent(nesvnic);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 0) {
+						nesdev->iw_status = 1;
+						nes_port_ibevent(nesvnic);
+					}
 				}
 				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
 				netif_carrier_off(nesvnic->netdev);
 
 				spin_lock(&nesvnic->port_ibevent_lock);
-				if (nesdev->iw_status == 1) {
-					nesdev->iw_status = 0;
-					nes_port_ibevent(nesvnic);
+				if (nesvnic->of_device_registered) {
+					if (nesdev->iw_status == 1) {
+						nesdev->iw_status = 0;
+						nes_port_ibevent(nesvnic);
+					}
 				}
 				spin_unlock(&nesvnic->port_ibevent_lock);
 			}
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work)
 			netif_carrier_on(nesvnic->netdev);
 
 			spin_lock(&nesvnic->port_ibevent_lock);
-			if (nesdev->iw_status == 0) {
-				nesdev->iw_status = 1;
-				nes_port_ibevent(nesvnic);
+			if (nesvnic->of_device_registered) {
+				if (nesdev->iw_status == 0) {
+					nesdev->iw_status = 1;
+					nes_port_ibevent(nesvnic);
+				}
 			}
 			spin_unlock(&nesvnic->port_ibevent_lock);
 		}
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work)
 			netif_carrier_off(nesvnic->netdev);
 
 			spin_lock(&nesvnic->port_ibevent_lock);
-			if (nesdev->iw_status == 1) {
-				nesdev->iw_status = 0;
-				nes_port_ibevent(nesvnic);
+			if (nesvnic->of_device_registered) {
+				if (nesdev->iw_status == 1) {
+					nesdev->iw_status = 0;
+					nes_port_ibevent(nesvnic);
+				}
 			}
 			spin_unlock(&nesvnic->port_ibevent_lock);
 		}
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 8245237b67ce..eca0c41f1226 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
 	 * there are still requests that haven't been acked.
 	 */
 	if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail &&
-	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)))
+	    !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) &&
+	    (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
 		start_timer(qp);
 
 	while (qp->s_last != qp->s_acked) {
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp,
 	}
 
 	spin_lock_irqsave(&qp->s_lock, flags);
+	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK))
+		goto ack_done;
 
 	/* Ignore invalid responses. */
 	if (qib_cmp24(psn, qp->s_next_psn) >= 0)
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 23cf8fc933ec..5b8f59d6c3e8 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner,
 	event->owner = owner;
 
 	list_add_tail(&event->node, &gameport_event_list);
-	schedule_work(&gameport_event_work);
+	queue_work(system_long_wq, &gameport_event_work);
 
 out:
 	spin_unlock_irqrestore(&gameport_event_lock, flags);
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 7985114beac7..11905b6a3023 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -75,7 +75,6 @@ static int input_defuzz_abs_event(int value, int old_val, int fuzz)
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
-			     struct input_handler *src_handler,
 			     unsigned int type, unsigned int code, int value)
 {
 	struct input_handler *handler;
@@ -94,15 +93,6 @@ static void input_pass_event(struct input_dev *dev,
 			continue;
 
 		handler = handle->handler;
-
-		/*
-		 * If this is the handler that injected this
-		 * particular event we want to skip it to avoid
-		 * filters firing again and again.
-		 */
-		if (handler == src_handler)
-			continue;
-
 		if (!handler->filter) {
 			if (filtered)
 				break;
@@ -132,7 +122,7 @@ static void input_repeat_key(unsigned long data)
 	if (test_bit(dev->repeat_key, dev->key) &&
 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-		input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
+		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
 
 		if (dev->sync) {
 			/*
@@ -141,7 +131,7 @@ static void input_repeat_key(unsigned long data)
 			 * Otherwise assume that the driver will send
 			 * SYN_REPORT once it's done.
 			 */
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 		}
 
 		if (dev->rep[REP_PERIOD])
@@ -174,7 +164,6 @@ static void input_stop_autorepeat(struct input_dev *dev)
 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
-				  struct input_handler *src_handler,
 				  unsigned int code, int *pval)
 {
 	bool is_mt_event;
@@ -218,15 +207,13 @@ static int input_handle_abs_event(struct input_dev *dev,
 	/* Flush pending "slot" event */
 	if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
 		input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-		input_pass_event(dev, src_handler,
-				 EV_ABS, ABS_MT_SLOT, dev->slot);
+		input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
 	}
 
 	return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
-			       struct input_handler *src_handler,
 			       unsigned int type, unsigned int code, int value)
 {
 	int disposition = INPUT_IGNORE_EVENT;
@@ -279,8 +266,7 @@ static void input_handle_event(struct input_dev *dev,
 
 	case EV_ABS:
 		if (is_event_supported(code, dev->absbit, ABS_MAX))
-			disposition = input_handle_abs_event(dev, src_handler,
-							     code, &value);
+			disposition = input_handle_abs_event(dev, code, &value);
 
 		break;
 
@@ -338,7 +324,7 @@ static void input_handle_event(struct input_dev *dev,
 		dev->event(dev, type, code, value);
 
 	if (disposition & INPUT_PASS_TO_HANDLERS)
-		input_pass_event(dev, src_handler, type, code, value);
+		input_pass_event(dev, type, code, value);
 }
 
 /**
@@ -367,7 +353,7 @@ void input_event(struct input_dev *dev,
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		add_input_randomness(type, code, value);
-		input_handle_event(dev, NULL, type, code, value);
+		input_handle_event(dev, type, code, value);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 }
@@ -397,8 +383,7 @@ void input_inject_event(struct input_handle *handle,
 	rcu_read_lock();
 	grab = rcu_dereference(dev->grab);
 	if (!grab || grab == handle)
-		input_handle_event(dev, handle->handler,
-				   type, code, value);
+		input_handle_event(dev, type, code, value);
 	rcu_read_unlock();
 
 	spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -611,10 +596,10 @@ static void input_dev_release_keys(struct input_dev *dev)
 		for (code = 0; code <= KEY_MAX; code++) {
 			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
 			    __test_and_clear_bit(code, dev->key)) {
-				input_pass_event(dev, NULL, EV_KEY, code, 0);
+				input_pass_event(dev, EV_KEY, code, 0);
 			}
 		}
-		input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
@@ -889,9 +874,9 @@ int input_set_keycode(struct input_dev *dev,
 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
 	    __test_and_clear_bit(old_keycode, dev->key)) {
 
-		input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
+		input_pass_event(dev, EV_KEY, old_keycode, 0);
 		if (dev->sync)
-			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
 	}
 
  out:
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c
index ac471b77c18e..99ce9032d08c 100644
--- a/drivers/input/keyboard/tegra-kbc.c
+++ b/drivers/input/keyboard/tegra-kbc.c
@@ -71,8 +71,9 @@ struct tegra_kbc {
 	spinlock_t lock;
 	unsigned int repoll_dly;
 	unsigned long cp_dly_jiffies;
+	bool use_fn_map;
 	const struct tegra_kbc_platform_data *pdata;
-	unsigned short keycode[KBC_MAX_KEY];
+	unsigned short keycode[KBC_MAX_KEY * 2];
 	unsigned short current_keys[KBC_MAX_KPENT];
 	unsigned int num_pressed_keys;
 	struct timer_list timer;
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = {
 	KEY(15, 5, KEY_F2),
 	KEY(15, 6, KEY_CAPSLOCK),
 	KEY(15, 7, KEY_F6),
+
+	/* Software Handled Function Keys */
+	KEY(20, 0, KEY_KP7),
+
+	KEY(21, 0, KEY_KP9),
+	KEY(21, 1, KEY_KP8),
+	KEY(21, 2, KEY_KP4),
+	KEY(21, 4, KEY_KP1),
+
+	KEY(22, 1, KEY_KPSLASH),
+	KEY(22, 2, KEY_KP6),
+	KEY(22, 3, KEY_KP5),
+	KEY(22, 4, KEY_KP3),
+	KEY(22, 5, KEY_KP2),
+	KEY(22, 7, KEY_KP0),
+
+	KEY(27, 1, KEY_KPASTERISK),
+	KEY(27, 3, KEY_KPMINUS),
+	KEY(27, 4, KEY_KPPLUS),
+	KEY(27, 5, KEY_KPDOT),
+
+	KEY(28, 5, KEY_VOLUMEUP),
+
+	KEY(29, 3, KEY_HOME),
+	KEY(29, 4, KEY_END),
+	KEY(29, 5, KEY_BRIGHTNESSDOWN),
+	KEY(29, 6, KEY_VOLUMEDOWN),
+	KEY(29, 7, KEY_BRIGHTNESSUP),
+
+	KEY(30, 0, KEY_NUMLOCK),
+	KEY(30, 1, KEY_SCROLLLOCK),
+	KEY(30, 2, KEY_MUTE),
+
+	KEY(31, 4, KEY_HELP),
 };
 
 static const struct matrix_keymap_data tegra_kbc_default_keymap_data = {
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 	unsigned int i;
 	unsigned int num_down = 0;
 	unsigned long flags;
+	bool fn_keypress = false;
 
 	spin_lock_irqsave(&kbc->lock, flags);
 	for (i = 0; i < KBC_MAX_KPENT; i++) {
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc)
 				MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT);
 
 			scancodes[num_down] = scancode;
-			keycodes[num_down++] = kbc->keycode[scancode];
+			keycodes[num_down] = kbc->keycode[scancode];
+			/* If driver uses Fn map, do not report the Fn key. */
+			if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map)
+				fn_keypress = true;
+			else
+				num_down++;
 		}
 
 		val >>= 8;
 	}
+
+	/*
+	 * If the platform uses Fn keymaps, translate keys on a Fn keypress.
+	 * Function keycodes are KBC_MAX_KEY apart from the plain keycodes.
+	 */
+	if (fn_keypress) {
+		for (i = 0; i < num_down; i++) {
+			scancodes[i] += KBC_MAX_KEY;
+			keycodes[i] = kbc->keycode[scancodes[i]];
+		}
+	}
+
 	spin_unlock_irqrestore(&kbc->lock, flags);
 
 	tegra_kbc_report_released_keys(kbc->idev,
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev)
 
 	input_dev->keycode = kbc->keycode;
 	input_dev->keycodesize = sizeof(kbc->keycode[0]);
-	input_dev->keycodemax = ARRAY_SIZE(kbc->keycode);
+	input_dev->keycodemax = KBC_MAX_KEY;
+	if (pdata->use_fn_map)
+		input_dev->keycodemax *= 2;
 
+	kbc->use_fn_map = pdata->use_fn_map;
 	keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data;
 	matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT,
 				   input_dev->keycode, input_dev->keybit);
diff --git a/drivers/input/misc/rotary_encoder.c b/drivers/input/misc/rotary_encoder.c
index 1f8e0108962e..7e64d01da2be 100644
--- a/drivers/input/misc/rotary_encoder.c
+++ b/drivers/input/misc/rotary_encoder.c
@@ -176,7 +176,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 
 	/* request the IRQs */
 	err = request_irq(encoder->irq_a, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
@@ -185,7 +185,7 @@ static int __devinit rotary_encoder_probe(struct platform_device *pdev)
 	}
 
 	err = request_irq(encoder->irq_b, &rotary_encoder_irq,
-			  IORESOURCE_IRQ_HIGHEDGE | IORESOURCE_IRQ_LOWEDGE,
+			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 			  DRV_NAME, encoder);
 	if (err) {
 		dev_err(&pdev->dev, "unable to request IRQ %d\n",
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h
index 25e5d042a72c..7453938bf5ef 100644
--- a/drivers/input/mouse/synaptics.h
+++ b/drivers/input/mouse/synaptics.h
@@ -51,6 +51,29 @@
 #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20)
 #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12)
 #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16)
+
+/*
+ * The following describes response for the 0x0c query.
+ *
+ * byte	mask	name			meaning
+ * ----	----	-------			------------
+ * 1	0x01	adjustable threshold	capacitive button sensitivity
+ *					can be adjusted
+ * 1	0x02	report max		query 0x0d gives max coord reported
+ * 1	0x04	clearpad		sensor is ClearPad product
+ * 1	0x08	advanced gesture	not particularly meaningful
+ * 1	0x10	clickpad bit 0		1-button ClickPad
+ * 1	0x60	multifinger mode	identifies firmware finger counting
+ *					(not reporting!) algorithm.
+ *					Not particularly meaningful
+ * 1	0x80	covered pad		W clipped to 14, 15 == pad mostly covered
+ * 2	0x01	clickpad bit 1		2-button ClickPad
+ * 2	0x02	deluxe LED controls	touchpad support LED commands
+ *					ala multimedia control bar
+ * 2	0x04	reduced filtering	firmware does less filtering on
+ *					position data, driver should watch
+ *					for noise.
+ */
 #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */
 #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */
 #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000)
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index db5b0bca1a1a..ba70058e2be3 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -188,7 +188,8 @@ static void serio_free_event(struct serio_event *event)
 	kfree(event);
 }
 
-static void serio_remove_duplicate_events(struct serio_event *event)
+static void serio_remove_duplicate_events(void *object,
+					  enum serio_event_type type)
 {
 	struct serio_event *e, *next;
 	unsigned long flags;
@@ -196,13 +197,13 @@ static void serio_remove_duplicate_events(struct serio_event *event)
 	spin_lock_irqsave(&serio_event_lock, flags);
 
 	list_for_each_entry_safe(e, next, &serio_event_list, node) {
-		if (event->object == e->object) {
+		if (object == e->object) {
 			/*
 			 * If this event is of different type we should not
 			 * look further - we only suppress duplicate events
 			 * that were sent back-to-back.
 			 */
-			if (event->type != e->type)
+			if (type != e->type)
 				break;
 
 			list_del_init(&e->node);
@@ -245,7 +246,7 @@ static void serio_handle_event(struct work_struct *work)
 		break;
 	}
 
-	serio_remove_duplicate_events(event);
+	serio_remove_duplicate_events(event->object, event->type);
 	serio_free_event(event);
 	}
 
@@ -298,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner,
 	event->owner = owner;
 
 	list_add_tail(&event->node, &serio_event_list);
-	schedule_work(&serio_event_work);
+	queue_work(system_long_wq, &serio_event_work);
 
 out:
 	spin_unlock_irqrestore(&serio_event_lock, flags);
@@ -436,10 +437,12 @@ static ssize_t serio_rebind_driver(struct device *dev, struct device_attribute *
 	} else if (!strncmp(buf, "rescan", count)) {
 		serio_disconnect_port(serio);
 		serio_find_driver(serio);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
 		serio_disconnect_port(serio);
 		error = serio_bind_driver(serio, to_serio_driver(drv));
 		put_driver(drv);
+		serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
 	} else {
 		error = -EINVAL;
 	}
diff --git a/drivers/input/tablet/wacom_sys.c b/drivers/input/tablet/wacom_sys.c
index fc381498b798..cf8fb9f5d4a8 100644
--- a/drivers/input/tablet/wacom_sys.c
+++ b/drivers/input/tablet/wacom_sys.c
@@ -519,7 +519,7 @@ static int wacom_probe(struct usb_interface *intf, const struct usb_device_id *i
 	/* Retrieve the physical and logical size for OEM devices */
 	error = wacom_retrieve_hid_descriptor(intf, features);
 	if (error)
-		goto fail2;
+		goto fail3;
 
 	wacom_setup_device_quirks(features);
 
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c
index 14ea54b78e46..4bf2316e3284 100644
--- a/drivers/input/touchscreen/ads7846.c
+++ b/drivers/input/touchscreen/ads7846.c
@@ -941,28 +941,29 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
 	struct ads7846_platform_data *pdata = spi->dev.platform_data;
 	int err;
 
-	/* REVISIT when the irq can be triggered active-low, or if for some
+	/*
+	 * REVISIT when the irq can be triggered active-low, or if for some
 	 * reason the touchscreen isn't hooked up, we don't need to access
 	 * the pendown state.
 	 */
-	if (!pdata->get_pendown_state && !gpio_is_valid(pdata->gpio_pendown)) {
-		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
-		return -EINVAL;
-	}
 
 	if (pdata->get_pendown_state) {
 		ts->get_pendown_state = pdata->get_pendown_state;
-		return 0;
-	}
+	} else if (gpio_is_valid(pdata->gpio_pendown)) {
 
-	err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
-	if (err) {
-		dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
-			pdata->gpio_pendown);
-		return err;
-	}
+		err = gpio_request(pdata->gpio_pendown, "ads7846_pendown");
+		if (err) {
+			dev_err(&spi->dev, "failed to request pendown GPIO%d\n",
+				pdata->gpio_pendown);
+			return err;
+		}
 
-	ts->gpio_pendown = pdata->gpio_pendown;
+		ts->gpio_pendown = pdata->gpio_pendown;
+
+	} else {
+		dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
+		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -1353,7 +1354,7 @@ static int __devinit ads7846_probe(struct spi_device *spi)
  err_put_regulator:
 	regulator_put(ts->reg);
  err_free_gpio:
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state)
 		gpio_free(ts->gpio_pendown);
  err_cleanup_filter:
 	if (ts->filter_cleanup)
@@ -1383,8 +1384,13 @@ static int __devexit ads7846_remove(struct spi_device *spi)
 	regulator_disable(ts->reg);
 	regulator_put(ts->reg);
 
-	if (ts->gpio_pendown != -1)
+	if (!ts->get_pendown_state) {
+		/*
+		 * If we are not using specialized pendown method we must
+		 * have been relying on gpio we set up ourselves.
+		 */
 		gpio_free(ts->gpio_pendown);
+	}
 
 	if (ts->filter_cleanup)
 		ts->filter_cleanup(ts->filter_data);
diff --git a/drivers/input/touchscreen/wacom_w8001.c b/drivers/input/touchscreen/wacom_w8001.c
index 5cb8449c909d..c14412ef4648 100644
--- a/drivers/input/touchscreen/wacom_w8001.c
+++ b/drivers/input/touchscreen/wacom_w8001.c
@@ -51,6 +51,10 @@ MODULE_LICENSE("GPL");
 #define W8001_PKTLEN_TPCCTL 11 /* control packet */
 #define W8001_PKTLEN_TOUCH2FG 13
 
+/* resolution in points/mm */
+#define W8001_PEN_RESOLUTION 100
+#define W8001_TOUCH_RESOLUTION 10
+
 struct w8001_coord {
 	u8 rdy;
 	u8 tsw;
@@ -198,7 +202,7 @@ static void parse_touchquery(u8 *data, struct w8001_touch_query *query)
 		query->y = 1024;
 		if (query->panel_res)
 			query->x = query->y = (1 << query->panel_res);
-		query->panel_res = 10;
+		query->panel_res = W8001_TOUCH_RESOLUTION;
 	}
 }
 
@@ -394,6 +398,8 @@ static int w8001_setup(struct w8001 *w8001)
 
 		input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION);
+		input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION);
 		input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0);
 		if (coord.tilt_x && coord.tilt_y) {
 			input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0);
@@ -418,14 +424,17 @@ static int w8001_setup(struct w8001 *w8001)
 		w8001->max_touch_x = touch.x;
 		w8001->max_touch_y = touch.y;
 
-		/* scale to pen maximum */
 		if (w8001->max_pen_x && w8001->max_pen_y) {
+			/* if pen is supported scale to pen maximum */
 			touch.x = w8001->max_pen_x;
 			touch.y = w8001->max_pen_y;
+			touch.panel_res = W8001_PEN_RESOLUTION;
 		}
 
 		input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0);
 		input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0);
+		input_abs_set_res(dev, ABS_X, touch.panel_res);
+		input_abs_set_res(dev, ABS_Y, touch.panel_res);
 
 		switch (touch.sensor_id) {
 		case 0:
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c
index 18f8798442fa..7bd5baa547be 100644
--- a/drivers/isdn/hardware/eicon/istream.c
+++ b/drivers/isdn/hardware/eicon/istream.c
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a,
   stream interface.
   If synchronous service was requested, then function
   does return amount of data written to stream.
-  'final' does indicate that pice of data to be written is
+  'final' does indicate that piece of data to be written is
   final part of frame (necessary only by structured datatransfer)
   return 0 if zero lengh packet was written
   return -1 if stream is full
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c
index 0858791978d8..cfff0c41d298 100644
--- a/drivers/isdn/hisax/isdnl2.c
+++ b/drivers/isdn/hisax/isdnl2.c
@@ -1247,10 +1247,10 @@ static void
 l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 {
 	struct PStack *st = fi->userdata;
-	struct sk_buff *skb, *oskb;
+	struct sk_buff *skb;
 	struct Layer2 *l2 = &st->l2;
 	u_char header[MAX_HEADER_LEN];
-	int i;
+	int i, hdr_space_needed;
 	int unsigned p1;
 	u_long flags;
 
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
 	if (!skb)
 		return;
 
+	hdr_space_needed = l2headersize(l2, 0);
+	if (hdr_space_needed > skb_headroom(skb)) {
+		struct sk_buff *orig_skb = skb;
+
+		skb = skb_realloc_headroom(skb, hdr_space_needed);
+		if (!skb) {
+			dev_kfree_skb(orig_skb);
+			return;
+		}
+	}
 	spin_lock_irqsave(&l2->lock, flags);
 	if(test_bit(FLG_MOD128, &l2->flag))
 		p1 = (l2->vs - l2->va) % 128;
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
1285 l2->vs = (l2->vs + 1) % 8; 1295 l2->vs = (l2->vs + 1) % 8;
1286 } 1296 }
1287 spin_unlock_irqrestore(&l2->lock, flags); 1297 spin_unlock_irqrestore(&l2->lock, flags);
1288 p1 = skb->data - skb->head; 1298 memcpy(skb_push(skb, i), header, i);
1289 if (p1 >= i)
1290 memcpy(skb_push(skb, i), header, i);
1291 else {
1292 printk(KERN_WARNING
1293 "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
1294 oskb = skb;
1295 skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
1296 memcpy(skb_put(skb, i), header, i);
1297 skb_copy_from_linear_data(oskb,
1298 skb_put(skb, oskb->len), oskb->len);
1299 dev_kfree_skb(oskb);
1300 }
1301 st->l2.l2l1(st, PH_PULL | INDICATION, skb); 1299 st->l2.l2l1(st, PH_PULL | INDICATION, skb);
1302 test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); 1300 test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
1303 if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { 1301 if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
216 216
217 if (md_check_no_bitmap(mddev)) 217 if (md_check_no_bitmap(mddev))
218 return -EINVAL; 218 return -EINVAL;
219 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
220 conf = linear_conf(mddev, mddev->raid_disks); 219 conf = linear_conf(mddev, mddev->raid_disks);
221 220
222 if (!conf) 221 if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
553{ 553{
554 mddev_t *mddev, *new = NULL; 554 mddev_t *mddev, *new = NULL;
555 555
556 if (unit && MAJOR(unit) != MD_MAJOR)
557 unit &= ~((1<<MdpMinorShift)-1);
558
556 retry: 559 retry:
557 spin_lock(&all_mddevs_lock); 560 spin_lock(&all_mddevs_lock);
558 561
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
4138 } 4141 }
4139 4142
4140 mddev->array_sectors = sectors; 4143 mddev->array_sectors = sectors;
4141 set_capacity(mddev->gendisk, mddev->array_sectors); 4144 if (mddev->pers) {
4142 if (mddev->pers) 4145 set_capacity(mddev->gendisk, mddev->array_sectors);
4143 revalidate_disk(mddev->gendisk); 4146 revalidate_disk(mddev->gendisk);
4144 4147 }
4145 return len; 4148 return len;
4146} 4149}
4147 4150
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
4624 } 4627 }
4625 set_capacity(mddev->gendisk, mddev->array_sectors); 4628 set_capacity(mddev->gendisk, mddev->array_sectors);
4626 revalidate_disk(mddev->gendisk); 4629 revalidate_disk(mddev->gendisk);
4630 mddev->changed = 1;
4627 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4631 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4628out: 4632out:
4629 return err; 4633 return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
4712 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4716 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4713 mddev->recovery = 0; 4717 mddev->recovery = 0;
4714 mddev->in_sync = 0; 4718 mddev->in_sync = 0;
4719 mddev->changed = 0;
4715 mddev->degraded = 0; 4720 mddev->degraded = 0;
4716 mddev->safemode = 0; 4721 mddev->safemode = 0;
4717 mddev->bitmap_info.offset = 0; 4722 mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4827 4832
4828 set_capacity(disk, 0); 4833 set_capacity(disk, 0);
4829 mutex_unlock(&mddev->open_mutex); 4834 mutex_unlock(&mddev->open_mutex);
4835 mddev->changed = 1;
4830 revalidate_disk(disk); 4836 revalidate_disk(disk);
4831 4837
4832 if (mddev->ro) 4838 if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
6011 atomic_inc(&mddev->openers); 6017 atomic_inc(&mddev->openers);
6012 mutex_unlock(&mddev->open_mutex); 6018 mutex_unlock(&mddev->open_mutex);
6013 6019
6014 check_disk_size_change(mddev->gendisk, bdev); 6020 check_disk_change(bdev);
6015 out: 6021 out:
6016 return err; 6022 return err;
6017} 6023}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
6026 6032
6027 return 0; 6033 return 0;
6028} 6034}
6035
6036static int md_media_changed(struct gendisk *disk)
6037{
6038 mddev_t *mddev = disk->private_data;
6039
6040 return mddev->changed;
6041}
6042
6043static int md_revalidate(struct gendisk *disk)
6044{
6045 mddev_t *mddev = disk->private_data;
6046
6047 mddev->changed = 0;
6048 return 0;
6049}
6029static const struct block_device_operations md_fops = 6050static const struct block_device_operations md_fops =
6030{ 6051{
6031 .owner = THIS_MODULE, 6052 .owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
6036 .compat_ioctl = md_compat_ioctl, 6057 .compat_ioctl = md_compat_ioctl,
6037#endif 6058#endif
6038 .getgeo = md_getgeo, 6059 .getgeo = md_getgeo,
6060 .media_changed = md_media_changed,
6061 .revalidate_disk= md_revalidate,
6039}; 6062};
6040 6063
6041static int md_thread(void * arg) 6064static int md_thread(void * arg)
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b8593b2a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
274 atomic_t active; /* general refcount */ 274 atomic_t active; /* general refcount */
275 atomic_t openers; /* number of active opens */ 275 atomic_t openers; /* number of active opens */
276 276
277 int changed; /* True if we might need to
278 * reread partition info */
277 int degraded; /* whether md should consider 279 int degraded; /* whether md should consider
278 * adding a spare 280 * adding a spare
279 */ 281 */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
435 * bookkeeping area. [whatever we allocate in multipath_run(), 435 * bookkeeping area. [whatever we allocate in multipath_run(),
436 * should be freed in multipath_stop()] 436 * should be freed in multipath_stop()]
437 */ 437 */
438 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
439 438
440 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); 439 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
441 mddev->private = conf; 440 mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
361 if (md_check_no_bitmap(mddev)) 361 if (md_check_no_bitmap(mddev))
362 return -EINVAL; 362 return -EINVAL;
363 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 363 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
364 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
365 364
366 /* if private is not null, we are here after takeover */ 365 /* if private is not null, we are here after takeover */
367 if (mddev->private == NULL) { 366 if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
670 mddev->new_layout = 0; 669 mddev->new_layout = 0;
671 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ 670 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
672 mddev->delta_disks = 1 - mddev->raid_disks; 671 mddev->delta_disks = 1 - mddev->raid_disks;
672 mddev->raid_disks = 1;
673 /* make sure it will be not marked as dirty */ 673 /* make sure it will be not marked as dirty */
674 mddev->recovery_cp = MaxSector; 674 mddev->recovery_cp = MaxSector;
675 675
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
593 if (conf->pending_bio_list.head) { 593 if (conf->pending_bio_list.head) {
594 struct bio *bio; 594 struct bio *bio;
595 bio = bio_list_get(&conf->pending_bio_list); 595 bio = bio_list_get(&conf->pending_bio_list);
596 /* Only take the spinlock to quiet a warning */
597 spin_lock(conf->mddev->queue->queue_lock);
596 blk_remove_plug(conf->mddev->queue); 598 blk_remove_plug(conf->mddev->queue);
599 spin_unlock(conf->mddev->queue->queue_lock);
597 spin_unlock_irq(&conf->device_lock); 600 spin_unlock_irq(&conf->device_lock);
598 /* flush any pending bitmap writes to 601 /* flush any pending bitmap writes to
599 * disk before proceeding w/ I/O */ 602 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
959 atomic_inc(&r1_bio->remaining); 962 atomic_inc(&r1_bio->remaining);
960 spin_lock_irqsave(&conf->device_lock, flags); 963 spin_lock_irqsave(&conf->device_lock, flags);
961 bio_list_add(&conf->pending_bio_list, mbio); 964 bio_list_add(&conf->pending_bio_list, mbio);
962 blk_plug_device(mddev->queue); 965 blk_plug_device_unlocked(mddev->queue);
963 spin_unlock_irqrestore(&conf->device_lock, flags); 966 spin_unlock_irqrestore(&conf->device_lock, flags);
964 } 967 }
965 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); 968 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
2021 if (IS_ERR(conf)) 2024 if (IS_ERR(conf))
2022 return PTR_ERR(conf); 2025 return PTR_ERR(conf);
2023 2026
2024 mddev->queue->queue_lock = &conf->device_lock;
2025 list_for_each_entry(rdev, &mddev->disks, same_set) { 2027 list_for_each_entry(rdev, &mddev->disks, same_set) {
2026 disk_stack_limits(mddev->gendisk, rdev->bdev, 2028 disk_stack_limits(mddev->gendisk, rdev->bdev,
2027 rdev->data_offset << 9); 2029 rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
662 if (conf->pending_bio_list.head) { 662 if (conf->pending_bio_list.head) {
663 struct bio *bio; 663 struct bio *bio;
664 bio = bio_list_get(&conf->pending_bio_list); 664 bio = bio_list_get(&conf->pending_bio_list);
665 /* Spinlock only taken to quiet a warning */
666 spin_lock(conf->mddev->queue->queue_lock);
665 blk_remove_plug(conf->mddev->queue); 667 blk_remove_plug(conf->mddev->queue);
668 spin_unlock(conf->mddev->queue->queue_lock);
666 spin_unlock_irq(&conf->device_lock); 669 spin_unlock_irq(&conf->device_lock);
667 /* flush any pending bitmap writes to disk 670 /* flush any pending bitmap writes to disk
668 * before proceeding w/ I/O */ 671 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
971 atomic_inc(&r10_bio->remaining); 974 atomic_inc(&r10_bio->remaining);
972 spin_lock_irqsave(&conf->device_lock, flags); 975 spin_lock_irqsave(&conf->device_lock, flags);
973 bio_list_add(&conf->pending_bio_list, mbio); 976 bio_list_add(&conf->pending_bio_list, mbio);
974 blk_plug_device(mddev->queue); 977 blk_plug_device_unlocked(mddev->queue);
975 spin_unlock_irqrestore(&conf->device_lock, flags); 978 spin_unlock_irqrestore(&conf->device_lock, flags);
976 } 979 }
977 980
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
2304 if (!conf) 2307 if (!conf)
2305 goto out; 2308 goto out;
2306 2309
2307 mddev->queue->queue_lock = &conf->device_lock;
2308
2309 mddev->thread = conf->thread; 2310 mddev->thread = conf->thread;
2310 conf->thread = NULL; 2311 conf->thread = NULL;
2311 2312
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
5204 5204
5205 mddev->queue->backing_dev_info.congested_data = mddev; 5205 mddev->queue->backing_dev_info.congested_data = mddev;
5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5207 mddev->queue->queue_lock = &conf->device_lock;
5208 mddev->queue->unplug_fn = raid5_unplug_queue; 5207 mddev->queue->unplug_fn = raid5_unplug_queue;
5209 5208
5210 chunk_size = mddev->chunk_sectors << 9; 5209 chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c
index bc6a67768af1..8c4852114eeb 100644
--- a/drivers/media/common/tuners/tda8290.c
+++ b/drivers/media/common/tuners/tda8290.c
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props)
658#define TDA8290_ID 0x89 658#define TDA8290_ID 0x89
659 u8 reg = 0x1f, id; 659 u8 reg = 0x1f, id;
660 struct i2c_msg msg_read[] = { 660 struct i2c_msg msg_read[] = {
661 { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 661 { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
662 { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 662 { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
663 }; 663 };
664 664
665 /* detect tda8290 */ 665 /* detect tda8290 */
666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 666 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
667 printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 667 printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
668 __func__, reg); 668 __func__, reg);
669 return -ENODEV; 669 return -ENODEV;
670 } 670 }
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props)
685#define TDA8295C2_ID 0x8b 685#define TDA8295C2_ID 0x8b
686 u8 reg = 0x2f, id; 686 u8 reg = 0x2f, id;
687 struct i2c_msg msg_read[] = { 687 struct i2c_msg msg_read[] = {
688 { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, 688 { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg },
689 { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, 689 { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id },
690 }; 690 };
691 691
692 /* detect tda8290 */ 692 /* detect tda8295 */
693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { 693 if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) {
694 printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", 694 printk(KERN_WARNING "%s: couldn't read register 0x%02x\n",
695 __func__, reg); 695 __func__, reg);
696 return -ENODEV; 696 return -ENODEV;
697 } 697 }
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
index defd83964ce2..193cdb77b76a 100644
--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
@@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap)
870 return 0; 870 return 0;
871} 871}
872 872
873static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index,
874 u16 pid, int onoff)
875{
876 struct dib0700_state *st = adapter->dev->priv;
877 if (st->is_dib7000pc)
878 return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
879 return dib7000m_pid_filter(adapter->fe, index, pid, onoff);
880}
881
882static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff)
883{
884 struct dib0700_state *st = adapter->dev->priv;
885 if (st->is_dib7000pc)
886 return dib7000p_pid_filter_ctrl(adapter->fe, onoff);
887 return dib7000m_pid_filter_ctrl(adapter->fe, onoff);
888}
889
873static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) 890static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff)
874{ 891{
875 return dib7000p_pid_filter(adapter->fe, index, pid, onoff); 892 return dib7000p_pid_filter(adapter->fe, index, pid, onoff);
@@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = {
1875 { 1892 {
1876 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, 1893 .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
1877 .pid_filter_count = 32, 1894 .pid_filter_count = 32,
1878 .pid_filter = stk70x0p_pid_filter, 1895 .pid_filter = stk7700p_pid_filter,
1879 .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, 1896 .pid_filter_ctrl = stk7700p_pid_filter_ctrl,
1880 .frontend_attach = stk7700p_frontend_attach, 1897 .frontend_attach = stk7700p_frontend_attach,
1881 .tuner_attach = stk7700p_tuner_attach, 1898 .tuner_attach = stk7700p_tuner_attach,
1882 1899
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c
index 9eea4188303b..46ccd01a7696 100644
--- a/drivers/media/dvb/dvb-usb/lmedm04.c
+++ b/drivers/media/dvb/dvb-usb/lmedm04.c
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev,
659} 659}
660 660
661/* Default firmware for LME2510C */ 661/* Default firmware for LME2510C */
662 const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; 662 char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw";
663 663
664static void lme_coldreset(struct usb_device *dev) 664static void lme_coldreset(struct usb_device *dev)
665{ 665{
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = {
1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER, 1006 .caps = DVB_USB_IS_AN_I2C_ADAPTER,
1007 .usb_ctrl = DEVICE_SPECIFIC, 1007 .usb_ctrl = DEVICE_SPECIFIC,
1008 .download_firmware = lme2510_download_firmware, 1008 .download_firmware = lme2510_download_firmware,
1009 .firmware = lme_firmware, 1009 .firmware = (const char *)&lme_firmware,
1010 .size_of_priv = sizeof(struct lme2510_state), 1010 .size_of_priv = sizeof(struct lme2510_state),
1011 .num_adapters = 1, 1011 .num_adapters = 1,
1012 .adapter = { 1012 .adapter = {
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit);
1109 1109
1110MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); 1110MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>");
1111MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); 1111MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0");
1112 MODULE_VERSION("1.74"); 1112 MODULE_VERSION("1.75");
1113MODULE_LICENSE("GPL"); 1113MODULE_LICENSE("GPL");
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c
index c7f5ccf54aa5..289a79837f24 100644
--- a/drivers/media/dvb/frontends/dib7000m.c
+++ b/drivers/media/dvb/frontends/dib7000m.c
@@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di
1285} 1285}
1286EXPORT_SYMBOL(dib7000m_get_i2c_master); 1286EXPORT_SYMBOL(dib7000m_get_i2c_master);
1287 1287
1288int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff)
1289{
1290 struct dib7000m_state *state = fe->demodulator_priv;
1291 u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef;
1292 val |= (onoff & 0x1) << 4;
1293 dprintk("PID filter enabled %d", onoff);
1294 return dib7000m_write_word(state, 294 + state->reg_offs, val);
1295}
1296EXPORT_SYMBOL(dib7000m_pid_filter_ctrl);
1297
1298int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff)
1299{
1300 struct dib7000m_state *state = fe->demodulator_priv;
1301 dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff);
1302 return dib7000m_write_word(state, 300 + state->reg_offs + id,
1303 onoff ? (1 << 13) | pid : 0);
1304}
1305EXPORT_SYMBOL(dib7000m_pid_filter);
1306
1288#if 0 1307#if 0
1289/* used with some prototype boards */ 1308/* used with some prototype boards */
1290int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, 1309int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods,
diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h
index 113819ce9f0d..81fcf2241c64 100644
--- a/drivers/media/dvb/frontends/dib7000m.h
+++ b/drivers/media/dvb/frontends/dib7000m.h
@@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
46extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, 46extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *,
47 enum dibx000_i2c_interface, 47 enum dibx000_i2c_interface,
48 int); 48 int);
49extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff);
50extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff);
49#else 51#else
50static inline 52static inline
51struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, 53struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap,
@@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod,
63 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); 65 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
64 return NULL; 66 return NULL;
65} 67}
68static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id,
69 u16 pid, u8 onoff)
70{
71 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
72 return -ENODEV;
73}
74
75static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe,
76 uint8_t onoff)
77{
78 printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
79 return -ENODEV;
80}
66#endif 81#endif
67 82
68/* TODO 83/* TODO
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c
index 59feeb84aec7..10a432a79d00 100644
--- a/drivers/media/dvb/mantis/mantis_pci.c
+++ b/drivers/media/dvb/mantis/mantis_pci.c
@@ -22,7 +22,6 @@
22#include <linux/moduleparam.h> 22#include <linux/moduleparam.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <asm/io.h> 24#include <asm/io.h>
25#include <asm/pgtable.h>
26#include <asm/page.h> 25#include <asm/page.h>
27#include <linux/kmod.h> 26#include <linux/kmod.h>
28#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c
index 73230ff93b8a..01f258a2a57a 100644
--- a/drivers/media/rc/ir-raw.c
+++ b/drivers/media/rc/ir-raw.c
@@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
112{ 112{
113 ktime_t now; 113 ktime_t now;
114 s64 delta; /* ns */ 114 s64 delta; /* ns */
115 struct ir_raw_event ev; 115 DEFINE_IR_RAW_EVENT(ev);
116 int rc = 0; 116 int rc = 0;
117 117
118 if (!dev->raw) 118 if (!dev->raw)
@@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type)
125 * being called for the first time, note that delta can't 125 * being called for the first time, note that delta can't
126 * possibly be negative. 126 * possibly be negative.
127 */ 127 */
128 ev.duration = 0;
129 if (delta > IR_MAX_DURATION || !dev->raw->last_type) 128 if (delta > IR_MAX_DURATION || !dev->raw->last_type)
130 type |= IR_START_EVENT; 129 type |= IR_START_EVENT;
131 else 130 else
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
index 6df0a4980645..e4f8eac7f717 100644
--- a/drivers/media/rc/mceusb.c
+++ b/drivers/media/rc/mceusb.c
@@ -148,6 +148,7 @@ enum mceusb_model_type {
148 MCE_GEN2_TX_INV, 148 MCE_GEN2_TX_INV,
149 POLARIS_EVK, 149 POLARIS_EVK,
150 CX_HYBRID_TV, 150 CX_HYBRID_TV,
151 MULTIFUNCTION,
151}; 152};
152 153
153struct mceusb_model { 154struct mceusb_model {
@@ -155,9 +156,10 @@ struct mceusb_model {
155 u32 mce_gen2:1; 156 u32 mce_gen2:1;
156 u32 mce_gen3:1; 157 u32 mce_gen3:1;
157 u32 tx_mask_normal:1; 158 u32 tx_mask_normal:1;
158 u32 is_polaris:1;
159 u32 no_tx:1; 159 u32 no_tx:1;
160 160
161 int ir_intfnum;
162
161 const char *rc_map; /* Allow specify a per-board map */ 163 const char *rc_map; /* Allow specify a per-board map */
162 const char *name; /* per-board name */ 164 const char *name; /* per-board name */
163}; 165};
@@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = {
179 .tx_mask_normal = 1, 181 .tx_mask_normal = 1,
180 }, 182 },
181 [POLARIS_EVK] = { 183 [POLARIS_EVK] = {
182 .is_polaris = 1,
183 /* 184 /*
184 * In fact, the EVK is shipped without 185 * In fact, the EVK is shipped without
185 * remotes, but we should have something handy, 186 * remotes, but we should have something handy,
@@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = {
189 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 190 .name = "Conexant Hybrid TV (cx231xx) MCE IR",
190 }, 191 },
191 [CX_HYBRID_TV] = { 192 [CX_HYBRID_TV] = {
192 .is_polaris = 1,
193 .no_tx = 1, /* tx isn't wired up at all */ 193 .no_tx = 1, /* tx isn't wired up at all */
194 .name = "Conexant Hybrid TV (cx231xx) MCE IR", 194 .name = "Conexant Hybrid TV (cx231xx) MCE IR",
195 }, 195 },
196 [MULTIFUNCTION] = {
197 .mce_gen2 = 1,
198 .ir_intfnum = 2,
199 },
196}; 200};
197 201
198static struct usb_device_id mceusb_dev_table[] = { 202static struct usb_device_id mceusb_dev_table[] = {
@@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = {
216 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, 220 { USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
217 /* Philips/Spinel plus IR transceiver for ASUS */ 221 /* Philips/Spinel plus IR transceiver for ASUS */
218 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, 222 { USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
219 /* Realtek MCE IR Receiver */ 223 /* Realtek MCE IR Receiver and card reader */
220 { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, 224 { USB_DEVICE(VENDOR_REALTEK, 0x0161),
225 .driver_info = MULTIFUNCTION },
221 /* SMK/Toshiba G83C0004D410 */ 226 /* SMK/Toshiba G83C0004D410 */
222 { USB_DEVICE(VENDOR_SMK, 0x031d), 227 { USB_DEVICE(VENDOR_SMK, 0x031d),
223 .driver_info = MCE_GEN2_TX_INV }, 228 .driver_info = MCE_GEN2_TX_INV },
@@ -1101,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
1101 bool is_gen3; 1106 bool is_gen3;
1102 bool is_microsoft_gen1; 1107 bool is_microsoft_gen1;
1103 bool tx_mask_normal; 1108 bool tx_mask_normal;
1104 bool is_polaris; 1109 int ir_intfnum;
1105 1110
1106 dev_dbg(&intf->dev, "%s called\n", __func__); 1111 dev_dbg(&intf->dev, "%s called\n", __func__);
1107 1112
@@ -1110,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
1110 is_gen3 = mceusb_model[model].mce_gen3; 1115 is_gen3 = mceusb_model[model].mce_gen3;
1111 is_microsoft_gen1 = mceusb_model[model].mce_gen1; 1116 is_microsoft_gen1 = mceusb_model[model].mce_gen1;
1112 tx_mask_normal = mceusb_model[model].tx_mask_normal; 1117 tx_mask_normal = mceusb_model[model].tx_mask_normal;
1113 is_polaris = mceusb_model[model].is_polaris; 1118 ir_intfnum = mceusb_model[model].ir_intfnum;
1114 1119
1115 if (is_polaris) { 1120 /* There are multi-function devices with non-IR interfaces */
1116 /* Interface 0 is IR */ 1121 if (idesc->desc.bInterfaceNumber != ir_intfnum)
1117 if (idesc->desc.bInterfaceNumber) 1122 return -ENODEV;
1118 return -ENODEV;
1119 }
1120 1123
1121 /* step through the endpoints to find first bulk in and out endpoint */ 1124 /* step through the endpoints to find first bulk in and out endpoint */
1122 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { 1125 for (i = 0; i < idesc->desc.bNumEndpoints; ++i) {
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c
index 273d9d674792..d4d64492a057 100644
--- a/drivers/media/rc/nuvoton-cir.c
+++ b/drivers/media/rc/nuvoton-cir.c
@@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt)
385 385
386static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) 386static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
387{ 387{
388 /* set number of bytes needed for wake key comparison (default 67) */ 388 /* set number of bytes needed for wake from s3 (default 65) */
389 nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP); 389 nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
390 CIR_WAKE_FIFO_CMP_DEEP);
390 391
391 /* set tolerance/variance allowed per byte during wake compare */ 392 /* set tolerance/variance allowed per byte during wake compare */
392 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE, 393 nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h
index 1df82351cb03..048135eea702 100644
--- a/drivers/media/rc/nuvoton-cir.h
+++ b/drivers/media/rc/nuvoton-cir.h
@@ -305,8 +305,11 @@ struct nvt_dev {
305#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 305#define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20
306#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 306#define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10
307 307
308 /* CIR Wake FIFO buffer is 67 bytes long */ 308 /*
309 #define CIR_WAKE_FIFO_LEN 67 309 * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes
310 * the system comparing only 65 bytes (fails with this set to 67)
311 */
312#define CIR_WAKE_FIFO_CMP_BYTES 65
310/* CIR Wake byte comparison tolerance */ 313/* CIR Wake byte comparison tolerance */
311#define CIR_WAKE_CMP_TOLERANCE 5 314#define CIR_WAKE_CMP_TOLERANCE 5
312 315
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index 512a2f4ada0e..5b4422ef4e6d 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -850,7 +850,7 @@ static ssize_t store_protocols(struct device *device,
850 count++; 850 count++;
851 } else { 851 } else {
852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) { 852 for (i = 0; i < ARRAY_SIZE(proto_names); i++) {
853 if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) { 853 if (!strcasecmp(tmp, proto_names[i].name)) {
854 tmp += strlen(proto_names[i].name); 854 tmp += strlen(proto_names[i].name);
855 mask = proto_names[i].type; 855 mask = proto_names[i].type;
856 break; 856 break;
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c
index e41e4ad5cc40..9c475c600fc9 100644
--- a/drivers/media/video/au0828/au0828-video.c
+++ b/drivers/media/video/au0828/au0828-video.c
@@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv,
1758 if (rc < 0) 1758 if (rc < 0)
1759 return rc; 1759 return rc;
1760 1760
1761 return videobuf_reqbufs(&fh->vb_vidq, rb); 1761 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1762 rc = videobuf_reqbufs(&fh->vb_vidq, rb);
1763 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1764 rc = videobuf_reqbufs(&fh->vb_vbiq, rb);
1765
1766 return rc;
1762} 1767}
1763 1768
1764static int vidioc_querybuf(struct file *file, void *priv, 1769static int vidioc_querybuf(struct file *file, void *priv,
@@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv,
1772 if (rc < 0) 1777 if (rc < 0)
1773 return rc; 1778 return rc;
1774 1779
1775 return videobuf_querybuf(&fh->vb_vidq, b); 1780 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1781 rc = videobuf_querybuf(&fh->vb_vidq, b);
1782 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1783 rc = videobuf_querybuf(&fh->vb_vbiq, b);
1784
1785 return rc;
1776} 1786}
1777 1787
1778static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) 1788static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b)
1785 if (rc < 0) 1795 if (rc < 0)
1786 return rc; 1796 return rc;
1787 1797
1788 return videobuf_qbuf(&fh->vb_vidq, b); 1798 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1799 rc = videobuf_qbuf(&fh->vb_vidq, b);
1800 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1801 rc = videobuf_qbuf(&fh->vb_vbiq, b);
1802
1803 return rc;
1789} 1804}
1790 1805
1791static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) 1806static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
@@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
1806 dev->greenscreen_detected = 0; 1821 dev->greenscreen_detected = 0;
1807 } 1822 }
1808 1823
1809 return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); 1824 if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
1825 rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK);
1826 else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
1827 rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK);
1828
1829 return rc;
1810} 1830}
1811 1831
1812static struct v4l2_file_operations au0828_v4l_fops = { 1832static struct v4l2_file_operations au0828_v4l_fops = {
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c
index 87177733cf92..68ad1963f421 100644
--- a/drivers/media/video/cx18/cx18-cards.c
+++ b/drivers/media/video/cx18/cx18-cards.c
@@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = {
95 .i2c = &cx18_i2c_std, 95 .i2c = &cx18_i2c_std,
96}; 96};
97 97
98static const struct cx18_card cx18_card_hvr1600_s5h1411 = {
99 .type = CX18_CARD_HVR_1600_S5H1411,
100 .name = "Hauppauge HVR-1600",
101 .comment = "Simultaneous Digital and Analog TV capture supported\n",
102 .v4l2_capabilities = CX18_CAP_ENCODER,
103 .hw_audio_ctrl = CX18_HW_418_AV,
104 .hw_muxer = CX18_HW_CS5345,
105 .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER |
106 CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL |
107 CX18_HW_Z8F0811_IR_HAUP,
108 .video_inputs = {
109 { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 },
110 { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 },
111 { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 },
112 { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 },
113 { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 },
114 },
115 .audio_inputs = {
116 { CX18_CARD_INPUT_AUD_TUNER,
117 CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 },
118 { CX18_CARD_INPUT_LINE_IN1,
119 CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 },
120 { CX18_CARD_INPUT_LINE_IN2,
121 CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 },
122 },
123 .radio_input = { CX18_CARD_INPUT_AUD_TUNER,
124 CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 },
125 .ddr = {
126 /* ESMT M13S128324A-5B memory */
127 .chip_config = 0x003,
128 .refresh = 0x30c,
129 .timing1 = 0x44220e82,
130 .timing2 = 0x08,
131 .tune_lane = 0,
132 .initial_emrs = 0,
133 },
134 .gpio_init.initial_value = 0x3001,
135 .gpio_init.direction = 0x3001,
136 .gpio_i2c_slave_reset = {
137 .active_lo_mask = 0x3001,
138 .msecs_asserted = 10,
139 .msecs_recovery = 40,
140 .ir_reset_mask = 0x0001,
141 },
142 .i2c = &cx18_i2c_std,
143};
144
98static const struct cx18_card cx18_card_hvr1600_samsung = { 145static const struct cx18_card cx18_card_hvr1600_samsung = {
99 .type = CX18_CARD_HVR_1600_SAMSUNG, 146 .type = CX18_CARD_HVR_1600_SAMSUNG,
100 .name = "Hauppauge HVR-1600 (Preproduction)", 147 .name = "Hauppauge HVR-1600 (Preproduction)",
@@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = {
523 &cx18_card_toshiba_qosmio_dvbt, 570 &cx18_card_toshiba_qosmio_dvbt,
524 &cx18_card_leadtek_pvr2100, 571 &cx18_card_leadtek_pvr2100,
525 &cx18_card_leadtek_dvr3100h, 572 &cx18_card_leadtek_dvr3100h,
526 &cx18_card_gotview_dvd3 573 &cx18_card_gotview_dvd3,
574 &cx18_card_hvr1600_s5h1411
527}; 575};
528 576
529const struct cx18_card *cx18_get_card(u16 index) 577const struct cx18_card *cx18_get_card(u16 index)
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
index 944af8adbe0c..b1c3cbd92743 100644
--- a/drivers/media/video/cx18/cx18-driver.c
+++ b/drivers/media/video/cx18/cx18-driver.c
@@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype,
157 "\t\t\t 7 = Leadtek WinFast PVR2100\n" 157 "\t\t\t 7 = Leadtek WinFast PVR2100\n"
158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" 158 "\t\t\t 8 = Leadtek WinFast DVR3100 H\n"
159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" 159 "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n"
160 "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n"
160 "\t\t\t 0 = Autodetect (default)\n" 161 "\t\t\t 0 = Autodetect (default)\n"
161 "\t\t\t-1 = Ignore this card\n\t\t"); 162 "\t\t\t-1 = Ignore this card\n\t\t");
162MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); 163MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60");
@@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
337 switch (cx->card->type) { 338 switch (cx->card->type) {
338 case CX18_CARD_HVR_1600_ESMT: 339 case CX18_CARD_HVR_1600_ESMT:
339 case CX18_CARD_HVR_1600_SAMSUNG: 340 case CX18_CARD_HVR_1600_SAMSUNG:
341 case CX18_CARD_HVR_1600_S5H1411:
340 tveeprom_hauppauge_analog(&c, tv, eedata); 342 tveeprom_hauppauge_analog(&c, tv, eedata);
341 break; 343 break;
342 case CX18_CARD_YUAN_MPC718: 344 case CX18_CARD_YUAN_MPC718:
@@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx)
365 from the model number. Use the cardtype module option if you 367 from the model number. Use the cardtype module option if you
366 have one of these preproduction models. */ 368 have one of these preproduction models. */
367 switch (tv.model) { 369 switch (tv.model) {
368 case 74000 ... 74999: 370 case 74301: /* Retail models */
371 case 74321:
372 case 74351: /* OEM models */
373 case 74361:
374 /* Digital side is s5h1411/tda18271 */
375 cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411);
376 break;
377 case 74021: /* Retail models */
378 case 74031:
379 case 74041:
380 case 74141:
381 case 74541: /* OEM models */
382 case 74551:
383 case 74591:
384 case 74651:
385 case 74691:
386 case 74751:
387 case 74891:
388 /* Digital side is s5h1409/mxl5005s */
369 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 389 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
370 break; 390 break;
371 case 0x718: 391 case 0x718:
@@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx)
377 CX18_ERR("Invalid EEPROM\n"); 397 CX18_ERR("Invalid EEPROM\n");
378 return; 398 return;
379 default: 399 default:
380 CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model); 400 CX18_ERR("Unknown model %d, defaulting to original HVR-1600 "
401 "(cardtype=1)\n", tv.model);
381 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); 402 cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT);
382 break; 403 break;
383 } 404 }
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h
index 306caac6d3fc..f736679d2517 100644
--- a/drivers/media/video/cx18/cx18-driver.h
+++ b/drivers/media/video/cx18/cx18-driver.h
@@ -85,7 +85,8 @@
85#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ 85#define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */
86#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ 86#define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */
87#define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ 87#define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */
88 #define CX18_CARD_LAST 8 88 #define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271*/
89#define CX18_CARD_LAST 9
89 90
90#define CX18_ENC_STREAM_TYPE_MPG 0 91#define CX18_ENC_STREAM_TYPE_MPG 0
91#define CX18_ENC_STREAM_TYPE_TS 1 92#define CX18_ENC_STREAM_TYPE_TS 1
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c
index f0381d62518d..f41922bd4020 100644
--- a/drivers/media/video/cx18/cx18-dvb.c
+++ b/drivers/media/video/cx18/cx18-dvb.c
@@ -29,6 +29,8 @@
29#include "cx18-gpio.h" 29#include "cx18-gpio.h"
30#include "s5h1409.h" 30#include "s5h1409.h"
31#include "mxl5005s.h" 31#include "mxl5005s.h"
32#include "s5h1411.h"
33#include "tda18271.h"
32#include "zl10353.h" 34#include "zl10353.h"
33 35
34#include <linux/firmware.h> 36#include <linux/firmware.h>
@@ -77,6 +79,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = {
77}; 79};
78 80
79/* 81/*
82 * CX18_CARD_HVR_1600_S5H1411
83 */
84static struct s5h1411_config hcw_s5h1411_config = {
85 .output_mode = S5H1411_SERIAL_OUTPUT,
86 .gpio = S5H1411_GPIO_OFF,
87 .vsb_if = S5H1411_IF_44000,
88 .qam_if = S5H1411_IF_4000,
89 .inversion = S5H1411_INVERSION_ON,
90 .status_mode = S5H1411_DEMODLOCKING,
91 .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK,
92};
93
94static struct tda18271_std_map hauppauge_tda18271_std_map = {
95 .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
96 .if_lvl = 6, .rfagc_top = 0x37 },
97 .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0,
98 .if_lvl = 6, .rfagc_top = 0x37 },
99};
100
101static struct tda18271_config hauppauge_tda18271_config = {
102 .std_map = &hauppauge_tda18271_std_map,
103 .gate = TDA18271_GATE_DIGITAL,
104 .output_opt = TDA18271_OUTPUT_LT_OFF,
105};
106
107/*
80 * CX18_CARD_LEADTEK_DVR3100H 108 * CX18_CARD_LEADTEK_DVR3100H
81 */ 109 */
82/* Information/confirmation of proper config values provided by Terry Wu */ 110/* Information/confirmation of proper config values provided by Terry Wu */
@@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed)
244 switch (cx->card->type) { 272 switch (cx->card->type) {
245 case CX18_CARD_HVR_1600_ESMT: 273 case CX18_CARD_HVR_1600_ESMT:
246 case CX18_CARD_HVR_1600_SAMSUNG: 274 case CX18_CARD_HVR_1600_SAMSUNG:
275 case CX18_CARD_HVR_1600_S5H1411:
247 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); 276 v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL);
248 v |= 0x00400000; /* Serial Mode */ 277 v |= 0x00400000; /* Serial Mode */
249 v |= 0x00002000; /* Data Length - Byte */ 278 v |= 0x00002000; /* Data Length - Byte */
@@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream)
455 ret = 0; 484 ret = 0;
456 } 485 }
457 break; 486 break;
487 case CX18_CARD_HVR_1600_S5H1411:
488 dvb->fe = dvb_attach(s5h1411_attach,
489 &hcw_s5h1411_config,
490 &cx->i2c_adap[0]);
491 if (dvb->fe != NULL)
492 dvb_attach(tda18271_attach, dvb->fe,
493 0x60, &cx->i2c_adap[0],
494 &hauppauge_tda18271_config);
495 break;
458 case CX18_CARD_LEADTEK_DVR3100H: 496 case CX18_CARD_LEADTEK_DVR3100H:
459 dvb->fe = dvb_attach(zl10353_attach, 497 dvb->fe = dvb_attach(zl10353_attach,
460 &leadtek_dvr3100h_demod, 498 &leadtek_dvr3100h_demod,
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c
index ed3d8f55029b..307ff543c254 100644
--- a/drivers/media/video/cx23885/cx23885-i2c.c
+++ b/drivers/media/video/cx23885/cx23885-i2c.c
@@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
122 122
123 if (!i2c_wait_done(i2c_adap)) 123 if (!i2c_wait_done(i2c_adap))
124 goto eio; 124 goto eio;
125 if (!i2c_slave_did_ack(i2c_adap)) {
126 retval = -ENXIO;
127 goto err;
128 }
129 if (i2c_debug) { 125 if (i2c_debug) {
130 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); 126 printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]);
131 if (!(ctrl & I2C_NOSTOP)) 127 if (!(ctrl & I2C_NOSTOP))
@@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap,
158 154
159 eio: 155 eio:
160 retval = -EIO; 156 retval = -EIO;
161 err:
162 if (i2c_debug) 157 if (i2c_debug)
163 printk(KERN_ERR " ERR: %d\n", retval); 158 printk(KERN_ERR " ERR: %d\n", retval);
164 return retval; 159 return retval;
@@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
209 204
210 if (!i2c_wait_done(i2c_adap)) 205 if (!i2c_wait_done(i2c_adap))
211 goto eio; 206 goto eio;
212 if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) {
213 retval = -ENXIO;
214 goto err;
215 }
216 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; 207 msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff;
217 if (i2c_debug) { 208 if (i2c_debug) {
218 dprintk(1, " %02x", msg->buf[cnt]); 209 dprintk(1, " %02x", msg->buf[cnt]);
@@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap,
224 215
225 eio: 216 eio:
226 retval = -EIO; 217 retval = -EIO;
227 err:
228 if (i2c_debug) 218 if (i2c_debug)
229 printk(KERN_ERR " ERR: %d\n", retval); 219 printk(KERN_ERR " ERR: %d\n", retval);
230 return retval; 220 return retval;
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c
index 6fc09dd41b9d..35796e035247 100644
--- a/drivers/media/video/cx25840/cx25840-core.c
+++ b/drivers/media/video/cx25840/cx25840-core.c
@@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client,
2015 kfree(state); 2015 kfree(state);
2016 return err; 2016 return err;
2017 } 2017 }
2018 v4l2_ctrl_cluster(2, &state->volume); 2018 if (!is_cx2583x(state))
2019 v4l2_ctrl_cluster(2, &state->volume);
2019 v4l2_ctrl_handler_setup(&state->hdl); 2020 v4l2_ctrl_handler_setup(&state->hdl);
2020 2021
2021 if (client->dev.platform_data) { 2022 if (client->dev.platform_data) {
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c
index 9b4faf009196..9c29e964d400 100644
--- a/drivers/media/video/ivtv/ivtv-irq.c
+++ b/drivers/media/video/ivtv/ivtv-irq.c
@@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
628static void ivtv_irq_dma_err(struct ivtv *itv) 628static void ivtv_irq_dma_err(struct ivtv *itv)
629{ 629{
630 u32 data[CX2341X_MBOX_MAX_DATA]; 630 u32 data[CX2341X_MBOX_MAX_DATA];
631 u32 status;
631 632
632 del_timer(&itv->dma_timer); 633 del_timer(&itv->dma_timer);
634
633 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); 635 ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
636 status = read_reg(IVTV_REG_DMASTATUS);
634 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], 637 IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
635 read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); 638 status, itv->cur_dma_stream);
636 write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); 639 /*
640 * We do *not* write back to the IVTV_REG_DMASTATUS register to
641 * clear the error status, if either the encoder write (0x02) or
642 * decoder read (0x01) bus master DMA operation do not indicate
643 * completed. We can race with the DMA engine, which may have
644 * transitioned to completed status *after* we read the register.
645 * Setting a IVTV_REG_DMASTATUS flag back to "busy" status, after the
646 * DMA engine has completed, will cause the DMA engine to stop working.
647 */
648 status &= 0x3;
649 if (status == 0x3)
650 write_reg(status, IVTV_REG_DMASTATUS);
651
637 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && 652 if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
638 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { 653 itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
639 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; 654 struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];
640 655
641 /* retry */ 656 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
642 if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) 657 /* retry */
658 /*
659 * FIXME - handle cases of DMA error similar to
660 * encoder below, except conditioned on status & 0x1
661 */
643 ivtv_dma_dec_start(s); 662 ivtv_dma_dec_start(s);
644 else 663 return;
645 ivtv_dma_enc_start(s); 664 } else {
646 return; 665 if ((status & 0x2) == 0) {
666 /*
667 * CX2341x Bus Master DMA write is ongoing.
668 * Reset the timer and let it complete.
669 */
670 itv->dma_timer.expires =
671 jiffies + msecs_to_jiffies(600);
672 add_timer(&itv->dma_timer);
673 return;
674 }
675
676 if (itv->dma_retries < 3) {
677 /*
678 * CX2341x Bus Master DMA write has ended.
679 * Retry the write, starting with the first
680 * xfer segment. Just retrying the current
681 * segment is not sufficient.
682 */
683 s->sg_processed = 0;
684 itv->dma_retries++;
685 ivtv_dma_enc_start_xfer(s);
686 return;
687 }
688 /* Too many retries, give up on this one */
689 }
690
647 } 691 }
648 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { 692 if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
649 ivtv_udma_start(itv); 693 ivtv_udma_start(itv);
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c
index c179041d91f8..e7e717800ee2 100644
--- a/drivers/media/video/mem2mem_testdev.c
+++ b/drivers/media/video/mem2mem_testdev.c
@@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev)
1011 v4l2_m2m_release(dev->m2m_dev); 1011 v4l2_m2m_release(dev->m2m_dev);
1012 del_timer_sync(&dev->timer); 1012 del_timer_sync(&dev->timer);
1013 video_unregister_device(dev->vfd); 1013 video_unregister_device(dev->vfd);
1014 video_device_release(dev->vfd);
1015 v4l2_device_unregister(&dev->v4l2_dev); 1014 v4l2_device_unregister(&dev->v4l2_dev);
1016 kfree(dev); 1015 kfree(dev);
1017 1016
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c
index b63f8cafa671..561909b65ce6 100644
--- a/drivers/media/video/s2255drv.c
+++ b/drivers/media/video/s2255drv.c
@@ -57,7 +57,7 @@
57#include <linux/usb.h> 57#include <linux/usb.h>
58 58
59#define S2255_MAJOR_VERSION 1 59#define S2255_MAJOR_VERSION 1
60 #define S2255_MINOR_VERSION 20 60 #define S2255_MINOR_VERSION 21
61#define S2255_RELEASE 0 61#define S2255_RELEASE 0
62#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ 62#define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \
63 S2255_MINOR_VERSION, \ 63 S2255_MINOR_VERSION, \
@@ -312,9 +312,9 @@ struct s2255_fh {
312}; 312};
313 313
314/* current cypress EEPROM firmware version */ 314/* current cypress EEPROM firmware version */
315 #define S2255_CUR_USB_FWVER ((3 << 8) | 6) 315 #define S2255_CUR_USB_FWVER ((3 << 8) | 11)
316/* current DSP FW version */ 316/* current DSP FW version */
317 #define S2255_CUR_DSP_FWVER 8 317 #define S2255_CUR_DSP_FWVER 10102
318/* Need DSP version 5+ for video status feature */ 318/* Need DSP version 5+ for video status feature */
319#define S2255_MIN_DSP_STATUS 5 319#define S2255_MIN_DSP_STATUS 5
320#define S2255_MIN_DSP_COLORFILTER 8 320#define S2255_MIN_DSP_COLORFILTER 8
@@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in,
492 492
493static void s2255_reset_dsppower(struct s2255_dev *dev) 493static void s2255_reset_dsppower(struct s2255_dev *dev)
494{ 494{
495 s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1); 495 s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1);
496 msleep(10); 496 msleep(10);
497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); 497 s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1);
498 msleep(600);
499 s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1);
498 return; 500 return;
499} 501}
500 502
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index e9a3eab7b0cf..8c1d85e27be4 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
621{ 621{
622 int rc; 622 int rc;
623 623
624 workqueue = create_freezeable_workqueue("kmemstick"); 624 workqueue = create_freezable_workqueue("kmemstick");
625 if (!workqueue) 625 if (!workqueue)
626 return -ENOMEM; 626 return -ENOMEM;
627 627
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index f71f22948477..1735c84ff757 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR 76#define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR
77#endif 77#endif
78 78
79 #define MPT_LINUX_VERSION_COMMON "3.04.17" 79 #define MPT_LINUX_VERSION_COMMON "3.04.18"
80 #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" 80 #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18"
81#define WHAT_MAGIC_STRING "@" "(" "#" ")" 81#define WHAT_MAGIC_STRING "@" "(" "#" ")"
82 82
83#define show_mptmod_ver(s,ver) \ 83#define show_mptmod_ver(s,ver) \
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index a3856ed90aef..e8deb8ed0499 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
597} 597}
598 598
599static int 599static int
600mptctl_release(struct inode *inode, struct file *filep)
601{
602 fasync_helper(-1, filep, 0, &async_queue);
603 return 0;
604}
605
606static int
600mptctl_fasync(int fd, struct file *filep, int mode) 607mptctl_fasync(int fd, struct file *filep, int mode)
601{ 608{
602 MPT_ADAPTER *ioc; 609 MPT_ADAPTER *ioc;
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = {
2815 .llseek = no_llseek, 2822 .llseek = no_llseek,
2816 .fasync = mptctl_fasync, 2823 .fasync = mptctl_fasync,
2817 .unlocked_ioctl = mptctl_ioctl, 2824 .unlocked_ioctl = mptctl_ioctl,
2825 .release = mptctl_release,
2818#ifdef CONFIG_COMPAT 2826#ifdef CONFIG_COMPAT
2819 .compat_ioctl = compat_mpctl_ioctl, 2827 .compat_ioctl = compat_mpctl_ioctl,
2820#endif 2828#endif
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 59b8f53d1ece..0d9b82a44540 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
1873 } 1873 }
1874 1874
1875 out: 1875 out:
1876 printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", 1876 printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n",
1877 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); 1877 ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval,
1878 SCpnt, SCpnt->serial_number);
1878 1879
1879 return retval; 1880 return retval;
1880} 1881}
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
1911 1912
1912 vdevice = SCpnt->device->hostdata; 1913 vdevice = SCpnt->device->hostdata;
1913 if (!vdevice || !vdevice->vtarget) { 1914 if (!vdevice || !vdevice->vtarget) {
1914 retval = SUCCESS; 1915 retval = 0;
1915 goto out; 1916 goto out;
1916 } 1917 }
1917 1918
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 6a1f94042612..c45e6305b26f 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc)
143 unsigned long flags; 143 unsigned long flags;
144 struct asic3 *asic; 144 struct asic3 *asic;
145 145
146 desc->chip->ack(irq); 146 desc->irq_data.chip->irq_ack(&desc->irq_data);
147 147
148 asic = desc->handler_data; 148 asic = get_irq_data(irq);
149 149
150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { 150 for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) {
151 u32 status; 151 u32 status;
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c
index 33c923d215c7..fdd8a1b8bc67 100644
--- a/drivers/mfd/davinci_voicecodec.c
+++ b/drivers/mfd/davinci_voicecodec.c
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev)
118 118
119 /* Voice codec interface client */ 119 /* Voice codec interface client */
120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; 120 cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL];
121 cell->name = "davinci_vcif"; 121 cell->name = "davinci-vcif";
122 cell->driver_data = davinci_vc; 122 cell->driver_data = davinci_vc;
123 123
124 /* Voice codec CQ93VC client */ 124 /* Voice codec CQ93VC client */
125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; 125 cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL];
126 cell->name = "cq93vc"; 126 cell->name = "cq93vc-codec";
127 cell->driver_data = davinci_vc; 127 cell->driver_data = davinci_vc;
128 128
129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, 129 ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells,
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 627cf577b16d..e9018d1394ee 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client,
150static inline int __tps6586x_writes(struct i2c_client *client, int reg, 150static inline int __tps6586x_writes(struct i2c_client *client, int reg,
151 int len, uint8_t *val) 151 int len, uint8_t *val)
152{ 152{
153 int ret; 153 int ret, i;
154 154
155 ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); 155 for (i = 0; i < len; i++) {
156 if (ret < 0) { 156 ret = __tps6586x_write(client, reg + i, *(val + i));
157 dev_err(&client->dev, "failed writings to 0x%02x\n", reg); 157 if (ret < 0)
158 return ret; 158 return ret;
159 } 159 }
160 160
161 return 0; 161 return 0;
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c
index 000cb414a78a..92b85e28a15e 100644
--- a/drivers/mfd/ucb1x00-ts.c
+++ b/drivers/mfd/ucb1x00-ts.c
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
385 idev->close = ucb1x00_ts_close; 385 idev->close = ucb1x00_ts_close;
386 386
387 __set_bit(EV_ABS, idev->evbit); 387 __set_bit(EV_ABS, idev->evbit);
388 __set_bit(ABS_X, idev->absbit);
389 __set_bit(ABS_Y, idev->absbit);
390 __set_bit(ABS_PRESSURE, idev->absbit);
391 388
392 input_set_drvdata(idev, ts); 389 input_set_drvdata(idev, ts);
393 390
391 ucb1x00_adc_enable(ts->ucb);
392 ts->x_res = ucb1x00_ts_read_xres(ts);
393 ts->y_res = ucb1x00_ts_read_yres(ts);
394 ucb1x00_adc_disable(ts->ucb);
395
396 input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
397 input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
398 input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);
399
394 err = input_register_device(idev); 400 err = input_register_device(idev);
395 if (err) 401 if (err)
396 goto fail; 402 goto fail;
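
Instead of only marking ABS_X/ABS_Y/ABS_PRESSURE as capable bits, the ucb1x00-ts hunk samples the touchscreen resolution over the ADC at probe time and feeds real ranges to input_set_abs_params(), which also marks the axis in absbit for you (EV_ABS still has to be set by the caller, which is why line 387 stays). The general shape of that setup, sketched with resolution values assumed to come from hardware:

#include <linux/input.h>

static int my_ts_register(struct input_dev *idev, int x_res, int y_res)
{
	/* range [0, res], no fuzz, no flat zone */
	input_set_abs_params(idev, ABS_X, 0, x_res, 0, 0);
	input_set_abs_params(idev, ABS_Y, 0, y_res, 0, 0);
	input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);

	return input_register_device(idev);
}
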
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 41233c7fa581..f4016a075fd6 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev)
246 struct wm8994 *wm8994 = dev_get_drvdata(dev); 246 struct wm8994 *wm8994 = dev_get_drvdata(dev);
247 int ret; 247 int ret;
248 248
249 /* Don't actually go through with the suspend if the CODEC is
 250 * still active (e.g., for audio passthrough from CP). */
 250 * still active (e.g., for audio passthrough from CP). */
251 ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1);
252 if (ret < 0) {
253 dev_err(dev, "Failed to read power status: %d\n", ret);
254 } else if (ret & WM8994_VMID_SEL_MASK) {
255 dev_dbg(dev, "CODEC still active, ignoring suspend\n");
256 return 0;
257 }
258
249 /* GPIO configuration state is saved here since we may be configuring 259 /* GPIO configuration state is saved here since we may be configuring
250 * the GPIO alternate functions even if we're not using the gpiolib 260 * the GPIO alternate functions even if we're not using the gpiolib
251 * driver for them. 261 * driver for them.
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev)
261 if (ret < 0) 271 if (ret < 0)
262 dev_err(dev, "Failed to save LDO registers: %d\n", ret); 272 dev_err(dev, "Failed to save LDO registers: %d\n", ret);
263 273
274 wm8994->suspended = true;
275
264 ret = regulator_bulk_disable(wm8994->num_supplies, 276 ret = regulator_bulk_disable(wm8994->num_supplies,
265 wm8994->supplies); 277 wm8994->supplies);
266 if (ret != 0) { 278 if (ret != 0) {
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev)
276 struct wm8994 *wm8994 = dev_get_drvdata(dev); 288 struct wm8994 *wm8994 = dev_get_drvdata(dev);
277 int ret; 289 int ret;
278 290
291 /* We may have lied to the PM core about suspending */
292 if (!wm8994->suspended)
293 return 0;
294
279 ret = regulator_bulk_enable(wm8994->num_supplies, 295 ret = regulator_bulk_enable(wm8994->num_supplies,
280 wm8994->supplies); 296 wm8994->supplies);
281 if (ret != 0) { 297 if (ret != 0) {
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev)
298 if (ret < 0) 314 if (ret < 0)
299 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); 315 dev_err(dev, "Failed to restore GPIO registers: %d\n", ret);
300 316
317 wm8994->suspended = false;
318
301 return 0; 319 return 0;
302} 320}
303#endif 321#endif
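
The wm8994 hunks add two guards: suspend is skipped while the CODEC still holds VMID (so audio routed through it keeps running), and a wm8994->suspended flag records whether the suspend path really ran so resume can bail out symmetrically. A generic sketch of that "maybe-suspend" pattern, with foo_* standing in for a hypothetical driver:

#include <linux/device.h>

struct foo_priv {
	bool suspended;		/* did we actually power down? */
	bool still_in_use;	/* e.g. read back from a status register */
};

static int foo_suspend(struct device *dev)
{
	struct foo_priv *foo = dev_get_drvdata(dev);

	if (foo->still_in_use) {
		dev_dbg(dev, "device busy, ignoring suspend\n");
		return 0;		/* tell the PM core we succeeded anyway */
	}

	/* ... save state, drop regulators ... */
	foo->suspended = true;
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_priv *foo = dev_get_drvdata(dev);

	/* we may have skipped the real suspend above */
	if (!foo->suspended)
		return 0;

	/* ... re-enable supplies, restore registers ... */
	foo->suspended = false;
	return 0;
}
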
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c
index 63ee4c1a5315..b6e1c9a6679e 100644
--- a/drivers/misc/bmp085.c
+++ b/drivers/misc/bmp085.c
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = {
449 { "bmp085", 0 }, 449 { "bmp085", 0 },
450 { } 450 { }
451}; 451};
452MODULE_DEVICE_TABLE(i2c, bmp085_id);
452 453
453static struct i2c_driver bmp085_driver = { 454static struct i2c_driver bmp085_driver = {
454 .driver = { 455 .driver = {
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c
index 5f6852dff40b..44d4475a09dd 100644
--- a/drivers/misc/tifm_core.c
+++ b/drivers/misc/tifm_core.c
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
329{ 329{
330 int rc; 330 int rc;
331 331
332 workqueue = create_freezeable_workqueue("tifm"); 332 workqueue = create_freezable_workqueue("tifm");
333 if (!workqueue) 333 if (!workqueue)
334 return -ENOMEM; 334 return -ENOMEM;
335 335
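
This and several later hunks (vmw_balloon, r852, sm_ftl, mcp251x) pick up the 2.6.38 spelling fix that renamed create_freezeable_workqueue() to create_freezable_workqueue(); the semantics are unchanged, the workqueue is still frozen alongside user space during suspend. Typical usage, sketched with invented names:

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* runs from the freezable workqueue; never runs while tasks are frozen */
}

static int __init my_init(void)
{
	my_wq = create_freezable_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);
	return 0;
}
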
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 4d2ea8e80140..6df5a55da110 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
785 if (x86_hyper != &x86_hyper_vmware) 785 if (x86_hyper != &x86_hyper_vmware)
786 return -ENODEV; 786 return -ENODEV;
787 787
788 vmballoon_wq = create_freezeable_workqueue("vmmemctl"); 788 vmballoon_wq = create_freezable_workqueue("vmmemctl");
789 if (!vmballoon_wq) { 789 if (!vmballoon_wq) {
790 pr_err("failed to create workqueue\n"); 790 pr_err("failed to create workqueue\n");
791 return -ENOMEM; 791 return -ENOMEM;
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 6625c057be05..150b5f3cd401 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work)
1529 * still present 1529 * still present
1530 */ 1530 */
1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead 1531 if (host->bus_ops && host->bus_ops->detect && !host->bus_dead
1532 && mmc_card_is_removable(host)) 1532 && !(host->caps & MMC_CAP_NONREMOVABLE))
1533 host->bus_ops->detect(host); 1533 host->bus_ops->detect(host);
1534 1534
1535 /* 1535 /*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5c4a54d9b6a4..ebc62ad4cc56 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host)
792 */ 792 */
793 mmc_release_host(host); 793 mmc_release_host(host);
794 err = mmc_add_card(host->card); 794 err = mmc_add_card(host->card);
795 mmc_claim_host(host);
796 if (err) 795 if (err)
797 goto remove_added; 796 goto remove_added;
798 797
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host)
805 goto remove_added; 804 goto remove_added;
806 } 805 }
807 806
807 mmc_claim_host(host);
808 return 0; 808 return 0;
809 809
810 810
811remove_added: 811remove_added:
812 /* Remove without lock if the device has been added. */ 812 /* Remove without lock if the device has been added. */
813 mmc_release_host(host);
814 mmc_sdio_remove(host); 813 mmc_sdio_remove(host);
815 mmc_claim_host(host); 814 mmc_claim_host(host);
816remove: 815remove:
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index a8c3e1c9b02a..4aaa88f8ab5f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -1230,10 +1230,32 @@ static int inval_cache_and_wait_for_operation(
1230 sleep_time = chip_op_time / 2; 1230 sleep_time = chip_op_time / 2;
1231 1231
1232 for (;;) { 1232 for (;;) {
1233 if (chip->state != chip_state) {
1234 /* Someone's suspended the operation: sleep */
1235 DECLARE_WAITQUEUE(wait, current);
1236 set_current_state(TASK_UNINTERRUPTIBLE);
1237 add_wait_queue(&chip->wq, &wait);
1238 mutex_unlock(&chip->mutex);
1239 schedule();
1240 remove_wait_queue(&chip->wq, &wait);
1241 mutex_lock(&chip->mutex);
1242 continue;
1243 }
1244
1233 status = map_read(map, cmd_adr); 1245 status = map_read(map, cmd_adr);
1234 if (map_word_andequal(map, status, status_OK, status_OK)) 1246 if (map_word_andequal(map, status, status_OK, status_OK))
1235 break; 1247 break;
1236 1248
1249 if (chip->erase_suspended && chip_state == FL_ERASING) {
 1250 /* Erase suspend occurred while sleeping: reset timeout */
1251 timeo = reset_timeo;
1252 chip->erase_suspended = 0;
1253 }
1254 if (chip->write_suspended && chip_state == FL_WRITING) {
 1255 /* Write suspend occurred while sleeping: reset timeout */
1256 timeo = reset_timeo;
1257 chip->write_suspended = 0;
1258 }
1237 if (!timeo) { 1259 if (!timeo) {
1238 map_write(map, CMD(0x70), cmd_adr); 1260 map_write(map, CMD(0x70), cmd_adr);
1239 chip->state = FL_STATUS; 1261 chip->state = FL_STATUS;
@@ -1257,27 +1279,6 @@ static int inval_cache_and_wait_for_operation(
1257 timeo--; 1279 timeo--;
1258 } 1280 }
1259 mutex_lock(&chip->mutex); 1281 mutex_lock(&chip->mutex);
1260
1261 while (chip->state != chip_state) {
1262 /* Someone's suspended the operation: sleep */
1263 DECLARE_WAITQUEUE(wait, current);
1264 set_current_state(TASK_UNINTERRUPTIBLE);
1265 add_wait_queue(&chip->wq, &wait);
1266 mutex_unlock(&chip->mutex);
1267 schedule();
1268 remove_wait_queue(&chip->wq, &wait);
1269 mutex_lock(&chip->mutex);
1270 }
1271 if (chip->erase_suspended && chip_state == FL_ERASING) {
1272 /* Erase suspend occured while sleep: reset timeout */
1273 timeo = reset_timeo;
1274 chip->erase_suspended = 0;
1275 }
1276 if (chip->write_suspended && chip_state == FL_WRITING) {
1277 /* Write suspend occured while sleep: reset timeout */
1278 timeo = reset_timeo;
1279 chip->write_suspended = 0;
1280 }
1281 } 1282 }
1282 1283
1283 /* Done and happy. */ 1284 /* Done and happy. */
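 
The cfi_cmdset_0001 hunk moves the "someone suspended the operation" wait and the erase/write timeout reset to the top of the polling loop, so a suspended chip is noticed before map_read() touches the status register rather than after a full poll cycle. The wait itself is the classic wait-queue sleep pattern; reduced to a standalone sketch (the lock/state names are invented):

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mutex.h>

/* Sleep, releasing "lock", until *state == wanted, as in the hunk above;
 * the waiter re-takes the lock before re-checking the condition. */
static void wait_for_state(wait_queue_head_t *wq, struct mutex *lock,
			   int *state, int wanted)
{
	while (*state != wanted) {
		DECLARE_WAITQUEUE(wait, current);

		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(wq, &wait);
		mutex_unlock(lock);
		schedule();		/* woken by whoever restores the state */
		remove_wait_queue(wq, &wait);
		mutex_lock(lock);
	}
}
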
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
index d72a5fb2d041..4e1be51cc122 100644
--- a/drivers/mtd/chips/jedec_probe.c
+++ b/drivers/mtd/chips/jedec_probe.c
@@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi)
1935} 1935}
1936 1936
1937 1937
1938static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) 1938static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index)
1939{ 1939{
1940 int i,num_erase_regions; 1940 int i,num_erase_regions;
1941 uint8_t uaddr; 1941 uint8_t uaddr;
1942 1942
1943 if (! (jedec_table[index].devtypes & p_cfi->device_type)) { 1943 if (!(jedec_table[index].devtypes & cfi->device_type)) {
1944 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", 1944 DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n",
1945 jedec_table[index].name, 4 * (1<<p_cfi->device_type)); 1945 jedec_table[index].name, 4 * (1<<cfi->device_type));
1946 return 0; 1946 return 0;
1947 } 1947 }
1948 1948
@@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1950 1950
1951 num_erase_regions = jedec_table[index].nr_regions; 1951 num_erase_regions = jedec_table[index].nr_regions;
1952 1952
1953 p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); 1953 cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL);
1954 if (!p_cfi->cfiq) { 1954 if (!cfi->cfiq) {
1955 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); 1955 //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name);
1956 return 0; 1956 return 0;
1957 } 1957 }
1958 1958
1959 memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); 1959 memset(cfi->cfiq, 0, sizeof(struct cfi_ident));
1960 1960
1961 p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; 1961 cfi->cfiq->P_ID = jedec_table[index].cmd_set;
1962 p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; 1962 cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions;
1963 p_cfi->cfiq->DevSize = jedec_table[index].dev_size; 1963 cfi->cfiq->DevSize = jedec_table[index].dev_size;
1964 p_cfi->cfi_mode = CFI_MODE_JEDEC; 1964 cfi->cfi_mode = CFI_MODE_JEDEC;
1965 cfi->sector_erase_cmd = CMD(0x30);
1965 1966
1966 for (i=0; i<num_erase_regions; i++){ 1967 for (i=0; i<num_erase_regions; i++){
1967 p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; 1968 cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i];
1968 } 1969 }
1969 p_cfi->cmdset_priv = NULL; 1970 cfi->cmdset_priv = NULL;
1970 1971
1971 /* This may be redundant for some cases, but it doesn't hurt */ 1972 /* This may be redundant for some cases, but it doesn't hurt */
1972 p_cfi->mfr = jedec_table[index].mfr_id; 1973 cfi->mfr = jedec_table[index].mfr_id;
1973 p_cfi->id = jedec_table[index].dev_id; 1974 cfi->id = jedec_table[index].dev_id;
1974 1975
1975 uaddr = jedec_table[index].uaddr; 1976 uaddr = jedec_table[index].uaddr;
1976 1977
@@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index)
1978 our brains explode when we see the datasheets talking about address 1979 our brains explode when we see the datasheets talking about address
1979 lines numbered from A-1 to A18. The CFI table has unlock addresses 1980 lines numbered from A-1 to A18. The CFI table has unlock addresses
1980 in device-words according to the mode the device is connected in */ 1981 in device-words according to the mode the device is connected in */
1981 p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; 1982 cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type;
1982 p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; 1983 cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type;
1983 1984
1984 return 1; /* ok */ 1985 return 1; /* ok */
1985} 1986}
@@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
2175 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", 2176 "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n",
2176 __func__, cfi->mfr, cfi->id, 2177 __func__, cfi->mfr, cfi->id,
2177 cfi->addr_unlock1, cfi->addr_unlock2 ); 2178 cfi->addr_unlock1, cfi->addr_unlock2 );
2178 if (!cfi_jedec_setup(cfi, i)) 2179 if (!cfi_jedec_setup(map, cfi, i))
2179 return 0; 2180 return 0;
2180 goto ok_out; 2181 goto ok_out;
2181 } 2182 }
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c
index 77d64ce19e9f..92de7e3a49a5 100644
--- a/drivers/mtd/maps/amd76xrom.c
+++ b/drivers/mtd/maps/amd76xrom.c
@@ -151,6 +151,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev,
151 printk(KERN_ERR MOD_NAME 151 printk(KERN_ERR MOD_NAME
152 " %s(): Unable to register resource %pR - kernel bug?\n", 152 " %s(): Unable to register resource %pR - kernel bug?\n",
153 __func__, &window->rsrc); 153 __func__, &window->rsrc);
154 return -EBUSY;
154 } 155 }
155 156
156 157
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index cb20c67995d8..e0a2373bf0e2 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -413,7 +413,6 @@ error3:
413error2: 413error2:
414 list_del(&new->list); 414 list_del(&new->list);
415error1: 415error1:
416 kfree(new);
417 return ret; 416 return ret;
418} 417}
419 418
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 15682ec8530e..28af71c61834 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void)
968module_init(omap_nand_init); 968module_init(omap_nand_init);
969module_exit(omap_nand_exit); 969module_exit(omap_nand_exit);
970 970
971MODULE_ALIAS(DRIVER_NAME); 971MODULE_ALIAS("platform:" DRIVER_NAME);
972MODULE_LICENSE("GPL"); 972MODULE_LICENSE("GPL");
973MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); 973MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards");
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index d9d7efbc77cc..6322d1fb5d62 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
930 930
931 init_completion(&dev->dma_done); 931 init_completion(&dev->dma_done);
932 932
933 dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); 933 dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
934 934
935 if (!dev->card_workqueue) 935 if (!dev->card_workqueue)
936 goto error9; 936 goto error9;
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c
index e78914938c5c..ac08750748a3 100644
--- a/drivers/mtd/onenand/generic.c
+++ b/drivers/mtd/onenand/generic.c
@@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = {
131 .remove = __devexit_p(generic_onenand_remove), 131 .remove = __devexit_p(generic_onenand_remove),
132}; 132};
133 133
134MODULE_ALIAS(DRIVER_NAME); 134MODULE_ALIAS("platform:" DRIVER_NAME);
135 135
136static int __init generic_onenand_init(void) 136static int __init generic_onenand_init(void)
137{ 137{
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c
index ac31f461cc1c..c849cacf4b2f 100644
--- a/drivers/mtd/onenand/omap2.c
+++ b/drivers/mtd/onenand/omap2.c
@@ -860,7 +860,7 @@ static void __exit omap2_onenand_exit(void)
860module_init(omap2_onenand_init); 860module_init(omap2_onenand_init);
861module_exit(omap2_onenand_exit); 861module_exit(omap2_onenand_exit);
862 862
863MODULE_ALIAS(DRIVER_NAME); 863MODULE_ALIAS("platform:" DRIVER_NAME);
864MODULE_LICENSE("GPL"); 864MODULE_LICENSE("GPL");
865MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); 865MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>");
866MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); 866MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3");
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
index 67822cf6c025..ac0d6a8613b5 100644
--- a/drivers/mtd/sm_ftl.c
+++ b/drivers/mtd/sm_ftl.c
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
1258static __init int sm_module_init(void) 1258static __init int sm_module_init(void)
1259{ 1259{
1260 int error = 0; 1260 int error = 0;
1261 cache_flush_workqueue = create_freezeable_workqueue("smflush"); 1261 cache_flush_workqueue = create_freezable_workqueue("smflush");
1262 1262
1263 if (IS_ERR(cache_flush_workqueue)) 1263 if (IS_ERR(cache_flush_workqueue))
1264 return PTR_ERR(cache_flush_workqueue); 1264 return PTR_ERR(cache_flush_workqueue);
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index 39214e512452..7ca0eded2561 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data)
425 int csr0, boguscnt; 425 int csr0, boguscnt;
426 int handled = 0; 426 int handled = 0;
427 427
428 if (dev == NULL) {
429 printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n");
430 return IRQ_NONE;
431 }
432
433 lance->RAP = CSR0; /* PCnet-ISA Controller Status */ 428 lance->RAP = CSR0; /* PCnet-ISA Controller Status */
434 429
435 if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ 430 if (!(lance->RDP & INTR)) /* Check if any interrupt has been */
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 653c62475cb6..8849699c66c4 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -22,7 +22,7 @@
22 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
23/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
24 24
25#define DRV_MODULE_VERSION "1.62.00-5" 25#define DRV_MODULE_VERSION "1.62.00-6"
26#define DRV_MODULE_RELDATE "2011/01/30" 26#define DRV_MODULE_RELDATE "2011/01/30"
27#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
28 28
@@ -1211,6 +1211,7 @@ struct bnx2x {
1211 /* DCBX Negotation results */ 1211 /* DCBX Negotation results */
1212 struct dcbx_features dcbx_local_feat; 1212 struct dcbx_features dcbx_local_feat;
1213 u32 dcbx_error; 1213 u32 dcbx_error;
1214 u32 pending_max;
1214}; 1215};
1215 1216
1216/** 1217/**
@@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1613#define BNX2X_BTR 4 1614#define BNX2X_BTR 4
1614#define MAX_SPQ_PENDING 8 1615#define MAX_SPQ_PENDING 8
1615 1616
1616 1617/* CMNG constants, as derived from system spec calculations */
1617/* CMNG constants 1618/* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */
1618 derived from lab experiments, and not from system spec calculations !!! */ 1619#define DEF_MIN_RATE 100
1619#define DEF_MIN_RATE 100 1620/* resolution of the rate shaping timer - 400 usec */
1620/* resolution of the rate shaping timer - 100 usec */ 1621#define RS_PERIODIC_TIMEOUT_USEC 400
1621#define RS_PERIODIC_TIMEOUT_USEC 100
1622/* resolution of fairness algorithm in usecs -
1623 coefficient for calculating the actual t fair */
1624#define T_FAIR_COEF 10000000
1625/* number of bytes in single QM arbitration cycle - 1622/* number of bytes in single QM arbitration cycle -
1626 coefficient for calculating the fairness timer */ 1623 * coefficient for calculating the fairness timer */
1627#define QM_ARB_BYTES 40000 1624#define QM_ARB_BYTES 160000
1628#define FAIR_MEM 2 1625/* resolution of Min algorithm 1:100 */
1626#define MIN_RES 100
1627/* how many bytes above threshold for the minimal credit of Min algorithm*/
1628#define MIN_ABOVE_THRESH 32768
1629/* Fairness algorithm integration time coefficient -
1630 * for calculating the actual Tfair */
1631#define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)
1632/* Memory of fairness algorithm . 2 cycles */
1633#define FAIR_MEM 2
1629 1634
1630 1635
1631#define ATTN_NIG_FOR_FUNC (1L << 8) 1636#define ATTN_NIG_FOR_FUNC (1L << 8)
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 710ce5d04c53..a71b32940533 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
259#endif 259#endif
260} 260}
261 261
262/* Timestamp option length allowed for TPA aggregation:
263 *
264 * nop nop kind length echo val
265 */
266#define TPA_TSTAMP_OPT_LEN 12
267/**
268 * Calculate the approximate value of the MSS for this
269 * aggregation using the first packet of it.
270 *
271 * @param bp
272 * @param parsing_flags Parsing flags from the START CQE
273 * @param len_on_bd Total length of the first packet for the
274 * aggregation.
275 */
276static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
277 u16 len_on_bd)
278{
 279 /* A TPA aggregation won't have IP options or TCP options
280 * other than timestamp.
281 */
282 u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
283
284
 285 /* Check if there was a TCP timestamp; if there is, it will
 286 * always be 12 bytes long: nop nop kind length echo val.
287 *
288 * Otherwise FW would close the aggregation.
289 */
290 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
291 hdrs_len += TPA_TSTAMP_OPT_LEN;
292
293 return len_on_bd - hdrs_len;
294}
295
262static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, 296static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
263 struct sk_buff *skb, 297 struct sk_buff *skb,
264 struct eth_fast_path_rx_cqe *fp_cqe, 298 struct eth_fast_path_rx_cqe *fp_cqe,
265 u16 cqe_idx) 299 u16 cqe_idx, u16 parsing_flags)
266{ 300{
267 struct sw_rx_page *rx_pg, old_rx_pg; 301 struct sw_rx_page *rx_pg, old_rx_pg;
268 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); 302 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
275 309
276 /* This is needed in order to enable forwarding support */ 310 /* This is needed in order to enable forwarding support */
277 if (frag_size) 311 if (frag_size)
278 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, 312 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
279 max(frag_size, (u32)len_on_bd)); 313 len_on_bd);
280 314
281#ifdef BNX2X_STOP_ON_ERROR 315#ifdef BNX2X_STOP_ON_ERROR
282 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { 316 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344 if (likely(new_skb)) { 378 if (likely(new_skb)) {
345 /* fix ip xsum and give it to the stack */ 379 /* fix ip xsum and give it to the stack */
346 /* (no need to map the new skb) */ 380 /* (no need to map the new skb) */
381 u16 parsing_flags =
382 le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
347 383
348 prefetch(skb); 384 prefetch(skb);
349 prefetch(((char *)(skb)) + L1_CACHE_BYTES); 385 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
373 } 409 }
374 410
375 if (!bnx2x_fill_frag_skb(bp, fp, skb, 411 if (!bnx2x_fill_frag_skb(bp, fp, skb,
376 &cqe->fast_path_cqe, cqe_idx)) { 412 &cqe->fast_path_cqe, cqe_idx,
377 if ((le16_to_cpu(cqe->fast_path_cqe. 413 parsing_flags)) {
378 pars_flags.flags) & PARSING_FLAGS_VLAN)) 414 if (parsing_flags & PARSING_FLAGS_VLAN)
379 __vlan_hwaccel_put_tag(skb, 415 __vlan_hwaccel_put_tag(skb,
380 le16_to_cpu(cqe->fast_path_cqe. 416 le16_to_cpu(cqe->fast_path_cqe.
381 vlan_tag)); 417 vlan_tag));
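
bnx2x_set_lro_mss() above replaces the old min/max heuristic with an explicit header subtraction: a TPA aggregation carries no IP options and at most a 12-byte TCP timestamp option, so the MSS is the first packet's length on the BD minus Ethernet + IPv4 + TCP headers (plus 12 when the START CQE flags a timestamp). The arithmetic in plain C, with the constant sizes written out rather than taken from the kernel headers:

/* 14 (Ethernet) + 20 (IPv4, no options) + 20 (TCP, no options) = 54 bytes,
 * plus 12 for the nop/nop/kind/len/echo/val timestamp option when present. */
static unsigned short lro_mss(unsigned short len_on_bd, int has_tstamp)
{
	unsigned short hdrs = 14 + 20 + 20;

	if (has_tstamp)
		hdrs += 12;

	return len_on_bd - hdrs;
}
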
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{ 739{
704 u16 line_speed = bp->link_vars.line_speed; 740 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) { 741 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] & 742 u16 maxCfg = bnx2x_extract_max_cfg(bp,
707 FUNC_MF_CFG_MAX_BW_MASK) >> 743 bp->mf_config[BP_VN(bp)]);
708 FUNC_MF_CFG_MAX_BW_SHIFT; 744
709 /* Calculate the current MAX line speed limit for the DCC 745 /* Calculate the current MAX line speed limit for the MF
710 * capable devices 746 * devices
711 */ 747 */
712 if (IS_MF_SD(bp)) { 748 if (IS_MF_SI(bp))
749 line_speed = (line_speed * maxCfg) / 100;
750 else { /* SD mode */
713 u16 vn_max_rate = maxCfg * 100; 751 u16 vn_max_rate = maxCfg * 100;
714 752
715 if (vn_max_rate < line_speed) 753 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate; 754 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */ 755 }
718 line_speed = (line_speed * maxCfg) / 100;
719 } 756 }
720 757
721 return line_speed; 758 return line_speed;
@@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp)
959 bnx2x_free_rx_skbs(bp); 996 bnx2x_free_rx_skbs(bp);
960} 997}
961 998
999void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1000{
1001 /* load old values */
1002 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1003
1004 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1005 /* leave all but MAX value */
1006 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1007
1008 /* set new MAX value */
1009 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1010 & FUNC_MF_CFG_MAX_BW_MASK;
1011
1012 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1013 }
1014}
1015
962static void bnx2x_free_msix_irqs(struct bnx2x *bp) 1016static void bnx2x_free_msix_irqs(struct bnx2x *bp)
963{ 1017{
964 int i, offset = 1; 1018 int i, offset = 1;
@@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1427 1481
1428 bnx2x_set_eth_mac(bp, 1); 1482 bnx2x_set_eth_mac(bp, 1);
1429 1483
1484 if (bp->pending_max) {
1485 bnx2x_update_max_mf_config(bp, bp->pending_max);
1486 bp->pending_max = 0;
1487 }
1488
1430 if (bp->port.pmf) 1489 if (bp->port.pmf)
1431 bnx2x_initial_phy_init(bp, load_mode); 1490 bnx2x_initial_phy_init(bp, load_mode);
1432 1491
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 03eb4d68e6bb..85ea7f26b51f 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp);
341 */ 341 */
342int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); 342int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
343 343
344/**
345 * Updates MAX part of MF configuration in HW
346 * (if required)
347 *
348 * @param bp
349 * @param value
350 */
351void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value);
352
344/* dev_close main block */ 353/* dev_close main block */
345int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); 354int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
346 355
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp,
1044void bnx2x_acquire_phy_lock(struct bnx2x *bp); 1053void bnx2x_acquire_phy_lock(struct bnx2x *bp);
1045void bnx2x_release_phy_lock(struct bnx2x *bp); 1054void bnx2x_release_phy_lock(struct bnx2x *bp);
1046 1055
1056/**
1057 * Extracts MAX BW part from MF configuration.
1058 *
1059 * @param bp
1060 * @param mf_cfg
1061 *
1062 * @return u16
1063 */
1064static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg)
1065{
1066 u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1067 FUNC_MF_CFG_MAX_BW_SHIFT;
1068 if (!max_cfg) {
1069 BNX2X_ERR("Illegal configuration detected for Max BW - "
1070 "using 100 instead\n");
1071 max_cfg = 100;
1072 }
1073 return max_cfg;
1074}
1075
1047#endif /* BNX2X_CMN_H */ 1076#endif /* BNX2X_CMN_H */
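
bnx2x_extract_max_cfg() is a plain mask-and-shift of the MAX BW field out of the packed MF configuration word, with a fallback of 100 when the field is illegally zero, and bnx2x_update_max_mf_config() is the matching read-modify-write. The same extract/update pair in isolation; the mask and shift below are illustrative only, the real FUNC_MF_CFG_* values come from the driver's headers:

#include <linux/types.h>

#define MAX_BW_MASK	0x00ff0000u	/* assumed field layout */
#define MAX_BW_SHIFT	16

static u16 extract_max_bw(u32 mf_cfg)
{
	u16 max = (mf_cfg & MAX_BW_MASK) >> MAX_BW_SHIFT;

	return max ? max : 100;		/* 0 is illegal, fall back to 100 */
}

static u32 update_max_bw(u32 mf_cfg, u16 value)
{
	mf_cfg &= ~MAX_BW_MASK;					/* keep everything else */
	mf_cfg |= ((u32)value << MAX_BW_SHIFT) & MAX_BW_MASK;	/* set new MAX */
	return mf_cfg;
}
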
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index 5b44a8b48509..7e92f9d0dcfd 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
238 speed |= (cmd->speed_hi << 16); 238 speed |= (cmd->speed_hi << 16);
239 239
240 if (IS_MF_SI(bp)) { 240 if (IS_MF_SI(bp)) {
241 u32 param = 0; 241 u32 part;
242 u32 line_speed = bp->link_vars.line_speed; 242 u32 line_speed = bp->link_vars.line_speed;
243 243
244 /* use 10G if no link detected */ 244 /* use 10G if no link detected */
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
251 REQ_BC_VER_4_SET_MF_BW); 251 REQ_BC_VER_4_SET_MF_BW);
252 return -EINVAL; 252 return -EINVAL;
253 } 253 }
254 if (line_speed < speed) { 254
255 BNX2X_DEV_INFO("New speed should be less or equal " 255 part = (speed * 100) / line_speed;
256 "to actual line speed\n"); 256
257 if (line_speed < speed || !part) {
258 BNX2X_DEV_INFO("Speed setting should be in a range "
259 "from 1%% to 100%% "
260 "of actual line speed\n");
257 return -EINVAL; 261 return -EINVAL;
258 } 262 }
259 /* load old values */
260 param = bp->mf_config[BP_VN(bp)];
261
262 /* leave only MIN value */
263 param &= FUNC_MF_CFG_MIN_BW_MASK;
264 263
265 /* set new MAX value */ 264 if (bp->state != BNX2X_STATE_OPEN)
266 param |= (((speed * 100) / line_speed) 265 /* store value for following "load" */
267 << FUNC_MF_CFG_MAX_BW_SHIFT) 266 bp->pending_max = part;
268 & FUNC_MF_CFG_MAX_BW_MASK; 267 else
268 bnx2x_update_max_mf_config(bp, part);
269 269
270 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
271 return 0; 270 return 0;
272 } 271 }
273 272
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
1781 { 0x100, 0x350 }, /* manuf_info */ 1780 { 0x100, 0x350 }, /* manuf_info */
1782 { 0x450, 0xf0 }, /* feature_info */ 1781 { 0x450, 0xf0 }, /* feature_info */
1783 { 0x640, 0x64 }, /* upgrade_key_info */ 1782 { 0x640, 0x64 }, /* upgrade_key_info */
1784 { 0x6a4, 0x64 },
1785 { 0x708, 0x70 }, /* manuf_key_info */ 1783 { 0x708, 0x70 }, /* manuf_key_info */
1786 { 0x778, 0x70 },
1787 { 0, 0 } 1784 { 0, 0 }
1788 }; 1785 };
1789 __be32 buf[0x350 / 4]; 1786 __be32 buf[0x350 / 4];
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev,
1933 buf[4] = 1; 1930 buf[4] = 1;
1934 etest->flags |= ETH_TEST_FL_FAILED; 1931 etest->flags |= ETH_TEST_FL_FAILED;
1935 } 1932 }
1936 if (bp->port.pmf) 1933
1937 if (bnx2x_link_test(bp, is_serdes) != 0) { 1934 if (bnx2x_link_test(bp, is_serdes) != 0) {
1938 buf[5] = 1; 1935 buf[5] = 1;
1939 etest->flags |= ETH_TEST_FL_FAILED; 1936 etest->flags |= ETH_TEST_FL_FAILED;
1940 } 1937 }
1941 1938
1942#ifdef BNX2X_EXTRA_DEBUG 1939#ifdef BNX2X_EXTRA_DEBUG
1943 bnx2x_panic_dump(bp); 1940 bnx2x_panic_dump(bp);
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index 5a268e9a0895..fa6dbe3f2058 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -241,7 +241,7 @@ static const struct {
241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't 241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
242 * want to handle "system kill" flow at the moment. 242 * want to handle "system kill" flow at the moment.
243 */ 243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), 244 BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), 245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), 246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), 247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index d584d32c747d..aa032339e321 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
1974 vn_max_rate = 0; 1974 vn_max_rate = 0;
1975 1975
1976 } else { 1976 } else {
1977 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1978
1977 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 1979 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1978 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 1980 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1979 /* If min rate is zero - set it to 1 */ 1981 /* If fairness is enabled (not all min rates are zeroes) and
1982 if current min rate is zero - set it to 1.
1983 This is a requirement of the algorithm. */
1980 if (bp->vn_weight_sum && (vn_min_rate == 0)) 1984 if (bp->vn_weight_sum && (vn_min_rate == 0))
1981 vn_min_rate = DEF_MIN_RATE; 1985 vn_min_rate = DEF_MIN_RATE;
1982 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 1986
1983 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 1987 if (IS_MF_SI(bp))
1988 /* maxCfg in percents of linkspeed */
1989 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1990 else
1991 /* maxCfg is absolute in 100Mb units */
1992 vn_max_rate = maxCfg * 100;
1984 } 1993 }
1985 1994
1986 DP(NETIF_MSG_IFUP, 1995 DP(NETIF_MSG_IFUP,
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
2006 m_fair_vn.vn_credit_delta = 2015 m_fair_vn.vn_credit_delta =
2007 max_t(u32, (vn_min_rate * (T_FAIR_COEF / 2016 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2008 (8 * bp->vn_weight_sum))), 2017 (8 * bp->vn_weight_sum))),
2009 (bp->cmng.fair_vars.fair_threshold * 2)); 2018 (bp->cmng.fair_vars.fair_threshold +
2019 MIN_ABOVE_THRESH));
2010 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", 2020 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2011 m_fair_vn.vn_credit_delta); 2021 m_fair_vn.vn_credit_delta);
2012 } 2022 }
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2082 bnx2x_calc_vn_weight_sum(bp); 2092 bnx2x_calc_vn_weight_sum(bp);
2083 2093
2084 /* calculate and set min-max rate for each vn */ 2094 /* calculate and set min-max rate for each vn */
2085 for (vn = VN_0; vn < E1HVN_MAX; vn++) 2095 if (bp->port.pmf)
2086 bnx2x_init_vn_minmax(bp, vn); 2096 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2097 bnx2x_init_vn_minmax(bp, vn);
2087 2098
2088 /* always enable rate shaping and fairness */ 2099 /* always enable rate shaping and fairness */
2089 bp->cmng.flags.cmng_enables |= 2100 bp->cmng.flags.cmng_enables |=
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2152 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2163 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2153 } 2164 }
2154 2165
2155 /* indicate link status only if link status actually changed */
2156 if (prev_link_status != bp->link_vars.link_status)
2157 bnx2x_link_report(bp);
2158
2159 if (IS_MF(bp))
2160 bnx2x_link_sync_notify(bp);
2161
2162 if (bp->link_vars.link_up && bp->link_vars.line_speed) { 2166 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2163 int cmng_fns = bnx2x_get_cmng_fns_mode(bp); 2167 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
2164 2168
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2170 DP(NETIF_MSG_IFUP, 2174 DP(NETIF_MSG_IFUP,
2171 "single function mode without fairness\n"); 2175 "single function mode without fairness\n");
2172 } 2176 }
2177
2178 if (IS_MF(bp))
2179 bnx2x_link_sync_notify(bp);
2180
2181 /* indicate link status only if link status actually changed */
2182 if (prev_link_status != bp->link_vars.link_status)
2183 bnx2x_link_report(bp);
2173} 2184}
2174 2185
2175void bnx2x__link_status_update(struct bnx2x *bp) 2186void bnx2x__link_status_update(struct bnx2x *bp)
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index bda60d590fa8..3445ded6674f 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
1239 if (unlikely(bp->panic)) 1239 if (unlikely(bp->panic))
1240 return; 1240 return;
1241 1241
1242 bnx2x_stats_stm[bp->stats_state][event].action(bp);
1243
1242 /* Protect a state change flow */ 1244 /* Protect a state change flow */
1243 spin_lock_bh(&bp->stats_lock); 1245 spin_lock_bh(&bp->stats_lock);
1244 state = bp->stats_state; 1246 state = bp->stats_state;
1245 bp->stats_state = bnx2x_stats_stm[state][event].next_state; 1247 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
1246 spin_unlock_bh(&bp->stats_lock); 1248 spin_unlock_bh(&bp->stats_lock);
1247 1249
1248 bnx2x_stats_stm[state][event].action(bp);
1249
1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) 1250 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", 1251 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
1252 state, event, bp->stats_state); 1252 state, event, bp->stats_state);
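
The stats handler dispatches through a [state][event] table; the fix above runs the action that belongs to the current state first and only then takes stats_lock to advance to the next state, instead of advancing first and invoking the action against a stale snapshot. A schematic of that table-driven dispatch, not the driver's exact code or locking:

#include <linux/spinlock.h>

enum my_state { ST_DISABLED, ST_ENABLED, ST_MAX };
enum my_event { EV_START, EV_STOP, EV_UPDATE, EV_MAX };

struct my_transition {
	void (*action)(void *ctx);
	enum my_state next_state;
};

/* stm[state][event], in the spirit of bnx2x_stats_stm */
static void my_fsm_handle(struct my_transition stm[ST_MAX][EV_MAX],
			  spinlock_t *lock, enum my_state *state,
			  enum my_event ev, void *ctx)
{
	/* run the action for the *current* state first ... */
	stm[*state][ev].action(ctx);

	/* ... then perform the state transition under the lock */
	spin_lock_bh(lock);
	*state = stm[*state][ev].next_state;
	spin_unlock_bh(lock);
}
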
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 1024ae158227..a5d5d0b5b155 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port)
281} 281}
282 282
283/** 283/**
284 * __get_rx_machine_lock - lock the port's RX machine 284 * __get_state_machine_lock - lock the port's state machines
285 * @port: the port we're looking at 285 * @port: the port we're looking at
286 * 286 *
287 */ 287 */
288static inline void __get_rx_machine_lock(struct port *port) 288static inline void __get_state_machine_lock(struct port *port)
289{ 289{
290 spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 290 spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
291} 291}
292 292
293/** 293/**
294 * __release_rx_machine_lock - unlock the port's RX machine 294 * __release_state_machine_lock - unlock the port's state machines
295 * @port: the port we're looking at 295 * @port: the port we're looking at
296 * 296 *
297 */ 297 */
298static inline void __release_rx_machine_lock(struct port *port) 298static inline void __release_state_machine_lock(struct port *port)
299{ 299{
300 spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 300 spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
301} 301}
302 302
303/** 303/**
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port)
388} 388}
389 389
390/** 390/**
391 * __initialize_port_locks - initialize a port's RX machine spinlock 391 * __initialize_port_locks - initialize a port's STATE machine spinlock
392 * @port: the port we're looking at 392 * @port: the port we're looking at
393 * 393 *
394 */ 394 */
395static inline void __initialize_port_locks(struct port *port) 395static inline void __initialize_port_locks(struct port *port)
396{ 396{
397 // make sure it isn't called twice 397 // make sure it isn't called twice
398 spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); 398 spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock));
399} 399}
400 400
401//conversions 401//conversions
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1025{ 1025{
1026 rx_states_t last_state; 1026 rx_states_t last_state;
1027 1027
1028 // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback)
1029 __get_rx_machine_lock(port);
1030
1031 // keep current State Machine state to compare later if it was changed 1028 // keep current State Machine state to compare later if it was changed
1032 last_state = port->sm_rx_state; 1029 last_state = port->sm_rx_state;
1033 1030
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1133 pr_err("%s: An illegal loopback occurred on adapter (%s).\n" 1130 pr_err("%s: An illegal loopback occurred on adapter (%s).\n"
1134 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", 1131 "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
1135 port->slave->dev->master->name, port->slave->dev->name); 1132 port->slave->dev->master->name, port->slave->dev->name);
1136 __release_rx_machine_lock(port);
1137 return; 1133 return;
1138 } 1134 }
1139 __update_selected(lacpdu, port); 1135 __update_selected(lacpdu, port);
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1153 break; 1149 break;
1154 } 1150 }
1155 } 1151 }
1156 __release_rx_machine_lock(port);
1157} 1152}
1158 1153
1159/** 1154/**
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2155 goto re_arm; 2150 goto re_arm;
2156 } 2151 }
2157 2152
2153 /* Lock around state machines to protect data accessed
2154 * by all (e.g., port->sm_vars). ad_rx_machine may run
2155 * concurrently due to incoming LACPDU.
2156 */
2157 __get_state_machine_lock(port);
2158
2158 ad_rx_machine(NULL, port); 2159 ad_rx_machine(NULL, port);
2159 ad_periodic_machine(port); 2160 ad_periodic_machine(port);
2160 ad_port_selection_logic(port); 2161 ad_port_selection_logic(port);
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
2164 // turn off the BEGIN bit, since we already handled it 2165 // turn off the BEGIN bit, since we already handled it
2165 if (port->sm_vars & AD_PORT_BEGIN) 2166 if (port->sm_vars & AD_PORT_BEGIN)
2166 port->sm_vars &= ~AD_PORT_BEGIN; 2167 port->sm_vars &= ~AD_PORT_BEGIN;
2168
2169 __release_state_machine_lock(port);
2167 } 2170 }
2168 2171
2169re_arm: 2172re_arm:
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u
2200 case AD_TYPE_LACPDU: 2203 case AD_TYPE_LACPDU:
2201 pr_debug("Received LACPDU on port %d\n", 2204 pr_debug("Received LACPDU on port %d\n",
2202 port->actor_port_number); 2205 port->actor_port_number);
2206 /* Protect against concurrent state machines */
2207 __get_state_machine_lock(port);
2203 ad_rx_machine(lacpdu, port); 2208 ad_rx_machine(lacpdu, port);
2209 __release_state_machine_lock(port);
2204 break; 2210 break;
2205 2211
2206 case AD_TYPE_MARKER: 2212 case AD_TYPE_MARKER:
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 2c46a154f2c6..b28baff70864 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -264,7 +264,8 @@ struct ad_bond_info {
264struct ad_slave_info { 264struct ad_slave_info {
265 struct aggregator aggregator; // 802.3ad aggregator structure 265 struct aggregator aggregator; // 802.3ad aggregator structure
266 struct port port; // 802.3ad port structure 266 struct port port; // 802.3ad port structure
267 spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt 267 spinlock_t state_machine_lock; /* mutex state machines vs.
268 incoming LACPDU */
268 u16 id; 269 u16 id;
269}; 270};
270 271
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 7ab534aee452..7513c4523ac4 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
940 goto open_unlock; 940 goto open_unlock;
941 } 941 }
942 942
943 priv->wq = create_freezeable_workqueue("mcp251x_wq"); 943 priv->wq = create_freezable_workqueue("mcp251x_wq");
944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); 944 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); 945 INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
946 946
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index 27d1d398e25e..d38706958af6 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -1,5 +1,5 @@
1config CAN_MSCAN 1config CAN_MSCAN
2 depends on CAN_DEV && (PPC || M68K || M68KNOMMU) 2 depends on CAN_DEV && (PPC || M68K)
3 tristate "Support for Freescale MSCAN based chips" 3 tristate "Support for Freescale MSCAN based chips"
4 ---help--- 4 ---help---
5 The Motorola Scalable Controller Area Network (MSCAN) definition 5 The Motorola Scalable Controller Area Network (MSCAN) definition
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig
index 8ba81b3ddd90..5de46a9a77bb 100644
--- a/drivers/net/can/softing/Kconfig
+++ b/drivers/net/can/softing/Kconfig
@@ -18,7 +18,7 @@ config CAN_SOFTING
18config CAN_SOFTING_CS 18config CAN_SOFTING_CS
19 tristate "Softing Gmbh CAN pcmcia cards" 19 tristate "Softing Gmbh CAN pcmcia cards"
20 depends on PCMCIA 20 depends on PCMCIA
21 select CAN_SOFTING 21 depends on CAN_SOFTING
22 ---help--- 22 ---help---
23 Support for PCMCIA cards from Softing Gmbh & some cards 23 Support for PCMCIA cards from Softing Gmbh & some cards
24 from Vector Gmbh. 24 from Vector Gmbh.
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c
index 5157e15e96eb..aeea9f9ff6e8 100644
--- a/drivers/net/can/softing/softing_main.c
+++ b/drivers/net/can/softing/softing_main.c
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = {
633}; 633};
634 634
635static const struct can_bittiming_const softing_btr_const = { 635static const struct can_bittiming_const softing_btr_const = {
636 .name = "softing",
636 .tseg1_min = 1, 637 .tseg1_min = 1,
637 .tseg1_max = 16, 638 .tseg1_max = 16,
638 .tseg2_min = 1, 639 .tseg2_min = 1,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 7ff170cbc7dc..302be4aa69d6 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2760 u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2761 int kcqe_cnt; 2761 int kcqe_cnt;
2762 2762
2763 /* status block index must be read before reading other fields */
2764 rmb();
2763 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2765 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2764 2766
2765 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { 2767 while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2770 barrier(); 2772 barrier();
2771 if (status_idx != *cp->kcq1.status_idx_ptr) { 2773 if (status_idx != *cp->kcq1.status_idx_ptr) {
2772 status_idx = (u16) *cp->kcq1.status_idx_ptr; 2774 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2775 /* status block index must be read first */
2776 rmb();
2773 cp->kwq_con_idx = *cp->kwq_con_idx_ptr; 2777 cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2774 } else 2778 } else
2775 break; 2779 break;
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2888 u32 last_status = *info->status_idx_ptr; 2892 u32 last_status = *info->status_idx_ptr;
2889 int kcqe_cnt; 2893 int kcqe_cnt;
2890 2894
2895 /* status block index must be read before reading the KCQ */
2896 rmb();
2891 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { 2897 while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2892 2898
2893 service_kcqes(dev, kcqe_cnt); 2899 service_kcqes(dev, kcqe_cnt);
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2898 break; 2904 break;
2899 2905
2900 last_status = *info->status_idx_ptr; 2906 last_status = *info->status_idx_ptr;
2907 /* status block index must be read before reading the KCQ */
2908 rmb();
2901 } 2909 }
2902 return last_status; 2910 return last_status;
2903} 2911}
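
The cnic hunks insert rmb() after every read of the DMA'd status block index and before the dependent reads of the queue entries; without the barrier the CPU may reorder the loads and consume KCQ/KWQ entries before the index that says they are valid. The canonical shape of that ordering, sketched against a hypothetical shared ring:

#include <linux/types.h>
#include <asm/system.h>		/* rmb() in this era; later kernels: <asm/barrier.h> */

struct my_ring {
	u16 *status_idx;	/* written by the device via DMA */
	u16 *entries;		/* valid up to *status_idx */
};

static u16 my_poll(struct my_ring *r)
{
	u16 idx = *r->status_idx;

	/* the status block index must be read before the entries it covers */
	rmb();

	/* ... now it is safe to consume r->entries up to idx ... */
	return idx;
}
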
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2906{ 2914{
2907 struct cnic_dev *dev = (struct cnic_dev *) data; 2915 struct cnic_dev *dev = (struct cnic_dev *) data;
2908 struct cnic_local *cp = dev->cnic_priv; 2916 struct cnic_local *cp = dev->cnic_priv;
2909 u32 status_idx; 2917 u32 status_idx, new_status_idx;
2910 2918
2911 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2919 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2912 return; 2920 return;
2913 2921
2914 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2922 while (1) {
2923 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2915 2924
2916 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2925 CNIC_WR16(dev, cp->kcq1.io_addr,
2926 cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2917 2927
2918 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 2928 if (!BNX2X_CHIP_IS_E2(cp->chip_id)) {
2919 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); 2929 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2930 status_idx, IGU_INT_ENABLE, 1);
2931 break;
2932 }
2933
2934 new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2935
2936 if (new_status_idx != status_idx)
2937 continue;
2920 2938
2921 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + 2939 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2922 MAX_KCQ_IDX); 2940 MAX_KCQ_IDX);
2923 2941
2924 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2942 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2925 status_idx, IGU_INT_ENABLE, 1); 2943 status_idx, IGU_INT_ENABLE, 1);
2926 } else { 2944
2927 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2945 break;
2928 status_idx, IGU_INT_ENABLE, 1);
2929 } 2946 }
2930} 2947}
2931 2948
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 56166ae2059f..6aad64df4dcb 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2040{ 2040{
2041 int i; 2041 int i;
2042 2042
2043 BUG_ON(adapter->debugfs_root == NULL); 2043 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2044 2044
2045 /* 2045 /*
2046 * Debugfs support is best effort. 2046 * Debugfs support is best effort.
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2061 */ 2061 */
2062static void cleanup_debugfs(struct adapter *adapter) 2062static void cleanup_debugfs(struct adapter *adapter)
2063{ 2063{
2064 BUG_ON(adapter->debugfs_root == NULL); 2064 BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2065 2065
2066 /* 2066 /*
2067 * Unlike our sister routine cleanup_proc(), we don't need to remove 2067 * Unlike our sister routine cleanup_proc(), we don't need to remove
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2489 struct net_device *netdev; 2489 struct net_device *netdev;
2490 2490
2491 /* 2491 /*
2492 * Vet our module parameters.
2493 */
2494 if (msi != MSI_MSIX && msi != MSI_MSI) {
2495 dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d"
2496 " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX,
2497 MSI_MSI);
2498 err = -EINVAL;
2499 goto err_out;
2500 }
2501
2502 /*
2503 * Print our driver banner the first time we're called to initialize a 2492 * Print our driver banner the first time we're called to initialize a
2504 * device. 2493 * device.
2505 */ 2494 */
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2711 /* 2700 /*
2712 * Set up our debugfs entries. 2701 * Set up our debugfs entries.
2713 */ 2702 */
2714 if (cxgb4vf_debugfs_root) { 2703 if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
2715 adapter->debugfs_root = 2704 adapter->debugfs_root =
2716 debugfs_create_dir(pci_name(pdev), 2705 debugfs_create_dir(pci_name(pdev),
2717 cxgb4vf_debugfs_root); 2706 cxgb4vf_debugfs_root);
2718 if (adapter->debugfs_root == NULL) 2707 if (IS_ERR_OR_NULL(adapter->debugfs_root))
2719 dev_warn(&pdev->dev, "could not create debugfs" 2708 dev_warn(&pdev->dev, "could not create debugfs"
2720 " directory"); 2709 " directory");
2721 else 2710 else
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2770 */ 2759 */
2771 2760
2772err_free_debugfs: 2761err_free_debugfs:
2773 if (adapter->debugfs_root) { 2762 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2774 cleanup_debugfs(adapter); 2763 cleanup_debugfs(adapter);
2775 debugfs_remove_recursive(adapter->debugfs_root); 2764 debugfs_remove_recursive(adapter->debugfs_root);
2776 } 2765 }
@@ -2802,7 +2791,6 @@ err_release_regions:
2802err_disable_device: 2791err_disable_device:
2803 pci_disable_device(pdev); 2792 pci_disable_device(pdev);
2804 2793
2805err_out:
2806 return err; 2794 return err;
2807} 2795}
2808 2796
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2840 /* 2828 /*
2841 * Tear down our debugfs entries. 2829 * Tear down our debugfs entries.
2842 */ 2830 */
2843 if (adapter->debugfs_root) { 2831 if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
2844 cleanup_debugfs(adapter); 2832 cleanup_debugfs(adapter);
2845 debugfs_remove_recursive(adapter->debugfs_root); 2833 debugfs_remove_recursive(adapter->debugfs_root);
2846 } 2834 }
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev)
2874} 2862}
2875 2863
2876/* 2864/*
2865 * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt
2866 * delivery.
2867 */
2868static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev)
2869{
2870 struct adapter *adapter;
2871 int pidx;
2872
2873 adapter = pci_get_drvdata(pdev);
2874 if (!adapter)
2875 return;
2876
2877 /*
2878 * Disable all Virtual Interfaces. This will shut down the
2879 * delivery of all ingress packets into the chip for these
2880 * Virtual Interfaces.
2881 */
2882 for_each_port(adapter, pidx) {
2883 struct net_device *netdev;
2884 struct port_info *pi;
2885
2886 if (!test_bit(pidx, &adapter->registered_device_map))
2887 continue;
2888
2889 netdev = adapter->port[pidx];
2890 if (!netdev)
2891 continue;
2892
2893 pi = netdev_priv(netdev);
2894 t4vf_enable_vi(adapter, pi->viid, false, false);
2895 }
2896
2897 /*
2898 * Free up all Queues which will prevent further DMA and
2899 * Interrupts allowing various internal pathways to drain.
2900 */
2901 t4vf_free_sge_resources(adapter);
2902}
2903
2904/*
2877 * PCI Device registration data structures. 2905 * PCI Device registration data structures.
2878 */ 2906 */
2879#define CH_DEVICE(devid, idx) \ 2907#define CH_DEVICE(devid, idx) \
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = {
2906 .id_table = cxgb4vf_pci_tbl, 2934 .id_table = cxgb4vf_pci_tbl,
2907 .probe = cxgb4vf_pci_probe, 2935 .probe = cxgb4vf_pci_probe,
2908 .remove = __devexit_p(cxgb4vf_pci_remove), 2936 .remove = __devexit_p(cxgb4vf_pci_remove),
2937 .shutdown = __devexit_p(cxgb4vf_pci_shutdown),
2909}; 2938};
2910 2939
2911/* 2940/*
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void)
2915{ 2944{
2916 int ret; 2945 int ret;
2917 2946
2947 /*
2948 * Vet our module parameters.
2949 */
2950 if (msi != MSI_MSIX && msi != MSI_MSI) {
2951 printk(KERN_WARNING KBUILD_MODNAME
2952 ": bad module parameter msi=%d; must be %d"
2953 " (MSI-X or MSI) or %d (MSI)\n",
2954 msi, MSI_MSIX, MSI_MSI);
2955 return -EINVAL;
2956 }
2957
2918 /* Debugfs support is optional, just warn if this fails */ 2958 /* Debugfs support is optional, just warn if this fails */
2919 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); 2959 cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
2920 if (!cxgb4vf_debugfs_root) 2960 if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2921 printk(KERN_WARNING KBUILD_MODNAME ": could not create" 2961 printk(KERN_WARNING KBUILD_MODNAME ": could not create"
2922 " debugfs entry, continuing\n"); 2962 " debugfs entry, continuing\n");
2923 2963
2924 ret = pci_register_driver(&cxgb4vf_driver); 2964 ret = pci_register_driver(&cxgb4vf_driver);
2925 if (ret < 0) 2965 if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root))
2926 debugfs_remove(cxgb4vf_debugfs_root); 2966 debugfs_remove(cxgb4vf_debugfs_root);
2927 return ret; 2967 return ret;
2928} 2968}
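
The cxgb4vf changes switch every debugfs_create_dir() check from a plain NULL test to IS_ERR_OR_NULL(): with CONFIG_DEBUG_FS=n the debugfs stubs return ERR_PTR(-ENODEV) rather than NULL, so the old tests would have treated that error pointer as a valid directory. The defensive pattern, with invented names:

#include <linux/kernel.h>
#include <linux/debugfs.h>
#include <linux/err.h>

static struct dentry *my_debugfs_root;

static void my_debugfs_init(void)
{
	/* NULL on failure with CONFIG_DEBUG_FS=y, ERR_PTR(-ENODEV) with =n */
	my_debugfs_root = debugfs_create_dir("my_driver", NULL);
	if (IS_ERR_OR_NULL(my_debugfs_root)) {
		pr_warn("debugfs unavailable, continuing without it\n");
		my_debugfs_root = NULL;		/* treat both cases the same */
		return;
	}

	/* ... populate files under my_debugfs_root ... */
}
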
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 0f51c80475ce..192db226ec7f 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
171 delay_idx = 0; 171 delay_idx = 0;
172 ms = delay[0]; 172 ms = delay[0];
173 173
174 for (i = 0; i < 500; i += ms) { 174 for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
175 if (sleep_ok) { 175 if (sleep_ok) {
176 ms = delay[delay_idx]; 176 ms = delay[delay_idx];
177 if (delay_idx < ARRAY_SIZE(delay) - 1) 177 if (delay_idx < ARRAY_SIZE(delay) - 1)
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2a628d17d178..7018bfe408a4 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status)
1008 int ret; 1008 int ret;
1009 1009
1010 /* free and bail if we are shutting down */ 1010 /* free and bail if we are shutting down */
1011 if (unlikely(!netif_running(ndev))) { 1011 if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) {
1012 dev_kfree_skb_any(skb); 1012 dev_kfree_skb_any(skb);
1013 return; 1013 return;
1014 } 1014 }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 2d4c4fc1d900..461dd6f905f7 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev)
802 /* Checksum mode */ 802 /* Checksum mode */
803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum); 803 dm9000_set_rx_csum_unlocked(dev, db->rx_csum);
804 804
805 /* GPIO0 on pre-activate PHY */
806 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
807 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 805 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
808 iow(db, DM9000_GPR, 0); /* Enable PHY */
809 806
810 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; 807 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
811 808
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev)
852 unsigned long flags; 849 unsigned long flags;
853 850
854 /* Save previous register address */ 851 /* Save previous register address */
855 reg_save = readb(db->io_addr);
856 spin_lock_irqsave(&db->lock, flags); 852 spin_lock_irqsave(&db->lock, flags);
853 reg_save = readb(db->io_addr);
857 854
858 netif_stop_queue(dev); 855 netif_stop_queue(dev);
859 dm9000_reset(db); 856 dm9000_reset(db);
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev)
1194 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) 1191 if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
1195 return -EAGAIN; 1192 return -EAGAIN;
1196 1193
1194 /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
1195 iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
1196 mdelay(1); /* delay needs by DM9000B */
1197
1197 /* Initialize DM9000 board */ 1198 /* Initialize DM9000 board */
1198 dm9000_reset(db); 1199 dm9000_reset(db);
1199 dm9000_init_dm9000(dev); 1200 dm9000_init_dm9000(dev);
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 9d8a20b72fa9..8318ea06cb6d 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp)
337 for (i = 0; i < PHY_MAX_ADDR; i++) 337 for (i = 0; i < PHY_MAX_ADDR; i++)
338 bp->mii_bus->irq[i] = PHY_POLL; 338 bp->mii_bus->irq[i] = PHY_POLL;
339 339
340 platform_set_drvdata(bp->dev, bp->mii_bus);
341
342 if (mdiobus_register(bp->mii_bus)) { 340 if (mdiobus_register(bp->mii_bus)) {
343 err = -ENXIO; 341 err = -ENXIO;
344 goto err_out_free_mdio_irq; 342 goto err_out_free_mdio_irq;
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
863 bp = netdev_priv(dev); 861 bp = netdev_priv(dev);
864 bp->dev = dev; 862 bp->dev = dev;
865 863
864 platform_set_drvdata(pdev, dev);
866 SET_NETDEV_DEV(dev, &pdev->dev); 865 SET_NETDEV_DEV(dev, &pdev->dev);
867 866
868 spin_lock_init(&bp->lock); 867 spin_lock_init(&bp->lock);
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 55c1711f1688..33e7c45a4fe4 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -42,7 +42,8 @@
42#define GBE_CONFIG_RAM_BASE \ 42#define GBE_CONFIG_RAM_BASE \
43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) 43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
44 44
45#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) 45#define GBE_CONFIG_BASE_VIRT \
46 ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE))
46 47
47#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ 48#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
48 (iowrite16_rep(base + offset, data, count)) 49 (iowrite16_rep(base + offset, data, count))
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 3065870cf2a7..2e5022849f18 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work)
937 u16 phy_status, phy_1000t_status, phy_ext_status; 937 u16 phy_status, phy_1000t_status, phy_ext_status;
938 u16 pci_status; 938 u16 pci_status;
939 939
940 if (test_bit(__E1000_DOWN, &adapter->state))
941 return;
942
940 e1e_rphy(hw, PHY_STATUS, &phy_status); 943 e1e_rphy(hw, PHY_STATUS, &phy_status);
941 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); 944 e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status);
942 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); 945 e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status);
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work)
1506 struct e1000_adapter *adapter = container_of(work, 1509 struct e1000_adapter *adapter = container_of(work,
1507 struct e1000_adapter, downshift_task); 1510 struct e1000_adapter, downshift_task);
1508 1511
1512 if (test_bit(__E1000_DOWN, &adapter->state))
1513 return;
1514
1509 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); 1515 e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
1510} 1516}
1511 1517
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter)
3338 return 0; 3344 return 0;
3339} 3345}
3340 3346
3347static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
3348{
3349 struct e1000_hw *hw = &adapter->hw;
3350
3351 if (!(adapter->flags2 & FLAG2_DMA_BURST))
3352 return;
3353
3354 /* flush pending descriptor writebacks to memory */
3355 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
3356 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
3357
3358 /* execute the writes immediately */
3359 e1e_flush();
3360}
3361
3341void e1000e_down(struct e1000_adapter *adapter) 3362void e1000e_down(struct e1000_adapter *adapter)
3342{ 3363{
3343 struct net_device *netdev = adapter->netdev; 3364 struct net_device *netdev = adapter->netdev;
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter)
3377 3398
3378 if (!pci_channel_offline(adapter->pdev)) 3399 if (!pci_channel_offline(adapter->pdev))
3379 e1000e_reset(adapter); 3400 e1000e_reset(adapter);
3401
3402 e1000e_flush_descriptors(adapter);
3403
3380 e1000_clean_tx_ring(adapter); 3404 e1000_clean_tx_ring(adapter);
3381 e1000_clean_rx_ring(adapter); 3405 e1000_clean_rx_ring(adapter);
3382 3406
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3765{ 3789{
3766 struct e1000_adapter *adapter = container_of(work, 3790 struct e1000_adapter *adapter = container_of(work,
3767 struct e1000_adapter, update_phy_task); 3791 struct e1000_adapter, update_phy_task);
3792
3793 if (test_bit(__E1000_DOWN, &adapter->state))
3794 return;
3795
3768 e1000_get_phy_info(&adapter->hw); 3796 e1000_get_phy_info(&adapter->hw);
3769} 3797}
3770 3798
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work)
3775static void e1000_update_phy_info(unsigned long data) 3803static void e1000_update_phy_info(unsigned long data)
3776{ 3804{
3777 struct e1000_adapter *adapter = (struct e1000_adapter *) data; 3805 struct e1000_adapter *adapter = (struct e1000_adapter *) data;
3806
3807 if (test_bit(__E1000_DOWN, &adapter->state))
3808 return;
3809
3778 schedule_work(&adapter->update_phy_task); 3810 schedule_work(&adapter->update_phy_task);
3779} 3811}
3780 3812
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work)
4149 u32 link, tctl; 4181 u32 link, tctl;
4150 int tx_pending = 0; 4182 int tx_pending = 0;
4151 4183
4184 if (test_bit(__E1000_DOWN, &adapter->state))
4185 return;
4186
4152 link = e1000e_has_link(adapter); 4187 link = e1000e_has_link(adapter);
4153 if ((netif_carrier_ok(netdev)) && link) { 4188 if ((netif_carrier_ok(netdev)) && link) {
4154 /* Cancel scheduled suspend requests. */ 4189 /* Cancel scheduled suspend requests. */
@@ -4337,19 +4372,12 @@ link_up:
4337 else 4372 else
4338 ew32(ICS, E1000_ICS_RXDMT0); 4373 ew32(ICS, E1000_ICS_RXDMT0);
4339 4374
4375 /* flush pending descriptors to memory before detecting Tx hang */
4376 e1000e_flush_descriptors(adapter);
4377
4340 /* Force detection of hung controller every watchdog period */ 4378 /* Force detection of hung controller every watchdog period */
4341 adapter->detect_tx_hung = 1; 4379 adapter->detect_tx_hung = 1;
4342 4380
4343 /* flush partial descriptors to memory before detecting Tx hang */
4344 if (adapter->flags2 & FLAG2_DMA_BURST) {
4345 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4346 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4347 /*
4348 * no need to flush the writes because the timeout code does
4349 * an er32 first thing
4350 */
4351 }
4352
4353 /* 4381 /*
4354 * With 82571 controllers, LAA may be overwritten due to controller 4382 * With 82571 controllers, LAA may be overwritten due to controller
4355 * reset from the other port. Set the appropriate LAA in RAR[0] 4383 * reset from the other port. Set the appropriate LAA in RAR[0]
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work)
4887 struct e1000_adapter *adapter; 4915 struct e1000_adapter *adapter;
4888 adapter = container_of(work, struct e1000_adapter, reset_task); 4916 adapter = container_of(work, struct e1000_adapter, reset_task);
4889 4917
4918 /* don't run the task if already down */
4919 if (test_bit(__E1000_DOWN, &adapter->state))
4920 return;
4921
4890 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && 4922 if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4891 (adapter->flags & FLAG_RX_RESTART_NOW))) { 4923 (adapter->flags & FLAG_RX_RESTART_NOW))) {
4892 e1000e_dump(adapter); 4924 e1000e_dump(adapter);
@@ -5935,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5935 /* APME bit in EEPROM is mapped to WUC.APME */ 5967 /* APME bit in EEPROM is mapped to WUC.APME */
5936 eeprom_data = er32(WUC); 5968 eeprom_data = er32(WUC);
5937 eeprom_apme_mask = E1000_WUC_APME; 5969 eeprom_apme_mask = E1000_WUC_APME;
5938 if (eeprom_data & E1000_WUC_PHY_WAKE) 5970 if ((hw->mac.type > e1000_ich10lan) &&
5971 (eeprom_data & E1000_WUC_PHY_WAKE))
5939 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; 5972 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
5940 } else if (adapter->flags & FLAG_APME_IN_CTRL3) { 5973 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
5941 if (adapter->flags & FLAG_APME_CHECK_PORT_B && 5974 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
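
Editor's note on the e1000e hunks: each deferred task (hang printout, downshift workaround, PHY update, watchdog, reset task) now bails out if the __E1000_DOWN bit is already set, so work queued just before teardown cannot touch hardware that is going away. A rough userspace analogue, with a C11 atomic flag standing in for test_bit() and the names adapter_down and watchdog_task invented for the example:

#include <stdatomic.h>
#include <stdio.h>

static atomic_bool adapter_down = false;   /* stands in for the __E1000_DOWN bit */

/* Deferred task: do nothing if teardown has already started. */
static void watchdog_task(void)
{
	if (atomic_load(&adapter_down))
		return;
	printf("watchdog: touching hardware\n");
}

int main(void)
{
	watchdog_task();                        /* runs normally */
	atomic_store(&adapter_down, true);      /* teardown sets the flag */
	watchdog_task();                        /* silently returns */
	return 0;
}

The same early-return guard at the top of every worker is cheaper and simpler than trying to cancel every possible work item from the teardown path.
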
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 2a71373719ae..cd0282d5d40f 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = {
74 }, { 74 }, {
75 .name = "imx28-fec", 75 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, 76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 } 77 },
78 { }
78}; 79};
79 80
80static unsigned char macaddr[ETH_ALEN]; 81static unsigned char macaddr[ETH_ALEN];
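
Editor's note on the fec.c hunk: the platform_device_id table gains an empty { } entry. The platform bus walks such tables until it reaches an all-zero sentinel, so a table without one runs past its end. A tiny runnable illustration of sentinel-terminated table walking; the devtype struct and values are made up for the example:

#include <stdio.h>
#include <string.h>

struct devtype {
	const char *name;
	unsigned long driver_data;
};

static const struct devtype fec_devtype[] = {
	{ "fec",       0 },
	{ "imx28-fec", 3 },
	{ }                          /* sentinel: terminates the walk below */
};

static const struct devtype *lookup(const char *name)
{
	const struct devtype *d;

	for (d = fec_devtype; d->name; d++)  /* stop at the zeroed entry */
		if (!strcmp(d->name, name))
			return d;
	return NULL;
}

int main(void)
{
	const struct devtype *d = lookup("imx28-fec");

	printf("%s -> %lu\n", d ? d->name : "none", d ? d->driver_data : 0);
	return 0;
}
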
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index af09296ef0dd..9c0b1bac6af6 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5645 goto out_error; 5645 goto out_error;
5646 } 5646 }
5647 5647
5648 netif_carrier_off(dev);
5649
5648 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", 5650 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5649 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); 5651 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5650 5652
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index 74486a8b009a..af3822f9ea9a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr)
220 * The parameter rar_count will usually be hw->mac.rar_entry_count 220 * The parameter rar_count will usually be hw->mac.rar_entry_count
221 * unless there are workarounds that change this. 221 * unless there are workarounds that change this.
222 **/ 222 **/
223void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, 223static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw,
224 u8 *mc_addr_list, u32 mc_addr_count, 224 u8 *mc_addr_list, u32 mc_addr_count,
225 u32 rar_used_count, u32 rar_count) 225 u32 rar_used_count, u32 rar_count)
226{ 226{
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 8753980668c7..c54a88274d51 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
159 struct scatterlist *sg; 159 struct scatterlist *sg;
160 unsigned int i, j, dmacount; 160 unsigned int i, j, dmacount;
161 unsigned int len; 161 unsigned int len;
162 static const unsigned int bufflen = 4096; 162 static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
163 unsigned int firstoff = 0; 163 unsigned int firstoff = 0;
164 unsigned int lastsize; 164 unsigned int lastsize;
165 unsigned int thisoff = 0; 165 unsigned int thisoff = 0;
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
254 /* only the last buffer may have non-full bufflen */ 254 /* only the last buffer may have non-full bufflen */
255 lastsize = thisoff + thislen; 255 lastsize = thisoff + thislen;
256 256
257 /*
258 * lastsize can not be buffer len.
259 * If it is then adding another buffer with lastsize = 1.
260 */
261 if (lastsize == bufflen) {
262 if (j >= IXGBE_BUFFCNT_MAX) {
263 e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
264 "not enough user buffers. We need an extra "
265 "buffer because lastsize is bufflen.\n",
266 xid, i, j, dmacount, (u64)addr);
267 goto out_noddp_free;
268 }
269
270 ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
271 j++;
272 lastsize = 1;
273 }
274
257 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); 275 fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
258 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); 276 fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
259 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); 277 fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
532 e_err(drv, "failed to allocated FCoE DDP pool\n"); 550 e_err(drv, "failed to allocated FCoE DDP pool\n");
533 551
534 spin_lock_init(&fcoe->lock); 552 spin_lock_init(&fcoe->lock);
553
554 /* Extra buffer to be shared by all DDPs for HW work around */
555 fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
556 if (fcoe->extra_ddp_buffer == NULL) {
557 e_err(drv, "failed to allocated extra DDP buffer\n");
558 goto out_extra_ddp_buffer_alloc;
559 }
560
561 fcoe->extra_ddp_buffer_dma =
562 dma_map_single(&adapter->pdev->dev,
563 fcoe->extra_ddp_buffer,
564 IXGBE_FCBUFF_MIN,
565 DMA_FROM_DEVICE);
566 if (dma_mapping_error(&adapter->pdev->dev,
567 fcoe->extra_ddp_buffer_dma)) {
568 e_err(drv, "failed to map extra DDP buffer\n");
569 goto out_extra_ddp_buffer_dma;
570 }
535 } 571 }
536 572
537 /* Enable L2 eth type filter for FCoE */ 573 /* Enable L2 eth type filter for FCoE */
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
581 } 617 }
582 } 618 }
583#endif 619#endif
620
621 return;
622
623out_extra_ddp_buffer_dma:
624 kfree(fcoe->extra_ddp_buffer);
625out_extra_ddp_buffer_alloc:
626 pci_pool_destroy(fcoe->pool);
627 fcoe->pool = NULL;
584} 628}
585 629
586/** 630/**
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
600 if (fcoe->pool) { 644 if (fcoe->pool) {
601 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) 645 for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
602 ixgbe_fcoe_ddp_put(adapter->netdev, i); 646 ixgbe_fcoe_ddp_put(adapter->netdev, i);
647 dma_unmap_single(&adapter->pdev->dev,
648 fcoe->extra_ddp_buffer_dma,
649 IXGBE_FCBUFF_MIN,
650 DMA_FROM_DEVICE);
651 kfree(fcoe->extra_ddp_buffer);
603 pci_pool_destroy(fcoe->pool); 652 pci_pool_destroy(fcoe->pool);
604 fcoe->pool = NULL; 653 fcoe->pool = NULL;
605 } 654 }
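
Editor's note on the ixgbe FCoE hunks: setup allocates one shared extra DDP buffer, maps it for DMA, and unwinds both steps with goto labels if either fails; cleanup unmaps and frees it in reverse order. A small runnable sketch of the acquire-in-order / release-in-reverse pattern, with plain malloc standing in for the kernel allocation and DMA-mapping calls and an invented fcoe_ctx struct:

#include <stdio.h>
#include <stdlib.h>

struct fcoe_ctx {
	void *pool;
	void *extra_ddp_buffer;
};

static int fcoe_setup(struct fcoe_ctx *f, size_t bufflen)
{
	f->pool = malloc(4096);                 /* stands in for pci_pool_create() */
	if (!f->pool)
		goto out_pool;

	f->extra_ddp_buffer = malloc(bufflen);  /* stands in for kmalloc() + dma_map_single() */
	if (!f->extra_ddp_buffer)
		goto out_extra_buffer;

	return 0;                               /* success: both resources held */

out_extra_buffer:
	free(f->pool);                          /* undo the earlier step */
	f->pool = NULL;
out_pool:
	return -1;
}

static void fcoe_cleanup(struct fcoe_ctx *f)
{
	/* release in reverse order of acquisition */
	free(f->extra_ddp_buffer);
	free(f->pool);
}

int main(void)
{
	struct fcoe_ctx f = { 0 };

	if (fcoe_setup(&f, 4096) == 0) {
		puts("setup ok");
		fcoe_cleanup(&f);
	}
	return 0;
}
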
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h
index 4bc2c551c8db..65cc8fb14fe7 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ixgbe/ixgbe_fcoe.h
@@ -70,6 +70,8 @@ struct ixgbe_fcoe {
70 spinlock_t lock; 70 spinlock_t lock;
71 struct pci_pool *pool; 71 struct pci_pool *pool;
72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; 72 struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX];
73 unsigned char *extra_ddp_buffer;
74 dma_addr_t extra_ddp_buffer_dma;
73}; 75};
74 76
75#endif /* _IXGBE_FCOE_H */ 77#endif /* _IXGBE_FCOE_H */
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index fbae703b46d7..30f9ccfb4f87 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter)
3728 * We need to try and force an autonegotiation 3728 * We need to try and force an autonegotiation
3729 * session, then bring up link. 3729 * session, then bring up link.
3730 */ 3730 */
3731 hw->mac.ops.setup_sfp(hw); 3731 if (hw->mac.ops.setup_sfp)
3732 hw->mac.ops.setup_sfp(hw);
3732 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 3733 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
3733 schedule_work(&adapter->multispeed_fiber_task); 3734 schedule_work(&adapter->multispeed_fiber_task);
3734 } else { 3735 } else {
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work)
5968 unregister_netdev(adapter->netdev); 5969 unregister_netdev(adapter->netdev);
5969 return; 5970 return;
5970 } 5971 }
5971 hw->mac.ops.setup_sfp(hw); 5972 if (hw->mac.ops.setup_sfp)
5973 hw->mac.ops.setup_sfp(hw);
5972 5974
5973 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) 5975 if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK))
5974 /* This will also work for DA Twinax connections */ 5976 /* This will also work for DA Twinax connections */
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index f69e73e2191e..79ccb54ab00c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp)
260 for (i = 0; i < PHY_MAX_ADDR; i++) 260 for (i = 0; i < PHY_MAX_ADDR; i++)
261 bp->mii_bus->irq[i] = PHY_POLL; 261 bp->mii_bus->irq[i] = PHY_POLL;
262 262
263 platform_set_drvdata(bp->dev, bp->mii_bus); 263 dev_set_drvdata(&bp->dev->dev, bp->mii_bus);
264 264
265 if (mdiobus_register(bp->mii_bus)) 265 if (mdiobus_register(bp->mii_bus))
266 goto err_out_free_mdio_irq; 266 goto err_out_free_mdio_irq;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5933621ac3ff..fc27a9926d9e 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q,
528 vnet_hdr_len = q->vnet_hdr_sz; 528 vnet_hdr_len = q->vnet_hdr_sz;
529 529
530 err = -EINVAL; 530 err = -EINVAL;
531 if ((len -= vnet_hdr_len) < 0) 531 if (len < vnet_hdr_len)
532 goto err; 532 goto err;
533 len -= vnet_hdr_len;
533 534
534 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, 535 err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
535 sizeof(vnet_hdr)); 536 sizeof(vnet_hdr));
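
Editor's note on the macvtap hunk: the header-length test changes from "subtract, then check for negative" to "check, then subtract", so the length is only reduced once it is known to be large enough and the code no longer leans on signed underflow behaviour. A short runnable illustration with size_t, where the names are illustrative:

#include <stdio.h>

static int consume_header(size_t len, size_t hdr_len, size_t *payload_len)
{
	/* compare first; subtracting first would wrap around for an unsigned type */
	if (len < hdr_len)
		return -1;
	*payload_len = len - hdr_len;
	return 0;
}

int main(void)
{
	size_t payload;

	if (consume_header(10, 12, &payload) < 0)
		puts("rejected: buffer shorter than its header");
	if (consume_header(100, 12, &payload) == 0)
		printf("payload length %zu\n", payload);
	return 0;
}
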
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h
index a0c26a99520f..e1e33c80fb25 100644
--- a/drivers/net/pch_gbe/pch_gbe.h
+++ b/drivers/net/pch_gbe/pch_gbe.h
@@ -73,7 +73,7 @@ struct pch_gbe_regs {
73 struct pch_gbe_regs_mac_adr mac_adr[16]; 73 struct pch_gbe_regs_mac_adr mac_adr[16];
74 u32 ADDR_MASK; 74 u32 ADDR_MASK;
75 u32 MIIM; 75 u32 MIIM;
76 u32 reserve2; 76 u32 MAC_ADDR_LOAD;
77 u32 RGMII_ST; 77 u32 RGMII_ST;
78 u32 RGMII_CTRL; 78 u32 RGMII_CTRL;
79 u32 reserve3[3]; 79 u32 reserve3[3];
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 4c9a7d4f3fca..b99e90aca37d 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION;
29#define PCH_GBE_SHORT_PKT 64 29#define PCH_GBE_SHORT_PKT 64
30#define DSC_INIT16 0xC000 30#define DSC_INIT16 0xC000
31#define PCH_GBE_DMA_ALIGN 0 31#define PCH_GBE_DMA_ALIGN 0
32#define PCH_GBE_DMA_PADDING 2
32#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ 33#define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */
33#define PCH_GBE_COPYBREAK_DEFAULT 256 34#define PCH_GBE_COPYBREAK_DEFAULT 256
34#define PCH_GBE_PCI_BAR 1 35#define PCH_GBE_PCI_BAR 1
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT;
88static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); 89static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg);
89static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, 90static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg,
90 int data); 91 int data);
92
93inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw)
94{
95 iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD);
96}
97
91/** 98/**
92 * pch_gbe_mac_read_mac_addr - Read MAC address 99 * pch_gbe_mac_read_mac_addr - Read MAC address
93 * @hw: Pointer to the HW structure 100 * @hw: Pointer to the HW structure
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1365 struct pch_gbe_buffer *buffer_info; 1372 struct pch_gbe_buffer *buffer_info;
1366 struct pch_gbe_rx_desc *rx_desc; 1373 struct pch_gbe_rx_desc *rx_desc;
1367 u32 length; 1374 u32 length;
1368 unsigned char tmp_packet[ETH_HLEN];
1369 unsigned int i; 1375 unsigned int i;
1370 unsigned int cleaned_count = 0; 1376 unsigned int cleaned_count = 0;
1371 bool cleaned = false; 1377 bool cleaned = false;
1372 struct sk_buff *skb; 1378 struct sk_buff *skb, *new_skb;
1373 u8 dma_status; 1379 u8 dma_status;
1374 u16 gbec_status; 1380 u16 gbec_status;
1375 u32 tcp_ip_status; 1381 u32 tcp_ip_status;
1376 u8 skb_copy_flag = 0;
1377 u8 skb_padding_flag = 0;
1378 1382
1379 i = rx_ring->next_to_clean; 1383 i = rx_ring->next_to_clean;
1380 1384
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter,
1418 pr_err("Receive CRC Error\n"); 1422 pr_err("Receive CRC Error\n");
1419 } else { 1423 } else {
1420 /* get receive length */ 1424 /* get receive length */
1421 /* length convert[-3], padding[-2] */ 1425 /* length convert[-3] */
1422 length = (rx_desc->rx_words_eob) - 3 - 2; 1426 length = (rx_desc->rx_words_eob) - 3;
1423 1427
1424 /* Decide the data conversion method */ 1428 /* Decide the data conversion method */
1425 if (!adapter->rx_csum) { 1429 if (!adapter->rx_csum) {
1426 /* [Header:14][payload] */ 1430 /* [Header:14][payload] */
1427 skb_padding_flag = 0; 1431 if (NET_IP_ALIGN) {
1428 skb_copy_flag = 1; 1432 /* Because alignment differs,
1433 * the new_skb is newly allocated,
1434 * and data is copied to new_skb.*/
1435 new_skb = netdev_alloc_skb(netdev,
1436 length + NET_IP_ALIGN);
1437 if (!new_skb) {
1438 /* dorrop error */
1439 pr_err("New skb allocation "
1440 "Error\n");
1441 goto dorrop;
1442 }
1443 skb_reserve(new_skb, NET_IP_ALIGN);
1444 memcpy(new_skb->data, skb->data,
1445 length);
1446 skb = new_skb;
1447 } else {
1448 /* DMA buffer is used as SKB as it is.*/
1449 buffer_info->skb = NULL;
1450 }
1429 } else { 1451 } else {
1430 /* [Header:14][padding:2][payload] */ 1452 /* [Header:14][padding:2][payload] */
1431 skb_padding_flag = 1; 1453 /* The length includes padding length */
1432 if (length < copybreak) 1454 length = length - PCH_GBE_DMA_PADDING;
1433 skb_copy_flag = 1; 1455 if ((length < copybreak) ||
1434 else 1456 (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) {
1435 skb_copy_flag = 0; 1457 /* Because alignment differs,
1436 } 1458 * the new_skb is newly allocated,
1437 1459 * and data is copied to new_skb.
1438 /* Data conversion */ 1460 * Padding data is deleted
1439 if (skb_copy_flag) { /* recycle skb */ 1461 * at the time of a copy.*/
1440 struct sk_buff *new_skb; 1462 new_skb = netdev_alloc_skb(netdev,
1441 new_skb = 1463 length + NET_IP_ALIGN);
1442 netdev_alloc_skb(netdev, 1464 if (!new_skb) {
1443 length + NET_IP_ALIGN); 1465 /* dorrop error */
1444 if (new_skb) { 1466 pr_err("New skb allocation "
1445 if (!skb_padding_flag) { 1467 "Error\n");
1446 skb_reserve(new_skb, 1468 goto dorrop;
1447 NET_IP_ALIGN);
1448 } 1469 }
1470 skb_reserve(new_skb, NET_IP_ALIGN);
1449 memcpy(new_skb->data, skb->data, 1471 memcpy(new_skb->data, skb->data,
1450 length); 1472 ETH_HLEN);
1451 /* save the skb 1473 memcpy(&new_skb->data[ETH_HLEN],
1452 * in buffer_info as good */ 1474 &skb->data[ETH_HLEN +
1475 PCH_GBE_DMA_PADDING],
1476 length - ETH_HLEN);
1453 skb = new_skb; 1477 skb = new_skb;
1454 } else if (!skb_padding_flag) { 1478 } else {
1455 /* dorrop error */ 1479 /* Padding data is deleted
1456 pr_err("New skb allocation Error\n"); 1480 * by moving header data.*/
1457 goto dorrop; 1481 memmove(&skb->data[PCH_GBE_DMA_PADDING],
1482 &skb->data[0], ETH_HLEN);
1483 skb_reserve(skb, NET_IP_ALIGN);
1484 buffer_info->skb = NULL;
1458 } 1485 }
1459 } else {
1460 buffer_info->skb = NULL;
1461 } 1486 }
1462 if (skb_padding_flag) { 1487 /* The length includes FCS length */
1463 memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); 1488 length = length - ETH_FCS_LEN;
1464 memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0],
1465 ETH_HLEN);
1466 skb_reserve(skb, NET_IP_ALIGN);
1467
1468 }
1469
1470 /* update status of driver */ 1489 /* update status of driver */
1471 adapter->stats.rx_bytes += length; 1490 adapter->stats.rx_bytes += length;
1472 adapter->stats.rx_packets++; 1491 adapter->stats.rx_packets++;
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2318 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; 2337 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2319 pch_gbe_set_ethtool_ops(netdev); 2338 pch_gbe_set_ethtool_ops(netdev);
2320 2339
2340 pch_gbe_mac_load_mac_addr(&adapter->hw);
2321 pch_gbe_mac_reset_hw(&adapter->hw); 2341 pch_gbe_mac_reset_hw(&adapter->hw);
2322 2342
2323 /* setup the private structure */ 2343 /* setup the private structure */
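
Editor's note on the pch_gbe receive rewrite: the hardware inserts 2 bytes of padding between the 14-byte Ethernet header and the payload, and the new code either copies into a freshly aligned skb or slides the header forward with memmove() so the padding disappears. A small runnable sketch of the memmove variant; the buffer layout is invented for the example, ETH_HLEN is 14 as in the kernel:

#include <stdio.h>
#include <string.h>

#define ETH_HLEN     14
#define DMA_PADDING   2

/*
 * Frame as delivered by the hardware:
 *   [14-byte Ethernet header][2 bytes padding][payload...]
 * Slide the header forward over the padding so the result is
 *   [2 dead bytes][14-byte header][payload...]
 * and return a pointer just past the dead bytes.
 */
static unsigned char *strip_padding(unsigned char *frame)
{
	memmove(frame + DMA_PADDING, frame, ETH_HLEN);
	return frame + DMA_PADDING;
}

int main(void)
{
	unsigned char frame[32] = {
		/* header  */ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0x08, 0x00,
		/* padding */ 0xee, 0xee,
		/* payload */ 0x45, 0x00,
	};
	unsigned char *pkt = strip_padding(frame);

	printf("header starts with %02x, payload starts with %02x\n",
	       pkt[0], pkt[ETH_HLEN]);
	return 0;
}

Moving 14 bytes once per packet is cheap compared with copying the whole frame, which is why the driver only allocates a new skb when the alignment requirements force it to.
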
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 9226cda4d054..530ab5a10bd3 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), 692 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01),
693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), 693 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05),
694 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), 695 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
695 PCMCIA_DEVICE_NULL, 696 PCMCIA_DEVICE_NULL,
696}; 697};
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 27e6f6d43cac..e3ebd90ae651 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
49#include <asm/processor.h> 49#include <asm/processor.h>
50 50
51#define DRV_NAME "r6040" 51#define DRV_NAME "r6040"
52#define DRV_VERSION "0.26" 52#define DRV_VERSION "0.27"
53#define DRV_RELDATE "30May2010" 53#define DRV_RELDATE "23Feb2011"
54 54
55/* PHY CHIP Address */ 55/* PHY CHIP Address */
56#define PHY1_ADDR 1 /* For MAC1 */ 56#define PHY1_ADDR 1 /* For MAC1 */
@@ -69,6 +69,8 @@
69 69
70/* MAC registers */ 70/* MAC registers */
71#define MCR0 0x00 /* Control register 0 */ 71#define MCR0 0x00 /* Control register 0 */
72#define MCR0_PROMISC 0x0020 /* Promiscuous mode */
73#define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */
72#define MCR1 0x04 /* Control register 1 */ 74#define MCR1 0x04 /* Control register 1 */
73#define MAC_RST 0x0001 /* Reset the MAC */ 75#define MAC_RST 0x0001 /* Reset the MAC */
74#define MBCR 0x08 /* Bus control */ 76#define MBCR 0x08 /* Bus control */
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev)
851{ 853{
852 struct r6040_private *lp = netdev_priv(dev); 854 struct r6040_private *lp = netdev_priv(dev);
853 void __iomem *ioaddr = lp->base; 855 void __iomem *ioaddr = lp->base;
854 u16 *adrp;
855 u16 reg;
856 unsigned long flags; 856 unsigned long flags;
857 struct netdev_hw_addr *ha; 857 struct netdev_hw_addr *ha;
858 int i; 858 int i;
859 u16 *adrp;
860 u16 hash_table[4] = { 0 };
861
862 spin_lock_irqsave(&lp->lock, flags);
859 863
860 /* MAC Address */ 864 /* Keep our MAC Address */
861 adrp = (u16 *)dev->dev_addr; 865 adrp = (u16 *)dev->dev_addr;
862 iowrite16(adrp[0], ioaddr + MID_0L); 866 iowrite16(adrp[0], ioaddr + MID_0L);
863 iowrite16(adrp[1], ioaddr + MID_0M); 867 iowrite16(adrp[1], ioaddr + MID_0M);
864 iowrite16(adrp[2], ioaddr + MID_0H); 868 iowrite16(adrp[2], ioaddr + MID_0H);
865 869
866 /* Promiscous Mode */
867 spin_lock_irqsave(&lp->lock, flags);
868
869 /* Clear AMCP & PROM bits */ 870 /* Clear AMCP & PROM bits */
870 reg = ioread16(ioaddr) & ~0x0120; 871 lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN);
871 if (dev->flags & IFF_PROMISC) {
872 reg |= 0x0020;
873 lp->mcr0 |= 0x0020;
874 }
875 /* Too many multicast addresses
876 * accept all traffic */
877 else if ((netdev_mc_count(dev) > MCAST_MAX) ||
878 (dev->flags & IFF_ALLMULTI))
879 reg |= 0x0020;
880 872
881 iowrite16(reg, ioaddr); 873 /* Promiscuous mode */
882 spin_unlock_irqrestore(&lp->lock, flags); 874 if (dev->flags & IFF_PROMISC)
875 lp->mcr0 |= MCR0_PROMISC;
883 876
884 /* Build the hash table */ 877 /* Enable multicast hash table function to
885 if (netdev_mc_count(dev) > MCAST_MAX) { 878 * receive all multicast packets. */
886 u16 hash_table[4]; 879 else if (dev->flags & IFF_ALLMULTI) {
887 u32 crc; 880 lp->mcr0 |= MCR0_HASH_EN;
888 881
889 for (i = 0; i < 4; i++) 882 for (i = 0; i < MCAST_MAX ; i++) {
890 hash_table[i] = 0; 883 iowrite16(0, ioaddr + MID_1L + 8 * i);
884 iowrite16(0, ioaddr + MID_1M + 8 * i);
885 iowrite16(0, ioaddr + MID_1H + 8 * i);
886 }
891 887
888 for (i = 0; i < 4; i++)
889 hash_table[i] = 0xffff;
890 }
891 /* Use internal multicast address registers if the number of
892 * multicast addresses is not greater than MCAST_MAX. */
893 else if (netdev_mc_count(dev) <= MCAST_MAX) {
894 i = 0;
892 netdev_for_each_mc_addr(ha, dev) { 895 netdev_for_each_mc_addr(ha, dev) {
893 char *addrs = ha->addr; 896 u16 *adrp = (u16 *) ha->addr;
897 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
898 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
899 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
900 i++;
901 }
902 while (i < MCAST_MAX) {
903 iowrite16(0, ioaddr + MID_1L + 8 * i);
904 iowrite16(0, ioaddr + MID_1M + 8 * i);
905 iowrite16(0, ioaddr + MID_1H + 8 * i);
906 i++;
907 }
908 }
909 /* Otherwise, Enable multicast hash table function. */
910 else {
911 u32 crc;
894 912
895 if (!(*addrs & 1)) 913 lp->mcr0 |= MCR0_HASH_EN;
896 continue; 914
915 for (i = 0; i < MCAST_MAX ; i++) {
916 iowrite16(0, ioaddr + MID_1L + 8 * i);
917 iowrite16(0, ioaddr + MID_1M + 8 * i);
918 iowrite16(0, ioaddr + MID_1H + 8 * i);
919 }
897 920
898 crc = ether_crc_le(6, addrs); 921 /* Build multicast hash table */
922 netdev_for_each_mc_addr(ha, dev) {
923 u8 *addrs = ha->addr;
924
925 crc = ether_crc(ETH_ALEN, addrs);
899 crc >>= 26; 926 crc >>= 26;
900 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 927 hash_table[crc >> 4] |= 1 << (crc & 0xf);
901 } 928 }
902 /* Fill the MAC hash tables with their values */ 929 }
930
931 iowrite16(lp->mcr0, ioaddr + MCR0);
932
933 /* Fill the MAC hash tables with their values */
934 if (lp->mcr0 && MCR0_HASH_EN) {
903 iowrite16(hash_table[0], ioaddr + MAR0); 935 iowrite16(hash_table[0], ioaddr + MAR0);
904 iowrite16(hash_table[1], ioaddr + MAR1); 936 iowrite16(hash_table[1], ioaddr + MAR1);
905 iowrite16(hash_table[2], ioaddr + MAR2); 937 iowrite16(hash_table[2], ioaddr + MAR2);
906 iowrite16(hash_table[3], ioaddr + MAR3); 938 iowrite16(hash_table[3], ioaddr + MAR3);
907 } 939 }
908 /* Multicast Address 1~4 case */ 940
909 i = 0; 941 spin_unlock_irqrestore(&lp->lock, flags);
910 netdev_for_each_mc_addr(ha, dev) {
911 if (i >= MCAST_MAX)
912 break;
913 adrp = (u16 *) ha->addr;
914 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
915 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
916 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
917 i++;
918 }
919 while (i < MCAST_MAX) {
920 iowrite16(0xffff, ioaddr + MID_1L + 8 * i);
921 iowrite16(0xffff, ioaddr + MID_1M + 8 * i);
922 iowrite16(0xffff, ioaddr + MID_1H + 8 * i);
923 i++;
924 }
925} 942}
926 943
927static void netdev_get_drvinfo(struct net_device *dev, 944static void netdev_get_drvinfo(struct net_device *dev,
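
Editor's note on the r6040 multicast rewrite: with few addresses the driver loads them into exact-match registers; with more it enables the hash filter and sets one bit per address derived from the top bits of the Ethernet CRC. A tiny runnable sketch of the slot/bit mapping used in the hunk; the CRC value is passed in here, whereas the driver obtains it from ether_crc(ETH_ALEN, addr):

#include <stdio.h>
#include <stdint.h>

/*
 * Keep the top 6 bits of the 32-bit CRC: the upper 2 bits select one of
 * four 16-bit hash registers, the lower 4 bits select the bit within it.
 */
static void hash_set(uint16_t hash_table[4], uint32_t crc)
{
	crc >>= 26;                              /* top 6 bits */
	hash_table[crc >> 4] |= 1u << (crc & 0xf);
}

int main(void)
{
	uint16_t hash_table[4] = { 0 };

	hash_set(hash_table, 0xDEADBEEF);        /* example CRC values */
	hash_set(hash_table, 0x12345678);

	for (int i = 0; i < 4; i++)
		printf("MAR%d = 0x%04x\n", i, hash_table[i]);
	return 0;
}
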
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 59ccf0c5c610..7ffdb80adf40 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -25,6 +25,7 @@
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/firmware.h> 27#include <linux/firmware.h>
28#include <linux/pci-aspm.h>
28 29
29#include <asm/system.h> 30#include <asm/system.h>
30#include <asm/io.h> 31#include <asm/io.h>
@@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
617 } 618 }
618} 619}
619 620
620static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) 621static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
621{ 622{
623 void __iomem *ioaddr = tp->mmio_addr;
622 int i; 624 int i;
623 625
624 RTL_W8(ERIDR, cmd); 626 RTL_W8(ERIDR, cmd);
@@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
630 break; 632 break;
631 } 633 }
632 634
633 ocp_write(ioaddr, 0x1, 0x30, 0x00000001); 635 ocp_write(tp, 0x1, 0x30, 0x00000001);
634} 636}
635 637
636#define OOB_CMD_RESET 0x00 638#define OOB_CMD_RESET 0x00
@@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2868{ 2870{
2869 void __iomem *ioaddr = tp->mmio_addr; 2871 void __iomem *ioaddr = tp->mmio_addr;
2870 2872
2871 if (tp->mac_version == RTL_GIGA_MAC_VER_27) 2873 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2874 (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
2875 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
2872 return; 2876 return;
2877 }
2873 2878
2874 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || 2879 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
2875 (tp->mac_version == RTL_GIGA_MAC_VER_24)) && 2880 (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
@@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
2891 switch (tp->mac_version) { 2896 switch (tp->mac_version) {
2892 case RTL_GIGA_MAC_VER_25: 2897 case RTL_GIGA_MAC_VER_25:
2893 case RTL_GIGA_MAC_VER_26: 2898 case RTL_GIGA_MAC_VER_26:
2899 case RTL_GIGA_MAC_VER_27:
2900 case RTL_GIGA_MAC_VER_28:
2894 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); 2901 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
2895 break; 2902 break;
2896 } 2903 }
@@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
2900{ 2907{
2901 void __iomem *ioaddr = tp->mmio_addr; 2908 void __iomem *ioaddr = tp->mmio_addr;
2902 2909
2903 if (tp->mac_version == RTL_GIGA_MAC_VER_27) 2910 if (((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
2911 (tp->mac_version == RTL_GIGA_MAC_VER_28)) &&
2912 (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) {
2904 return; 2913 return;
2914 }
2905 2915
2906 switch (tp->mac_version) { 2916 switch (tp->mac_version) {
2907 case RTL_GIGA_MAC_VER_25: 2917 case RTL_GIGA_MAC_VER_25:
2908 case RTL_GIGA_MAC_VER_26: 2918 case RTL_GIGA_MAC_VER_26:
2919 case RTL_GIGA_MAC_VER_27:
2920 case RTL_GIGA_MAC_VER_28:
2909 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); 2921 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
2910 break; 2922 break;
2911 } 2923 }
@@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3009 mii->reg_num_mask = 0x1f; 3021 mii->reg_num_mask = 0x1f;
3010 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); 3022 mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
3011 3023
3024 /* disable ASPM completely as that cause random device stop working
3025 * problems as well as full system hangs for some PCIe devices users */
3026 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
3027 PCIE_LINK_STATE_CLKPM);
3028
3012 /* enable device (incl. PCI PM wakeup and hotplug setup) */ 3029 /* enable device (incl. PCI PM wakeup and hotplug setup) */
3013 rc = pci_enable_device(pdev); 3030 rc = pci_enable_device(pdev);
3014 if (rc < 0) { 3031 if (rc < 0) {
@@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3042 goto err_out_mwi_2; 3059 goto err_out_mwi_2;
3043 } 3060 }
3044 3061
3045 tp->cp_cmd = PCIMulRW | RxChkSum; 3062 tp->cp_cmd = RxChkSum;
3046 3063
3047 if ((sizeof(dma_addr_t) > 4) && 3064 if ((sizeof(dma_addr_t) > 4) &&
3048 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { 3065 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
@@ -3190,6 +3207,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3190 if (pci_dev_run_wake(pdev)) 3207 if (pci_dev_run_wake(pdev))
3191 pm_runtime_put_noidle(&pdev->dev); 3208 pm_runtime_put_noidle(&pdev->dev);
3192 3209
3210 netif_carrier_off(dev);
3211
3193out: 3212out:
3194 return rc; 3213 return rc;
3195 3214
@@ -3316,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
3316 /* Disable interrupts */ 3335 /* Disable interrupts */
3317 rtl8169_irq_mask_and_ack(ioaddr); 3336 rtl8169_irq_mask_and_ack(ioaddr);
3318 3337
3319 if (tp->mac_version == RTL_GIGA_MAC_VER_28) { 3338 if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3339 tp->mac_version == RTL_GIGA_MAC_VER_28) {
3320 while (RTL_R8(TxPoll) & NPQ) 3340 while (RTL_R8(TxPoll) & NPQ)
3321 udelay(20); 3341 udelay(20);
3322 3342
@@ -3845,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
3845 Cxpl_dbg_sel | \ 3865 Cxpl_dbg_sel | \
3846 ASF | \ 3866 ASF | \
3847 PktCntrDisable | \ 3867 PktCntrDisable | \
3848 PCIDAC | \ 3868 Mac_dbgo_sel)
3849 PCIMulRW)
3850 3869
3851static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) 3870static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3852{ 3871{
@@ -3876,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3876 if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) 3895 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
3877 RTL_W8(Config1, cfg1 & ~LEDS0); 3896 RTL_W8(Config1, cfg1 & ~LEDS0);
3878 3897
3879 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3880
3881 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); 3898 rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
3882} 3899}
3883 3900
@@ -3889,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3889 3906
3890 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); 3907 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
3891 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 3908 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3892
3893 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK);
3894} 3909}
3895 3910
3896static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) 3911static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev)
@@ -3916,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev)
3916 } 3931 }
3917 } 3932 }
3918 3933
3934 RTL_W8(Cfg9346, Cfg9346_Unlock);
3935
3919 switch (tp->mac_version) { 3936 switch (tp->mac_version) {
3920 case RTL_GIGA_MAC_VER_07: 3937 case RTL_GIGA_MAC_VER_07:
3921 rtl_hw_start_8102e_1(ioaddr, pdev); 3938 rtl_hw_start_8102e_1(ioaddr, pdev);
@@ -3930,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev)
3930 break; 3947 break;
3931 } 3948 }
3932 3949
3933 RTL_W8(Cfg9346, Cfg9346_Unlock); 3950 RTL_W8(Cfg9346, Cfg9346_Lock);
3934 3951
3935 RTL_W8(MaxTxPacketSize, TxPacketMax); 3952 RTL_W8(MaxTxPacketSize, TxPacketMax);
3936 3953
3937 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 3954 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3938 3955
3939 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; 3956 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
3940
3941 RTL_W16(CPlusCmd, tp->cp_cmd); 3957 RTL_W16(CPlusCmd, tp->cp_cmd);
3942 3958
3943 RTL_W16(IntrMitigate, 0x0000); 3959 RTL_W16(IntrMitigate, 0x0000);
@@ -3947,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev)
3947 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 3963 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3948 rtl_set_rx_tx_config_registers(tp); 3964 rtl_set_rx_tx_config_registers(tp);
3949 3965
3950 RTL_W8(Cfg9346, Cfg9346_Lock);
3951
3952 RTL_R8(IntrMask); 3966 RTL_R8(IntrMask);
3953 3967
3954 rtl_set_rx_mode(dev); 3968 rtl_set_rx_mode(dev);
3955 3969
3956 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3957
3958 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); 3970 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
3959 3971
3960 RTL_W16(IntrMask, tp->intr_event); 3972 RTL_W16(IntrMask, tp->intr_event);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 0e8bb19ed60d..ca886d98bdc7 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
569 struct ethtool_test *test, u64 *data) 569 struct ethtool_test *test, u64 *data)
570{ 570{
571 struct efx_nic *efx = netdev_priv(net_dev); 571 struct efx_nic *efx = netdev_priv(net_dev);
572 struct efx_self_tests efx_tests; 572 struct efx_self_tests *efx_tests;
573 int already_up; 573 int already_up;
574 int rc; 574 int rc = -ENOMEM;
575
576 efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
577 if (!efx_tests)
578 goto fail;
579
575 580
576 ASSERT_RTNL(); 581 ASSERT_RTNL();
577 if (efx->state != STATE_RUNNING) { 582 if (efx->state != STATE_RUNNING) {
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
589 if (rc) { 594 if (rc) {
590 netif_err(efx, drv, efx->net_dev, 595 netif_err(efx, drv, efx->net_dev,
591 "failed opening device.\n"); 596 "failed opening device.\n");
592 goto fail2; 597 goto fail1;
593 } 598 }
594 } 599 }
595 600
596 memset(&efx_tests, 0, sizeof(efx_tests)); 601 rc = efx_selftest(efx, efx_tests, test->flags);
597
598 rc = efx_selftest(efx, &efx_tests, test->flags);
599 602
600 if (!already_up) 603 if (!already_up)
601 dev_close(efx->net_dev); 604 dev_close(efx->net_dev);
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
604 rc == 0 ? "passed" : "failed", 607 rc == 0 ? "passed" : "failed",
605 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 608 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
606 609
607 fail2: 610fail1:
608 fail1:
609 /* Fill ethtool results structures */ 611 /* Fill ethtool results structures */
610 efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); 612 efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
613 kfree(efx_tests);
614fail:
611 if (rc) 615 if (rc)
612 test->flags |= ETH_TEST_FL_FAILED; 616 test->flags |= ETH_TEST_FL_FAILED;
613} 617}
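
Editor's note on the sfc ethtool hunk: struct efx_self_tests is no longer declared on the stack; it is allocated with kzalloc() and the self-test fails cleanly if the allocation does not succeed, because kernel stacks are small and large result structures belong on the heap. A minimal userspace analogue with calloc, where the struct contents are invented for the example:

#include <stdio.h>
#include <stdlib.h>

struct self_tests {                      /* illustrative stand-in for efx_self_tests */
	int results[256];
};

static int run_self_test(void)
{
	struct self_tests *tests;

	tests = calloc(1, sizeof(*tests));   /* zeroed, like kzalloc() */
	if (!tests)
		return -1;                   /* fail the run cleanly */

	tests->results[0] = 1;               /* ... run and record tests ... */

	free(tests);
	return 0;
}

int main(void)
{
	printf("self test rc=%d\n", run_self_test());
	return 0;
}
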
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 42daf98ba736..35b28f42d208 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); 3856 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN);
3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 3857 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
3858 3858
3859 /* device is off until link detection */
3860 netif_carrier_off(dev);
3861
3862 return dev; 3859 return dev;
3863} 3860}
3864 3861
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 64bfdae5956f..d70bde95460b 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev)
1178 smsc911x_reg_write(pdata, HW_CFG, 0x00050000); 1178 smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
1179 smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); 1179 smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740);
1180 1180
1181 /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */
1182 spin_lock_irq(&pdata->mac_lock);
1183 smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q);
1184 spin_unlock_irq(&pdata->mac_lock);
1185
1181 /* Make sure EEPROM has finished loading before setting GPIO_CFG */ 1186 /* Make sure EEPROM has finished loading before setting GPIO_CFG */
1182 timeout = 50; 1187 timeout = 50;
1183 while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && 1188 while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) &&
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 34a0af3837f9..0e5f03135b50 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1560 1560
1561 priv->hw = device; 1561 priv->hw = device;
1562 1562
1563 if (device_can_wakeup(priv->device)) 1563 if (device_can_wakeup(priv->device)) {
1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ 1564 priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */
1565 enable_irq_wake(dev->irq);
1566 }
1565 1567
1566 return 0; 1568 return 0;
1567} 1569}
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 93b32d366611..06c0e5033656 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11158 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11158 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11159 break; /* We have no PHY */ 11159 break; /* We have no PHY */
11160 11160
11161 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11161 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11162 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11163 !netif_running(dev)))
11162 return -EAGAIN; 11164 return -EAGAIN;
11163 11165
11164 spin_lock_bh(&tp->lock); 11166 spin_lock_bh(&tp->lock);
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11174 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) 11176 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
11175 break; /* We have no PHY */ 11177 break; /* We have no PHY */
11176 11178
11177 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11179 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
11180 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
11181 !netif_running(dev)))
11178 return -EAGAIN; 11182 return -EAGAIN;
11179 11183
11180 spin_lock_bh(&tp->lock); 11184 spin_lock_bh(&tp->lock);
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 02b622e3b9fb..5002f5be47be 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = {
651 .driver_info = (unsigned long)&dm9601_info, 651 .driver_info = (unsigned long)&dm9601_info,
652 }, 652 },
653 { 653 {
654 USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */
655 .driver_info = (unsigned long)&dm9601_info,
656 },
657 {
654 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ 658 USB_DEVICE(0x0a46, 0x9000), /* DM9000E */
655 .driver_info = (unsigned long)&dm9601_info, 659 .driver_info = (unsigned long)&dm9601_info,
656 }, 660 },
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index bed8fcedff49..6d83812603b6 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -2628,15 +2628,15 @@ exit:
2628 2628
2629static void hso_free_tiomget(struct hso_serial *serial) 2629static void hso_free_tiomget(struct hso_serial *serial)
2630{ 2630{
2631 struct hso_tiocmget *tiocmget = serial->tiocmget; 2631 struct hso_tiocmget *tiocmget;
2632 if (!serial)
2633 return;
2634 tiocmget = serial->tiocmget;
2632 if (tiocmget) { 2635 if (tiocmget) {
2633 if (tiocmget->urb) { 2636 usb_free_urb(tiocmget->urb);
2634 usb_free_urb(tiocmget->urb); 2637 tiocmget->urb = NULL;
2635 tiocmget->urb = NULL;
2636 }
2637 serial->tiocmget = NULL; 2638 serial->tiocmget = NULL;
2638 kfree(tiocmget); 2639 kfree(tiocmget);
2639
2640 } 2640 }
2641} 2641}
2642 2642
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ed9a41643ff4..95c41d56631c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -931,8 +931,10 @@ fail_halt:
931 if (urb != NULL) { 931 if (urb != NULL) {
932 clear_bit (EVENT_RX_MEMORY, &dev->flags); 932 clear_bit (EVENT_RX_MEMORY, &dev->flags);
933 status = usb_autopm_get_interface(dev->intf); 933 status = usb_autopm_get_interface(dev->intf);
934 if (status < 0) 934 if (status < 0) {
935 usb_free_urb(urb);
935 goto fail_lowmem; 936 goto fail_lowmem;
937 }
936 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) 938 if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
937 resched = 0; 939 resched = 0;
938 usb_autopm_put_interface(dev->intf); 940 usb_autopm_put_interface(dev->intf);
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 78c26fdccad1..62ce2f4e8605 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah)
282 return 0; 282 return 0;
283} 283}
284 284
285/*
286 * Wait for synth to settle
287 */
288static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
289 struct ieee80211_channel *channel)
290{
291 /*
292 * On 5211+ read activation -> rx delay
293 * and use it (100ns steps).
294 */
295 if (ah->ah_version != AR5K_AR5210) {
296 u32 delay;
297 delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
298 AR5K_PHY_RX_DELAY_M;
299 delay = (channel->hw_value & CHANNEL_CCK) ?
300 ((delay << 2) / 22) : (delay / 10);
301 if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
302 delay = delay << 1;
303 if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
304 delay = delay << 2;
305 /* XXX: /2 on turbo ? Let's be safe
306 * for now */
307 udelay(100 + delay);
308 } else {
309 mdelay(1);
310 }
311}
312
285 313
286/**********************\ 314/**********************\
287* RF Gain optimization * 315* RF Gain optimization *
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah,
1253 case AR5K_RF5111: 1281 case AR5K_RF5111:
1254 ret = ath5k_hw_rf5111_channel(ah, channel); 1282 ret = ath5k_hw_rf5111_channel(ah, channel);
1255 break; 1283 break;
1284 case AR5K_RF2317:
1256 case AR5K_RF2425: 1285 case AR5K_RF2425:
1257 ret = ath5k_hw_rf2425_channel(ah, channel); 1286 ret = ath5k_hw_rf2425_channel(ah, channel);
1258 break; 1287 break;
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3237 /* Failed */ 3266 /* Failed */
3238 if (i >= 100) 3267 if (i >= 100)
3239 return -EIO; 3268 return -EIO;
3269
3270 /* Set channel and wait for synth */
3271 ret = ath5k_hw_channel(ah, channel);
3272 if (ret)
3273 return ret;
3274
3275 ath5k_hw_wait_for_synth(ah, channel);
3240 } 3276 }
3241 3277
3242 /* 3278 /*
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3251 if (ret) 3287 if (ret)
3252 return ret; 3288 return ret;
3253 3289
3290 /* Write OFDM timings on 5212*/
3291 if (ah->ah_version == AR5K_AR5212 &&
3292 channel->hw_value & CHANNEL_OFDM) {
3293
3294 ret = ath5k_hw_write_ofdm_timings(ah, channel);
3295 if (ret)
3296 return ret;
3297
3298 /* Spur info is available only from EEPROM versions
3299 * greater than 5.3, but the EEPROM routines will use
3300 * static values for older versions */
3301 if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
3302 ath5k_hw_set_spur_mitigation_filter(ah,
3303 channel);
3304 }
3305
3306 /* If we used fast channel switching
3307 * we are done, release RF bus and
3308 * fire up NF calibration.
3309 *
3310 * Note: Only NF calibration due to
3311 * channel change, not AGC calibration
3312 * since AGC is still running !
3313 */
3314 if (fast) {
3315 /*
3316 * Release RF Bus grant
3317 */
3318 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
3319 AR5K_PHY_RFBUS_REQ_REQUEST);
3320
3321 /*
3322 * Start NF calibration
3323 */
3324 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
3325 AR5K_PHY_AGCCTL_NF);
3326
3327 return ret;
3328 }
3329
3254 /* 3330 /*
3255 * For 5210 we do all initialization using 3331 * For 5210 we do all initialization using
3256 * initvals, so we don't have to modify 3332 * initvals, so we don't have to modify
3257 * any settings (5210 also only supports 3333 * any settings (5210 also only supports
3258 * a/aturbo modes) 3334 * a/aturbo modes)
3259 */ 3335 */
3260 if ((ah->ah_version != AR5K_AR5210) && !fast) { 3336 if (ah->ah_version != AR5K_AR5210) {
3261 3337
3262 /* 3338 /*
3263 * Write initial RF gain settings 3339 * Write initial RF gain settings
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3276 if (ret) 3352 if (ret)
3277 return ret; 3353 return ret;
3278 3354
3279 /* Write OFDM timings on 5212*/
3280 if (ah->ah_version == AR5K_AR5212 &&
3281 channel->hw_value & CHANNEL_OFDM) {
3282
3283 ret = ath5k_hw_write_ofdm_timings(ah, channel);
3284 if (ret)
3285 return ret;
3286
3287 /* Spur info is available only from EEPROM versions
3288 * greater than 5.3, but the EEPROM routines will use
3289 * static values for older versions */
3290 if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
3291 ath5k_hw_set_spur_mitigation_filter(ah,
3292 channel);
3293 }
3294
3295 /*Enable/disable 802.11b mode on 5111 3355 /*Enable/disable 802.11b mode on 5111
3296 (enable 2111 frequency converter + CCK)*/ 3356 (enable 2111 frequency converter + CCK)*/
3297 if (ah->ah_radio == AR5K_RF5111) { 3357 if (ah->ah_radio == AR5K_RF5111) {
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3322 */ 3382 */
3323 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); 3383 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
3324 3384
3385 ath5k_hw_wait_for_synth(ah, channel);
3386
3325 /* 3387 /*
3326 * On 5211+ read activation -> rx delay 3388 * Perform ADC test to see if baseband is ready
3327 * and use it. 3389 * Set tx hold and check adc test register
3328 */ 3390 */
3329 if (ah->ah_version != AR5K_AR5210) { 3391 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
3330 u32 delay; 3392 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
3331 delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & 3393 for (i = 0; i <= 20; i++) {
3332 AR5K_PHY_RX_DELAY_M; 3394 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
3333 delay = (channel->hw_value & CHANNEL_CCK) ? 3395 break;
3334 ((delay << 2) / 22) : (delay / 10); 3396 udelay(200);
3335 if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
3336 delay = delay << 1;
3337 if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
3338 delay = delay << 2;
3339 /* XXX: /2 on turbo ? Let's be safe
3340 * for now */
3341 udelay(100 + delay);
3342 } else {
3343 mdelay(1);
3344 }
3345
3346 if (fast)
3347 /*
3348 * Release RF Bus grant
3349 */
3350 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
3351 AR5K_PHY_RFBUS_REQ_REQUEST);
3352 else {
3353 /*
3354 * Perform ADC test to see if baseband is ready
3355 * Set tx hold and check adc test register
3356 */
3357 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
3358 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
3359 for (i = 0; i <= 20; i++) {
3360 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
3361 break;
3362 udelay(200);
3363 }
3364 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
3365 } 3397 }
3398 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
3366 3399
3367 /* 3400 /*
3368 * Start automatic gain control calibration 3401 * Start automatic gain control calibration
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 23838e37d45f..1a7fa6ea4cf5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -21,7 +21,6 @@
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/leds.h> 22#include <linux/leds.h>
23#include <linux/completion.h> 23#include <linux/completion.h>
24#include <linux/pm_qos_params.h>
25 24
26#include "debug.h" 25#include "debug.h"
27#include "common.h" 26#include "common.h"
@@ -57,8 +56,6 @@ struct ath_node;
57 56
58#define A_MAX(a, b) ((a) > (b) ? (a) : (b)) 57#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
59 58
60#define ATH9K_PM_QOS_DEFAULT_VALUE 55
61
62#define TSF_TO_TU(_h,_l) \ 59#define TSF_TO_TU(_h,_l) \
63 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 60 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
64 61
@@ -633,8 +630,6 @@ struct ath_softc {
633 struct ath_descdma txsdma; 630 struct ath_descdma txsdma;
634 631
635 struct ath_ant_comb ant_comb; 632 struct ath_ant_comb ant_comb;
636
637 struct pm_qos_request_list pm_qos_req;
638}; 633};
639 634
640struct ath_wiphy { 635struct ath_wiphy {
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
666extern struct ieee80211_ops ath9k_ops; 661extern struct ieee80211_ops ath9k_ops;
667extern int ath9k_modparam_nohwcrypt; 662extern int ath9k_modparam_nohwcrypt;
668extern int led_blink; 663extern int led_blink;
669extern int ath9k_pm_qos_value;
670extern bool is_ath9k_unloaded; 664extern bool is_ath9k_unloaded;
671 665
672irqreturn_t ath_isr(int irq, void *dev); 666irqreturn_t ath_isr(int irq, void *dev);
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 5ab3084eb9cb..07b1633b7f3f 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
219 struct tx_buf *tx_buf = NULL; 219 struct tx_buf *tx_buf = NULL;
220 struct sk_buff *nskb = NULL; 220 struct sk_buff *nskb = NULL;
221 int ret = 0, i; 221 int ret = 0, i;
222 u16 *hdr, tx_skb_cnt = 0; 222 u16 tx_skb_cnt = 0;
223 u8 *buf; 223 u8 *buf;
224 __le16 *hdr;
224 225
225 if (hif_dev->tx.tx_skb_cnt == 0) 226 if (hif_dev->tx.tx_skb_cnt == 0)
226 return 0; 227 return 0;
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev)
245 246
246 buf = tx_buf->buf; 247 buf = tx_buf->buf;
247 buf += tx_buf->offset; 248 buf += tx_buf->offset;
248 hdr = (u16 *)buf; 249 hdr = (__le16 *)buf;
249 *hdr++ = nskb->len; 250 *hdr++ = cpu_to_le16(nskb->len);
250 *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; 251 *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG);
251 buf += 4; 252 buf += 4;
252 memcpy(buf, nskb->data, nskb->len); 253 memcpy(buf, nskb->data, nskb->len);
253 tx_buf->len = nskb->len + 4; 254 tx_buf->len = nskb->len + 4;
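
The hif_usb.c hunk above changes the stream header pointer from a host-order u16 to __le16 and writes the fields with cpu_to_le16(), so the 4-byte header (length, then tag) has the same byte layout on big- and little-endian hosts. A minimal userspace sketch of the same idea, assuming nothing about the driver beyond what the hunk shows; plain byte shifts stand in for the kernel's cpu_to_le16(), and the tag value is a placeholder, not ATH_USB_TX_STREAM_MODE_TAG:

#include <stdint.h>
#include <stdio.h>

#define STREAM_TAG 0x4e00u	/* placeholder tag, not the driver's value */

/* Store a 16-bit value little-endian, independent of host byte order. */
static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v & 0xff);
	p[1] = (uint8_t)(v >> 8);
}

/* Build the 4-byte per-packet header: length first, then the tag. */
static void put_stream_header(uint8_t *buf, uint16_t payload_len)
{
	put_le16(buf, payload_len);
	put_le16(buf + 2, STREAM_TAG);
}

int main(void)
{
	uint8_t hdr[4];

	put_stream_header(hdr, 1536);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	return 0;	/* prints "00 06 00 4e" on any host */
}
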
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 087a6a95edd5..a033d01bf8a0 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable;
41module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); 41module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
42MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); 42MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
43 43
44int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
45module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
46MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
47
48bool is_ath9k_unloaded; 44bool is_ath9k_unloaded;
49/* We use the hw_value as an index into our private channel structure */ 45/* We use the hw_value as an index into our private channel structure */
50 46
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
762 ath_init_leds(sc); 758 ath_init_leds(sc);
763 ath_start_rfkill_poll(sc); 759 ath_start_rfkill_poll(sc);
764 760
765 pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
766 PM_QOS_DEFAULT_VALUE);
767
768 return 0; 761 return 0;
769 762
770error_world: 763error_world:
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc)
831 } 824 }
832 825
833 ieee80211_unregister_hw(hw); 826 ieee80211_unregister_hw(hw);
834 pm_qos_remove_request(&sc->pm_qos_req);
835 ath_rx_cleanup(sc); 827 ath_rx_cleanup(sc);
836 ath_tx_cleanup(sc); 828 ath_tx_cleanup(sc);
837 ath9k_deinit_softc(sc); 829 ath9k_deinit_softc(sc);
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 180170d3ce25..2915b11edefb 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
885 struct ath_common *common = ath9k_hw_common(ah); 885 struct ath_common *common = ath9k_hw_common(ah);
886 886
887 if (!(ints & ATH9K_INT_GLOBAL)) 887 if (!(ints & ATH9K_INT_GLOBAL))
888 ath9k_hw_enable_interrupts(ah); 888 ath9k_hw_disable_interrupts(ah);
889 889
890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
891 891
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
964 } 964 }
965 965
966 ath9k_hw_enable_interrupts(ah); 966 if (ints & ATH9K_INT_GLOBAL)
967 ath9k_hw_enable_interrupts(ah);
967 968
968 return; 969 return;
969} 970}
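
The mac.c change above fixes two things in ath9k_hw_set_interrupts(): when ATH9K_INT_GLOBAL is absent from the requested mask the primary interrupt line is now disabled rather than enabled, and it is only re-enabled at the end if GLOBAL was actually requested. A toy model of that gating, with invented names that are not the driver's registers or API:

#include <stdbool.h>
#include <stdio.h>

#define INT_GLOBAL 0x80000000u	/* invented flag bit for this sketch */

struct fake_hw {
	unsigned int imask;	/* which sources may raise an interrupt */
	bool line_enabled;	/* state of the primary interrupt line */
};

static void hw_disable_interrupts(struct fake_hw *hw) { hw->line_enabled = false; }
static void hw_enable_interrupts(struct fake_hw *hw)  { hw->line_enabled = true; }

/* Reprogram the mask; only re-enable delivery when GLOBAL was asked for. */
static void hw_set_interrupts(struct fake_hw *hw, unsigned int ints)
{
	if (!(ints & INT_GLOBAL))
		hw_disable_interrupts(hw);	/* was wrongly "enable" before the fix */

	hw->imask = ints & ~INT_GLOBAL;

	if (ints & INT_GLOBAL)
		hw_enable_interrupts(hw);	/* was unconditional before the fix */
}

int main(void)
{
	struct fake_hw hw = { 0, false };

	hw_set_interrupts(&hw, 0x3);			/* no GLOBAL: line stays off */
	printf("line=%d\n", hw.line_enabled);
	hw_set_interrupts(&hw, 0x3 | INT_GLOBAL);	/* GLOBAL: line comes on */
	printf("line=%d\n", hw.line_enabled);
	return 0;
}
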
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index da5c64597c1f..a09d15f7aa6e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1173,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw)
1173 ath9k_btcoex_timer_resume(sc); 1173 ath9k_btcoex_timer_resume(sc);
1174 } 1174 }
1175 1175
1176 /* User has the option to provide pm-qos value as a module
1177 * parameter rather than using the default value of
1178 * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
1179 */
1180 pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
1181
1182 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) 1176 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
1183 common->bus_ops->extn_synch_en(common); 1177 common->bus_ops->extn_synch_en(common);
1184 1178
@@ -1345,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1345 1339
1346 sc->sc_flags |= SC_OP_INVALID; 1340 sc->sc_flags |= SC_OP_INVALID;
1347 1341
1348 pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE);
1349
1350 mutex_unlock(&sc->mutex); 1342 mutex_unlock(&sc->mutex);
1351 1343
1352 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); 1344 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 537732e5964f..f82c400be288 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = {
118 { USB_DEVICE(0x057c, 0x8402) }, 118 { USB_DEVICE(0x057c, 0x8402) },
119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ 119 /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */
120 { USB_DEVICE(0x1668, 0x1200) }, 120 { USB_DEVICE(0x1668, 0x1200) },
121 /* Airlive X.USB a/b/g/n */
122 { USB_DEVICE(0x1b75, 0x9170) },
121 123
122 /* terminate */ 124 /* terminate */
123 {} 125 {}
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index a9b852be4509..39b6f16c87fa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv,
402} 402}
403#endif 403#endif
404 404
405/**
406 * iwl3945_good_plcp_health - checks for plcp error.
407 *
408 * When the plcp error is exceeding the thresholds, reset the radio
409 * to improve the throughput.
410 */
411static bool iwl3945_good_plcp_health(struct iwl_priv *priv,
412 struct iwl_rx_packet *pkt)
413{
414 bool rc = true;
415 struct iwl3945_notif_statistics current_stat;
416 int combined_plcp_delta;
417 unsigned int plcp_msec;
418 unsigned long plcp_received_jiffies;
419
420 if (priv->cfg->base_params->plcp_delta_threshold ==
421 IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) {
422 IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n");
423 return rc;
424 }
425 memcpy(&current_stat, pkt->u.raw, sizeof(struct
426 iwl3945_notif_statistics));
427 /*
428 * check for plcp_err and trigger radio reset if it exceeds
429 * the plcp error threshold plcp_delta.
430 */
431 plcp_received_jiffies = jiffies;
432 plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
433 (long) priv->plcp_jiffies);
434 priv->plcp_jiffies = plcp_received_jiffies;
435 /*
436 * check to make sure plcp_msec is not 0 to prevent division
437 * by zero.
438 */
439 if (plcp_msec) {
440 combined_plcp_delta =
441 (le32_to_cpu(current_stat.rx.ofdm.plcp_err) -
442 le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err));
443
444 if ((combined_plcp_delta > 0) &&
445 ((combined_plcp_delta * 100) / plcp_msec) >
446 priv->cfg->base_params->plcp_delta_threshold) {
447 /*
448 * if plcp_err exceed the threshold, the following
449 * data is printed in csv format:
450 * Text: plcp_err exceeded %d,
451 * Received ofdm.plcp_err,
452 * Current ofdm.plcp_err,
453 * combined_plcp_delta,
454 * plcp_msec
455 */
456 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
457 "%u, %d, %u mSecs\n",
458 priv->cfg->base_params->plcp_delta_threshold,
459 le32_to_cpu(current_stat.rx.ofdm.plcp_err),
460 combined_plcp_delta, plcp_msec);
461 /*
462 * Reset the RF radio due to the high plcp
463 * error rate
464 */
465 rc = false;
466 }
467 }
468 return rc;
469}
470
471void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 405void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
472 struct iwl_rx_mem_buffer *rxb) 406 struct iwl_rx_mem_buffer *rxb)
473{ 407{
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = {
2734 .isr_ops = { 2668 .isr_ops = {
2735 .isr = iwl_isr_legacy, 2669 .isr = iwl_isr_legacy,
2736 }, 2670 },
2737 .check_plcp_health = iwl3945_good_plcp_health,
2738 2671
2739 .debugfs_ops = { 2672 .debugfs_ops = {
2740 .rx_stats_read = iwl3945_ucode_rx_stats_read, 2673 .rx_stats_read = iwl3945_ucode_rx_stats_read,
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 79ab0a6b1386..537fb8c84e3a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -51,7 +51,7 @@
51#include "iwl-agn-debugfs.h" 51#include "iwl-agn-debugfs.h"
52 52
53/* Highest firmware API version supported */ 53/* Highest firmware API version supported */
54#define IWL5000_UCODE_API_MAX 2 54#define IWL5000_UCODE_API_MAX 5
55#define IWL5150_UCODE_API_MAX 2 55#define IWL5150_UCODE_API_MAX 2
56 56
57/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 1eacba4daa5b..0494d7b102d4 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
199 while (i != idx) { 199 while (i != idx) {
200 u16 len; 200 u16 len;
201 struct sk_buff *skb; 201 struct sk_buff *skb;
202 dma_addr_t dma_addr;
202 desc = &ring[i]; 203 desc = &ring[i];
203 len = le16_to_cpu(desc->len); 204 len = le16_to_cpu(desc->len);
204 skb = rx_buf[i]; 205 skb = rx_buf[i];
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
216 217
217 len = priv->common.rx_mtu; 218 len = priv->common.rx_mtu;
218 } 219 }
220 dma_addr = le32_to_cpu(desc->host_addr);
221 pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
222 priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
219 skb_put(skb, len); 223 skb_put(skb, len);
220 224
221 if (p54_rx(dev, skb)) { 225 if (p54_rx(dev, skb)) {
222 pci_unmap_single(priv->pdev, 226 pci_unmap_single(priv->pdev, dma_addr,
223 le32_to_cpu(desc->host_addr), 227 priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
224 priv->common.rx_mtu + 32,
225 PCI_DMA_FROMDEVICE);
226 rx_buf[i] = NULL; 228 rx_buf[i] = NULL;
227 desc->host_addr = 0; 229 desc->host_addr = cpu_to_le32(0);
228 } else { 230 } else {
229 skb_trim(skb, 0); 231 skb_trim(skb, 0);
232 pci_dma_sync_single_for_device(priv->pdev, dma_addr,
233 priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
230 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); 234 desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
231 } 235 }
232 236
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 21713a7638c4..9b344a921e74 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ 98 {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */
99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
101 {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */
101 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 102 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
102 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ 103 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
103 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ 104 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 848cc2cce247..518542b4bf9e 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2597 __le32 mode; 2597 __le32 mode;
2598 int ret; 2598 int ret;
2599 2599
2600 if (priv->device_type != RNDIS_BCM4320B)
2601 return -ENOTSUPP;
2602
2600 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, 2603 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
2601 enabled ? "enabled" : "disabled", 2604 enabled ? "enabled" : "disabled",
2602 timeout); 2605 timeout);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index aa97971a38af..3b3f1e45ab3e 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
652 */ 652 */
653 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 653 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
654 654
655 /*
656 * The hardware has already checked the Michael Mic and has
657 * stripped it from the frame. Signal this to mac80211.
658 */
659 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
660
655 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) 661 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
656 rxdesc->flags |= RX_FLAG_DECRYPTED; 662 rxdesc->flags |= RX_FLAG_DECRYPTED;
657 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) 663 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1065 { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1071 { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
1066#endif 1072#endif
1067#ifdef CONFIG_RT2800PCI_RT35XX 1073#ifdef CONFIG_RT2800PCI_RT35XX
1074 { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) },
1075 { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) },
1068 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1076 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
1069 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1077 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
1070 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1078 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index b97a4a54ff4c..197a36c05fda 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
486 */ 486 */
487 rxdesc->flags |= RX_FLAG_IV_STRIPPED; 487 rxdesc->flags |= RX_FLAG_IV_STRIPPED;
488 488
489 /*
490 * The hardware has already checked the Michael Mic and has
491 * stripped it from the frame. Signal this to mac80211.
492 */
493 rxdesc->flags |= RX_FLAG_MMIC_STRIPPED;
494
489 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) 495 if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
490 rxdesc->flags |= RX_FLAG_DECRYPTED; 496 rxdesc->flags |= RX_FLAG_DECRYPTED;
491 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) 497 else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd492754..ea1580085347 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig NFC_DEVICES 5menuconfig NFC_DEVICES
6 bool "NFC devices" 6 bool "Near Field Communication (NFC) devices"
7 default n 7 default n
8 ---help--- 8 ---help---
9 You'll have to say Y if your computer contains an NFC device that 9 You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae647264dd6..724f65d8f9e4 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
60struct pn544_info { 60struct pn544_info {
61 struct miscdevice miscdev; 61 struct miscdevice miscdev;
62 struct i2c_client *i2c_dev; 62 struct i2c_client *i2c_dev;
63 struct regulator_bulk_data regs[2]; 63 struct regulator_bulk_data regs[3];
64 64
65 enum pn544_state state; 65 enum pn544_state state;
66 wait_queue_head_t read_wait; 66 wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
74 74
75static const char reg_vdd_io[] = "Vdd_IO"; 75static const char reg_vdd_io[] = "Vdd_IO";
76static const char reg_vbat[] = "VBat"; 76static const char reg_vbat[] = "VBat";
77static const char reg_vsim[] = "VSim";
77 78
78/* sysfs interface */ 79/* sysfs interface */
79static ssize_t pn544_test(struct device *dev, 80static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
740 741
741 info->regs[0].supply = reg_vdd_io; 742 info->regs[0].supply = reg_vdd_io;
742 info->regs[1].supply = reg_vbat; 743 info->regs[1].supply = reg_vbat;
744 info->regs[2].supply = reg_vsim;
743 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), 745 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
744 info->regs); 746 info->regs);
745 if (r < 0) 747 if (r < 0)
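
Because pn544_probe() passes ARRAY_SIZE(info->regs) to regulator_bulk_get(), adding the third supply only requires growing regs[] and filling in one more .supply name, which is all the hunk above does. A small standalone illustration of that pattern, using generic names rather than the kernel regulator API:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct supply { const char *name; };

/* Stand-in for regulator_bulk_get(): just reports what would be requested. */
static int bulk_get(const struct supply *regs, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		printf("requesting supply %u: %s\n", i, regs[i].name);
	return 0;
}

int main(void)
{
	/* Growing this table is the whole change; the call below adapts. */
	const struct supply regs[] = {
		{ "Vdd_IO" },
		{ "VBat"   },
		{ "VSim"   },	/* the newly added supply */
	};

	return bulk_get(regs, ARRAY_SIZE(regs));
}
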
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c
index 28295d0a50f6..4d87b5dc9284 100644
--- a/drivers/of/pdt.c
+++ b/drivers/of/pdt.c
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata;
36 (p)->unique_id = of_pdt_unique_id++; \ 36 (p)->unique_id = of_pdt_unique_id++; \
37} while (0) 37} while (0)
38 38
39static inline const char *of_pdt_node_name(struct device_node *dp) 39static char * __init of_pdt_build_full_name(struct device_node *dp)
40{ 40{
41 return dp->path_component_name; 41 int len, ourlen, plen;
42 char *n;
43
44 dp->path_component_name = build_path_component(dp);
45
46 plen = strlen(dp->parent->full_name);
47 ourlen = strlen(dp->path_component_name);
48 len = ourlen + plen + 2;
49
50 n = prom_early_alloc(len);
51 strcpy(n, dp->parent->full_name);
52 if (!of_node_is_root(dp->parent)) {
53 strcpy(n + plen, "/");
54 plen++;
55 }
56 strcpy(n + plen, dp->path_component_name);
57
58 return n;
42} 59}
43 60
44#else 61#else /* CONFIG_SPARC */
45 62
46static inline void of_pdt_incr_unique_id(void *p) { } 63static inline void of_pdt_incr_unique_id(void *p) { }
47static inline void irq_trans_init(struct device_node *dp) { } 64static inline void irq_trans_init(struct device_node *dp) { }
48 65
49static inline const char *of_pdt_node_name(struct device_node *dp) 66static char * __init of_pdt_build_full_name(struct device_node *dp)
50{ 67{
51 return dp->name; 68 static int failsafe_id = 0; /* for generating unique names on failure */
69 char *buf;
70 int len;
71
72 if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len))
73 goto failsafe;
74
75 buf = prom_early_alloc(len + 1);
76 if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len))
77 goto failsafe;
78 return buf;
79
80 failsafe:
81 buf = prom_early_alloc(strlen(dp->parent->full_name) +
82 strlen(dp->name) + 16);
83 sprintf(buf, "%s/%s@unknown%i",
84 of_node_is_root(dp->parent) ? "" : dp->parent->full_name,
85 dp->name, failsafe_id++);
86 pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf);
87 return buf;
52} 88}
53 89
54#endif /* !CONFIG_SPARC */ 90#endif /* !CONFIG_SPARC */
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name)
132 return buf; 168 return buf;
133} 169}
134 170
135static char * __init of_pdt_try_pkg2path(phandle node)
136{
137 char *res, *buf = NULL;
138 int len;
139
140 if (!of_pdt_prom_ops->pkg2path)
141 return NULL;
142
143 if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len))
144 return NULL;
145 buf = prom_early_alloc(len + 1);
146 if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) {
147 pr_err("%s: package-to-path failed\n", __func__);
148 return NULL;
149 }
150
151 res = strrchr(buf, '/');
152 if (!res) {
153 pr_err("%s: couldn't find / in %s\n", __func__, buf);
154 return NULL;
155 }
156 return res+1;
157}
158
159/*
160 * When fetching the node's name, first try using package-to-path; if
161 * that fails (either because the arch hasn't supplied a PROM callback,
162 * or some other random failure), fall back to just looking at the node's
163 * 'name' property.
164 */
165static char * __init of_pdt_build_name(phandle node)
166{
167 char *buf;
168
169 buf = of_pdt_try_pkg2path(node);
170 if (!buf)
171 buf = of_pdt_get_one_property(node, "name");
172
173 return buf;
174}
175
176static struct device_node * __init of_pdt_create_node(phandle node, 171static struct device_node * __init of_pdt_create_node(phandle node,
177 struct device_node *parent) 172 struct device_node *parent)
178{ 173{
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node,
187 182
188 kref_init(&dp->kref); 183 kref_init(&dp->kref);
189 184
190 dp->name = of_pdt_build_name(node); 185 dp->name = of_pdt_get_one_property(node, "name");
191 dp->type = of_pdt_get_one_property(node, "device_type"); 186 dp->type = of_pdt_get_one_property(node, "device_type");
192 dp->phandle = node; 187 dp->phandle = node;
193 188
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node,
198 return dp; 193 return dp;
199} 194}
200 195
201static char * __init of_pdt_build_full_name(struct device_node *dp)
202{
203 int len, ourlen, plen;
204 char *n;
205
206 plen = strlen(dp->parent->full_name);
207 ourlen = strlen(of_pdt_node_name(dp));
208 len = ourlen + plen + 2;
209
210 n = prom_early_alloc(len);
211 strcpy(n, dp->parent->full_name);
212 if (!of_node_is_root(dp->parent)) {
213 strcpy(n + plen, "/");
214 plen++;
215 }
216 strcpy(n + plen, of_pdt_node_name(dp));
217
218 return n;
219}
220
221static struct device_node * __init of_pdt_build_tree(struct device_node *parent, 196static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
222 phandle node, 197 phandle node,
223 struct device_node ***nextp) 198 struct device_node ***nextp)
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent,
240 *(*nextp) = dp; 215 *(*nextp) = dp;
241 *nextp = &dp->allnext; 216 *nextp = &dp->allnext;
242 217
243#if defined(CONFIG_SPARC)
244 dp->path_component_name = build_path_component(dp);
245#endif
246 dp->full_name = of_pdt_build_full_name(dp); 218 dp->full_name = of_pdt_build_full_name(dp);
247 219
248 dp->child = of_pdt_build_tree(dp, 220 dp->child = of_pdt_build_tree(dp,
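
of_pdt_build_full_name() above concatenates the parent's full_name with the node's path component, inserting a '/' only when the parent is not the root node. A userspace sketch of the same string assembly, assuming a root full name of "/" and using malloc in place of prom_early_alloc; the helper name is illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Build "<parent>/<component>", skipping the extra '/' when the parent is
 * the root node, whose full name is just "/". */
static char *build_full_name(const char *parent_full, const char *component)
{
	int is_root = (strcmp(parent_full, "/") == 0);
	size_t len = strlen(parent_full) + strlen(component) + 2;
	char *n = malloc(len);

	if (!n)
		return NULL;
	strcpy(n, parent_full);
	if (!is_root)
		strcat(n, "/");
	strcat(n, component);
	return n;
}

int main(void)
{
	char *cpus = build_full_name("/", "cpus");
	char *cpu0 = build_full_name(cpus, "cpu@0");

	printf("%s\n%s\n", cpus, cpu0);	/* "/cpus" and "/cpus/cpu@0" */
	free(cpu0);
	free(cpus);
	return 0;
}
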
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 8ecaac983923..ea25e5bfcf23 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -23,6 +23,7 @@
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/fs.h> 24#include <linux/fs.h>
25#include <linux/capability.h> 25#include <linux/capability.h>
26#include <linux/security.h>
26#include <linux/pci-aspm.h> 27#include <linux/pci-aspm.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include "pci.h" 29#include "pci.h"
@@ -368,7 +369,7 @@ pci_read_config(struct file *filp, struct kobject *kobj,
368 u8 *data = (u8*) buf; 369 u8 *data = (u8*) buf;
369 370
370 /* Several chips lock up trying to read undefined config space */ 371 /* Several chips lock up trying to read undefined config space */
371 if (cap_raised(filp->f_cred->cap_effective, CAP_SYS_ADMIN)) { 372 if (security_capable(filp->f_cred, CAP_SYS_ADMIN) == 0) {
372 size = dev->cfg_size; 373 size = dev->cfg_size;
373 } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) { 374 } else if (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
374 size = 128; 375 size = 128;
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index 0bdda5b3ed55..42fbf1a75576 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev)
518 flags |= CONF_ENABLE_IOCARD; 518 flags |= CONF_ENABLE_IOCARD;
519 if (flags & CONF_ENABLE_IOCARD) 519 if (flags & CONF_ENABLE_IOCARD)
520 s->socket.flags |= SS_IOCARD; 520 s->socket.flags |= SS_IOCARD;
521 if (flags & CONF_ENABLE_ZVCARD)
522 s->socket.flags |= SS_ZVCARD | SS_IOCARD;
521 if (flags & CONF_ENABLE_SPKR) { 523 if (flags & CONF_ENABLE_SPKR) {
522 s->socket.flags |= SS_SPKR_ENA; 524 s->socket.flags |= SS_SPKR_ENA;
523 status = CCSR_AUDIO_ENA; 525 status = CCSR_AUDIO_ENA;
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 3755e7c8c715..2c540542b5af 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
215} 215}
216#endif 216#endif
217 217
218static void pxa2xx_configure_sockets(struct device *dev) 218void pxa2xx_configure_sockets(struct device *dev)
219{ 219{
220 struct pcmcia_low_level *ops = dev->platform_data; 220 struct pcmcia_low_level *ops = dev->platform_data;
221 /* 221 /*
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h
index bb62ea87b8f9..b609b45469ed 100644
--- a/drivers/pcmcia/pxa2xx_base.h
+++ b/drivers/pcmcia/pxa2xx_base.h
@@ -1,3 +1,4 @@
1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); 1int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt);
2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); 2void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops);
3void pxa2xx_configure_sockets(struct device *dev);
3 4
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c
index c3f72192af66..a52039564e74 100644
--- a/drivers/pcmcia/pxa2xx_colibri.c
+++ b/drivers/pcmcia/pxa2xx_colibri.c
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void)
181{ 181{
182 int ret; 182 int ret;
183 183
184 if (!machine_is_colibri() && !machine_is_colibri320())
185 return -ENODEV;
186
184 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); 187 colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
185 if (!colibri_pcmcia_device) 188 if (!colibri_pcmcia_device)
186 return -ENOMEM; 189 return -ENOMEM;
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c
index b9f8c8fb42bd..25afe637c657 100644
--- a/drivers/pcmcia/pxa2xx_lubbock.c
+++ b/drivers/pcmcia/pxa2xx_lubbock.c
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev)
226 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); 226 lubbock_set_misc_wr((1 << 15) | (1 << 14), 0);
227 227
228 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); 228 pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops);
229 pxa2xx_configure_sockets(&sadev->dev);
229 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, 230 ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops,
230 pxa2xx_drv_pcmcia_add_one); 231 pxa2xx_drv_pcmcia_add_one);
231 } 232 }
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index d163bc2e2b9e..a59af5b24f0a 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -227,7 +227,7 @@ config SONYPI_COMPAT
227config IDEAPAD_LAPTOP 227config IDEAPAD_LAPTOP
228 tristate "Lenovo IdeaPad Laptop Extras" 228 tristate "Lenovo IdeaPad Laptop Extras"
229 depends on ACPI 229 depends on ACPI
230 depends on RFKILL 230 depends on RFKILL && INPUT
231 select INPUT_SPARSEKMAP 231 select INPUT_SPARSEKMAP
232 help 232 help
233 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. 233 This is a driver for the rfkill switches on Lenovo IdeaPad netbooks.
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c5c4b8c32eb8..38b34a73866a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL");
84 */ 84 */
85#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" 85#define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB"
86#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" 86#define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C"
87#define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" 87#define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"
88#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" 88#define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A"
89#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" 89#define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531"
90 90
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev,
1280 return -EINVAL; 1280 return -EINVAL;
1281 return count; 1281 return count;
1282} 1282}
1283static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, 1283static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg,
1284 set_bool_threeg); 1284 set_bool_threeg);
1285 1285
1286static ssize_t show_interface(struct device *dev, struct device_attribute *attr, 1286static ssize_t show_interface(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 4633fd8532cc..fe495939c307 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device)
1081 struct proc_dir_entry *proc; 1081 struct proc_dir_entry *proc;
1082 mode_t mode; 1082 mode_t mode;
1083 1083
1084 /*
1085 * If parameter uid or gid is not changed, keep the default setting for
1086 * our proc entries (-rw-rw-rw-) else, it means we care about security,
1087 * and then set to -rw-rw----
1088 */
1089
1090 if ((asus_uid == 0) && (asus_gid == 0)) { 1084 if ((asus_uid == 0) && (asus_gid == 0)) {
1091 mode = S_IFREG | S_IRUGO | S_IWUGO; 1085 mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP;
1092 } else { 1086 } else {
1093 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; 1087 mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP;
1094 printk(KERN_WARNING " asus_uid and asus_gid parameters are " 1088 printk(KERN_WARNING " asus_uid and asus_gid parameters are "
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 34657f96b5a5..ad24ef36f9f7 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked)
290 dell_send_request(buffer, 17, 11); 290 dell_send_request(buffer, 17, 11);
291 291
292 /* If the hardware switch controls this radio, and the hardware 292 /* If the hardware switch controls this radio, and the hardware
293 switch is disabled, don't allow changing the software state */ 293 switch is disabled, don't allow changing the software state.
294 If the hardware switch is reported as not supported, always
295 fire the SMI to toggle the killswitch. */
294 if ((hwswitch_state & BIT(hwswitch_bit)) && 296 if ((hwswitch_state & BIT(hwswitch_bit)) &&
295 !(buffer->output[1] & BIT(16))) { 297 !(buffer->output[1] & BIT(16)) &&
298 (buffer->output[1] & BIT(0))) {
296 ret = -EINVAL; 299 ret = -EINVAL;
297 goto out; 300 goto out;
298 } 301 }
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = {
398 401
399static void dell_update_rfkill(struct work_struct *ignored) 402static void dell_update_rfkill(struct work_struct *ignored)
400{ 403{
404 int status;
405
406 get_buffer();
407 dell_send_request(buffer, 17, 11);
408 status = buffer->output[1];
409 release_buffer();
410
411 /* if hardware rfkill is not supported, set it explicitly */
412 if (!(status & BIT(0))) {
413 if (wifi_rfkill)
414 dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17));
415 if (bluetooth_rfkill)
416 dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18));
417 if (wwan_rfkill)
418 dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19));
419 }
420
401 if (wifi_rfkill) 421 if (wifi_rfkill)
402 dell_rfkill_query(wifi_rfkill, (void *)1); 422 dell_rfkill_query(wifi_rfkill, (void *)1);
403 if (bluetooth_rfkill) 423 if (bluetooth_rfkill)
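
The dell-laptop.c hunks above make the driver check bit 0 of the status word (hardware killswitch supported) before trusting the hardware-switch state, and derive each radio's blocked state from status bits 17-19 when no hardware switch exists. A tiny standalone sketch of that bit handling; the bit positions are taken from the hunk, everything else is invented for illustration:

#include <stdio.h>

#define BIT(n) (1u << (n))

/* status bit 0: hardware killswitch supported; bits 17/18/19: wifi,
 * bluetooth and wwan radios currently enabled (per the hunk above). */
static void report_radios(unsigned int status)
{
	int hw_switch = !!(status & BIT(0));
	int wifi_on   = !!(status & BIT(17));
	int bt_on     = !!(status & BIT(18));
	int wwan_on   = !!(status & BIT(19));

	printf("hw switch supported: %d\n", hw_switch);
	if (!hw_switch)
		/* no hardware switch: software must set the block state itself */
		printf("soft-block wifi=%d bt=%d wwan=%d\n",
		       !wifi_on, !bt_on, !wwan_on);
}

int main(void)
{
	report_radios(BIT(17) | BIT(19));		/* no hw switch, bt off */
	report_radios(BIT(0) | BIT(17) | BIT(18));	/* hw switch present */
	return 0;
}
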
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c
index 930e62762365..61433d492862 100644
--- a/drivers/platform/x86/intel_pmic_gpio.c
+++ b/drivers/platform/x86/intel_pmic_gpio.c
@@ -60,69 +60,20 @@ enum pmic_gpio_register {
60#define GPOSW_DOU 0x08 60#define GPOSW_DOU 0x08
61#define GPOSW_RDRV 0x30 61#define GPOSW_RDRV 0x30
62 62
63#define GPIO_UPDATE_TYPE 0x80000000
63 64
64#define NUM_GPIO 24 65#define NUM_GPIO 24
65 66
66struct pmic_gpio_irq {
67 spinlock_t lock;
68 u32 trigger[NUM_GPIO];
69 u32 dirty;
70 struct work_struct work;
71};
72
73
74struct pmic_gpio { 67struct pmic_gpio {
68 struct mutex buslock;
75 struct gpio_chip chip; 69 struct gpio_chip chip;
76 struct pmic_gpio_irq irqtypes;
77 void *gpiointr; 70 void *gpiointr;
78 int irq; 71 int irq;
79 unsigned irq_base; 72 unsigned irq_base;
73 unsigned int update_type;
74 u32 trigger_type;
80}; 75};
81 76
82static void pmic_program_irqtype(int gpio, int type)
83{
84 if (type & IRQ_TYPE_EDGE_RISING)
85 intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20);
86 else
87 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20);
88
89 if (type & IRQ_TYPE_EDGE_FALLING)
90 intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10);
91 else
92 intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10);
93};
94
95static void pmic_irqtype_work(struct work_struct *work)
96{
97 struct pmic_gpio_irq *t =
98 container_of(work, struct pmic_gpio_irq, work);
99 unsigned long flags;
100 int i;
101 u16 type;
102
103 spin_lock_irqsave(&t->lock, flags);
104 /* As we drop the lock, we may need multiple scans if we race the
105 pmic_irq_type function */
106 while (t->dirty) {
107 /*
108 * For each pin that has the dirty bit set send an IPC
109 * message to configure the hardware via the PMIC
110 */
111 for (i = 0; i < NUM_GPIO; i++) {
112 if (!(t->dirty & (1 << i)))
113 continue;
114 t->dirty &= ~(1 << i);
115 /* We can't trust the array entry or dirty
116 once the lock is dropped */
117 type = t->trigger[i];
118 spin_unlock_irqrestore(&t->lock, flags);
119 pmic_program_irqtype(i, type);
120 spin_lock_irqsave(&t->lock, flags);
121 }
122 }
123 spin_unlock_irqrestore(&t->lock, flags);
124}
125
126static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) 77static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
127{ 78{
128 if (offset > 8) { 79 if (offset > 8) {
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
190 1 << (offset - 16)); 141 1 << (offset - 16));
191} 142}
192 143
193static int pmic_irq_type(unsigned irq, unsigned type) 144/*
145 * This is called from genirq with pg->buslock locked and
146 * irq_desc->lock held. We can not access the scu bus here, so we
147 * store the change and update in the bus_sync_unlock() function below
148 */
149static int pmic_irq_type(struct irq_data *data, unsigned type)
194{ 150{
195 struct pmic_gpio *pg = get_irq_chip_data(irq); 151 struct pmic_gpio *pg = irq_data_get_irq_chip_data(data);
196 u32 gpio = irq - pg->irq_base; 152 u32 gpio = data->irq - pg->irq_base;
197 unsigned long flags;
198 153
199 if (gpio >= pg->chip.ngpio) 154 if (gpio >= pg->chip.ngpio)
200 return -EINVAL; 155 return -EINVAL;
201 156
202 spin_lock_irqsave(&pg->irqtypes.lock, flags); 157 pg->trigger_type = type;
203 pg->irqtypes.trigger[gpio] = type; 158 pg->update_type = gpio | GPIO_UPDATE_TYPE;
204 pg->irqtypes.dirty |= (1 << gpio);
205 spin_unlock_irqrestore(&pg->irqtypes.lock, flags);
206 schedule_work(&pg->irqtypes.work);
207 return 0; 159 return 0;
208} 160}
209 161
210
211
212static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) 162static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
213{ 163{
214 struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); 164 struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip);
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
217} 167}
218 168
219/* the gpiointr register is read-clear, so just do nothing. */ 169/* the gpiointr register is read-clear, so just do nothing. */
220static void pmic_irq_unmask(unsigned irq) 170static void pmic_irq_unmask(struct irq_data *data) { }
221{
222};
223 171
224static void pmic_irq_mask(unsigned irq) 172static void pmic_irq_mask(struct irq_data *data) { }
225{
226};
227 173
228static struct irq_chip pmic_irqchip = { 174static struct irq_chip pmic_irqchip = {
229 .name = "PMIC-GPIO", 175 .name = "PMIC-GPIO",
230 .mask = pmic_irq_mask, 176 .irq_mask = pmic_irq_mask,
231 .unmask = pmic_irq_unmask, 177 .irq_unmask = pmic_irq_unmask,
232 .set_type = pmic_irq_type, 178 .irq_set_type = pmic_irq_type,
233}; 179};
234 180
235static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) 181static irqreturn_t pmic_irq_handler(int irq, void *data)
236{ 182{
237 struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); 183 struct pmic_gpio *pg = data;
238 u8 intsts = *((u8 *)pg->gpiointr + 4); 184 u8 intsts = *((u8 *)pg->gpiointr + 4);
239 int gpio; 185 int gpio;
186 irqreturn_t ret = IRQ_NONE;
240 187
241 for (gpio = 0; gpio < 8; gpio++) { 188 for (gpio = 0; gpio < 8; gpio++) {
242 if (intsts & (1 << gpio)) { 189 if (intsts & (1 << gpio)) {
243 pr_debug("pmic pin %d triggered\n", gpio); 190 pr_debug("pmic pin %d triggered\n", gpio);
244 generic_handle_irq(pg->irq_base + gpio); 191 generic_handle_irq(pg->irq_base + gpio);
192 ret = IRQ_HANDLED;
245 } 193 }
246 } 194 }
247 195 return ret;
248 if (desc->chip->irq_eoi)
249 desc->chip->irq_eoi(irq_get_irq_data(irq));
250 else
251 dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq);
252} 196}
253 197
254static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) 198static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
297 pg->chip.can_sleep = 1; 241 pg->chip.can_sleep = 1;
298 pg->chip.dev = dev; 242 pg->chip.dev = dev;
299 243
300 INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); 244 mutex_init(&pg->buslock);
301 spin_lock_init(&pg->irqtypes.lock);
302 245
303 pg->chip.dev = dev; 246 pg->chip.dev = dev;
304 retval = gpiochip_add(&pg->chip); 247 retval = gpiochip_add(&pg->chip);
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev)
306 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); 249 printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__);
307 goto err; 250 goto err;
308 } 251 }
309 set_irq_data(pg->irq, pg); 252
310 set_irq_chained_handler(pg->irq, pmic_irq_handler); 253 retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg);
254 if (retval) {
255 printk(KERN_WARNING "pmic: Interrupt request failed\n");
256 goto err;
257 }
258
311 for (i = 0; i < 8; i++) { 259 for (i = 0; i < 8; i++) {
312 set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, 260 set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip,
313 handle_simple_irq, "demux"); 261 handle_simple_irq, "demux");
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c
index 1fe0f1feff71..865ef78d6f1a 100644
--- a/drivers/platform/x86/tc1100-wmi.c
+++ b/drivers/platform/x86/tc1100-wmi.c
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \
162 return -EINVAL; \ 162 return -EINVAL; \
163 return count; \ 163 return count; \
164} \ 164} \
165static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ 165static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \
166 show_bool_##value, set_bool_##value); 166 show_bool_##value, set_bool_##value);
167 167
168show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); 168show_set_bool(wireless, TC1100_INSTANCE_WIRELESS);
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index dd599585c6a9..eb9922385ef8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode)
2275 if (keycode != KEY_RESERVED) { 2275 if (keycode != KEY_RESERVED) {
2276 mutex_lock(&tpacpi_inputdev_send_mutex); 2276 mutex_lock(&tpacpi_inputdev_send_mutex);
2277 2277
2278 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
2278 input_report_key(tpacpi_inputdev, keycode, 1); 2279 input_report_key(tpacpi_inputdev, keycode, 1);
2279 if (keycode == KEY_UNKNOWN)
2280 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
2281 scancode);
2282 input_sync(tpacpi_inputdev); 2280 input_sync(tpacpi_inputdev);
2283 2281
2282 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode);
2284 input_report_key(tpacpi_inputdev, keycode, 0); 2283 input_report_key(tpacpi_inputdev, keycode, 0);
2285 if (keycode == KEY_UNKNOWN)
2286 input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
2287 scancode);
2288 input_sync(tpacpi_inputdev); 2284 input_sync(tpacpi_inputdev);
2289 2285
2290 mutex_unlock(&tpacpi_inputdev_send_mutex); 2286 mutex_unlock(&tpacpi_inputdev_send_mutex);
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig
index f3a73dd77660..e4c4f3dc0728 100644
--- a/drivers/pps/generators/Kconfig
+++ b/drivers/pps/generators/Kconfig
@@ -6,7 +6,7 @@ comment "PPS generators support"
6 6
7config PPS_GENERATOR_PARPORT 7config PPS_GENERATOR_PARPORT
8 tristate "Parallel port PPS signal generator" 8 tristate "Parallel port PPS signal generator"
9 depends on PARPORT 9 depends on PARPORT && BROKEN
10 help 10 help
11 If you say yes here you get support for a PPS signal generator which 11 If you say yes here you get support for a PPS signal generator which
12 utilizes STROBE pin of a parallel port to send PPS signals. It uses 12 utilizes STROBE pin of a parallel port to send PPS signals. It uses
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43f7519..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
168{ 168{
169 unsigned long flags; 169 unsigned long flags;
170 int captured = 0; 170 int captured = 0;
171 struct pps_ktime ts_real; 171 struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
172 172
173 /* check event type */ 173 /* check event type */
174 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); 174 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b41853a877..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
77 77
78 /* Several chips lock up trying to read undefined config space */ 78 /* Several chips lock up trying to read undefined config space */
79 if (capable(CAP_SYS_ADMIN)) 79 if (capable(CAP_SYS_ADMIN))
80 size = 0x200000; 80 size = RIO_MAINT_SPACE_SZ;
81 81
82 if (off > size) 82 if (off >= size)
83 return 0; 83 return 0;
84 if (off + count > size) { 84 if (off + count > size) {
85 size -= off; 85 size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
147 loff_t init_off = off; 147 loff_t init_off = off;
148 u8 *data = (u8 *) buf; 148 u8 *data = (u8 *) buf;
149 149
150 if (off > 0x200000) 150 if (off >= RIO_MAINT_SPACE_SZ)
151 return 0; 151 return 0;
152 if (off + count > 0x200000) { 152 if (off + count > RIO_MAINT_SPACE_SZ) {
153 size = 0x200000 - off; 153 size = RIO_MAINT_SPACE_SZ - off;
154 count = size; 154 count = size;
155 } 155 }
156 156
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
200 .name = "config", 200 .name = "config",
201 .mode = S_IRUGO | S_IWUSR, 201 .mode = S_IRUGO | S_IWUSR,
202 }, 202 },
203 .size = 0x200000, 203 .size = RIO_MAINT_SPACE_SZ,
204 .read = rio_read_config, 204 .read = rio_read_config,
205 .write = rio_write_config, 205 .write = rio_write_config,
206}; 206};
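
Both rio_read_config() and rio_write_config() now reject offsets at or past RIO_MAINT_SPACE_SZ (the old '>' let an access start exactly at the end of the space) and clamp the transfer length to what remains, with the magic 0x200000 replaced by the named constant. A compact illustration of that clamping, with a stand-in size constant:

#include <stddef.h>
#include <stdio.h>

#define MAINT_SPACE_SZ 0x200000u	/* stands in for RIO_MAINT_SPACE_SZ */

/* Clamp an (offset, count) request against the maintenance-space size:
 * an offset at or beyond the end transfers nothing, and a request that
 * crosses the end is shortened to the bytes that remain. */
static size_t clamp_request(size_t off, size_t count)
{
	if (off >= MAINT_SPACE_SZ)	/* the old '>' let off == size through */
		return 0;
	if (off + count > MAINT_SPACE_SZ)
		count = MAINT_SPACE_SZ - off;
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_request(0x1ffff0, 0x40));	/* clamped to 16 */
	printf("%zu\n", clamp_request(0x200000, 0x40));	/* 0: at the end */
	return 0;
}
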
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b950d4..2bb5de1f2421 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
174 174
175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); 175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
176 176
177 BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); 177 BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
178 178
179 return mc13xxx_regulators[id].voltages[val]; 179 return mc13xxx_regulators[id].voltages[val];
180} 180}
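
The mc13xxx change drops "val < 0" from the BUG_ON() because val is unsigned, so that half of the condition can never be true and only draws compiler warnings. A two-line demonstration of why the check is dead code:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0;

	/* For any unsigned value this comparison is false; gcc -Wextra warns
	 * that it is always false, which is why the check was removed. */
	printf("%d\n", val < 0);	/* prints 0 */
	return 0;
}
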
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4bde91..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
120 return REGULATOR_MODE_IDLE; 120 return REGULATOR_MODE_IDLE;
121 default: 121 default:
122 BUG(); 122 BUG();
123 return -EINVAL;
123 } 124 }
124} 125}
125 126
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index cdd97192dc69..4941cade319f 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -97,6 +97,18 @@ config RTC_INTF_DEV
97 97
98 If unsure, say Y. 98 If unsure, say Y.
99 99
100config RTC_INTF_DEV_UIE_EMUL
101 bool "RTC UIE emulation on dev interface"
102 depends on RTC_INTF_DEV
103 help
104 Provides an emulation for RTC_UIE if the underlying rtc chip
105 driver does not expose RTC_UIE ioctls. Those requests generate
106 once-per-second update interrupts, used for synchronization.
107
108 The emulation code will read the time from the hardware
109 clock several times per second, please enable this option
110 only if you know that you really need it.
111
100config RTC_DRV_TEST 112config RTC_DRV_TEST
101 tristate "Test driver/device" 113 tristate "Test driver/device"
102 help 114 help
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index a0c01967244d..cb2f0728fd70 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled)
209 } 209 }
210 210
211 if (err) 211 if (err)
212 return err; 212 /* nothing */;
213 213 else if (!rtc->ops)
214 if (!rtc->ops)
215 err = -ENODEV; 214 err = -ENODEV;
216 else if (!rtc->ops->alarm_irq_enable) 215 else if (!rtc->ops->alarm_irq_enable)
217 err = -EINVAL; 216 err = -EINVAL;
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
229 if (err) 228 if (err)
230 return err; 229 return err;
231 230
231#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
232 if (enabled == 0 && rtc->uie_irq_active) {
233 mutex_unlock(&rtc->ops_lock);
234 return rtc_dev_update_irq_enable_emul(rtc, 0);
235 }
236#endif
232 /* make sure we're changing state */ 237 /* make sure we're changing state */
233 if (rtc->uie_rtctimer.enabled == enabled) 238 if (rtc->uie_rtctimer.enabled == enabled)
234 goto out; 239 goto out;
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled)
248 253
249out: 254out:
250 mutex_unlock(&rtc->ops_lock); 255 mutex_unlock(&rtc->ops_lock);
256#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
257 /*
258 * Enable emulation if the driver did not provide
259 * the update_irq_enable function pointer or if returned
260 * -EINVAL to signal that it has been configured without
261 * interrupts or that are not available at the moment.
262 */
263 if (err == -EINVAL)
264 err = rtc_dev_update_irq_enable_emul(rtc, enabled);
265#endif
251 return err; 266 return err;
252 267
253} 268}
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable);
263 * 278 *
264 * Triggers the registered irq_task function callback. 279 * Triggers the registered irq_task function callback.
265 */ 280 */
266static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) 281void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode)
267{ 282{
268 unsigned long flags; 283 unsigned long flags;
269 284
diff --git a/drivers/rtc/rtc-at32ap700x.c b/drivers/rtc/rtc-at32ap700x.c
index b2752b6e7a2f..e725d51e773d 100644
--- a/drivers/rtc/rtc-at32ap700x.c
+++ b/drivers/rtc/rtc-at32ap700x.c
@@ -134,36 +134,29 @@ static int at32_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
134 return ret; 134 return ret;
135} 135}
136 136
137static int at32_rtc_ioctl(struct device *dev, unsigned int cmd, 137static int at32_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
138 unsigned long arg)
139{ 138{
140 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev); 139 struct rtc_at32ap700x *rtc = dev_get_drvdata(dev);
141 int ret = 0; 140 int ret = 0;
142 141
143 spin_lock_irq(&rtc->lock); 142 spin_lock_irq(&rtc->lock);
144 143
145 switch (cmd) { 144 if(enabled) {
146 case RTC_AIE_ON:
147 if (rtc_readl(rtc, VAL) > rtc->alarm_time) { 145 if (rtc_readl(rtc, VAL) > rtc->alarm_time) {
148 ret = -EINVAL; 146 ret = -EINVAL;
149 break; 147 goto out;
150 } 148 }
151 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) 149 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
152 | RTC_BIT(CTRL_TOPEN)); 150 | RTC_BIT(CTRL_TOPEN));
153 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); 151 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
154 rtc_writel(rtc, IER, RTC_BIT(IER_TOPI)); 152 rtc_writel(rtc, IER, RTC_BIT(IER_TOPI));
155 break; 153 } else {
156 case RTC_AIE_OFF:
157 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL) 154 rtc_writel(rtc, CTRL, rtc_readl(rtc, CTRL)
158 & ~RTC_BIT(CTRL_TOPEN)); 155 & ~RTC_BIT(CTRL_TOPEN));
159 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI)); 156 rtc_writel(rtc, IDR, RTC_BIT(IDR_TOPI));
160 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI)); 157 rtc_writel(rtc, ICR, RTC_BIT(ICR_TOPI));
161 break;
162 default:
163 ret = -ENOIOCTLCMD;
164 break;
165 } 158 }
166 159out:
167 spin_unlock_irq(&rtc->lock); 160 spin_unlock_irq(&rtc->lock);
168 161
169 return ret; 162 return ret;
@@ -195,11 +188,11 @@ static irqreturn_t at32_rtc_interrupt(int irq, void *dev_id)
195} 188}
196 189
197static struct rtc_class_ops at32_rtc_ops = { 190static struct rtc_class_ops at32_rtc_ops = {
198 .ioctl = at32_rtc_ioctl,
199 .read_time = at32_rtc_readtime, 191 .read_time = at32_rtc_readtime,
200 .set_time = at32_rtc_settime, 192 .set_time = at32_rtc_settime,
201 .read_alarm = at32_rtc_readalarm, 193 .read_alarm = at32_rtc_readalarm,
202 .set_alarm = at32_rtc_setalarm, 194 .set_alarm = at32_rtc_setalarm,
195 .alarm_irq_enable = at32_rtc_alarm_irq_enable,
203}; 196};
204 197
205static int __init at32_rtc_probe(struct platform_device *pdev) 198static int __init at32_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index bc8bbca9a2e2..26d1cf5d19ae 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -195,13 +195,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
195 195
196 /* important: scrub old status before enabling IRQs */ 196 /* important: scrub old status before enabling IRQs */
197 switch (cmd) { 197 switch (cmd) {
198 case RTC_AIE_OFF: /* alarm off */
199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
200 break;
201 case RTC_AIE_ON: /* alarm on */
202 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
203 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
204 break;
205 case RTC_UIE_OFF: /* update off */ 198 case RTC_UIE_OFF: /* update off */
206 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV); 199 at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
207 break; 200 break;
@@ -217,6 +210,18 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
217 return ret; 210 return ret;
218} 211}
219 212
213static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
214{
215 pr_debug("%s(): cmd=%08x\n", __func__, enabled);
216
217 if (enabled) {
218 at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
219 at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
220 } else
221 at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
222
223 return 0;
224}
220/* 225/*
221 * Provide additional RTC information in /proc/driver/rtc 226 * Provide additional RTC information in /proc/driver/rtc
222 */ 227 */
@@ -270,6 +275,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
270 .read_alarm = at91_rtc_readalarm, 275 .read_alarm = at91_rtc_readalarm,
271 .set_alarm = at91_rtc_setalarm, 276 .set_alarm = at91_rtc_setalarm,
272 .proc = at91_rtc_proc, 277 .proc = at91_rtc_proc,
278 .alarm_irq_enable = at91_rtc_alarm_irq_enable,
273}; 279};
274 280
275/* 281/*
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index f677e0710ca1..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -229,12 +229,6 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
229 dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr); 229 dev_dbg(dev, "ioctl: cmd=%08x, arg=%08lx, mr %08x\n", cmd, arg, mr);
230 230
231 switch (cmd) { 231 switch (cmd) {
232 case RTC_AIE_OFF: /* alarm off */
233 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
234 break;
235 case RTC_AIE_ON: /* alarm on */
236 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
237 break;
238 case RTC_UIE_OFF: /* update off */ 232 case RTC_UIE_OFF: /* update off */
239 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN); 233 rtt_writel(rtc, MR, mr & ~AT91_RTT_RTTINCIEN);
240 break; 234 break;
@@ -249,6 +243,19 @@ static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
249 return ret; 243 return ret;
250} 244}
251 245
246static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
247{
248 struct sam9_rtc *rtc = dev_get_drvdata(dev);
249 u32 mr = rtt_readl(rtc, MR);
250
251 dev_dbg(dev, "alarm_irq_enable: enabled=%08x, mr %08x\n", enabled, mr);
252 if (enabled)
253 rtt_writel(rtc, MR, mr | AT91_RTT_ALMIEN);
254 else
255 rtt_writel(rtc, MR, mr & ~AT91_RTT_ALMIEN);
256 return 0;
257}
258
252/* 259/*
253 * Provide additional RTC information in /proc/driver/rtc 260 * Provide additional RTC information in /proc/driver/rtc
254 */ 261 */
@@ -302,6 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
302 .read_alarm = at91_rtc_readalarm, 309 .read_alarm = at91_rtc_readalarm,
303 .set_alarm = at91_rtc_setalarm, 310 .set_alarm = at91_rtc_setalarm,
304 .proc = at91_rtc_proc, 311 .proc = at91_rtc_proc,
312 .alarm_irq_enable = at91_rtc_alarm_irq_enable,
305}; 313};
306 314
307/* 315/*
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c
index b4b6087f2234..17971d93354d 100644
--- a/drivers/rtc/rtc-bfin.c
+++ b/drivers/rtc/rtc-bfin.c
@@ -259,15 +259,6 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
259 bfin_rtc_int_clear(~RTC_ISTAT_SEC); 259 bfin_rtc_int_clear(~RTC_ISTAT_SEC);
260 break; 260 break;
261 261
262 case RTC_AIE_ON:
263 dev_dbg_stamp(dev);
264 bfin_rtc_int_set_alarm(rtc);
265 break;
266 case RTC_AIE_OFF:
267 dev_dbg_stamp(dev);
268 bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
269 break;
270
271 default: 262 default:
272 dev_dbg_stamp(dev); 263 dev_dbg_stamp(dev);
273 ret = -ENOIOCTLCMD; 264 ret = -ENOIOCTLCMD;
@@ -276,6 +267,17 @@ static int bfin_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long ar
276 return ret; 267 return ret;
277} 268}
278 269
270static int bfin_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
271{
272 struct bfin_rtc *rtc = dev_get_drvdata(dev);
273
274 dev_dbg_stamp(dev);
275 if (enabled)
276 bfin_rtc_int_set_alarm(rtc);
277 else
278 bfin_rtc_int_clear(~(RTC_ISTAT_ALARM | RTC_ISTAT_ALARM_DAY));
279}
280
279static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm) 281static int bfin_rtc_read_time(struct device *dev, struct rtc_time *tm)
280{ 282{
281 struct bfin_rtc *rtc = dev_get_drvdata(dev); 283 struct bfin_rtc *rtc = dev_get_drvdata(dev);
@@ -362,6 +364,7 @@ static struct rtc_class_ops bfin_rtc_ops = {
362 .read_alarm = bfin_rtc_read_alarm, 364 .read_alarm = bfin_rtc_read_alarm,
363 .set_alarm = bfin_rtc_set_alarm, 365 .set_alarm = bfin_rtc_set_alarm,
364 .proc = bfin_rtc_proc, 366 .proc = bfin_rtc_proc,
367 .alarm_irq_enable = bfin_rtc_alarm_irq_enable,
365}; 368};
366 369
367static int __devinit bfin_rtc_probe(struct platform_device *pdev) 370static int __devinit bfin_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index 212b16edafc0..d0e06edb14c5 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
46 return err; 46 return err;
47} 47}
48 48
49#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
50/*
 51 * Routine to poll the RTC seconds field for a change as often as possible;
 52 * after the first RTC_UIE, a timer is used to reduce the polling rate.
53 */
54static void rtc_uie_task(struct work_struct *work)
55{
56 struct rtc_device *rtc =
57 container_of(work, struct rtc_device, uie_task);
58 struct rtc_time tm;
59 int num = 0;
60 int err;
61
62 err = rtc_read_time(rtc, &tm);
63
64 spin_lock_irq(&rtc->irq_lock);
65 if (rtc->stop_uie_polling || err) {
66 rtc->uie_task_active = 0;
67 } else if (rtc->oldsecs != tm.tm_sec) {
68 num = (tm.tm_sec + 60 - rtc->oldsecs) % 60;
69 rtc->oldsecs = tm.tm_sec;
70 rtc->uie_timer.expires = jiffies + HZ - (HZ/10);
71 rtc->uie_timer_active = 1;
72 rtc->uie_task_active = 0;
73 add_timer(&rtc->uie_timer);
74 } else if (schedule_work(&rtc->uie_task) == 0) {
75 rtc->uie_task_active = 0;
76 }
77 spin_unlock_irq(&rtc->irq_lock);
78 if (num)
79 rtc_handle_legacy_irq(rtc, num, RTC_UF);
80}
81static void rtc_uie_timer(unsigned long data)
82{
83 struct rtc_device *rtc = (struct rtc_device *)data;
84 unsigned long flags;
85
86 spin_lock_irqsave(&rtc->irq_lock, flags);
87 rtc->uie_timer_active = 0;
88 rtc->uie_task_active = 1;
89 if ((schedule_work(&rtc->uie_task) == 0))
90 rtc->uie_task_active = 0;
91 spin_unlock_irqrestore(&rtc->irq_lock, flags);
92}
93
94static int clear_uie(struct rtc_device *rtc)
95{
96 spin_lock_irq(&rtc->irq_lock);
97 if (rtc->uie_irq_active) {
98 rtc->stop_uie_polling = 1;
99 if (rtc->uie_timer_active) {
100 spin_unlock_irq(&rtc->irq_lock);
101 del_timer_sync(&rtc->uie_timer);
102 spin_lock_irq(&rtc->irq_lock);
103 rtc->uie_timer_active = 0;
104 }
105 if (rtc->uie_task_active) {
106 spin_unlock_irq(&rtc->irq_lock);
107 flush_scheduled_work();
108 spin_lock_irq(&rtc->irq_lock);
109 }
110 rtc->uie_irq_active = 0;
111 }
112 spin_unlock_irq(&rtc->irq_lock);
113 return 0;
114}
115
116static int set_uie(struct rtc_device *rtc)
117{
118 struct rtc_time tm;
119 int err;
120
121 err = rtc_read_time(rtc, &tm);
122 if (err)
123 return err;
124 spin_lock_irq(&rtc->irq_lock);
125 if (!rtc->uie_irq_active) {
126 rtc->uie_irq_active = 1;
127 rtc->stop_uie_polling = 0;
128 rtc->oldsecs = tm.tm_sec;
129 rtc->uie_task_active = 1;
130 if (schedule_work(&rtc->uie_task) == 0)
131 rtc->uie_task_active = 0;
132 }
133 rtc->irq_data = 0;
134 spin_unlock_irq(&rtc->irq_lock);
135 return 0;
136}
137
138int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled)
139{
140 if (enabled)
141 return set_uie(rtc);
142 else
143 return clear_uie(rtc);
144}
145EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul);
146
147#endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */
49 148
50static ssize_t 149static ssize_t
51rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) 150rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
@@ -154,19 +253,7 @@ static long rtc_dev_ioctl(struct file *file,
154 if (err) 253 if (err)
155 goto done; 254 goto done;
156 255
157 /* try the driver's ioctl interface */ 256 /*
158 if (ops->ioctl) {
159 err = ops->ioctl(rtc->dev.parent, cmd, arg);
160 if (err != -ENOIOCTLCMD) {
161 mutex_unlock(&rtc->ops_lock);
162 return err;
163 }
164 }
165
166 /* if the driver does not provide the ioctl interface
167 * or if that particular ioctl was not implemented
168 * (-ENOIOCTLCMD), we will try to emulate here.
169 *
170 * Drivers *SHOULD NOT* provide ioctl implementations 257 * Drivers *SHOULD NOT* provide ioctl implementations
171 * for these requests. Instead, provide methods to 258 * for these requests. Instead, provide methods to
172 * support the following code, so that the RTC's main 259 * support the following code, so that the RTC's main
@@ -329,7 +416,12 @@ static long rtc_dev_ioctl(struct file *file,
329 return err; 416 return err;
330 417
331 default: 418 default:
332 err = -ENOTTY; 419 /* Finally try the driver's ioctl interface */
420 if (ops->ioctl) {
421 err = ops->ioctl(rtc->dev.parent, cmd, arg);
422 if (err == -ENOIOCTLCMD)
423 err = -ENOTTY;
424 }
333 break; 425 break;
334 } 426 }
335 427
@@ -394,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc)
394 486
395 rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); 487 rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id);
396 488
489#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
490 INIT_WORK(&rtc->uie_task, rtc_uie_task);
491 setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
492#endif
493
397 cdev_init(&rtc->char_dev, &rtc_dev_fops); 494 cdev_init(&rtc->char_dev, &rtc_dev_fops);
398 rtc->char_dev.owner = rtc->owner; 495 rtc->char_dev.owner = rtc->owner;
399} 496}
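With the polling emulation above moved into rtc-dev.c and unhandled ioctls forwarded to the driver only from the default case, the user-visible interface stays the same: update interrupts are still requested with RTC_UIE_ON and consumed by blocking reads on the character device, whether they come from real hardware or from the uie_task/uie_timer emulation. A small user-space sketch of that usage, assuming /dev/rtc0 exists and update interrupts are available or emulated:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
        unsigned long data;
        int i, fd = open("/dev/rtc0", O_RDONLY);

        if (fd < 0) {
                perror("open /dev/rtc0");
                return 1;
        }
        if (ioctl(fd, RTC_UIE_ON, 0) < 0) {     /* enable 1 Hz update irqs */
                perror("RTC_UIE_ON");
                return 1;
        }
        for (i = 0; i < 5; i++) {
                /* Blocks until the next update; the low byte is the irq type
                 * mask, the upper bits count interrupts since the last read. */
                if (read(fd, &data, sizeof(data)) != sizeof(data))
                        break;
                printf("update irq, count=%lu\n", data >> 8);
        }
        ioctl(fd, RTC_UIE_OFF, 0);
        close(fd);
        return 0;
}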
diff --git a/drivers/rtc/rtc-ds1286.c b/drivers/rtc/rtc-ds1286.c
index bf430f9091ed..60ce69600828 100644
--- a/drivers/rtc/rtc-ds1286.c
+++ b/drivers/rtc/rtc-ds1286.c
@@ -40,6 +40,26 @@ static inline void ds1286_rtc_write(struct ds1286_priv *priv, u8 data, int reg)
40 __raw_writel(data, &priv->rtcregs[reg]); 40 __raw_writel(data, &priv->rtcregs[reg]);
41} 41}
42 42
43
44static int ds1286_alarm_irq_enable(struct device *dev, unsigned int enabled)
45{
46 struct ds1286_priv *priv = dev_get_drvdata(dev);
47 unsigned long flags;
48 unsigned char val;
49
50 /* Allow or mask alarm interrupts */
51 spin_lock_irqsave(&priv->lock, flags);
52 val = ds1286_rtc_read(priv, RTC_CMD);
53 if (enabled)
54 val &= ~RTC_TDM;
55 else
56 val |= RTC_TDM;
57 ds1286_rtc_write(priv, val, RTC_CMD);
58 spin_unlock_irqrestore(&priv->lock, flags);
59
60 return 0;
61}
62
43#ifdef CONFIG_RTC_INTF_DEV 63#ifdef CONFIG_RTC_INTF_DEV
44 64
45static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 65static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
@@ -49,22 +69,6 @@ static int ds1286_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
49 unsigned char val; 69 unsigned char val;
50 70
51 switch (cmd) { 71 switch (cmd) {
52 case RTC_AIE_OFF:
53 /* Mask alarm int. enab. bit */
54 spin_lock_irqsave(&priv->lock, flags);
55 val = ds1286_rtc_read(priv, RTC_CMD);
56 val |= RTC_TDM;
57 ds1286_rtc_write(priv, val, RTC_CMD);
58 spin_unlock_irqrestore(&priv->lock, flags);
59 break;
60 case RTC_AIE_ON:
61 /* Allow alarm interrupts. */
62 spin_lock_irqsave(&priv->lock, flags);
63 val = ds1286_rtc_read(priv, RTC_CMD);
64 val &= ~RTC_TDM;
65 ds1286_rtc_write(priv, val, RTC_CMD);
66 spin_unlock_irqrestore(&priv->lock, flags);
67 break;
68 case RTC_WIE_OFF: 72 case RTC_WIE_OFF:
69 /* Mask watchdog int. enab. bit */ 73 /* Mask watchdog int. enab. bit */
70 spin_lock_irqsave(&priv->lock, flags); 74 spin_lock_irqsave(&priv->lock, flags);
@@ -316,12 +320,13 @@ static int ds1286_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
316} 320}
317 321
318static const struct rtc_class_ops ds1286_ops = { 322static const struct rtc_class_ops ds1286_ops = {
319 .ioctl = ds1286_ioctl, 323 .ioctl = ds1286_ioctl,
320 .proc = ds1286_proc, 324 .proc = ds1286_proc,
321 .read_time = ds1286_read_time, 325 .read_time = ds1286_read_time,
322 .set_time = ds1286_set_time, 326 .set_time = ds1286_set_time,
323 .read_alarm = ds1286_read_alarm, 327 .read_alarm = ds1286_read_alarm,
324 .set_alarm = ds1286_set_alarm, 328 .set_alarm = ds1286_set_alarm,
329 .alarm_irq_enable = ds1286_alarm_irq_enable,
325}; 330};
326 331
327static int __devinit ds1286_probe(struct platform_device *pdev) 332static int __devinit ds1286_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-ds1305.c b/drivers/rtc/rtc-ds1305.c
index 077af1d7b9e4..57fbcc149ba7 100644
--- a/drivers/rtc/rtc-ds1305.c
+++ b/drivers/rtc/rtc-ds1305.c
@@ -139,49 +139,32 @@ static u8 hour2bcd(bool hr12, int hour)
139 * Interface to RTC framework 139 * Interface to RTC framework
140 */ 140 */
141 141
142#ifdef CONFIG_RTC_INTF_DEV 142static int ds1305_alarm_irq_enable(struct device *dev, unsigned int enabled)
143
144/*
145 * Context: caller holds rtc->ops_lock (to protect ds1305->ctrl)
146 */
147static int ds1305_ioctl(struct device *dev, unsigned cmd, unsigned long arg)
148{ 143{
149 struct ds1305 *ds1305 = dev_get_drvdata(dev); 144 struct ds1305 *ds1305 = dev_get_drvdata(dev);
150 u8 buf[2]; 145 u8 buf[2];
151 int status = -ENOIOCTLCMD; 146 long err = -EINVAL;
152 147
153 buf[0] = DS1305_WRITE | DS1305_CONTROL; 148 buf[0] = DS1305_WRITE | DS1305_CONTROL;
154 buf[1] = ds1305->ctrl[0]; 149 buf[1] = ds1305->ctrl[0];
155 150
156 switch (cmd) { 151 if (enabled) {
157 case RTC_AIE_OFF:
158 status = 0;
159 if (!(buf[1] & DS1305_AEI0))
160 goto done;
161 buf[1] &= ~DS1305_AEI0;
162 break;
163
164 case RTC_AIE_ON:
165 status = 0;
166 if (ds1305->ctrl[0] & DS1305_AEI0) 152 if (ds1305->ctrl[0] & DS1305_AEI0)
167 goto done; 153 goto done;
168 buf[1] |= DS1305_AEI0; 154 buf[1] |= DS1305_AEI0;
169 break; 155 } else {
170 } 156 if (!(buf[1] & DS1305_AEI0))
171 if (status == 0) { 157 goto done;
172 status = spi_write_then_read(ds1305->spi, buf, sizeof buf, 158 buf[1] &= ~DS1305_AEI0;
173 NULL, 0);
174 if (status >= 0)
175 ds1305->ctrl[0] = buf[1];
176 } 159 }
177 160 err = spi_write_then_read(ds1305->spi, buf, sizeof buf, NULL, 0);
161 if (err >= 0)
162 ds1305->ctrl[0] = buf[1];
178done: 163done:
179 return status; 164 return err;
165
180} 166}
181 167
182#else
183#define ds1305_ioctl NULL
184#endif
185 168
186/* 169/*
187 * Get/set of date and time is pretty normal. 170 * Get/set of date and time is pretty normal.
@@ -460,12 +443,12 @@ done:
460#endif 443#endif
461 444
462static const struct rtc_class_ops ds1305_ops = { 445static const struct rtc_class_ops ds1305_ops = {
463 .ioctl = ds1305_ioctl,
464 .read_time = ds1305_get_time, 446 .read_time = ds1305_get_time,
465 .set_time = ds1305_set_time, 447 .set_time = ds1305_set_time,
466 .read_alarm = ds1305_get_alarm, 448 .read_alarm = ds1305_get_alarm,
467 .set_alarm = ds1305_set_alarm, 449 .set_alarm = ds1305_set_alarm,
468 .proc = ds1305_proc, 450 .proc = ds1305_proc,
451 .alarm_irq_enable = ds1305_alarm_irq_enable,
469}; 452};
470 453
471static void ds1305_work(struct work_struct *work) 454static void ds1305_work(struct work_struct *work)
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index 0d559b6416dd..4724ba3acf1a 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -495,50 +495,27 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t)
495 return 0; 495 return 0;
496} 496}
497 497
498static int ds1307_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 498static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled)
499{ 499{
500 struct i2c_client *client = to_i2c_client(dev); 500 struct i2c_client *client = to_i2c_client(dev);
501 struct ds1307 *ds1307 = i2c_get_clientdata(client); 501 struct ds1307 *ds1307 = i2c_get_clientdata(client);
502 int ret; 502 int ret;
503 503
504 switch (cmd) { 504 if (!test_bit(HAS_ALARM, &ds1307->flags))
505 case RTC_AIE_OFF: 505 return -ENOTTY;
506 if (!test_bit(HAS_ALARM, &ds1307->flags))
507 return -ENOTTY;
508
509 ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
510 if (ret < 0)
511 return ret;
512
513 ret &= ~DS1337_BIT_A1IE;
514
515 ret = i2c_smbus_write_byte_data(client,
516 DS1337_REG_CONTROL, ret);
517 if (ret < 0)
518 return ret;
519
520 break;
521
522 case RTC_AIE_ON:
523 if (!test_bit(HAS_ALARM, &ds1307->flags))
524 return -ENOTTY;
525 506
526 ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); 507 ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL);
527 if (ret < 0) 508 if (ret < 0)
528 return ret; 509 return ret;
529 510
511 if (enabled)
530 ret |= DS1337_BIT_A1IE; 512 ret |= DS1337_BIT_A1IE;
513 else
514 ret &= ~DS1337_BIT_A1IE;
531 515
532 ret = i2c_smbus_write_byte_data(client, 516 ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret);
533 DS1337_REG_CONTROL, ret); 517 if (ret < 0)
534 if (ret < 0) 518 return ret;
535 return ret;
536
537 break;
538
539 default:
540 return -ENOIOCTLCMD;
541 }
542 519
543 return 0; 520 return 0;
544} 521}
@@ -548,7 +525,7 @@ static const struct rtc_class_ops ds13xx_rtc_ops = {
548 .set_time = ds1307_set_time, 525 .set_time = ds1307_set_time,
549 .read_alarm = ds1337_read_alarm, 526 .read_alarm = ds1337_read_alarm,
550 .set_alarm = ds1337_set_alarm, 527 .set_alarm = ds1337_set_alarm,
551 .ioctl = ds1307_ioctl, 528 .alarm_irq_enable = ds1307_alarm_irq_enable,
552}; 529};
553 530
554/*----------------------------------------------------------------------*/ 531/*----------------------------------------------------------------------*/
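The ds1307 callback above, like the ds1374 one that follows, is an SMBus read-modify-write of a single control register. The idiom in isolation, with a hypothetical register and bit name standing in for the DS1337 defines (condensed sketch, not the driver code):

#include <linux/i2c.h>

#define CHIP_REG_CONTROL        0x0e    /* hypothetical control register */
#define CHIP_BIT_AIE            0x01    /* hypothetical alarm-irq enable bit */

static int chip_set_alarm_irq(struct i2c_client *client, unsigned int enabled)
{
        int ret;

        /* returns the register value, or a negative errno on failure */
        ret = i2c_smbus_read_byte_data(client, CHIP_REG_CONTROL);
        if (ret < 0)
                return ret;

        if (enabled)
                ret |= CHIP_BIT_AIE;
        else
                ret &= ~CHIP_BIT_AIE;

        /* returns 0 on success, negative errno on failure */
        return i2c_smbus_write_byte_data(client, CHIP_REG_CONTROL, ret);
}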
diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
index 47fb6357c346..d834a63ec4b0 100644
--- a/drivers/rtc/rtc-ds1374.c
+++ b/drivers/rtc/rtc-ds1374.c
@@ -307,42 +307,25 @@ unlock:
307 mutex_unlock(&ds1374->mutex); 307 mutex_unlock(&ds1374->mutex);
308} 308}
309 309
310static int ds1374_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 310static int ds1374_alarm_irq_enable(struct device *dev, unsigned int enabled)
311{ 311{
312 struct i2c_client *client = to_i2c_client(dev); 312 struct i2c_client *client = to_i2c_client(dev);
313 struct ds1374 *ds1374 = i2c_get_clientdata(client); 313 struct ds1374 *ds1374 = i2c_get_clientdata(client);
314 int ret = -ENOIOCTLCMD; 314 int ret;
315 315
316 mutex_lock(&ds1374->mutex); 316 mutex_lock(&ds1374->mutex);
317 317
318 switch (cmd) { 318 ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
319 case RTC_AIE_OFF: 319 if (ret < 0)
320 ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR); 320 goto out;
321 if (ret < 0)
322 goto out;
323
324 ret &= ~DS1374_REG_CR_WACE;
325
326 ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
327 if (ret < 0)
328 goto out;
329
330 break;
331
332 case RTC_AIE_ON:
333 ret = i2c_smbus_read_byte_data(client, DS1374_REG_CR);
334 if (ret < 0)
335 goto out;
336 321
322 if (enabled) {
337 ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE; 323 ret |= DS1374_REG_CR_WACE | DS1374_REG_CR_AIE;
338 ret &= ~DS1374_REG_CR_WDALM; 324 ret &= ~DS1374_REG_CR_WDALM;
339 325 } else {
340 ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret); 326 ret &= ~DS1374_REG_CR_WACE;
341 if (ret < 0)
342 goto out;
343
344 break;
345 } 327 }
328 ret = i2c_smbus_write_byte_data(client, DS1374_REG_CR, ret);
346 329
347out: 330out:
348 mutex_unlock(&ds1374->mutex); 331 mutex_unlock(&ds1374->mutex);
@@ -354,7 +337,7 @@ static const struct rtc_class_ops ds1374_rtc_ops = {
354 .set_time = ds1374_set_time, 337 .set_time = ds1374_set_time,
355 .read_alarm = ds1374_read_alarm, 338 .read_alarm = ds1374_read_alarm,
356 .set_alarm = ds1374_set_alarm, 339 .set_alarm = ds1374_set_alarm,
357 .ioctl = ds1374_ioctl, 340 .alarm_irq_enable = ds1374_alarm_irq_enable,
358}; 341};
359 342
360static int ds1374_probe(struct i2c_client *client, 343static int ds1374_probe(struct i2c_client *client,
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C 2 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
3 * 3 *
4 * Copyright (C) 2009-2010 Freescale Semiconductor. 4 * Copyright (C) 2009-2011 Freescale Semiconductor.
5 * Author: Jack Lan <jack.lan@freescale.com> 5 * Author: Jack Lan <jack.lan@freescale.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
141 time->tm_hour = bcd2bin(hour); 141 time->tm_hour = bcd2bin(hour);
142 } 142 }
143 143
144 time->tm_wday = bcd2bin(week); 144 /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
145 time->tm_wday = bcd2bin(week) - 1;
145 time->tm_mday = bcd2bin(day); 146 time->tm_mday = bcd2bin(day);
146 time->tm_mon = bcd2bin(month & 0x7F); 147 /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
148 time->tm_mon = bcd2bin(month & 0x7F) - 1;
147 if (century) 149 if (century)
148 add_century = 100; 150 add_century = 100;
149 151
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
162 buf[0] = bin2bcd(time->tm_sec); 164 buf[0] = bin2bcd(time->tm_sec);
163 buf[1] = bin2bcd(time->tm_min); 165 buf[1] = bin2bcd(time->tm_min);
164 buf[2] = bin2bcd(time->tm_hour); 166 buf[2] = bin2bcd(time->tm_hour);
165 buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ 167 /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
168 buf[3] = bin2bcd(time->tm_wday + 1);
166 buf[4] = bin2bcd(time->tm_mday); /* Date */ 169 buf[4] = bin2bcd(time->tm_mday); /* Date */
167 buf[5] = bin2bcd(time->tm_mon); 170 /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
171 buf[5] = bin2bcd(time->tm_mon + 1);
168 if (time->tm_year >= 100) { 172 if (time->tm_year >= 100) {
169 buf[5] |= 0x80; 173 buf[5] |= 0x80;
170 buf[6] = bin2bcd(time->tm_year - 100); 174 buf[6] = bin2bcd(time->tm_year - 100);
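The ds3232 hunks are a pure off-by-one fix: struct rtc_time counts tm_wday 0-6 and tm_mon 0-11, while the chip registers hold 1-7 and 1-12 in BCD, so the driver has to add or subtract one at the register boundary. A stand-alone illustration of that mapping, using local helpers equivalent to the kernel's bcd2bin()/bin2bcd():

#include <stdio.h>

static unsigned int bcd2bin(unsigned char val)
{
        return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned int val)
{
        return ((val / 10) << 4) | (val % 10);
}

int main(void)
{
        /* Register values as a DS3232-style chip reports them:
         * weekday 1-7 and month 1-12, both BCD encoded. */
        unsigned char reg_wday = 0x07, reg_month = 0x12;

        /* chip -> struct rtc_time: subtract 1 to get 0-based fields */
        int tm_wday = bcd2bin(reg_wday) - 1;    /* 6 == Saturday  */
        int tm_mon  = bcd2bin(reg_month) - 1;   /* 11 == December */

        /* struct rtc_time -> chip: add 1 before re-encoding as BCD */
        unsigned char new_wday  = bin2bcd(tm_wday + 1);
        unsigned char new_month = bin2bcd(tm_mon + 1);

        printf("wday %d -> 0x%02x, mon %d -> 0x%02x\n",
               tm_wday, new_wday, tm_mon, new_month);
        return 0;
}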
diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c
index 5a8daa358066..69fe664a2228 100644
--- a/drivers/rtc/rtc-m41t80.c
+++ b/drivers/rtc/rtc-m41t80.c
@@ -213,41 +213,27 @@ static int m41t80_rtc_set_time(struct device *dev, struct rtc_time *tm)
213 return m41t80_set_datetime(to_i2c_client(dev), tm); 213 return m41t80_set_datetime(to_i2c_client(dev), tm);
214} 214}
215 215
216#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE) 216static int m41t80_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
217static int
218m41t80_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
219{ 217{
220 struct i2c_client *client = to_i2c_client(dev); 218 struct i2c_client *client = to_i2c_client(dev);
221 int rc; 219 int rc;
222 220
223 switch (cmd) {
224 case RTC_AIE_OFF:
225 case RTC_AIE_ON:
226 break;
227 default:
228 return -ENOIOCTLCMD;
229 }
230
231 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); 221 rc = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON);
232 if (rc < 0) 222 if (rc < 0)
233 goto err; 223 goto err;
234 switch (cmd) { 224
235 case RTC_AIE_OFF: 225 if (enabled)
236 rc &= ~M41T80_ALMON_AFE;
237 break;
238 case RTC_AIE_ON:
239 rc |= M41T80_ALMON_AFE; 226 rc |= M41T80_ALMON_AFE;
240 break; 227 else
241 } 228 rc &= ~M41T80_ALMON_AFE;
229
242 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0) 230 if (i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, rc) < 0)
243 goto err; 231 goto err;
232
244 return 0; 233 return 0;
245err: 234err:
246 return -EIO; 235 return -EIO;
247} 236}
248#else
249#define m41t80_rtc_ioctl NULL
250#endif
251 237
252static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) 238static int m41t80_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t)
253{ 239{
@@ -374,7 +360,7 @@ static struct rtc_class_ops m41t80_rtc_ops = {
374 .read_alarm = m41t80_rtc_read_alarm, 360 .read_alarm = m41t80_rtc_read_alarm,
375 .set_alarm = m41t80_rtc_set_alarm, 361 .set_alarm = m41t80_rtc_set_alarm,
376 .proc = m41t80_rtc_proc, 362 .proc = m41t80_rtc_proc,
377 .ioctl = m41t80_rtc_ioctl, 363 .alarm_irq_enable = m41t80_rtc_alarm_irq_enable,
378}; 364};
379 365
380#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) 366#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
index a99a0b554eb8..3978f4caf724 100644
--- a/drivers/rtc/rtc-m48t59.c
+++ b/drivers/rtc/rtc-m48t59.c
@@ -263,30 +263,21 @@ static int m48t59_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
263/* 263/*
264 * Handle commands from user-space 264 * Handle commands from user-space
265 */ 265 */
266static int m48t59_rtc_ioctl(struct device *dev, unsigned int cmd, 266static int m48t59_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
267 unsigned long arg)
268{ 267{
269 struct platform_device *pdev = to_platform_device(dev); 268 struct platform_device *pdev = to_platform_device(dev);
270 struct m48t59_plat_data *pdata = pdev->dev.platform_data; 269 struct m48t59_plat_data *pdata = pdev->dev.platform_data;
271 struct m48t59_private *m48t59 = platform_get_drvdata(pdev); 270 struct m48t59_private *m48t59 = platform_get_drvdata(pdev);
272 unsigned long flags; 271 unsigned long flags;
273 int ret = 0;
274 272
275 spin_lock_irqsave(&m48t59->lock, flags); 273 spin_lock_irqsave(&m48t59->lock, flags);
276 switch (cmd) { 274 if (enabled)
277 case RTC_AIE_OFF: /* alarm interrupt off */
278 M48T59_WRITE(0x00, M48T59_INTR);
279 break;
280 case RTC_AIE_ON: /* alarm interrupt on */
281 M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR); 275 M48T59_WRITE(M48T59_INTR_AFE, M48T59_INTR);
282 break; 276 else
283 default: 277 M48T59_WRITE(0x00, M48T59_INTR);
284 ret = -ENOIOCTLCMD;
285 break;
286 }
287 spin_unlock_irqrestore(&m48t59->lock, flags); 278 spin_unlock_irqrestore(&m48t59->lock, flags);
288 279
289 return ret; 280 return 0;
290} 281}
291 282
292static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq) 283static int m48t59_rtc_proc(struct device *dev, struct seq_file *seq)
@@ -330,12 +321,12 @@ static irqreturn_t m48t59_rtc_interrupt(int irq, void *dev_id)
330} 321}
331 322
332static const struct rtc_class_ops m48t59_rtc_ops = { 323static const struct rtc_class_ops m48t59_rtc_ops = {
333 .ioctl = m48t59_rtc_ioctl,
334 .read_time = m48t59_rtc_read_time, 324 .read_time = m48t59_rtc_read_time,
335 .set_time = m48t59_rtc_set_time, 325 .set_time = m48t59_rtc_set_time,
336 .read_alarm = m48t59_rtc_readalarm, 326 .read_alarm = m48t59_rtc_readalarm,
337 .set_alarm = m48t59_rtc_setalarm, 327 .set_alarm = m48t59_rtc_setalarm,
338 .proc = m48t59_rtc_proc, 328 .proc = m48t59_rtc_proc,
329 .alarm_irq_enable = m48t59_rtc_alarm_irq_enable,
339}; 330};
340 331
341static const struct rtc_class_ops m48t02_rtc_ops = { 332static const struct rtc_class_ops m48t02_rtc_ops = {
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index bcd0cf63eb16..1db62db8469d 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -255,42 +255,21 @@ static int mrst_irq_set_state(struct device *dev, int enabled)
255 return 0; 255 return 0;
256} 256}
257 257
258#if defined(CONFIG_RTC_INTF_DEV) || defined(CONFIG_RTC_INTF_DEV_MODULE)
259
260/* Currently, the vRTC doesn't support UIE ON/OFF */ 258/* Currently, the vRTC doesn't support UIE ON/OFF */
261static int 259static int mrst_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
262mrst_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
263{ 260{
264 struct mrst_rtc *mrst = dev_get_drvdata(dev); 261 struct mrst_rtc *mrst = dev_get_drvdata(dev);
265 unsigned long flags; 262 unsigned long flags;
266 263
267 switch (cmd) {
268 case RTC_AIE_OFF:
269 case RTC_AIE_ON:
270 if (!mrst->irq)
271 return -EINVAL;
272 break;
273 default:
274 /* PIE ON/OFF is handled by mrst_irq_set_state() */
275 return -ENOIOCTLCMD;
276 }
277
278 spin_lock_irqsave(&rtc_lock, flags); 264 spin_lock_irqsave(&rtc_lock, flags);
279 switch (cmd) { 265 if (enabled)
280 case RTC_AIE_OFF: /* alarm off */
281 mrst_irq_disable(mrst, RTC_AIE);
282 break;
283 case RTC_AIE_ON: /* alarm on */
284 mrst_irq_enable(mrst, RTC_AIE); 266 mrst_irq_enable(mrst, RTC_AIE);
285 break; 267 else
286 } 268 mrst_irq_disable(mrst, RTC_AIE);
287 spin_unlock_irqrestore(&rtc_lock, flags); 269 spin_unlock_irqrestore(&rtc_lock, flags);
288 return 0; 270 return 0;
289} 271}
290 272
291#else
292#define mrst_rtc_ioctl NULL
293#endif
294 273
295#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE) 274#if defined(CONFIG_RTC_INTF_PROC) || defined(CONFIG_RTC_INTF_PROC_MODULE)
296 275
@@ -317,13 +296,13 @@ static int mrst_procfs(struct device *dev, struct seq_file *seq)
317#endif 296#endif
318 297
319static const struct rtc_class_ops mrst_rtc_ops = { 298static const struct rtc_class_ops mrst_rtc_ops = {
320 .ioctl = mrst_rtc_ioctl,
321 .read_time = mrst_read_time, 299 .read_time = mrst_read_time,
322 .set_time = mrst_set_time, 300 .set_time = mrst_set_time,
323 .read_alarm = mrst_read_alarm, 301 .read_alarm = mrst_read_alarm,
324 .set_alarm = mrst_set_alarm, 302 .set_alarm = mrst_set_alarm,
325 .proc = mrst_procfs, 303 .proc = mrst_procfs,
326 .irq_set_state = mrst_irq_set_state, 304 .irq_set_state = mrst_irq_set_state,
305 .alarm_irq_enable = mrst_rtc_alarm_irq_enable,
327}; 306};
328 307
329static struct mrst_rtc mrst_rtc; 308static struct mrst_rtc mrst_rtc;
diff --git a/drivers/rtc/rtc-msm6242.c b/drivers/rtc/rtc-msm6242.c
index b2fff0ca49f8..67820626e18f 100644
--- a/drivers/rtc/rtc-msm6242.c
+++ b/drivers/rtc/rtc-msm6242.c
@@ -82,7 +82,7 @@ static inline unsigned int msm6242_read(struct msm6242_priv *priv,
82static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val, 82static inline void msm6242_write(struct msm6242_priv *priv, unsigned int val,
83 unsigned int reg) 83 unsigned int reg)
84{ 84{
85 return __raw_writel(val, &priv->regs[reg]); 85 __raw_writel(val, &priv->regs[reg]);
86} 86}
87 87
88static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val, 88static inline void msm6242_set(struct msm6242_priv *priv, unsigned int val,
diff --git a/drivers/rtc/rtc-mv.c b/drivers/rtc/rtc-mv.c
index bcca47298554..60627a764514 100644
--- a/drivers/rtc/rtc-mv.c
+++ b/drivers/rtc/rtc-mv.c
@@ -169,25 +169,19 @@ static int mv_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
169 return 0; 169 return 0;
170} 170}
171 171
172static int mv_rtc_ioctl(struct device *dev, unsigned int cmd, 172static int mv_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
173 unsigned long arg)
174{ 173{
175 struct platform_device *pdev = to_platform_device(dev); 174 struct platform_device *pdev = to_platform_device(dev);
176 struct rtc_plat_data *pdata = platform_get_drvdata(pdev); 175 struct rtc_plat_data *pdata = platform_get_drvdata(pdev);
177 void __iomem *ioaddr = pdata->ioaddr; 176 void __iomem *ioaddr = pdata->ioaddr;
178 177
179 if (pdata->irq < 0) 178 if (pdata->irq < 0)
180 return -ENOIOCTLCMD; /* fall back into rtc-dev's emulation */ 179 return -EINVAL; /* fall back into rtc-dev's emulation */
181 switch (cmd) { 180
182 case RTC_AIE_OFF: 181 if (enabled)
183 writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
184 break;
185 case RTC_AIE_ON:
186 writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS); 182 writel(1, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
187 break; 183 else
188 default: 184 writel(0, ioaddr + RTC_ALARM_INTERRUPT_MASK_REG_OFFS);
189 return -ENOIOCTLCMD;
190 }
191 return 0; 185 return 0;
192} 186}
193 187
@@ -216,7 +210,7 @@ static const struct rtc_class_ops mv_rtc_alarm_ops = {
216 .set_time = mv_rtc_set_time, 210 .set_time = mv_rtc_set_time,
217 .read_alarm = mv_rtc_read_alarm, 211 .read_alarm = mv_rtc_read_alarm,
218 .set_alarm = mv_rtc_set_alarm, 212 .set_alarm = mv_rtc_set_alarm,
219 .ioctl = mv_rtc_ioctl, 213 .alarm_irq_enable = mv_rtc_alarm_irq_enable,
220}; 214};
221 215
222static int __devinit mv_rtc_probe(struct platform_device *pdev) 216static int __devinit mv_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index e72b523c79a5..b4dbf3a319b3 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -143,8 +143,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
143 u8 reg; 143 u8 reg;
144 144
145 switch (cmd) { 145 switch (cmd) {
146 case RTC_AIE_OFF:
147 case RTC_AIE_ON:
148 case RTC_UIE_OFF: 146 case RTC_UIE_OFF:
149 case RTC_UIE_ON: 147 case RTC_UIE_ON:
150 break; 148 break;
@@ -156,13 +154,6 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
156 rtc_wait_not_busy(); 154 rtc_wait_not_busy();
157 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG); 155 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
158 switch (cmd) { 156 switch (cmd) {
159 /* AIE = Alarm Interrupt Enable */
160 case RTC_AIE_OFF:
161 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
162 break;
163 case RTC_AIE_ON:
164 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
165 break;
166 /* UIE = Update Interrupt Enable (1/second) */ 157 /* UIE = Update Interrupt Enable (1/second) */
167 case RTC_UIE_OFF: 158 case RTC_UIE_OFF:
168 reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER; 159 reg &= ~OMAP_RTC_INTERRUPTS_IT_TIMER;
@@ -182,6 +173,24 @@ omap_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
182#define omap_rtc_ioctl NULL 173#define omap_rtc_ioctl NULL
183#endif 174#endif
184 175
176static int omap_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
177{
178 u8 reg;
179
180 local_irq_disable();
181 rtc_wait_not_busy();
182 reg = rtc_read(OMAP_RTC_INTERRUPTS_REG);
183 if (enabled)
184 reg |= OMAP_RTC_INTERRUPTS_IT_ALARM;
185 else
186 reg &= ~OMAP_RTC_INTERRUPTS_IT_ALARM;
187 rtc_wait_not_busy();
188 rtc_write(reg, OMAP_RTC_INTERRUPTS_REG);
189 local_irq_enable();
190
191 return 0;
192}
193
185/* this hardware doesn't support "don't care" alarm fields */ 194/* this hardware doesn't support "don't care" alarm fields */
186static int tm2bcd(struct rtc_time *tm) 195static int tm2bcd(struct rtc_time *tm)
187{ 196{
@@ -309,6 +318,7 @@ static struct rtc_class_ops omap_rtc_ops = {
309 .set_time = omap_rtc_set_time, 318 .set_time = omap_rtc_set_time,
310 .read_alarm = omap_rtc_read_alarm, 319 .read_alarm = omap_rtc_read_alarm,
311 .set_alarm = omap_rtc_set_alarm, 320 .set_alarm = omap_rtc_set_alarm,
321 .alarm_irq_enable = omap_rtc_alarm_irq_enable,
312}; 322};
313 323
314static int omap_rtc_alarm; 324static int omap_rtc_alarm;
diff --git a/drivers/rtc/rtc-rp5c01.c b/drivers/rtc/rtc-rp5c01.c
index 36eb66184461..694da39b6dd2 100644
--- a/drivers/rtc/rtc-rp5c01.c
+++ b/drivers/rtc/rtc-rp5c01.c
@@ -76,7 +76,7 @@ static inline unsigned int rp5c01_read(struct rp5c01_priv *priv,
76static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val, 76static inline void rp5c01_write(struct rp5c01_priv *priv, unsigned int val,
77 unsigned int reg) 77 unsigned int reg)
78{ 78{
79 return __raw_writel(val, &priv->regs[reg]); 79 __raw_writel(val, &priv->regs[reg]);
80} 80}
81 81
82static void rp5c01_lock(struct rp5c01_priv *priv) 82static void rp5c01_lock(struct rp5c01_priv *priv)
diff --git a/drivers/rtc/rtc-rs5c372.c b/drivers/rtc/rtc-rs5c372.c
index dd14e202c2c8..6aaa1550e3b1 100644
--- a/drivers/rtc/rtc-rs5c372.c
+++ b/drivers/rtc/rtc-rs5c372.c
@@ -299,14 +299,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
299 if (rs5c->type == rtc_rs5c372a 299 if (rs5c->type == rtc_rs5c372a
300 && (buf & RS5C372A_CTRL1_SL1)) 300 && (buf & RS5C372A_CTRL1_SL1))
301 return -ENOIOCTLCMD; 301 return -ENOIOCTLCMD;
302 case RTC_AIE_OFF:
303 case RTC_AIE_ON:
304 /* these irq management calls only make sense for chips
305 * which are wired up to an IRQ.
306 */
307 if (!rs5c->has_irq)
308 return -ENOIOCTLCMD;
309 break;
310 default: 302 default:
311 return -ENOIOCTLCMD; 303 return -ENOIOCTLCMD;
312 } 304 }
@@ -317,12 +309,6 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
317 309
318 addr = RS5C_ADDR(RS5C_REG_CTRL1); 310 addr = RS5C_ADDR(RS5C_REG_CTRL1);
319 switch (cmd) { 311 switch (cmd) {
320 case RTC_AIE_OFF: /* alarm off */
321 buf &= ~RS5C_CTRL1_AALE;
322 break;
323 case RTC_AIE_ON: /* alarm on */
324 buf |= RS5C_CTRL1_AALE;
325 break;
326 case RTC_UIE_OFF: /* update off */ 312 case RTC_UIE_OFF: /* update off */
327 buf &= ~RS5C_CTRL1_CT_MASK; 313 buf &= ~RS5C_CTRL1_CT_MASK;
328 break; 314 break;
@@ -347,6 +333,39 @@ rs5c_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
347#endif 333#endif
348 334
349 335
336static int rs5c_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
337{
338 struct i2c_client *client = to_i2c_client(dev);
339 struct rs5c372 *rs5c = i2c_get_clientdata(client);
340 unsigned char buf;
341 int status, addr;
342
343 buf = rs5c->regs[RS5C_REG_CTRL1];
344
345 if (!rs5c->has_irq)
346 return -EINVAL;
347
348 status = rs5c_get_regs(rs5c);
349 if (status < 0)
350 return status;
351
352 addr = RS5C_ADDR(RS5C_REG_CTRL1);
353 if (enabled)
354 buf |= RS5C_CTRL1_AALE;
355 else
356 buf &= ~RS5C_CTRL1_AALE;
357
358 if (i2c_smbus_write_byte_data(client, addr, buf) < 0) {
359 printk(KERN_WARNING "%s: can't update alarm\n",
360 rs5c->rtc->name);
361 status = -EIO;
362 } else
363 rs5c->regs[RS5C_REG_CTRL1] = buf;
364
365 return status;
366}
367
368
350/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI, 369/* NOTE: Since RTC_WKALM_{RD,SET} were originally defined for EFI,
351 * which only exposes a polled programming interface; and since 370 * which only exposes a polled programming interface; and since
352 * these calls map directly to those EFI requests; we don't demand 371 * these calls map directly to those EFI requests; we don't demand
@@ -466,6 +485,7 @@ static const struct rtc_class_ops rs5c372_rtc_ops = {
466 .set_time = rs5c372_rtc_set_time, 485 .set_time = rs5c372_rtc_set_time,
467 .read_alarm = rs5c_read_alarm, 486 .read_alarm = rs5c_read_alarm,
468 .set_alarm = rs5c_set_alarm, 487 .set_alarm = rs5c_set_alarm,
488 .alarm_irq_enable = rs5c_rtc_alarm_irq_enable,
469}; 489};
470 490
471#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE) 491#if defined(CONFIG_RTC_INTF_SYSFS) || defined(CONFIG_RTC_INTF_SYSFS_MODULE)
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index cf953ecbfca9..b80fa2882408 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id)
77} 77}
78 78
79/* Update control registers */ 79/* Update control registers */
80static void s3c_rtc_setaie(int to) 80static int s3c_rtc_setaie(struct device *dev, unsigned int enabled)
81{ 81{
82 unsigned int tmp; 82 unsigned int tmp;
83 83
84 pr_debug("%s: aie=%d\n", __func__, to); 84 pr_debug("%s: aie=%d\n", __func__, enabled);
85 85
86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; 86 tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN;
87 87
88 if (to) 88 if (enabled)
89 tmp |= S3C2410_RTCALM_ALMEN; 89 tmp |= S3C2410_RTCALM_ALMEN;
90 90
91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); 91 writeb(tmp, s3c_rtc_base + S3C2410_RTCALM);
92
93 return 0;
92} 94}
93 95
94static int s3c_rtc_setpie(struct device *dev, int enabled) 96static int s3c_rtc_setpie(struct device *dev, int enabled)
@@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
308 310
309 writeb(alrm_en, base + S3C2410_RTCALM); 311 writeb(alrm_en, base + S3C2410_RTCALM);
310 312
311 s3c_rtc_setaie(alrm->enabled); 313 s3c_rtc_setaie(dev, alrm->enabled);
312 314
313 return 0; 315 return 0;
314} 316}
@@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev)
440 rtc_device_unregister(rtc); 442 rtc_device_unregister(rtc);
441 443
442 s3c_rtc_setpie(&dev->dev, 0); 444 s3c_rtc_setpie(&dev->dev, 0);
443 s3c_rtc_setaie(0); 445 s3c_rtc_setaie(&dev->dev, 0);
444 446
445 clk_disable(rtc_clk); 447 clk_disable(rtc_clk);
446 clk_put(rtc_clk); 448 clk_put(rtc_clk);
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 88ea52b8647a..5dfe5ffcb0d3 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -314,16 +314,6 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
314 unsigned long arg) 314 unsigned long arg)
315{ 315{
316 switch (cmd) { 316 switch (cmd) {
317 case RTC_AIE_OFF:
318 spin_lock_irq(&sa1100_rtc_lock);
319 RTSR &= ~RTSR_ALE;
320 spin_unlock_irq(&sa1100_rtc_lock);
321 return 0;
322 case RTC_AIE_ON:
323 spin_lock_irq(&sa1100_rtc_lock);
324 RTSR |= RTSR_ALE;
325 spin_unlock_irq(&sa1100_rtc_lock);
326 return 0;
327 case RTC_UIE_OFF: 317 case RTC_UIE_OFF:
328 spin_lock_irq(&sa1100_rtc_lock); 318 spin_lock_irq(&sa1100_rtc_lock);
329 RTSR &= ~RTSR_HZE; 319 RTSR &= ~RTSR_HZE;
@@ -338,6 +328,17 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd,
338 return -ENOIOCTLCMD; 328 return -ENOIOCTLCMD;
339} 329}
340 330
331static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
332{
333 spin_lock_irq(&sa1100_rtc_lock);
334 if (enabled)
335 RTSR |= RTSR_ALE;
336 else
337 RTSR &= ~RTSR_ALE;
338 spin_unlock_irq(&sa1100_rtc_lock);
339 return 0;
340}
341
341static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) 342static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
342{ 343{
343 rtc_time_to_tm(RCNR, tm); 344 rtc_time_to_tm(RCNR, tm);
@@ -410,6 +411,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
410 .proc = sa1100_rtc_proc, 411 .proc = sa1100_rtc_proc,
411 .irq_set_freq = sa1100_irq_set_freq, 412 .irq_set_freq = sa1100_irq_set_freq,
412 .irq_set_state = sa1100_irq_set_state, 413 .irq_set_state = sa1100_irq_set_state,
414 .alarm_irq_enable = sa1100_rtc_alarm_irq_enable,
413}; 415};
414 416
415static int sa1100_rtc_probe(struct platform_device *pdev) 417static int sa1100_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-sh.c b/drivers/rtc/rtc-sh.c
index 06e41ed93230..93314a9e7fa9 100644
--- a/drivers/rtc/rtc-sh.c
+++ b/drivers/rtc/rtc-sh.c
@@ -350,10 +350,6 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
350 unsigned int ret = 0; 350 unsigned int ret = 0;
351 351
352 switch (cmd) { 352 switch (cmd) {
353 case RTC_AIE_OFF:
354 case RTC_AIE_ON:
355 sh_rtc_setaie(dev, cmd == RTC_AIE_ON);
356 break;
357 case RTC_UIE_OFF: 353 case RTC_UIE_OFF:
358 rtc->periodic_freq &= ~PF_OXS; 354 rtc->periodic_freq &= ~PF_OXS;
359 sh_rtc_setcie(dev, 0); 355 sh_rtc_setcie(dev, 0);
@@ -369,6 +365,12 @@ static int sh_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
369 return ret; 365 return ret;
370} 366}
371 367
368static int sh_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
369{
370 sh_rtc_setaie(dev, enabled);
371 return 0;
372}
373
372static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm) 374static int sh_rtc_read_time(struct device *dev, struct rtc_time *tm)
373{ 375{
374 struct platform_device *pdev = to_platform_device(dev); 376 struct platform_device *pdev = to_platform_device(dev);
@@ -604,6 +606,7 @@ static struct rtc_class_ops sh_rtc_ops = {
604 .irq_set_state = sh_rtc_irq_set_state, 606 .irq_set_state = sh_rtc_irq_set_state,
605 .irq_set_freq = sh_rtc_irq_set_freq, 607 .irq_set_freq = sh_rtc_irq_set_freq,
606 .proc = sh_rtc_proc, 608 .proc = sh_rtc_proc,
609 .alarm_irq_enable = sh_rtc_alarm_irq_enable,
607}; 610};
608 611
609static int __init sh_rtc_probe(struct platform_device *pdev) 612static int __init sh_rtc_probe(struct platform_device *pdev)
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c
index 51725f7755b0..a82d6fe97076 100644
--- a/drivers/rtc/rtc-test.c
+++ b/drivers/rtc/rtc-test.c
@@ -50,24 +50,9 @@ static int test_rtc_proc(struct device *dev, struct seq_file *seq)
50 return 0; 50 return 0;
51} 51}
52 52
53static int test_rtc_ioctl(struct device *dev, unsigned int cmd, 53static int test_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
54 unsigned long arg)
55{ 54{
56 /* We do support interrupts, they're generated 55 return 0;
57 * using the sysfs interface.
58 */
59 switch (cmd) {
60 case RTC_PIE_ON:
61 case RTC_PIE_OFF:
62 case RTC_UIE_ON:
63 case RTC_UIE_OFF:
64 case RTC_AIE_ON:
65 case RTC_AIE_OFF:
66 return 0;
67
68 default:
69 return -ENOIOCTLCMD;
70 }
71} 56}
72 57
73static const struct rtc_class_ops test_rtc_ops = { 58static const struct rtc_class_ops test_rtc_ops = {
@@ -76,7 +61,7 @@ static const struct rtc_class_ops test_rtc_ops = {
76 .read_alarm = test_rtc_read_alarm, 61 .read_alarm = test_rtc_read_alarm,
77 .set_alarm = test_rtc_set_alarm, 62 .set_alarm = test_rtc_set_alarm,
78 .set_mmss = test_rtc_set_mmss, 63 .set_mmss = test_rtc_set_mmss,
79 .ioctl = test_rtc_ioctl, 64 .alarm_irq_enable = test_rtc_alarm_irq_enable,
80}; 65};
81 66
82static ssize_t test_irq_show(struct device *dev, 67static ssize_t test_irq_show(struct device *dev,
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c
index c3244244e8cf..769190ac6d11 100644
--- a/drivers/rtc/rtc-vr41xx.c
+++ b/drivers/rtc/rtc-vr41xx.c
@@ -240,26 +240,6 @@ static int vr41xx_rtc_irq_set_state(struct device *dev, int enabled)
240static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) 240static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
241{ 241{
242 switch (cmd) { 242 switch (cmd) {
243 case RTC_AIE_ON:
244 spin_lock_irq(&rtc_lock);
245
246 if (!alarm_enabled) {
247 enable_irq(aie_irq);
248 alarm_enabled = 1;
249 }
250
251 spin_unlock_irq(&rtc_lock);
252 break;
253 case RTC_AIE_OFF:
254 spin_lock_irq(&rtc_lock);
255
256 if (alarm_enabled) {
257 disable_irq(aie_irq);
258 alarm_enabled = 0;
259 }
260
261 spin_unlock_irq(&rtc_lock);
262 break;
263 case RTC_EPOCH_READ: 243 case RTC_EPOCH_READ:
264 return put_user(epoch, (unsigned long __user *)arg); 244 return put_user(epoch, (unsigned long __user *)arg);
265 case RTC_EPOCH_SET: 245 case RTC_EPOCH_SET:
@@ -275,6 +255,24 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long
275 return 0; 255 return 0;
276} 256}
277 257
258static int vr41xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
259{
260 spin_lock_irq(&rtc_lock);
261 if (enabled) {
262 if (!alarm_enabled) {
263 enable_irq(aie_irq);
264 alarm_enabled = 1;
265 }
266 } else {
267 if (alarm_enabled) {
268 disable_irq(aie_irq);
269 alarm_enabled = 0;
270 }
271 }
272 spin_unlock_irq(&rtc_lock);
273 return 0;
274}
275
278static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id) 276static irqreturn_t elapsedtime_interrupt(int irq, void *dev_id)
279{ 277{
280 struct platform_device *pdev = (struct platform_device *)dev_id; 278 struct platform_device *pdev = (struct platform_device *)dev_id;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 318672d05563..a9fe23d5bd0f 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline;
72static struct ccw_device_id dasd_eckd_ids[] = { 72static struct ccw_device_id dasd_eckd_ids[] = {
73 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, 73 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
74 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, 74 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
75 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, 75 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3},
76 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, 76 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
77 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, 77 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
78 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, 78 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index c881a14fa5dd..1f6a4d894e73 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -62,8 +62,8 @@ static int xpram_devs;
62/* 62/*
63 * Parameter parsing functions. 63 * Parameter parsing functions.
64 */ 64 */
65static int __initdata devs = XPRAM_DEVS; 65static int devs = XPRAM_DEVS;
66static char __initdata *sizes[XPRAM_MAX_DEVS]; 66static char *sizes[XPRAM_MAX_DEVS];
67 67
68module_param(devs, int, 0); 68module_param(devs, int, 0);
69module_param_array(sizes, charp, NULL, 0); 69module_param_array(sizes, charp, NULL, 0);
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 8cd58e412b5e..5ad44daef73b 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file,
460 unsigned int cmd, unsigned long arg) 460 unsigned int cmd, unsigned long arg)
461{ 461{
462 void __user *argp; 462 void __user *argp;
463 int ct, perm; 463 unsigned int ct;
464 int perm;
464 465
465 argp = (void __user *)arg; 466 argp = (void __user *)arg;
466 467
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 7a242f073632..267b54e8ff5a 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request)
280 return rc; 280 return rc;
281} 281}
282 282
283static inline void
284tape_do_io_async_free(struct tape_device *device, struct tape_request *request)
285{
286 request->callback = (void *) tape_free_request;
287 request->callback_data = NULL;
288 tape_do_io_async(device, request);
289}
290
283extern int tape_oper_handler(int irq, int status); 291extern int tape_oper_handler(int irq, int status);
284extern void tape_noper_handler(int irq, int status); 292extern void tape_noper_handler(int irq, int status);
285extern int tape_open(struct tape_device *); 293extern int tape_open(struct tape_device *);
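tape_do_io_async_free() added above is the building block for the 34xx/3590 changes that follow: it makes tape_free_request() the request's completion callback and starts the channel program without waiting for it, so work-queue code can fire a request and forget it. A sketch of the intended call pattern, mirroring tape_3590_sense_medium_async() further down (assumes the tape.h helpers and constants shown in this patch):

/* Fire-and-forget medium sense: the request frees itself on completion. */
static void example_sense_medium_async(struct tape_device *device)
{
        struct tape_request *request;

        request = tape_alloc_request(1, 128);
        if (IS_ERR(request))
                return;                 /* nothing was started, nothing leaks */
        request->op = TO_MSEN;
        tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);

        /* Sets request->callback = tape_free_request and submits the I/O;
         * the request is released from the completion path. */
        tape_do_io_async_free(device, request);
}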
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index c17f35b6136a..c26511171ffe 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int);
53 * Medium sense for 34xx tapes. There is no 'real' medium sense call. 53 * Medium sense for 34xx tapes. There is no 'real' medium sense call.
54 * So we just do a normal sense. 54 * So we just do a normal sense.
55 */ 55 */
56static int 56static void __tape_34xx_medium_sense(struct tape_request *request)
57tape_34xx_medium_sense(struct tape_device *device)
58{ 57{
59 struct tape_request *request; 58 struct tape_device *device = request->device;
60 unsigned char *sense; 59 unsigned char *sense;
61 int rc;
62
63 request = tape_alloc_request(1, 32);
64 if (IS_ERR(request)) {
65 DBF_EXCEPTION(6, "MSEN fail\n");
66 return PTR_ERR(request);
67 }
68
69 request->op = TO_MSEN;
70 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
71 60
72 rc = tape_do_io_interruptible(device, request);
73 if (request->rc == 0) { 61 if (request->rc == 0) {
74 sense = request->cpdata; 62 sense = request->cpdata;
75 63
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device)
88 device->tape_generic_status |= GMT_WR_PROT(~0); 76 device->tape_generic_status |= GMT_WR_PROT(~0);
89 else 77 else
90 device->tape_generic_status &= ~GMT_WR_PROT(~0); 78 device->tape_generic_status &= ~GMT_WR_PROT(~0);
91 } else { 79 } else
92 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", 80 DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n",
93 request->rc); 81 request->rc);
94 }
95 tape_free_request(request); 82 tape_free_request(request);
83}
84
85static int tape_34xx_medium_sense(struct tape_device *device)
86{
87 struct tape_request *request;
88 int rc;
89
90 request = tape_alloc_request(1, 32);
91 if (IS_ERR(request)) {
92 DBF_EXCEPTION(6, "MSEN fail\n");
93 return PTR_ERR(request);
94 }
96 95
96 request->op = TO_MSEN;
97 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
98 rc = tape_do_io_interruptible(device, request);
99 __tape_34xx_medium_sense(request);
97 return rc; 100 return rc;
98} 101}
99 102
103static void tape_34xx_medium_sense_async(struct tape_device *device)
104{
105 struct tape_request *request;
106
107 request = tape_alloc_request(1, 32);
108 if (IS_ERR(request)) {
109 DBF_EXCEPTION(6, "MSEN fail\n");
110 return;
111 }
112
113 request->op = TO_MSEN;
114 tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
115 request->callback = (void *) __tape_34xx_medium_sense;
116 request->callback_data = NULL;
117 tape_do_io_async(device, request);
118}
119
100struct tape_34xx_work { 120struct tape_34xx_work {
101 struct tape_device *device; 121 struct tape_device *device;
102 enum tape_op op; 122 enum tape_op op;
@@ -109,6 +129,9 @@ struct tape_34xx_work {
109 * is inserted but cannot call tape_do_io* from an interrupt context. 129 * is inserted but cannot call tape_do_io* from an interrupt context.
110 * Maybe that's useful for other actions we want to start from the 130 * Maybe that's useful for other actions we want to start from the
111 * interrupt handler. 131 * interrupt handler.
132 * Note: the work handler is called by the system work queue. The tape
 133 * commands started by the handler need to be asynchronous, otherwise
134 * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
112 */ 135 */
113static void 136static void
114tape_34xx_work_handler(struct work_struct *work) 137tape_34xx_work_handler(struct work_struct *work)
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work)
119 142
120 switch(p->op) { 143 switch(p->op) {
121 case TO_MSEN: 144 case TO_MSEN:
122 tape_34xx_medium_sense(device); 145 tape_34xx_medium_sense_async(device);
123 break; 146 break;
124 default: 147 default:
125 DBF_EVENT(3, "T34XX: internal error: unknown work\n"); 148 DBF_EVENT(3, "T34XX: internal error: unknown work\n");
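The comment added to tape_34xx_work_handler() states the constraint that drives the rest of this hunk: the handler runs on the shared system workqueue, so it must not submit a synchronous channel program that could stall (for example on a deferred cc=1), or the workqueue itself deadlocks; hence the medium sense is now issued through the asynchronous path. A generic sketch of the defer-from-interrupt pattern in use here, with illustrative names (start_async_op stands in for the per-op dispatch above, and the real driver additionally takes a reference on the device):

#include <linux/slab.h>
#include <linux/workqueue.h>

struct deferred_op {
        struct work_struct work;
        struct tape_device *device;     /* from tape.h, shown earlier */
        enum tape_op op;
};

static void start_async_op(struct tape_device *device, enum tape_op op); /* illustrative */

static void deferred_op_handler(struct work_struct *work)
{
        struct deferred_op *p = container_of(work, struct deferred_op, work);

        /* Runs on the system workqueue: start the channel program
         * asynchronously, never wait for it to complete here. */
        start_async_op(p->device, p->op);
        kfree(p);
}

/* Callable from interrupt context, e.g. for an unsolicited interrupt. */
static int defer_op(struct tape_device *device, enum tape_op op)
{
        struct deferred_op *p = kmalloc(sizeof(*p), GFP_ATOMIC);

        if (!p)
                return -ENOMEM;
        INIT_WORK(&p->work, deferred_op_handler);
        p->device = device;
        p->op = op;
        schedule_work(&p->work);
        return 0;
}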
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index fbe361fcd2c0..de2e99e0a71b 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -329,17 +329,17 @@ out:
329/* 329/*
330 * Enable encryption 330 * Enable encryption
331 */ 331 */
332static int tape_3592_enable_crypt(struct tape_device *device) 332static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device)
333{ 333{
334 struct tape_request *request; 334 struct tape_request *request;
335 char *data; 335 char *data;
336 336
337 DBF_EVENT(6, "tape_3592_enable_crypt\n"); 337 DBF_EVENT(6, "tape_3592_enable_crypt\n");
338 if (!crypt_supported(device)) 338 if (!crypt_supported(device))
339 return -ENOSYS; 339 return ERR_PTR(-ENOSYS);
340 request = tape_alloc_request(2, 72); 340 request = tape_alloc_request(2, 72);
341 if (IS_ERR(request)) 341 if (IS_ERR(request))
342 return PTR_ERR(request); 342 return request;
343 data = request->cpdata; 343 data = request->cpdata;
344 memset(data,0,72); 344 memset(data,0,72);
345 345
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device)
354 request->op = TO_CRYPT_ON; 354 request->op = TO_CRYPT_ON;
355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 355 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 356 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
357 return request;
358}
359
360static int tape_3592_enable_crypt(struct tape_device *device)
361{
362 struct tape_request *request;
363
364 request = __tape_3592_enable_crypt(device);
365 if (IS_ERR(request))
366 return PTR_ERR(request);
357 return tape_do_io_free(device, request); 367 return tape_do_io_free(device, request);
358} 368}
359 369
370static void tape_3592_enable_crypt_async(struct tape_device *device)
371{
372 struct tape_request *request;
373
374 request = __tape_3592_enable_crypt(device);
375 if (!IS_ERR(request))
376 tape_do_io_async_free(device, request);
377}
378
360/* 379/*
361 * Disable encryption 380 * Disable encryption
362 */ 381 */
363static int tape_3592_disable_crypt(struct tape_device *device) 382static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device)
364{ 383{
365 struct tape_request *request; 384 struct tape_request *request;
366 char *data; 385 char *data;
367 386
368 DBF_EVENT(6, "tape_3592_disable_crypt\n"); 387 DBF_EVENT(6, "tape_3592_disable_crypt\n");
369 if (!crypt_supported(device)) 388 if (!crypt_supported(device))
370 return -ENOSYS; 389 return ERR_PTR(-ENOSYS);
371 request = tape_alloc_request(2, 72); 390 request = tape_alloc_request(2, 72);
372 if (IS_ERR(request)) 391 if (IS_ERR(request))
373 return PTR_ERR(request); 392 return request;
374 data = request->cpdata; 393 data = request->cpdata;
375 memset(data,0,72); 394 memset(data,0,72);
376 395
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device)
383 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); 402 tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data);
384 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); 403 tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36);
385 404
405 return request;
406}
407
408static int tape_3592_disable_crypt(struct tape_device *device)
409{
410 struct tape_request *request;
411
412 request = __tape_3592_disable_crypt(device);
413 if (IS_ERR(request))
414 return PTR_ERR(request);
386 return tape_do_io_free(device, request); 415 return tape_do_io_free(device, request);
387} 416}
388 417
418static void tape_3592_disable_crypt_async(struct tape_device *device)
419{
420 struct tape_request *request;
421
422 request = __tape_3592_disable_crypt(device);
423 if (!IS_ERR(request))
424 tape_do_io_async_free(device, request);
425}
426
389/* 427/*
390 * IOCTL: Set encryption status 428 * IOCTL: Set encryption status
391 */ 429 */
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg)
457/* 495/*
458 * SENSE Medium: Get Sense data about medium state 496 * SENSE Medium: Get Sense data about medium state
459 */ 497 */
460static int 498static int tape_3590_sense_medium(struct tape_device *device)
461tape_3590_sense_medium(struct tape_device *device)
462{ 499{
463 struct tape_request *request; 500 struct tape_request *request;
464 501
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device)
470 return tape_do_io_free(device, request); 507 return tape_do_io_free(device, request);
471} 508}
472 509
510static void tape_3590_sense_medium_async(struct tape_device *device)
511{
512 struct tape_request *request;
513
514 request = tape_alloc_request(1, 128);
515 if (IS_ERR(request))
516 return;
517 request->op = TO_MSEN;
518 tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata);
519 tape_do_io_async_free(device, request);
520}
521
473/* 522/*
474 * MTTELL: Tell block. Return the number of block relative to current file. 523 * MTTELL: Tell block. Return the number of block relative to current file.
475 */ 524 */
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device,
546 * 2. The attention msg is written to the "read subsystem data" buffer. 595 * 2. The attention msg is written to the "read subsystem data" buffer.
547 * In this case we probably should print it to the console. 596 * In this case we probably should print it to the console.
548 */ 597 */
549static int 598static void tape_3590_read_attmsg_async(struct tape_device *device)
550tape_3590_read_attmsg(struct tape_device *device)
551{ 599{
552 struct tape_request *request; 600 struct tape_request *request;
553 char *buf; 601 char *buf;
554 602
555 request = tape_alloc_request(3, 4096); 603 request = tape_alloc_request(3, 4096);
556 if (IS_ERR(request)) 604 if (IS_ERR(request))
557 return PTR_ERR(request); 605 return;
558 request->op = TO_READ_ATTMSG; 606 request->op = TO_READ_ATTMSG;
559 buf = request->cpdata; 607 buf = request->cpdata;
560 buf[0] = PREP_RD_SS_DATA; 608 buf[0] = PREP_RD_SS_DATA;
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device)
562 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); 610 tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf);
563 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); 611 tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12);
564 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); 612 tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL);
565 return tape_do_io_free(device, request); 613 tape_do_io_async_free(device, request);
566} 614}
567 615
568/* 616/*
569 * These functions are used to schedule follow-up actions from within an 617 * These functions are used to schedule follow-up actions from within an
570 * interrupt context (like unsolicited interrupts). 618 * interrupt context (like unsolicited interrupts).
619 * Note: the work handler is called by the system work queue. The tape
 620 * commands started by the handler need to be asynchronous, otherwise
621 * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq).
571 */ 622 */
572struct work_handler_data { 623struct work_handler_data {
573 struct tape_device *device; 624 struct tape_device *device;
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work)
583 634
584 switch (p->op) { 635 switch (p->op) {
585 case TO_MSEN: 636 case TO_MSEN:
586 tape_3590_sense_medium(p->device); 637 tape_3590_sense_medium_async(p->device);
587 break; 638 break;
588 case TO_READ_ATTMSG: 639 case TO_READ_ATTMSG:
589 tape_3590_read_attmsg(p->device); 640 tape_3590_read_attmsg_async(p->device);
590 break; 641 break;
591 case TO_CRYPT_ON: 642 case TO_CRYPT_ON:
592 tape_3592_enable_crypt(p->device); 643 tape_3592_enable_crypt_async(p->device);
593 break; 644 break;
594 case TO_CRYPT_OFF: 645 case TO_CRYPT_OFF:
595 tape_3592_disable_crypt(p->device); 646 tape_3592_disable_crypt_async(p->device);
596 break; 647 break;
597 default: 648 default:
598 DBF_EVENT(3, "T3590: work handler undefined for " 649 DBF_EVENT(3, "T3590: work handler undefined for "
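
The comment added above states the key constraint: the work handler runs on the system workqueue, so the tape commands it starts must be asynchronous, or a deferred cc=1 retry can deadlock against the handler. Below is a minimal sketch of that schedule-from-interrupt / fire-asynchronously shape, assuming the tape_device, tape_op and *_async helpers visible in this hunk; the work-item struct and the two helper functions are illustrative only, not the driver's actual code.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct followup_work {			/* illustrative work item */
	struct work_struct work;
	struct tape_device *device;	/* assumed held by the caller */
	enum tape_op op;
};

static void followup_work_fn(struct work_struct *work)
{
	struct followup_work *p = container_of(work, struct followup_work, work);

	switch (p->op) {
	case TO_MSEN:
		/* fire-and-forget channel program; never wait here */
		tape_3590_sense_medium_async(p->device);
		break;
	default:
		break;
	}
	kfree(p);
}

/* Called from interrupt context: only queue work, never start I/O here. */
static int schedule_followup(struct tape_device *device, enum tape_op op)
{
	struct followup_work *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (!p)
		return -ENOMEM;
	INIT_WORK(&p->work, followup_work_fn);
	p->device = device;
	p->op = op;
	schedule_work(&p->work);
	return 0;
}
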
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 44578b56ad0a..d3e58d763b43 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1561{ 1561{
1562 struct Scsi_Host *host = rport_to_shost(rport); 1562 struct Scsi_Host *host = rport_to_shost(rport);
1563 fc_port_t *fcport = *(fc_port_t **)rport->dd_data; 1563 fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1564 unsigned long flags;
1564 1565
1565 if (!fcport) 1566 if (!fcport)
1566 return; 1567 return;
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1573 * Transport has effectively 'deleted' the rport, clear 1574 * Transport has effectively 'deleted' the rport, clear
1574 * all local references. 1575 * all local references.
1575 */ 1576 */
1576 spin_lock_irq(host->host_lock); 1577 spin_lock_irqsave(host->host_lock, flags);
1577 fcport->rport = fcport->drport = NULL; 1578 fcport->rport = fcport->drport = NULL;
1578 *((fc_port_t **)rport->dd_data) = NULL; 1579 *((fc_port_t **)rport->dd_data) = NULL;
1579 spin_unlock_irq(host->host_lock); 1580 spin_unlock_irqrestore(host->host_lock, flags);
1580 1581
1581 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) 1582 if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1582 return; 1583 return;
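
The qla2xxx changes in this file (and in the two files below) all follow one rule: these callbacks can be entered with interrupts already disabled, so the lock must save and restore the caller's interrupt state rather than unconditionally re-enabling it on unlock. A minimal illustration of the safer form, with a hypothetical helper name around the real spinlock primitives:

#include <linux/spinlock.h>

static void clear_rport_reference(spinlock_t *host_lock, void **dd_data)
{
	unsigned long flags;

	spin_lock_irqsave(host_lock, flags);	  /* safe even if IRQs are already off */
	*dd_data = NULL;
	spin_unlock_irqrestore(host_lock, flags); /* restores the previous IRQ state */
}
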
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f948e1a73aec..d9479c3fe5f8 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data)
2505{ 2505{
2506 fc_port_t *fcport = data; 2506 fc_port_t *fcport = data;
2507 struct fc_rport *rport; 2507 struct fc_rport *rport;
2508 unsigned long flags;
2508 2509
2509 spin_lock_irq(fcport->vha->host->host_lock); 2510 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2510 rport = fcport->drport ? fcport->drport: fcport->rport; 2511 rport = fcport->drport ? fcport->drport: fcport->rport;
2511 fcport->drport = NULL; 2512 fcport->drport = NULL;
2512 spin_unlock_irq(fcport->vha->host->host_lock); 2513 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2513 if (rport) 2514 if (rport)
2514 fc_remote_port_delete(rport); 2515 fc_remote_port_delete(rport);
2515} 2516}
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2879 struct fc_rport_identifiers rport_ids; 2880 struct fc_rport_identifiers rport_ids;
2880 struct fc_rport *rport; 2881 struct fc_rport *rport;
2881 struct qla_hw_data *ha = vha->hw; 2882 struct qla_hw_data *ha = vha->hw;
2883 unsigned long flags;
2882 2884
2883 qla2x00_rport_del(fcport); 2885 qla2x00_rport_del(fcport);
2884 2886
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2893 "Unable to allocate fc remote port!\n"); 2895 "Unable to allocate fc remote port!\n");
2894 return; 2896 return;
2895 } 2897 }
2896 spin_lock_irq(fcport->vha->host->host_lock); 2898 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2897 *((fc_port_t **)rport->dd_data) = fcport; 2899 *((fc_port_t **)rport->dd_data) = fcport;
2898 spin_unlock_irq(fcport->vha->host->host_lock); 2900 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2899 2901
2900 rport->supported_classes = fcport->supported_classes; 2902 rport->supported_classes = fcport->supported_classes;
2901 2903
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c194c23ca1fb..f27724d76cf6 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)
562 } 562 }
563 if (atomic_read(&fcport->state) != FCS_ONLINE) { 563 if (atomic_read(&fcport->state) != FCS_ONLINE) {
564 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || 564 if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
565 atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
566 atomic_read(&base_vha->loop_state) == LOOP_DEAD) { 565 atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
567 cmd->result = DID_NO_CONNECT << 16; 566 cmd->result = DID_NO_CONNECT << 16;
568 goto qc24_fail_command; 567 goto qc24_fail_command;
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2513{ 2512{
2514 struct fc_rport *rport; 2513 struct fc_rport *rport;
2515 scsi_qla_host_t *base_vha; 2514 scsi_qla_host_t *base_vha;
2515 unsigned long flags;
2516 2516
2517 if (!fcport->rport) 2517 if (!fcport->rport)
2518 return; 2518 return;
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2520 rport = fcport->rport; 2520 rport = fcport->rport;
2521 if (defer) { 2521 if (defer) {
2522 base_vha = pci_get_drvdata(vha->hw->pdev); 2522 base_vha = pci_get_drvdata(vha->hw->pdev);
2523 spin_lock_irq(vha->host->host_lock); 2523 spin_lock_irqsave(vha->host->host_lock, flags);
2524 fcport->drport = rport; 2524 fcport->drport = rport;
2525 spin_unlock_irq(vha->host->host_lock); 2525 spin_unlock_irqrestore(vha->host->host_lock, flags);
2526 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2526 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2527 qla2xxx_wake_dpc(base_vha); 2527 qla2xxx_wake_dpc(base_vha);
2528 } else 2528 } else
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data)
3282 3282
3283 set_user_nice(current, -20); 3283 set_user_nice(current, -20);
3284 3284
3285 set_current_state(TASK_INTERRUPTIBLE);
3285 while (!kthread_should_stop()) { 3286 while (!kthread_should_stop()) {
3286 DEBUG3(printk("qla2x00: DPC handler sleeping\n")); 3287 DEBUG3(printk("qla2x00: DPC handler sleeping\n"));
3287 3288
3288 set_current_state(TASK_INTERRUPTIBLE);
3289 schedule(); 3289 schedule();
3290 __set_current_state(TASK_RUNNING); 3290 __set_current_state(TASK_RUNNING);
3291 3291
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data)
3454 qla2x00_do_dpc_all_vps(base_vha); 3454 qla2x00_do_dpc_all_vps(base_vha);
3455 3455
3456 ha->dpc_active = 0; 3456 ha->dpc_active = 0;
3457 set_current_state(TASK_INTERRUPTIBLE);
3457 } /* End of while(1) */ 3458 } /* End of while(1) */
3459 __set_current_state(TASK_RUNNING);
3458 3460
3459 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); 3461 DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no));
3460 3462
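
Moving set_current_state(TASK_INTERRUPTIBLE) in front of the kthread_should_stop() test closes the window in which a wake-up (or kthread_stop()) arriving between the test and the state change would be lost. The canonical loop the DPC thread converges on looks roughly like this, with the actual work elided:

#include <linux/kthread.h>
#include <linux/sched.h>

static int dpc_thread_fn(void *data)
{
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();			/* sleep until woken */
		__set_current_state(TASK_RUNNING);

		/* ... process queued DPC work ... */

		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
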
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b310934efed..a6b2d72022fc 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd,
1671 unsigned long long lba, unsigned int num, int write) 1671 unsigned long long lba, unsigned int num, int write)
1672{ 1672{
1673 int ret; 1673 int ret;
1674 unsigned int block, rest = 0; 1674 unsigned long long block, rest = 0;
1675 int (*func)(struct scsi_cmnd *, unsigned char *, int); 1675 int (*func)(struct scsi_cmnd *, unsigned char *, int);
1676 1676
1677 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; 1677 func = write ? fetch_to_dev_buffer : fill_from_dev_buffer;
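
The scsi_debug change widens "block" and "rest" because they are derived from a 64-bit LBA; storing the result in an unsigned int silently truncates anything at or beyond 2^32 sectors. A tiny stand-alone illustration of the truncation (userspace, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned long long lba = 0x100000000ULL; /* first sector past 2 TiB at 512 B/sector */
	unsigned int b32 = (unsigned int)lba;	 /* truncates to 0 */
	unsigned long long b64 = lba;		 /* keeps the full value */

	printf("32-bit: %u  64-bit: %llu\n", b32, b64);
	return 0;
}
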
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9045c52abd25..fb2bb35c62cb 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q)
443 &sdev->request_queue->queue_flags); 443 &sdev->request_queue->queue_flags);
444 if (flagset) 444 if (flagset)
445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 445 queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
446 __blk_run_queue(sdev->request_queue); 446 __blk_run_queue(sdev->request_queue, false);
447 if (flagset) 447 if (flagset)
448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 448 queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
449 spin_unlock(sdev->request_queue->queue_lock); 449 spin_unlock(sdev->request_queue->queue_lock);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 998c01be3234..5c3ccfc6b622 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport)
3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); 3829 !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags);
3830 if (flagset) 3830 if (flagset)
3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); 3831 queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q);
3832 __blk_run_queue(rport->rqst_q); 3832 __blk_run_queue(rport->rqst_q, false);
3833 if (flagset) 3833 if (flagset)
3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); 3834 queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q);
3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); 3835 spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags);
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c
index 351d8a375b57..19752b09e155 100644
--- a/drivers/spi/pxa2xx_spi_pci.c
+++ b/drivers/spi/pxa2xx_spi_pci.c
@@ -7,10 +7,9 @@
7#include <linux/of_device.h> 7#include <linux/of_device.h>
8#include <linux/spi/pxa2xx_spi.h> 8#include <linux/spi/pxa2xx_spi.h>
9 9
10struct awesome_struct { 10struct ce4100_info {
11 struct ssp_device ssp; 11 struct ssp_device ssp;
12 struct platform_device spi_pdev; 12 struct platform_device *spi_pdev;
13 struct pxa2xx_spi_master spi_pdata;
14}; 13};
15 14
16static DEFINE_MUTEX(ssp_lock); 15static DEFINE_MUTEX(ssp_lock);
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp)
51} 50}
52EXPORT_SYMBOL_GPL(pxa_ssp_free); 51EXPORT_SYMBOL_GPL(pxa_ssp_free);
53 52
54static void plat_dev_release(struct device *dev)
55{
56 struct awesome_struct *as = container_of(dev,
57 struct awesome_struct, spi_pdev.dev);
58
59 of_device_node_put(&as->spi_pdev.dev);
60}
61
62static int __devinit ce4100_spi_probe(struct pci_dev *dev, 53static int __devinit ce4100_spi_probe(struct pci_dev *dev,
63 const struct pci_device_id *ent) 54 const struct pci_device_id *ent)
64{ 55{
65 int ret; 56 int ret;
66 resource_size_t phys_beg; 57 resource_size_t phys_beg;
67 resource_size_t phys_len; 58 resource_size_t phys_len;
68 struct awesome_struct *spi_info; 59 struct ce4100_info *spi_info;
69 struct platform_device *pdev; 60 struct platform_device *pdev;
70 struct pxa2xx_spi_master *spi_pdata; 61 struct pxa2xx_spi_master spi_pdata;
71 struct ssp_device *ssp; 62 struct ssp_device *ssp;
72 63
73 ret = pci_enable_device(dev); 64 ret = pci_enable_device(dev);
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
84 return ret; 75 return ret;
85 } 76 }
86 77
78 pdev = platform_device_alloc("pxa2xx-spi", dev->devfn);
87 spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); 79 spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL);
88 if (!spi_info) { 80 if (!pdev || !spi_info ) {
89 ret = -ENOMEM; 81 ret = -ENOMEM;
90 goto err_kz; 82 goto err_nomem;
91 } 83 }
92 ssp = &spi_info->ssp; 84 memset(&spi_pdata, 0, sizeof(spi_pdata));
93 pdev = &spi_info->spi_pdev; 85 spi_pdata.num_chipselect = dev->devfn;
94 spi_pdata = &spi_info->spi_pdata;
95 86
96 pdev->name = "pxa2xx-spi"; 87 ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata));
97 pdev->id = dev->devfn; 88 if (ret)
98 pdev->dev.parent = &dev->dev; 89 goto err_nomem;
99 pdev->dev.platform_data = &spi_info->spi_pdata;
100 90
91 pdev->dev.parent = &dev->dev;
101#ifdef CONFIG_OF 92#ifdef CONFIG_OF
102 pdev->dev.of_node = dev->dev.of_node; 93 pdev->dev.of_node = dev->dev.of_node;
103#endif 94#endif
104 pdev->dev.release = plat_dev_release; 95 ssp = &spi_info->ssp;
105
106 spi_pdata->num_chipselect = dev->devfn;
107
108 ssp->phys_base = pci_resource_start(dev, 0); 96 ssp->phys_base = pci_resource_start(dev, 0);
109 ssp->mmio_base = ioremap(phys_beg, phys_len); 97 ssp->mmio_base = ioremap(phys_beg, phys_len);
110 if (!ssp->mmio_base) { 98 if (!ssp->mmio_base) {
111 dev_err(&pdev->dev, "failed to ioremap() registers\n"); 99 dev_err(&pdev->dev, "failed to ioremap() registers\n");
112 ret = -EIO; 100 ret = -EIO;
113 goto err_remap; 101 goto err_nomem;
114 } 102 }
115 ssp->irq = dev->irq; 103 ssp->irq = dev->irq;
116 ssp->port_id = pdev->id; 104 ssp->port_id = pdev->id;
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev,
122 110
123 pci_set_drvdata(dev, spi_info); 111 pci_set_drvdata(dev, spi_info);
124 112
125 ret = platform_device_register(pdev); 113 ret = platform_device_add(pdev);
126 if (ret) 114 if (ret)
127 goto err_dev_add; 115 goto err_dev_add;
128 116
@@ -135,27 +123,21 @@ err_dev_add:
135 mutex_unlock(&ssp_lock); 123 mutex_unlock(&ssp_lock);
136 iounmap(ssp->mmio_base); 124 iounmap(ssp->mmio_base);
137 125
138err_remap: 126err_nomem:
139 kfree(spi_info);
140
141err_kz:
142 release_mem_region(phys_beg, phys_len); 127 release_mem_region(phys_beg, phys_len);
143 128 platform_device_put(pdev);
129 kfree(spi_info);
144 return ret; 130 return ret;
145} 131}
146 132
147static void __devexit ce4100_spi_remove(struct pci_dev *dev) 133static void __devexit ce4100_spi_remove(struct pci_dev *dev)
148{ 134{
149 struct awesome_struct *spi_info; 135 struct ce4100_info *spi_info;
150 struct platform_device *pdev;
151 struct ssp_device *ssp; 136 struct ssp_device *ssp;
152 137
153 spi_info = pci_get_drvdata(dev); 138 spi_info = pci_get_drvdata(dev);
154
155 ssp = &spi_info->ssp; 139 ssp = &spi_info->ssp;
156 pdev = &spi_info->spi_pdev; 140 platform_device_unregister(spi_info->spi_pdev);
157
158 platform_device_unregister(pdev);
159 141
160 iounmap(ssp->mmio_base); 142 iounmap(ssp->mmio_base);
161 release_mem_region(pci_resource_start(dev, 0), 143 release_mem_region(pci_resource_start(dev, 0),
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev)
171} 153}
172 154
173static struct pci_device_id ce4100_spi_devices[] __devinitdata = { 155static struct pci_device_id ce4100_spi_devices[] __devinitdata = {
174
175 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, 156 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) },
176 { }, 157 { },
177}; 158};
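
The ce4100 rework drops the embedded struct platform_device (whose lifetime and ->release handling were broken) in favour of the platform_device_alloc() / platform_device_add() API, with the platform data copied by the driver core rather than pointed at driver memory. A condensed sketch of that allocation pattern, using only the driver-model calls visible in the diff; the helper name is illustrative:

#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>

static int register_child_spi_dev(struct device *parent, int id,
				  const struct pxa2xx_spi_master *pdata)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc("pxa2xx-spi", id);
	if (!pdev)
		return -ENOMEM;

	/* The core duplicates the data, so a stack-local template is fine. */
	ret = platform_device_add_data(pdev, pdata, sizeof(*pdata));
	if (ret)
		goto err_put;

	pdev->dev.parent = parent;

	ret = platform_device_add(pdev);
	if (ret)
		goto err_put;
	return 0;

err_put:
	platform_device_put(pdev);	/* drops the reference taken by _alloc() */
	return ret;
}
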
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 5cfd70819f08..973bb190ef57 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \
13 target_core_transport.o \ 13 target_core_transport.o \
14 target_core_cdb.o \ 14 target_core_cdb.o \
15 target_core_ua.o \ 15 target_core_ua.o \
16 target_core_rd.o \ 16 target_core_rd.o
17 target_core_mib.o
18 17
19obj-$(CONFIG_TARGET_CORE) += target_core_mod.o 18obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
20 19
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 2764510798b0..caf8dc18ee0a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -37,7 +37,6 @@
37#include <linux/parser.h> 37#include <linux/parser.h>
38#include <linux/syscalls.h> 38#include <linux/syscalls.h>
39#include <linux/configfs.h> 39#include <linux/configfs.h>
40#include <linux/proc_fs.h>
41 40
42#include <target/target_core_base.h> 41#include <target/target_core_base.h>
43#include <target/target_core_device.h> 42#include <target/target_core_device.h>
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item)
1971{ 1970{
1972 struct se_subsystem_dev *se_dev = container_of(to_config_group(item), 1971 struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
1973 struct se_subsystem_dev, se_dev_group); 1972 struct se_subsystem_dev, se_dev_group);
1974 struct config_group *dev_cg; 1973 struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
1975 1974 struct se_subsystem_api *t = hba->transport;
1976 if (!(se_dev)) 1975 struct config_group *dev_cg = &se_dev->se_dev_group;
1977 return;
1978 1976
1979 dev_cg = &se_dev->se_dev_group;
1980 kfree(dev_cg->default_groups); 1977 kfree(dev_cg->default_groups);
1978 /*
 1979 * This pointer will be set when the storage is enabled with:
 1980 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
1981 */
1982 if (se_dev->se_dev_ptr) {
1983 printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
1984 "virtual_device() for se_dev_ptr: %p\n",
1985 se_dev->se_dev_ptr);
1986
1987 se_free_virtual_device(se_dev->se_dev_ptr, hba);
1988 } else {
1989 /*
1990 * Release struct se_subsystem_dev->se_dev_su_ptr..
1991 */
1992 printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
1993 "device() for se_dev_su_ptr: %p\n",
1994 se_dev->se_dev_su_ptr);
1995
1996 t->free_device(se_dev->se_dev_su_ptr);
1997 }
1998
1999 printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
2000 "_dev_t: %p\n", se_dev);
2001 kfree(se_dev);
1981} 2002}
1982 2003
1983static ssize_t target_core_dev_show(struct config_item *item, 2004static ssize_t target_core_dev_show(struct config_item *item,
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
2140 NULL, 2161 NULL,
2141}; 2162};
2142 2163
2164static void target_core_alua_lu_gp_release(struct config_item *item)
2165{
2166 struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
2167 struct t10_alua_lu_gp, lu_gp_group);
2168
2169 core_alua_free_lu_gp(lu_gp);
2170}
2171
2143static struct configfs_item_operations target_core_alua_lu_gp_ops = { 2172static struct configfs_item_operations target_core_alua_lu_gp_ops = {
2173 .release = target_core_alua_lu_gp_release,
2144 .show_attribute = target_core_alua_lu_gp_attr_show, 2174 .show_attribute = target_core_alua_lu_gp_attr_show,
2145 .store_attribute = target_core_alua_lu_gp_attr_store, 2175 .store_attribute = target_core_alua_lu_gp_attr_store,
2146}; 2176};
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp(
2191 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" 2221 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
2192 " Group: core/alua/lu_gps/%s, ID: %hu\n", 2222 " Group: core/alua/lu_gps/%s, ID: %hu\n",
2193 config_item_name(item), lu_gp->lu_gp_id); 2223 config_item_name(item), lu_gp->lu_gp_id);
2194 2224 /*
2225 * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release()
2226 * -> target_core_alua_lu_gp_release()
2227 */
2195 config_item_put(item); 2228 config_item_put(item);
2196 core_alua_free_lu_gp(lu_gp);
2197} 2229}
2198 2230
2199static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { 2231static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
2549 NULL, 2581 NULL,
2550}; 2582};
2551 2583
2584static void target_core_alua_tg_pt_gp_release(struct config_item *item)
2585{
2586 struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
2587 struct t10_alua_tg_pt_gp, tg_pt_gp_group);
2588
2589 core_alua_free_tg_pt_gp(tg_pt_gp);
2590}
2591
2552static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { 2592static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
2593 .release = target_core_alua_tg_pt_gp_release,
2553 .show_attribute = target_core_alua_tg_pt_gp_attr_show, 2594 .show_attribute = target_core_alua_tg_pt_gp_attr_show,
2554 .store_attribute = target_core_alua_tg_pt_gp_attr_store, 2595 .store_attribute = target_core_alua_tg_pt_gp_attr_store,
2555}; 2596};
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp(
2602 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" 2643 printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
2603 " Group: alua/tg_pt_gps/%s, ID: %hu\n", 2644 " Group: alua/tg_pt_gps/%s, ID: %hu\n",
2604 config_item_name(item), tg_pt_gp->tg_pt_gp_id); 2645 config_item_name(item), tg_pt_gp->tg_pt_gp_id);
2605 2646 /*
2647 * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release()
2648 * -> target_core_alua_tg_pt_gp_release().
2649 */
2606 config_item_put(item); 2650 config_item_put(item);
2607 core_alua_free_tg_pt_gp(tg_pt_gp);
2608} 2651}
2609 2652
2610static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { 2653static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev(
2771 struct se_subsystem_api *t; 2814 struct se_subsystem_api *t;
2772 struct config_item *df_item; 2815 struct config_item *df_item;
2773 struct config_group *dev_cg, *tg_pt_gp_cg; 2816 struct config_group *dev_cg, *tg_pt_gp_cg;
2774 int i, ret; 2817 int i;
2775 2818
2776 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); 2819 hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
2777 2820
2778 if (mutex_lock_interruptible(&hba->hba_access_mutex)) 2821 mutex_lock(&hba->hba_access_mutex);
2779 goto out;
2780
2781 t = hba->transport; 2822 t = hba->transport;
2782 2823
2783 spin_lock(&se_global->g_device_lock); 2824 spin_lock(&se_global->g_device_lock);
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev(
2791 config_item_put(df_item); 2832 config_item_put(df_item);
2792 } 2833 }
2793 kfree(tg_pt_gp_cg->default_groups); 2834 kfree(tg_pt_gp_cg->default_groups);
2794 core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); 2835 /*
 2836 * core_alua_free_tg_pt_gp() for ->default_tg_pt_gp is now called
 2837 * directly from target_core_alua_tg_pt_gp_release().
2838 */
2795 T10_ALUA(se_dev)->default_tg_pt_gp = NULL; 2839 T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
2796 2840
2797 dev_cg = &se_dev->se_dev_group; 2841 dev_cg = &se_dev->se_dev_group;
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev(
2800 dev_cg->default_groups[i] = NULL; 2844 dev_cg->default_groups[i] = NULL;
2801 config_item_put(df_item); 2845 config_item_put(df_item);
2802 } 2846 }
2803
2804 config_item_put(item);
2805 /* 2847 /*
2806 * This pointer will set when the storage is enabled with: 2848 * The releasing of se_dev and associated se_dev->se_dev_ptr is done
2807 * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` 2849 * from target_core_dev_item_ops->release() ->target_core_dev_release().
2808 */ 2850 */
2809 if (se_dev->se_dev_ptr) { 2851 config_item_put(item);
2810 printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
2811 "virtual_device() for se_dev_ptr: %p\n",
2812 se_dev->se_dev_ptr);
2813
2814 ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
2815 if (ret < 0)
2816 goto hba_out;
2817 } else {
2818 /*
2819 * Release struct se_subsystem_dev->se_dev_su_ptr..
2820 */
2821 printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
2822 "device() for se_dev_su_ptr: %p\n",
2823 se_dev->se_dev_su_ptr);
2824
2825 t->free_device(se_dev->se_dev_su_ptr);
2826 }
2827
2828 printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
2829 "_dev_t: %p\n", se_dev);
2830
2831hba_out:
2832 mutex_unlock(&hba->hba_access_mutex); 2852 mutex_unlock(&hba->hba_access_mutex);
2833out:
2834 kfree(se_dev);
2835} 2853}
2836 2854
2837static struct configfs_group_operations target_core_hba_group_ops = { 2855static struct configfs_group_operations target_core_hba_group_ops = {
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
2914 2932
2915CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); 2933CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
2916 2934
2935static void target_core_hba_release(struct config_item *item)
2936{
2937 struct se_hba *hba = container_of(to_config_group(item),
2938 struct se_hba, hba_group);
2939 core_delete_hba(hba);
2940}
2941
2917static struct configfs_attribute *target_core_hba_attrs[] = { 2942static struct configfs_attribute *target_core_hba_attrs[] = {
2918 &target_core_hba_hba_info.attr, 2943 &target_core_hba_hba_info.attr,
2919 &target_core_hba_hba_mode.attr, 2944 &target_core_hba_hba_mode.attr,
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = {
2921}; 2946};
2922 2947
2923static struct configfs_item_operations target_core_hba_item_ops = { 2948static struct configfs_item_operations target_core_hba_item_ops = {
2949 .release = target_core_hba_release,
2924 .show_attribute = target_core_hba_attr_show, 2950 .show_attribute = target_core_hba_attr_show,
2925 .store_attribute = target_core_hba_attr_store, 2951 .store_attribute = target_core_hba_attr_store,
2926}; 2952};
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget(
2997 struct config_group *group, 3023 struct config_group *group,
2998 struct config_item *item) 3024 struct config_item *item)
2999{ 3025{
3000 struct se_hba *hba = item_to_hba(item); 3026 /*
3001 3027 * core_delete_hba() is called from target_core_hba_item_ops->release()
3028 * -> target_core_hba_release()
3029 */
3002 config_item_put(item); 3030 config_item_put(item);
3003 core_delete_hba(hba);
3004} 3031}
3005 3032
3006static struct configfs_group_operations target_core_group_ops = { 3033static struct configfs_group_operations target_core_group_ops = {
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void)
3022 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; 3049 struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
3023 struct config_group *lu_gp_cg = NULL; 3050 struct config_group *lu_gp_cg = NULL;
3024 struct configfs_subsystem *subsys; 3051 struct configfs_subsystem *subsys;
3025 struct proc_dir_entry *scsi_target_proc = NULL;
3026 struct t10_alua_lu_gp *lu_gp; 3052 struct t10_alua_lu_gp *lu_gp;
3027 int ret; 3053 int ret;
3028 3054
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void)
3128 if (core_dev_setup_virtual_lun0() < 0) 3154 if (core_dev_setup_virtual_lun0() < 0)
3129 goto out; 3155 goto out;
3130 3156
3131 scsi_target_proc = proc_mkdir("scsi_target", 0);
3132 if (!(scsi_target_proc)) {
3133 printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
3134 goto out;
3135 }
3136 ret = init_scsi_target_mib();
3137 if (ret < 0)
3138 goto out;
3139
3140 return 0; 3157 return 0;
3141 3158
3142out: 3159out:
3143 configfs_unregister_subsystem(subsys); 3160 configfs_unregister_subsystem(subsys);
3144 if (scsi_target_proc)
3145 remove_proc_entry("scsi_target", 0);
3146 core_dev_release_virtual_lun0(); 3161 core_dev_release_virtual_lun0();
3147 rd_module_exit(); 3162 rd_module_exit();
3148out_global: 3163out_global:
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void)
3178 config_item_put(item); 3193 config_item_put(item);
3179 } 3194 }
3180 kfree(lu_gp_cg->default_groups); 3195 kfree(lu_gp_cg->default_groups);
3181 core_alua_free_lu_gp(se_global->default_lu_gp); 3196 lu_gp_cg->default_groups = NULL;
3182 se_global->default_lu_gp = NULL;
3183 3197
3184 alua_cg = &se_global->alua_group; 3198 alua_cg = &se_global->alua_group;
3185 for (i = 0; alua_cg->default_groups[i]; i++) { 3199 for (i = 0; alua_cg->default_groups[i]; i++) {
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void)
3188 config_item_put(item); 3202 config_item_put(item);
3189 } 3203 }
3190 kfree(alua_cg->default_groups); 3204 kfree(alua_cg->default_groups);
3205 alua_cg->default_groups = NULL;
3191 3206
3192 hba_cg = &se_global->target_core_hbagroup; 3207 hba_cg = &se_global->target_core_hbagroup;
3193 for (i = 0; hba_cg->default_groups[i]; i++) { 3208 for (i = 0; hba_cg->default_groups[i]; i++) {
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void)
3196 config_item_put(item); 3211 config_item_put(item);
3197 } 3212 }
3198 kfree(hba_cg->default_groups); 3213 kfree(hba_cg->default_groups);
3199 3214 hba_cg->default_groups = NULL;
3200 for (i = 0; subsys->su_group.default_groups[i]; i++) { 3215 /*
3201 item = &subsys->su_group.default_groups[i]->cg_item; 3216 * We expect subsys->su_group.default_groups to be released
3202 subsys->su_group.default_groups[i] = NULL; 3217 * by configfs subsystem provider logic..
3203 config_item_put(item); 3218 */
3204 } 3219 configfs_unregister_subsystem(subsys);
3205 kfree(subsys->su_group.default_groups); 3220 kfree(subsys->su_group.default_groups);
3206 3221
3207 configfs_unregister_subsystem(subsys); 3222 core_alua_free_lu_gp(se_global->default_lu_gp);
3223 se_global->default_lu_gp = NULL;
3224
3208 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" 3225 printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
3209 " Infrastructure\n"); 3226 " Infrastructure\n");
3210 3227
3211 remove_scsi_target_mib();
3212 remove_proc_entry("scsi_target", 0);
3213 core_dev_release_virtual_lun0(); 3228 core_dev_release_virtual_lun0();
3214 rd_module_exit(); 3229 rd_module_exit();
3215 release_se_global(); 3230 release_se_global();
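
The recurring theme in this configfs rework: freeing an object from the drop_item() path can race with config_item references still held elsewhere, so the final kfree()/teardown moves into a configfs ->release() callback, which configfs invokes only once the last reference is gone, leaving drop_item() with nothing but config_item_put(). A stripped-down sketch of the pattern with hypothetical type and function names:

#include <linux/configfs.h>
#include <linux/slab.h>

struct my_item {				/* illustrative configfs-backed object */
	struct config_group group;
};

static void my_item_release(struct config_item *item)
{
	struct my_item *p = container_of(to_config_group(item),
					 struct my_item, group);

	kfree(p);				/* runs after the last reference drops */
}

static struct configfs_item_operations my_item_ops = {
	.release	= my_item_release,
};

/* drop_item() only drops the reference; ->release() does the freeing. */
static void my_drop_item(struct config_group *group, struct config_item *item)
{
	config_item_put(item);
}
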
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 317ce58d426d..5da051a07fa3 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -373,11 +373,11 @@ int core_update_device_list_for_node(
373 /* 373 /*
374 * deve->se_lun_acl will be NULL for demo-mode created LUNs 374 * deve->se_lun_acl will be NULL for demo-mode created LUNs
375 * that have not been explicitly converted to MappedLUNs -> 375 * that have not been explicitly converted to MappedLUNs ->
376 * struct se_lun_acl. 376 * struct se_lun_acl, but we remove deve->alua_port_list from
377 * port->sep_alua_list. This also means that active UAs and
378 * NodeACL context specific PR metadata for demo-mode
379 * MappedLUN *deve will be released below..
377 */ 380 */
378 if (!(deve->se_lun_acl))
379 return 0;
380
381 spin_lock_bh(&port->sep_alua_lock); 381 spin_lock_bh(&port->sep_alua_lock);
382 list_del(&deve->alua_port_list); 382 list_del(&deve->alua_port_list);
383 spin_unlock_bh(&port->sep_alua_lock); 383 spin_unlock_bh(&port->sep_alua_lock);
@@ -395,12 +395,14 @@ int core_update_device_list_for_node(
395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl" 395 printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
396 " already set for demo mode -> explict" 396 " already set for demo mode -> explict"
397 " LUN ACL transition\n"); 397 " LUN ACL transition\n");
398 spin_unlock_irq(&nacl->device_list_lock);
398 return -1; 399 return -1;
399 } 400 }
400 if (deve->se_lun != lun) { 401 if (deve->se_lun != lun) {
401 printk(KERN_ERR "struct se_dev_entry->se_lun does" 402 printk(KERN_ERR "struct se_dev_entry->se_lun does"
402 " match passed struct se_lun for demo mode" 403 " match passed struct se_lun for demo mode"
403 " -> explict LUN ACL transition\n"); 404 " -> explict LUN ACL transition\n");
405 spin_unlock_irq(&nacl->device_list_lock);
404 return -1; 406 return -1;
405 } 407 }
406 deve->se_lun_acl = lun_acl; 408 deve->se_lun_acl = lun_acl;
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev)
865 } 867 }
866 } 868 }
867 spin_unlock(&hba->device_lock); 869 spin_unlock(&hba->device_lock);
868
869 while (atomic_read(&hba->dev_mib_access_count))
870 cpu_relax();
871} 870}
872 871
873int se_dev_check_online(struct se_device *dev) 872int se_dev_check_online(struct se_device *dev)
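
The core_update_device_list_for_node() hunk above is a plain lock-leak fix: the two early "return -1" branches bailed out with nacl->device_list_lock still held. The general shape of the repair, with illustrative names:

#include <linux/spinlock.h>

static int update_entry(spinlock_t *lock, int ok_a, int ok_b)
{
	spin_lock_irq(lock);
	if (!ok_a) {
		spin_unlock_irq(lock);	/* previously missing on this path */
		return -1;
	}
	if (!ok_b) {
		spin_unlock_irq(lock);	/* previously missing on this path */
		return -1;
	}
	/* ... modify the protected entry ... */
	spin_unlock_irq(lock);
	return 0;
}
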
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 32b148d7e261..b65d1c8e7740 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
214 214
215CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); 215CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
216 216
217static void target_fabric_mappedlun_release(struct config_item *item)
218{
219 struct se_lun_acl *lacl = container_of(to_config_group(item),
220 struct se_lun_acl, se_lun_group);
221 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
222
223 core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
224}
225
217static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { 226static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
218 &target_fabric_mappedlun_write_protect.attr, 227 &target_fabric_mappedlun_write_protect.attr,
219 NULL, 228 NULL,
220}; 229};
221 230
222static struct configfs_item_operations target_fabric_mappedlun_item_ops = { 231static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
232 .release = target_fabric_mappedlun_release,
223 .show_attribute = target_fabric_mappedlun_attr_show, 233 .show_attribute = target_fabric_mappedlun_attr_show,
224 .store_attribute = target_fabric_mappedlun_attr_store, 234 .store_attribute = target_fabric_mappedlun_attr_store,
225 .allow_link = target_fabric_mappedlun_link, 235 .allow_link = target_fabric_mappedlun_link,
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun(
337 struct config_group *group, 347 struct config_group *group,
338 struct config_item *item) 348 struct config_item *item)
339{ 349{
340 struct se_lun_acl *lacl = container_of(to_config_group(item),
341 struct se_lun_acl, se_lun_group);
342 struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
343
344 config_item_put(item); 350 config_item_put(item);
345 core_dev_free_initiator_node_lun_acl(se_tpg, lacl); 351}
352
353static void target_fabric_nacl_base_release(struct config_item *item)
354{
355 struct se_node_acl *se_nacl = container_of(to_config_group(item),
356 struct se_node_acl, acl_group);
357 struct se_portal_group *se_tpg = se_nacl->se_tpg;
358 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
359
360 tf->tf_ops.fabric_drop_nodeacl(se_nacl);
346} 361}
347 362
348static struct configfs_item_operations target_fabric_nacl_base_item_ops = { 363static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
364 .release = target_fabric_nacl_base_release,
349 .show_attribute = target_fabric_nacl_base_attr_show, 365 .show_attribute = target_fabric_nacl_base_attr_show,
350 .store_attribute = target_fabric_nacl_base_attr_store, 366 .store_attribute = target_fabric_nacl_base_attr_store,
351}; 367};
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl(
404 struct config_group *group, 420 struct config_group *group,
405 struct config_item *item) 421 struct config_item *item)
406{ 422{
407 struct se_portal_group *se_tpg = container_of(group,
408 struct se_portal_group, tpg_acl_group);
409 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
410 struct se_node_acl *se_nacl = container_of(to_config_group(item), 423 struct se_node_acl *se_nacl = container_of(to_config_group(item),
411 struct se_node_acl, acl_group); 424 struct se_node_acl, acl_group);
412 struct config_item *df_item; 425 struct config_item *df_item;
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl(
419 nacl_cg->default_groups[i] = NULL; 432 nacl_cg->default_groups[i] = NULL;
420 config_item_put(df_item); 433 config_item_put(df_item);
421 } 434 }
422 435 /*
436 * struct se_node_acl free is done in target_fabric_nacl_base_release()
437 */
423 config_item_put(item); 438 config_item_put(item);
424 tf->tf_ops.fabric_drop_nodeacl(se_nacl);
425} 439}
426 440
427static struct configfs_group_operations target_fabric_nacl_group_ops = { 441static struct configfs_group_operations target_fabric_nacl_group_ops = {
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
437 451
438CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); 452CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
439 453
454static void target_fabric_np_base_release(struct config_item *item)
455{
456 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
457 struct se_tpg_np, tpg_np_group);
458 struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent;
459 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
460
461 tf->tf_ops.fabric_drop_np(se_tpg_np);
462}
463
440static struct configfs_item_operations target_fabric_np_base_item_ops = { 464static struct configfs_item_operations target_fabric_np_base_item_ops = {
465 .release = target_fabric_np_base_release,
441 .show_attribute = target_fabric_np_base_attr_show, 466 .show_attribute = target_fabric_np_base_attr_show,
442 .store_attribute = target_fabric_np_base_attr_store, 467 .store_attribute = target_fabric_np_base_attr_store,
443}; 468};
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np(
466 if (!(se_tpg_np) || IS_ERR(se_tpg_np)) 491 if (!(se_tpg_np) || IS_ERR(se_tpg_np))
467 return ERR_PTR(-EINVAL); 492 return ERR_PTR(-EINVAL);
468 493
494 se_tpg_np->tpg_np_parent = se_tpg;
469 config_group_init_type_name(&se_tpg_np->tpg_np_group, name, 495 config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
470 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); 496 &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
471 497
@@ -476,14 +502,10 @@ static void target_fabric_drop_np(
476 struct config_group *group, 502 struct config_group *group,
477 struct config_item *item) 503 struct config_item *item)
478{ 504{
479 struct se_portal_group *se_tpg = container_of(group, 505 /*
480 struct se_portal_group, tpg_np_group); 506 * struct se_tpg_np is released via target_fabric_np_base_release()
481 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 507 */
482 struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
483 struct se_tpg_np, tpg_np_group);
484
485 config_item_put(item); 508 config_item_put(item);
486 tf->tf_ops.fabric_drop_np(se_tpg_np);
487} 509}
488 510
489static struct configfs_group_operations target_fabric_np_group_ops = { 511static struct configfs_group_operations target_fabric_np_group_ops = {
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
814 */ 836 */
815CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); 837CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
816 838
839static void target_fabric_tpg_release(struct config_item *item)
840{
841 struct se_portal_group *se_tpg = container_of(to_config_group(item),
842 struct se_portal_group, tpg_group);
843 struct se_wwn *wwn = se_tpg->se_tpg_wwn;
844 struct target_fabric_configfs *tf = wwn->wwn_tf;
845
846 tf->tf_ops.fabric_drop_tpg(se_tpg);
847}
848
817static struct configfs_item_operations target_fabric_tpg_base_item_ops = { 849static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
850 .release = target_fabric_tpg_release,
818 .show_attribute = target_fabric_tpg_attr_show, 851 .show_attribute = target_fabric_tpg_attr_show,
819 .store_attribute = target_fabric_tpg_attr_store, 852 .store_attribute = target_fabric_tpg_attr_store,
820}; 853};
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg(
872 struct config_group *group, 905 struct config_group *group,
873 struct config_item *item) 906 struct config_item *item)
874{ 907{
875 struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
876 struct target_fabric_configfs *tf = wwn->wwn_tf;
877 struct se_portal_group *se_tpg = container_of(to_config_group(item), 908 struct se_portal_group *se_tpg = container_of(to_config_group(item),
878 struct se_portal_group, tpg_group); 909 struct se_portal_group, tpg_group);
879 struct config_group *tpg_cg = &se_tpg->tpg_group; 910 struct config_group *tpg_cg = &se_tpg->tpg_group;
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg(
890 } 921 }
891 922
892 config_item_put(item); 923 config_item_put(item);
893 tf->tf_ops.fabric_drop_tpg(se_tpg);
894} 924}
895 925
926static void target_fabric_release_wwn(struct config_item *item)
927{
928 struct se_wwn *wwn = container_of(to_config_group(item),
929 struct se_wwn, wwn_group);
930 struct target_fabric_configfs *tf = wwn->wwn_tf;
931
932 tf->tf_ops.fabric_drop_wwn(wwn);
933}
934
935static struct configfs_item_operations target_fabric_tpg_item_ops = {
936 .release = target_fabric_release_wwn,
937};
938
896static struct configfs_group_operations target_fabric_tpg_group_ops = { 939static struct configfs_group_operations target_fabric_tpg_group_ops = {
897 .make_group = target_fabric_make_tpg, 940 .make_group = target_fabric_make_tpg,
898 .drop_item = target_fabric_drop_tpg, 941 .drop_item = target_fabric_drop_tpg,
899}; 942};
900 943
901TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); 944TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops,
945 NULL);
902 946
903/* End of tfc_tpg_cit */ 947/* End of tfc_tpg_cit */
904 948
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn(
932 struct config_group *group, 976 struct config_group *group,
933 struct config_item *item) 977 struct config_item *item)
934{ 978{
935 struct target_fabric_configfs *tf = container_of(group,
936 struct target_fabric_configfs, tf_group);
937 struct se_wwn *wwn = container_of(to_config_group(item),
938 struct se_wwn, wwn_group);
939
940 config_item_put(item); 979 config_item_put(item);
941 tf->tf_ops.fabric_drop_wwn(wwn);
942} 980}
943 981
944static struct configfs_group_operations target_fabric_wwn_group_ops = { 982static struct configfs_group_operations target_fabric_wwn_group_ops = {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c6e0d757e76e..67f0c09983c8 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice(
154 154
155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path, 155 bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); 156 FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
157 if (!(bd)) 157 if (IS_ERR(bd))
158 goto failed; 158 goto failed;
159 /* 159 /*
160 * Setup the local scope queue_limits from struct request_queue->limits 160 * Setup the local scope queue_limits from struct request_queue->limits
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p)
220{ 220{
221 struct iblock_dev *ib_dev = p; 221 struct iblock_dev *ib_dev = p;
222 222
223 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); 223 if (ib_dev->ibd_bd != NULL)
224 bioset_free(ib_dev->ibd_bio_set); 224 blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
225 if (ib_dev->ibd_bio_set != NULL)
226 bioset_free(ib_dev->ibd_bio_set);
225 kfree(ib_dev); 227 kfree(ib_dev);
226} 228}
227 229
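
blkdev_get_by_path() reports failure with an ERR_PTR(), never NULL, so the old "if (!(bd))" test let error pointers reach later dereferences; the companion change also NULL-guards iblock_free_device() so a partially set-up device can be torn down safely. The general ERR_PTR idiom, sketched with an illustrative helper:

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

static int open_backing_dev(const char *path, void *holder,
			    struct block_device **out)
{
	struct block_device *bd;

	bd = blkdev_get_by_path(path, FMODE_WRITE | FMODE_READ | FMODE_EXCL,
				holder);
	if (IS_ERR(bd))			/* a NULL test would miss this */
		return PTR_ERR(bd);
	*out = bd;
	return 0;
}
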
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c
deleted file mode 100644
index d5a48aa0d2d1..000000000000
--- a/drivers/target/target_core_mib.c
+++ /dev/null
@@ -1,1078 +0,0 @@
1/*******************************************************************************
2 * Filename: target_core_mib.c
3 *
4 * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
5 * Copyright (c) 2007-2010 Rising Tide Systems
6 * Copyright (c) 2008-2010 Linux-iSCSI.org
7 *
8 * Nicholas A. Bellinger <nab@linux-iscsi.org>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 *
24 ******************************************************************************/
25
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/delay.h>
30#include <linux/timer.h>
31#include <linux/string.h>
32#include <linux/version.h>
33#include <generated/utsrelease.h>
34#include <linux/utsname.h>
35#include <linux/proc_fs.h>
36#include <linux/seq_file.h>
37#include <linux/blkdev.h>
38#include <scsi/scsi.h>
39#include <scsi/scsi_device.h>
40#include <scsi/scsi_host.h>
41
42#include <target/target_core_base.h>
43#include <target/target_core_transport.h>
44#include <target/target_core_fabric_ops.h>
45#include <target/target_core_configfs.h>
46
47#include "target_core_hba.h"
48#include "target_core_mib.h"
49
50/* SCSI mib table index */
51static struct scsi_index_table scsi_index_table;
52
53#ifndef INITIAL_JIFFIES
54#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
55#endif
56
57/* SCSI Instance Table */
58#define SCSI_INST_SW_INDEX 1
59#define SCSI_TRANSPORT_INDEX 1
60
61#define NONE "None"
62#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
63
64static inline int list_is_first(const struct list_head *list,
65 const struct list_head *head)
66{
67 return list->prev == head;
68}
69
70static void *locate_hba_start(
71 struct seq_file *seq,
72 loff_t *pos)
73{
74 spin_lock(&se_global->g_device_lock);
75 return seq_list_start(&se_global->g_se_dev_list, *pos);
76}
77
78static void *locate_hba_next(
79 struct seq_file *seq,
80 void *v,
81 loff_t *pos)
82{
83 return seq_list_next(v, &se_global->g_se_dev_list, pos);
84}
85
86static void locate_hba_stop(struct seq_file *seq, void *v)
87{
88 spin_unlock(&se_global->g_device_lock);
89}
90
91/****************************************************************************
92 * SCSI MIB Tables
93 ****************************************************************************/
94
95/*
96 * SCSI Instance Table
97 */
98static void *scsi_inst_seq_start(
99 struct seq_file *seq,
100 loff_t *pos)
101{
102 spin_lock(&se_global->hba_lock);
103 return seq_list_start(&se_global->g_hba_list, *pos);
104}
105
106static void *scsi_inst_seq_next(
107 struct seq_file *seq,
108 void *v,
109 loff_t *pos)
110{
111 return seq_list_next(v, &se_global->g_hba_list, pos);
112}
113
114static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
115{
116 spin_unlock(&se_global->hba_lock);
117}
118
119static int scsi_inst_seq_show(struct seq_file *seq, void *v)
120{
121 struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
122
123 if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
124 seq_puts(seq, "inst sw_indx\n");
125
126 seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
127 seq_printf(seq, "plugin: %s version: %s\n",
128 hba->transport->name, TARGET_CORE_VERSION);
129
130 return 0;
131}
132
133static const struct seq_operations scsi_inst_seq_ops = {
134 .start = scsi_inst_seq_start,
135 .next = scsi_inst_seq_next,
136 .stop = scsi_inst_seq_stop,
137 .show = scsi_inst_seq_show
138};
139
140static int scsi_inst_seq_open(struct inode *inode, struct file *file)
141{
142 return seq_open(file, &scsi_inst_seq_ops);
143}
144
145static const struct file_operations scsi_inst_seq_fops = {
146 .owner = THIS_MODULE,
147 .open = scsi_inst_seq_open,
148 .read = seq_read,
149 .llseek = seq_lseek,
150 .release = seq_release,
151};
152
153/*
154 * SCSI Device Table
155 */
156static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
157{
158 return locate_hba_start(seq, pos);
159}
160
161static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
162{
163 return locate_hba_next(seq, v, pos);
164}
165
166static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
167{
168 locate_hba_stop(seq, v);
169}
170
171static int scsi_dev_seq_show(struct seq_file *seq, void *v)
172{
173 struct se_hba *hba;
174 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
175 g_se_dev_list);
176 struct se_device *dev = se_dev->se_dev_ptr;
177 char str[28];
178 int k;
179
180 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
181 seq_puts(seq, "inst indx role ports\n");
182
183 if (!(dev))
184 return 0;
185
186 hba = dev->se_hba;
187 if (!(hba)) {
188 /* Log error ? */
189 return 0;
190 }
191
192 seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
193 dev->dev_index, "Target", dev->dev_port_count);
194
195 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
196
197 /* vendor */
198 for (k = 0; k < 8; k++)
199 str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
200 DEV_T10_WWN(dev)->vendor[k] : 0x20;
201 str[k] = 0x20;
202
203 /* model */
204 for (k = 0; k < 16; k++)
205 str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
206 DEV_T10_WWN(dev)->model[k] : 0x20;
207 str[k + 9] = 0;
208
209 seq_printf(seq, "dev_alias: %s\n", str);
210
211 return 0;
212}
213
214static const struct seq_operations scsi_dev_seq_ops = {
215 .start = scsi_dev_seq_start,
216 .next = scsi_dev_seq_next,
217 .stop = scsi_dev_seq_stop,
218 .show = scsi_dev_seq_show
219};
220
221static int scsi_dev_seq_open(struct inode *inode, struct file *file)
222{
223 return seq_open(file, &scsi_dev_seq_ops);
224}
225
226static const struct file_operations scsi_dev_seq_fops = {
227 .owner = THIS_MODULE,
228 .open = scsi_dev_seq_open,
229 .read = seq_read,
230 .llseek = seq_lseek,
231 .release = seq_release,
232};
233
234/*
235 * SCSI Port Table
236 */
237static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
238{
239 return locate_hba_start(seq, pos);
240}
241
242static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
243{
244 return locate_hba_next(seq, v, pos);
245}
246
247static void scsi_port_seq_stop(struct seq_file *seq, void *v)
248{
249 locate_hba_stop(seq, v);
250}
251
252static int scsi_port_seq_show(struct seq_file *seq, void *v)
253{
254 struct se_hba *hba;
255 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
256 g_se_dev_list);
257 struct se_device *dev = se_dev->se_dev_ptr;
258 struct se_port *sep, *sep_tmp;
259
260 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
261 seq_puts(seq, "inst device indx role busy_count\n");
262
263 if (!(dev))
264 return 0;
265
266 hba = dev->se_hba;
267 if (!(hba)) {
268 /* Log error ? */
269 return 0;
270 }
271
272 /* FIXME: scsiPortBusyStatuses count */
273 spin_lock(&dev->se_port_lock);
274 list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
275 seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
276 dev->dev_index, sep->sep_index, "Device",
277 dev->dev_index, 0);
278 }
279 spin_unlock(&dev->se_port_lock);
280
281 return 0;
282}
283
284static const struct seq_operations scsi_port_seq_ops = {
285 .start = scsi_port_seq_start,
286 .next = scsi_port_seq_next,
287 .stop = scsi_port_seq_stop,
288 .show = scsi_port_seq_show
289};
290
291static int scsi_port_seq_open(struct inode *inode, struct file *file)
292{
293 return seq_open(file, &scsi_port_seq_ops);
294}
295
296static const struct file_operations scsi_port_seq_fops = {
297 .owner = THIS_MODULE,
298 .open = scsi_port_seq_open,
299 .read = seq_read,
300 .llseek = seq_lseek,
301 .release = seq_release,
302};
303
304/*
305 * SCSI Transport Table
306 */
307static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
308{
309 return locate_hba_start(seq, pos);
310}
311
312static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
313{
314 return locate_hba_next(seq, v, pos);
315}
316
317static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
318{
319 locate_hba_stop(seq, v);
320}
321
322static int scsi_transport_seq_show(struct seq_file *seq, void *v)
323{
324 struct se_hba *hba;
325 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
326 g_se_dev_list);
327 struct se_device *dev = se_dev->se_dev_ptr;
328 struct se_port *se, *se_tmp;
329 struct se_portal_group *tpg;
330 struct t10_wwn *wwn;
331 char buf[64];
332
333 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
334 seq_puts(seq, "inst device indx dev_name\n");
335
336 if (!(dev))
337 return 0;
338
339 hba = dev->se_hba;
340 if (!(hba)) {
341 /* Log error ? */
342 return 0;
343 }
344
345 wwn = DEV_T10_WWN(dev);
346
347 spin_lock(&dev->se_port_lock);
348 list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
349 tpg = se->sep_tpg;
350 sprintf(buf, "scsiTransport%s",
351 TPG_TFO(tpg)->get_fabric_name());
352
353 seq_printf(seq, "%u %s %u %s+%s\n",
354 hba->hba_index, /* scsiTransportIndex */
355 buf, /* scsiTransportType */
356 (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
357 TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
358 0,
359 TPG_TFO(tpg)->tpg_get_wwn(tpg),
360 (strlen(wwn->unit_serial)) ?
361 /* scsiTransportDevName */
362 wwn->unit_serial : wwn->vendor);
363 }
364 spin_unlock(&dev->se_port_lock);
365
366 return 0;
367}
368
369static const struct seq_operations scsi_transport_seq_ops = {
370 .start = scsi_transport_seq_start,
371 .next = scsi_transport_seq_next,
372 .stop = scsi_transport_seq_stop,
373 .show = scsi_transport_seq_show
374};
375
376static int scsi_transport_seq_open(struct inode *inode, struct file *file)
377{
378 return seq_open(file, &scsi_transport_seq_ops);
379}
380
381static const struct file_operations scsi_transport_seq_fops = {
382 .owner = THIS_MODULE,
383 .open = scsi_transport_seq_open,
384 .read = seq_read,
385 .llseek = seq_lseek,
386 .release = seq_release,
387};
388
389/*
390 * SCSI Target Device Table
391 */
392static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
393{
394 return locate_hba_start(seq, pos);
395}
396
397static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
398{
399 return locate_hba_next(seq, v, pos);
400}
401
402static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
403{
404 locate_hba_stop(seq, v);
405}
406
407
408#define LU_COUNT 1 /* for now */
409static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
410{
411 struct se_hba *hba;
412 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
413 g_se_dev_list);
414 struct se_device *dev = se_dev->se_dev_ptr;
415 int non_accessible_lus = 0;
416 char status[16];
417
418 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
419 seq_puts(seq, "inst indx num_LUs status non_access_LUs"
420 " resets\n");
421
422 if (!(dev))
423 return 0;
424
425 hba = dev->se_hba;
426 if (!(hba)) {
427 /* Log error ? */
428 return 0;
429 }
430
431 switch (dev->dev_status) {
432 case TRANSPORT_DEVICE_ACTIVATED:
433 strcpy(status, "activated");
434 break;
435 case TRANSPORT_DEVICE_DEACTIVATED:
436 strcpy(status, "deactivated");
437 non_accessible_lus = 1;
438 break;
439 case TRANSPORT_DEVICE_SHUTDOWN:
440 strcpy(status, "shutdown");
441 non_accessible_lus = 1;
442 break;
443 case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
444 case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
445 strcpy(status, "offline");
446 non_accessible_lus = 1;
447 break;
448 default:
449 sprintf(status, "unknown(%d)", dev->dev_status);
450 non_accessible_lus = 1;
451 }
452
453 seq_printf(seq, "%u %u %u %s %u %u\n",
454 hba->hba_index, dev->dev_index, LU_COUNT,
455 status, non_accessible_lus, dev->num_resets);
456
457 return 0;
458}
459
460static const struct seq_operations scsi_tgt_dev_seq_ops = {
461 .start = scsi_tgt_dev_seq_start,
462 .next = scsi_tgt_dev_seq_next,
463 .stop = scsi_tgt_dev_seq_stop,
464 .show = scsi_tgt_dev_seq_show
465};
466
467static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
468{
469 return seq_open(file, &scsi_tgt_dev_seq_ops);
470}
471
472static const struct file_operations scsi_tgt_dev_seq_fops = {
473 .owner = THIS_MODULE,
474 .open = scsi_tgt_dev_seq_open,
475 .read = seq_read,
476 .llseek = seq_lseek,
477 .release = seq_release,
478};
479
480/*
481 * SCSI Target Port Table
482 */
483static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
484{
485 return locate_hba_start(seq, pos);
486}
487
488static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
489{
490 return locate_hba_next(seq, v, pos);
491}
492
493static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
494{
495 locate_hba_stop(seq, v);
496}
497
498static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
499{
500 struct se_hba *hba;
501 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
502 g_se_dev_list);
503 struct se_device *dev = se_dev->se_dev_ptr;
504 struct se_port *sep, *sep_tmp;
505 struct se_portal_group *tpg;
506 u32 rx_mbytes, tx_mbytes;
507 unsigned long long num_cmds;
508 char buf[64];
509
510 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
511 seq_puts(seq, "inst device indx name port_index in_cmds"
512 " write_mbytes read_mbytes hs_in_cmds\n");
513
514 if (!(dev))
515 return 0;
516
517 hba = dev->se_hba;
518 if (!(hba)) {
519 /* Log error ? */
520 return 0;
521 }
522
523 spin_lock(&dev->se_port_lock);
524 list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
525 tpg = sep->sep_tpg;
526 sprintf(buf, "%sPort#",
527 TPG_TFO(tpg)->get_fabric_name());
528
529 seq_printf(seq, "%u %u %u %s%d %s%s%d ",
530 hba->hba_index,
531 dev->dev_index,
532 sep->sep_index,
533 buf, sep->sep_index,
534 TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
535 TPG_TFO(tpg)->tpg_get_tag(tpg));
536
537 spin_lock(&sep->sep_lun->lun_sep_lock);
538 num_cmds = sep->sep_stats.cmd_pdus;
539 rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
540 tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
541 spin_unlock(&sep->sep_lun->lun_sep_lock);
542
543 seq_printf(seq, "%llu %u %u %u\n", num_cmds,
544 rx_mbytes, tx_mbytes, 0);
545 }
546 spin_unlock(&dev->se_port_lock);
547
548 return 0;
549}
550
551static const struct seq_operations scsi_tgt_port_seq_ops = {
552 .start = scsi_tgt_port_seq_start,
553 .next = scsi_tgt_port_seq_next,
554 .stop = scsi_tgt_port_seq_stop,
555 .show = scsi_tgt_port_seq_show
556};
557
558static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
559{
560 return seq_open(file, &scsi_tgt_port_seq_ops);
561}
562
563static const struct file_operations scsi_tgt_port_seq_fops = {
564 .owner = THIS_MODULE,
565 .open = scsi_tgt_port_seq_open,
566 .read = seq_read,
567 .llseek = seq_lseek,
568 .release = seq_release,
569};
570
571/*
572 * SCSI Authorized Initiator Table:
573 * It contains the SCSI Initiators authorized to be attached to one of the
574 * local Target ports.
575 * Iterates through all active TPGs and extracts the info from the ACLs
576 */
577static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
578{
579 spin_lock_bh(&se_global->se_tpg_lock);
580 return seq_list_start(&se_global->g_se_tpg_list, *pos);
581}
582
583static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
584 loff_t *pos)
585{
586 return seq_list_next(v, &se_global->g_se_tpg_list, pos);
587}
588
589static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
590{
591 spin_unlock_bh(&se_global->se_tpg_lock);
592}
593
594static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
595{
596 struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
597 se_tpg_list);
598 struct se_dev_entry *deve;
599 struct se_lun *lun;
600 struct se_node_acl *se_nacl;
601 int j;
602
603 if (list_is_first(&se_tpg->se_tpg_list,
604 &se_global->g_se_tpg_list))
605 seq_puts(seq, "inst dev port indx dev_or_port intr_name "
606 "map_indx att_count num_cmds read_mbytes "
607 "write_mbytes hs_num_cmds creation_time row_status\n");
608
609 if (!(se_tpg))
610 return 0;
611
612 spin_lock(&se_tpg->acl_node_lock);
613 list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
614
615 atomic_inc(&se_nacl->mib_ref_count);
616 smp_mb__after_atomic_inc();
617 spin_unlock(&se_tpg->acl_node_lock);
618
619 spin_lock_irq(&se_nacl->device_list_lock);
620 for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
621 deve = &se_nacl->device_list[j];
622 if (!(deve->lun_flags &
623 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
624 (!deve->se_lun))
625 continue;
626 lun = deve->se_lun;
627 if (!lun->lun_se_dev)
628 continue;
629
630 seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
631 " %u %s\n",
632 /* scsiInstIndex */
633 (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
634 TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
635 0,
636 /* scsiDeviceIndex */
637 lun->lun_se_dev->dev_index,
638 /* scsiAuthIntrTgtPortIndex */
639 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
640 /* scsiAuthIntrIndex */
641 se_nacl->acl_index,
642 /* scsiAuthIntrDevOrPort */
643 1,
644 /* scsiAuthIntrName */
645 se_nacl->initiatorname[0] ?
646 se_nacl->initiatorname : NONE,
647 /* FIXME: scsiAuthIntrLunMapIndex */
648 0,
649 /* scsiAuthIntrAttachedTimes */
650 deve->attach_count,
651 /* scsiAuthIntrOutCommands */
652 deve->total_cmds,
653 /* scsiAuthIntrReadMegaBytes */
654 (u32)(deve->read_bytes >> 20),
655 /* scsiAuthIntrWrittenMegaBytes */
656 (u32)(deve->write_bytes >> 20),
657 /* FIXME: scsiAuthIntrHSOutCommands */
658 0,
659 /* scsiAuthIntrLastCreation */
660 (u32)(((u32)deve->creation_time -
661 INITIAL_JIFFIES) * 100 / HZ),
662 /* FIXME: scsiAuthIntrRowStatus */
663 "Ready");
664 }
665 spin_unlock_irq(&se_nacl->device_list_lock);
666
667 spin_lock(&se_tpg->acl_node_lock);
668 atomic_dec(&se_nacl->mib_ref_count);
669 smp_mb__after_atomic_dec();
670 }
671 spin_unlock(&se_tpg->acl_node_lock);
672
673 return 0;
674}
675
676static const struct seq_operations scsi_auth_intr_seq_ops = {
677 .start = scsi_auth_intr_seq_start,
678 .next = scsi_auth_intr_seq_next,
679 .stop = scsi_auth_intr_seq_stop,
680 .show = scsi_auth_intr_seq_show
681};
682
683static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
684{
685 return seq_open(file, &scsi_auth_intr_seq_ops);
686}
687
688static const struct file_operations scsi_auth_intr_seq_fops = {
689 .owner = THIS_MODULE,
690 .open = scsi_auth_intr_seq_open,
691 .read = seq_read,
692 .llseek = seq_lseek,
693 .release = seq_release,
694};
695
696/*
697 * SCSI Attached Initiator Port Table:
698 * It lists the SCSI Initiators attached to one of the local Target ports.
699 * Iterates through all active TPGs and uses active sessions from each TPG
700 * to list the info for this table.
701 */
702static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
703{
704 spin_lock_bh(&se_global->se_tpg_lock);
705 return seq_list_start(&se_global->g_se_tpg_list, *pos);
706}
707
708static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
709 loff_t *pos)
710{
711 return seq_list_next(v, &se_global->g_se_tpg_list, pos);
712}
713
714static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
715{
716 spin_unlock_bh(&se_global->se_tpg_lock);
717}
718
719static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
720{
721 struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
722 se_tpg_list);
723 struct se_dev_entry *deve;
724 struct se_lun *lun;
725 struct se_node_acl *se_nacl;
726 struct se_session *se_sess;
727 unsigned char buf[64];
728 int j;
729
730 if (list_is_first(&se_tpg->se_tpg_list,
731 &se_global->g_se_tpg_list))
732 seq_puts(seq, "inst dev port indx port_auth_indx port_name"
733 " port_ident\n");
734
735 if (!(se_tpg))
736 return 0;
737
738 spin_lock(&se_tpg->session_lock);
739 list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
740 if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
741 (!se_sess->se_node_acl) ||
742 (!se_sess->se_node_acl->device_list))
743 continue;
744
745 atomic_inc(&se_sess->mib_ref_count);
746 smp_mb__after_atomic_inc();
747 se_nacl = se_sess->se_node_acl;
748 atomic_inc(&se_nacl->mib_ref_count);
749 smp_mb__after_atomic_inc();
750 spin_unlock(&se_tpg->session_lock);
751
752 spin_lock_irq(&se_nacl->device_list_lock);
753 for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
754 deve = &se_nacl->device_list[j];
755 if (!(deve->lun_flags &
756 TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
757 (!deve->se_lun))
758 continue;
759
760 lun = deve->se_lun;
761 if (!lun->lun_se_dev)
762 continue;
763
764 memset(buf, 0, 64);
765 if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
766 TPG_TFO(se_tpg)->sess_get_initiator_sid(
767 se_sess, (unsigned char *)&buf[0], 64);
768
769 seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
770 /* scsiInstIndex */
771 (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
772 TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
773 0,
774 /* scsiDeviceIndex */
775 lun->lun_se_dev->dev_index,
776 /* scsiPortIndex */
777 TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
778 /* scsiAttIntrPortIndex */
779 (TPG_TFO(se_tpg)->sess_get_index != NULL) ?
780 TPG_TFO(se_tpg)->sess_get_index(se_sess) :
781 0,
782 /* scsiAttIntrPortAuthIntrIdx */
783 se_nacl->acl_index,
784 /* scsiAttIntrPortName */
785 se_nacl->initiatorname[0] ?
786 se_nacl->initiatorname : NONE,
787 /* scsiAttIntrPortIdentifier */
788 buf);
789 }
790 spin_unlock_irq(&se_nacl->device_list_lock);
791
792 spin_lock(&se_tpg->session_lock);
793 atomic_dec(&se_nacl->mib_ref_count);
794 smp_mb__after_atomic_dec();
795 atomic_dec(&se_sess->mib_ref_count);
796 smp_mb__after_atomic_dec();
797 }
798 spin_unlock(&se_tpg->session_lock);
799
800 return 0;
801}
802
803static const struct seq_operations scsi_att_intr_port_seq_ops = {
804 .start = scsi_att_intr_port_seq_start,
805 .next = scsi_att_intr_port_seq_next,
806 .stop = scsi_att_intr_port_seq_stop,
807 .show = scsi_att_intr_port_seq_show
808};
809
810static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
811{
812 return seq_open(file, &scsi_att_intr_port_seq_ops);
813}
814
815static const struct file_operations scsi_att_intr_port_seq_fops = {
816 .owner = THIS_MODULE,
817 .open = scsi_att_intr_port_seq_open,
818 .read = seq_read,
819 .llseek = seq_lseek,
820 .release = seq_release,
821};
822
823/*
824 * SCSI Logical Unit Table
825 */
826static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
827{
828 return locate_hba_start(seq, pos);
829}
830
831static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
832{
833 return locate_hba_next(seq, v, pos);
834}
835
836static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
837{
838 locate_hba_stop(seq, v);
839}
840
841#define SCSI_LU_INDEX 1
842static int scsi_lu_seq_show(struct seq_file *seq, void *v)
843{
844 struct se_hba *hba;
845 struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
846 g_se_dev_list);
847 struct se_device *dev = se_dev->se_dev_ptr;
848 int j;
849 char str[28];
850
851 if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
852 seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
853 " dev_type status state-bit num_cmds read_mbytes"
854 " write_mbytes resets full_stat hs_num_cmds creation_time\n");
855
856 if (!(dev))
857 return 0;
858
859 hba = dev->se_hba;
860 if (!(hba)) {
861 /* Log error ? */
862 return 0;
863 }
864
865 /* Fix LU state, if we can read it from the device */
866 seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
867 dev->dev_index, SCSI_LU_INDEX,
868 (unsigned long long)0, /* FIXME: scsiLuDefaultLun */
869 (strlen(DEV_T10_WWN(dev)->unit_serial)) ?
870 /* scsiLuWwnName */
871 (char *)&DEV_T10_WWN(dev)->unit_serial[0] :
872 "None");
873
874 memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
875 /* scsiLuVendorId */
876 for (j = 0; j < 8; j++)
877 str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
878 DEV_T10_WWN(dev)->vendor[j] : 0x20;
879 str[8] = 0;
880 seq_printf(seq, " %s", str);
881
882 /* scsiLuProductId */
883 for (j = 0; j < 16; j++)
884 str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
885 DEV_T10_WWN(dev)->model[j] : 0x20;
886 str[16] = 0;
887 seq_printf(seq, " %s", str);
888
889 /* scsiLuRevisionId */
890 for (j = 0; j < 4; j++)
891 str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
892 DEV_T10_WWN(dev)->revision[j] : 0x20;
893 str[4] = 0;
894 seq_printf(seq, " %s", str);
895
896 seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
897 /* scsiLuPeripheralType */
898 TRANSPORT(dev)->get_device_type(dev),
899 (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
900 "available" : "notavailable", /* scsiLuStatus */
901 "exposed", /* scsiLuState */
902 (unsigned long long)dev->num_cmds,
903 /* scsiLuReadMegaBytes */
904 (u32)(dev->read_bytes >> 20),
905 /* scsiLuWrittenMegaBytes */
906 (u32)(dev->write_bytes >> 20),
907 dev->num_resets, /* scsiLuInResets */
908 0, /* scsiLuOutTaskSetFullStatus */
909 0, /* scsiLuHSInCommands */
910 (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
911 100 / HZ));
912
913 return 0;
914}
915
916static const struct seq_operations scsi_lu_seq_ops = {
917 .start = scsi_lu_seq_start,
918 .next = scsi_lu_seq_next,
919 .stop = scsi_lu_seq_stop,
920 .show = scsi_lu_seq_show
921};
922
923static int scsi_lu_seq_open(struct inode *inode, struct file *file)
924{
925 return seq_open(file, &scsi_lu_seq_ops);
926}
927
928static const struct file_operations scsi_lu_seq_fops = {
929 .owner = THIS_MODULE,
930 .open = scsi_lu_seq_open,
931 .read = seq_read,
932 .llseek = seq_lseek,
933 .release = seq_release,
934};
935
936/****************************************************************************/
937
938/*
939 * Remove proc fs entries
940 */
941void remove_scsi_target_mib(void)
942{
943 remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
944 remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
945 remove_proc_entry("scsi_target/mib/scsi_port", NULL);
946 remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
947 remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
948 remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
949 remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
950 remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
951 remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
952 remove_proc_entry("scsi_target/mib", NULL);
953}
954
955/*
956 * Create proc fs entries for the mib tables
957 */
958int init_scsi_target_mib(void)
959{
960 struct proc_dir_entry *dir_entry;
961 struct proc_dir_entry *scsi_inst_entry;
962 struct proc_dir_entry *scsi_dev_entry;
963 struct proc_dir_entry *scsi_port_entry;
964 struct proc_dir_entry *scsi_transport_entry;
965 struct proc_dir_entry *scsi_tgt_dev_entry;
966 struct proc_dir_entry *scsi_tgt_port_entry;
967 struct proc_dir_entry *scsi_auth_intr_entry;
968 struct proc_dir_entry *scsi_att_intr_port_entry;
969 struct proc_dir_entry *scsi_lu_entry;
970
971 dir_entry = proc_mkdir("scsi_target/mib", NULL);
972 if (!(dir_entry)) {
973 printk(KERN_ERR "proc_mkdir() failed.\n");
974 return -1;
975 }
976
977 scsi_inst_entry =
978 create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
979 if (scsi_inst_entry)
980 scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
981 else
982 goto error;
983
984 scsi_dev_entry =
985 create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
986 if (scsi_dev_entry)
987 scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
988 else
989 goto error;
990
991 scsi_port_entry =
992 create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
993 if (scsi_port_entry)
994 scsi_port_entry->proc_fops = &scsi_port_seq_fops;
995 else
996 goto error;
997
998 scsi_transport_entry =
999 create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
1000 if (scsi_transport_entry)
1001 scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
1002 else
1003 goto error;
1004
1005 scsi_tgt_dev_entry =
1006 create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
1007 if (scsi_tgt_dev_entry)
1008 scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
1009 else
1010 goto error;
1011
1012 scsi_tgt_port_entry =
1013 create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
1014 if (scsi_tgt_port_entry)
1015 scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
1016 else
1017 goto error;
1018
1019 scsi_auth_intr_entry =
1020 create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
1021 if (scsi_auth_intr_entry)
1022 scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
1023 else
1024 goto error;
1025
1026 scsi_att_intr_port_entry =
1027 create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
1028 if (scsi_att_intr_port_entry)
1029 scsi_att_intr_port_entry->proc_fops =
1030 &scsi_att_intr_port_seq_fops;
1031 else
1032 goto error;
1033
1034 scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
1035 if (scsi_lu_entry)
1036 scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
1037 else
1038 goto error;
1039
1040 return 0;
1041
1042error:
1043 printk(KERN_ERR "create_proc_entry() failed.\n");
1044 remove_scsi_target_mib();
1045 return -1;
1046}
1047
1048/*
1049 * Initialize the index table for allocating unique row indexes to various mib
1050 * tables
1051 */
1052void init_scsi_index_table(void)
1053{
1054 memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
1055 spin_lock_init(&scsi_index_table.lock);
1056}
1057
1058/*
1059 * Allocate a new row index for the entry type specified
1060 */
1061u32 scsi_get_new_index(scsi_index_t type)
1062{
1063 u32 new_index;
1064
1065 if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
1066 printk(KERN_ERR "Invalid index type %d\n", type);
1067 return -1;
1068 }
1069
1070 spin_lock(&scsi_index_table.lock);
1071 new_index = ++scsi_index_table.scsi_mib_index[type];
1072 if (new_index == 0)
1073 new_index = ++scsi_index_table.scsi_mib_index[type];
1074 spin_unlock(&scsi_index_table.lock);
1075
1076 return new_index;
1077}
1078EXPORT_SYMBOL(scsi_get_new_index);
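
The whole of the file removed above is built from the same seq_file boilerplate: a start/next/stop/show quadruple that walks a global list under a lock, wired into procfs through create_proc_entry() and torn down with remove_proc_entry(). For readers unfamiliar with that (pre-proc_create) idiom, a minimal stand-alone sketch follows; the mib_demo names and the single hard-coded record are illustrative only and are not part of drivers/target.

/*
 * Minimal sketch of the seq_file + procfs idiom used by the tables above
 * (2.6.38-era create_proc_entry() API; names are illustrative).
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static void *mib_demo_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* One logical record: hand back a token for *pos == 0 only. */
	return (*pos == 0) ? SEQ_START_TOKEN : NULL;
}

static void *mib_demo_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return NULL;		/* no further records */
}

static void mib_demo_seq_stop(struct seq_file *seq, void *v)
{
	/* nothing to unlock in this sketch */
}

static int mib_demo_seq_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "inst indx\n");		/* header row, like the tables above */
	seq_printf(seq, "%u %u\n", 1u, 1u);	/* one data row */
	return 0;
}

static const struct seq_operations mib_demo_seq_ops = {
	.start	= mib_demo_seq_start,
	.next	= mib_demo_seq_next,
	.stop	= mib_demo_seq_stop,
	.show	= mib_demo_seq_show,
};

static int mib_demo_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &mib_demo_seq_ops);
}

static const struct file_operations mib_demo_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= mib_demo_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init mib_demo_init(void)
{
	struct proc_dir_entry *pde = create_proc_entry("mib_demo", 0, NULL);

	if (!pde)
		return -ENOMEM;
	pde->proc_fops = &mib_demo_seq_fops;
	return 0;
}

static void __exit mib_demo_exit(void)
{
	remove_proc_entry("mib_demo", NULL);
}

module_init(mib_demo_init);
module_exit(mib_demo_exit);
MODULE_LICENSE("GPL");

Reading /proc/mib_demo would then print the header row followed by one data row, which is exactly the shape of the tables being deleted here.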
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h
deleted file mode 100644
index 277204633850..000000000000
--- a/drivers/target/target_core_mib.h
+++ /dev/null
@@ -1,28 +0,0 @@
1#ifndef TARGET_CORE_MIB_H
2#define TARGET_CORE_MIB_H
3
4typedef enum {
5 SCSI_INST_INDEX,
6 SCSI_DEVICE_INDEX,
7 SCSI_AUTH_INTR_INDEX,
8 SCSI_INDEX_TYPE_MAX
9} scsi_index_t;
10
11struct scsi_index_table {
12 spinlock_t lock;
13 u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
14} ____cacheline_aligned;
15
16/* SCSI Port stats */
17struct scsi_port_stats {
18 u64 cmd_pdus;
19 u64 tx_data_octets;
20 u64 rx_data_octets;
21} ____cacheline_aligned;
22
23extern int init_scsi_target_mib(void);
24extern void remove_scsi_target_mib(void);
25extern void init_scsi_index_table(void);
26extern u32 scsi_get_new_index(scsi_index_t);
27
28#endif /*** TARGET_CORE_MIB_H ***/
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 742d24609a9b..f2a08477a68c 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk(
462 */ 462 */
463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path, 463 bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); 464 FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
465 if (!(bd)) { 465 if (IS_ERR(bd)) {
466 printk("pSCSI: blkdev_get_by_path() failed\n"); 466 printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
467 scsi_device_put(sd); 467 scsi_device_put(sd);
468 return NULL; 468 return NULL;
469 } 469 }
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index 158cecbec718..4a109835e420 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -282,6 +282,9 @@ int core_tmr_lun_reset(
282 282
283 atomic_set(&task->task_active, 0); 283 atomic_set(&task->task_active, 0);
284 atomic_set(&task->task_stop, 0); 284 atomic_set(&task->task_stop, 0);
285 } else {
286 if (atomic_read(&task->task_execute_queue) != 0)
287 transport_remove_task_from_execute_queue(task, dev);
285 } 288 }
286 __transport_stop_task_timer(task, &flags); 289 __transport_stop_task_timer(task, &flags);
287 290
@@ -301,6 +304,7 @@ int core_tmr_lun_reset(
301 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" 304 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
302 " task: %p, t_fe_count: %d dev: %p\n", task, 305 " task: %p, t_fe_count: %d dev: %p\n", task,
303 fe_count, dev); 306 fe_count, dev);
307 atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
304 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, 308 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
305 flags); 309 flags);
306 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 310 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
@@ -310,6 +314,7 @@ int core_tmr_lun_reset(
310 } 314 }
311 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," 315 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
312 " t_fe_count: %d dev: %p\n", task, fe_count, dev); 316 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
317 atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
313 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); 318 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
314 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); 319 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
315 320
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index abfa81a57115..c26f67467623 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
275 spin_lock_init(&acl->device_list_lock); 275 spin_lock_init(&acl->device_list_lock);
276 spin_lock_init(&acl->nacl_sess_lock); 276 spin_lock_init(&acl->nacl_sess_lock);
277 atomic_set(&acl->acl_pr_ref_count, 0); 277 atomic_set(&acl->acl_pr_ref_count, 0);
278 atomic_set(&acl->mib_ref_count, 0);
279 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); 278 acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
280 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); 279 snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
281 acl->se_tpg = tpg; 280 acl->se_tpg = tpg;
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
318 cpu_relax(); 317 cpu_relax();
319} 318}
320 319
321void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
322{
323 while (atomic_read(&nacl->mib_ref_count) != 0)
324 cpu_relax();
325}
326
327void core_tpg_clear_object_luns(struct se_portal_group *tpg) 320void core_tpg_clear_object_luns(struct se_portal_group *tpg)
328{ 321{
329 int i, ret; 322 int i, ret;
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl(
480 spin_unlock_bh(&tpg->session_lock); 473 spin_unlock_bh(&tpg->session_lock);
481 474
482 core_tpg_wait_for_nacl_pr_ref(acl); 475 core_tpg_wait_for_nacl_pr_ref(acl);
483 core_tpg_wait_for_mib_ref(acl);
484 core_clear_initiator_node_from_tpg(acl, tpg); 476 core_clear_initiator_node_from_tpg(acl, tpg);
485 core_free_device_list_for_node(acl, tpg); 477 core_free_device_list_for_node(acl, tpg);
486 478
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register);
701 693
702int core_tpg_deregister(struct se_portal_group *se_tpg) 694int core_tpg_deregister(struct se_portal_group *se_tpg)
703{ 695{
696 struct se_node_acl *nacl, *nacl_tmp;
697
704 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" 698 printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
705 " for endpoint: %s Portal Tag %u\n", 699 " for endpoint: %s Portal Tag %u\n",
706 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? 700 (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
714 708
715 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) 709 while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
716 cpu_relax(); 710 cpu_relax();
711 /*
712 * Release any remaining demo-mode generated se_node_acl that have
713 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
714 * in transport_deregister_session().
715 */
716 spin_lock_bh(&se_tpg->acl_node_lock);
717 list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
718 acl_list) {
719 list_del(&nacl->acl_list);
720 se_tpg->num_node_acls--;
721 spin_unlock_bh(&se_tpg->acl_node_lock);
722
723 core_tpg_wait_for_nacl_pr_ref(nacl);
724 core_free_device_list_for_node(nacl, se_tpg);
725 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl);
726
727 spin_lock_bh(&se_tpg->acl_node_lock);
728 }
729 spin_unlock_bh(&se_tpg->acl_node_lock);
717 730
718 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 731 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
719 core_tpg_release_virtual_lun0(se_tpg); 732 core_tpg_release_virtual_lun0(se_tpg);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 28b6292ff298..4bbf6c147f89 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -379,6 +379,40 @@ void release_se_global(void)
379 se_global = NULL; 379 se_global = NULL;
380} 380}
381 381
382/* SCSI statistics table index */
383static struct scsi_index_table scsi_index_table;
384
385/*
386 * Initialize the index table for allocating unique row indexes to various mib
387 * tables.
388 */
389void init_scsi_index_table(void)
390{
391 memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
392 spin_lock_init(&scsi_index_table.lock);
393}
394
395/*
396 * Allocate a new row index for the entry type specified
397 */
398u32 scsi_get_new_index(scsi_index_t type)
399{
400 u32 new_index;
401
402 if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
403 printk(KERN_ERR "Invalid index type %d\n", type);
404 return -EINVAL;
405 }
406
407 spin_lock(&scsi_index_table.lock);
408 new_index = ++scsi_index_table.scsi_mib_index[type];
409 if (new_index == 0)
410 new_index = ++scsi_index_table.scsi_mib_index[type];
411 spin_unlock(&scsi_index_table.lock);
412
413 return new_index;
414}
415
382void transport_init_queue_obj(struct se_queue_obj *qobj) 416void transport_init_queue_obj(struct se_queue_obj *qobj)
383{ 417{
384 atomic_set(&qobj->queue_cnt, 0); 418 atomic_set(&qobj->queue_cnt, 0);
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void)
437 } 471 }
438 INIT_LIST_HEAD(&se_sess->sess_list); 472 INIT_LIST_HEAD(&se_sess->sess_list);
439 INIT_LIST_HEAD(&se_sess->sess_acl_list); 473 INIT_LIST_HEAD(&se_sess->sess_acl_list);
440 atomic_set(&se_sess->mib_ref_count, 0);
441 474
442 return se_sess; 475 return se_sess;
443} 476}
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess)
546 transport_free_session(se_sess); 579 transport_free_session(se_sess);
547 return; 580 return;
548 } 581 }
549 /*
550 * Wait for possible reference in drivers/target/target_core_mib.c:
551 * scsi_att_intr_port_seq_show()
552 */
553 while (atomic_read(&se_sess->mib_ref_count) != 0)
554 cpu_relax();
555 582
556 spin_lock_bh(&se_tpg->session_lock); 583 spin_lock_bh(&se_tpg->session_lock);
557 list_del(&se_sess->sess_list); 584 list_del(&se_sess->sess_list);
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess)
574 spin_unlock_bh(&se_tpg->acl_node_lock); 601 spin_unlock_bh(&se_tpg->acl_node_lock);
575 602
576 core_tpg_wait_for_nacl_pr_ref(se_nacl); 603 core_tpg_wait_for_nacl_pr_ref(se_nacl);
577 core_tpg_wait_for_mib_ref(se_nacl);
578 core_free_device_list_for_node(se_nacl, se_tpg); 604 core_free_device_list_for_node(se_nacl, se_tpg);
579 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, 605 TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg,
580 se_nacl); 606 se_nacl);
@@ -1181,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev)
1181 * 1207 *
1182 * 1208 *
1183 */ 1209 */
1184static void transport_remove_task_from_execute_queue( 1210void transport_remove_task_from_execute_queue(
1185 struct se_task *task, 1211 struct se_task *task,
1186 struct se_device *dev) 1212 struct se_device *dev)
1187{ 1213{
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map(
4827 4853
4828 return ret; 4854 return ret;
4829 } 4855 }
4856
4857 BUG_ON(list_empty(se_mem_list));
4830 /* 4858 /*
4831 * This is the normal path for all normal non BIDI and BIDI-COMMAND 4859 * This is the normal path for all normal non BIDI and BIDI-COMMAND
4832 * WRITE payloads.. If we need to do BIDI READ passthrough for 4860 * WRITE payloads.. If we need to do BIDI READ passthrough for
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd)
5008 struct se_mem *se_mem = NULL, *se_mem_lout = NULL; 5036 struct se_mem *se_mem = NULL, *se_mem_lout = NULL;
5009 u32 se_mem_cnt = 0, task_offset = 0; 5037 u32 se_mem_cnt = 0, task_offset = 0;
5010 5038
5011 BUG_ON(list_empty(cmd->t_task->t_mem_list)); 5039 if (!list_empty(T_TASK(cmd)->t_mem_list))
5040 se_mem = list_entry(T_TASK(cmd)->t_mem_list->next,
5041 struct se_mem, se_list);
5012 5042
5013 ret = transport_do_se_mem_map(dev, task, 5043 ret = transport_do_se_mem_map(dev, task,
5014 cmd->t_task->t_mem_list, NULL, se_mem, 5044 cmd->t_task->t_mem_list, NULL, se_mem,
@@ -5519,7 +5549,8 @@ static void transport_generic_wait_for_tasks(
5519 5549
5520 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); 5550 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
5521 } 5551 }
5522 if (!atomic_read(&T_TASK(cmd)->t_transport_active)) 5552 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
5553 atomic_read(&T_TASK(cmd)->t_transport_aborted))
5523 goto remove; 5554 goto remove;
5524 5555
5525 atomic_set(&T_TASK(cmd)->t_transport_stop, 1); 5556 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
@@ -5926,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev)
5926 5957
5927 atomic_set(&task->task_active, 0); 5958 atomic_set(&task->task_active, 0);
5928 atomic_set(&task->task_stop, 0); 5959 atomic_set(&task->task_stop, 0);
5960 } else {
5961 if (atomic_read(&task->task_execute_queue) != 0)
5962 transport_remove_task_from_execute_queue(task, dev);
5929 } 5963 }
5930 __transport_stop_task_timer(task, &flags); 5964 __transport_stop_task_timer(task, &flags);
5931 5965
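
As part of the same cleanup, scsi_get_new_index() and init_scsi_index_table() move from the deleted target_core_mib.c into target_core_transport.c above (with the error return changed from -1 to -EINVAL). The allocator's one subtlety is that it never returns 0: the per-type counter is pre-incremented and, should it wrap to zero, incremented once more under the same spinlock. A stand-alone illustration of that rule (illustrative names; the spinlock is omitted because only the arithmetic is shown):

/* Sketch of the wrap-around rule in scsi_get_new_index(); not kernel code. */
static unsigned int demo_index_counter = 0xffffffffu;	/* about to wrap */

static unsigned int demo_get_new_index(void)
{
	unsigned int new_index = ++demo_index_counter;	/* wraps to 0 here */

	if (new_index == 0)
		new_index = ++demo_index_counter;	/* caller sees 1, never 0 */
	return new_index;
}

Note that scsi_get_new_index() still takes a scsi_index_t argument, so that enum presumably survives in a core header outside this excerpt even though target_core_mib.h is removed.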
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba3ca23..bf7c687519ef 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig THERMAL 5menuconfig THERMAL
6 tristate "Generic Thermal sysfs driver" 6 tristate "Generic Thermal sysfs driver"
7 depends on NET
8 help 7 help
9 Generic Thermal Sysfs driver offers a generic mechanism for 8 Generic Thermal Sysfs driver offers a generic mechanism for
10 thermal management. Usually it's made up of one or more thermal 9 thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c79280..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
62 62
63static unsigned int thermal_event_seqnum; 63static unsigned int thermal_event_seqnum;
64 64
65static struct genl_family thermal_event_genl_family = {
66 .id = GENL_ID_GENERATE,
67 .name = THERMAL_GENL_FAMILY_NAME,
68 .version = THERMAL_GENL_VERSION,
69 .maxattr = THERMAL_GENL_ATTR_MAX,
70};
71
72static struct genl_multicast_group thermal_event_mcgrp = {
73 .name = THERMAL_GENL_MCAST_GROUP_NAME,
74};
75
76static int genetlink_init(void);
77static void genetlink_exit(void);
78
79static int get_idr(struct idr *idr, struct mutex *lock, int *id) 65static int get_idr(struct idr *idr, struct mutex *lock, int *id)
80{ 66{
81 int err; 67 int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1225 1211
1226EXPORT_SYMBOL(thermal_zone_device_unregister); 1212EXPORT_SYMBOL(thermal_zone_device_unregister);
1227 1213
1214#ifdef CONFIG_NET
1215static struct genl_family thermal_event_genl_family = {
1216 .id = GENL_ID_GENERATE,
1217 .name = THERMAL_GENL_FAMILY_NAME,
1218 .version = THERMAL_GENL_VERSION,
1219 .maxattr = THERMAL_GENL_ATTR_MAX,
1220};
1221
1222static struct genl_multicast_group thermal_event_mcgrp = {
1223 .name = THERMAL_GENL_MCAST_GROUP_NAME,
1224};
1225
1228int generate_netlink_event(u32 orig, enum events event) 1226int generate_netlink_event(u32 orig, enum events event)
1229{ 1227{
1230 struct sk_buff *skb; 1228 struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
1301 return result; 1299 return result;
1302} 1300}
1303 1301
1302static void genetlink_exit(void)
1303{
1304 genl_unregister_family(&thermal_event_genl_family);
1305}
1306#else /* !CONFIG_NET */
1307static inline int genetlink_init(void) { return 0; }
1308static inline void genetlink_exit(void) {}
1309#endif /* !CONFIG_NET */
1310
1304static int __init thermal_init(void) 1311static int __init thermal_init(void)
1305{ 1312{
1306 int result = 0; 1313 int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
1316 return result; 1323 return result;
1317} 1324}
1318 1325
1319static void genetlink_exit(void)
1320{
1321 genl_unregister_family(&thermal_event_genl_family);
1322}
1323
1324static void __exit thermal_exit(void) 1326static void __exit thermal_exit(void)
1325{ 1327{
1326 class_unregister(&thermal_class); 1328 class_unregister(&thermal_class);
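
The thermal hunks above drop the hard "depends on NET" restriction from Kconfig and instead compile the generic-netlink pieces only when CONFIG_NET is set, supplying inline no-op stubs otherwise so the callers in thermal_init()/thermal_exit() keep building. A generic sketch of that idiom (hypothetical CONFIG_FOO and function names, not the thermal code itself):

#ifdef CONFIG_FOO
static int foo_genetlink_init(void)
{
	/* real genl_register_family()-style setup would go here */
	return 0;
}

static void foo_genetlink_exit(void)
{
	/* matching genl_unregister_family()-style teardown */
}
#else /* !CONFIG_FOO */
static inline int foo_genetlink_init(void) { return 0; }
static inline void foo_genetlink_exit(void) {}
#endif /* !CONFIG_FOO */

Callers invoke foo_genetlink_init()/foo_genetlink_exit() unconditionally; when the option is off, the inline stubs compile away to nothing.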
diff --git a/drivers/tty/serial/68328serial.c b/drivers/tty/serial/68328serial.c
index be0ebce36e54..de0160e3f8c4 100644
--- a/drivers/tty/serial/68328serial.c
+++ b/drivers/tty/serial/68328serial.c
@@ -262,7 +262,7 @@ static void status_handle(struct m68k_serial *info, unsigned short status)
262 262
263static void receive_chars(struct m68k_serial *info, unsigned short rx) 263static void receive_chars(struct m68k_serial *info, unsigned short rx)
264{ 264{
265 struct tty_struct *tty = info->port.tty; 265 struct tty_struct *tty = info->tty;
266 m68328_uart *uart = &uart_addr[info->line]; 266 m68328_uart *uart = &uart_addr[info->line];
267 unsigned char ch, flag; 267 unsigned char ch, flag;
268 268
@@ -329,7 +329,7 @@ static void transmit_chars(struct m68k_serial *info)
329 goto clear_and_return; 329 goto clear_and_return;
330 } 330 }
331 331
332 if((info->xmit_cnt <= 0) || info->port.tty->stopped) { 332 if((info->xmit_cnt <= 0) || info->tty->stopped) {
333 /* That's peculiar... TX ints off */ 333 /* That's peculiar... TX ints off */
334 uart->ustcnt &= ~USTCNT_TX_INTR_MASK; 334 uart->ustcnt &= ~USTCNT_TX_INTR_MASK;
335 goto clear_and_return; 335 goto clear_and_return;
@@ -383,7 +383,7 @@ static void do_softint(struct work_struct *work)
383 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue); 383 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue);
384 struct tty_struct *tty; 384 struct tty_struct *tty;
385 385
386 tty = info->port.tty; 386 tty = info->tty;
387 if (!tty) 387 if (!tty)
388 return; 388 return;
389#if 0 389#if 0
@@ -407,7 +407,7 @@ static void do_serial_hangup(struct work_struct *work)
407 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup); 407 struct m68k_serial *info = container_of(work, struct m68k_serial, tqueue_hangup);
408 struct tty_struct *tty; 408 struct tty_struct *tty;
409 409
410 tty = info->port.tty; 410 tty = info->tty;
411 if (!tty) 411 if (!tty)
412 return; 412 return;
413 413
@@ -451,8 +451,8 @@ static int startup(struct m68k_serial * info)
451 uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK; 451 uart->ustcnt = USTCNT_UEN | USTCNT_RXEN | USTCNT_RX_INTR_MASK;
452#endif 452#endif
453 453
454 if (info->port.tty) 454 if (info->tty)
455 clear_bit(TTY_IO_ERROR, &info->port.tty->flags); 455 clear_bit(TTY_IO_ERROR, &info->tty->flags);
456 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; 456 info->xmit_cnt = info->xmit_head = info->xmit_tail = 0;
457 457
458 /* 458 /*
@@ -486,8 +486,8 @@ static void shutdown(struct m68k_serial * info)
486 info->xmit_buf = 0; 486 info->xmit_buf = 0;
487 } 487 }
488 488
489 if (info->port.tty) 489 if (info->tty)
490 set_bit(TTY_IO_ERROR, &info->port.tty->flags); 490 set_bit(TTY_IO_ERROR, &info->tty->flags);
491 491
492 info->flags &= ~S_INITIALIZED; 492 info->flags &= ~S_INITIALIZED;
493 local_irq_restore(flags); 493 local_irq_restore(flags);
@@ -553,9 +553,9 @@ static void change_speed(struct m68k_serial *info)
553 unsigned cflag; 553 unsigned cflag;
554 int i; 554 int i;
555 555
556 if (!info->port.tty || !info->port.tty->termios) 556 if (!info->tty || !info->tty->termios)
557 return; 557 return;
558 cflag = info->port.tty->termios->c_cflag; 558 cflag = info->tty->termios->c_cflag;
559 if (!(port = info->port)) 559 if (!(port = info->port))
560 return; 560 return;
561 561
@@ -970,7 +970,6 @@ static void send_break(struct m68k_serial * info, unsigned int duration)
970static int rs_ioctl(struct tty_struct *tty, struct file * file, 970static int rs_ioctl(struct tty_struct *tty, struct file * file,
971 unsigned int cmd, unsigned long arg) 971 unsigned int cmd, unsigned long arg)
972{ 972{
973 int error;
974 struct m68k_serial * info = (struct m68k_serial *)tty->driver_data; 973 struct m68k_serial * info = (struct m68k_serial *)tty->driver_data;
975 int retval; 974 int retval;
976 975
@@ -1104,7 +1103,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
1104 tty_ldisc_flush(tty); 1103 tty_ldisc_flush(tty);
1105 tty->closing = 0; 1104 tty->closing = 0;
1106 info->event = 0; 1105 info->event = 0;
1107 info->port.tty = NULL; 1106 info->tty = NULL;
1108#warning "This is not and has never been valid so fix it" 1107#warning "This is not and has never been valid so fix it"
1109#if 0 1108#if 0
1110 if (tty->ldisc.num != ldiscs[N_TTY].num) { 1109 if (tty->ldisc.num != ldiscs[N_TTY].num) {
@@ -1142,7 +1141,7 @@ void rs_hangup(struct tty_struct *tty)
1142 info->event = 0; 1141 info->event = 0;
1143 info->count = 0; 1142 info->count = 0;
1144 info->flags &= ~S_NORMAL_ACTIVE; 1143 info->flags &= ~S_NORMAL_ACTIVE;
1145 info->port.tty = NULL; 1144 info->tty = NULL;
1146 wake_up_interruptible(&info->open_wait); 1145 wake_up_interruptible(&info->open_wait);
1147} 1146}
1148 1147
@@ -1261,7 +1260,7 @@ int rs_open(struct tty_struct *tty, struct file * filp)
1261 1260
1262 info->count++; 1261 info->count++;
1263 tty->driver_data = info; 1262 tty->driver_data = info;
1264 info->port.tty = tty; 1263 info->tty = tty;
1265 1264
1266 /* 1265 /*
1267 * Start up serial port 1266 * Start up serial port
@@ -1338,7 +1337,7 @@ rs68328_init(void)
1338 info = &m68k_soft[i]; 1337 info = &m68k_soft[i];
1339 info->magic = SERIAL_MAGIC; 1338 info->magic = SERIAL_MAGIC;
1340 info->port = (int) &uart_addr[i]; 1339 info->port = (int) &uart_addr[i];
1341 info->port.tty = NULL; 1340 info->tty = NULL;
1342 info->irq = uart_irqs[i]; 1341 info->irq = uart_irqs[i];
1343 info->custom_divisor = 16; 1342 info->custom_divisor = 16;
1344 info->close_delay = 50; 1343 info->close_delay = 50;
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c
index beb1afa27d8d..7b951adac54b 100644
--- a/drivers/tty/serial/max3100.c
+++ b/drivers/tty/serial/max3100.c
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
601 s->rts = 0; 601 s->rts = 0;
602 602
603 sprintf(b, "max3100-%d", s->minor); 603 sprintf(b, "max3100-%d", s->minor);
604 s->workqueue = create_freezeable_workqueue(b); 604 s->workqueue = create_freezable_workqueue(b);
605 if (!s->workqueue) { 605 if (!s->workqueue) {
606 dev_warn(&s->spi->dev, "cannot create workqueue\n"); 606 dev_warn(&s->spi->dev, "cannot create workqueue\n");
607 return -EBUSY; 607 return -EBUSY;
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c
index 910870edf708..750b4f627315 100644
--- a/drivers/tty/serial/max3107.c
+++ b/drivers/tty/serial/max3107.c
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
833 struct max3107_port *s = container_of(port, struct max3107_port, port); 833 struct max3107_port *s = container_of(port, struct max3107_port, port);
834 834
835 /* Initialize work queue */ 835 /* Initialize work queue */
836 s->workqueue = create_freezeable_workqueue("max3107"); 836 s->workqueue = create_freezable_workqueue("max3107");
837 if (!s->workqueue) { 837 if (!s->workqueue) {
838 dev_err(&s->spi->dev, "Workqueue creation failed\n"); 838 dev_err(&s->spi->dev, "Workqueue creation failed\n");
839 return -EBUSY; 839 return -EBUSY;
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c
index 93760b2ea172..1ef4df9bf7e4 100644
--- a/drivers/tty/serial/serial_cs.c
+++ b/drivers/tty/serial/serial_cs.c
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = {
712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), 712 PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), 713 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01),
714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), 714 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05),
715 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), 716 PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
716 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), 717 PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
717 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), 718 PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 8e0dd254eb11..81f13958e751 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -571,6 +571,7 @@ struct sysrq_state {
571 unsigned int alt_use; 571 unsigned int alt_use;
572 bool active; 572 bool active;
573 bool need_reinject; 573 bool need_reinject;
574 bool reinjecting;
574}; 575};
575 576
576static void sysrq_reinject_alt_sysrq(struct work_struct *work) 577static void sysrq_reinject_alt_sysrq(struct work_struct *work)
@@ -581,6 +582,10 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
581 unsigned int alt_code = sysrq->alt_use; 582 unsigned int alt_code = sysrq->alt_use;
582 583
583 if (sysrq->need_reinject) { 584 if (sysrq->need_reinject) {
585 /* we do not want the assignment to be reordered */
586 sysrq->reinjecting = true;
587 mb();
588
584 /* Simulate press and release of Alt + SysRq */ 589 /* Simulate press and release of Alt + SysRq */
585 input_inject_event(handle, EV_KEY, alt_code, 1); 590 input_inject_event(handle, EV_KEY, alt_code, 1);
586 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1); 591 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 1);
@@ -589,6 +594,9 @@ static void sysrq_reinject_alt_sysrq(struct work_struct *work)
589 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0); 594 input_inject_event(handle, EV_KEY, KEY_SYSRQ, 0);
590 input_inject_event(handle, EV_KEY, alt_code, 0); 595 input_inject_event(handle, EV_KEY, alt_code, 0);
591 input_inject_event(handle, EV_SYN, SYN_REPORT, 1); 596 input_inject_event(handle, EV_SYN, SYN_REPORT, 1);
597
598 mb();
599 sysrq->reinjecting = false;
592 } 600 }
593} 601}
594 602
@@ -599,6 +607,13 @@ static bool sysrq_filter(struct input_handle *handle,
599 bool was_active = sysrq->active; 607 bool was_active = sysrq->active;
600 bool suppress; 608 bool suppress;
601 609
610 /*
611 * Do not filter anything if we are in the process of re-injecting
612 * Alt+SysRq combination.
613 */
614 if (sysrq->reinjecting)
615 return false;
616
602 switch (type) { 617 switch (type) {
603 618
604 case EV_SYN: 619 case EV_SYN:
@@ -629,7 +644,7 @@ static bool sysrq_filter(struct input_handle *handle,
629 sysrq->alt_use = sysrq->alt; 644 sysrq->alt_use = sysrq->alt;
630 /* 645 /*
631 * If nothing else will be pressed we'll need 646 * If nothing else will be pressed we'll need
632 * to * re-inject Alt-SysRq keystroke. 647 * to re-inject Alt-SysRq keystroke.
633 */ 648 */
634 sysrq->need_reinject = true; 649 sysrq->need_reinject = true;
635 } 650 }
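
The sysrq hunks above close a feedback loop: the Alt+SysRq keystrokes the driver re-injects from its workqueue would otherwise be run back through its own filter. The fix publishes a reinjecting flag, bracketed by full memory barriers around the injection, and the filter passes events straight through while it is set. In outline (illustrative names; kernel context assumed for mb()):

/* Sketch of the re-injection guard pattern; not the actual sysrq code. */
struct demo_sysrq_state {
	bool reinjecting;
};

/* Workqueue side: wrap the injected events with the flag. */
static void demo_reinject(struct demo_sysrq_state *s)
{
	s->reinjecting = true;
	mb();			/* flag must be visible before the events */
	/* input_inject_event() calls would go here */
	mb();			/* events must be complete before clearing */
	s->reinjecting = false;
}

/* Filter side: never filter the events we injected ourselves. */
static bool demo_filter(struct demo_sysrq_state *s)
{
	if (s->reinjecting)
		return false;	/* pass through untouched */
	/* normal Alt+SysRq detection would follow */
	return true;
}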
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c6826e43..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2681 2681
2682 mutex_lock(&usb_address0_mutex); 2682 mutex_lock(&usb_address0_mutex);
2683 2683
2684 if (!udev->config && oldspeed == USB_SPEED_SUPER) { 2684 /* Reset the device; full speed may morph to high speed */
2685 /* Don't reset USB 3.0 devices during an initial setup */ 2685 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2686 usb_set_device_state(udev, USB_STATE_DEFAULT); 2686 retval = hub_port_reset(hub, port1, udev, delay);
2687 } else { 2687 if (retval < 0) /* error or disconnect */
2688 /* Reset the device; full speed may morph to high speed */ 2688 goto fail;
2689 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ 2689 /* success, speed is known */
2690 retval = hub_port_reset(hub, port1, udev, delay); 2690
2691 if (retval < 0) /* error or disconnect */
2692 goto fail;
2693 /* success, speed is known */
2694 }
2695 retval = -ENODEV; 2691 retval = -ENODEV;
2696 2692
2697 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { 2693 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 44c595432d6f..81ce6a8e1d94 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = {
48 { USB_DEVICE(0x04b4, 0x0526), .driver_info = 48 { USB_DEVICE(0x04b4, 0x0526), .driver_info =
49 USB_QUIRK_CONFIG_INTF_STRINGS }, 49 USB_QUIRK_CONFIG_INTF_STRINGS },
50 50
51 /* Samsung Android phone modem - ID conflict with SPH-I500 */
52 { USB_DEVICE(0x04e8, 0x6601), .driver_info =
53 USB_QUIRK_CONFIG_INTF_STRINGS },
54
51 /* Roland SC-8820 */ 55 /* Roland SC-8820 */
52 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 56 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
53 57
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = {
68 /* M-Systems Flash Disk Pioneers */ 72 /* M-Systems Flash Disk Pioneers */
69 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, 73 { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME },
70 74
75 /* Keytouch QWERTY Panel keyboard */
76 { USB_DEVICE(0x0926, 0x3333), .driver_info =
77 USB_QUIRK_CONFIG_INTF_STRINGS },
78
71 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ 79 /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
72 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, 80 { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
73 81
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c
index 3c6e1a058745..5e1495097ec3 100644
--- a/drivers/usb/gadget/f_phonet.c
+++ b/drivers/usb/gadget/f_phonet.c
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req)
346 346
347 if (unlikely(!skb)) 347 if (unlikely(!skb))
348 break; 348 break;
349 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0,
350 req->actual);
351 page = NULL;
352 349
353 if (req->actual < req->length) { /* Last fragment */ 350 if (skb->len == 0) { /* First fragment */
354 skb->protocol = htons(ETH_P_PHONET); 351 skb->protocol = htons(ETH_P_PHONET);
355 skb_reset_mac_header(skb); 352 skb_reset_mac_header(skb);
356 pskb_pull(skb, 1); 353 /* Can't use pskb_pull() on page in IRQ */
354 memcpy(skb_put(skb, 1), page_address(page), 1);
355 }
356
357 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
358 skb->len == 0, req->actual);
359 page = NULL;
360
361 if (req->actual < req->length) { /* Last fragment */
357 skb->dev = dev; 362 skb->dev = dev;
358 dev->stats.rx_packets++; 363 dev->stats.rx_packets++;
359 dev->stats.rx_bytes += skb->len; 364 dev->stats.rx_bytes += skb->len;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index e8f4f36fdf0b..a6f21b891f68 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -29,6 +29,7 @@
29 29
30#include <linux/of.h> 30#include <linux/of.h>
31#include <linux/of_platform.h> 31#include <linux/of_platform.h>
32#include <linux/of_address.h>
32 33
33/** 34/**
34 * ehci_xilinx_of_setup - Initialize the device for ehci_reset() 35 * ehci_xilinx_of_setup - Initialize the device for ehci_reset()
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
169 } 169 }
170} 170}
171 171
172void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) 172void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
173{ 173{
174 void *addr; 174 struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
175 void __iomem *addr;
175 u32 temp; 176 u32 temp;
176 u64 temp_64; 177 u64 temp_64;
177 178
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
449 } 450 }
450} 451}
451 452
452void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) 453static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
453{ 454{
454 /* Fields are 32 bits wide, DMA addresses are in bytes */ 455 /* Fields are 32 bits wide, DMA addresses are in bytes */
455 int field_size = 32 / 8; 456 int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
488 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); 489 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
489} 490}
490 491
491void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, 492static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
492 struct xhci_container_ctx *ctx, 493 struct xhci_container_ctx *ctx,
493 unsigned int last_ep) 494 unsigned int last_ep)
494{ 495{
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
307 307
308/***************** Streams structures manipulation *************************/ 308/***************** Streams structures manipulation *************************/
309 309
310void xhci_free_stream_ctx(struct xhci_hcd *xhci, 310static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
311 unsigned int num_stream_ctxs, 311 unsigned int num_stream_ctxs,
312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
313{ 313{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
335 * The stream context array must be a power of 2, and can be as small as 335 * The stream context array must be a power of 2, and can be as small as
336 * 64 bytes or as large as 1MB. 336 * 64 bytes or as large as 1MB.
337 */ 337 */
338struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 338static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
339 unsigned int num_stream_ctxs, dma_addr_t *dma, 339 unsigned int num_stream_ctxs, dma_addr_t *dma,
340 gfp_t mem_flags) 340 gfp_t mem_flags)
341{ 341{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1900 val &= DBOFF_MASK; 1900 val &= DBOFF_MASK;
1901 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 1901 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
1902 " from cap regs base addr\n", val); 1902 " from cap regs base addr\n", val);
1903 xhci->dba = (void *) xhci->cap_regs + val; 1903 xhci->dba = (void __iomem *) xhci->cap_regs + val;
1904 xhci_dbg_regs(xhci); 1904 xhci_dbg_regs(xhci);
1905 xhci_print_run_regs(xhci); 1905 xhci_print_run_regs(xhci);
1906 /* Set ir_set to interrupt register set 0 */ 1906 /* Set ir_set to interrupt register set 0 */
1907 xhci->ir_set = (void *) xhci->run_regs->ir_set; 1907 xhci->ir_set = &xhci->run_regs->ir_set[0];
1908 1908
1909 /* 1909 /*
1910 * Event ring setup: Allocate a normal ring, but also setup 1910 * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1961 /* Set the event ring dequeue address */ 1961 /* Set the event ring dequeue address */
1962 xhci_set_hc_event_deq(xhci); 1962 xhci_set_hc_event_deq(xhci);
1963 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 1963 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
1964 xhci_print_ir_set(xhci, xhci->ir_set, 0); 1964 xhci_print_ir_set(xhci, 0);
1965 1965
1966 /* 1966 /*
1967 * XXX: Might need to set the Interrupter Moderation Register to 1967 * XXX: Might need to set the Interrupter Moderation Register to
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
474 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 474 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
475 dev->eps[ep_index].stopped_trb, 475 dev->eps[ep_index].stopped_trb,
476 &state->new_cycle_state); 476 &state->new_cycle_state);
477 if (!state->new_deq_seg) 477 if (!state->new_deq_seg) {
478 BUG(); 478 WARN_ON(1);
479 return;
480 }
481
479 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 482 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
480 xhci_dbg(xhci, "Finding endpoint context\n"); 483 xhci_dbg(xhci, "Finding endpoint context\n");
481 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 484 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
486 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 489 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
487 state->new_deq_ptr, 490 state->new_deq_ptr,
488 &state->new_cycle_state); 491 &state->new_cycle_state);
489 if (!state->new_deq_seg) 492 if (!state->new_deq_seg) {
490 BUG(); 493 WARN_ON(1);
494 return;
495 }
491 496
492 trb = &state->new_deq_ptr->generic; 497 trb = &state->new_deq_ptr->generic;
493 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && 498 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2363 2368
2364 /* Scatter gather list entries may cross 64KB boundaries */ 2369 /* Scatter gather list entries may cross 64KB boundaries */
2365 running_total = TRB_MAX_BUFF_SIZE - 2370 running_total = TRB_MAX_BUFF_SIZE -
2366 (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2371 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2372 running_total &= TRB_MAX_BUFF_SIZE - 1;
2367 if (running_total != 0) 2373 if (running_total != 0)
2368 num_trbs++; 2374 num_trbs++;
2369 2375
2370 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2376 /* How many more 64KB chunks to transfer, how many more TRBs? */
2371 while (running_total < sg_dma_len(sg)) { 2377 while (running_total < sg_dma_len(sg) && running_total < temp) {
2372 num_trbs++; 2378 num_trbs++;
2373 running_total += TRB_MAX_BUFF_SIZE; 2379 running_total += TRB_MAX_BUFF_SIZE;
2374 } 2380 }
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2394static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2400static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2395{ 2401{
2396 if (num_trbs != 0) 2402 if (num_trbs != 0)
2397 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2403 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2398 "TRBs, %d left\n", __func__, 2404 "TRBs, %d left\n", __func__,
2399 urb->ep->desc.bEndpointAddress, num_trbs); 2405 urb->ep->desc.bEndpointAddress, num_trbs);
2400 if (running_total != urb->transfer_buffer_length) 2406 if (running_total != urb->transfer_buffer_length)
2401 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2407 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2402 "queued %#x (%d), asked for %#x (%d)\n", 2408 "queued %#x (%d), asked for %#x (%d)\n",
2403 __func__, 2409 __func__,
2404 urb->ep->desc.bEndpointAddress, 2410 urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2533 sg = urb->sg; 2539 sg = urb->sg;
2534 addr = (u64) sg_dma_address(sg); 2540 addr = (u64) sg_dma_address(sg);
2535 this_sg_len = sg_dma_len(sg); 2541 this_sg_len = sg_dma_len(sg);
2536 trb_buff_len = TRB_MAX_BUFF_SIZE - 2542 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2537 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2538 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2543 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2539 if (trb_buff_len > urb->transfer_buffer_length) 2544 if (trb_buff_len > urb->transfer_buffer_length)
2540 trb_buff_len = urb->transfer_buffer_length; 2545 trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2572 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2577 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2573 (unsigned int) addr + trb_buff_len); 2578 (unsigned int) addr + trb_buff_len);
2574 if (TRB_MAX_BUFF_SIZE - 2579 if (TRB_MAX_BUFF_SIZE -
2575 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { 2580 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
2576 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2581 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2577 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2582 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2578 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2583 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2616 } 2621 }
2617 2622
2618 trb_buff_len = TRB_MAX_BUFF_SIZE - 2623 trb_buff_len = TRB_MAX_BUFF_SIZE -
2619 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2624 (addr & (TRB_MAX_BUFF_SIZE - 1));
2620 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2625 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2621 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2626 if (running_total + trb_buff_len > urb->transfer_buffer_length)
2622 trb_buff_len = 2627 trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2656 num_trbs = 0; 2661 num_trbs = 0;
2657 /* How much data is (potentially) left before the 64KB boundary? */ 2662 /* How much data is (potentially) left before the 64KB boundary? */
2658 running_total = TRB_MAX_BUFF_SIZE - 2663 running_total = TRB_MAX_BUFF_SIZE -
2659 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2664 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2665 running_total &= TRB_MAX_BUFF_SIZE - 1;
2660 2666
2661 /* If there's some data on this 64KB chunk, or we have to send a 2667 /* If there's some data on this 64KB chunk, or we have to send a
2662 * zero-length transfer, we need at least one TRB 2668 * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2700 /* How much data is in the first TRB? */ 2706 /* How much data is in the first TRB? */
2701 addr = (u64) urb->transfer_dma; 2707 addr = (u64) urb->transfer_dma;
2702 trb_buff_len = TRB_MAX_BUFF_SIZE - 2708 trb_buff_len = TRB_MAX_BUFF_SIZE -
2703 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2709 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2704 if (urb->transfer_buffer_length < trb_buff_len) 2710 if (trb_buff_len > urb->transfer_buffer_length)
2705 trb_buff_len = urb->transfer_buffer_length; 2711 trb_buff_len = urb->transfer_buffer_length;
2706 2712
2707 first_trb = true; 2713 first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
2879 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 2885 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
2880 td_len = urb->iso_frame_desc[i].length; 2886 td_len = urb->iso_frame_desc[i].length;
2881 2887
2882 running_total = TRB_MAX_BUFF_SIZE - 2888 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2883 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2889 running_total &= TRB_MAX_BUFF_SIZE - 1;
2884 if (running_total != 0) 2890 if (running_total != 0)
2885 num_trbs++; 2891 num_trbs++;
2886 2892
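A minimal user-space sketch of the segment arithmetic these xhci hunks converge on. TRB_MAX_BUFF_SIZE is assumed to be the 64KB named in the comments, the sample addresses are illustrative, and the zero-length case the driver handles separately is left out:

#include <stdio.h>
#include <stdint.h>

#define TRB_MAX_BUFF_SIZE 65536u    /* 64KB, assumed from the "64KB chunks" comments */

/* Count the TRBs a buffer needs when no single TRB may cross a
 * 64KB-aligned boundary.  The extra mask folds an exactly aligned
 * start back to 0, so the first test only counts a leading partial
 * segment when one really exists; the loop then adds one TRB per
 * remaining 64KB segment. */
static unsigned int count_trbs(uint64_t addr, uint64_t len)
{
    unsigned int num_trbs = 0;
    uint64_t running_total;

    running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
    running_total &= TRB_MAX_BUFF_SIZE - 1;
    if (running_total != 0)
        num_trbs++;
    while (running_total < len) {
        num_trbs++;
        running_total += TRB_MAX_BUFF_SIZE;
    }
    return num_trbs;
}

int main(void)
{
    printf("%u\n", count_trbs(0x10000, 65536));   /* aligned, one segment: 1 */
    printf("%u\n", count_trbs(0x10010, 65536));   /* crosses one boundary: 2 */
    printf("%u\n", count_trbs(0x10010, 16));      /* fits before the boundary: 1 */
    return 0;
}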
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
109/* 109/*
110 * Set the run bit and wait for the host to be running. 110 * Set the run bit and wait for the host to be running.
111 */ 111 */
112int xhci_start(struct xhci_hcd *xhci) 112static int xhci_start(struct xhci_hcd *xhci)
113{ 113{
114 u32 temp; 114 u32 temp;
115 int ret; 115 int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
329 329
330 330
331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
332void xhci_event_ring_work(unsigned long arg) 332static void xhci_event_ring_work(unsigned long arg)
333{ 333{
334 unsigned long flags; 334 unsigned long flags;
335 int temp; 335 int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
473 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); 473 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
474 xhci_writel(xhci, ER_IRQ_ENABLE(temp), 474 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
475 &xhci->ir_set->irq_pending); 475 &xhci->ir_set->irq_pending);
476 xhci_print_ir_set(xhci, xhci->ir_set, 0); 476 xhci_print_ir_set(xhci, 0);
477 477
478 if (NUM_TEST_NOOPS > 0) 478 if (NUM_TEST_NOOPS > 0)
479 doorbell = xhci_setup_one_noop(xhci); 479 doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
528 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 528 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
529 xhci_writel(xhci, ER_IRQ_DISABLE(temp), 529 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
530 &xhci->ir_set->irq_pending); 530 &xhci->ir_set->irq_pending);
531 xhci_print_ir_set(xhci, xhci->ir_set, 0); 531 xhci_print_ir_set(xhci, 0);
532 532
533 xhci_dbg(xhci, "cleaning up memory\n"); 533 xhci_dbg(xhci, "cleaning up memory\n");
534 xhci_mem_cleanup(xhci); 534 xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
755 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 755 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
756 xhci_writel(xhci, ER_IRQ_DISABLE(temp), 756 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
757 &xhci->ir_set->irq_pending); 757 &xhci->ir_set->irq_pending);
758 xhci_print_ir_set(xhci, xhci->ir_set, 0); 758 xhci_print_ir_set(xhci, 0);
759 759
760 xhci_dbg(xhci, "cleaning up memory\n"); 760 xhci_dbg(xhci, "cleaning up memory\n");
761 xhci_mem_cleanup(xhci); 761 xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
857/* Returns 1 if the arguments are OK; 857/* Returns 1 if the arguments are OK;
858 * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 858 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
859 */ 859 */
860int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, 860static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
861 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, 861 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
862 const char *func) { 862 const char *func) {
863 struct xhci_hcd *xhci; 863 struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1693 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 1693 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1694} 1694}
1695 1695
1696void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 1696static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1697 unsigned int slot_id, unsigned int ep_index, 1697 unsigned int slot_id, unsigned int ep_index,
1698 struct xhci_dequeue_state *deq_state) 1698 struct xhci_dequeue_state *deq_state)
1699{ 1699{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
1348} 1348}
1349 1349
1350/* xHCI debugging */ 1350/* xHCI debugging */
1351void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); 1351void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
1352void xhci_print_registers(struct xhci_hcd *xhci); 1352void xhci_print_registers(struct xhci_hcd *xhci);
1353void xhci_dbg_regs(struct xhci_hcd *xhci); 1353void xhci_dbg_regs(struct xhci_hcd *xhci);
1354void xhci_print_run_regs(struct xhci_hcd *xhci); 1354void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1047d6..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
1864 INIT_LIST_HEAD(&musb->out_bulk); 1864 INIT_LIST_HEAD(&musb->out_bulk);
1865 1865
1866 hcd->uses_new_polling = 1; 1866 hcd->uses_new_polling = 1;
1867 hcd->has_tt = 1;
1867 1868
1868 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1869 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1869 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; 1870 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d74a8113ae74..e6400be8a0f8 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -488,6 +488,15 @@ struct musb {
488 unsigned set_address:1; 488 unsigned set_address:1;
489 unsigned test_mode:1; 489 unsigned test_mode:1;
490 unsigned softconnect:1; 490 unsigned softconnect:1;
491
492 u8 address;
493 u8 test_mode_nr;
494 u16 ackpend; /* ep0 */
495 enum musb_g_ep0_state ep0_state;
496 struct usb_gadget g; /* the gadget */
497 struct usb_gadget_driver *gadget_driver; /* its driver */
498#endif
499
491 /* 500 /*
492 * FIXME: Remove this flag. 501 * FIXME: Remove this flag.
493 * 502 *
@@ -501,14 +510,6 @@ struct musb {
501 */ 510 */
502 unsigned double_buffer_not_ok:1 __deprecated; 511 unsigned double_buffer_not_ok:1 __deprecated;
503 512
504 u8 address;
505 u8 test_mode_nr;
506 u16 ackpend; /* ep0 */
507 enum musb_g_ep0_state ep0_state;
508 struct usb_gadget g; /* the gadget */
509 struct usb_gadget_driver *gadget_driver; /* its driver */
510#endif
511
512 struct musb_hdrc_config *config; 513 struct musb_hdrc_config *config;
513 514
514#ifdef MUSB_CONFIG_PROC_FS 515#ifdef MUSB_CONFIG_PROC_FS
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index a3f12333fc41..bc8badd16897 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb)
362 362
363static int omap2430_musb_exit(struct musb *musb) 363static int omap2430_musb_exit(struct musb *musb)
364{ 364{
365 del_timer_sync(&musb_idle_timer);
365 366
366 omap2430_low_level_exit(musb); 367 omap2430_low_level_exit(musb);
367 otg_put_transceiver(musb->xceiv); 368 otg_put_transceiver(musb->xceiv);
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 7481ff8a49e4..0457813eebee 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = {
301 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ 301 { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */
302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 302 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
303 }, 303 },
304 { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */
305 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
306 },
304 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ 307 { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */
305 308
306 { } 309 { }
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c
index b004b2a485c3..9c014e2ecd68 100644
--- a/drivers/usb/serial/usb_wwan.c
+++ b/drivers/usb/serial/usb_wwan.c
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb)
295 __func__, status, endpoint); 295 __func__, status, endpoint);
296 } else { 296 } else {
297 tty = tty_port_tty_get(&port->port); 297 tty = tty_port_tty_get(&port->port);
298 if (urb->actual_length) { 298 if (tty) {
299 tty_insert_flip_string(tty, data, urb->actual_length); 299 if (urb->actual_length) {
300 tty_flip_buffer_push(tty); 300 tty_insert_flip_string(tty, data,
301 } else 301 urb->actual_length);
302 dbg("%s: empty read urb received", __func__); 302 tty_flip_buffer_push(tty);
303 tty_kref_put(tty); 303 } else
304 dbg("%s: empty read urb received", __func__);
305 tty_kref_put(tty);
306 }
304 307
305 /* Resubmit urb so we continue receiving */ 308 /* Resubmit urb so we continue receiving */
306 if (status != -ESHUTDOWN) { 309 if (status != -ESHUTDOWN) {
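The usb_wwan fix above is the usual get, check for NULL, use, put sequence around a reference that may legitimately be absent once the port has hung up. A user-space sketch of that shape; port_tty_get(), tty_put() and struct tty are illustrative stand-ins, not the real tty API:

#include <stdio.h>
#include <stddef.h>

struct tty { int refs; };

/* stand-in for tty_port_tty_get(): may return NULL when no tty is attached */
static struct tty *port_tty_get(struct tty *attached)
{
    if (attached)
        attached->refs++;
    return attached;
}

static void tty_put(struct tty *t)
{
    if (t)
        t->refs--;
}

static void push_data(struct tty *maybe_tty, const char *data, size_t len)
{
    struct tty *t = port_tty_get(maybe_tty);

    if (t) {                          /* the NULL check the patch adds */
        if (len)
            printf("pushed %zu bytes (\"%s\")\n", len, data);
        else
            printf("empty read urb received\n");
        tty_put(t);                   /* drop the reference only if we took one */
    }
    /* resubmission of the URB would continue here regardless */
}

int main(void)
{
    struct tty t = { 1 };
    push_data(&t, "abc", 3);          /* tty attached: data is pushed */
    push_data(NULL, "abc", 3);        /* hung-up port: nothing pushed, no crash */
    return 0;
}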
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 15a5d89b7f39..1c11959a7d58 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -27,6 +27,7 @@
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28#include <linux/usb.h> 28#include <linux/usb.h>
29#include <linux/usb/serial.h> 29#include <linux/usb/serial.h>
30#include <linux/usb/cdc.h>
30#include "visor.h" 31#include "visor.h"
31 32
32/* 33/*
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial,
479 480
480 dbg("%s", __func__); 481 dbg("%s", __func__);
481 482
483 /*
484 * some Samsung Android phones in modem mode have the same ID
 485	 * as SPH-I500, but they are ACM devices, so don't bind to them

486 */
487 if (id->idVendor == SAMSUNG_VENDOR_ID &&
488 id->idProduct == SAMSUNG_SPH_I500_ID &&
489 serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM &&
490 serial->dev->descriptor.bDeviceSubClass ==
491 USB_CDC_SUBCLASS_ACM)
492 return -ENODEV;
493
482 if (serial->dev->actconfig->desc.bConfigurationValue != 1) { 494 if (serial->dev->actconfig->desc.bConfigurationValue != 1) {
483 dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", 495 dev_err(&serial->dev->dev, "active config #%d != 1 ??\n",
484 serial->dev->actconfig->desc.bConfigurationValue); 496 serial->dev->actconfig->desc.bConfigurationValue);
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 8010aaeb5adb..dd0e84a9bd2f 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
239 lcd->spi = spi; 239 lcd->spi = spi;
240 lcd->power = FB_BLANK_POWERDOWN; 240 lcd->power = FB_BLANK_POWERDOWN;
241 lcd->buffer = kzalloc(8, GFP_KERNEL); 241 lcd->buffer = kzalloc(8, GFP_KERNEL);
242 if (!lcd->buffer) {
243 ret = -ENOMEM;
244 goto out_free_lcd;
245 }
242 246
243 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); 247 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
244 if (IS_ERR(ld)) { 248 if (IS_ERR(ld)) {
245 ret = PTR_ERR(ld); 249 ret = PTR_ERR(ld);
246 goto out_free_lcd; 250 goto out_free_buffer;
247 } 251 }
248 lcd->ld = ld; 252 lcd->ld = ld;
249 253
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
257 261
258out_unregister: 262out_unregister:
259 lcd_device_unregister(ld); 263 lcd_device_unregister(ld);
264out_free_buffer:
265 kfree(lcd->buffer);
260out_free_lcd: 266out_free_lcd:
261 kfree(lcd); 267 kfree(lcd);
262 return ret; 268 return ret;
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
268 274
269 ltv350qv_power(lcd, FB_BLANK_POWERDOWN); 275 ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
270 lcd_device_unregister(lcd->ld); 276 lcd_device_unregister(lcd->ld);
277 kfree(lcd->buffer);
271 kfree(lcd); 278 kfree(lcd);
272 279
273 return 0; 280 return 0;
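The ltv350qv probe/remove hunks extend the usual reverse-order unwind ladder by one rung and mirror the new allocation in remove(). A user-space sketch of that pattern; the helpers, struct layout and use of malloc/free are stand-ins for the driver's API, not the real one:

#include <stdlib.h>
#include <stdio.h>
#include <errno.h>

/* illustrative stand-ins for the driver's two allocations */
struct lcd_state { void *buffer; void *ld; };

static void *register_device(void)          /* stand-in for lcd_device_register() */
{
    return malloc(1);
}

static int probe(struct lcd_state **out)
{
    struct lcd_state *lcd;
    int ret;

    lcd = malloc(sizeof(*lcd));
    if (!lcd)
        return -ENOMEM;

    lcd->buffer = calloc(8, 1);              /* kzalloc(8, GFP_KERNEL) analogue */
    if (!lcd->buffer) {                      /* the check the patch adds */
        ret = -ENOMEM;
        goto out_free_lcd;
    }

    lcd->ld = register_device();
    if (!lcd->ld) {
        ret = -ENOMEM;
        goto out_free_buffer;                /* unwind in reverse allocation order */
    }

    *out = lcd;
    return 0;

out_free_buffer:
    free(lcd->buffer);
out_free_lcd:
    free(lcd);
    return ret;
}

int main(void)
{
    struct lcd_state *lcd = NULL;

    printf("probe -> %d\n", probe(&lcd));
    if (lcd) {                               /* remove() analogue frees everything */
        free(lcd->ld);
        free(lcd->buffer);
        free(lcd);
    }
    return 0;
}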
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 2e2400e7322e..31649b7b672f 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -862,12 +862,12 @@ config SBC_EPX_C3_WATCHDOG
862 862
863# M68K Architecture 863# M68K Architecture
864 864
865config M548x_WATCHDOG 865config M54xx_WATCHDOG
866 tristate "MCF548x watchdog support" 866 tristate "MCF54xx watchdog support"
867 depends on M548x 867 depends on M548x
868 help 868 help
869 To compile this driver as a module, choose M here: the 869 To compile this driver as a module, choose M here: the
870 module will be called m548x_wdt. 870 module will be called m54xx_wdt.
871 871
872# MIPS Architecture 872# MIPS Architecture
873 873
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index dd776651917c..20e44c4782b3 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -106,7 +106,7 @@ obj-$(CONFIG_SBC_EPX_C3_WATCHDOG) += sbc_epx_c3.o
106# M32R Architecture 106# M32R Architecture
107 107
108# M68K Architecture 108# M68K Architecture
109obj-$(CONFIG_M548x_WATCHDOG) += m548x_wdt.o 109obj-$(CONFIG_M54xx_WATCHDOG) += m54xx_wdt.o
110 110
111# MIPS Architecture 111# MIPS Architecture
112obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o 112obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c
index eca855a55c0d..3de4ba0260a5 100644
--- a/drivers/watchdog/cpwd.c
+++ b/drivers/watchdog/cpwd.c
@@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op)
646 struct cpwd *p = dev_get_drvdata(&op->dev); 646 struct cpwd *p = dev_get_drvdata(&op->dev);
647 int i; 647 int i;
648 648
649 for (i = 0; i < 4; i++) { 649 for (i = 0; i < WD_NUMDEVS; i++) {
650 misc_deregister(&p->devs[i].misc); 650 misc_deregister(&p->devs[i].misc);
651 651
652 if (!p->enabled) { 652 if (!p->enabled) {
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c
index 24b966d5061a..204a5603c4ae 100644
--- a/drivers/watchdog/hpwdt.c
+++ b/drivers/watchdog/hpwdt.c
@@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
710 return 0; 710 return 0;
711} 711}
712 712
713static void __devexit hpwdt_exit_nmi_decoding(void) 713static void hpwdt_exit_nmi_decoding(void)
714{ 714{
715 unregister_die_notifier(&die_notifier); 715 unregister_die_notifier(&die_notifier);
716 if (cru_rom_addr) 716 if (cru_rom_addr)
@@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
726 return 0; 726 return 0;
727} 727}
728 728
729static void __devexit hpwdt_exit_nmi_decoding(void) 729static void hpwdt_exit_nmi_decoding(void)
730{ 730{
731} 731}
732#endif /* CONFIG_HPWDT_NMI_DECODING */ 732#endif /* CONFIG_HPWDT_NMI_DECODING */
diff --git a/drivers/watchdog/m548x_wdt.c b/drivers/watchdog/m54xx_wdt.c
index cabbcfe1c847..4d43286074aa 100644
--- a/drivers/watchdog/m548x_wdt.c
+++ b/drivers/watchdog/m54xx_wdt.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * drivers/watchdog/m548x_wdt.c 2 * drivers/watchdog/m54xx_wdt.c
3 * 3 *
4 * Watchdog driver for ColdFire MCF548x processors 4 * Watchdog driver for ColdFire MCF547x & MCF548x processors
5 * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be> 5 * Copyright 2010 (c) Philippe De Muyter <phdm@macqel.be>
6 * 6 *
7 * Adapted from the IXP4xx watchdog driver, which carries these notices: 7 * Adapted from the IXP4xx watchdog driver, which carries these notices:
@@ -29,8 +29,8 @@
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30 30
31#include <asm/coldfire.h> 31#include <asm/coldfire.h>
32#include <asm/m548xsim.h> 32#include <asm/m54xxsim.h>
33#include <asm/m548xgpt.h> 33#include <asm/m54xxgpt.h>
34 34
35static int nowayout = WATCHDOG_NOWAYOUT; 35static int nowayout = WATCHDOG_NOWAYOUT;
36static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */ 36static unsigned int heartbeat = 30; /* (secs) Default is 0.5 minute */
@@ -76,7 +76,7 @@ static void wdt_keepalive(void)
76 __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0); 76 __raw_writel(gms0, MCF_MBAR + MCF_GPT_GMS0);
77} 77}
78 78
79static int m548x_wdt_open(struct inode *inode, struct file *file) 79static int m54xx_wdt_open(struct inode *inode, struct file *file)
80{ 80{
81 if (test_and_set_bit(WDT_IN_USE, &wdt_status)) 81 if (test_and_set_bit(WDT_IN_USE, &wdt_status))
82 return -EBUSY; 82 return -EBUSY;
@@ -86,7 +86,7 @@ static int m548x_wdt_open(struct inode *inode, struct file *file)
86 return nonseekable_open(inode, file); 86 return nonseekable_open(inode, file);
87} 87}
88 88
89static ssize_t m548x_wdt_write(struct file *file, const char *data, 89static ssize_t m54xx_wdt_write(struct file *file, const char *data,
90 size_t len, loff_t *ppos) 90 size_t len, loff_t *ppos)
91{ 91{
92 if (len) { 92 if (len) {
@@ -112,10 +112,10 @@ static ssize_t m548x_wdt_write(struct file *file, const char *data,
112static const struct watchdog_info ident = { 112static const struct watchdog_info ident = {
113 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | 113 .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT |
114 WDIOF_KEEPALIVEPING, 114 WDIOF_KEEPALIVEPING,
115 .identity = "Coldfire M548x Watchdog", 115 .identity = "Coldfire M54xx Watchdog",
116}; 116};
117 117
118static long m548x_wdt_ioctl(struct file *file, unsigned int cmd, 118static long m54xx_wdt_ioctl(struct file *file, unsigned int cmd,
119 unsigned long arg) 119 unsigned long arg)
120{ 120{
121 int ret = -ENOTTY; 121 int ret = -ENOTTY;
@@ -161,7 +161,7 @@ static long m548x_wdt_ioctl(struct file *file, unsigned int cmd,
161 return ret; 161 return ret;
162} 162}
163 163
164static int m548x_wdt_release(struct inode *inode, struct file *file) 164static int m54xx_wdt_release(struct inode *inode, struct file *file)
165{ 165{
166 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) 166 if (test_bit(WDT_OK_TO_CLOSE, &wdt_status))
167 wdt_disable(); 167 wdt_disable();
@@ -177,45 +177,45 @@ static int m548x_wdt_release(struct inode *inode, struct file *file)
177} 177}
178 178
179 179
180static const struct file_operations m548x_wdt_fops = { 180static const struct file_operations m54xx_wdt_fops = {
181 .owner = THIS_MODULE, 181 .owner = THIS_MODULE,
182 .llseek = no_llseek, 182 .llseek = no_llseek,
183 .write = m548x_wdt_write, 183 .write = m54xx_wdt_write,
184 .unlocked_ioctl = m548x_wdt_ioctl, 184 .unlocked_ioctl = m54xx_wdt_ioctl,
185 .open = m548x_wdt_open, 185 .open = m54xx_wdt_open,
186 .release = m548x_wdt_release, 186 .release = m54xx_wdt_release,
187}; 187};
188 188
189static struct miscdevice m548x_wdt_miscdev = { 189static struct miscdevice m54xx_wdt_miscdev = {
190 .minor = WATCHDOG_MINOR, 190 .minor = WATCHDOG_MINOR,
191 .name = "watchdog", 191 .name = "watchdog",
192 .fops = &m548x_wdt_fops, 192 .fops = &m54xx_wdt_fops,
193}; 193};
194 194
195static int __init m548x_wdt_init(void) 195static int __init m54xx_wdt_init(void)
196{ 196{
197 if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4, 197 if (!request_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4,
198 "Coldfire M548x Watchdog")) { 198 "Coldfire M54xx Watchdog")) {
199 printk(KERN_WARNING 199 printk(KERN_WARNING
200 "Coldfire M548x Watchdog : I/O region busy\n"); 200 "Coldfire M54xx Watchdog : I/O region busy\n");
201 return -EBUSY; 201 return -EBUSY;
202 } 202 }
203 printk(KERN_INFO "ColdFire watchdog driver is loaded.\n"); 203 printk(KERN_INFO "ColdFire watchdog driver is loaded.\n");
204 204
205 return misc_register(&m548x_wdt_miscdev); 205 return misc_register(&m54xx_wdt_miscdev);
206} 206}
207 207
208static void __exit m548x_wdt_exit(void) 208static void __exit m54xx_wdt_exit(void)
209{ 209{
210 misc_deregister(&m548x_wdt_miscdev); 210 misc_deregister(&m54xx_wdt_miscdev);
211 release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4); 211 release_mem_region(MCF_MBAR + MCF_GPT_GCIR0, 4);
212} 212}
213 213
214module_init(m548x_wdt_init); 214module_init(m54xx_wdt_init);
215module_exit(m548x_wdt_exit); 215module_exit(m54xx_wdt_exit);
216 216
217MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>"); 217MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
218MODULE_DESCRIPTION("Coldfire M548x Watchdog"); 218MODULE_DESCRIPTION("Coldfire M54xx Watchdog");
219 219
220module_param(heartbeat, int, 0); 220module_param(heartbeat, int, 0);
221MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)"); 221MODULE_PARM_DESC(heartbeat, "Watchdog heartbeat in seconds (default 30s)");
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c
index c7d67e9a7465..79906255eeb6 100644
--- a/drivers/watchdog/sbc_fitpc2_wdt.c
+++ b/drivers/watchdog/sbc_fitpc2_wdt.c
@@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = {
201static int __init fitpc2_wdt_init(void) 201static int __init fitpc2_wdt_init(void)
202{ 202{
203 int err; 203 int err;
204 const char *brd_name;
204 205
205 if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) 206 brd_name = dmi_get_system_info(DMI_BOARD_NAME);
207
208 if (!brd_name || !strstr(brd_name, "SBC-FITPC2"))
206 return -ENODEV; 209 return -ENODEV;
207 210
208 pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); 211 pr_info("%s found\n", brd_name);
209 212
210 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { 213 if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) {
211 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); 214 pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT);
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c
index 0461858e07d0..b61ab1c54293 100644
--- a/drivers/watchdog/sch311x_wdt.c
+++ b/drivers/watchdog/sch311x_wdt.c
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr)
508 sch311x_sio_outb(sio_config_port, 0x07, 0x0a); 508 sch311x_sio_outb(sio_config_port, 0x07, 0x0a);
509 509
510 /* Check if Logical Device Register is currently active */ 510 /* Check if Logical Device Register is currently active */
511 if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0) 511 if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0)
512 printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); 512 printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n");
513 513
514 /* Get the base address of the runtime registers */ 514 /* Get the base address of the runtime registers */
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c
index a6c12dec91a1..df2a64dc9672 100644
--- a/drivers/watchdog/w83697ug_wdt.c
+++ b/drivers/watchdog/w83697ug_wdt.c
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void)
109 outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ 109 outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */
110 outb_p(0x30, WDT_EFER); /* select CR30 */ 110 outb_p(0x30, WDT_EFER); /* select CR30 */
111 c = inb_p(WDT_EFDR); 111 c = inb_p(WDT_EFDR);
112 outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ 112 outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */
113 113
114 return 0; 114 return 0;
115} 115}
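The two watchdog fixes just above (sch311x_wdt and w83697ug_wdt) replace logical operators with bitwise ones. A short standalone demonstration of why the original expressions could never test or set the intended bit; the register value is made up:

#include <stdio.h>

int main(void)
{
    unsigned char c = 0x30;

    /* '==' binds tighter than '&&': the old test parsed as c && (0x01 == 0),
     * which is always 0, so the "LDN not active" branch could never run. */
    printf("%d\n", c && 0x01 == 0);          /* 0, whatever the register holds */
    printf("%d\n", (c & 0x01) == 0);         /* 1: bit 0 really is clear */

    /* '||' yields 0 or 1, it does not set a bit; '|' does. */
    printf("%#x\n", (unsigned)(c || 0x01));  /* 0x1 */
    printf("%#x\n", (unsigned)(c | 0x01));   /* 0x31: bit 0 set as intended */
    return 0;
}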
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index db8c4c4ac880..24177272bcb8 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID;
37#ifdef CONFIG_PM_SLEEP 37#ifdef CONFIG_PM_SLEEP
38static int xen_hvm_suspend(void *data) 38static int xen_hvm_suspend(void *data)
39{ 39{
40 int err;
40 struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; 41 struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
41 int *cancelled = data; 42 int *cancelled = data;
42 43
43 BUG_ON(!irqs_disabled()); 44 BUG_ON(!irqs_disabled());
44 45
46 err = sysdev_suspend(PMSG_SUSPEND);
47 if (err) {
48 printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n",
49 err);
50 return err;
51 }
52
45 *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); 53 *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
46 54
47 xen_hvm_post_suspend(*cancelled); 55 xen_hvm_post_suspend(*cancelled);
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data)
53 xen_timer_resume(); 61 xen_timer_resume();
54 } 62 }
55 63
64 sysdev_resume();
65
56 return 0; 66 return 0;
57} 67}
58 68
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 15690bb1d3b5..789b3afb3423 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
140 candidate->first = candidate->last = index; 140 candidate->first = candidate->last = index;
141 candidate->offset_first = from; 141 candidate->offset_first = from;
142 candidate->to_last = to; 142 candidate->to_last = to;
143 INIT_LIST_HEAD(&candidate->link);
143 candidate->usage = 1; 144 candidate->usage = 1;
144 candidate->state = AFS_WBACK_PENDING; 145 candidate->state = AFS_WBACK_PENDING;
145 init_waitqueue_head(&candidate->waitq); 146 init_waitqueue_head(&candidate->waitq);
diff --git a/fs/aio.c b/fs/aio.c
index fc557a3be0a9..26869cde3953 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
239 call_rcu(&ctx->rcu_head, ctx_rcu_free); 239 call_rcu(&ctx->rcu_head, ctx_rcu_free);
240} 240}
241 241
242#define get_ioctx(kioctx) do { \ 242static inline void get_ioctx(struct kioctx *kioctx)
243 BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ 243{
244 atomic_inc(&(kioctx)->users); \ 244 BUG_ON(atomic_read(&kioctx->users) <= 0);
245} while (0) 245 atomic_inc(&kioctx->users);
246#define put_ioctx(kioctx) do { \ 246}
247 BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ 247
248 if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ 248static inline int try_get_ioctx(struct kioctx *kioctx)
249 __put_ioctx(kioctx); \ 249{
250} while (0) 250 return atomic_inc_not_zero(&kioctx->users);
251}
252
253static inline void put_ioctx(struct kioctx *kioctx)
254{
255 BUG_ON(atomic_read(&kioctx->users) <= 0);
256 if (unlikely(atomic_dec_and_test(&kioctx->users)))
257 __put_ioctx(kioctx);
258}
251 259
252/* ioctx_alloc 260/* ioctx_alloc
253 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. 261 * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
601 rcu_read_lock(); 609 rcu_read_lock();
602 610
603 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { 611 hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
604 if (ctx->user_id == ctx_id && !ctx->dead) { 612 /*
605 get_ioctx(ctx); 613 * RCU protects us against accessing freed memory but
614 * we have to be careful not to get a reference when the
615 * reference count already dropped to 0 (ctx->dead test
616 * is unreliable because of races).
617 */
618 if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
606 ret = ctx; 619 ret = ctx;
607 break; 620 break;
608 } 621 }
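The aio hunks above turn the get/put macros into inline helpers and add try_get_ioctx(), built on atomic_inc_not_zero(), so a lookup under RCU never takes a reference on a kioctx whose count has already dropped to zero. A user-space sketch of that try-get rule using C11 atomics; struct obj, try_get() and put() are illustrative names rather than kernel API:

#include <stdatomic.h>
#include <stdio.h>

struct obj {
    atomic_int users;
};

/* analogue of atomic_inc_not_zero(): take a reference only if the
 * count is still non-zero, never resurrect a dying object */
static int try_get(struct obj *o)
{
    int v = atomic_load(&o->users);

    while (v != 0) {
        if (atomic_compare_exchange_weak(&o->users, &v, v + 1))
            return 1;                 /* got a reference */
    }
    return 0;                         /* already dying, leave it alone */
}

static void put(struct obj *o)
{
    if (atomic_fetch_sub(&o->users, 1) == 1)
        printf("last reference dropped, object would be freed here\n");
}

int main(void)
{
    struct obj live = { ATOMIC_VAR_INIT(1) };
    struct obj dead = { ATOMIC_VAR_INIT(0) };

    printf("live: try_get -> %d\n", try_get(&live));  /* 1 */
    printf("dead: try_get -> %d\n", try_get(&dead));  /* 0, count never rises again */
    put(&live);   /* drops the extra reference taken above */
    put(&live);   /* drops the original reference */
    return 0;
}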
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1629 goto out_put_req; 1642 goto out_put_req;
1630 1643
1631 spin_lock_irq(&ctx->ctx_lock); 1644 spin_lock_irq(&ctx->ctx_lock);
1645 /*
1646 * We could have raced with io_destroy() and are currently holding a
1647 * reference to ctx which should be destroyed. We cannot submit IO
1648 * since ctx gets freed as soon as io_submit() puts its reference. The
1649 * check here is reliable: io_destroy() sets ctx->dead before waiting
1650 * for outstanding IO and the barrier between these two is realized by
1651 * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
1652 * increment ctx->reqs_active before checking for ctx->dead and the
1653 * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
1654 * don't see ctx->dead set here, io_destroy() waits for our IO to
1655 * finish.
1656 */
1657 if (ctx->dead) {
1658 spin_unlock_irq(&ctx->ctx_lock);
1659 ret = -EINVAL;
1660 goto out_put_req;
1661 }
1632 aio_run_iocb(req); 1662 aio_run_iocb(req);
1633 if (!list_empty(&ctx->run_list)) { 1663 if (!list_empty(&ctx->run_list)) {
1634 /* drain the run list */ 1664 /* drain the run list */
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 333a7bb4cb9c..889287019599 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
873 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); 873 ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
874 if (ret) 874 if (ret)
875 goto out_del; 875 goto out_del;
876 /*
877 * bdev could be deleted beneath us which would implicitly destroy
878 * the holder directory. Hold on to it.
879 */
880 kobject_get(bdev->bd_part->holder_dir);
876 881
877 list_add(&holder->list, &bdev->bd_holder_disks); 882 list_add(&holder->list, &bdev->bd_holder_disks);
878 goto out_unlock; 883 goto out_unlock;
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
909 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); 914 del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
910 del_symlink(bdev->bd_part->holder_dir, 915 del_symlink(bdev->bd_part->holder_dir,
911 &disk_to_dev(disk)->kobj); 916 &disk_to_dev(disk)->kobj);
917 kobject_put(bdev->bd_part->holder_dir);
912 list_del_init(&holder->list); 918 list_del_init(&holder->list);
913 kfree(holder); 919 kfree(holder);
914 } 920 }
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
922 * flush_disk - invalidates all buffer-cache entries on a disk 928 * flush_disk - invalidates all buffer-cache entries on a disk
923 * 929 *
924 * @bdev: struct block device to be flushed 930 * @bdev: struct block device to be flushed
931 * @kill_dirty: flag to guide handling of dirty inodes
925 * 932 *
926 * Invalidates all buffer-cache entries on a disk. It should be called 933 * Invalidates all buffer-cache entries on a disk. It should be called
927 * when a disk has been changed -- either by a media change or online 934 * when a disk has been changed -- either by a media change or online
928 * resize. 935 * resize.
929 */ 936 */
930static void flush_disk(struct block_device *bdev) 937static void flush_disk(struct block_device *bdev, bool kill_dirty)
931{ 938{
932 if (__invalidate_device(bdev)) { 939 if (__invalidate_device(bdev, kill_dirty)) {
933 char name[BDEVNAME_SIZE] = ""; 940 char name[BDEVNAME_SIZE] = "";
934 941
935 if (bdev->bd_disk) 942 if (bdev->bd_disk)
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
966 "%s: detected capacity change from %lld to %lld\n", 973 "%s: detected capacity change from %lld to %lld\n",
967 name, bdev_size, disk_size); 974 name, bdev_size, disk_size);
968 i_size_write(bdev->bd_inode, disk_size); 975 i_size_write(bdev->bd_inode, disk_size);
969 flush_disk(bdev); 976 flush_disk(bdev, false);
970 } 977 }
971} 978}
972EXPORT_SYMBOL(check_disk_size_change); 979EXPORT_SYMBOL(check_disk_size_change);
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
1019 if (!(events & DISK_EVENT_MEDIA_CHANGE)) 1026 if (!(events & DISK_EVENT_MEDIA_CHANGE))
1020 return 0; 1027 return 0;
1021 1028
1022 flush_disk(bdev); 1029 flush_disk(bdev, true);
1023 if (bdops->revalidate_disk) 1030 if (bdops->revalidate_disk)
1024 bdops->revalidate_disk(bdev->bd_disk); 1031 bdops->revalidate_disk(bdev->bd_disk);
1025 return 1; 1032 return 1;
@@ -1215,12 +1222,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
1215 1222
1216 res = __blkdev_get(bdev, mode, 0); 1223 res = __blkdev_get(bdev, mode, 0);
1217 1224
1218 /* __blkdev_get() may alter read only status, check it afterwards */
1219 if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1220 __blkdev_put(bdev, mode, 0);
1221 res = -EACCES;
1222 }
1223
1224 if (whole) { 1225 if (whole) {
1225 /* finish claiming */ 1226 /* finish claiming */
1226 mutex_lock(&bdev->bd_mutex); 1227 mutex_lock(&bdev->bd_mutex);
@@ -1298,6 +1299,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
1298 if (err) 1299 if (err)
1299 return ERR_PTR(err); 1300 return ERR_PTR(err);
1300 1301
1302 if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) {
1303 blkdev_put(bdev, mode);
1304 return ERR_PTR(-EACCES);
1305 }
1306
1301 return bdev; 1307 return bdev;
1302} 1308}
1303EXPORT_SYMBOL(blkdev_get_by_path); 1309EXPORT_SYMBOL(blkdev_get_by_path);
@@ -1601,7 +1607,7 @@ fail:
1601} 1607}
1602EXPORT_SYMBOL(lookup_bdev); 1608EXPORT_SYMBOL(lookup_bdev);
1603 1609
1604int __invalidate_device(struct block_device *bdev) 1610int __invalidate_device(struct block_device *bdev, bool kill_dirty)
1605{ 1611{
1606 struct super_block *sb = get_super(bdev); 1612 struct super_block *sb = get_super(bdev);
1607 int res = 0; 1613 int res = 0;
@@ -1614,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
1614 * hold). 1620 * hold).
1615 */ 1621 */
1616 shrink_dcache_sb(sb); 1622 shrink_dcache_sb(sb);
1617 res = invalidate_inodes(sb); 1623 res = invalidate_inodes(sb, kill_dirty);
1618 drop_super(sb); 1624 drop_super(sb);
1619 } 1625 }
1620 invalidate_bdev(bdev); 1626 invalidate_bdev(bdev);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2c98b3af6052..7f78cc78fdd0 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -729,6 +729,15 @@ struct btrfs_space_info {
729 u64 disk_total; /* total bytes on disk, takes mirrors into 729 u64 disk_total; /* total bytes on disk, takes mirrors into
730 account */ 730 account */
731 731
732 /*
733 * we bump reservation progress every time we decrement
734 * bytes_reserved. This way people waiting for reservations
735 * know something good has happened and they can check
736 * for progress. The number here isn't to be trusted, it
737 * just shows reclaim activity
738 */
739 unsigned long reservation_progress;
740
732 int full; /* indicates that we cannot allocate any more 741 int full; /* indicates that we cannot allocate any more
733 chunks for this space */ 742 chunks for this space */
734 int force_alloc; /* set if we need to force a chunk alloc for 743 int force_alloc; /* set if we need to force a chunk alloc for
@@ -1254,6 +1263,7 @@ struct btrfs_root {
1254#define BTRFS_MOUNT_SPACE_CACHE (1 << 12) 1263#define BTRFS_MOUNT_SPACE_CACHE (1 << 12)
1255#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) 1264#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
1256#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) 1265#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
1266#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
1257 1267
1258#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) 1268#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
1259#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) 1269#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@@ -2218,6 +2228,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
2218 u64 start, u64 end); 2228 u64 start, u64 end);
2219int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, 2229int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
2220 u64 num_bytes); 2230 u64 num_bytes);
2231int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
2232 struct btrfs_root *root, u64 type);
2221 2233
2222/* ctree.c */ 2234/* ctree.c */
2223int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 2235int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index fdce8799b98d..e1aa8d607bc7 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -359,10 +359,14 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page)
359 359
360 tree = &BTRFS_I(page->mapping->host)->io_tree; 360 tree = &BTRFS_I(page->mapping->host)->io_tree;
361 361
362 if (page->private == EXTENT_PAGE_PRIVATE) 362 if (page->private == EXTENT_PAGE_PRIVATE) {
363 WARN_ON(1);
363 goto out; 364 goto out;
364 if (!page->private) 365 }
366 if (!page->private) {
367 WARN_ON(1);
365 goto out; 368 goto out;
369 }
366 len = page->private >> 2; 370 len = page->private >> 2;
367 WARN_ON(len == 0); 371 WARN_ON(len == 0);
368 372
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 4e7e012ad667..7b3089b5c2df 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3342,15 +3342,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3342 u64 max_reclaim; 3342 u64 max_reclaim;
3343 u64 reclaimed = 0; 3343 u64 reclaimed = 0;
3344 long time_left; 3344 long time_left;
3345 int pause = 1;
3346 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; 3345 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3347 int loops = 0; 3346 int loops = 0;
3347 unsigned long progress;
3348 3348
3349 block_rsv = &root->fs_info->delalloc_block_rsv; 3349 block_rsv = &root->fs_info->delalloc_block_rsv;
3350 space_info = block_rsv->space_info; 3350 space_info = block_rsv->space_info;
3351 3351
3352 smp_mb(); 3352 smp_mb();
3353 reserved = space_info->bytes_reserved; 3353 reserved = space_info->bytes_reserved;
3354 progress = space_info->reservation_progress;
3354 3355
3355 if (reserved == 0) 3356 if (reserved == 0)
3356 return 0; 3357 return 0;
@@ -3365,31 +3366,36 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
3365 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); 3366 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3366 3367
3367 spin_lock(&space_info->lock); 3368 spin_lock(&space_info->lock);
3368 if (reserved > space_info->bytes_reserved) { 3369 if (reserved > space_info->bytes_reserved)
3369 loops = 0;
3370 reclaimed += reserved - space_info->bytes_reserved; 3370 reclaimed += reserved - space_info->bytes_reserved;
3371 } else {
3372 loops++;
3373 }
3374 reserved = space_info->bytes_reserved; 3371 reserved = space_info->bytes_reserved;
3375 spin_unlock(&space_info->lock); 3372 spin_unlock(&space_info->lock);
3376 3373
3374 loops++;
3375
3377 if (reserved == 0 || reclaimed >= max_reclaim) 3376 if (reserved == 0 || reclaimed >= max_reclaim)
3378 break; 3377 break;
3379 3378
3380 if (trans && trans->transaction->blocked) 3379 if (trans && trans->transaction->blocked)
3381 return -EAGAIN; 3380 return -EAGAIN;
3382 3381
3383 __set_current_state(TASK_INTERRUPTIBLE); 3382 time_left = schedule_timeout_interruptible(1);
3384 time_left = schedule_timeout(pause);
3385 3383
3386 /* We were interrupted, exit */ 3384 /* We were interrupted, exit */
3387 if (time_left) 3385 if (time_left)
3388 break; 3386 break;
3389 3387
3390 pause <<= 1; 3388 /* we've kicked the IO a few times, if anything has been freed,
3391 if (pause > HZ / 10) 3389 * exit. There is no sense in looping here for a long time
3392 pause = HZ / 10; 3390 * when we really need to commit the transaction, or there are
3391 * just too many writers without enough free space
3392 */
3393
3394 if (loops > 3) {
3395 smp_mb();
3396 if (progress != space_info->reservation_progress)
3397 break;
3398 }
3393 3399
3394 } 3400 }
3395 return reclaimed >= to_reclaim; 3401 return reclaimed >= to_reclaim;
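The shrink_delalloc() rework above relies on the reservation_progress counter introduced earlier in this diff: instead of backing off exponentially, the loop sleeps a tick at a time and, once it has made a few passes, stops as soon as the counter shows that reservations were released elsewhere. A single-threaded sketch of that exit rule; the locking, writeback kicks and surrounding fields are simplified stand-ins:

#include <stdio.h>

struct space_info {
    unsigned long bytes_reserved;
    unsigned long reservation_progress;
};

/* every release of reserved bytes bumps the counter */
static void release_bytes(struct space_info *s, unsigned long n)
{
    s->bytes_reserved -= n;
    s->reservation_progress++;
}

/* returns how many passes the shrink loop made before noticing
 * progress made by someone else, or hitting its hard cap */
static int shrink(struct space_info *s, int someone_else_releases)
{
    unsigned long progress = s->reservation_progress;   /* snapshot at entry */
    int loops = 0;

    while (loops < 1000) {
        /* the real code kicks writeback and sleeps here */
        if (someone_else_releases && loops == 5)
            release_bytes(s, 4096);
        loops++;
        if (loops > 3 && progress != s->reservation_progress)
            break;   /* reservations were freed since we started: stop looping */
    }
    return loops;
}

int main(void)
{
    struct space_info s = { 1 << 20, 0 };

    printf("loops with a concurrent release: %d\n", shrink(&s, 1));   /* 6 */
    printf("loops with no releases at all:   %d\n", shrink(&s, 0));   /* 1000 */
    return 0;
}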
@@ -3612,6 +3618,7 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3612 if (num_bytes) { 3618 if (num_bytes) {
3613 spin_lock(&space_info->lock); 3619 spin_lock(&space_info->lock);
3614 space_info->bytes_reserved -= num_bytes; 3620 space_info->bytes_reserved -= num_bytes;
3621 space_info->reservation_progress++;
3615 spin_unlock(&space_info->lock); 3622 spin_unlock(&space_info->lock);
3616 } 3623 }
3617 } 3624 }
@@ -3844,6 +3851,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3844 if (block_rsv->reserved >= block_rsv->size) { 3851 if (block_rsv->reserved >= block_rsv->size) {
3845 num_bytes = block_rsv->reserved - block_rsv->size; 3852 num_bytes = block_rsv->reserved - block_rsv->size;
3846 sinfo->bytes_reserved -= num_bytes; 3853 sinfo->bytes_reserved -= num_bytes;
3854 sinfo->reservation_progress++;
3847 block_rsv->reserved = block_rsv->size; 3855 block_rsv->reserved = block_rsv->size;
3848 block_rsv->full = 1; 3856 block_rsv->full = 1;
3849 } 3857 }
@@ -4005,7 +4013,6 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4005 to_reserve = 0; 4013 to_reserve = 0;
4006 } 4014 }
4007 spin_unlock(&BTRFS_I(inode)->accounting_lock); 4015 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4008
4009 to_reserve += calc_csum_metadata_size(inode, num_bytes); 4016 to_reserve += calc_csum_metadata_size(inode, num_bytes);
4010 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); 4017 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4011 if (ret) 4018 if (ret)
@@ -4133,6 +4140,7 @@ static int update_block_group(struct btrfs_trans_handle *trans,
4133 btrfs_set_block_group_used(&cache->item, old_val); 4140 btrfs_set_block_group_used(&cache->item, old_val);
4134 cache->reserved -= num_bytes; 4141 cache->reserved -= num_bytes;
4135 cache->space_info->bytes_reserved -= num_bytes; 4142 cache->space_info->bytes_reserved -= num_bytes;
4143 cache->space_info->reservation_progress++;
4136 cache->space_info->bytes_used += num_bytes; 4144 cache->space_info->bytes_used += num_bytes;
4137 cache->space_info->disk_used += num_bytes * factor; 4145 cache->space_info->disk_used += num_bytes * factor;
4138 spin_unlock(&cache->lock); 4146 spin_unlock(&cache->lock);
@@ -4184,6 +4192,7 @@ static int pin_down_extent(struct btrfs_root *root,
4184 if (reserved) { 4192 if (reserved) {
4185 cache->reserved -= num_bytes; 4193 cache->reserved -= num_bytes;
4186 cache->space_info->bytes_reserved -= num_bytes; 4194 cache->space_info->bytes_reserved -= num_bytes;
4195 cache->space_info->reservation_progress++;
4187 } 4196 }
4188 spin_unlock(&cache->lock); 4197 spin_unlock(&cache->lock);
4189 spin_unlock(&cache->space_info->lock); 4198 spin_unlock(&cache->space_info->lock);
@@ -4234,6 +4243,7 @@ static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4234 space_info->bytes_readonly += num_bytes; 4243 space_info->bytes_readonly += num_bytes;
4235 cache->reserved -= num_bytes; 4244 cache->reserved -= num_bytes;
4236 space_info->bytes_reserved -= num_bytes; 4245 space_info->bytes_reserved -= num_bytes;
4246 space_info->reservation_progress++;
4237 } 4247 }
4238 spin_unlock(&cache->lock); 4248 spin_unlock(&cache->lock);
4239 spin_unlock(&space_info->lock); 4249 spin_unlock(&space_info->lock);
@@ -4712,6 +4722,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4712 if (ret) { 4722 if (ret) {
4713 spin_lock(&cache->space_info->lock); 4723 spin_lock(&cache->space_info->lock);
4714 cache->space_info->bytes_reserved -= buf->len; 4724 cache->space_info->bytes_reserved -= buf->len;
4725 cache->space_info->reservation_progress++;
4715 spin_unlock(&cache->space_info->lock); 4726 spin_unlock(&cache->space_info->lock);
4716 } 4727 }
4717 goto out; 4728 goto out;
@@ -5376,7 +5387,7 @@ again:
5376 num_bytes, data, 1); 5387 num_bytes, data, 1);
5377 goto again; 5388 goto again;
5378 } 5389 }
5379 if (ret == -ENOSPC) { 5390 if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
5380 struct btrfs_space_info *sinfo; 5391 struct btrfs_space_info *sinfo;
5381 5392
5382 sinfo = __find_space_info(root->fs_info, data); 5393 sinfo = __find_space_info(root->fs_info, data);
@@ -6583,7 +6594,7 @@ static noinline int relocate_data_extent(struct inode *reloc_inode,
6583 u64 end = start + extent_key->offset - 1; 6594 u64 end = start + extent_key->offset - 1;
6584 6595
6585 em = alloc_extent_map(GFP_NOFS); 6596 em = alloc_extent_map(GFP_NOFS);
6586 BUG_ON(!em || IS_ERR(em)); 6597 BUG_ON(!em);
6587 6598
6588 em->start = start; 6599 em->start = start;
6589 em->len = extent_key->offset; 6600 em->len = extent_key->offset;
@@ -8065,6 +8076,13 @@ out:
8065 return ret; 8076 return ret;
8066} 8077}
8067 8078
8079int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8080 struct btrfs_root *root, u64 type)
8081{
8082 u64 alloc_flags = get_alloc_profile(root, type);
8083 return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
8084}
8085
8068/* 8086/*
8069 * helper to account the unused space of all the readonly block group in the 8087 * helper to account the unused space of all the readonly block group in the
8070 * list. takes mirrors into account. 8088 * list. takes mirrors into account.
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 5e76a474cb7e..714adc4ac4c2 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
1433 */ 1433 */
1434u64 count_range_bits(struct extent_io_tree *tree, 1434u64 count_range_bits(struct extent_io_tree *tree,
1435 u64 *start, u64 search_end, u64 max_bytes, 1435 u64 *start, u64 search_end, u64 max_bytes,
1436 unsigned long bits) 1436 unsigned long bits, int contig)
1437{ 1437{
1438 struct rb_node *node; 1438 struct rb_node *node;
1439 struct extent_state *state; 1439 struct extent_state *state;
1440 u64 cur_start = *start; 1440 u64 cur_start = *start;
1441 u64 total_bytes = 0; 1441 u64 total_bytes = 0;
1442 u64 last = 0;
1442 int found = 0; 1443 int found = 0;
1443 1444
1444 if (search_end <= cur_start) { 1445 if (search_end <= cur_start) {
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
1463 state = rb_entry(node, struct extent_state, rb_node); 1464 state = rb_entry(node, struct extent_state, rb_node);
1464 if (state->start > search_end) 1465 if (state->start > search_end)
1465 break; 1466 break;
1466 if (state->end >= cur_start && (state->state & bits)) { 1467 if (contig && found && state->start > last + 1)
1468 break;
1469 if (state->end >= cur_start && (state->state & bits) == bits) {
1467 total_bytes += min(search_end, state->end) + 1 - 1470 total_bytes += min(search_end, state->end) + 1 -
1468 max(cur_start, state->start); 1471 max(cur_start, state->start);
1469 if (total_bytes >= max_bytes) 1472 if (total_bytes >= max_bytes)
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
1472 *start = state->start; 1475 *start = state->start;
1473 found = 1; 1476 found = 1;
1474 } 1477 }
1478 last = state->end;
1479 } else if (contig && found) {
1480 break;
1475 } 1481 }
1476 node = rb_next(node); 1482 node = rb_next(node);
1477 if (!node) 1483 if (!node)
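Alongside the new contig handling, the count_range_bits() hunk above also tightens the bit test from "any of the requested bits" to "all of the requested bits". A small illustration of the difference; the flag names and values are made up rather than the btrfs EXTENT_* definitions:

#include <stdio.h>

int main(void)
{
    unsigned long BIT_A = 1u << 0;
    unsigned long BIT_B = 1u << 1;

    unsigned long bits  = BIT_A | BIT_B;   /* caller asks for both bits */
    unsigned long state = BIT_A;           /* the range only carries one */

    printf("any bit set:  %d\n", (state & bits) != 0);    /* 1: old test matches */
    printf("all bits set: %d\n", (state & bits) == bits); /* 0: new test rejects */
    return 0;
}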
@@ -1946,6 +1952,7 @@ void set_page_extent_mapped(struct page *page)
1946 1952
1947static void set_page_extent_head(struct page *page, unsigned long len) 1953static void set_page_extent_head(struct page *page, unsigned long len)
1948{ 1954{
1955 WARN_ON(!PagePrivate(page));
1949 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2); 1956 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1950} 1957}
1951 1958
@@ -2821,9 +2828,17 @@ int try_release_extent_state(struct extent_map_tree *map,
2821 * at this point we can safely clear everything except the 2828 * at this point we can safely clear everything except the
2822 * locked bit and the nodatasum bit 2829 * locked bit and the nodatasum bit
2823 */ 2830 */
2824 clear_extent_bit(tree, start, end, 2831 ret = clear_extent_bit(tree, start, end,
2825 ~(EXTENT_LOCKED | EXTENT_NODATASUM), 2832 ~(EXTENT_LOCKED | EXTENT_NODATASUM),
2826 0, 0, NULL, mask); 2833 0, 0, NULL, mask);
2834
2835 /* if clear_extent_bit failed for enomem reasons,
2836 * we can't allow the release to continue.
2837 */
2838 if (ret < 0)
2839 ret = 0;
2840 else
2841 ret = 1;
2827 } 2842 }
2828 return ret; 2843 return ret;
2829} 2844}
@@ -2903,6 +2918,46 @@ out:
2903 return sector; 2918 return sector;
2904} 2919}
2905 2920
2921/*
2922 * helper function for fiemap, which doesn't want to see any holes.
2923 * This maps until we find something past 'last'
2924 */
2925static struct extent_map *get_extent_skip_holes(struct inode *inode,
2926 u64 offset,
2927 u64 last,
2928 get_extent_t *get_extent)
2929{
2930 u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
2931 struct extent_map *em;
2932 u64 len;
2933
2934 if (offset >= last)
2935 return NULL;
2936
2937 while(1) {
2938 len = last - offset;
2939 if (len == 0)
2940 break;
2941 len = (len + sectorsize - 1) & ~(sectorsize - 1);
2942 em = get_extent(inode, NULL, 0, offset, len, 0);
2943 if (!em || IS_ERR(em))
2944 return em;
2945
2946 /* if this isn't a hole return it */
2947 if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
2948 em->block_start != EXTENT_MAP_HOLE) {
2949 return em;
2950 }
2951
2952 /* this is a hole, advance to the next extent */
2953 offset = extent_map_end(em);
2954 free_extent_map(em);
2955 if (offset >= last)
2956 break;
2957 }
2958 return NULL;
2959}
2960
2906int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 2961int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2907 __u64 start, __u64 len, get_extent_t *get_extent) 2962 __u64 start, __u64 len, get_extent_t *get_extent)
2908{ 2963{
@@ -2912,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2912 u32 flags = 0; 2967 u32 flags = 0;
2913 u32 found_type; 2968 u32 found_type;
2914 u64 last; 2969 u64 last;
2970 u64 last_for_get_extent = 0;
2915 u64 disko = 0; 2971 u64 disko = 0;
2972 u64 isize = i_size_read(inode);
2916 struct btrfs_key found_key; 2973 struct btrfs_key found_key;
2917 struct extent_map *em = NULL; 2974 struct extent_map *em = NULL;
2918 struct extent_state *cached_state = NULL; 2975 struct extent_state *cached_state = NULL;
2919 struct btrfs_path *path; 2976 struct btrfs_path *path;
2920 struct btrfs_file_extent_item *item; 2977 struct btrfs_file_extent_item *item;
2921 int end = 0; 2978 int end = 0;
2922 u64 em_start = 0, em_len = 0; 2979 u64 em_start = 0;
2980 u64 em_len = 0;
2981 u64 em_end = 0;
2923 unsigned long emflags; 2982 unsigned long emflags;
2924 int hole = 0;
2925 2983
2926 if (len == 0) 2984 if (len == 0)
2927 return -EINVAL; 2985 return -EINVAL;
@@ -2931,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2931 return -ENOMEM; 2989 return -ENOMEM;
2932 path->leave_spinning = 1; 2990 path->leave_spinning = 1;
2933 2991
2992 /*
2993 * lookup the last file extent. We're not using i_size here
2994 * because there might be preallocation past i_size
2995 */
2934 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, 2996 ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
2935 path, inode->i_ino, -1, 0); 2997 path, inode->i_ino, -1, 0);
2936 if (ret < 0) { 2998 if (ret < 0) {
@@ -2944,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2944 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); 3006 btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
2945 found_type = btrfs_key_type(&found_key); 3007 found_type = btrfs_key_type(&found_key);
2946 3008
2947 /* No extents, just return */ 3009 /* No extents, but there might be delalloc bits */
2948 if (found_key.objectid != inode->i_ino || 3010 if (found_key.objectid != inode->i_ino ||
2949 found_type != BTRFS_EXTENT_DATA_KEY) { 3011 found_type != BTRFS_EXTENT_DATA_KEY) {
2950 btrfs_free_path(path); 3012 /* have to trust i_size as the end */
2951 return 0; 3013 last = (u64)-1;
3014 last_for_get_extent = isize;
3015 } else {
3016 /*
3017 * remember the start of the last extent. There are a
3018 * bunch of different factors that go into the length of the
3019 * extent, so its much less complex to remember where it started
3020 */
3021 last = found_key.offset;
3022 last_for_get_extent = last + 1;
2952 } 3023 }
2953 last = found_key.offset;
2954 btrfs_free_path(path); 3024 btrfs_free_path(path);
2955 3025
3026 /*
3027 * we might have some extents allocated but more delalloc past those
3028 * extents. so, we trust isize unless the start of the last extent is
3029 * beyond isize
3030 */
3031 if (last < isize) {
3032 last = (u64)-1;
3033 last_for_get_extent = isize;
3034 }
3035
2956 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3036 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
2957 &cached_state, GFP_NOFS); 3037 &cached_state, GFP_NOFS);
2958 em = get_extent(inode, NULL, 0, off, max - off, 0); 3038
3039 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3040 get_extent);
2959 if (!em) 3041 if (!em)
2960 goto out; 3042 goto out;
2961 if (IS_ERR(em)) { 3043 if (IS_ERR(em)) {
@@ -2964,22 +3046,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2964 } 3046 }
2965 3047
2966 while (!end) { 3048 while (!end) {
2967 hole = 0; 3049 u64 offset_in_extent;
2968 off = em->start + em->len;
2969 if (off >= max)
2970 end = 1;
2971 3050
2972 if (em->block_start == EXTENT_MAP_HOLE) { 3051 /* break if the extent we found is outside the range */
2973 hole = 1; 3052 if (em->start >= max || extent_map_end(em) < off)
2974 goto next; 3053 break;
2975 }
2976 3054
2977 em_start = em->start; 3055 /*
2978 em_len = em->len; 3056 * get_extent may return an extent that starts before our
3057 * requested range. We have to make sure the ranges
3058 * we return to fiemap always move forward and don't
3059 * overlap, so adjust the offsets here
3060 */
3061 em_start = max(em->start, off);
2979 3062
3063 /*
3064 * record the offset from the start of the extent
3065 * for adjusting the disk offset below
3066 */
3067 offset_in_extent = em_start - em->start;
3068 em_end = extent_map_end(em);
3069 em_len = em_end - em_start;
3070 emflags = em->flags;
2980 disko = 0; 3071 disko = 0;
2981 flags = 0; 3072 flags = 0;
2982 3073
3074 /*
3075 * bump off for our next call to get_extent
3076 */
3077 off = extent_map_end(em);
3078 if (off >= max)
3079 end = 1;
3080
2983 if (em->block_start == EXTENT_MAP_LAST_BYTE) { 3081 if (em->block_start == EXTENT_MAP_LAST_BYTE) {
2984 end = 1; 3082 end = 1;
2985 flags |= FIEMAP_EXTENT_LAST; 3083 flags |= FIEMAP_EXTENT_LAST;
@@ -2990,42 +3088,34 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2990 flags |= (FIEMAP_EXTENT_DELALLOC | 3088 flags |= (FIEMAP_EXTENT_DELALLOC |
2991 FIEMAP_EXTENT_UNKNOWN); 3089 FIEMAP_EXTENT_UNKNOWN);
2992 } else { 3090 } else {
2993 disko = em->block_start; 3091 disko = em->block_start + offset_in_extent;
2994 } 3092 }
2995 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) 3093 if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
2996 flags |= FIEMAP_EXTENT_ENCODED; 3094 flags |= FIEMAP_EXTENT_ENCODED;
2997 3095
2998next:
2999 emflags = em->flags;
3000 free_extent_map(em); 3096 free_extent_map(em);
3001 em = NULL; 3097 em = NULL;
3002 if (!end) { 3098 if ((em_start >= last) || em_len == (u64)-1 ||
3003 em = get_extent(inode, NULL, 0, off, max - off, 0); 3099 (last == (u64)-1 && isize <= em_end)) {
3004 if (!em)
3005 goto out;
3006 if (IS_ERR(em)) {
3007 ret = PTR_ERR(em);
3008 goto out;
3009 }
3010 emflags = em->flags;
3011 }
3012
3013 if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
3014 flags |= FIEMAP_EXTENT_LAST; 3100 flags |= FIEMAP_EXTENT_LAST;
3015 end = 1; 3101 end = 1;
3016 } 3102 }
3017 3103
3018 if (em_start == last) { 3104 /* now scan forward to see if this is really the last extent. */
3105 em = get_extent_skip_holes(inode, off, last_for_get_extent,
3106 get_extent);
3107 if (IS_ERR(em)) {
3108 ret = PTR_ERR(em);
3109 goto out;
3110 }
3111 if (!em) {
3019 flags |= FIEMAP_EXTENT_LAST; 3112 flags |= FIEMAP_EXTENT_LAST;
3020 end = 1; 3113 end = 1;
3021 } 3114 }
3022 3115 ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
3023 if (!hole) { 3116 em_len, flags);
3024 ret = fiemap_fill_next_extent(fieinfo, em_start, disko, 3117 if (ret)
3025 em_len, flags); 3118 goto out_free;
3026 if (ret)
3027 goto out_free;
3028 }
3029 } 3119 }
3030out_free: 3120out_free:
3031 free_extent_map(em); 3121 free_extent_map(em);
@@ -3194,7 +3284,13 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3194 } 3284 }
3195 if (!PageUptodate(p)) 3285 if (!PageUptodate(p))
3196 uptodate = 0; 3286 uptodate = 0;
3197 unlock_page(p); 3287
3288 /*
3289 * see below about how we avoid a nasty race with release page
3290 * and why we unlock later
3291 */
3292 if (i != 0)
3293 unlock_page(p);
3198 } 3294 }
3199 if (uptodate) 3295 if (uptodate)
3200 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags); 3296 set_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -3218,9 +3314,26 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
3218 atomic_inc(&eb->refs); 3314 atomic_inc(&eb->refs);
3219 spin_unlock(&tree->buffer_lock); 3315 spin_unlock(&tree->buffer_lock);
3220 radix_tree_preload_end(); 3316 radix_tree_preload_end();
3317
3318 /*
3319 * there is a race where release page may have
3320 * tried to find this extent buffer in the radix
3321 * but failed. It will tell the VM it is safe to
 3322 * reclaim the page, and it will clear the page private bit.
3323 * We must make sure to set the page private bit properly
3324 * after the extent buffer is in the radix tree so
3325 * it doesn't get lost
3326 */
3327 set_page_extent_mapped(eb->first_page);
3328 set_page_extent_head(eb->first_page, eb->len);
3329 if (!page0)
3330 unlock_page(eb->first_page);
3221 return eb; 3331 return eb;
3222 3332
3223free_eb: 3333free_eb:
3334 if (eb->first_page && !page0)
3335 unlock_page(eb->first_page);
3336
3224 if (!atomic_dec_and_test(&eb->refs)) 3337 if (!atomic_dec_and_test(&eb->refs))
3225 return exists; 3338 return exists;
3226 btrfs_release_extent_buffer(eb); 3339 btrfs_release_extent_buffer(eb);
@@ -3271,10 +3384,11 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree,
3271 continue; 3384 continue;
3272 3385
3273 lock_page(page); 3386 lock_page(page);
3387 WARN_ON(!PagePrivate(page));
3388
3389 set_page_extent_mapped(page);
3274 if (i == 0) 3390 if (i == 0)
3275 set_page_extent_head(page, eb->len); 3391 set_page_extent_head(page, eb->len);
3276 else
3277 set_page_private(page, EXTENT_PAGE_PRIVATE);
3278 3392
3279 clear_page_dirty_for_io(page); 3393 clear_page_dirty_for_io(page);
3280 spin_lock_irq(&page->mapping->tree_lock); 3394 spin_lock_irq(&page->mapping->tree_lock);
@@ -3464,6 +3578,13 @@ int read_extent_buffer_pages(struct extent_io_tree *tree,
3464 3578
3465 for (i = start_i; i < num_pages; i++) { 3579 for (i = start_i; i < num_pages; i++) {
3466 page = extent_buffer_page(eb, i); 3580 page = extent_buffer_page(eb, i);
3581
3582 WARN_ON(!PagePrivate(page));
3583
3584 set_page_extent_mapped(page);
3585 if (i == 0)
3586 set_page_extent_head(page, eb->len);
3587
3467 if (inc_all_pages) 3588 if (inc_all_pages)
3468 page_cache_get(page); 3589 page_cache_get(page);
3469 if (!PageUptodate(page)) { 3590 if (!PageUptodate(page)) {
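
For illustration only (not part of the patch): a standalone userspace sketch of the clamping the reworked fiemap loop performs when get_extent returns an extent that begins before the requested offset. struct ext, ext_end() and the sample numbers are made-up stand-ins for struct extent_map / extent_map_end(); uint64_t stands in for u64.

    #include <stdint.h>
    #include <stdio.h>

    struct ext {                  /* stand-in for struct extent_map */
        uint64_t start;           /* logical start of the extent    */
        uint64_t len;             /* length of the extent           */
        uint64_t block_start;     /* physical (disk) start          */
    };

    static uint64_t ext_end(const struct ext *e)
    {
        return e->start + e->len;
    }

    /* Clamp an extent that may begin before the requested offset so the
     * ranges handed to fiemap always move forward and never overlap, and
     * shift the disk offset by the same amount. */
    static void clamp_extent(const struct ext *e, uint64_t off,
                             uint64_t *em_start, uint64_t *em_len,
                             uint64_t *disko)
    {
        uint64_t offset_in_extent;

        *em_start = e->start > off ? e->start : off; /* max(e->start, off) */
        offset_in_extent = *em_start - e->start;
        *em_len = ext_end(e) - *em_start;
        *disko = e->block_start + offset_in_extent;
    }

    int main(void)
    {
        struct ext e = { 4096, 16384, 1048576 };
        uint64_t s, l, d;

        clamp_extent(&e, 8192, &s, &l, &d);
        /* prints: start=8192 len=12288 disk=1052672 */
        printf("start=%llu len=%llu disk=%llu\n",
               (unsigned long long)s, (unsigned long long)l,
               (unsigned long long)d);
        return 0;
    }

The same offset_in_extent shift is why the loop now reports em->block_start + offset_in_extent as disko instead of the raw block_start.
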
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 7083cfafd061..9318dfefd59c 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -191,7 +191,7 @@ void extent_io_exit(void);
191 191
192u64 count_range_bits(struct extent_io_tree *tree, 192u64 count_range_bits(struct extent_io_tree *tree,
193 u64 *start, u64 search_end, 193 u64 *start, u64 search_end,
194 u64 max_bytes, unsigned long bits); 194 u64 max_bytes, unsigned long bits, int contig);
195 195
196void free_extent_state(struct extent_state *state); 196void free_extent_state(struct extent_state *state);
197int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, 197int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
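
The new contig argument to count_range_bits() (passed as 1 by btrfs_get_extent_fiemap later in this diff) distinguishes "total matching bytes in the range" from "length of the first contiguous matching run". A rough userspace analogue of that intent, assuming an array of flags rather than the real extent_io_tree:

    #include <stdint.h>
    #include <stdio.h>

    /* Count positions in [*start, end] whose flag is set.  With contig != 0,
     * stop at the first gap after counting has begun; *start is updated to
     * where the run begins, as the delalloc probe relies on. */
    static uint64_t count_range(const unsigned char *set, uint64_t n,
                                uint64_t *start, uint64_t end, int contig)
    {
        uint64_t found = 0, first = *start, i;
        int seen = 0;

        for (i = *start; i <= end && i < n; i++) {
            if (set[i]) {
                if (!seen) {
                    first = i;
                    seen = 1;
                }
                found++;
            } else if (contig && seen) {
                break;
            }
        }
        *start = first;
        return found;
    }

    int main(void)
    {
        unsigned char bits[10] = { 0, 1, 1, 0, 1, 1, 1, 0, 0, 0 };
        uint64_t s = 0;

        printf("total=%llu\n",
               (unsigned long long)count_range(bits, 10, &s, 9, 0)); /* 5 */
        s = 0;
        printf("contig=%llu at %llu\n",
               (unsigned long long)count_range(bits, 10, &s, 9, 1),
               (unsigned long long)s); /* 2 at 1 */
        return 0;
    }
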
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index b0e1fce12530..2b6c12e983b3 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -51,8 +51,8 @@ struct extent_map *alloc_extent_map(gfp_t mask)
51{ 51{
52 struct extent_map *em; 52 struct extent_map *em;
53 em = kmem_cache_alloc(extent_map_cache, mask); 53 em = kmem_cache_alloc(extent_map_cache, mask);
54 if (!em || IS_ERR(em)) 54 if (!em)
55 return em; 55 return NULL;
56 em->in_tree = 0; 56 em->in_tree = 0;
57 em->flags = 0; 57 em->flags = 0;
58 em->compress_type = BTRFS_COMPRESS_NONE; 58 em->compress_type = BTRFS_COMPRESS_NONE;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c1d3a818731a..f447b783bb84 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -70,6 +70,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
70 70
71 /* Flush processor's dcache for this page */ 71 /* Flush processor's dcache for this page */
72 flush_dcache_page(page); 72 flush_dcache_page(page);
73
74 /*
75 * if we get a partial write, we can end up with
76 * partially up to date pages. These add
77 * a lot of complexity, so make sure they don't
78 * happen by forcing this copy to be retried.
79 *
80 * The rest of the btrfs_file_write code will fall
81 * back to page at a time copies after we return 0.
82 */
83 if (!PageUptodate(page) && copied < count)
84 copied = 0;
85
73 iov_iter_advance(i, copied); 86 iov_iter_advance(i, copied);
74 write_bytes -= copied; 87 write_bytes -= copied;
75 total_copied += copied; 88 total_copied += copied;
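
A tiny standalone sketch of the policy the comment above describes; the types and the page_uptodate flag are placeholders for the kernel's PageUptodate() check, not the real API:

    #include <stddef.h>
    #include <stdio.h>

    /* If the user copy came up short and the destination page was not
     * already up to date, report 0 copied bytes so the write path falls
     * back to page-at-a-time copies instead of leaving a partial page. */
    static size_t account_copy(size_t copied, size_t count, int page_uptodate)
    {
        if (!page_uptodate && copied < count)
            return 0;
        return copied;
    }

    int main(void)
    {
        printf("%zu\n", account_copy(100, 4096, 0)); /* 0: retry this copy   */
        printf("%zu\n", account_copy(100, 4096, 1)); /* 100: partial is fine */
        return 0;
    }
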
@@ -186,6 +199,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
186 split = alloc_extent_map(GFP_NOFS); 199 split = alloc_extent_map(GFP_NOFS);
187 if (!split2) 200 if (!split2)
188 split2 = alloc_extent_map(GFP_NOFS); 201 split2 = alloc_extent_map(GFP_NOFS);
202 BUG_ON(!split || !split2);
189 203
190 write_lock(&em_tree->lock); 204 write_lock(&em_tree->lock);
191 em = lookup_extent_mapping(em_tree, start, len); 205 em = lookup_extent_mapping(em_tree, start, len);
@@ -762,6 +776,27 @@ out:
762} 776}
763 777
764/* 778/*
779 * on error we return an unlocked page and the error value
780 * on success we return a locked page and 0
781 */
782static int prepare_uptodate_page(struct page *page, u64 pos)
783{
784 int ret = 0;
785
786 if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
787 ret = btrfs_readpage(NULL, page);
788 if (ret)
789 return ret;
790 lock_page(page);
791 if (!PageUptodate(page)) {
792 unlock_page(page);
793 return -EIO;
794 }
795 }
796 return 0;
797}
798
799/*
765 * this gets pages into the page cache and locks them down, it also properly 800 * this gets pages into the page cache and locks them down, it also properly
766 * waits for data=ordered extents to finish before allowing the pages to be 801 * waits for data=ordered extents to finish before allowing the pages to be
767 * modified. 802 * modified.
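
prepare_uptodate_page() above only issues a read when the write boundary falls inside the page and the page is not already up to date; a minimal sketch of that test, with 4096 as an assumed PAGE_CACHE_SIZE:

    #include <stdint.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE 4096u  /* assumed stand-in for PAGE_CACHE_SIZE */

    /* Read-before-write is only needed when the boundary is not page
     * aligned and the page is not already up to date. */
    static int needs_readpage(uint64_t pos, int uptodate)
    {
        return (pos & (SKETCH_PAGE_SIZE - 1)) != 0 && !uptodate;
    }

    int main(void)
    {
        printf("%d\n", needs_readpage(8192, 0)); /* 0: page aligned       */
        printf("%d\n", needs_readpage(8200, 0)); /* 1: must read first    */
        printf("%d\n", needs_readpage(8200, 1)); /* 0: already up to date */
        return 0;
    }
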
@@ -776,6 +811,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
776 unsigned long index = pos >> PAGE_CACHE_SHIFT; 811 unsigned long index = pos >> PAGE_CACHE_SHIFT;
777 struct inode *inode = fdentry(file)->d_inode; 812 struct inode *inode = fdentry(file)->d_inode;
778 int err = 0; 813 int err = 0;
814 int faili = 0;
779 u64 start_pos; 815 u64 start_pos;
780 u64 last_pos; 816 u64 last_pos;
781 817
@@ -793,15 +829,24 @@ again:
793 for (i = 0; i < num_pages; i++) { 829 for (i = 0; i < num_pages; i++) {
794 pages[i] = grab_cache_page(inode->i_mapping, index + i); 830 pages[i] = grab_cache_page(inode->i_mapping, index + i);
795 if (!pages[i]) { 831 if (!pages[i]) {
796 int c; 832 faili = i - 1;
797 for (c = i - 1; c >= 0; c--) { 833 err = -ENOMEM;
798 unlock_page(pages[c]); 834 goto fail;
799 page_cache_release(pages[c]); 835 }
800 } 836
801 return -ENOMEM; 837 if (i == 0)
838 err = prepare_uptodate_page(pages[i], pos);
839 if (i == num_pages - 1)
840 err = prepare_uptodate_page(pages[i],
841 pos + write_bytes);
842 if (err) {
843 page_cache_release(pages[i]);
844 faili = i - 1;
845 goto fail;
802 } 846 }
803 wait_on_page_writeback(pages[i]); 847 wait_on_page_writeback(pages[i]);
804 } 848 }
849 err = 0;
805 if (start_pos < inode->i_size) { 850 if (start_pos < inode->i_size) {
806 struct btrfs_ordered_extent *ordered; 851 struct btrfs_ordered_extent *ordered;
807 lock_extent_bits(&BTRFS_I(inode)->io_tree, 852 lock_extent_bits(&BTRFS_I(inode)->io_tree,
@@ -841,6 +886,14 @@ again:
841 WARN_ON(!PageLocked(pages[i])); 886 WARN_ON(!PageLocked(pages[i]));
842 } 887 }
843 return 0; 888 return 0;
889fail:
890 while (faili >= 0) {
891 unlock_page(pages[faili]);
892 page_cache_release(pages[faili]);
893 faili--;
894 }
895 return err;
896
844} 897}
845 898
846static ssize_t btrfs_file_aio_write(struct kiocb *iocb, 899static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
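
The fail: label added above unwinds using the index of the last fully prepared page; a toy sketch of that pattern, where put_page_sketch() is only a logging stub for unlock_page() plus page_cache_release():

    #include <stdio.h>

    static void put_page_sketch(int i)
    {
        printf("unlock+release page %d\n", i);  /* stub for the real calls */
    }

    /* Walk back from the last page that was fully set up down to 0,
     * releasing each one, then report the original error. */
    static int unwind_pages(int faili, int err)
    {
        while (faili >= 0) {
            put_page_sketch(faili);
            faili--;
        }
        return err;
    }

    int main(void)
    {
        /* pages 0..2 were grabbed before page 3 failed, so faili == 2 */
        return unwind_pages(2, -12) == -12 ? 0 : 1;  /* -12 ~ -ENOMEM */
    }
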
@@ -850,7 +903,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
850 struct file *file = iocb->ki_filp; 903 struct file *file = iocb->ki_filp;
851 struct inode *inode = fdentry(file)->d_inode; 904 struct inode *inode = fdentry(file)->d_inode;
852 struct btrfs_root *root = BTRFS_I(inode)->root; 905 struct btrfs_root *root = BTRFS_I(inode)->root;
853 struct page *pinned[2];
854 struct page **pages = NULL; 906 struct page **pages = NULL;
855 struct iov_iter i; 907 struct iov_iter i;
856 loff_t *ppos = &iocb->ki_pos; 908 loff_t *ppos = &iocb->ki_pos;
@@ -871,9 +923,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
871 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || 923 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
872 (file->f_flags & O_DIRECT)); 924 (file->f_flags & O_DIRECT));
873 925
874 pinned[0] = NULL;
875 pinned[1] = NULL;
876
877 start_pos = pos; 926 start_pos = pos;
878 927
879 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 928 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
@@ -961,32 +1010,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
961 first_index = pos >> PAGE_CACHE_SHIFT; 1010 first_index = pos >> PAGE_CACHE_SHIFT;
962 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; 1011 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
963 1012
964 /*
965 * there are lots of better ways to do this, but this code
966 * makes sure the first and last page in the file range are
967 * up to date and ready for cow
968 */
969 if ((pos & (PAGE_CACHE_SIZE - 1))) {
970 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
971 if (!PageUptodate(pinned[0])) {
972 ret = btrfs_readpage(NULL, pinned[0]);
973 BUG_ON(ret);
974 wait_on_page_locked(pinned[0]);
975 } else {
976 unlock_page(pinned[0]);
977 }
978 }
979 if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
980 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
981 if (!PageUptodate(pinned[1])) {
982 ret = btrfs_readpage(NULL, pinned[1]);
983 BUG_ON(ret);
984 wait_on_page_locked(pinned[1]);
985 } else {
986 unlock_page(pinned[1]);
987 }
988 }
989
990 while (iov_iter_count(&i) > 0) { 1013 while (iov_iter_count(&i) > 0) {
991 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1014 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
992 size_t write_bytes = min(iov_iter_count(&i), 1015 size_t write_bytes = min(iov_iter_count(&i),
@@ -1023,8 +1046,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1023 1046
1024 copied = btrfs_copy_from_user(pos, num_pages, 1047 copied = btrfs_copy_from_user(pos, num_pages,
1025 write_bytes, pages, &i); 1048 write_bytes, pages, &i);
1026 dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> 1049
1027 PAGE_CACHE_SHIFT; 1050 /*
1051 * if we have trouble faulting in the pages, fall
1052 * back to one page at a time
1053 */
1054 if (copied < write_bytes)
1055 nrptrs = 1;
1056
1057 if (copied == 0)
1058 dirty_pages = 0;
1059 else
1060 dirty_pages = (copied + offset +
1061 PAGE_CACHE_SIZE - 1) >>
1062 PAGE_CACHE_SHIFT;
1028 1063
1029 if (num_pages > dirty_pages) { 1064 if (num_pages > dirty_pages) {
1030 if (copied > 0) 1065 if (copied > 0)
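
The dirty_pages computation above rounds the copied byte span up to whole pages, with the new special case for copied == 0. For example, with an assumed 4096-byte page, copying 5000 bytes starting 100 bytes into the first page touches two pages:

    #include <stddef.h>
    #include <stdio.h>

    #define SKETCH_PAGE_SIZE  4096u  /* assumed PAGE_CACHE_SIZE  */
    #define SKETCH_PAGE_SHIFT 12     /* assumed PAGE_CACHE_SHIFT */

    /* Pages dirtied by `copied` bytes starting at byte `offset` of the
     * first page; a copy of zero bytes dirties nothing. */
    static unsigned long dirty_page_count(size_t copied, size_t offset)
    {
        if (copied == 0)
            return 0;
        return (copied + offset + SKETCH_PAGE_SIZE - 1) >> SKETCH_PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%lu\n", dirty_page_count(0, 100));    /* 0 */
        printf("%lu\n", dirty_page_count(5000, 100)); /* 2 */
        return 0;
    }
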
@@ -1068,10 +1103,6 @@ out:
1068 err = ret; 1103 err = ret;
1069 1104
1070 kfree(pages); 1105 kfree(pages);
1071 if (pinned[0])
1072 page_cache_release(pinned[0]);
1073 if (pinned[1])
1074 page_cache_release(pinned[1]);
1075 *ppos = pos; 1106 *ppos = pos;
1076 1107
1077 /* 1108 /*
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index bcc461a9695f..9007bbd01dbf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -644,6 +644,7 @@ retry:
644 async_extent->ram_size - 1, 0); 644 async_extent->ram_size - 1, 0);
645 645
646 em = alloc_extent_map(GFP_NOFS); 646 em = alloc_extent_map(GFP_NOFS);
647 BUG_ON(!em);
647 em->start = async_extent->start; 648 em->start = async_extent->start;
648 em->len = async_extent->ram_size; 649 em->len = async_extent->ram_size;
649 em->orig_start = em->start; 650 em->orig_start = em->start;
@@ -820,6 +821,7 @@ static noinline int cow_file_range(struct inode *inode,
820 BUG_ON(ret); 821 BUG_ON(ret);
821 822
822 em = alloc_extent_map(GFP_NOFS); 823 em = alloc_extent_map(GFP_NOFS);
824 BUG_ON(!em);
823 em->start = start; 825 em->start = start;
824 em->orig_start = em->start; 826 em->orig_start = em->start;
825 ram_size = ins.offset; 827 ram_size = ins.offset;
@@ -1169,6 +1171,7 @@ out_check:
1169 struct extent_map_tree *em_tree; 1171 struct extent_map_tree *em_tree;
1170 em_tree = &BTRFS_I(inode)->extent_tree; 1172 em_tree = &BTRFS_I(inode)->extent_tree;
1171 em = alloc_extent_map(GFP_NOFS); 1173 em = alloc_extent_map(GFP_NOFS);
1174 BUG_ON(!em);
1172 em->start = cur_offset; 1175 em->start = cur_offset;
1173 em->orig_start = em->start; 1176 em->orig_start = em->start;
1174 em->len = num_bytes; 1177 em->len = num_bytes;
@@ -1910,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
1910 1913
1911 private = 0; 1914 private = 0;
1912 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, 1915 if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
1913 (u64)-1, 1, EXTENT_DIRTY)) { 1916 (u64)-1, 1, EXTENT_DIRTY, 0)) {
1914 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, 1917 ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
1915 start, &private_failure); 1918 start, &private_failure);
1916 if (ret == 0) { 1919 if (ret == 0) {
@@ -4818,10 +4821,11 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4818 goto fail; 4821 goto fail;
4819 4822
4820 /* 4823 /*
4821 * 1 item for inode ref 4824 * 2 items for inode and inode ref
4822 * 2 items for dir items 4825 * 2 items for dir items
4826 * 1 item for parent inode
4823 */ 4827 */
4824 trans = btrfs_start_transaction(root, 3); 4828 trans = btrfs_start_transaction(root, 5);
4825 if (IS_ERR(trans)) { 4829 if (IS_ERR(trans)) {
4826 err = PTR_ERR(trans); 4830 err = PTR_ERR(trans);
4827 goto fail; 4831 goto fail;
@@ -5277,6 +5281,128 @@ out:
5277 return em; 5281 return em;
5278} 5282}
5279 5283
5284struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
5285 size_t pg_offset, u64 start, u64 len,
5286 int create)
5287{
5288 struct extent_map *em;
5289 struct extent_map *hole_em = NULL;
5290 u64 range_start = start;
5291 u64 end;
5292 u64 found;
5293 u64 found_end;
5294 int err = 0;
5295
5296 em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
5297 if (IS_ERR(em))
5298 return em;
5299 if (em) {
5300 /*
5301 * if our em maps to a hole, there might
5302 * actually be delalloc bytes behind it
5303 */
5304 if (em->block_start != EXTENT_MAP_HOLE)
5305 return em;
5306 else
5307 hole_em = em;
5308 }
5309
5310 /* check to see if we've wrapped (len == -1 or similar) */
5311 end = start + len;
5312 if (end < start)
5313 end = (u64)-1;
5314 else
5315 end -= 1;
5316
5317 em = NULL;
5318
 5319 /* ok, we didn't find anything, let's look for delalloc */
5320 found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
5321 end, len, EXTENT_DELALLOC, 1);
5322 found_end = range_start + found;
5323 if (found_end < range_start)
5324 found_end = (u64)-1;
5325
5326 /*
5327 * we didn't find anything useful, return
5328 * the original results from get_extent()
5329 */
5330 if (range_start > end || found_end <= start) {
5331 em = hole_em;
5332 hole_em = NULL;
5333 goto out;
5334 }
5335
5336 /* adjust the range_start to make sure it doesn't
5337 * go backwards from the start they passed in
5338 */
 5339 range_start = max(start, range_start);
5340 found = found_end - range_start;
5341
5342 if (found > 0) {
5343 u64 hole_start = start;
5344 u64 hole_len = len;
5345
5346 em = alloc_extent_map(GFP_NOFS);
5347 if (!em) {
5348 err = -ENOMEM;
5349 goto out;
5350 }
5351 /*
5352 * when btrfs_get_extent can't find anything it
5353 * returns one huge hole
5354 *
5355 * make sure what it found really fits our range, and
5356 * adjust to make sure it is based on the start from
5357 * the caller
5358 */
5359 if (hole_em) {
5360 u64 calc_end = extent_map_end(hole_em);
5361
5362 if (calc_end <= start || (hole_em->start > end)) {
5363 free_extent_map(hole_em);
5364 hole_em = NULL;
5365 } else {
5366 hole_start = max(hole_em->start, start);
5367 hole_len = calc_end - hole_start;
5368 }
5369 }
5370 em->bdev = NULL;
5371 if (hole_em && range_start > hole_start) {
5372 /* our hole starts before our delalloc, so we
5373 * have to return just the parts of the hole
5374 * that go until the delalloc starts
5375 */
5376 em->len = min(hole_len,
5377 range_start - hole_start);
5378 em->start = hole_start;
5379 em->orig_start = hole_start;
5380 /*
5381 * don't adjust block start at all,
5382 * it is fixed at EXTENT_MAP_HOLE
5383 */
5384 em->block_start = hole_em->block_start;
5385 em->block_len = hole_len;
5386 } else {
5387 em->start = range_start;
5388 em->len = found;
5389 em->orig_start = range_start;
5390 em->block_start = EXTENT_MAP_DELALLOC;
5391 em->block_len = found;
5392 }
5393 } else if (hole_em) {
5394 return hole_em;
5395 }
5396out:
5397
5398 free_extent_map(hole_em);
5399 if (err) {
5400 free_extent_map(em);
5401 return ERR_PTR(err);
5402 }
5403 return em;
5404}
5405
5280static struct extent_map *btrfs_new_extent_direct(struct inode *inode, 5406static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
5281 u64 start, u64 len) 5407 u64 start, u64 len)
5282{ 5408{
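
btrfs_get_extent_fiemap() above guards against start + len wrapping when the caller passes len == (u64)-1; a standalone sketch of that inclusive-end computation:

    #include <stdint.h>
    #include <stdio.h>

    /* Turn (start, len) into an inclusive end offset without wrapping
     * past the top of the 64-bit space. */
    static uint64_t range_end(uint64_t start, uint64_t len)
    {
        uint64_t end = start + len;

        if (end < start)        /* wrapped: clamp to the maximum offset */
            return UINT64_MAX;
        return end - 1;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)range_end(4096, 8192));
        /* 12287 */
        printf("%llx\n", (unsigned long long)range_end(4096, UINT64_MAX));
        /* ffffffffffffffff */
        return 0;
    }
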
@@ -5931,6 +6057,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
5931 if (!skip_sum) { 6057 if (!skip_sum) {
5932 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); 6058 dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS);
5933 if (!dip->csums) { 6059 if (!dip->csums) {
6060 kfree(dip);
5934 ret = -ENOMEM; 6061 ret = -ENOMEM;
5935 goto free_ordered; 6062 goto free_ordered;
5936 } 6063 }
@@ -6099,7 +6226,7 @@ out:
6099static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 6226static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
6100 __u64 start, __u64 len) 6227 __u64 start, __u64 len)
6101{ 6228{
6102 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); 6229 return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
6103} 6230}
6104 6231
6105int btrfs_readpage(struct file *file, struct page *page) 6232int btrfs_readpage(struct file *file, struct page *page)
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 02d224e8c83f..5fdb2abc4fa7 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1071 if (copy_from_user(&flags, arg, sizeof(flags))) 1071 if (copy_from_user(&flags, arg, sizeof(flags)))
1072 return -EFAULT; 1072 return -EFAULT;
1073 1073
1074 if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) 1074 if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
1075 return -EINVAL; 1075 return -EINVAL;
1076 1076
1077 if (flags & ~BTRFS_SUBVOL_RDONLY) 1077 if (flags & ~BTRFS_SUBVOL_RDONLY)
1078 return -EOPNOTSUPP; 1078 return -EOPNOTSUPP;
1079 1079
1080 if (!is_owner_or_cap(inode))
1081 return -EACCES;
1082
1080 down_write(&root->fs_info->subvol_sem); 1083 down_write(&root->fs_info->subvol_sem);
1081 1084
1082 /* nothing to do */ 1085 /* nothing to do */
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
1097 goto out_reset; 1100 goto out_reset;
1098 } 1101 }
1099 1102
1100 ret = btrfs_update_root(trans, root, 1103 ret = btrfs_update_root(trans, root->fs_info->tree_root,
1101 &root->root_key, &root->root_item); 1104 &root->root_key, &root->root_item);
1102 1105
1103 btrfs_commit_transaction(trans, root); 1106 btrfs_commit_transaction(trans, root);
@@ -2208,7 +2211,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2208 int num_types = 4; 2211 int num_types = 4;
2209 int alloc_size; 2212 int alloc_size;
2210 int ret = 0; 2213 int ret = 0;
2211 int slot_count = 0; 2214 u64 slot_count = 0;
2212 int i, c; 2215 int i, c;
2213 2216
2214 if (copy_from_user(&space_args, 2217 if (copy_from_user(&space_args,
@@ -2247,7 +2250,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2247 goto out; 2250 goto out;
2248 } 2251 }
2249 2252
2250 slot_count = min_t(int, space_args.space_slots, slot_count); 2253 slot_count = min_t(u64, space_args.space_slots, slot_count);
2251 2254
2252 alloc_size = sizeof(*dest) * slot_count; 2255 alloc_size = sizeof(*dest) * slot_count;
2253 2256
@@ -2267,6 +2270,9 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2267 for (i = 0; i < num_types; i++) { 2270 for (i = 0; i < num_types; i++) {
2268 struct btrfs_space_info *tmp; 2271 struct btrfs_space_info *tmp;
2269 2272
2273 if (!slot_count)
2274 break;
2275
2270 info = NULL; 2276 info = NULL;
2271 rcu_read_lock(); 2277 rcu_read_lock();
2272 list_for_each_entry_rcu(tmp, &root->fs_info->space_info, 2278 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
@@ -2288,7 +2294,10 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2288 memcpy(dest, &space, sizeof(space)); 2294 memcpy(dest, &space, sizeof(space));
2289 dest++; 2295 dest++;
2290 space_args.total_spaces++; 2296 space_args.total_spaces++;
2297 slot_count--;
2291 } 2298 }
2299 if (!slot_count)
2300 break;
2292 } 2301 }
2293 up_read(&info->groups_sem); 2302 up_read(&info->groups_sem);
2294 } 2303 }
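
The slot_count change above (int to u64, and min_t(int, ...) to min_t(u64, ...)) avoids truncating a large 64-bit slot count before the comparison. A userspace sketch of the hazard on a typical toolchain, with MIN_T as a rough stand-in for the kernel's min_t():

    #include <stdint.h>
    #include <stdio.h>

    /* Rough stand-in for the kernel's min_t(type, a, b). */
    #define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        uint64_t slots = 0x100000001ULL;  /* large 64-bit slot count */
        uint64_t want  = 3;

        int bad = MIN_T(int, want, slots);            /* truncates slots first */
        uint64_t good = MIN_T(uint64_t, want, slots); /* keeps it intact       */

        printf("int: %d  u64: %llu\n", bad, (unsigned long long)good);
        /* int: 1  u64: 3 -- the int cast turned the big count into 1 */
        return 0;
    }
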
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index cc9b450399df..a178f5ebea78 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
280 unsigned long tot_out; 280 unsigned long tot_out;
281 unsigned long tot_len; 281 unsigned long tot_len;
282 char *buf; 282 char *buf;
283 bool may_late_unmap, need_unmap;
283 284
284 data_in = kmap(pages_in[0]); 285 data_in = kmap(pages_in[0]);
285 tot_len = read_compress_length(data_in); 286 tot_len = read_compress_length(data_in);
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
300 301
301 tot_in += in_len; 302 tot_in += in_len;
302 working_bytes = in_len; 303 working_bytes = in_len;
304 may_late_unmap = need_unmap = false;
303 305
304 /* fast path: avoid using the working buffer */ 306 /* fast path: avoid using the working buffer */
305 if (in_page_bytes_left >= in_len) { 307 if (in_page_bytes_left >= in_len) {
306 buf = data_in + in_offset; 308 buf = data_in + in_offset;
307 bytes = in_len; 309 bytes = in_len;
310 may_late_unmap = true;
308 goto cont; 311 goto cont;
309 } 312 }
310 313
@@ -329,14 +332,17 @@ cont:
329 if (working_bytes == 0 && tot_in >= tot_len) 332 if (working_bytes == 0 && tot_in >= tot_len)
330 break; 333 break;
331 334
332 kunmap(pages_in[page_in_index]); 335 if (page_in_index + 1 >= total_pages_in) {
333 page_in_index++;
334 if (page_in_index >= total_pages_in) {
335 ret = -1; 336 ret = -1;
336 data_in = NULL;
337 goto done; 337 goto done;
338 } 338 }
339 data_in = kmap(pages_in[page_in_index]); 339
340 if (may_late_unmap)
341 need_unmap = true;
342 else
343 kunmap(pages_in[page_in_index]);
344
345 data_in = kmap(pages_in[++page_in_index]);
340 346
341 in_page_bytes_left = PAGE_CACHE_SIZE; 347 in_page_bytes_left = PAGE_CACHE_SIZE;
342 in_offset = 0; 348 in_offset = 0;
@@ -346,6 +352,8 @@ cont:
346 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); 352 out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
347 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, 353 ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
348 &out_len); 354 &out_len);
355 if (need_unmap)
356 kunmap(pages_in[page_in_index - 1]);
349 if (ret != LZO_E_OK) { 357 if (ret != LZO_E_OK) {
350 printk(KERN_WARNING "btrfs decompress failed\n"); 358 printk(KERN_WARNING "btrfs decompress failed\n");
351 ret = -1; 359 ret = -1;
@@ -363,8 +371,7 @@ cont:
363 break; 371 break;
364 } 372 }
365done: 373done:
366 if (data_in) 374 kunmap(pages_in[page_in_index]);
367 kunmap(pages_in[page_in_index]);
368 return ret; 375 return ret;
369} 376}
370 377
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 1f5556acb530..31ade5802ae8 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1157,6 +1157,7 @@ static int clone_backref_node(struct btrfs_trans_handle *trans,
1157 new_node->bytenr = dest->node->start; 1157 new_node->bytenr = dest->node->start;
1158 new_node->level = node->level; 1158 new_node->level = node->level;
1159 new_node->lowest = node->lowest; 1159 new_node->lowest = node->lowest;
1160 new_node->checked = 1;
1160 new_node->root = dest; 1161 new_node->root = dest;
1161 1162
1162 if (!node->lowest) { 1163 if (!node->lowest) {
@@ -3653,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3653 u32 item_size; 3654 u32 item_size;
3654 int ret; 3655 int ret;
3655 int err = 0; 3656 int err = 0;
3657 int progress = 0;
3656 3658
3657 path = btrfs_alloc_path(); 3659 path = btrfs_alloc_path();
3658 if (!path) 3660 if (!path)
@@ -3665,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3665 } 3667 }
3666 3668
3667 while (1) { 3669 while (1) {
3670 progress++;
3668 trans = btrfs_start_transaction(rc->extent_root, 0); 3671 trans = btrfs_start_transaction(rc->extent_root, 0);
3669 BUG_ON(IS_ERR(trans)); 3672 BUG_ON(IS_ERR(trans));
3670 3673restart:
3671 if (update_backref_cache(trans, &rc->backref_cache)) { 3674 if (update_backref_cache(trans, &rc->backref_cache)) {
3672 btrfs_end_transaction(trans, rc->extent_root); 3675 btrfs_end_transaction(trans, rc->extent_root);
3673 continue; 3676 continue;
@@ -3780,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
3780 } 3783 }
3781 } 3784 }
3782 } 3785 }
3786 if (trans && progress && err == -ENOSPC) {
3787 ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
3788 rc->block_group->flags);
3789 if (ret == 0) {
3790 err = 0;
3791 progress = 0;
3792 goto restart;
3793 }
3794 }
3783 3795
3784 btrfs_release_path(rc->extent_root, path); 3796 btrfs_release_path(rc->extent_root, path);
3785 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, 3797 clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a004008f7d28..d39a9895d932 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -155,7 +155,8 @@ enum {
155 Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, 155 Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
156 Opt_compress_type, Opt_compress_force, Opt_compress_force_type, 156 Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
157 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, 157 Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
158 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, 158 Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
159 Opt_enospc_debug, Opt_err,
159}; 160};
160 161
161static match_table_t tokens = { 162static match_table_t tokens = {
@@ -184,6 +185,7 @@ static match_table_t tokens = {
184 {Opt_space_cache, "space_cache"}, 185 {Opt_space_cache, "space_cache"},
185 {Opt_clear_cache, "clear_cache"}, 186 {Opt_clear_cache, "clear_cache"},
186 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, 187 {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
188 {Opt_enospc_debug, "enospc_debug"},
187 {Opt_err, NULL}, 189 {Opt_err, NULL},
188}; 190};
189 191
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
358 case Opt_user_subvol_rm_allowed: 360 case Opt_user_subvol_rm_allowed:
359 btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); 361 btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
360 break; 362 break;
363 case Opt_enospc_debug:
364 btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
365 break;
361 case Opt_err: 366 case Opt_err:
362 printk(KERN_INFO "btrfs: unrecognized mount option " 367 printk(KERN_INFO "btrfs: unrecognized mount option "
363 "'%s'\n", p); 368 "'%s'\n", p);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 2636a051e4b2..dd13eb81ee40 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
1338 1338
1339 ret = btrfs_shrink_device(device, 0); 1339 ret = btrfs_shrink_device(device, 0);
1340 if (ret) 1340 if (ret)
1341 goto error_brelse; 1341 goto error_undo;
1342 1342
1343 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); 1343 ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
1344 if (ret) 1344 if (ret)
1345 goto error_brelse; 1345 goto error_undo;
1346 1346
1347 device->in_fs_metadata = 0; 1347 device->in_fs_metadata = 0;
1348 1348
@@ -1416,6 +1416,13 @@ out:
1416 mutex_unlock(&root->fs_info->volume_mutex); 1416 mutex_unlock(&root->fs_info->volume_mutex);
1417 mutex_unlock(&uuid_mutex); 1417 mutex_unlock(&uuid_mutex);
1418 return ret; 1418 return ret;
1419error_undo:
1420 if (device->writeable) {
1421 list_add(&device->dev_alloc_list,
1422 &root->fs_info->fs_devices->alloc_list);
1423 root->fs_info->fs_devices->rw_devices++;
1424 }
1425 goto error_brelse;
1419} 1426}
1420 1427
1421/* 1428/*
@@ -1605,12 +1612,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1605 1612
1606 ret = find_next_devid(root, &device->devid); 1613 ret = find_next_devid(root, &device->devid);
1607 if (ret) { 1614 if (ret) {
1615 kfree(device->name);
1608 kfree(device); 1616 kfree(device);
1609 goto error; 1617 goto error;
1610 } 1618 }
1611 1619
1612 trans = btrfs_start_transaction(root, 0); 1620 trans = btrfs_start_transaction(root, 0);
1613 if (IS_ERR(trans)) { 1621 if (IS_ERR(trans)) {
1622 kfree(device->name);
1614 kfree(device); 1623 kfree(device);
1615 ret = PTR_ERR(trans); 1624 ret = PTR_ERR(trans);
1616 goto error; 1625 goto error;
@@ -1631,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1631 device->dev_root = root->fs_info->dev_root; 1640 device->dev_root = root->fs_info->dev_root;
1632 device->bdev = bdev; 1641 device->bdev = bdev;
1633 device->in_fs_metadata = 1; 1642 device->in_fs_metadata = 1;
1634 device->mode = 0; 1643 device->mode = FMODE_EXCL;
1635 set_blocksize(device->bdev, 4096); 1644 set_blocksize(device->bdev, 4096);
1636 1645
1637 if (seeding_dev) { 1646 if (seeding_dev) {
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 0bc68de8edd7..ebafa65a29b6 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -409,7 +409,7 @@ more:
409 spin_lock(&inode->i_lock); 409 spin_lock(&inode->i_lock);
410 if (ci->i_release_count == fi->dir_release_count) { 410 if (ci->i_release_count == fi->dir_release_count) {
411 dout(" marking %p complete\n", inode); 411 dout(" marking %p complete\n", inode);
412 ci->i_ceph_flags |= CEPH_I_COMPLETE; 412 /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
413 ci->i_max_offset = filp->f_pos; 413 ci->i_max_offset = filp->f_pos;
414 } 414 }
415 spin_unlock(&inode->i_lock); 415 spin_unlock(&inode->i_lock);
@@ -496,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
496 496
497 /* .snap dir? */ 497 /* .snap dir? */
498 if (err == -ENOENT && 498 if (err == -ENOENT &&
499 ceph_snap(parent) == CEPH_NOSNAP &&
499 strcmp(dentry->d_name.name, 500 strcmp(dentry->d_name.name,
500 fsc->mount_options->snapdir_name) == 0) { 501 fsc->mount_options->snapdir_name) == 0) {
501 struct inode *inode = ceph_get_snapdir(parent); 502 struct inode *inode = ceph_get_snapdir(parent);
@@ -992,7 +993,7 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd)
992{ 993{
993 struct inode *dir; 994 struct inode *dir;
994 995
995 if (nd->flags & LOOKUP_RCU) 996 if (nd && nd->flags & LOOKUP_RCU)
996 return -ECHILD; 997 return -ECHILD;
997 998
998 dir = dentry->d_parent->d_inode; 999 dir = dentry->d_parent->d_inode;
@@ -1029,28 +1030,8 @@ out_touch:
1029static void ceph_dentry_release(struct dentry *dentry) 1030static void ceph_dentry_release(struct dentry *dentry)
1030{ 1031{
1031 struct ceph_dentry_info *di = ceph_dentry(dentry); 1032 struct ceph_dentry_info *di = ceph_dentry(dentry);
1032 struct inode *parent_inode = NULL;
1033 u64 snapid = CEPH_NOSNAP;
1034 1033
1035 if (!IS_ROOT(dentry)) { 1034 dout("dentry_release %p\n", dentry);
1036 parent_inode = dentry->d_parent->d_inode;
1037 if (parent_inode)
1038 snapid = ceph_snap(parent_inode);
1039 }
1040 dout("dentry_release %p parent %p\n", dentry, parent_inode);
1041 if (parent_inode && snapid != CEPH_SNAPDIR) {
1042 struct ceph_inode_info *ci = ceph_inode(parent_inode);
1043
1044 spin_lock(&parent_inode->i_lock);
1045 if (ci->i_shared_gen == di->lease_shared_gen ||
1046 snapid <= CEPH_MAXSNAP) {
1047 dout(" clearing %p complete (d_release)\n",
1048 parent_inode);
1049 ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
1050 ci->i_release_count++;
1051 }
1052 spin_unlock(&parent_inode->i_lock);
1053 }
1054 if (di) { 1035 if (di) {
1055 ceph_dentry_lru_del(dentry); 1036 ceph_dentry_lru_del(dentry);
1056 if (di->lease_session) 1037 if (di->lease_session)
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 5625463aa479..193bfa5e9cbd 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode,
707 (issued & CEPH_CAP_FILE_EXCL) == 0 && 707 (issued & CEPH_CAP_FILE_EXCL) == 0 &&
708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { 708 (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) {
709 dout(" marking %p complete (empty)\n", inode); 709 dout(" marking %p complete (empty)\n", inode);
710 ci->i_ceph_flags |= CEPH_I_COMPLETE; 710 /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */
711 ci->i_max_offset = 2; 711 ci->i_max_offset = 2;
712 } 712 }
713 break; 713 break;
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index 39c243acd062..f40b9139e437 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
584 if (lastinode) 584 if (lastinode)
585 iput(lastinode); 585 iput(lastinode);
586 586
587 dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); 587 list_for_each_entry(child, &realm->children, child_item) {
588 list_for_each_entry(child, &realm->children, child_item) 588 dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n",
589 queue_realm_cap_snaps(child); 589 realm, realm->ino, child, child->ino);
590 list_del_init(&child->dirty_item);
591 list_add(&child->dirty_item, &realm->dirty_item);
592 }
590 593
594 list_del_init(&realm->dirty_item);
591 dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); 595 dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino);
592} 596}
593 597
@@ -683,7 +687,9 @@ more:
683 * queue cap snaps _after_ we've built the new snap contexts, 687 * queue cap snaps _after_ we've built the new snap contexts,
684 * so that i_head_snapc can be set appropriately. 688 * so that i_head_snapc can be set appropriately.
685 */ 689 */
686 list_for_each_entry(realm, &dirty_realms, dirty_item) { 690 while (!list_empty(&dirty_realms)) {
691 realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
692 dirty_item);
687 queue_realm_cap_snaps(realm); 693 queue_realm_cap_snaps(realm);
688 } 694 }
689 695
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index 4a3330235d55..a9371b6578c0 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
127extern const struct export_operations cifs_export_ops; 127extern const struct export_operations cifs_export_ops;
128#endif /* EXPERIMENTAL */ 128#endif /* EXPERIMENTAL */
129 129
130#define CIFS_VERSION "1.70" 130#define CIFS_VERSION "1.71"
131#endif /* _CIFSFS_H */ 131#endif /* _CIFSFS_H */
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 8d9189f64477..79f641eeda30 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
170{ 170{
171 int rc, alen, slen; 171 int rc, alen, slen;
172 const char *pct; 172 const char *pct;
173 char *endp, scope_id[13]; 173 char scope_id[13];
174 struct sockaddr_in *s4 = (struct sockaddr_in *) dst; 174 struct sockaddr_in *s4 = (struct sockaddr_in *) dst;
175 struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; 175 struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst;
176 176
@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len)
197 memcpy(scope_id, pct + 1, slen); 197 memcpy(scope_id, pct + 1, slen);
198 scope_id[slen] = '\0'; 198 scope_id[slen] = '\0';
199 199
200 s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); 200 rc = strict_strtoul(scope_id, 0,
201 if (endp != scope_id + slen) 201 (unsigned long *)&s6->sin6_scope_id);
202 return 0; 202 rc = (rc == 0) ? 1 : 0;
203 } 203 }
204 204
205 return rc; 205 return rc;
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 1adc9625a344..16765703131b 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate:
656 656
657 if (type == LANMAN) { 657 if (type == LANMAN) {
658#ifdef CONFIG_CIFS_WEAK_PW_HASH 658#ifdef CONFIG_CIFS_WEAK_PW_HASH
659 char lnm_session_key[CIFS_SESS_KEY_SIZE]; 659 char lnm_session_key[CIFS_AUTH_RESP_SIZE];
660 660
661 pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; 661 pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE;
662 662
663 /* no capabilities flags in old lanman negotiation */ 663 /* no capabilities flags in old lanman negotiation */
664 664
665 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); 665 pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE);
666 666
667 /* Calculate hash with password and copy into bcc_ptr. 667 /* Calculate hash with password and copy into bcc_ptr.
668 * Encryption Key (stored as in cryptkey) gets used if the 668 * Encryption Key (stored as in cryptkey) gets used if the
@@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate:
675 true : false, lnm_session_key); 675 true : false, lnm_session_key);
676 676
677 ses->flags |= CIFS_SES_LANMAN; 677 ses->flags |= CIFS_SES_LANMAN;
678 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); 678 memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE);
679 bcc_ptr += CIFS_SESS_KEY_SIZE; 679 bcc_ptr += CIFS_AUTH_RESP_SIZE;
680 680
681 /* can not sign if LANMAN negotiated so no need 681 /* can not sign if LANMAN negotiated so no need
682 to calculate signing key? but what if server 682 to calculate signing key? but what if server
diff --git a/fs/compat.c b/fs/compat.c
index f6fd0a00e6cc..691c3fd8ce1d 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1228,7 +1228,9 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec,
1228 file = fget_light(fd, &fput_needed); 1228 file = fget_light(fd, &fput_needed);
1229 if (!file) 1229 if (!file)
1230 return -EBADF; 1230 return -EBADF;
1231 ret = compat_readv(file, vec, vlen, &pos); 1231 ret = -ESPIPE;
1232 if (file->f_mode & FMODE_PREAD)
1233 ret = compat_readv(file, vec, vlen, &pos);
1232 fput_light(file, fput_needed); 1234 fput_light(file, fput_needed);
1233 return ret; 1235 return ret;
1234} 1236}
@@ -1285,7 +1287,9 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec,
1285 file = fget_light(fd, &fput_needed); 1287 file = fget_light(fd, &fput_needed);
1286 if (!file) 1288 if (!file)
1287 return -EBADF; 1289 return -EBADF;
1288 ret = compat_writev(file, vec, vlen, &pos); 1290 ret = -ESPIPE;
1291 if (file->f_mode & FMODE_PWRITE)
1292 ret = compat_writev(file, vec, vlen, &pos);
1289 fput_light(file, fput_needed); 1293 fput_light(file, fput_needed);
1290 return ret; 1294 return ret;
1291} 1295}
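
The compat preadv/pwritev change above refuses positional I/O on files that cannot seek; a minimal sketch of that check, where FMODE_PREAD_SKETCH is an assumed stand-in flag and ESPIPE comes from errno.h:

    #include <errno.h>
    #include <stdio.h>

    #define FMODE_PREAD_SKETCH 0x1  /* assumed stand-in for FMODE_PREAD */

    /* Positional reads are refused with -ESPIPE on files that cannot
     * seek (pipes, some sockets); otherwise the readv would proceed. */
    static long positional_read_allowed(unsigned int f_mode)
    {
        if (!(f_mode & FMODE_PREAD_SKETCH))
            return -ESPIPE;
        return 0;
    }

    int main(void)
    {
        printf("%ld\n", positional_read_allowed(0));                  /* -ESPIPE */
        printf("%ld\n", positional_read_allowed(FMODE_PREAD_SKETCH)); /* 0       */
        return 0;
    }
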
diff --git a/fs/dcache.c b/fs/dcache.c
index 2a6bd9a4ae97..611ffe928c03 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1523,6 +1523,28 @@ struct dentry * d_alloc_root(struct inode * root_inode)
1523} 1523}
1524EXPORT_SYMBOL(d_alloc_root); 1524EXPORT_SYMBOL(d_alloc_root);
1525 1525
1526static struct dentry * __d_find_any_alias(struct inode *inode)
1527{
1528 struct dentry *alias;
1529
1530 if (list_empty(&inode->i_dentry))
1531 return NULL;
1532 alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
1533 __dget(alias);
1534 return alias;
1535}
1536
1537static struct dentry * d_find_any_alias(struct inode *inode)
1538{
1539 struct dentry *de;
1540
1541 spin_lock(&inode->i_lock);
1542 de = __d_find_any_alias(inode);
1543 spin_unlock(&inode->i_lock);
1544 return de;
1545}
1546
1547
1526/** 1548/**
1527 * d_obtain_alias - find or allocate a dentry for a given inode 1549 * d_obtain_alias - find or allocate a dentry for a given inode
1528 * @inode: inode to allocate the dentry for 1550 * @inode: inode to allocate the dentry for
@@ -1552,7 +1574,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1552 if (IS_ERR(inode)) 1574 if (IS_ERR(inode))
1553 return ERR_CAST(inode); 1575 return ERR_CAST(inode);
1554 1576
1555 res = d_find_alias(inode); 1577 res = d_find_any_alias(inode);
1556 if (res) 1578 if (res)
1557 goto out_iput; 1579 goto out_iput;
1558 1580
@@ -1565,7 +1587,7 @@ struct dentry *d_obtain_alias(struct inode *inode)
1565 1587
1566 1588
1567 spin_lock(&inode->i_lock); 1589 spin_lock(&inode->i_lock);
1568 res = __d_find_alias(inode, 0); 1590 res = __d_find_any_alias(inode);
1569 if (res) { 1591 if (res) {
1570 spin_unlock(&inode->i_lock); 1592 spin_unlock(&inode->i_lock);
1571 dput(tmp); 1593 dput(tmp);
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c
index 6fc4f319b550..534c1d46e69e 100644
--- a/fs/ecryptfs/dentry.c
+++ b/fs/ecryptfs/dentry.c
@@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd)
46{ 46{
47 struct dentry *lower_dentry; 47 struct dentry *lower_dentry;
48 struct vfsmount *lower_mnt; 48 struct vfsmount *lower_mnt;
49 struct dentry *dentry_save; 49 struct dentry *dentry_save = NULL;
50 struct vfsmount *vfsmount_save; 50 struct vfsmount *vfsmount_save = NULL;
51 int rc = 1; 51 int rc = 1;
52 52
53 if (nd->flags & LOOKUP_RCU) 53 if (nd && nd->flags & LOOKUP_RCU)
54 return -ECHILD; 54 return -ECHILD;
55 55
56 lower_dentry = ecryptfs_dentry_to_lower(dentry); 56 lower_dentry = ecryptfs_dentry_to_lower(dentry);
57 lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); 57 lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
58 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) 58 if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate)
59 goto out; 59 goto out;
60 dentry_save = nd->path.dentry; 60 if (nd) {
61 vfsmount_save = nd->path.mnt; 61 dentry_save = nd->path.dentry;
62 nd->path.dentry = lower_dentry; 62 vfsmount_save = nd->path.mnt;
63 nd->path.mnt = lower_mnt; 63 nd->path.dentry = lower_dentry;
64 nd->path.mnt = lower_mnt;
65 }
64 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); 66 rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd);
65 nd->path.dentry = dentry_save; 67 if (nd) {
66 nd->path.mnt = vfsmount_save; 68 nd->path.dentry = dentry_save;
69 nd->path.mnt = vfsmount_save;
70 }
67 if (dentry->d_inode) { 71 if (dentry->d_inode) {
68 struct inode *lower_inode = 72 struct inode *lower_inode =
69 ecryptfs_inode_to_lower(dentry->d_inode); 73 ecryptfs_inode_to_lower(dentry->d_inode);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index dbc84ed96336..e00753496e3e 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry,
632 u32 flags); 632 u32 flags);
633int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, 633int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
634 struct dentry *lower_dentry, 634 struct dentry *lower_dentry,
635 struct inode *ecryptfs_dir_inode, 635 struct inode *ecryptfs_dir_inode);
636 struct nameidata *ecryptfs_nd);
637int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, 636int ecryptfs_decode_and_decrypt_filename(char **decrypted_name,
638 size_t *decrypted_name_size, 637 size_t *decrypted_name_size,
639 struct dentry *ecryptfs_dentry, 638 struct dentry *ecryptfs_dentry,
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c
index 81e10e6a9443..7d1050e254f9 100644
--- a/fs/ecryptfs/file.c
+++ b/fs/ecryptfs/file.c
@@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
317 317
318const struct file_operations ecryptfs_dir_fops = { 318const struct file_operations ecryptfs_dir_fops = {
319 .readdir = ecryptfs_readdir, 319 .readdir = ecryptfs_readdir,
320 .read = generic_read_dir,
320 .unlocked_ioctl = ecryptfs_unlocked_ioctl, 321 .unlocked_ioctl = ecryptfs_unlocked_ioctl,
321#ifdef CONFIG_COMPAT 322#ifdef CONFIG_COMPAT
322 .compat_ioctl = ecryptfs_compat_ioctl, 323 .compat_ioctl = ecryptfs_compat_ioctl,
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index bd33f87a1907..b592938a84bc 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode,
74 unsigned int flags_save; 74 unsigned int flags_save;
75 int rc; 75 int rc;
76 76
77 dentry_save = nd->path.dentry; 77 if (nd) {
78 vfsmount_save = nd->path.mnt; 78 dentry_save = nd->path.dentry;
79 flags_save = nd->flags; 79 vfsmount_save = nd->path.mnt;
80 nd->path.dentry = lower_dentry; 80 flags_save = nd->flags;
81 nd->path.mnt = lower_mnt; 81 nd->path.dentry = lower_dentry;
82 nd->flags &= ~LOOKUP_OPEN; 82 nd->path.mnt = lower_mnt;
83 nd->flags &= ~LOOKUP_OPEN;
84 }
83 rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); 85 rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd);
84 nd->path.dentry = dentry_save; 86 if (nd) {
85 nd->path.mnt = vfsmount_save; 87 nd->path.dentry = dentry_save;
86 nd->flags = flags_save; 88 nd->path.mnt = vfsmount_save;
89 nd->flags = flags_save;
90 }
87 return rc; 91 return rc;
88} 92}
89 93
@@ -241,8 +245,7 @@ out:
241 */ 245 */
242int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, 246int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
243 struct dentry *lower_dentry, 247 struct dentry *lower_dentry,
244 struct inode *ecryptfs_dir_inode, 248 struct inode *ecryptfs_dir_inode)
245 struct nameidata *ecryptfs_nd)
246{ 249{
247 struct dentry *lower_dir_dentry; 250 struct dentry *lower_dir_dentry;
248 struct vfsmount *lower_mnt; 251 struct vfsmount *lower_mnt;
@@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry,
290 goto out; 293 goto out;
291 if (special_file(lower_inode->i_mode)) 294 if (special_file(lower_inode->i_mode))
292 goto out; 295 goto out;
293 if (!ecryptfs_nd)
294 goto out;
295 /* Released in this function */ 296 /* Released in this function */
296 page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); 297 page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER);
297 if (!page_virt) { 298 if (!page_virt) {
@@ -349,75 +350,6 @@ out:
349} 350}
350 351
351/** 352/**
352 * ecryptfs_new_lower_dentry
353 * @name: The name of the new dentry.
354 * @lower_dir_dentry: Parent directory of the new dentry.
355 * @nd: nameidata from last lookup.
356 *
357 * Create a new dentry or get it from lower parent dir.
358 */
359static struct dentry *
360ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry,
361 struct nameidata *nd)
362{
363 struct dentry *new_dentry;
364 struct dentry *tmp;
365 struct inode *lower_dir_inode;
366
367 lower_dir_inode = lower_dir_dentry->d_inode;
368
369 tmp = d_alloc(lower_dir_dentry, name);
370 if (!tmp)
371 return ERR_PTR(-ENOMEM);
372
373 mutex_lock(&lower_dir_inode->i_mutex);
374 new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd);
375 mutex_unlock(&lower_dir_inode->i_mutex);
376
377 if (!new_dentry)
378 new_dentry = tmp;
379 else
380 dput(tmp);
381
382 return new_dentry;
383}
384
385
386/**
387 * ecryptfs_lookup_one_lower
388 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
389 * @lower_dir_dentry: lower parent directory
390 * @name: lower file name
391 *
392 * Get the lower dentry from vfs. If lower dentry does not exist yet,
393 * create it.
394 */
395static struct dentry *
396ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry,
397 struct dentry *lower_dir_dentry, struct qstr *name)
398{
399 struct nameidata nd;
400 struct vfsmount *lower_mnt;
401 int err;
402
403 lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt(
404 ecryptfs_dentry->d_parent));
405 err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd);
406 mntput(lower_mnt);
407
408 if (!err) {
409 /* we dont need the mount */
410 mntput(nd.path.mnt);
411 return nd.path.dentry;
412 }
413 if (err != -ENOENT)
414 return ERR_PTR(err);
415
416 /* create a new lower dentry */
417 return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd);
418}
419
420/**
421 * ecryptfs_lookup 353 * ecryptfs_lookup
422 * @ecryptfs_dir_inode: The eCryptfs directory inode 354 * @ecryptfs_dir_inode: The eCryptfs directory inode
423 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up 355 * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
@@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
434 size_t encrypted_and_encoded_name_size; 366 size_t encrypted_and_encoded_name_size;
435 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; 367 struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL;
436 struct dentry *lower_dir_dentry, *lower_dentry; 368 struct dentry *lower_dir_dentry, *lower_dentry;
437 struct qstr lower_name;
438 int rc = 0; 369 int rc = 0;
439 370
440 if ((ecryptfs_dentry->d_name.len == 1 371 if ((ecryptfs_dentry->d_name.len == 1
@@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
444 goto out_d_drop; 375 goto out_d_drop;
445 } 376 }
446 lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); 377 lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent);
447 lower_name.name = ecryptfs_dentry->d_name.name; 378 mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
448 lower_name.len = ecryptfs_dentry->d_name.len; 379 lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name,
449 lower_name.hash = ecryptfs_dentry->d_name.hash; 380 lower_dir_dentry,
450 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { 381 ecryptfs_dentry->d_name.len);
451 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, 382 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
452 lower_dir_dentry->d_inode, &lower_name);
453 if (rc < 0)
454 goto out_d_drop;
455 }
456 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
457 lower_dir_dentry, &lower_name);
458 if (IS_ERR(lower_dentry)) { 383 if (IS_ERR(lower_dentry)) {
459 rc = PTR_ERR(lower_dentry); 384 rc = PTR_ERR(lower_dentry);
460 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " 385 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
461 "[%d] on lower_dentry = [%s]\n", __func__, rc, 386 "[%d] on lower_dentry = [%s]\n", __func__, rc,
462 encrypted_and_encoded_name); 387 encrypted_and_encoded_name);
463 goto out_d_drop; 388 goto out_d_drop;
@@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode,
479 "filename; rc = [%d]\n", __func__, rc); 404 "filename; rc = [%d]\n", __func__, rc);
480 goto out_d_drop; 405 goto out_d_drop;
481 } 406 }
482 lower_name.name = encrypted_and_encoded_name; 407 mutex_lock(&lower_dir_dentry->d_inode->i_mutex);
483 lower_name.len = encrypted_and_encoded_name_size; 408 lower_dentry = lookup_one_len(encrypted_and_encoded_name,
484 lower_name.hash = full_name_hash(lower_name.name, lower_name.len); 409 lower_dir_dentry,
485 if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { 410 encrypted_and_encoded_name_size);
486 rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, 411 mutex_unlock(&lower_dir_dentry->d_inode->i_mutex);
487 lower_dir_dentry->d_inode, &lower_name);
488 if (rc < 0)
489 goto out_d_drop;
490 }
491 lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry,
492 lower_dir_dentry, &lower_name);
493 if (IS_ERR(lower_dentry)) { 412 if (IS_ERR(lower_dentry)) {
494 rc = PTR_ERR(lower_dentry); 413 rc = PTR_ERR(lower_dentry);
495 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " 414 ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned "
496 "[%d] on lower_dentry = [%s]\n", __func__, rc, 415 "[%d] on lower_dentry = [%s]\n", __func__, rc,
497 encrypted_and_encoded_name); 416 encrypted_and_encoded_name);
498 goto out_d_drop; 417 goto out_d_drop;
499 } 418 }
500lookup_and_interpose: 419lookup_and_interpose:
501 rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, 420 rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry,
502 ecryptfs_dir_inode, 421 ecryptfs_dir_inode);
503 ecryptfs_nd);
504 goto out; 422 goto out;
505out_d_drop: 423out_d_drop:
506 d_drop(ecryptfs_dentry); 424 d_drop(ecryptfs_dentry);
@@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1092 rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), 1010 rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry),
1093 ecryptfs_dentry_to_lower(dentry), &lower_stat); 1011 ecryptfs_dentry_to_lower(dentry), &lower_stat);
1094 if (!rc) { 1012 if (!rc) {
1013 fsstack_copy_attr_all(dentry->d_inode,
1014 ecryptfs_inode_to_lower(dentry->d_inode));
1095 generic_fillattr(dentry->d_inode, stat); 1015 generic_fillattr(dentry->d_inode, stat);
1096 stat->blocks = lower_stat.blocks; 1016 stat->blocks = lower_stat.blocks;
1097 } 1017 }
diff --git a/fs/eventfd.c b/fs/eventfd.c
index e0194b3e14d6..d9a591773919 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get);
99 * @ctx: [in] Pointer to eventfd context. 99 * @ctx: [in] Pointer to eventfd context.
100 * 100 *
101 * The eventfd context reference must have been previously acquired either 101 * The eventfd context reference must have been previously acquired either
102 * with eventfd_ctx_get() or eventfd_ctx_fdget()). 102 * with eventfd_ctx_get() or eventfd_ctx_fdget().
103 */ 103 */
104void eventfd_ctx_put(struct eventfd_ctx *ctx) 104void eventfd_ctx_put(struct eventfd_ctx *ctx)
105{ 105{
@@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt)
146 * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. 146 * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue.
147 * @ctx: [in] Pointer to eventfd context. 147 * @ctx: [in] Pointer to eventfd context.
148 * @wait: [in] Wait queue to be removed. 148 * @wait: [in] Wait queue to be removed.
149 * @cnt: [out] Pointer to the 64bit conter value. 149 * @cnt: [out] Pointer to the 64-bit counter value.
150 * 150 *
151 * Returns zero if successful, or the following error codes: 151 * Returns %0 if successful, or the following error codes:
152 * 152 *
153 * -EAGAIN : The operation would have blocked. 153 * -EAGAIN : The operation would have blocked.
154 * 154 *
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue);
175 * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. 175 * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero.
176 * @ctx: [in] Pointer to eventfd context. 176 * @ctx: [in] Pointer to eventfd context.
177 * @no_wait: [in] Different from zero if the operation should not block. 177 * @no_wait: [in] Different from zero if the operation should not block.
178 * @cnt: [out] Pointer to the 64bit conter value. 178 * @cnt: [out] Pointer to the 64-bit counter value.
179 * 179 *
180 * Returns zero if successful, or the following error codes: 180 * Returns %0 if successful, or the following error codes:
181 * 181 *
182 * -EAGAIN : The operation would have blocked but @no_wait was nonzero. 182 * -EAGAIN : The operation would have blocked but @no_wait was non-zero.
183 * -ERESTARTSYS : A signal interrupted the wait operation. 183 * -ERESTARTSYS : A signal interrupted the wait operation.
184 * 184 *
185 * If @no_wait is zero, the function might sleep until the eventfd internal 185 * If @no_wait is zero, the function might sleep until the eventfd internal
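
The eventfd.c hunks above are documentation fixes, but the 64-bit counter they describe is the same value userspace exchanges through read() and write(). A small self-contained userspace example (not part of the patch) showing the non-blocking behaviour that eventfd_ctx_read() documents:

    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <errno.h>

    int main(void)
    {
        uint64_t val = 3;
        int efd = eventfd(0, EFD_NONBLOCK);   /* counter starts at 0 */

        if (efd < 0)
            return 1;
        write(efd, &val, sizeof(val));        /* counter += 3 */
        read(efd, &val, sizeof(val));         /* val = 3, counter reset to 0 */
        printf("counter was %llu\n", (unsigned long long)val);
        if (read(efd, &val, sizeof(val)) < 0 && errno == EAGAIN)
            printf("empty counter: read would have blocked\n");
        close(efd);
        return 0;
    }
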
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 267d0ada4541..4a09af9e9a63 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -63,6 +63,13 @@
63 * cleanup path and it is also acquired by eventpoll_release_file() 63 * cleanup path and it is also acquired by eventpoll_release_file()
64 * if a file has been pushed inside an epoll set and it is then 64 * if a file has been pushed inside an epoll set and it is then
65 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). 65 * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
66 * It is also acquired when inserting an epoll fd onto another epoll
67 * fd. We do this so that we walk the epoll tree and ensure that this
68 * insertion does not create a cycle of epoll file descriptors, which
69 * could lead to deadlock. We need a global mutex to prevent two
70 * simultaneous inserts (A into B and B into A) from racing and
71 * constructing a cycle without either insert observing that it is
72 * going to.
66 * It is possible to drop the "ep->mtx" and to use the global 73 * It is possible to drop the "ep->mtx" and to use the global
67 * mutex "epmutex" (together with "ep->lock") to have it working, 74 * mutex "epmutex" (together with "ep->lock") to have it working,
68 * but having "ep->mtx" will make the interface more scalable. 75 * but having "ep->mtx" will make the interface more scalable.
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
224 */ 231 */
225static DEFINE_MUTEX(epmutex); 232static DEFINE_MUTEX(epmutex);
226 233
234/* Used to check for epoll file descriptor inclusion loops */
235static struct nested_calls poll_loop_ncalls;
236
227/* Used for safe wake up implementation */ 237/* Used for safe wake up implementation */
228static struct nested_calls poll_safewake_ncalls; 238static struct nested_calls poll_safewake_ncalls;
229 239
@@ -1198,6 +1208,62 @@ retry:
1198 return res; 1208 return res;
1199} 1209}
1200 1210
1211/**
1212 * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
1213 * API, to verify that adding an epoll file inside another
1214 * epoll structure does not violate the constraints in
1215 * terms of closed loops or too deep chains (which can
1216 * result in excessive stack usage).
1217 *
1218 * @priv: Pointer to the epoll file to be currently checked.
1219 * @cookie: Original cookie for this call. This is the top-of-the-chain epoll
1220 * data structure pointer.
1221 * @call_nests: Current depth of the @ep_call_nested() call stack.
1222 *
1223 * Returns: Returns zero if adding the epoll @file inside current epoll
1224 * structure @ep does not violate the constraints, or -1 otherwise.
1225 */
1226static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
1227{
1228 int error = 0;
1229 struct file *file = priv;
1230 struct eventpoll *ep = file->private_data;
1231 struct rb_node *rbp;
1232 struct epitem *epi;
1233
1234 mutex_lock(&ep->mtx);
1235 for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
1236 epi = rb_entry(rbp, struct epitem, rbn);
1237 if (unlikely(is_file_epoll(epi->ffd.file))) {
1238 error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1239 ep_loop_check_proc, epi->ffd.file,
1240 epi->ffd.file->private_data, current);
1241 if (error != 0)
1242 break;
1243 }
1244 }
1245 mutex_unlock(&ep->mtx);
1246
1247 return error;
1248}
1249
1250/**
1251 * ep_loop_check - Performs a check to verify that adding an epoll file (@file)
1252 * another epoll file (represented by @ep) does not create
1253 * closed loops or too deep chains.
1254 *
1255 * @ep: Pointer to the epoll private data structure.
1256 * @file: Pointer to the epoll file to be checked.
1257 *
1258 * Returns: Returns zero if adding the epoll @file inside current epoll
1259 * structure @ep does not violate the constraints, or -1 otherwise.
1260 */
1261static int ep_loop_check(struct eventpoll *ep, struct file *file)
1262{
1263 return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
1264 ep_loop_check_proc, file, ep, current);
1265}
1266
1201/* 1267/*
1202 * Open an eventpoll file descriptor. 1268 * Open an eventpoll file descriptor.
1203 */ 1269 */
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1246 struct epoll_event __user *, event) 1312 struct epoll_event __user *, event)
1247{ 1313{
1248 int error; 1314 int error;
1315 int did_lock_epmutex = 0;
1249 struct file *file, *tfile; 1316 struct file *file, *tfile;
1250 struct eventpoll *ep; 1317 struct eventpoll *ep;
1251 struct epitem *epi; 1318 struct epitem *epi;
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1287 */ 1354 */
1288 ep = file->private_data; 1355 ep = file->private_data;
1289 1356
1357 /*
1358 * When we insert an epoll file descriptor inside another epoll file
1359 * descriptor, there is the chance of creating closed loops, which are
1360 * better handled here than in more critical paths.
1361 *
1362 * We hold epmutex across the loop check and the insert in this case, in
1363 * order to prevent two separate inserts from racing and each doing the
1364 * insert "at the same time" such that ep_loop_check passes on both
1365 * before either one does the insert, thereby creating a cycle.
1366 */
1367 if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
1368 mutex_lock(&epmutex);
1369 did_lock_epmutex = 1;
1370 error = -ELOOP;
1371 if (ep_loop_check(ep, tfile) != 0)
1372 goto error_tgt_fput;
1373 }
1374
1375
1290 mutex_lock(&ep->mtx); 1376 mutex_lock(&ep->mtx);
1291 1377
1292 /* 1378 /*
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
1322 mutex_unlock(&ep->mtx); 1408 mutex_unlock(&ep->mtx);
1323 1409
1324error_tgt_fput: 1410error_tgt_fput:
1411 if (unlikely(did_lock_epmutex))
1412 mutex_unlock(&epmutex);
1413
1325 fput(tfile); 1414 fput(tfile);
1326error_fput: 1415error_fput:
1327 fput(file); 1416 fput(file);
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void)
1441 EP_ITEM_COST; 1530 EP_ITEM_COST;
1442 BUG_ON(max_user_watches < 0); 1531 BUG_ON(max_user_watches < 0);
1443 1532
1533 /*
1534 * Initialize the structure used to perform epoll file descriptor
1535 * inclusion loops checks.
1536 */
1537 ep_nested_calls_init(&poll_loop_ncalls);
1538
1444 /* Initialize the structure used to perform safe poll wait head wake ups */ 1539 /* Initialize the structure used to perform safe poll wait head wake ups */
1445 ep_nested_calls_init(&poll_safewake_ncalls); 1540 ep_nested_calls_init(&poll_safewake_ncalls);
1446 1541
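
The eventpoll.c changes above add the loop check that makes a nested EPOLL_CTL_ADD fail with -ELOOP instead of building a cycle of epoll descriptors. A short userspace sketch of the behaviour the hunk introduces (error handling trimmed):

    #include <sys/epoll.h>
    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
        int a = epoll_create1(0);
        int b = epoll_create1(0);
        struct epoll_event ev = { .events = EPOLLIN };

        ev.data.fd = b;
        epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);   /* watch b from a: allowed */
        ev.data.fd = a;
        if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0 && errno == ELOOP)
            printf("adding a into b would close a cycle: ELOOP\n");
        return 0;
    }
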
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 264e95d02830..4d70db110cfc 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page); 272 new_de = exofs_find_entry(new_dir, new_dentry, &new_page);
273 if (!new_de) 273 if (!new_de)
274 goto out_dir; 274 goto out_dir;
275 inode_inc_link_count(old_inode);
276 err = exofs_set_link(new_dir, new_de, new_page, old_inode); 275 err = exofs_set_link(new_dir, new_de, new_page, old_inode);
277 new_inode->i_ctime = CURRENT_TIME; 276 new_inode->i_ctime = CURRENT_TIME;
278 if (dir_de) 277 if (dir_de)
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
286 if (new_dir->i_nlink >= EXOFS_LINK_MAX) 285 if (new_dir->i_nlink >= EXOFS_LINK_MAX)
287 goto out_dir; 286 goto out_dir;
288 } 287 }
289 inode_inc_link_count(old_inode);
290 err = exofs_add_link(new_dentry, old_inode); 288 err = exofs_add_link(new_dentry, old_inode);
291 if (err) { 289 if (err)
292 inode_dec_link_count(old_inode);
293 goto out_dir; 290 goto out_dir;
294 }
295 if (dir_de) 291 if (dir_de)
296 inode_inc_link_count(new_dir); 292 inode_inc_link_count(new_dir);
297 } 293 }
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry,
299 old_inode->i_ctime = CURRENT_TIME; 295 old_inode->i_ctime = CURRENT_TIME;
300 296
301 exofs_delete_entry(old_de, old_page); 297 exofs_delete_entry(old_de, old_page);
302 inode_dec_link_count(old_inode); 298 mark_inode_dirty(old_inode);
303 299
304 if (dir_de) { 300 if (dir_de) {
305 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); 301 err = exofs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c
index 2e1d8341d827..adb91855ccd0 100644
--- a/fs/ext2/namei.c
+++ b/fs/ext2/namei.c
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); 344 new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page);
345 if (!new_de) 345 if (!new_de)
346 goto out_dir; 346 goto out_dir;
347 inode_inc_link_count(old_inode);
348 ext2_set_link(new_dir, new_de, new_page, old_inode, 1); 347 ext2_set_link(new_dir, new_de, new_page, old_inode, 1);
349 new_inode->i_ctime = CURRENT_TIME_SEC; 348 new_inode->i_ctime = CURRENT_TIME_SEC;
350 if (dir_de) 349 if (dir_de)
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
356 if (new_dir->i_nlink >= EXT2_LINK_MAX) 355 if (new_dir->i_nlink >= EXT2_LINK_MAX)
357 goto out_dir; 356 goto out_dir;
358 } 357 }
359 inode_inc_link_count(old_inode);
360 err = ext2_add_link(new_dentry, old_inode); 358 err = ext2_add_link(new_dentry, old_inode);
361 if (err) { 359 if (err)
362 inode_dec_link_count(old_inode);
363 goto out_dir; 360 goto out_dir;
364 }
365 if (dir_de) 361 if (dir_de)
366 inode_inc_link_count(new_dir); 362 inode_inc_link_count(new_dir);
367 } 363 }
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry,
369 /* 365 /*
370 * Like most other Unix systems, set the ctime for inodes on a 366 * Like most other Unix systems, set the ctime for inodes on a
371 * rename. 367 * rename.
372 * inode_dec_link_count() will mark the inode dirty.
373 */ 368 */
374 old_inode->i_ctime = CURRENT_TIME_SEC; 369 old_inode->i_ctime = CURRENT_TIME_SEC;
370 mark_inode_dirty(old_inode);
375 371
376 ext2_delete_entry (old_de, old_page); 372 ext2_delete_entry (old_de, old_page);
377 inode_dec_link_count(old_inode);
378 373
379 if (dir_de) { 374 if (dir_de) {
380 if (old_dir != new_dir) 375 if (old_dir != new_dir)
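
The exofs and ext2 rename hunks above (and the matching minix hunk further down) drop the temporary inode_inc_link_count()/inode_dec_link_count() pair around the old inode; its useful side effect, dirtying the inode after the ctime update, is now done explicitly. A rough sketch of the resulting shape, with fs_add_link()/fs_delete_entry() standing in for the per-filesystem helpers:

    /* Tail of a rename after the simplification above; fs_* are placeholders. */
    err = fs_add_link(new_dentry, old_inode);   /* no temporary nlink bump */
    if (err)
        goto out_dir;
    if (dir_de)
        inode_inc_link_count(new_dir);

    old_inode->i_ctime = CURRENT_TIME_SEC;
    mark_inode_dirty(old_inode);          /* previously a side effect of inode_dec_link_count() */

    fs_delete_entry(old_de, old_page);
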
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index f88f752babd9..adae3fb7451a 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -43,7 +43,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry)
43 43
44static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) 44static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
45{ 45{
46 if (nd->flags & LOOKUP_RCU) 46 if (nd && nd->flags & LOOKUP_RCU)
47 return -ECHILD; 47 return -ECHILD;
48 48
49 /* This is not negative dentry. Always valid. */ 49 /* This is not negative dentry. Always valid. */
@@ -54,7 +54,7 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd)
54 54
55static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) 55static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd)
56{ 56{
57 if (nd->flags & LOOKUP_RCU) 57 if (nd && nd->flags & LOOKUP_RCU)
58 return -ECHILD; 58 return -ECHILD;
59 59
60 /* 60 /*
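
The vfat hunks above, like the fuse, gfs2 and jfs hunks below, guard the LOOKUP_RCU test with a NULL check because ->d_revalidate() can be invoked without a nameidata. A generic sketch of the pattern (example_d_revalidate is a placeholder name):

    /* d_revalidate that tolerates nd == NULL and opts out of rcu-walk. */
    static int example_d_revalidate(struct dentry *dentry, struct nameidata *nd)
    {
        if (nd && nd->flags & LOOKUP_RCU)
            return -ECHILD;     /* ask the VFS to retry in ref-walk mode */

        /* ... normal, possibly sleeping, revalidation work here ... */
        return 1;               /* dentry is still valid */
    }
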
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index bfed8447ed80..8bd0ef9286c3 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -158,7 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd)
158{ 158{
159 struct inode *inode; 159 struct inode *inode;
160 160
161 if (nd->flags & LOOKUP_RCU) 161 if (nd && nd->flags & LOOKUP_RCU)
162 return -ECHILD; 162 return -ECHILD;
163 163
164 inode = entry->d_inode; 164 inode = entry->d_inode;
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
1283 if (err) 1283 if (err)
1284 return err; 1284 return err;
1285 1285
1286 if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) 1286 if (attr->ia_valid & ATTR_OPEN) {
1287 return 0; 1287 if (fc->atomic_o_trunc)
1288 return 0;
1289 file = NULL;
1290 }
1288 1291
1289 if (attr->ia_valid & ATTR_SIZE) 1292 if (attr->ia_valid & ATTR_SIZE)
1290 is_truncate = true; 1293 is_truncate = true;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 95da1bc1c826..9e0832dbb1e3 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
86 return ff; 86 return ff;
87} 87}
88 88
89static void fuse_release_async(struct work_struct *work)
90{
91 struct fuse_req *req;
92 struct fuse_conn *fc;
93 struct path path;
94
95 req = container_of(work, struct fuse_req, misc.release.work);
96 path = req->misc.release.path;
97 fc = get_fuse_conn(path.dentry->d_inode);
98
99 fuse_put_request(fc, req);
100 path_put(&path);
101}
102
89static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) 103static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
90{ 104{
91 path_put(&req->misc.release.path); 105 if (fc->destroy_req) {
106 /*
107 * If this is a fuseblk mount, then it's possible that
108 * releasing the path will result in releasing the
109 * super block and sending the DESTROY request. If
110 * the server is single threaded, this would hang.
111 * For this reason do the path_put() in a separate
112 * thread.
113 */
114 atomic_inc(&req->count);
115 INIT_WORK(&req->misc.release.work, fuse_release_async);
116 schedule_work(&req->misc.release.work);
117 } else {
118 path_put(&req->misc.release.path);
119 }
92} 120}
93 121
94static void fuse_file_put(struct fuse_file *ff) 122static void fuse_file_put(struct fuse_file *ff, bool sync)
95{ 123{
96 if (atomic_dec_and_test(&ff->count)) { 124 if (atomic_dec_and_test(&ff->count)) {
97 struct fuse_req *req = ff->reserved_req; 125 struct fuse_req *req = ff->reserved_req;
98 126
99 req->end = fuse_release_end; 127 if (sync) {
100 fuse_request_send_background(ff->fc, req); 128 fuse_request_send(ff->fc, req);
129 path_put(&req->misc.release.path);
130 fuse_put_request(ff->fc, req);
131 } else {
132 req->end = fuse_release_end;
133 fuse_request_send_background(ff->fc, req);
134 }
101 kfree(ff); 135 kfree(ff);
102 } 136 }
103} 137}
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
219 * Normally this will send the RELEASE request, however if 253 * Normally this will send the RELEASE request, however if
220 * some asynchronous READ or WRITE requests are outstanding, 254 * some asynchronous READ or WRITE requests are outstanding,
221 * the sending will be delayed. 255 * the sending will be delayed.
256 *
257 * Make the release synchronous if this is a fuseblk mount;
258 * synchronous RELEASE is allowed (and desirable) in this case
259 * because the server can be trusted not to screw up.
222 */ 260 */
223 fuse_file_put(ff); 261 fuse_file_put(ff, ff->fc->destroy_req != NULL);
224} 262}
225 263
226static int fuse_open(struct inode *inode, struct file *file) 264static int fuse_open(struct inode *inode, struct file *file)
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
558 page_cache_release(page); 596 page_cache_release(page);
559 } 597 }
560 if (req->ff) 598 if (req->ff)
561 fuse_file_put(req->ff); 599 fuse_file_put(req->ff, false);
562} 600}
563 601
564static void fuse_send_readpages(struct fuse_req *req, struct file *file) 602static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
1137static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) 1175static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
1138{ 1176{
1139 __free_page(req->pages[0]); 1177 __free_page(req->pages[0]);
1140 fuse_file_put(req->ff); 1178 fuse_file_put(req->ff, false);
1141} 1179}
1142 1180
1143static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) 1181static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
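
The fuse/file.c hunk above defers path_put() to a workqueue on fuseblk mounts because dropping the last path reference can send the superblock DESTROY request and deadlock a single-threaded server. A minimal sketch of the same defer-to-worker idea, with hypothetical names (deferred_put, deferred_put_fn):

    /* Push a potentially blocking put out of the current context. */
    struct deferred_put {
        struct work_struct work;
        struct path path;
    };

    static void deferred_put_fn(struct work_struct *work)
    {
        struct deferred_put *dp = container_of(work, struct deferred_put, work);

        path_put(&dp->path);    /* may drop the last sb reference; safe in a worker */
        kfree(dp);
    }

    /* Caller, instead of calling path_put() inline:
     *     INIT_WORK(&dp->work, deferred_put_fn);
     *     schedule_work(&dp->work);
     */
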
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index ae5744a2f9e9..d4286947bc2c 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -21,6 +21,7 @@
21#include <linux/rwsem.h> 21#include <linux/rwsem.h>
22#include <linux/rbtree.h> 22#include <linux/rbtree.h>
23#include <linux/poll.h> 23#include <linux/poll.h>
24#include <linux/workqueue.h>
24 25
25/** Max number of pages that can be used in a single read request */ 26/** Max number of pages that can be used in a single read request */
26#define FUSE_MAX_PAGES_PER_REQ 32 27#define FUSE_MAX_PAGES_PER_REQ 32
@@ -262,7 +263,10 @@ struct fuse_req {
262 /** Data for asynchronous requests */ 263 /** Data for asynchronous requests */
263 union { 264 union {
264 struct { 265 struct {
265 struct fuse_release_in in; 266 union {
267 struct fuse_release_in in;
268 struct work_struct work;
269 };
266 struct path path; 270 struct path path;
267 } release; 271 } release;
268 struct fuse_init_in init_in; 272 struct fuse_init_in init_in;
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c
index 4a456338b873..0da8da2c991d 100644
--- a/fs/gfs2/dentry.c
+++ b/fs/gfs2/dentry.c
@@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd)
44 int error; 44 int error;
45 int had_lock = 0; 45 int had_lock = 0;
46 46
47 if (nd->flags & LOOKUP_RCU) 47 if (nd && nd->flags & LOOKUP_RCU)
48 return -ECHILD; 48 return -ECHILD;
49 49
50 parent = dget_parent(dentry); 50 parent = dget_parent(dentry);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 08a8beb152e6..7cd9a5a68d59 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void)
1779#endif 1779#endif
1780 1780
1781 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | 1781 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
1782 WQ_HIGHPRI | WQ_FREEZEABLE, 0); 1782 WQ_HIGHPRI | WQ_FREEZABLE, 0);
1783 if (IS_ERR(glock_workqueue)) 1783 if (IS_ERR(glock_workqueue))
1784 return PTR_ERR(glock_workqueue); 1784 return PTR_ERR(glock_workqueue);
1785 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", 1785 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
1786 WQ_MEM_RECLAIM | WQ_FREEZEABLE, 1786 WQ_MEM_RECLAIM | WQ_FREEZABLE,
1787 0); 1787 0);
1788 if (IS_ERR(gfs2_delete_workqueue)) { 1788 if (IS_ERR(gfs2_delete_workqueue)) {
1789 destroy_workqueue(glock_workqueue); 1789 destroy_workqueue(glock_workqueue);
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index ebef7ab6e17e..72c31a315d96 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo)
59 struct address_space *mapping = (struct address_space *)(gl + 1); 59 struct address_space *mapping = (struct address_space *)(gl + 1);
60 60
61 gfs2_init_glock_once(gl); 61 gfs2_init_glock_once(gl);
62 memset(mapping, 0, sizeof(*mapping)); 62 address_space_init_once(mapping);
63 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
64 spin_lock_init(&mapping->tree_lock);
65 spin_lock_init(&mapping->i_mmap_lock);
66 INIT_LIST_HEAD(&mapping->private_list);
67 spin_lock_init(&mapping->private_lock);
68 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
69 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
70} 63}
71 64
72/** 65/**
@@ -144,7 +137,7 @@ static int __init init_gfs2_fs(void)
144 137
145 error = -ENOMEM; 138 error = -ENOMEM;
146 gfs_recovery_wq = alloc_workqueue("gfs_recovery", 139 gfs_recovery_wq = alloc_workqueue("gfs_recovery",
147 WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); 140 WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
148 if (!gfs_recovery_wq) 141 if (!gfs_recovery_wq)
149 goto fail_wq; 142 goto fail_wq;
150 143
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c
index afa66aaa2237..b4d70b13be92 100644
--- a/fs/hfs/dir.c
+++ b/fs/hfs/dir.c
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
238} 238}
239 239
240/* 240/*
241 * hfs_unlink() 241 * hfs_remove()
242 * 242 *
243 * This is the unlink() entry in the inode_operations structure for 243 * This serves as both unlink() and rmdir() in the inode_operations
244 * regular HFS directories. The purpose is to delete an existing 244 * structure for regular HFS directories. The purpose is to delete
245 * file, given the inode for the parent directory and the name 245 * an existing child, given the inode for the parent directory and
246 * (and its length) of the existing file. 246 * the name (and its length) of the existing directory.
247 */
248static int hfs_unlink(struct inode *dir, struct dentry *dentry)
249{
250 struct inode *inode;
251 int res;
252
253 inode = dentry->d_inode;
254 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
255 if (res)
256 return res;
257
258 drop_nlink(inode);
259 hfs_delete_inode(inode);
260 inode->i_ctime = CURRENT_TIME_SEC;
261 mark_inode_dirty(inode);
262
263 return res;
264}
265
266/*
267 * hfs_rmdir()
268 * 247 *
269 * This is the rmdir() entry in the inode_operations structure for 248 * HFS does not have hardlinks, so both rmdir and unlink set the
270 * regular HFS directories. The purpose is to delete an existing 249 * link count to 0. The only difference is the emptiness check.
271 * directory, given the inode for the parent directory and the name
272 * (and its length) of the existing directory.
273 */ 250 */
274static int hfs_rmdir(struct inode *dir, struct dentry *dentry) 251static int hfs_remove(struct inode *dir, struct dentry *dentry)
275{ 252{
276 struct inode *inode; 253 struct inode *inode = dentry->d_inode;
277 int res; 254 int res;
278 255
279 inode = dentry->d_inode; 256 if (S_ISDIR(inode->i_mode) && inode->i_size != 2)
280 if (inode->i_size != 2)
281 return -ENOTEMPTY; 257 return -ENOTEMPTY;
282 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); 258 res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name);
283 if (res) 259 if (res)
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry,
307 283
308 /* Unlink destination if it already exists */ 284 /* Unlink destination if it already exists */
309 if (new_dentry->d_inode) { 285 if (new_dentry->d_inode) {
310 res = hfs_unlink(new_dir, new_dentry); 286 res = hfs_remove(new_dir, new_dentry);
311 if (res) 287 if (res)
312 return res; 288 return res;
313 } 289 }
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = {
332const struct inode_operations hfs_dir_inode_operations = { 308const struct inode_operations hfs_dir_inode_operations = {
333 .create = hfs_create, 309 .create = hfs_create,
334 .lookup = hfs_lookup, 310 .lookup = hfs_lookup,
335 .unlink = hfs_unlink, 311 .unlink = hfs_remove,
336 .mkdir = hfs_mkdir, 312 .mkdir = hfs_mkdir,
337 .rmdir = hfs_rmdir, 313 .rmdir = hfs_remove,
338 .rename = hfs_rename, 314 .rename = hfs_rename,
339 .setattr = hfs_inode_setattr, 315 .setattr = hfs_inode_setattr,
340}; 316};
diff --git a/fs/inode.c b/fs/inode.c
index da85e56378f3..0647d80accf6 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode)
295 call_rcu(&inode->i_rcu, i_callback); 295 call_rcu(&inode->i_rcu, i_callback);
296} 296}
297 297
298void address_space_init_once(struct address_space *mapping)
299{
300 memset(mapping, 0, sizeof(*mapping));
301 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
302 spin_lock_init(&mapping->tree_lock);
303 spin_lock_init(&mapping->i_mmap_lock);
304 INIT_LIST_HEAD(&mapping->private_list);
305 spin_lock_init(&mapping->private_lock);
306 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
307 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
308 mutex_init(&mapping->unmap_mutex);
309}
310EXPORT_SYMBOL(address_space_init_once);
311
298/* 312/*
299 * These are initializations that only need to be done 313 * These are initializations that only need to be done
300 * once, because the fields are idempotent across use 314 * once, because the fields are idempotent across use
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode)
308 INIT_LIST_HEAD(&inode->i_devices); 322 INIT_LIST_HEAD(&inode->i_devices);
309 INIT_LIST_HEAD(&inode->i_wb_list); 323 INIT_LIST_HEAD(&inode->i_wb_list);
310 INIT_LIST_HEAD(&inode->i_lru); 324 INIT_LIST_HEAD(&inode->i_lru);
311 INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); 325 address_space_init_once(&inode->i_data);
312 spin_lock_init(&inode->i_data.tree_lock);
313 spin_lock_init(&inode->i_data.i_mmap_lock);
314 INIT_LIST_HEAD(&inode->i_data.private_list);
315 spin_lock_init(&inode->i_data.private_lock);
316 INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap);
317 INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
318 i_size_ordered_init(inode); 326 i_size_ordered_init(inode);
319#ifdef CONFIG_FSNOTIFY 327#ifdef CONFIG_FSNOTIFY
320 INIT_HLIST_HEAD(&inode->i_fsnotify_marks); 328 INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb)
540/** 548/**
541 * invalidate_inodes - attempt to free all inodes on a superblock 549 * invalidate_inodes - attempt to free all inodes on a superblock
542 * @sb: superblock to operate on 550 * @sb: superblock to operate on
551 * @kill_dirty: flag to guide handling of dirty inodes
543 * 552 *
544 * Attempts to free all inodes for a given superblock. If there were any 553 * Attempts to free all inodes for a given superblock. If there were any
545 * busy inodes return a non-zero value, else zero. 554 * busy inodes return a non-zero value, else zero.
555 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
556 * them as busy.
546 */ 557 */
547int invalidate_inodes(struct super_block *sb) 558int invalidate_inodes(struct super_block *sb, bool kill_dirty)
548{ 559{
549 int busy = 0; 560 int busy = 0;
550 struct inode *inode, *next; 561 struct inode *inode, *next;
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
556 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { 567 list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
557 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) 568 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
558 continue; 569 continue;
570 if (inode->i_state & I_DIRTY && !kill_dirty) {
571 busy = 1;
572 continue;
573 }
559 if (atomic_read(&inode->i_count)) { 574 if (atomic_read(&inode->i_count)) {
560 busy = 1; 575 busy = 1;
561 continue; 576 continue;
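
The fs/inode.c hunks above factor the address_space initialisation into address_space_init_once() and give invalidate_inodes() a @kill_dirty flag. A brief illustrative caller for the new flag (example_drop_inodes is hypothetical):

    /* force == false: dirty inodes are treated as busy (old behaviour);
     * force == true:  dirty inodes are discarded as well. */
    static int example_drop_inodes(struct super_block *sb, bool force)
    {
        return invalidate_inodes(sb, force);  /* non-zero: busy inodes remained */
    }
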
diff --git a/fs/internal.h b/fs/internal.h
index 0663568b1247..9b976b57d7fe 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *);
112 */ 112 */
113extern int get_nr_dirty_inodes(void); 113extern int get_nr_dirty_inodes(void);
114extern void evict_inodes(struct super_block *); 114extern void evict_inodes(struct super_block *);
115extern int invalidate_inodes(struct super_block *); 115extern int invalidate_inodes(struct super_block *, bool);
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 81ead850ddb6..5a2b269428a6 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1600,7 +1600,7 @@ out:
1600 1600
1601static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) 1601static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd)
1602{ 1602{
1603 if (nd->flags & LOOKUP_RCU) 1603 if (nd && nd->flags & LOOKUP_RCU)
1604 return -ECHILD; 1604 return -ECHILD;
1605 /* 1605 /*
1606 * This is not negative dentry. Always valid. 1606 * This is not negative dentry. Always valid.
diff --git a/fs/minix/namei.c b/fs/minix/namei.c
index ce7337ddfdbf..6e6777f1b4b2 100644
--- a/fs/minix/namei.c
+++ b/fs/minix/namei.c
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
213 new_de = minix_find_entry(new_dentry, &new_page); 213 new_de = minix_find_entry(new_dentry, &new_page);
214 if (!new_de) 214 if (!new_de)
215 goto out_dir; 215 goto out_dir;
216 inode_inc_link_count(old_inode);
217 minix_set_link(new_de, new_page, old_inode); 216 minix_set_link(new_de, new_page, old_inode);
218 new_inode->i_ctime = CURRENT_TIME_SEC; 217 new_inode->i_ctime = CURRENT_TIME_SEC;
219 if (dir_de) 218 if (dir_de)
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry,
225 if (new_dir->i_nlink >= info->s_link_max) 224 if (new_dir->i_nlink >= info->s_link_max)
226 goto out_dir; 225 goto out_dir;
227 } 226 }
228 inode_inc_link_count(old_inode);
229 err = minix_add_link(new_dentry, old_inode); 227 err = minix_add_link(new_dentry, old_inode);
230 if (err) { 228 if (err)
231 inode_dec_link_count(old_inode);
232 goto out_dir; 229 goto out_dir;
233 }
234 if (dir_de) 230 if (dir_de)
235 inode_inc_link_count(new_dir); 231 inode_inc_link_count(new_dir);
236 } 232 }
237 233
238 minix_delete_entry(old_de, old_page); 234 minix_delete_entry(old_de, old_page);
239 inode_dec_link_count(old_inode); 235 mark_inode_dirty(old_inode);
240 236
241 if (dir_de) { 237 if (dir_de) {
242 minix_set_link(dir_de, dir_page, new_dir); 238 minix_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/namei.c b/fs/namei.c
index ec4b2d0190a8..a4689eb2df28 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -455,14 +455,6 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry
455 struct fs_struct *fs = current->fs; 455 struct fs_struct *fs = current->fs;
456 struct dentry *parent = nd->path.dentry; 456 struct dentry *parent = nd->path.dentry;
457 457
458 /*
459 * It can be possible to revalidate the dentry that we started
460 * the path walk with. force_reval_path may also revalidate the
461 * dentry already committed to the nameidata.
462 */
463 if (unlikely(parent == dentry))
464 return nameidata_drop_rcu(nd);
465
466 BUG_ON(!(nd->flags & LOOKUP_RCU)); 458 BUG_ON(!(nd->flags & LOOKUP_RCU));
467 if (nd->root.mnt) { 459 if (nd->root.mnt) {
468 spin_lock(&fs->lock); 460 spin_lock(&fs->lock);
@@ -571,33 +563,15 @@ void release_open_intent(struct nameidata *nd)
571 } 563 }
572} 564}
573 565
574/* 566static inline int d_revalidate(struct dentry *dentry, struct nameidata *nd)
575 * Call d_revalidate and handle filesystems that request rcu-walk
576 * to be dropped. This may be called and return in rcu-walk mode,
577 * regardless of success or error. If -ECHILD is returned, the caller
578 * must return -ECHILD back up the path walk stack so path walk may
579 * be restarted in ref-walk mode.
580 */
581static int d_revalidate(struct dentry *dentry, struct nameidata *nd)
582{ 567{
583 int status; 568 return dentry->d_op->d_revalidate(dentry, nd);
584
585 status = dentry->d_op->d_revalidate(dentry, nd);
586 if (status == -ECHILD) {
587 if (nameidata_dentry_drop_rcu(nd, dentry))
588 return status;
589 status = dentry->d_op->d_revalidate(dentry, nd);
590 }
591
592 return status;
593} 569}
594 570
595static inline struct dentry * 571static struct dentry *
596do_revalidate(struct dentry *dentry, struct nameidata *nd) 572do_revalidate(struct dentry *dentry, struct nameidata *nd)
597{ 573{
598 int status; 574 int status = d_revalidate(dentry, nd);
599
600 status = d_revalidate(dentry, nd);
601 if (unlikely(status <= 0)) { 575 if (unlikely(status <= 0)) {
602 /* 576 /*
603 * The dentry failed validation. 577 * The dentry failed validation.
@@ -606,24 +580,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd)
606 * to return a fail status. 580 * to return a fail status.
607 */ 581 */
608 if (status < 0) { 582 if (status < 0) {
609 /* If we're in rcu-walk, we don't have a ref */ 583 dput(dentry);
610 if (!(nd->flags & LOOKUP_RCU))
611 dput(dentry);
612 dentry = ERR_PTR(status); 584 dentry = ERR_PTR(status);
613 585 } else if (!d_invalidate(dentry)) {
614 } else { 586 dput(dentry);
615 /* Don't d_invalidate in rcu-walk mode */ 587 dentry = NULL;
616 if (nameidata_dentry_drop_rcu_maybe(nd, dentry))
617 return ERR_PTR(-ECHILD);
618 if (!d_invalidate(dentry)) {
619 dput(dentry);
620 dentry = NULL;
621 }
622 } 588 }
623 } 589 }
624 return dentry; 590 return dentry;
625} 591}
626 592
593static inline struct dentry *
594do_revalidate_rcu(struct dentry *dentry, struct nameidata *nd)
595{
596 int status = d_revalidate(dentry, nd);
597 if (likely(status > 0))
598 return dentry;
599 if (status == -ECHILD) {
600 if (nameidata_dentry_drop_rcu(nd, dentry))
601 return ERR_PTR(-ECHILD);
602 return do_revalidate(dentry, nd);
603 }
604 if (status < 0)
605 return ERR_PTR(status);
606 /* Don't d_invalidate in rcu-walk mode */
607 if (nameidata_dentry_drop_rcu(nd, dentry))
608 return ERR_PTR(-ECHILD);
609 if (!d_invalidate(dentry)) {
610 dput(dentry);
611 dentry = NULL;
612 }
613 return dentry;
614}
615
627static inline int need_reval_dot(struct dentry *dentry) 616static inline int need_reval_dot(struct dentry *dentry)
628{ 617{
629 if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE))) 618 if (likely(!(dentry->d_flags & DCACHE_OP_REVALIDATE)))
@@ -668,9 +657,6 @@ force_reval_path(struct path *path, struct nameidata *nd)
668 return 0; 657 return 0;
669 658
670 if (!status) { 659 if (!status) {
671 /* Don't d_invalidate in rcu-walk mode */
672 if (nameidata_drop_rcu(nd))
673 return -ECHILD;
674 d_invalidate(dentry); 660 d_invalidate(dentry);
675 status = -ESTALE; 661 status = -ESTALE;
676 } 662 }
@@ -777,6 +763,8 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p)
777 int error; 763 int error;
778 struct dentry *dentry = link->dentry; 764 struct dentry *dentry = link->dentry;
779 765
766 BUG_ON(nd->flags & LOOKUP_RCU);
767
780 touch_atime(link->mnt, dentry); 768 touch_atime(link->mnt, dentry);
781 nd_set_link(nd, NULL); 769 nd_set_link(nd, NULL);
782 770
@@ -807,10 +795,16 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p)
807 * Without that kind of total limit, nasty chains of consecutive 795 * Without that kind of total limit, nasty chains of consecutive
808 * symlinks can cause almost arbitrarily long lookups. 796 * symlinks can cause almost arbitrarily long lookups.
809 */ 797 */
810static inline int do_follow_link(struct path *path, struct nameidata *nd) 798static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd)
811{ 799{
812 void *cookie; 800 void *cookie;
813 int err = -ELOOP; 801 int err = -ELOOP;
802
803 /* We drop rcu-walk here */
804 if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry))
805 return -ECHILD;
806 BUG_ON(inode != path->dentry->d_inode);
807
814 if (current->link_count >= MAX_NESTED_LINKS) 808 if (current->link_count >= MAX_NESTED_LINKS)
815 goto loop; 809 goto loop;
816 if (current->total_link_count >= 40) 810 if (current->total_link_count >= 40)
@@ -1255,9 +1249,15 @@ static int do_lookup(struct nameidata *nd, struct qstr *name,
1255 return -ECHILD; 1249 return -ECHILD;
1256 1250
1257 nd->seq = seq; 1251 nd->seq = seq;
1258 if (dentry->d_flags & DCACHE_OP_REVALIDATE) 1252 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1259 goto need_revalidate; 1253 dentry = do_revalidate_rcu(dentry, nd);
1260done2: 1254 if (!dentry)
1255 goto need_lookup;
1256 if (IS_ERR(dentry))
1257 goto fail;
1258 if (!(nd->flags & LOOKUP_RCU))
1259 goto done;
1260 }
1261 path->mnt = mnt; 1261 path->mnt = mnt;
1262 path->dentry = dentry; 1262 path->dentry = dentry;
1263 if (likely(__follow_mount_rcu(nd, path, inode, false))) 1263 if (likely(__follow_mount_rcu(nd, path, inode, false)))
@@ -1270,8 +1270,13 @@ done2:
1270 if (!dentry) 1270 if (!dentry)
1271 goto need_lookup; 1271 goto need_lookup;
1272found: 1272found:
1273 if (dentry->d_flags & DCACHE_OP_REVALIDATE) 1273 if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
1274 goto need_revalidate; 1274 dentry = do_revalidate(dentry, nd);
1275 if (!dentry)
1276 goto need_lookup;
1277 if (IS_ERR(dentry))
1278 goto fail;
1279 }
1275done: 1280done:
1276 path->mnt = mnt; 1281 path->mnt = mnt;
1277 path->dentry = dentry; 1282 path->dentry = dentry;
@@ -1313,16 +1318,6 @@ need_lookup:
1313 mutex_unlock(&dir->i_mutex); 1318 mutex_unlock(&dir->i_mutex);
1314 goto found; 1319 goto found;
1315 1320
1316need_revalidate:
1317 dentry = do_revalidate(dentry, nd);
1318 if (!dentry)
1319 goto need_lookup;
1320 if (IS_ERR(dentry))
1321 goto fail;
1322 if (nd->flags & LOOKUP_RCU)
1323 goto done2;
1324 goto done;
1325
1326fail: 1321fail:
1327 return PTR_ERR(dentry); 1322 return PTR_ERR(dentry);
1328} 1323}
@@ -1419,11 +1414,7 @@ exec_again:
1419 goto out_dput; 1414 goto out_dput;
1420 1415
1421 if (inode->i_op->follow_link) { 1416 if (inode->i_op->follow_link) {
1422 /* We commonly drop rcu-walk here */ 1417 err = do_follow_link(inode, &next, nd);
1423 if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry))
1424 return -ECHILD;
1425 BUG_ON(inode != next.dentry->d_inode);
1426 err = do_follow_link(&next, nd);
1427 if (err) 1418 if (err)
1428 goto return_err; 1419 goto return_err;
1429 nd->inode = nd->path.dentry->d_inode; 1420 nd->inode = nd->path.dentry->d_inode;
@@ -1467,10 +1458,7 @@ last_component:
1467 break; 1458 break;
1468 if (inode && unlikely(inode->i_op->follow_link) && 1459 if (inode && unlikely(inode->i_op->follow_link) &&
1469 (lookup_flags & LOOKUP_FOLLOW)) { 1460 (lookup_flags & LOOKUP_FOLLOW)) {
1470 if (nameidata_dentry_drop_rcu_maybe(nd, next.dentry)) 1461 err = do_follow_link(inode, &next, nd);
1471 return -ECHILD;
1472 BUG_ON(inode != next.dentry->d_inode);
1473 err = do_follow_link(&next, nd);
1474 if (err) 1462 if (err)
1475 goto return_err; 1463 goto return_err;
1476 nd->inode = nd->path.dentry->d_inode; 1464 nd->inode = nd->path.dentry->d_inode;
@@ -1504,12 +1492,15 @@ return_reval:
1504 * We may need to check the cached dentry for staleness. 1492 * We may need to check the cached dentry for staleness.
1505 */ 1493 */
1506 if (need_reval_dot(nd->path.dentry)) { 1494 if (need_reval_dot(nd->path.dentry)) {
1495 if (nameidata_drop_rcu_last_maybe(nd))
1496 return -ECHILD;
1507 /* Note: we do not d_invalidate() */ 1497 /* Note: we do not d_invalidate() */
1508 err = d_revalidate(nd->path.dentry, nd); 1498 err = d_revalidate(nd->path.dentry, nd);
1509 if (!err) 1499 if (!err)
1510 err = -ESTALE; 1500 err = -ESTALE;
1511 if (err < 0) 1501 if (err < 0)
1512 break; 1502 break;
1503 return 0;
1513 } 1504 }
1514return_base: 1505return_base:
1515 if (nameidata_drop_rcu_last_maybe(nd)) 1506 if (nameidata_drop_rcu_last_maybe(nd))
@@ -1555,6 +1546,7 @@ static int path_walk(const char *name, struct nameidata *nd)
1555 /* nd->path had been dropped */ 1546 /* nd->path had been dropped */
1556 current->total_link_count = 0; 1547 current->total_link_count = 0;
1557 nd->path = save; 1548 nd->path = save;
1549 nd->inode = save.dentry->d_inode;
1558 path_get(&nd->path); 1550 path_get(&nd->path);
1559 nd->flags |= LOOKUP_REVAL; 1551 nd->flags |= LOOKUP_REVAL;
1560 result = link_path_walk(name, nd); 1552 result = link_path_walk(name, nd);
@@ -2464,22 +2456,29 @@ struct file *do_filp_open(int dfd, const char *pathname,
2464 /* !O_CREAT, simple open */ 2456 /* !O_CREAT, simple open */
2465 error = do_path_lookup(dfd, pathname, flags, &nd); 2457 error = do_path_lookup(dfd, pathname, flags, &nd);
2466 if (unlikely(error)) 2458 if (unlikely(error))
2467 goto out_filp; 2459 goto out_filp2;
2468 error = -ELOOP; 2460 error = -ELOOP;
2469 if (!(nd.flags & LOOKUP_FOLLOW)) { 2461 if (!(nd.flags & LOOKUP_FOLLOW)) {
2470 if (nd.inode->i_op->follow_link) 2462 if (nd.inode->i_op->follow_link)
2471 goto out_path; 2463 goto out_path2;
2472 } 2464 }
2473 error = -ENOTDIR; 2465 error = -ENOTDIR;
2474 if (nd.flags & LOOKUP_DIRECTORY) { 2466 if (nd.flags & LOOKUP_DIRECTORY) {
2475 if (!nd.inode->i_op->lookup) 2467 if (!nd.inode->i_op->lookup)
2476 goto out_path; 2468 goto out_path2;
2477 } 2469 }
2478 audit_inode(pathname, nd.path.dentry); 2470 audit_inode(pathname, nd.path.dentry);
2479 filp = finish_open(&nd, open_flag, acc_mode); 2471 filp = finish_open(&nd, open_flag, acc_mode);
2472out2:
2480 release_open_intent(&nd); 2473 release_open_intent(&nd);
2481 return filp; 2474 return filp;
2482 2475
2476out_path2:
2477 path_put(&nd.path);
2478out_filp2:
2479 filp = ERR_PTR(error);
2480 goto out2;
2481
2483creat: 2482creat:
2484 /* OK, have to create the file. Find the parent. */ 2483 /* OK, have to create the file. Find the parent. */
2485 error = path_init_rcu(dfd, pathname, 2484 error = path_init_rcu(dfd, pathname,
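
The fs/namei.c changes above make d_revalidate() a thin wrapper and add do_revalidate_rcu(), which handles a filesystem returning -ECHILD by leaving rcu-walk and retrying with references held. Condensed from the hunk, the retry idiom is:

    /* rcu-walk revalidation with a ref-walk fallback on -ECHILD. */
    status = dentry->d_op->d_revalidate(dentry, nd);
    if (status == -ECHILD) {
        if (nameidata_dentry_drop_rcu(nd, dentry))  /* could not leave rcu-walk */
            return ERR_PTR(-ECHILD);                /* caller restarts in ref-walk */
        return do_revalidate(dentry, nd);           /* retry, now holding references */
    }
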
diff --git a/fs/namespace.c b/fs/namespace.c
index 7b0b95371696..d1edf26025dc 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
1244 */ 1244 */
1245 br_write_lock(vfsmount_lock); 1245 br_write_lock(vfsmount_lock);
1246 if (mnt_get_count(mnt) != 2) { 1246 if (mnt_get_count(mnt) != 2) {
1247 br_write_lock(vfsmount_lock); 1247 br_write_unlock(vfsmount_lock);
1248 return -EBUSY; 1248 return -EBUSY;
1249 } 1249 }
1250 br_write_unlock(vfsmount_lock); 1250 br_write_unlock(vfsmount_lock);
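
The fs/namespace.c hunk is a one-word fix: the -EBUSY early return in do_umount() called br_write_lock() a second time instead of br_write_unlock(), leaving the lock held on the error path. The corrected pattern, sketched with a plain spinlock for illustration:

    /* Every exit path that took the lock must release it. */
    static int example_check_busy(spinlock_t *lock, int *count)
    {
        spin_lock(lock);
        if (*count != 2) {
            spin_unlock(lock);      /* the bug took the lock again here */
            return -EBUSY;
        }
        spin_unlock(lock);
        return 0;
    }
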
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1cc600e77bb4..2f8e61816d75 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -37,6 +37,7 @@
37#include <linux/inet.h> 37#include <linux/inet.h>
38#include <linux/nfs_xdr.h> 38#include <linux/nfs_xdr.h>
39#include <linux/slab.h> 39#include <linux/slab.h>
40#include <linux/compat.h>
40 41
41#include <asm/system.h> 42#include <asm/system.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
@@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word)
89 */ 90 */
90u64 nfs_compat_user_ino64(u64 fileid) 91u64 nfs_compat_user_ino64(u64 fileid)
91{ 92{
92 int ino; 93#ifdef CONFIG_COMPAT
94 compat_ulong_t ino;
95#else
96 unsigned long ino;
97#endif
93 98
94 if (enable_ino64) 99 if (enable_ino64)
95 return fileid; 100 return fileid;
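
The fs/nfs/inode.c hunk types the truncated inode number as compat_ulong_t under CONFIG_COMPAT, so 32-bit tasks on a 64-bit kernel get a value that fits their stat ABI. The body of nfs_compat_user_ino64() is outside the hunk; a hedged sketch of the usual fold-down technique it relies on:

    /* Fold a 64-bit fileid into the narrower ino type, XOR-ing in the high
     * bits rather than silently truncating.  Sketch only; the in-tree
     * helper may differ in detail. */
    ino = fileid;
    if (sizeof(ino) < sizeof(fileid))
        ino ^= fileid >> (sizeof(fileid) - sizeof(ino)) * 8;
    return ino;
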
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 7a7474073148..1be36cf65bfc 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
298#if defined(CONFIG_NFS_V4_1) 298#if defined(CONFIG_NFS_V4_1)
299struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); 299struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
300struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); 300struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
301extern void nfs4_schedule_session_recovery(struct nfs4_session *);
302#else
303static inline void nfs4_schedule_session_recovery(struct nfs4_session *session)
304{
305}
301#endif /* CONFIG_NFS_V4_1 */ 306#endif /* CONFIG_NFS_V4_1 */
302 307
303extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); 308extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
@@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *);
307extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); 312extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t);
308extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); 313extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t);
309extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); 314extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t);
310extern void nfs4_schedule_state_recovery(struct nfs_client *); 315extern void nfs4_schedule_lease_recovery(struct nfs_client *);
311extern void nfs4_schedule_state_manager(struct nfs_client *); 316extern void nfs4_schedule_state_manager(struct nfs_client *);
312extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); 317extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *);
313extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state);
314extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 318extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
315extern void nfs41_handle_recall_slot(struct nfs_client *clp); 319extern void nfs41_handle_recall_slot(struct nfs_client *clp);
316extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 320extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index f5c9b125e8cc..b73c34375f60 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode)
219 goto out_err; 219 goto out_err;
220 } 220 }
221 buf = kmalloc(rlen + 1, GFP_KERNEL); 221 buf = kmalloc(rlen + 1, GFP_KERNEL);
222 if (!buf) {
223 dprintk("%s: Not enough memory\n", __func__);
224 goto out_err;
225 }
222 buf[rlen] = '\0'; 226 buf[rlen] = '\0';
223 memcpy(buf, r_addr, rlen); 227 memcpy(buf, r_addr, rlen);
224 228
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 78936a8f40ab..0a07e353a961 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -256,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
256 case -NFS4ERR_OPENMODE: 256 case -NFS4ERR_OPENMODE:
257 if (state == NULL) 257 if (state == NULL)
258 break; 258 break;
259 nfs4_state_mark_reclaim_nograce(clp, state); 259 nfs4_schedule_stateid_recovery(server, state);
260 goto do_state_recovery; 260 goto wait_on_recovery;
261 case -NFS4ERR_STALE_STATEID: 261 case -NFS4ERR_STALE_STATEID:
262 case -NFS4ERR_STALE_CLIENTID: 262 case -NFS4ERR_STALE_CLIENTID:
263 case -NFS4ERR_EXPIRED: 263 case -NFS4ERR_EXPIRED:
264 goto do_state_recovery; 264 nfs4_schedule_lease_recovery(clp);
265 goto wait_on_recovery;
265#if defined(CONFIG_NFS_V4_1) 266#if defined(CONFIG_NFS_V4_1)
266 case -NFS4ERR_BADSESSION: 267 case -NFS4ERR_BADSESSION:
267 case -NFS4ERR_BADSLOT: 268 case -NFS4ERR_BADSLOT:
@@ -272,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
272 case -NFS4ERR_SEQ_MISORDERED: 273 case -NFS4ERR_SEQ_MISORDERED:
273 dprintk("%s ERROR: %d Reset session\n", __func__, 274 dprintk("%s ERROR: %d Reset session\n", __func__,
274 errorcode); 275 errorcode);
275 nfs4_schedule_state_recovery(clp); 276 nfs4_schedule_session_recovery(clp->cl_session);
276 exception->retry = 1; 277 exception->retry = 1;
277 break; 278 break;
278#endif /* defined(CONFIG_NFS_V4_1) */ 279#endif /* defined(CONFIG_NFS_V4_1) */
@@ -295,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode,
295 } 296 }
296 /* We failed to handle the error */ 297 /* We failed to handle the error */
297 return nfs4_map_errors(ret); 298 return nfs4_map_errors(ret);
298do_state_recovery: 299wait_on_recovery:
299 nfs4_schedule_state_recovery(clp);
300 ret = nfs4_wait_clnt_recover(clp); 300 ret = nfs4_wait_clnt_recover(clp);
301 if (ret == 0) 301 if (ret == 0)
302 exception->retry = 1; 302 exception->retry = 1;
@@ -435,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *
435 clp = res->sr_session->clp; 435 clp = res->sr_session->clp;
436 do_renew_lease(clp, timestamp); 436 do_renew_lease(clp, timestamp);
437 /* Check sequence flags */ 437 /* Check sequence flags */
438 if (atomic_read(&clp->cl_count) > 1) 438 if (res->sr_status_flags != 0)
439 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); 439 nfs4_schedule_lease_recovery(clp);
440 break; 440 break;
441 case -NFS4ERR_DELAY: 441 case -NFS4ERR_DELAY:
442 /* The server detected a resend of the RPC call and 442 /* The server detected a resend of the RPC call and
@@ -1255,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1255 case -NFS4ERR_BAD_HIGH_SLOT: 1255 case -NFS4ERR_BAD_HIGH_SLOT:
1256 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1256 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1257 case -NFS4ERR_DEADSESSION: 1257 case -NFS4ERR_DEADSESSION:
1258 nfs4_schedule_state_recovery( 1258 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
1259 server->nfs_client);
1260 goto out; 1259 goto out;
1261 case -NFS4ERR_STALE_CLIENTID: 1260 case -NFS4ERR_STALE_CLIENTID:
1262 case -NFS4ERR_STALE_STATEID: 1261 case -NFS4ERR_STALE_STATEID:
1263 case -NFS4ERR_EXPIRED: 1262 case -NFS4ERR_EXPIRED:
1264 /* Don't recall a delegation if it was lost */ 1263 /* Don't recall a delegation if it was lost */
1265 nfs4_schedule_state_recovery(server->nfs_client); 1264 nfs4_schedule_lease_recovery(server->nfs_client);
1266 goto out; 1265 goto out;
1267 case -ERESTARTSYS: 1266 case -ERESTARTSYS:
1268 /* 1267 /*
@@ -1271,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1271 */ 1270 */
1272 case -NFS4ERR_ADMIN_REVOKED: 1271 case -NFS4ERR_ADMIN_REVOKED:
1273 case -NFS4ERR_BAD_STATEID: 1272 case -NFS4ERR_BAD_STATEID:
1274 nfs4_state_mark_reclaim_nograce(server->nfs_client, state); 1273 nfs4_schedule_stateid_recovery(server, state);
1275 case -EKEYEXPIRED: 1274 case -EKEYEXPIRED:
1276 /* 1275 /*
1277 * User RPCSEC_GSS context has expired. 1276 * User RPCSEC_GSS context has expired.
@@ -1587,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server)
1587 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && 1586 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) &&
1588 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) 1587 !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state))
1589 break; 1588 break;
1590 nfs4_schedule_state_recovery(clp); 1589 nfs4_schedule_state_manager(clp);
1591 ret = -EIO; 1590 ret = -EIO;
1592 } 1591 }
1593 return ret; 1592 return ret;
@@ -3178,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata)
3178 if (task->tk_status < 0) { 3177 if (task->tk_status < 0) {
3179 /* Unless we're shutting down, schedule state recovery! */ 3178 /* Unless we're shutting down, schedule state recovery! */
3180 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) 3179 if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0)
3181 nfs4_schedule_state_recovery(clp); 3180 nfs4_schedule_lease_recovery(clp);
3182 return; 3181 return;
3183 } 3182 }
3184 do_renew_lease(clp, timestamp); 3183 do_renew_lease(clp, timestamp);
@@ -3252,6 +3251,35 @@ static void buf_to_pages(const void *buf, size_t buflen,
3252 } 3251 }
3253} 3252}
3254 3253
3254static int buf_to_pages_noslab(const void *buf, size_t buflen,
3255 struct page **pages, unsigned int *pgbase)
3256{
3257 struct page *newpage, **spages;
3258 int rc = 0;
3259 size_t len;
3260 spages = pages;
3261
3262 do {
3263 len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
3264 newpage = alloc_page(GFP_KERNEL);
3265
3266 if (newpage == NULL)
3267 goto unwind;
3268 memcpy(page_address(newpage), buf, len);
3269 buf += len;
3270 buflen -= len;
3271 *pages++ = newpage;
3272 rc++;
3273 } while (buflen != 0);
3274
3275 return rc;
3276
3277unwind:
3278 for(; rc > 0; rc--)
3279 __free_page(spages[rc-1]);
3280 return -ENOMEM;
3281}
3282
3255struct nfs4_cached_acl { 3283struct nfs4_cached_acl {
3256 int cached; 3284 int cached;
3257 size_t len; 3285 size_t len;
@@ -3420,13 +3448,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
3420 .rpc_argp = &arg, 3448 .rpc_argp = &arg,
3421 .rpc_resp = &res, 3449 .rpc_resp = &res,
3422 }; 3450 };
3423 int ret; 3451 int ret, i;
3424 3452
3425 if (!nfs4_server_supports_acls(server)) 3453 if (!nfs4_server_supports_acls(server))
3426 return -EOPNOTSUPP; 3454 return -EOPNOTSUPP;
3455 i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3456 if (i < 0)
3457 return i;
3427 nfs_inode_return_delegation(inode); 3458 nfs_inode_return_delegation(inode);
3428 buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
3429 ret = nfs4_call_sync(server, &msg, &arg, &res, 1); 3459 ret = nfs4_call_sync(server, &msg, &arg, &res, 1);
3460
3461 /*
3462 * Free each page after tx, so the only ref left is
3463 * held by the network stack
3464 */
3465 for (; i > 0; i--)
3466 put_page(pages[i-1]);
3467
3430 /* 3468 /*
3431 * Acl update can result in inode attribute update. 3469 * Acl update can result in inode attribute update.
3432 * so mark the attribute cache invalid. 3470 * so mark the attribute cache invalid.
@@ -3464,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3464 case -NFS4ERR_OPENMODE: 3502 case -NFS4ERR_OPENMODE:
3465 if (state == NULL) 3503 if (state == NULL)
3466 break; 3504 break;
3467 nfs4_state_mark_reclaim_nograce(clp, state); 3505 nfs4_schedule_stateid_recovery(server, state);
3468 goto do_state_recovery; 3506 goto wait_on_recovery;
3469 case -NFS4ERR_STALE_STATEID: 3507 case -NFS4ERR_STALE_STATEID:
3470 case -NFS4ERR_STALE_CLIENTID: 3508 case -NFS4ERR_STALE_CLIENTID:
3471 case -NFS4ERR_EXPIRED: 3509 case -NFS4ERR_EXPIRED:
3472 goto do_state_recovery; 3510 nfs4_schedule_lease_recovery(clp);
3511 goto wait_on_recovery;
3473#if defined(CONFIG_NFS_V4_1) 3512#if defined(CONFIG_NFS_V4_1)
3474 case -NFS4ERR_BADSESSION: 3513 case -NFS4ERR_BADSESSION:
3475 case -NFS4ERR_BADSLOT: 3514 case -NFS4ERR_BADSLOT:
@@ -3480,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3480 case -NFS4ERR_SEQ_MISORDERED: 3519 case -NFS4ERR_SEQ_MISORDERED:
3481 dprintk("%s ERROR %d, Reset session\n", __func__, 3520 dprintk("%s ERROR %d, Reset session\n", __func__,
3482 task->tk_status); 3521 task->tk_status);
3483 nfs4_schedule_state_recovery(clp); 3522 nfs4_schedule_session_recovery(clp->cl_session);
3484 task->tk_status = 0; 3523 task->tk_status = 0;
3485 return -EAGAIN; 3524 return -EAGAIN;
3486#endif /* CONFIG_NFS_V4_1 */ 3525#endif /* CONFIG_NFS_V4_1 */
@@ -3497,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3497 } 3536 }
3498 task->tk_status = nfs4_map_errors(task->tk_status); 3537 task->tk_status = nfs4_map_errors(task->tk_status);
3499 return 0; 3538 return 0;
3500do_state_recovery: 3539wait_on_recovery:
3501 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); 3540 rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
3502 nfs4_schedule_state_recovery(clp);
3503 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) 3541 if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0)
3504 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); 3542 rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
3505 task->tk_status = 0; 3543 task->tk_status = 0;
@@ -4110,7 +4148,7 @@ static void nfs4_lock_release(void *calldata)
4110 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, 4148 task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp,
4111 data->arg.lock_seqid); 4149 data->arg.lock_seqid);
4112 if (!IS_ERR(task)) 4150 if (!IS_ERR(task))
4113 rpc_put_task(task); 4151 rpc_put_task_async(task);
4114 dprintk("%s: cancelling lock!\n", __func__); 4152 dprintk("%s: cancelling lock!\n", __func__);
4115 } else 4153 } else
4116 nfs_free_seqid(data->arg.lock_seqid); 4154 nfs_free_seqid(data->arg.lock_seqid);
@@ -4134,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = {
4134 4172
4135static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) 4173static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error)
4136{ 4174{
4137 struct nfs_client *clp = server->nfs_client;
4138 struct nfs4_state *state = lsp->ls_state;
4139
4140 switch (error) { 4175 switch (error) {
4141 case -NFS4ERR_ADMIN_REVOKED: 4176 case -NFS4ERR_ADMIN_REVOKED:
4142 case -NFS4ERR_BAD_STATEID: 4177 case -NFS4ERR_BAD_STATEID:
4143 case -NFS4ERR_EXPIRED: 4178 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4144 if (new_lock_owner != 0 || 4179 if (new_lock_owner != 0 ||
4145 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) 4180 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4146 nfs4_state_mark_reclaim_nograce(clp, state); 4181 nfs4_schedule_stateid_recovery(server, lsp->ls_state);
4147 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4148 break; 4182 break;
4149 case -NFS4ERR_STALE_STATEID: 4183 case -NFS4ERR_STALE_STATEID:
4150 if (new_lock_owner != 0 ||
4151 (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
4152 nfs4_state_mark_reclaim_reboot(clp, state);
4153 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; 4184 lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED;
4185 case -NFS4ERR_EXPIRED:
4186 nfs4_schedule_lease_recovery(server->nfs_client);
4154 }; 4187 };
4155} 4188}
4156 4189
@@ -4366,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4366 case -NFS4ERR_EXPIRED: 4399 case -NFS4ERR_EXPIRED:
4367 case -NFS4ERR_STALE_CLIENTID: 4400 case -NFS4ERR_STALE_CLIENTID:
4368 case -NFS4ERR_STALE_STATEID: 4401 case -NFS4ERR_STALE_STATEID:
4402 nfs4_schedule_lease_recovery(server->nfs_client);
4403 goto out;
4369 case -NFS4ERR_BADSESSION: 4404 case -NFS4ERR_BADSESSION:
4370 case -NFS4ERR_BADSLOT: 4405 case -NFS4ERR_BADSLOT:
4371 case -NFS4ERR_BAD_HIGH_SLOT: 4406 case -NFS4ERR_BAD_HIGH_SLOT:
4372 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4407 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4373 case -NFS4ERR_DEADSESSION: 4408 case -NFS4ERR_DEADSESSION:
4374 nfs4_schedule_state_recovery(server->nfs_client); 4409 nfs4_schedule_session_recovery(server->nfs_client->cl_session);
4375 goto out; 4410 goto out;
4376 case -ERESTARTSYS: 4411 case -ERESTARTSYS:
4377 /* 4412 /*
@@ -4381,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4381 case -NFS4ERR_ADMIN_REVOKED: 4416 case -NFS4ERR_ADMIN_REVOKED:
4382 case -NFS4ERR_BAD_STATEID: 4417 case -NFS4ERR_BAD_STATEID:
4383 case -NFS4ERR_OPENMODE: 4418 case -NFS4ERR_OPENMODE:
4384 nfs4_state_mark_reclaim_nograce(server->nfs_client, state); 4419 nfs4_schedule_stateid_recovery(server, state);
4385 err = 0; 4420 err = 0;
4386 goto out; 4421 goto out;
4387 case -EKEYEXPIRED: 4422 case -EKEYEXPIRED:
@@ -4988,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp)
4988 int status; 5023 int status;
4989 unsigned *ptr; 5024 unsigned *ptr;
4990 struct nfs4_session *session = clp->cl_session; 5025 struct nfs4_session *session = clp->cl_session;
5026 long timeout = 0;
5027 int err;
4991 5028
4992 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5029 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
4993 5030
4994 status = _nfs4_proc_create_session(clp); 5031 do {
5032 status = _nfs4_proc_create_session(clp);
5033 if (status == -NFS4ERR_DELAY) {
5034 err = nfs4_delay(clp->cl_rpcclient, &timeout);
5035 if (err)
5036 status = err;
5037 }
5038 } while (status == -NFS4ERR_DELAY);
5039
4995 if (status) 5040 if (status)
4996 goto out; 5041 goto out;
4997 5042
@@ -5100,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client
5100 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5145 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5101 return -EAGAIN; 5146 return -EAGAIN;
5102 default: 5147 default:
5103 nfs4_schedule_state_recovery(clp); 5148 nfs4_schedule_lease_recovery(clp);
5104 } 5149 }
5105 return 0; 5150 return 0;
5106} 5151}
@@ -5187,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr
5187 if (IS_ERR(task)) 5232 if (IS_ERR(task))
5188 ret = PTR_ERR(task); 5233 ret = PTR_ERR(task);
5189 else 5234 else
5190 rpc_put_task(task); 5235 rpc_put_task_async(task);
5191 dprintk("<-- %s status=%d\n", __func__, ret); 5236 dprintk("<-- %s status=%d\n", __func__, ret);
5192 return ret; 5237 return ret;
5193} 5238}
@@ -5203,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
5203 goto out; 5248 goto out;
5204 } 5249 }
5205 ret = rpc_wait_for_completion_task(task); 5250 ret = rpc_wait_for_completion_task(task);
5206 if (!ret) 5251 if (!ret) {
5252 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
5253
5254 if (task->tk_status == 0)
5255 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
5207 ret = task->tk_status; 5256 ret = task->tk_status;
5257 }
5208 rpc_put_task(task); 5258 rpc_put_task(task);
5209out: 5259out:
5210 dprintk("<-- %s status=%d\n", __func__, ret); 5260 dprintk("<-- %s status=%d\n", __func__, ret);
@@ -5241,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf
5241 rpc_delay(task, NFS4_POLL_RETRY_MAX); 5291 rpc_delay(task, NFS4_POLL_RETRY_MAX);
5242 return -EAGAIN; 5292 return -EAGAIN;
5243 default: 5293 default:
5244 nfs4_schedule_state_recovery(clp); 5294 nfs4_schedule_lease_recovery(clp);
5245 } 5295 }
5246 return 0; 5296 return 0;
5247} 5297}
@@ -5309,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp)
5309 status = PTR_ERR(task); 5359 status = PTR_ERR(task);
5310 goto out; 5360 goto out;
5311 } 5361 }
5362 status = nfs4_wait_for_completion_rpc_task(task);
5363 if (status == 0)
5364 status = task->tk_status;
5312 rpc_put_task(task); 5365 rpc_put_task(task);
5313 return 0; 5366 return 0;
5314out: 5367out:
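The create_session hunk above turns a single call into a loop that keeps retrying while the server answers NFS4ERR_DELAY, sleeping between attempts via nfs4_delay() with a growing timeout and giving up if the sleep itself fails. A standalone userspace sketch of that shape, assuming only POSIX nanosleep(); op_that_may_ask_for_delay() and the delay bounds are made-up stand-ins, not kernel API:

#include <errno.h>
#include <stdio.h>
#include <time.h>

#define DELAY_MIN_MS  100
#define DELAY_MAX_MS 4000

static int op_that_may_ask_for_delay(int attempt)
{
    /* pretend the server asks us to wait for the first two attempts */
    return attempt < 2 ? -EAGAIN : 0;
}

static int retry_with_backoff(void)
{
    long delay_ms = 0;
    int attempt = 0;
    int status;

    do {
        status = op_that_may_ask_for_delay(attempt++);
        if (status == -EAGAIN) {
            struct timespec ts;

            /* capped, growing delay, in the spirit of nfs4_delay() */
            delay_ms = delay_ms ? delay_ms * 2 : DELAY_MIN_MS;
            if (delay_ms > DELAY_MAX_MS)
                delay_ms = DELAY_MAX_MS;
            ts.tv_sec = delay_ms / 1000;
            ts.tv_nsec = (delay_ms % 1000) * 1000000L;
            if (nanosleep(&ts, NULL))   /* interrupted: give up */
                return -errno;
        }
    } while (status == -EAGAIN);
    return status;
}

int main(void)
{
    printf("status=%d\n", retry_with_backoff());
    return 0;
}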
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e6742b57a04c..0592288f9f06 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
1007} 1007}
1008 1008
1009/* 1009/*
1010 * Schedule a state recovery attempt 1010 * Schedule a lease recovery attempt
1011 */ 1011 */
1012void nfs4_schedule_state_recovery(struct nfs_client *clp) 1012void nfs4_schedule_lease_recovery(struct nfs_client *clp)
1013{ 1013{
1014 if (!clp) 1014 if (!clp)
1015 return; 1015 return;
@@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp)
1018 nfs4_schedule_state_manager(clp); 1018 nfs4_schedule_state_manager(clp);
1019} 1019}
1020 1020
1021int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) 1021static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state)
1022{ 1022{
1023 1023
1024 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1024 set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st
1032 return 1; 1032 return 1;
1033} 1033}
1034 1034
1035int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) 1035static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state)
1036{ 1036{
1037 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); 1037 set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags);
1038 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); 1038 clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags);
@@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s
1041 return 1; 1041 return 1;
1042} 1042}
1043 1043
1044void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state)
1045{
1046 struct nfs_client *clp = server->nfs_client;
1047
1048 nfs4_state_mark_reclaim_nograce(clp, state);
1049 nfs4_schedule_state_manager(clp);
1050}
1051
1044static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) 1052static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops)
1045{ 1053{
1046 struct inode *inode = state->inode; 1054 struct inode *inode = state->inode;
@@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp)
1436} 1444}
1437 1445
1438#ifdef CONFIG_NFS_V4_1 1446#ifdef CONFIG_NFS_V4_1
1447void nfs4_schedule_session_recovery(struct nfs4_session *session)
1448{
1449 nfs4_schedule_lease_recovery(session->clp);
1450}
1451
1439void nfs41_handle_recall_slot(struct nfs_client *clp) 1452void nfs41_handle_recall_slot(struct nfs_client *clp)
1440{ 1453{
1441 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1454 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1442 nfs4_schedule_state_recovery(clp); 1455 nfs4_schedule_state_manager(clp);
1443} 1456}
1444 1457
1445static void nfs4_reset_all_state(struct nfs_client *clp) 1458static void nfs4_reset_all_state(struct nfs_client *clp)
@@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp)
1447 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1460 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1448 clp->cl_boot_time = CURRENT_TIME; 1461 clp->cl_boot_time = CURRENT_TIME;
1449 nfs4_state_start_reclaim_nograce(clp); 1462 nfs4_state_start_reclaim_nograce(clp);
1450 nfs4_schedule_state_recovery(clp); 1463 nfs4_schedule_state_manager(clp);
1451 } 1464 }
1452} 1465}
1453 1466
@@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
1455{ 1468{
1456 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1469 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1457 nfs4_state_start_reclaim_reboot(clp); 1470 nfs4_state_start_reclaim_reboot(clp);
1458 nfs4_schedule_state_recovery(clp); 1471 nfs4_schedule_state_manager(clp);
1459 } 1472 }
1460} 1473}
1461 1474
@@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp)
1475{ 1488{
1476 nfs_expire_all_delegations(clp); 1489 nfs_expire_all_delegations(clp);
1477 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 1490 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
1478 nfs4_schedule_state_recovery(clp); 1491 nfs4_schedule_state_manager(clp);
1479} 1492}
1480 1493
1481void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 1494void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 4e2c168b6ee9..94d50e86a124 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1660 1660
1661 p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); 1661 p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12);
1662 *p++ = cpu_to_be32(OP_CREATE_SESSION); 1662 *p++ = cpu_to_be32(OP_CREATE_SESSION);
1663 p = xdr_encode_hyper(p, clp->cl_ex_clid); 1663 p = xdr_encode_hyper(p, clp->cl_clientid);
1664 *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ 1664 *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */
1665 *p++ = cpu_to_be32(args->flags); /*flags */ 1665 *p++ = cpu_to_be32(args->flags); /*flags */
1666 1666
@@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr,
4694 p = xdr_inline_decode(xdr, 8); 4694 p = xdr_inline_decode(xdr, 8);
4695 if (unlikely(!p)) 4695 if (unlikely(!p))
4696 goto out_overflow; 4696 goto out_overflow;
4697 xdr_decode_hyper(p, &clp->cl_ex_clid); 4697 xdr_decode_hyper(p, &clp->cl_clientid);
4698 p = xdr_inline_decode(xdr, 12); 4698 p = xdr_inline_decode(xdr, 12);
4699 if (unlikely(!p)) 4699 if (unlikely(!p))
4700 goto out_overflow; 4700 goto out_overflow;
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c
index 903908a20023..c541093a5bf2 100644
--- a/fs/nfs/nfsroot.c
+++ b/fs/nfs/nfsroot.c
@@ -86,11 +86,14 @@
86/* Default path we try to mount. "%s" gets replaced by our IP address */ 86/* Default path we try to mount. "%s" gets replaced by our IP address */
87#define NFS_ROOT "/tftpboot/%s" 87#define NFS_ROOT "/tftpboot/%s"
88 88
89/* Default NFSROOT mount options. */
90#define NFS_DEF_OPTIONS "udp"
91
89/* Parameters passed from the kernel command line */ 92/* Parameters passed from the kernel command line */
90static char nfs_root_parms[256] __initdata = ""; 93static char nfs_root_parms[256] __initdata = "";
91 94
92/* Text-based mount options passed to super.c */ 95/* Text-based mount options passed to super.c */
93static char nfs_root_options[256] __initdata = ""; 96static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS;
94 97
95/* Address of NFS server */ 98/* Address of NFS server */
96static __be32 servaddr __initdata = htonl(INADDR_NONE); 99static __be32 servaddr __initdata = htonl(INADDR_NONE);
@@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src,
160} 163}
161 164
162static int __init root_nfs_cat(char *dest, const char *src, 165static int __init root_nfs_cat(char *dest, const char *src,
163 const size_t destlen) 166 const size_t destlen)
164{ 167{
168 size_t len = strlen(dest);
169
170 if (len && dest[len - 1] != ',')
171 if (strlcat(dest, ",", destlen) > destlen)
172 return -1;
173
165 if (strlcat(dest, src, destlen) > destlen) 174 if (strlcat(dest, src, destlen) > destlen)
166 return -1; 175 return -1;
167 return 0; 176 return 0;
@@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
194 if (root_nfs_cat(nfs_root_options, incoming, 203 if (root_nfs_cat(nfs_root_options, incoming,
195 sizeof(nfs_root_options))) 204 sizeof(nfs_root_options)))
196 return -1; 205 return -1;
197
198 /*
199 * Possibly prepare for more options to be appended
200 */
201 if (nfs_root_options[0] != '\0' &&
202 nfs_root_options[strlen(nfs_root_options)] != ',')
203 if (root_nfs_cat(nfs_root_options, ",",
204 sizeof(nfs_root_options)))
205 return -1;
206
207 return 0; 206 return 0;
208} 207}
209 208
@@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath,
217 */ 216 */
218static int __init root_nfs_data(char *cmdline) 217static int __init root_nfs_data(char *cmdline)
219{ 218{
220 char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; 219 char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1];
221 int len, retval = -1; 220 int len, retval = -1;
222 char *tmp = NULL; 221 char *tmp = NULL;
223 const size_t tmplen = sizeof(nfs_export_path); 222 const size_t tmplen = sizeof(nfs_export_path);
@@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline)
244 * Append mandatory options for nfsroot so they override 243 * Append mandatory options for nfsroot so they override
245 * what has come before 244 * what has come before
246 */ 245 */
247 snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4", 246 snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4",
248 &servaddr); 247 &servaddr);
249 if (root_nfs_cat(nfs_root_options, addr_option, 248 if (root_nfs_cat(nfs_root_options, mand_options,
250 sizeof(nfs_root_options))) 249 sizeof(nfs_root_options)))
251 goto out_optionstoolong; 250 goto out_optionstoolong;
252 251
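The nfsroot.c hunks above move the comma handling into root_nfs_cat(): a separator is appended only when the buffer is non-empty and does not already end in ',', and overflow is reported instead of silently truncating. A standalone userspace sketch of that rule; opt_cat() and the option strings are illustrative, not the kernel helper:

#include <stdio.h>
#include <string.h>

static int opt_cat(char *dest, size_t destlen, const char *src)
{
    size_t len = strlen(dest);
    int n;

    /* add a "," only when something is already there and no trailing comma */
    if (len && dest[len - 1] != ',')
        n = snprintf(dest + len, destlen - len, ",%s", src);
    else
        n = snprintf(dest + len, destlen - len, "%s", src);
    if (n < 0 || (size_t)n >= destlen - len)
        return -1;              /* would have truncated */
    return 0;
}

int main(void)
{
    char opts[64] = "udp";      /* analogous to the NFS_DEF_OPTIONS default */

    if (opt_cat(opts, sizeof(opts), "vers=3") ||
        opt_cat(opts, sizeof(opts), "nolock,addr=192.168.0.1"))
        return 1;
    printf("%s\n", opts);       /* udp,vers=3,nolock,addr=192.168.0.1 */
    return 0;
}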
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c
index e313a51acdd1..6481d537d69d 100644
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
@@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
180 task_setup_data.rpc_client = NFS_CLIENT(dir); 180 task_setup_data.rpc_client = NFS_CLIENT(dir);
181 task = rpc_run_task(&task_setup_data); 181 task = rpc_run_task(&task_setup_data);
182 if (!IS_ERR(task)) 182 if (!IS_ERR(task))
183 rpc_put_task(task); 183 rpc_put_task_async(task);
184 return 1; 184 return 1;
185} 185}
186 186
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c8278f4046cb..42b92d7a9cc4 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head,
1292 task = rpc_run_task(&task_setup_data); 1292 task = rpc_run_task(&task_setup_data);
1293 if (IS_ERR(task)) 1293 if (IS_ERR(task))
1294 return PTR_ERR(task); 1294 return PTR_ERR(task);
1295 if (how & FLUSH_SYNC)
1296 rpc_wait_for_completion_task(task);
1295 rpc_put_task(task); 1297 rpc_put_task(task);
1296 return 0; 1298 return 0;
1297} 1299}
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 3be975e18919..02eb4edf0ece 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -432,7 +432,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr,
432 * If the server returns different values for sessionID, slotID or 432 * If the server returns different values for sessionID, slotID or
433 * sequence number, the server is looney tunes. 433 * sequence number, the server is looney tunes.
434 */ 434 */
435 p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); 435 p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4);
436 if (unlikely(p == NULL)) 436 if (unlikely(p == NULL))
437 goto out_overflow; 437 goto out_overflow;
438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); 438 memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN);
@@ -484,7 +484,7 @@ static int decode_cb_sequence4res(struct xdr_stream *xdr,
484out: 484out:
485 return status; 485 return status;
486out_default: 486out_default:
487 return nfs_cb_stat_to_errno(status); 487 return nfs_cb_stat_to_errno(nfserr);
488} 488}
489 489
490/* 490/*
@@ -564,11 +564,9 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
564 if (unlikely(status)) 564 if (unlikely(status))
565 goto out; 565 goto out;
566 if (unlikely(nfserr != NFS4_OK)) 566 if (unlikely(nfserr != NFS4_OK))
567 goto out_default; 567 status = nfs_cb_stat_to_errno(nfserr);
568out: 568out:
569 return status; 569 return status;
570out_default:
571 return nfs_cb_stat_to_errno(status);
572} 570}
573 571
574/* 572/*
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index d98d0213285d..7b566ec14e18 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -230,9 +230,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
230 dp->dl_client = clp; 230 dp->dl_client = clp;
231 get_nfs4_file(fp); 231 get_nfs4_file(fp);
232 dp->dl_file = fp; 232 dp->dl_file = fp;
233 dp->dl_vfs_file = find_readable_file(fp);
234 get_file(dp->dl_vfs_file);
235 dp->dl_flock = NULL;
236 dp->dl_type = type; 233 dp->dl_type = type;
237 dp->dl_stateid.si_boot = boot_time; 234 dp->dl_stateid.si_boot = boot_time;
238 dp->dl_stateid.si_stateownerid = current_delegid++; 235 dp->dl_stateid.si_stateownerid = current_delegid++;
@@ -241,8 +238,6 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
241 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle); 238 fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
242 dp->dl_time = 0; 239 dp->dl_time = 0;
243 atomic_set(&dp->dl_count, 1); 240 atomic_set(&dp->dl_count, 1);
244 list_add(&dp->dl_perfile, &fp->fi_delegations);
245 list_add(&dp->dl_perclnt, &clp->cl_delegations);
246 INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc); 241 INIT_WORK(&dp->dl_recall.cb_work, nfsd4_do_callback_rpc);
247 return dp; 242 return dp;
248} 243}
@@ -253,36 +248,30 @@ nfs4_put_delegation(struct nfs4_delegation *dp)
253 if (atomic_dec_and_test(&dp->dl_count)) { 248 if (atomic_dec_and_test(&dp->dl_count)) {
254 dprintk("NFSD: freeing dp %p\n",dp); 249 dprintk("NFSD: freeing dp %p\n",dp);
255 put_nfs4_file(dp->dl_file); 250 put_nfs4_file(dp->dl_file);
256 fput(dp->dl_vfs_file);
257 kmem_cache_free(deleg_slab, dp); 251 kmem_cache_free(deleg_slab, dp);
258 num_delegations--; 252 num_delegations--;
259 } 253 }
260} 254}
261 255
262/* Remove the associated file_lock first, then remove the delegation. 256static void nfs4_put_deleg_lease(struct nfs4_file *fp)
263 * lease_modify() is called to remove the FS_LEASE file_lock from
264 * the i_flock list, eventually calling nfsd's lock_manager
265 * fl_release_callback.
266 */
267static void
268nfs4_close_delegation(struct nfs4_delegation *dp)
269{ 257{
270 dprintk("NFSD: close_delegation dp %p\n",dp); 258 if (atomic_dec_and_test(&fp->fi_delegees)) {
271 /* XXX: do we even need this check?: */ 259 vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
272 if (dp->dl_flock) 260 fp->fi_lease = NULL;
273 vfs_setlease(dp->dl_vfs_file, F_UNLCK, &dp->dl_flock); 261 fp->fi_deleg_file = NULL;
262 }
274} 263}
275 264
276/* Called under the state lock. */ 265/* Called under the state lock. */
277static void 266static void
278unhash_delegation(struct nfs4_delegation *dp) 267unhash_delegation(struct nfs4_delegation *dp)
279{ 268{
280 list_del_init(&dp->dl_perfile);
281 list_del_init(&dp->dl_perclnt); 269 list_del_init(&dp->dl_perclnt);
282 spin_lock(&recall_lock); 270 spin_lock(&recall_lock);
271 list_del_init(&dp->dl_perfile);
283 list_del_init(&dp->dl_recall_lru); 272 list_del_init(&dp->dl_recall_lru);
284 spin_unlock(&recall_lock); 273 spin_unlock(&recall_lock);
285 nfs4_close_delegation(dp); 274 nfs4_put_deleg_lease(dp->dl_file);
286 nfs4_put_delegation(dp); 275 nfs4_put_delegation(dp);
287} 276}
288 277
@@ -958,8 +947,6 @@ expire_client(struct nfs4_client *clp)
958 spin_lock(&recall_lock); 947 spin_lock(&recall_lock);
959 while (!list_empty(&clp->cl_delegations)) { 948 while (!list_empty(&clp->cl_delegations)) {
960 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt); 949 dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
961 dprintk("NFSD: expire client. dp %p, fp %p\n", dp,
962 dp->dl_flock);
963 list_del_init(&dp->dl_perclnt); 950 list_del_init(&dp->dl_perclnt);
964 list_move(&dp->dl_recall_lru, &reaplist); 951 list_move(&dp->dl_recall_lru, &reaplist);
965 } 952 }
@@ -2078,6 +2065,7 @@ alloc_init_file(struct inode *ino)
2078 fp->fi_inode = igrab(ino); 2065 fp->fi_inode = igrab(ino);
2079 fp->fi_id = current_fileid++; 2066 fp->fi_id = current_fileid++;
2080 fp->fi_had_conflict = false; 2067 fp->fi_had_conflict = false;
2068 fp->fi_lease = NULL;
2081 memset(fp->fi_fds, 0, sizeof(fp->fi_fds)); 2069 memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
2082 memset(fp->fi_access, 0, sizeof(fp->fi_access)); 2070 memset(fp->fi_access, 0, sizeof(fp->fi_access));
2083 spin_lock(&recall_lock); 2071 spin_lock(&recall_lock);
@@ -2329,23 +2317,8 @@ nfs4_file_downgrade(struct nfs4_file *fp, unsigned int share_access)
2329 nfs4_file_put_access(fp, O_RDONLY); 2317 nfs4_file_put_access(fp, O_RDONLY);
2330} 2318}
2331 2319
2332/* 2320static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
2333 * Spawn a thread to perform a recall on the delegation represented
2334 * by the lease (file_lock)
2335 *
2336 * Called from break_lease() with lock_flocks() held.
2337 * Note: we assume break_lease will only call this *once* for any given
2338 * lease.
2339 */
2340static
2341void nfsd_break_deleg_cb(struct file_lock *fl)
2342{ 2321{
2343 struct nfs4_delegation *dp = (struct nfs4_delegation *)fl->fl_owner;
2344
2345 dprintk("NFSD nfsd_break_deleg_cb: dp %p fl %p\n",dp,fl);
2346 if (!dp)
2347 return;
2348
2349 /* We're assuming the state code never drops its reference 2322 /* We're assuming the state code never drops its reference
2350 * without first removing the lease. Since we're in this lease 2323 * without first removing the lease. Since we're in this lease
2351 * callback (and since the lease code is serialized by the kernel 2324 * callback (and since the lease code is serialized by the kernel
@@ -2353,22 +2326,35 @@ void nfsd_break_deleg_cb(struct file_lock *fl)
2353 * it's safe to take a reference: */ 2326 * it's safe to take a reference: */
2354 atomic_inc(&dp->dl_count); 2327 atomic_inc(&dp->dl_count);
2355 2328
2356 spin_lock(&recall_lock);
2357 list_add_tail(&dp->dl_recall_lru, &del_recall_lru); 2329 list_add_tail(&dp->dl_recall_lru, &del_recall_lru);
2358 spin_unlock(&recall_lock);
2359 2330
2360 /* only place dl_time is set. protected by lock_flocks*/ 2331 /* only place dl_time is set. protected by lock_flocks*/
2361 dp->dl_time = get_seconds(); 2332 dp->dl_time = get_seconds();
2362 2333
2334 nfsd4_cb_recall(dp);
2335}
2336
2337/* Called from break_lease() with lock_flocks() held. */
2338static void nfsd_break_deleg_cb(struct file_lock *fl)
2339{
2340 struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
2341 struct nfs4_delegation *dp;
2342
2343 BUG_ON(!fp);
2344 /* We assume break_lease is only called once per lease: */
2345 BUG_ON(fp->fi_had_conflict);
2363 /* 2346 /*
2364 * We don't want the locks code to timeout the lease for us; 2347 * We don't want the locks code to timeout the lease for us;
2365 * we'll remove it ourself if the delegation isn't returned 2348 * we'll remove it ourself if a delegation isn't returned
2366 * in time. 2349 * in time:
2367 */ 2350 */
2368 fl->fl_break_time = 0; 2351 fl->fl_break_time = 0;
2369 2352
2370 dp->dl_file->fi_had_conflict = true; 2353 spin_lock(&recall_lock);
2371 nfsd4_cb_recall(dp); 2354 fp->fi_had_conflict = true;
2355 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2356 nfsd_break_one_deleg(dp);
2357 spin_unlock(&recall_lock);
2372} 2358}
2373 2359
2374static 2360static
@@ -2461,10 +2447,13 @@ find_delegation_file(struct nfs4_file *fp, stateid_t *stid)
2461{ 2447{
2462 struct nfs4_delegation *dp; 2448 struct nfs4_delegation *dp;
2463 2449
2464 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { 2450 spin_lock(&recall_lock);
2465 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) 2451 list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
2452 if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) {
2453 spin_unlock(&recall_lock);
2466 return dp; 2454 return dp;
2467 } 2455 }
2456 spin_unlock(&recall_lock);
2468 return NULL; 2457 return NULL;
2469} 2458}
2470 2459
@@ -2641,6 +2630,66 @@ static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
2641 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN; 2630 return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
2642} 2631}
2643 2632
2633static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
2634{
2635 struct file_lock *fl;
2636
2637 fl = locks_alloc_lock();
2638 if (!fl)
2639 return NULL;
2640 locks_init_lock(fl);
2641 fl->fl_lmops = &nfsd_lease_mng_ops;
2642 fl->fl_flags = FL_LEASE;
2643 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
2644 fl->fl_end = OFFSET_MAX;
2645 fl->fl_owner = (fl_owner_t)(dp->dl_file);
2646 fl->fl_pid = current->tgid;
2647 return fl;
2648}
2649
2650static int nfs4_setlease(struct nfs4_delegation *dp, int flag)
2651{
2652 struct nfs4_file *fp = dp->dl_file;
2653 struct file_lock *fl;
2654 int status;
2655
2656 fl = nfs4_alloc_init_lease(dp, flag);
2657 if (!fl)
2658 return -ENOMEM;
2659 fl->fl_file = find_readable_file(fp);
2660 list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
2661 status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
2662 if (status) {
2663 list_del_init(&dp->dl_perclnt);
2664 locks_free_lock(fl);
2665 return -ENOMEM;
2666 }
2667 fp->fi_lease = fl;
2668 fp->fi_deleg_file = fl->fl_file;
2669 get_file(fp->fi_deleg_file);
2670 atomic_set(&fp->fi_delegees, 1);
2671 list_add(&dp->dl_perfile, &fp->fi_delegations);
2672 return 0;
2673}
2674
2675static int nfs4_set_delegation(struct nfs4_delegation *dp, int flag)
2676{
2677 struct nfs4_file *fp = dp->dl_file;
2678
2679 if (!fp->fi_lease)
2680 return nfs4_setlease(dp, flag);
2681 spin_lock(&recall_lock);
2682 if (fp->fi_had_conflict) {
2683 spin_unlock(&recall_lock);
2684 return -EAGAIN;
2685 }
2686 atomic_inc(&fp->fi_delegees);
2687 list_add(&dp->dl_perfile, &fp->fi_delegations);
2688 spin_unlock(&recall_lock);
2689 list_add(&dp->dl_perclnt, &dp->dl_client->cl_delegations);
2690 return 0;
2691}
2692
2644/* 2693/*
2645 * Attempt to hand out a delegation. 2694 * Attempt to hand out a delegation.
2646 */ 2695 */
@@ -2650,7 +2699,6 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
2650 struct nfs4_delegation *dp; 2699 struct nfs4_delegation *dp;
2651 struct nfs4_stateowner *sop = stp->st_stateowner; 2700 struct nfs4_stateowner *sop = stp->st_stateowner;
2652 int cb_up; 2701 int cb_up;
2653 struct file_lock *fl;
2654 int status, flag = 0; 2702 int status, flag = 0;
2655 2703
2656 cb_up = nfsd4_cb_channel_good(sop->so_client); 2704 cb_up = nfsd4_cb_channel_good(sop->so_client);
@@ -2681,36 +2729,11 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open, struct nfs4_sta
2681 } 2729 }
2682 2730
2683 dp = alloc_init_deleg(sop->so_client, stp, fh, flag); 2731 dp = alloc_init_deleg(sop->so_client, stp, fh, flag);
2684 if (dp == NULL) { 2732 if (dp == NULL)
2685 flag = NFS4_OPEN_DELEGATE_NONE; 2733 goto out_no_deleg;
2686 goto out; 2734 status = nfs4_set_delegation(dp, flag);
2687 } 2735 if (status)
2688 status = -ENOMEM; 2736 goto out_free;
2689 fl = locks_alloc_lock();
2690 if (!fl)
2691 goto out;
2692 locks_init_lock(fl);
2693 fl->fl_lmops = &nfsd_lease_mng_ops;
2694 fl->fl_flags = FL_LEASE;
2695 fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
2696 fl->fl_end = OFFSET_MAX;
2697 fl->fl_owner = (fl_owner_t)dp;
2698 fl->fl_file = find_readable_file(stp->st_file);
2699 BUG_ON(!fl->fl_file);
2700 fl->fl_pid = current->tgid;
2701 dp->dl_flock = fl;
2702
2703 /* vfs_setlease checks to see if delegation should be handed out.
2704 * the lock_manager callback fl_change is used
2705 */
2706 if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
2707 dprintk("NFSD: setlease failed [%d], no delegation\n", status);
2708 dp->dl_flock = NULL;
2709 locks_free_lock(fl);
2710 unhash_delegation(dp);
2711 flag = NFS4_OPEN_DELEGATE_NONE;
2712 goto out;
2713 }
2714 2737
2715 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid)); 2738 memcpy(&open->op_delegate_stateid, &dp->dl_stateid, sizeof(dp->dl_stateid));
2716 2739
@@ -2722,6 +2745,12 @@ out:
2722 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) 2745 && open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE)
2723 dprintk("NFSD: WARNING: refusing delegation reclaim\n"); 2746 dprintk("NFSD: WARNING: refusing delegation reclaim\n");
2724 open->op_delegate_type = flag; 2747 open->op_delegate_type = flag;
2748 return;
2749out_free:
2750 nfs4_put_delegation(dp);
2751out_no_deleg:
2752 flag = NFS4_OPEN_DELEGATE_NONE;
2753 goto out;
2725} 2754}
2726 2755
2727/* 2756/*
@@ -2916,8 +2945,6 @@ nfs4_laundromat(void)
2916 test_val = u; 2945 test_val = u;
2917 break; 2946 break;
2918 } 2947 }
2919 dprintk("NFSD: purging unused delegation dp %p, fp %p\n",
2920 dp, dp->dl_flock);
2921 list_move(&dp->dl_recall_lru, &reaplist); 2948 list_move(&dp->dl_recall_lru, &reaplist);
2922 } 2949 }
2923 spin_unlock(&recall_lock); 2950 spin_unlock(&recall_lock);
@@ -3128,7 +3155,7 @@ nfs4_preprocess_stateid_op(struct nfsd4_compound_state *cstate,
3128 goto out; 3155 goto out;
3129 renew_client(dp->dl_client); 3156 renew_client(dp->dl_client);
3130 if (filpp) { 3157 if (filpp) {
3131 *filpp = find_readable_file(dp->dl_file); 3158 *filpp = dp->dl_file->fi_deleg_file;
3132 BUG_ON(!*filpp); 3159 BUG_ON(!*filpp);
3133 } 3160 }
3134 } else { /* open or lock stateid */ 3161 } else { /* open or lock stateid */
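The nfsd changes above drop the per-delegation dl_flock/dl_vfs_file pair in favour of a single per-file lease (fi_lease, fi_deleg_file) shared by all delegations on the file, with fi_delegees counting the users so the lease is only torn down when the last delegation goes away. A minimal userspace sketch of that ownership model using C11 atomics; struct file_state and its helpers are illustrative, not nfsd API:

#include <stdatomic.h>
#include <stdio.h>

struct file_state {
    atomic_int delegees;        /* delegations sharing the one lease */
    int lease_held;
};

static void release_lease(struct file_state *f)
{
    f->lease_held = 0;
    printf("lease released\n");
}

static void get_deleg_lease(struct file_state *f)
{
    if (atomic_fetch_add(&f->delegees, 1) == 0)
        f->lease_held = 1;      /* first delegation sets up the lease */
}

static void put_deleg_lease(struct file_state *f)
{
    if (atomic_fetch_sub(&f->delegees, 1) == 1)
        release_lease(f);       /* last delegation drops it */
}

int main(void)
{
    struct file_state f;

    atomic_init(&f.delegees, 0);
    f.lease_held = 0;

    get_deleg_lease(&f);
    get_deleg_lease(&f);        /* second delegation reuses the lease */
    put_deleg_lease(&f);
    put_deleg_lease(&f);        /* prints "lease released" here */
    return 0;
}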
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 956629b9cdc9..615f0a9f0600 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
317 READ_BUF(dummy32); 317 READ_BUF(dummy32);
318 len += (XDR_QUADLEN(dummy32) << 2); 318 len += (XDR_QUADLEN(dummy32) << 2);
319 READMEM(buf, dummy32); 319 READMEM(buf, dummy32);
320 if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) 320 if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid)))
321 goto out_nfserr; 321 return status;
322 iattr->ia_valid |= ATTR_UID; 322 iattr->ia_valid |= ATTR_UID;
323 } 323 }
324 if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { 324 if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) {
@@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval,
328 READ_BUF(dummy32); 328 READ_BUF(dummy32);
329 len += (XDR_QUADLEN(dummy32) << 2); 329 len += (XDR_QUADLEN(dummy32) << 2);
330 READMEM(buf, dummy32); 330 READMEM(buf, dummy32);
331 if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) 331 if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid)))
332 goto out_nfserr; 332 return status;
333 iattr->ia_valid |= ATTR_GID; 333 iattr->ia_valid |= ATTR_GID;
334 } 334 }
335 if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { 335 if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) {
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1142 1142
1143 u32 dummy; 1143 u32 dummy;
1144 char *machine_name; 1144 char *machine_name;
1145 int i; 1145 int i, j;
1146 int nr_secflavs; 1146 int nr_secflavs;
1147 1147
1148 READ_BUF(16); 1148 READ_BUF(16);
@@ -1215,7 +1215,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp,
1215 READ_BUF(4); 1215 READ_BUF(4);
1216 READ32(dummy); 1216 READ32(dummy);
1217 READ_BUF(dummy * 4); 1217 READ_BUF(dummy * 4);
1218 for (i = 0; i < dummy; ++i) 1218 for (j = 0; j < dummy; ++j)
1219 READ32(dummy); 1219 READ32(dummy);
1220 break; 1220 break;
1221 case RPC_AUTH_GSS: 1221 case RPC_AUTH_GSS:
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 3074656ba7bf..2d31224b07bf 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -83,8 +83,6 @@ struct nfs4_delegation {
83 atomic_t dl_count; /* ref count */ 83 atomic_t dl_count; /* ref count */
84 struct nfs4_client *dl_client; 84 struct nfs4_client *dl_client;
85 struct nfs4_file *dl_file; 85 struct nfs4_file *dl_file;
86 struct file *dl_vfs_file;
87 struct file_lock *dl_flock;
88 u32 dl_type; 86 u32 dl_type;
89 time_t dl_time; 87 time_t dl_time;
90/* For recall: */ 88/* For recall: */
@@ -379,6 +377,9 @@ struct nfs4_file {
379 */ 377 */
380 atomic_t fi_readers; 378 atomic_t fi_readers;
381 atomic_t fi_writers; 379 atomic_t fi_writers;
380 struct file *fi_deleg_file;
381 struct file_lock *fi_lease;
382 atomic_t fi_delegees;
382 struct inode *fi_inode; 383 struct inode *fi_inode;
383 u32 fi_id; /* used with stateowner->so_id 384 u32 fi_id; /* used with stateowner->so_id
384 * for stateid_hashtbl hash */ 385 * for stateid_hashtbl hash */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 641117f2188d..da1d9701f8e4 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -808,7 +808,7 @@ nfsd_get_raparms(dev_t dev, ino_t ino)
808 if (ra->p_count == 0) 808 if (ra->p_count == 0)
809 frap = rap; 809 frap = rap;
810 } 810 }
811 depth = nfsdstats.ra_size*11/10; 811 depth = nfsdstats.ra_size;
812 if (!frap) { 812 if (!frap) {
813 spin_unlock(&rab->pb_lock); 813 spin_unlock(&rab->pb_lock);
814 return NULL; 814 return NULL;
@@ -1744,6 +1744,13 @@ nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
1744 host_err = nfsd_break_lease(odentry->d_inode); 1744 host_err = nfsd_break_lease(odentry->d_inode);
1745 if (host_err) 1745 if (host_err)
1746 goto out_drop_write; 1746 goto out_drop_write;
1747 if (ndentry->d_inode) {
1748 host_err = nfsd_break_lease(ndentry->d_inode);
1749 if (host_err)
1750 goto out_drop_write;
1751 }
1752 if (host_err)
1753 goto out_drop_write;
1747 host_err = vfs_rename(fdir, odentry, tdir, ndentry); 1754 host_err = vfs_rename(fdir, odentry, tdir, ndentry);
1748 if (!host_err) { 1755 if (!host_err) {
1749 host_err = commit_metadata(tfhp); 1756 host_err = commit_metadata(tfhp);
@@ -1812,22 +1819,22 @@ nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
1812 1819
1813 host_err = mnt_want_write(fhp->fh_export->ex_path.mnt); 1820 host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
1814 if (host_err) 1821 if (host_err)
1815 goto out_nfserr; 1822 goto out_put;
1816 1823
1817 host_err = nfsd_break_lease(rdentry->d_inode); 1824 host_err = nfsd_break_lease(rdentry->d_inode);
1818 if (host_err) 1825 if (host_err)
1819 goto out_put; 1826 goto out_drop_write;
1820 if (type != S_IFDIR) 1827 if (type != S_IFDIR)
1821 host_err = vfs_unlink(dirp, rdentry); 1828 host_err = vfs_unlink(dirp, rdentry);
1822 else 1829 else
1823 host_err = vfs_rmdir(dirp, rdentry); 1830 host_err = vfs_rmdir(dirp, rdentry);
1824out_put:
1825 dput(rdentry);
1826
1827 if (!host_err) 1831 if (!host_err)
1828 host_err = commit_metadata(fhp); 1832 host_err = commit_metadata(fhp);
1829 1833out_drop_write:
1830 mnt_drop_write(fhp->fh_export->ex_path.mnt); 1834 mnt_drop_write(fhp->fh_export->ex_path.mnt);
1835out_put:
1836 dput(rdentry);
1837
1831out_nfserr: 1838out_nfserr:
1832 err = nfserrno(host_err); 1839 err = nfserrno(host_err);
1833out: 1840out:
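The nfsd_unlink hunk above is mostly label reshuffling: the write reference and the dentry reference are now released in the reverse order they were taken (mnt_drop_write before dput, each failure jumping to the matching label). A generic sketch of that goto-based unwind pattern; acquire_a(), acquire_b() and do_work() are placeholders, not kernel functions:

#include <stdio.h>

static int acquire_a(void)  { printf("get A\n"); return 0; }
static void release_a(void) { printf("put A\n"); }
static int acquire_b(void)  { printf("get B\n"); return 0; }
static void release_b(void) { printf("put B\n"); }
static int do_work(void)    { return -1; /* pretend the work fails */ }

static int operation(void)
{
    int err;

    err = acquire_a();
    if (err)
        goto out;
    err = acquire_b();
    if (err)
        goto out_release_a;

    err = do_work();
    /* fall through: success and failure unwind the same way */

    release_b();
out_release_a:
    release_a();
out:
    return err;
}

int main(void)
{
    return operation() ? 1 : 0;
}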
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c
index 388e9e8f5286..85f7baa15f5d 100644
--- a/fs/nilfs2/btnode.c
+++ b/fs/nilfs2/btnode.c
@@ -35,11 +35,6 @@
35#include "btnode.h" 35#include "btnode.h"
36 36
37 37
38void nilfs_btnode_cache_init_once(struct address_space *btnc)
39{
40 nilfs_mapping_init_once(btnc);
41}
42
43static const struct address_space_operations def_btnode_aops = { 38static const struct address_space_operations def_btnode_aops = {
44 .sync_page = block_sync_page, 39 .sync_page = block_sync_page,
45}; 40};
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h
index 79037494f1e0..1b8ebd888c28 100644
--- a/fs/nilfs2/btnode.h
+++ b/fs/nilfs2/btnode.h
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt {
37 struct buffer_head *newbh; 37 struct buffer_head *newbh;
38}; 38};
39 39
40void nilfs_btnode_cache_init_once(struct address_space *);
41void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); 40void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *);
42void nilfs_btnode_cache_clear(struct address_space *); 41void nilfs_btnode_cache_clear(struct address_space *);
43struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, 42struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc,
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 6a0e2a189f60..a0babd2bff6a 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode,
454 struct backing_dev_info *bdi = inode->i_sb->s_bdi; 454 struct backing_dev_info *bdi = inode->i_sb->s_bdi;
455 455
456 INIT_LIST_HEAD(&shadow->frozen_buffers); 456 INIT_LIST_HEAD(&shadow->frozen_buffers);
457 nilfs_mapping_init_once(&shadow->frozen_data); 457 address_space_init_once(&shadow->frozen_data);
458 nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); 458 nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops);
459 nilfs_mapping_init_once(&shadow->frozen_btnodes); 459 address_space_init_once(&shadow->frozen_btnodes);
460 nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); 460 nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops);
461 mi->mi_shadow = shadow; 461 mi->mi_shadow = shadow;
462 return 0; 462 return 0;
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 98034271cd02..161791d26458 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); 397 new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page);
398 if (!new_de) 398 if (!new_de)
399 goto out_dir; 399 goto out_dir;
400 inc_nlink(old_inode);
401 nilfs_set_link(new_dir, new_de, new_page, old_inode); 400 nilfs_set_link(new_dir, new_de, new_page, old_inode);
402 nilfs_mark_inode_dirty(new_dir); 401 nilfs_mark_inode_dirty(new_dir);
403 new_inode->i_ctime = CURRENT_TIME; 402 new_inode->i_ctime = CURRENT_TIME;
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
411 if (new_dir->i_nlink >= NILFS_LINK_MAX) 410 if (new_dir->i_nlink >= NILFS_LINK_MAX)
412 goto out_dir; 411 goto out_dir;
413 } 412 }
414 inc_nlink(old_inode);
415 err = nilfs_add_link(new_dentry, old_inode); 413 err = nilfs_add_link(new_dentry, old_inode);
416 if (err) { 414 if (err)
417 drop_nlink(old_inode);
418 nilfs_mark_inode_dirty(old_inode);
419 goto out_dir; 415 goto out_dir;
420 }
421 if (dir_de) { 416 if (dir_de) {
422 inc_nlink(new_dir); 417 inc_nlink(new_dir);
423 nilfs_mark_inode_dirty(new_dir); 418 nilfs_mark_inode_dirty(new_dir);
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry,
431 old_inode->i_ctime = CURRENT_TIME; 426 old_inode->i_ctime = CURRENT_TIME;
432 427
433 nilfs_delete_entry(old_de, old_page); 428 nilfs_delete_entry(old_de, old_page);
434 drop_nlink(old_inode);
435 429
436 if (dir_de) { 430 if (dir_de) {
437 nilfs_set_link(old_inode, dir_de, dir_page, new_dir); 431 nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 0c432416cfef..a585b35fd6bc 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page,
492 return nc; 492 return nc;
493} 493}
494 494
495void nilfs_mapping_init_once(struct address_space *mapping)
496{
497 memset(mapping, 0, sizeof(*mapping));
498 INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
499 spin_lock_init(&mapping->tree_lock);
500 INIT_LIST_HEAD(&mapping->private_list);
501 spin_lock_init(&mapping->private_lock);
502
503 spin_lock_init(&mapping->i_mmap_lock);
504 INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
505 INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
506}
507
508void nilfs_mapping_init(struct address_space *mapping, 495void nilfs_mapping_init(struct address_space *mapping,
509 struct backing_dev_info *bdi, 496 struct backing_dev_info *bdi,
510 const struct address_space_operations *aops) 497 const struct address_space_operations *aops)
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h
index 622df27cd891..2a00953ebd5f 100644
--- a/fs/nilfs2/page.h
+++ b/fs/nilfs2/page.h
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *);
61int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); 61int nilfs_copy_dirty_pages(struct address_space *, struct address_space *);
62void nilfs_copy_back_pages(struct address_space *, struct address_space *); 62void nilfs_copy_back_pages(struct address_space *, struct address_space *);
63void nilfs_clear_dirty_pages(struct address_space *); 63void nilfs_clear_dirty_pages(struct address_space *);
64void nilfs_mapping_init_once(struct address_space *mapping);
65void nilfs_mapping_init(struct address_space *mapping, 64void nilfs_mapping_init(struct address_space *mapping,
66 struct backing_dev_info *bdi, 65 struct backing_dev_info *bdi,
67 const struct address_space_operations *aops); 66 const struct address_space_operations *aops);
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 55ebae5c7f39..2de9f636792a 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci,
430 nilfs_segctor_map_segsum_entry( 430 nilfs_segctor_map_segsum_entry(
431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); 431 sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo));
432 432
433 if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) 433 if (NILFS_I(inode)->i_root &&
434 !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags))
434 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); 435 set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags);
435 /* skip finfo */ 436 /* skip finfo */
436} 437}
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 58fd707174e1..1673b3d99842 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj)
1279#ifdef CONFIG_NILFS_XATTR 1279#ifdef CONFIG_NILFS_XATTR
1280 init_rwsem(&ii->xattr_sem); 1280 init_rwsem(&ii->xattr_sem);
1281#endif 1281#endif
1282 nilfs_btnode_cache_init_once(&ii->i_btnode_cache); 1282 address_space_init_once(&ii->i_btnode_cache);
1283 ii->i_bmap = &ii->i_bmap_data; 1283 ii->i_bmap = &ii->i_bmap_data;
1284 inode_init_once(&ii->vfs_inode); 1284 inode_init_once(&ii->vfs_inode);
1285} 1285}
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index 6d80ecc7834f..7eb90403fc8a 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -56,7 +56,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry,
56 int ret = 0; /* if all else fails, just return false */ 56 int ret = 0; /* if all else fails, just return false */
57 struct ocfs2_super *osb; 57 struct ocfs2_super *osb;
58 58
59 if (nd->flags & LOOKUP_RCU) 59 if (nd && nd->flags & LOOKUP_RCU)
60 return -ECHILD; 60 return -ECHILD;
61 61
62 inode = dentry->d_inode; 62 inode = dentry->d_inode;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 43e56b97f9c0..6180da1e37e6 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
405 ocfs2_quota_trans_credits(sb); 405 ocfs2_quota_trans_credits(sb);
406} 406}
407 407
408/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + 408/* data block for new dir/symlink, allocation of directory block, dx_root
409 * bitmap block for the new bit) dx_root update for free list */ 409 * update for free list */
410#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) 410#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
411 411
412static inline int ocfs2_add_dir_index_credits(struct super_block *sb) 412static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
413{ 413{
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index b5f9160e93e9..19ebc5aad391 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3228 u32 num_clusters, unsigned int e_flags) 3228 u32 num_clusters, unsigned int e_flags)
3229{ 3229{
3230 int ret, delete, index, credits = 0; 3230 int ret, delete, index, credits = 0;
3231 u32 new_bit, new_len; 3231 u32 new_bit, new_len, orig_num_clusters;
3232 unsigned int set_len; 3232 unsigned int set_len;
3233 struct ocfs2_super *osb = OCFS2_SB(sb); 3233 struct ocfs2_super *osb = OCFS2_SB(sb);
3234 handle_t *handle; 3234 handle_t *handle;
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3261 goto out; 3261 goto out;
3262 } 3262 }
3263 3263
3264 orig_num_clusters = num_clusters;
3265
3264 while (num_clusters) { 3266 while (num_clusters) {
3265 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, 3267 ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
3266 p_cluster, num_clusters, 3268 p_cluster, num_clusters,
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
3348 * in write-back mode. 3350 * in write-back mode.
3349 */ 3351 */
3350 if (context->get_clusters == ocfs2_di_get_clusters) { 3352 if (context->get_clusters == ocfs2_di_get_clusters) {
3351 ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); 3353 ret = ocfs2_cow_sync_writeback(sb, context, cpos,
3354 orig_num_clusters);
3352 if (ret) 3355 if (ret)
3353 mlog_errno(ret); 3356 mlog_errno(ret);
3354 } 3357 }
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 38f986d2447e..36c423fb0635 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
1316 struct mount_options *mopt, 1316 struct mount_options *mopt,
1317 int is_remount) 1317 int is_remount)
1318{ 1318{
1319 int status; 1319 int status, user_stack = 0;
1320 char *p; 1320 char *p;
1321 u32 tmp; 1321 u32 tmp;
1322 1322
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
1459 memcpy(mopt->cluster_stack, args[0].from, 1459 memcpy(mopt->cluster_stack, args[0].from,
1460 OCFS2_STACK_LABEL_LEN); 1460 OCFS2_STACK_LABEL_LEN);
1461 mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; 1461 mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
1462 /*
1463 * Open code the memcmp here as we don't have
1464 * an osb to pass to
1465 * ocfs2_userspace_stack().
1466 */
1467 if (memcmp(mopt->cluster_stack,
1468 OCFS2_CLASSIC_CLUSTER_STACK,
1469 OCFS2_STACK_LABEL_LEN))
1470 user_stack = 1;
1462 break; 1471 break;
1463 case Opt_inode64: 1472 case Opt_inode64:
1464 mopt->mount_opt |= OCFS2_MOUNT_INODE64; 1473 mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
1514 } 1523 }
1515 } 1524 }
1516 1525
1517 /* Ensure only one heartbeat mode */ 1526 if (user_stack == 0) {
1518 tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | 1527 /* Ensure only one heartbeat mode */
1519 OCFS2_MOUNT_HB_NONE); 1528 tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
1520 if (hweight32(tmp) != 1) { 1529 OCFS2_MOUNT_HB_GLOBAL |
1521 mlog(ML_ERROR, "Invalid heartbeat mount options\n"); 1530 OCFS2_MOUNT_HB_NONE);
1522 status = 0; 1531 if (hweight32(tmp) != 1) {
1523 goto bail; 1532 mlog(ML_ERROR, "Invalid heartbeat mount options\n");
1533 status = 0;
1534 goto bail;
1535 }
1524 } 1536 }
1525 1537
1526 status = 1; 1538 status = 1;
diff --git a/fs/open.c b/fs/open.c
index 5a2c6ebc22b5..b47aab39c057 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -233,6 +233,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
233 233
234 if (!(file->f_mode & FMODE_WRITE)) 234 if (!(file->f_mode & FMODE_WRITE))
235 return -EBADF; 235 return -EBADF;
236
237 /* It's not possible punch hole on append only file */
238 if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode))
239 return -EPERM;
240
241 if (IS_IMMUTABLE(inode))
242 return -EPERM;
243
236 /* 244 /*
237 * Revalidate the write permissions, in case security policy has 245 * Revalidate the write permissions, in case security policy has
238 * changed since the files were opened. 246 * changed since the files were opened.
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
index 789c625c7aa5..b10e3540d5b7 100644
--- a/fs/partitions/ldm.c
+++ b/fs/partitions/ldm.c
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
251 } 251 }
252 252
253 vm->vblk_size = get_unaligned_be32(data + 0x08); 253 vm->vblk_size = get_unaligned_be32(data + 0x08);
254 if (vm->vblk_size == 0) {
255 ldm_error ("Illegal VBLK size");
256 return false;
257 }
258
254 vm->vblk_offset = get_unaligned_be32(data + 0x0C); 259 vm->vblk_offset = get_unaligned_be32(data + 0x0C);
255 vm->last_vblk_seq = get_unaligned_be32(data + 0x04); 260 vm->last_vblk_seq = get_unaligned_be32(data + 0x04);
256 261
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
index 68d6a216ee79..11f688bd76c5 100644
--- a/fs/partitions/mac.c
+++ b/fs/partitions/mac.c
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len)
29 29
30int mac_partition(struct parsed_partitions *state) 30int mac_partition(struct parsed_partitions *state)
31{ 31{
32 int slot = 1;
33 Sector sect; 32 Sector sect;
34 unsigned char *data; 33 unsigned char *data;
35 int blk, blocks_in_map; 34 int slot, blocks_in_map;
36 unsigned secsize; 35 unsigned secsize;
37#ifdef CONFIG_PPC_PMAC 36#ifdef CONFIG_PPC_PMAC
38 int found_root = 0; 37 int found_root = 0;
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state)
59 put_dev_sector(sect); 58 put_dev_sector(sect);
60 return 0; /* not a MacOS disk */ 59 return 0; /* not a MacOS disk */
61 } 60 }
62 strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
63 blocks_in_map = be32_to_cpu(part->map_count); 61 blocks_in_map = be32_to_cpu(part->map_count);
64 for (blk = 1; blk <= blocks_in_map; ++blk) { 62 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
65 int pos = blk * secsize; 63 put_dev_sector(sect);
64 return 0;
65 }
66 strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
67 for (slot = 1; slot <= blocks_in_map; ++slot) {
68 int pos = slot * secsize;
66 put_dev_sector(sect); 69 put_dev_sector(sect);
67 data = read_part_sector(state, pos/512, &sect); 70 data = read_part_sector(state, pos/512, &sect);
68 if (!data) 71 if (!data)
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state)
113 } 116 }
114 117
115 if (goodness > found_root_goodness) { 118 if (goodness > found_root_goodness) {
116 found_root = blk; 119 found_root = slot;
117 found_root_goodness = goodness; 120 found_root_goodness = goodness;
118 } 121 }
119 } 122 }
120#endif /* CONFIG_PPC_PMAC */ 123#endif /* CONFIG_PPC_PMAC */
121
122 ++slot;
123 } 124 }
124#ifdef CONFIG_PPC_PMAC 125#ifdef CONFIG_PPC_PMAC
125 if (found_root_goodness) 126 if (found_root_goodness)
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c
index 48cec7cbca17..be03a0b08b47 100644
--- a/fs/partitions/osf.c
+++ b/fs/partitions/osf.c
@@ -10,10 +10,13 @@
10#include "check.h" 10#include "check.h"
11#include "osf.h" 11#include "osf.h"
12 12
13#define MAX_OSF_PARTITIONS 8
14
13int osf_partition(struct parsed_partitions *state) 15int osf_partition(struct parsed_partitions *state)
14{ 16{
15 int i; 17 int i;
16 int slot = 1; 18 int slot = 1;
19 unsigned int npartitions;
17 Sector sect; 20 Sector sect;
18 unsigned char *data; 21 unsigned char *data;
19 struct disklabel { 22 struct disklabel {
@@ -45,7 +48,7 @@ int osf_partition(struct parsed_partitions *state)
45 u8 p_fstype; 48 u8 p_fstype;
46 u8 p_frag; 49 u8 p_frag;
47 __le16 p_cpg; 50 __le16 p_cpg;
48 } d_partitions[8]; 51 } d_partitions[MAX_OSF_PARTITIONS];
49 } * label; 52 } * label;
50 struct d_partition * partition; 53 struct d_partition * partition;
51 54
@@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state)
63 put_dev_sector(sect); 66 put_dev_sector(sect);
64 return 0; 67 return 0;
65 } 68 }
66 for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) { 69 npartitions = le16_to_cpu(label->d_npartitions);
70 if (npartitions > MAX_OSF_PARTITIONS) {
71 put_dev_sector(sect);
72 return 0;
73 }
74 for (i = 0 ; i < npartitions; i++, partition++) {
67 if (slot == state->limit) 75 if (slot == state->limit)
68 break; 76 break;
69 if (le32_to_cpu(partition->p_size)) 77 if (le32_to_cpu(partition->p_size))
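The ldm, mac and osf fixes above share one pattern: a size or count read from untrusted on-disk metadata (vblk_size, map_count, d_npartitions) is rejected up front instead of being used in a loop or division. A standalone sketch of that check against a made-up 8-byte header layout; get_be32() and the limits are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_PARTS 64

static uint32_t get_be32(const unsigned char *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static int parse_header(const unsigned char *buf, size_t buflen)
{
    uint32_t nparts, entry_size;

    if (buflen < 8)
        return -1;
    nparts = get_be32(buf);
    entry_size = get_be32(buf + 4);

    /* reject impossible values before looping or dividing on them */
    if (entry_size == 0 || nparts > MAX_PARTS)
        return -1;
    if ((buflen - 8) / entry_size < nparts)
        return -1;

    for (uint32_t i = 0; i < nparts; i++)
        printf("entry %u at offset %zu\n",
               (unsigned)i, (size_t)8 + (size_t)i * entry_size);
    return 0;
}

int main(void)
{
    unsigned char disk[8 + 2 * 32] = {
        0, 0, 0, 2,     /* nparts = 2 */
        0, 0, 0, 32,    /* entry_size = 32 */
    };

    return parse_header(disk, sizeof(disk)) ? 1 : 0;
}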
diff --git a/fs/proc/array.c b/fs/proc/array.c
index df2b703b9d0f..7c99c1cf7e5c 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -353,9 +353,6 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
353 task_cap(m, task); 353 task_cap(m, task);
354 task_cpus_allowed(m, task); 354 task_cpus_allowed(m, task);
355 cpuset_task_status_allowed(m, task); 355 cpuset_task_status_allowed(m, task);
356#if defined(CONFIG_S390)
357 task_show_regs(m, task);
358#endif
359 task_context_switch_counts(m, task); 356 task_context_switch_counts(m, task);
360 return 0; 357 return 0;
361} 358}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 9d096e82b201..d49c4b5d2c3e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2620,35 +2620,6 @@ static const struct pid_entry proc_base_stuff[] = {
2620 &proc_self_inode_operations, NULL, {}), 2620 &proc_self_inode_operations, NULL, {}),
2621}; 2621};
2622 2622
2623/*
2624 * Exceptional case: normally we are not allowed to unhash a busy
2625 * directory. In this case, however, we can do it - no aliasing problems
2626 * due to the way we treat inodes.
2627 */
2628static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd)
2629{
2630 struct inode *inode;
2631 struct task_struct *task;
2632
2633 if (nd->flags & LOOKUP_RCU)
2634 return -ECHILD;
2635
2636 inode = dentry->d_inode;
2637 task = get_proc_task(inode);
2638 if (task) {
2639 put_task_struct(task);
2640 return 1;
2641 }
2642 d_drop(dentry);
2643 return 0;
2644}
2645
2646static const struct dentry_operations proc_base_dentry_operations =
2647{
2648 .d_revalidate = proc_base_revalidate,
2649 .d_delete = pid_delete_dentry,
2650};
2651
2652static struct dentry *proc_base_instantiate(struct inode *dir, 2623static struct dentry *proc_base_instantiate(struct inode *dir,
2653 struct dentry *dentry, struct task_struct *task, const void *ptr) 2624 struct dentry *dentry, struct task_struct *task, const void *ptr)
2654{ 2625{
@@ -2685,7 +2656,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir,
2685 if (p->fop) 2656 if (p->fop)
2686 inode->i_fop = p->fop; 2657 inode->i_fop = p->fop;
2687 ei->op = p->op; 2658 ei->op = p->op;
2688 d_set_d_op(dentry, &proc_base_dentry_operations);
2689 d_add(dentry, inode); 2659 d_add(dentry, inode);
2690 error = NULL; 2660 error = NULL;
2691out: 2661out:
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 176ce4cda68a..d6a7ca1fdac5 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -27,6 +27,7 @@
27static void proc_evict_inode(struct inode *inode) 27static void proc_evict_inode(struct inode *inode)
28{ 28{
29 struct proc_dir_entry *de; 29 struct proc_dir_entry *de;
30 struct ctl_table_header *head;
30 31
31 truncate_inode_pages(&inode->i_data, 0); 32 truncate_inode_pages(&inode->i_data, 0);
32 end_writeback(inode); 33 end_writeback(inode);
@@ -38,8 +39,11 @@ static void proc_evict_inode(struct inode *inode)
38 de = PROC_I(inode)->pde; 39 de = PROC_I(inode)->pde;
39 if (de) 40 if (de)
40 pde_put(de); 41 pde_put(de);
41 if (PROC_I(inode)->sysctl) 42 head = PROC_I(inode)->sysctl;
42 sysctl_head_put(PROC_I(inode)->sysctl); 43 if (head) {
44 rcu_assign_pointer(PROC_I(inode)->sysctl, NULL);
45 sysctl_head_put(head);
46 }
43} 47}
44 48
45struct vfsmount *proc_mnt; 49struct vfsmount *proc_mnt;
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c
index d9396a4fc7ff..927cbd115e53 100644
--- a/fs/proc/proc_devtree.c
+++ b/fs/proc/proc_devtree.c
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void)
233 return; 233 return;
234 root = of_find_node_by_path("/"); 234 root = of_find_node_by_path("/");
235 if (root == NULL) { 235 if (root == NULL) {
236 printk(KERN_ERR "/proc/device-tree: can't find root\n"); 236 pr_debug("/proc/device-tree: can't find root\n");
237 return; 237 return;
238 } 238 }
239 proc_device_tree_add_node(root, proc_device_tree); 239 proc_device_tree_add_node(root, proc_device_tree);
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 09a1f92a34ef..8eb2522111c5 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -408,15 +408,18 @@ static int proc_sys_compare(const struct dentry *parent,
408 const struct dentry *dentry, const struct inode *inode, 408 const struct dentry *dentry, const struct inode *inode,
409 unsigned int len, const char *str, const struct qstr *name) 409 unsigned int len, const char *str, const struct qstr *name)
410{ 410{
411 struct ctl_table_header *head;
411 /* Although proc doesn't have negative dentries, rcu-walk means 412 /* Although proc doesn't have negative dentries, rcu-walk means
412 * that inode here can be NULL */ 413 * that inode here can be NULL */
414 /* AV: can it, indeed? */
413 if (!inode) 415 if (!inode)
414 return 0; 416 return 1;
415 if (name->len != len) 417 if (name->len != len)
416 return 1; 418 return 1;
417 if (memcmp(name->name, str, len)) 419 if (memcmp(name->name, str, len))
418 return 1; 420 return 1;
419 return !sysctl_is_seen(PROC_I(inode)->sysctl); 421 head = rcu_dereference(PROC_I(inode)->sysctl);
422 return !head || !sysctl_is_seen(head);
420} 423}
421 424
422static const struct dentry_operations proc_sys_dentry_operations = { 425static const struct dentry_operations proc_sys_dentry_operations = {
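
The proc_sys_compare() change above is the reader side of the RCU handling set up in the fs/proc/inode.c hunk: the sysctl pointer is loaded once through rcu_dereference() into a local, and NULL (inode gone or header already retracted) now means "no match" (return 1) rather than a blind dereference. Below is a minimal userspace sketch of that load-once-then-test discipline, using C11 atomics as a stand-in for the RCU accessors; the grace-period/freeing side is deliberately left out and all names are illustrative.

    #include <stdio.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct ctl_head { bool visible; };

    /* Shared pointer that another thread may retract (set to NULL) at any time. */
    static _Atomic(struct ctl_head *) inode_sysctl;

    static int compare_entry(void)
    {
        /* One acquire load into a local -- the analogue of rcu_dereference(). */
        struct ctl_head *head =
            atomic_load_explicit(&inode_sysctl, memory_order_acquire);

        /* Every later use goes through the local copy, never a re-read. */
        return !head || !head->visible;    /* non-zero means "no match" */
    }

    int main(void)
    {
        static struct ctl_head h = { .visible = true };

        atomic_store_explicit(&inode_sysctl, &h, memory_order_release);
        printf("published: %d\n", compare_entry());    /* 0: can match */

        atomic_store_explicit(&inode_sysctl, NULL, memory_order_release);
        printf("retracted: %d\n", compare_entry());    /* 1: no match */
        return 0;
    }
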
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
index ba5f51ec3458..68fdf45cc6c9 100644
--- a/fs/reiserfs/namei.c
+++ b/fs/reiserfs/namei.c
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, 771 EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
772 dentry, inode, &security); 772 dentry, inode, &security);
773 if (retval) { 773 if (retval) {
774 dir->i_nlink--; 774 DEC_DIR_INODE_NLINK(dir)
775 goto out_failed; 775 goto out_failed;
776 } 776 }
777 777
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c
index 3cfb2e933644..5c11ca82b782 100644
--- a/fs/reiserfs/xattr.c
+++ b/fs/reiserfs/xattr.c
@@ -978,8 +978,6 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags)
978 978
979static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) 979static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd)
980{ 980{
981 if (nd->flags & LOOKUP_RCU)
982 return -ECHILD;
983 return -EPERM; 981 return -EPERM;
984} 982}
985 983
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c
index b427b1208c26..e474fbcf8bde 100644
--- a/fs/sysv/namei.c
+++ b/fs/sysv/namei.c
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
245 new_de = sysv_find_entry(new_dentry, &new_page); 245 new_de = sysv_find_entry(new_dentry, &new_page);
246 if (!new_de) 246 if (!new_de)
247 goto out_dir; 247 goto out_dir;
248 inode_inc_link_count(old_inode);
249 sysv_set_link(new_de, new_page, old_inode); 248 sysv_set_link(new_de, new_page, old_inode);
250 new_inode->i_ctime = CURRENT_TIME_SEC; 249 new_inode->i_ctime = CURRENT_TIME_SEC;
251 if (dir_de) 250 if (dir_de)
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry,
257 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) 256 if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max)
258 goto out_dir; 257 goto out_dir;
259 } 258 }
260 inode_inc_link_count(old_inode);
261 err = sysv_add_link(new_dentry, old_inode); 259 err = sysv_add_link(new_dentry, old_inode);
262 if (err) { 260 if (err)
263 inode_dec_link_count(old_inode);
264 goto out_dir; 261 goto out_dir;
265 }
266 if (dir_de) 262 if (dir_de)
267 inode_inc_link_count(new_dir); 263 inode_inc_link_count(new_dir);
268 } 264 }
269 265
270 sysv_delete_entry(old_de, old_page); 266 sysv_delete_entry(old_de, old_page);
271 inode_dec_link_count(old_inode); 267 mark_inode_dirty(old_inode);
272 268
273 if (dir_de) { 269 if (dir_de) {
274 sysv_set_link(dir_de, dir_page, new_dir); 270 sysv_set_link(dir_de, dir_page, new_dir);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 2be0f9eb86d2..b7c338d5e9df 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -32,6 +32,8 @@
32#include <linux/crc-itu-t.h> 32#include <linux/crc-itu-t.h>
33#include <linux/exportfs.h> 33#include <linux/exportfs.h>
34 34
35enum { UDF_MAX_LINKS = 0xffff };
36
35static inline int udf_match(int len1, const unsigned char *name1, int len2, 37static inline int udf_match(int len1, const unsigned char *name1, int len2,
36 const unsigned char *name2) 38 const unsigned char *name2)
37{ 39{
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode)
650 struct udf_inode_info *iinfo; 652 struct udf_inode_info *iinfo;
651 653
652 err = -EMLINK; 654 err = -EMLINK;
653 if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) 655 if (dir->i_nlink >= UDF_MAX_LINKS)
654 goto out; 656 goto out;
655 657
656 err = -EIO; 658 err = -EIO;
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir,
1034 struct fileIdentDesc cfi, *fi; 1036 struct fileIdentDesc cfi, *fi;
1035 int err; 1037 int err;
1036 1038
1037 if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { 1039 if (inode->i_nlink >= UDF_MAX_LINKS)
1038 return -EMLINK; 1040 return -EMLINK;
1039 }
1040 1041
1041 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); 1042 fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
1042 if (!fi) { 1043 if (!fi) {
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
1131 goto end_rename; 1132 goto end_rename;
1132 1133
1133 retval = -EMLINK; 1134 retval = -EMLINK;
1134 if (!new_inode && 1135 if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS)
1135 new_dir->i_nlink >=
1136 (256 << sizeof(new_dir->i_nlink)) - 1)
1137 goto end_rename; 1136 goto end_rename;
1138 } 1137 }
1139 if (!nfi) { 1138 if (!nfi) {
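
The UDF hunks replace the open-coded limit (256 << sizeof(i_nlink)) - 1 with UDF_MAX_LINKS = 0xffff. The old expression shifts by a size in bytes rather than bits, so it never evaluated to the 16-bit maximum the on-disk link count allows. A standalone arithmetic check (plain C; it assumes i_nlink was a 4-byte unsigned int at the time):

    #include <stdio.h>

    enum { UDF_MAX_LINKS = 0xffff };       /* the limit the patch introduces */

    int main(void)
    {
        /* Old expression: shifts 256 by sizeof() in *bytes*, not bits. */
        unsigned long old_limit = (256UL << sizeof(unsigned int)) - 1;

        /* Largest value a 16-bit on-disk link count can hold. */
        unsigned long disk_max = (1UL << 16) - 1;

        printf("old expression: %lu\n", old_limit);   /* 4095 for a 4-byte count */
        printf("16-bit maximum: %lu\n", disk_max);    /* 65535 */
        printf("UDF_MAX_LINKS : %u\n", UDF_MAX_LINKS);
        return 0;
    }
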
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 12f39b9e4437..d6f681535eb8 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); 306 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
307 if (!new_de) 307 if (!new_de)
308 goto out_dir; 308 goto out_dir;
309 inode_inc_link_count(old_inode);
310 ufs_set_link(new_dir, new_de, new_page, old_inode); 309 ufs_set_link(new_dir, new_de, new_page, old_inode);
311 new_inode->i_ctime = CURRENT_TIME_SEC; 310 new_inode->i_ctime = CURRENT_TIME_SEC;
312 if (dir_de) 311 if (dir_de)
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
318 if (new_dir->i_nlink >= UFS_LINK_MAX) 317 if (new_dir->i_nlink >= UFS_LINK_MAX)
319 goto out_dir; 318 goto out_dir;
320 } 319 }
321 inode_inc_link_count(old_inode);
322 err = ufs_add_link(new_dentry, old_inode); 320 err = ufs_add_link(new_dentry, old_inode);
323 if (err) { 321 if (err)
324 inode_dec_link_count(old_inode);
325 goto out_dir; 322 goto out_dir;
326 }
327 if (dir_de) 323 if (dir_de)
328 inode_inc_link_count(new_dir); 324 inode_inc_link_count(new_dir);
329 } 325 }
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
331 /* 327 /*
332 * Like most other Unix systems, set the ctime for inodes on a 328 * Like most other Unix systems, set the ctime for inodes on a
333 * rename. 329 * rename.
334 * inode_dec_link_count() will mark the inode dirty.
335 */ 330 */
336 old_inode->i_ctime = CURRENT_TIME_SEC; 331 old_inode->i_ctime = CURRENT_TIME_SEC;
337 332
338 ufs_delete_entry(old_dir, old_de, old_page); 333 ufs_delete_entry(old_dir, old_de, old_page);
339 inode_dec_link_count(old_inode); 334 mark_inode_dirty(old_inode);
340 335
341 if (dir_de) { 336 if (dir_de) {
342 ufs_set_link(old_inode, dir_de, dir_page, new_dir); 337 ufs_set_link(old_inode, dir_de, dir_page, new_dir);
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c
index 05201ae719e5..d61611c88012 100644
--- a/fs/xfs/linux-2.6/xfs_discard.c
+++ b/fs/xfs/linux-2.6/xfs_discard.c
@@ -152,6 +152,8 @@ xfs_ioc_trim(
152 152
153 if (!capable(CAP_SYS_ADMIN)) 153 if (!capable(CAP_SYS_ADMIN))
154 return -XFS_ERROR(EPERM); 154 return -XFS_ERROR(EPERM);
155 if (!blk_queue_discard(q))
156 return -XFS_ERROR(EOPNOTSUPP);
155 if (copy_from_user(&range, urange, sizeof(range))) 157 if (copy_from_user(&range, urange, sizeof(range)))
156 return -XFS_ERROR(EFAULT); 158 return -XFS_ERROR(EFAULT);
157 159
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index f5e2a19e0f8e..0ca0e3c024d7 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1(
695 xfs_mount_t *mp, 695 xfs_mount_t *mp,
696 void __user *arg) 696 void __user *arg)
697{ 697{
698 xfs_fsop_geom_v1_t fsgeo; 698 xfs_fsop_geom_t fsgeo;
699 int error; 699 int error;
700 700
701 error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); 701 error = xfs_fs_geometry(mp, &fsgeo, 3);
702 if (error) 702 if (error)
703 return -error; 703 return -error;
704 704
705 if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) 705 /*
706 * Caller should have passed an argument of type
707 * xfs_fsop_geom_v1_t. This is a proper subset of the
708 * xfs_fsop_geom_t that xfs_fs_geometry() fills in.
709 */
710 if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t)))
706 return -XFS_ERROR(EFAULT); 711 return -XFS_ERROR(EFAULT);
707 return 0; 712 return 0;
708} 713}
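
The v1 geometry ioctl now fills the full xfs_fsop_geom_t and copies back only sizeof(xfs_fsop_geom_v1_t), relying on the v1 layout being a leading subset of the full one, as the added comment states. A self-contained sketch of that prefix-copy pattern with made-up structures (the real ones live in the XFS headers):

    #include <stdio.h>
    #include <stddef.h>
    #include <string.h>
    #include <assert.h>
    #include <stdint.h>

    /* Invented layouts: v1 must remain a leading subset of the full struct. */
    struct geom_v1 { uint32_t blocksize; uint32_t agblocks; };
    struct geom    { uint32_t blocksize; uint32_t agblocks; uint32_t logsectsize; };

    int main(void)
    {
        struct geom full = { 4096, 16, 512 };
        struct geom_v1 out;

        /* The prefix copy is only valid while these hold. */
        assert(sizeof(struct geom_v1) <= sizeof(struct geom));
        assert(offsetof(struct geom, agblocks) == offsetof(struct geom_v1, agblocks));

        /* The kernel uses copy_to_user(); memcpy() models the same size choice. */
        memcpy(&out, &full, sizeof(out));

        printf("v1 view: blocksize=%u agblocks=%u\n", out.blocksize, out.agblocks);
        return 0;
    }
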
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index cec89dd5d7d2..85668efb3e3e 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -53,6 +53,9 @@ xfs_fs_geometry(
53 xfs_fsop_geom_t *geo, 53 xfs_fsop_geom_t *geo,
54 int new_version) 54 int new_version)
55{ 55{
56
57 memset(geo, 0, sizeof(*geo));
58
56 geo->blocksize = mp->m_sb.sb_blocksize; 59 geo->blocksize = mp->m_sb.sb_blocksize;
57 geo->rtextsize = mp->m_sb.sb_rextsize; 60 geo->rtextsize = mp->m_sb.sb_rextsize;
58 geo->agblocks = mp->m_sb.sb_agblocks; 61 geo->agblocks = mp->m_sb.sb_agblocks;
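
The added memset() zeroes the geometry structure before the field-by-field fill, so any member the function does not set (for instance when new_version is low) reaches user space as zero instead of leftover stack bytes. A small userspace illustration of the pre-clear, with an invented structure:

    #include <stdio.h>
    #include <string.h>

    struct geo { unsigned int blocksize; unsigned int logsectsize; unsigned char pad[8]; };

    /* Mirrors the patched pattern: clear everything, then fill what applies. */
    static void fill_geo(struct geo *g)
    {
        memset(g, 0, sizeof(*g));          /* the line the patch adds */
        g->blocksize = 4096;               /* only some fields get real values */
    }

    int main(void)
    {
        struct geo g;

        fill_geo(&g);
        printf("logsectsize after pre-clear: %u\n", g.logsectsize);   /* 0 */

        /* Without the memset, g.logsectsize and g.pad would hold whatever was
         * on the stack; copied out to user space, that is an information leak. */
        return 0;
    }
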
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 31b6188df221..b4bfe338ea0e 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -4,6 +4,8 @@
4#ifndef __ASSEMBLY__ 4#ifndef __ASSEMBLY__
5#ifdef CONFIG_MMU 5#ifdef CONFIG_MMU
6 6
7#include <linux/mm_types.h>
8
7#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 9#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
8extern int ptep_set_access_flags(struct vm_area_struct *vma, 10extern int ptep_set_access_flags(struct vm_area_struct *vma,
9 unsigned long address, pte_t *ptep, 11 unsigned long address, pte_t *ptep,
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index fe29aadb129d..348843b80150 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1101,7 +1101,7 @@ struct drm_device {
1101 struct platform_device *platformdev; /**< Platform device struture */ 1101 struct platform_device *platformdev; /**< Platform device struture */
1102 1102
1103 struct drm_sg_mem *sg; /**< Scatter gather memory */ 1103 struct drm_sg_mem *sg; /**< Scatter gather memory */
1104 int num_crtcs; /**< Number of CRTCs on this device */ 1104 unsigned int num_crtcs; /**< Number of CRTCs on this device */
1105 void *dev_private; /**< device private data */ 1105 void *dev_private; /**< device private data */
1106 void *mm_private; 1106 void *mm_private;
1107 struct address_space *dev_mapping; 1107 struct address_space *dev_mapping;
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h
index 5cb86c307f5d..fc4875433817 100644
--- a/include/keys/rxrpc-type.h
+++ b/include/keys/rxrpc-type.h
@@ -99,7 +99,6 @@ struct rxrpc_key_token {
99 * structure of raw payloads passed to add_key() or instantiate key 99 * structure of raw payloads passed to add_key() or instantiate key
100 */ 100 */
101struct rxrpc_key_data_v1 { 101struct rxrpc_key_data_v1 {
102 u32 kif_version; /* 1 */
103 u16 security_index; 102 u16 security_index;
104 u16 ticket_length; 103 u16 ticket_length;
105 u32 expiry; /* time_t */ 104 u32 expiry; /* time_t */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d18ff34670a..d5063e1b5555 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q);
699extern void blk_stop_queue(struct request_queue *q); 699extern void blk_stop_queue(struct request_queue *q);
700extern void blk_sync_queue(struct request_queue *q); 700extern void blk_sync_queue(struct request_queue *q);
701extern void __blk_stop_queue(struct request_queue *q); 701extern void __blk_stop_queue(struct request_queue *q);
702extern void __blk_run_queue(struct request_queue *); 702extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
703extern void blk_run_queue(struct request_queue *); 703extern void blk_run_queue(struct request_queue *);
704extern int blk_rq_map_user(struct request_queue *, struct request *, 704extern int blk_rq_map_user(struct request_queue *, struct request *,
705 struct rq_map_data *, void __user *, unsigned long, 705 struct rq_map_data *, void __user *, unsigned long,
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p)
1088 1088
1089struct work_struct; 1089struct work_struct;
1090int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); 1090int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1091int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
1092 1091
1093#ifdef CONFIG_BLK_CGROUP 1092#ifdef CONFIG_BLK_CGROUP
1094/* 1093/*
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
1136extern int blk_throtl_init(struct request_queue *q); 1135extern int blk_throtl_init(struct request_queue *q);
1137extern void blk_throtl_exit(struct request_queue *q); 1136extern void blk_throtl_exit(struct request_queue *q);
1138extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); 1137extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
1139extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
1140extern void throtl_shutdown_timer_wq(struct request_queue *q); 1138extern void throtl_shutdown_timer_wq(struct request_queue *q);
1141#else /* CONFIG_BLK_DEV_THROTTLING */ 1139#else /* CONFIG_BLK_DEV_THROTTLING */
1142static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) 1140static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
1146 1144
1147static inline int blk_throtl_init(struct request_queue *q) { return 0; } 1145static inline int blk_throtl_init(struct request_queue *q) { return 0; }
1148static inline int blk_throtl_exit(struct request_queue *q) { return 0; } 1146static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
1149static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
1150static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} 1147static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
1151#endif /* CONFIG_BLK_DEV_THROTTLING */ 1148#endif /* CONFIG_BLK_DEV_THROTTLING */
1152 1149
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3395cf7130f5..b22fb0d3db0f 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq)
245 245
246extern void blk_dump_cmd(char *buf, struct request *rq); 246extern void blk_dump_cmd(char *buf, struct request *rq);
247extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); 247extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
248extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
249 248
250#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ 249#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
251 250
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index c3011beac30d..31d91a64838b 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -123,6 +123,7 @@ struct ceph_msg_pos {
123#define SOCK_CLOSED 11 /* socket state changed to closed */ 123#define SOCK_CLOSED 11 /* socket state changed to closed */
124#define OPENING 13 /* open connection w/ (possibly new) peer */ 124#define OPENING 13 /* open connection w/ (possibly new) peer */
125#define DEAD 14 /* dead, about to kfree */ 125#define DEAD 14 /* dead, about to kfree */
126#define BACKOFF 15
126 127
127/* 128/*
128 * A single connection with another host. 129 * A single connection with another host.
@@ -160,7 +161,6 @@ struct ceph_connection {
160 struct list_head out_queue; 161 struct list_head out_queue;
161 struct list_head out_sent; /* sending or sent but unacked */ 162 struct list_head out_sent; /* sending or sent but unacked */
162 u64 out_seq; /* last message queued for send */ 163 u64 out_seq; /* last message queued for send */
163 bool out_keepalive_pending;
164 164
165 u64 in_seq, in_seq_acked; /* last message received, acked */ 165 u64 in_seq, in_seq_acked; /* last message received, acked */
166 166
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index 68cd248f6d3e..66900e3c6eb1 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -101,8 +101,8 @@ struct ieee_pfc {
101 */ 101 */
102struct dcb_app { 102struct dcb_app {
103 __u8 selector; 103 __u8 selector;
104 __u32 protocol;
105 __u8 priority; 104 __u8 priority;
105 __u16 protocol;
106}; 106};
107 107
108struct dcbmsg { 108struct dcbmsg {
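
The dcb_app change narrows protocol to __u16 (it carries a 16-bit ethertype/port-style value) and moves it after priority, which also removes the padding the old member order required. A quick layout check, assuming natural alignment; the field names come from the hunk, the rest is scaffolding:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdint.h>

    struct dcb_app_old { uint8_t selector; uint32_t protocol; uint8_t priority; };
    struct dcb_app_new { uint8_t selector; uint8_t priority; uint16_t protocol; };

    int main(void)
    {
        printf("old: size=%zu, protocol@%zu, priority@%zu\n",
               sizeof(struct dcb_app_old),
               offsetof(struct dcb_app_old, protocol),
               offsetof(struct dcb_app_old, priority));
        printf("new: size=%zu, priority@%zu, protocol@%zu\n",
               sizeof(struct dcb_app_new),
               offsetof(struct dcb_app_new, priority),
               offsetof(struct dcb_app_new, protocol));
        /* Typically 12 bytes (half of it padding) before, 4 bytes after. */
        return 0;
    }
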
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index da7e52b099f3..1effc8b56b4e 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -109,7 +109,7 @@ static inline void freezer_count(void)
109} 109}
110 110
111/* 111/*
112 * Check if the task should be counted as freezeable by the freezer 112 * Check if the task should be counted as freezable by the freezer
113 */ 113 */
114static inline int freezer_should_skip(struct task_struct *p) 114static inline int freezer_should_skip(struct task_struct *p)
115{ 115{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index bd3215940c37..e38b50a4b9d2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -649,6 +649,7 @@ struct address_space {
649 spinlock_t private_lock; /* for use by the address_space */ 649 spinlock_t private_lock; /* for use by the address_space */
650 struct list_head private_list; /* ditto */ 650 struct list_head private_list; /* ditto */
651 struct address_space *assoc_mapping; /* ditto */ 651 struct address_space *assoc_mapping; /* ditto */
652 struct mutex unmap_mutex; /* to protect unmapping */
652} __attribute__((aligned(sizeof(long)))); 653} __attribute__((aligned(sizeof(long))));
653 /* 654 /*
654 * On most architectures that alignment is already the case; but 655 * On most architectures that alignment is already the case; but
@@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk,
2139 struct block_device *bdev); 2140 struct block_device *bdev);
2140extern int revalidate_disk(struct gendisk *); 2141extern int revalidate_disk(struct gendisk *);
2141extern int check_disk_change(struct block_device *); 2142extern int check_disk_change(struct block_device *);
2142extern int __invalidate_device(struct block_device *); 2143extern int __invalidate_device(struct block_device *, bool);
2143extern int invalidate_partition(struct gendisk *, int); 2144extern int invalidate_partition(struct gendisk *, int);
2144#endif 2145#endif
2145unsigned long invalidate_mapping_pages(struct address_space *mapping, 2146unsigned long invalidate_mapping_pages(struct address_space *mapping,
@@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin);
2225 2226
2226extern int inode_init_always(struct super_block *, struct inode *); 2227extern int inode_init_always(struct super_block *, struct inode *);
2227extern void inode_init_once(struct inode *); 2228extern void inode_init_once(struct inode *);
2229extern void address_space_init_once(struct address_space *mapping);
2228extern void ihold(struct inode * inode); 2230extern void ihold(struct inode * inode);
2229extern void iput(struct inode *); 2231extern void iput(struct inode *);
2230extern struct inode * igrab(struct inode *); 2232extern struct inode * igrab(struct inode *);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0b84c61607e8..dca31761b311 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order)
332 return alloc_pages_current(gfp_mask, order); 332 return alloc_pages_current(gfp_mask, order);
333} 333}
334extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, 334extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order,
335 struct vm_area_struct *vma, unsigned long addr); 335 struct vm_area_struct *vma, unsigned long addr,
336 int node);
336#else 337#else
337#define alloc_pages(gfp_mask, order) \ 338#define alloc_pages(gfp_mask, order) \
338 alloc_pages_node(numa_node_id(), gfp_mask, order) 339 alloc_pages_node(numa_node_id(), gfp_mask, order)
339#define alloc_pages_vma(gfp_mask, order, vma, addr) \ 340#define alloc_pages_vma(gfp_mask, order, vma, addr, node) \
340 alloc_pages(gfp_mask, order) 341 alloc_pages(gfp_mask, order)
341#endif 342#endif
342#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) 343#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
343#define alloc_page_vma(gfp_mask, vma, addr) \ 344#define alloc_page_vma(gfp_mask, vma, addr) \
344 alloc_pages_vma(gfp_mask, 0, vma, addr) 345 alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id())
346#define alloc_page_vma_node(gfp_mask, vma, addr, node) \
347 alloc_pages_vma(gfp_mask, 0, vma, addr, node)
345 348
346extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); 349extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order);
347extern unsigned long get_zeroed_page(gfp_t gfp_mask); 350extern unsigned long get_zeroed_page(gfp_t gfp_mask);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 8e6c8c42bc3c..df29c8fde36b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -57,7 +57,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
57 (transparent_hugepage_flags & \ 57 (transparent_hugepage_flags & \
58 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \ 58 (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG) && \
59 ((__vma)->vm_flags & VM_HUGEPAGE))) && \ 59 ((__vma)->vm_flags & VM_HUGEPAGE))) && \
60 !((__vma)->vm_flags & VM_NOHUGEPAGE)) 60 !((__vma)->vm_flags & VM_NOHUGEPAGE) && \
61 !is_vma_temporary_stack(__vma))
61#define transparent_hugepage_defrag(__vma) \ 62#define transparent_hugepage_defrag(__vma) \
62 ((transparent_hugepage_flags & \ 63 ((transparent_hugepage_flags & \
63 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \ 64 (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)) || \
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 697474691749..fe7c4b9ae270 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -4,8 +4,8 @@
4#include <linux/types.h> 4#include <linux/types.h>
5#include <linux/input.h> 5#include <linux/input.h>
6 6
7#define MATRIX_MAX_ROWS 16 7#define MATRIX_MAX_ROWS 32
8#define MATRIX_MAX_COLS 16 8#define MATRIX_MAX_COLS 32
9 9
10#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\ 10#define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) |\
11 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\ 11 (((col) & (MATRIX_MAX_COLS - 1)) << 16) |\
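
Doubling MATRIX_MAX_ROWS/COLS to 32 still fits the KEY() packing shown in the context lines: row and col are masked with MAX - 1 and placed in 8-bit-wide positions at bits 24 and 16. The round-trip below assumes the usual ((val) & 0xffff) tail of KEY(), which is outside this hunk, and defines its own decode helpers rather than quoting the header's:

    #include <stdio.h>

    #define MATRIX_MAX_ROWS 32
    #define MATRIX_MAX_COLS 32

    #define KEY(row, col, val) ((((row) & (MATRIX_MAX_ROWS - 1)) << 24) | \
                                (((col) & (MATRIX_MAX_COLS - 1)) << 16) | \
                                ((val) & 0xffff))

    /* Local decode helpers for the demo (not quoted from the header). */
    #define DEMO_ROW(k) (((k) >> 24) & 0xff)
    #define DEMO_COL(k) (((k) >> 16) & 0xff)
    #define DEMO_VAL(k) ((k) & 0xffff)

    int main(void)
    {
        unsigned int k = KEY(31, 17, 103);   /* row 31, col 17, keycode 103 */

        printf("packed=0x%08x row=%u col=%u val=%u\n",
               k, DEMO_ROW(k), DEMO_COL(k), DEMO_VAL(k));

        /* With the old maximum of 16 the (MAX - 1) mask was 15, so any row or
         * column in the range 16..31 would have been silently truncated. */
        return 0;
    }
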
diff --git a/include/linux/list.h b/include/linux/list.h
index 9a5f8a71810c..3a54266a1e85 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next)
96 * in an undefined state. 96 * in an undefined state.
97 */ 97 */
98#ifndef CONFIG_DEBUG_LIST 98#ifndef CONFIG_DEBUG_LIST
99static inline void __list_del_entry(struct list_head *entry)
100{
101 __list_del(entry->prev, entry->next);
102}
103
99static inline void list_del(struct list_head *entry) 104static inline void list_del(struct list_head *entry)
100{ 105{
101 __list_del(entry->prev, entry->next); 106 __list_del(entry->prev, entry->next);
@@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry)
103 entry->prev = LIST_POISON2; 108 entry->prev = LIST_POISON2;
104} 109}
105#else 110#else
111extern void __list_del_entry(struct list_head *entry);
106extern void list_del(struct list_head *entry); 112extern void list_del(struct list_head *entry);
107#endif 113#endif
108 114
@@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old,
135 */ 141 */
136static inline void list_del_init(struct list_head *entry) 142static inline void list_del_init(struct list_head *entry)
137{ 143{
138 __list_del(entry->prev, entry->next); 144 __list_del_entry(entry);
139 INIT_LIST_HEAD(entry); 145 INIT_LIST_HEAD(entry);
140} 146}
141 147
@@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry)
146 */ 152 */
147static inline void list_move(struct list_head *list, struct list_head *head) 153static inline void list_move(struct list_head *list, struct list_head *head)
148{ 154{
149 __list_del(list->prev, list->next); 155 __list_del_entry(list);
150 list_add(list, head); 156 list_add(list, head);
151} 157}
152 158
@@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head)
158static inline void list_move_tail(struct list_head *list, 164static inline void list_move_tail(struct list_head *list,
159 struct list_head *head) 165 struct list_head *head)
160{ 166{
161 __list_del(list->prev, list->next); 167 __list_del_entry(list);
162 list_add_tail(list, head); 168 list_add_tail(list, head);
163} 169}
164 170
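
__list_del_entry() unlinks a node without writing the LIST_POISON values, which is exactly what list_del_init() and the list_move*() helpers need since they re-link the entry immediately; in the CONFIG_DEBUG_LIST build it also gives the debug code a single place to validate the node. A self-contained miniature of the same circular-list idiom (a simplified re-implementation, not the kernel code):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    static void __list_del(struct list_head *prev, struct list_head *next)
    {
        next->prev = prev;
        prev->next = next;
    }

    /* Unlink without poisoning, precisely so the entry can be re-linked. */
    static void __list_del_entry(struct list_head *entry)
    {
        __list_del(entry->prev, entry->next);
    }

    static void list_add_tail(struct list_head *new, struct list_head *head)
    {
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
    }

    static void list_move_tail(struct list_head *list, struct list_head *head)
    {
        __list_del_entry(list);      /* the pattern the patch switches to */
        list_add_tail(list, head);
    }

    int main(void)
    {
        struct list_head a = LIST_HEAD_INIT(a), b = LIST_HEAD_INIT(b);
        struct list_head node;

        list_add_tail(&node, &a);
        list_move_tail(&node, &b);   /* safe: node was never poisoned */

        printf("node on b: %s\n",
               (b.next == &node && node.next == &b) ? "yes" : "no");
        return 0;
    }
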
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 3fd36845ca45..ef4f0b6083a3 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -71,6 +71,7 @@ struct wm8994 {
71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; 71 u16 irq_masks_cache[WM8994_NUM_IRQ_REGS];
72 72
73 /* Used over suspend/resume */ 73 /* Used over suspend/resume */
74 bool suspended;
74 u16 ldo_regs[WM8994_NUM_LDO_REGS]; 75 u16 ldo_regs[WM8994_NUM_LDO_REGS];
75 u16 gpio_regs[WM8994_NUM_GPIO_REGS]; 76 u16 gpio_regs[WM8994_NUM_GPIO_REGS];
76 77
diff --git a/include/linux/module.h b/include/linux/module.h
index 9bdf27c7615b..5de42043dff0 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -62,7 +62,7 @@ struct module_version_attribute {
62 struct module_attribute mattr; 62 struct module_attribute mattr;
63 const char *module_name; 63 const char *module_name;
64 const char *version; 64 const char *version;
65}; 65} __attribute__ ((__aligned__(sizeof(void *))));
66 66
67struct module_kobject 67struct module_kobject
68{ 68{
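
The new __aligned(sizeof(void *)) on struct module_version_attribute appears to be about the section these objects are emitted into, where they are laid out back to back and walked as an array: pinning the type's alignment keeps the stride predictable. The sketch below only demonstrates what the attribute does mechanically, using a stand-in structure whose natural alignment is smaller than a pointer:

    #include <stdio.h>
    #include <stdalign.h>

    struct demo_plain {
        short id;
        char tag;
    };

    struct demo_aligned {
        short id;
        char tag;
    } __attribute__((aligned(sizeof(void *))));

    int main(void)
    {
        printf("plain  : size=%zu align=%zu\n",
               sizeof(struct demo_plain), alignof(struct demo_plain));
        printf("aligned: size=%zu align=%zu\n",
               sizeof(struct demo_aligned), alignof(struct demo_aligned));
        /* The attribute rounds both the alignment and (via tail padding) the
         * size up to sizeof(void *), so an array-style walk over consecutive
         * objects advances by a uniform, pointer-aligned stride. */
        return 0;
    }
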
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d971346b0340..71caf7a5e6c6 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2392,6 +2392,9 @@ extern int netdev_notice(const struct net_device *dev, const char *format, ...)
2392extern int netdev_info(const struct net_device *dev, const char *format, ...) 2392extern int netdev_info(const struct net_device *dev, const char *format, ...)
2393 __attribute__ ((format (printf, 2, 3))); 2393 __attribute__ ((format (printf, 2, 3)));
2394 2394
2395#define MODULE_ALIAS_NETDEV(device) \
2396 MODULE_ALIAS("netdev-" device)
2397
2395#if defined(DEBUG) 2398#if defined(DEBUG)
2396#define netdev_dbg(__dev, format, args...) \ 2399#define netdev_dbg(__dev, format, args...) \
2397 netdev_printk(KERN_DEBUG, __dev, format, ##args) 2400 netdev_printk(KERN_DEBUG, __dev, format, ##args)
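
MODULE_ALIAS_NETDEV() builds its alias by pasting a "netdev-" prefix onto the device name with adjacent string-literal concatenation before handing it to MODULE_ALIAS(). The same mechanism in plain C, with puts() standing in for the .modinfo machinery:

    #include <stdio.h>

    /* Stand-in for MODULE_ALIAS(); the kernel macro records the string in a
     * .modinfo section instead of printing it. */
    #define MODULE_ALIAS_DEMO(alias)      puts("alias: " alias)
    #define MODULE_ALIAS_NETDEV(device)   MODULE_ALIAS_DEMO("netdev-" device)

    int main(void)
    {
        MODULE_ALIAS_NETDEV("eth0");   /* prints: alias: netdev-eth0 */
        return 0;
    }
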
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b197563913bf..3e112de12d8d 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -68,11 +68,7 @@ struct nfs_client {
68 unsigned char cl_id_uniquifier; 68 unsigned char cl_id_uniquifier;
69 u32 cl_cb_ident; /* v4.0 callback identifier */ 69 u32 cl_cb_ident; /* v4.0 callback identifier */
70 const struct nfs4_minor_version_ops *cl_mvops; 70 const struct nfs4_minor_version_ops *cl_mvops;
71#endif /* CONFIG_NFS_V4 */
72 71
73#ifdef CONFIG_NFS_V4_1
74 /* clientid returned from EXCHANGE_ID, used by session operations */
75 u64 cl_ex_clid;
76 /* The sequence id to use for the next CREATE_SESSION */ 72 /* The sequence id to use for the next CREATE_SESSION */
77 u32 cl_seqid; 73 u32 cl_seqid;
78 /* The flags used for obtaining the clientid during EXCHANGE_ID */ 74 /* The flags used for obtaining the clientid during EXCHANGE_ID */
@@ -80,7 +76,7 @@ struct nfs_client {
80 struct nfs4_session *cl_session; /* sharred session */ 76 struct nfs4_session *cl_session; /* sharred session */
81 struct list_head cl_layouts; 77 struct list_head cl_layouts;
82 struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ 78 struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */
83#endif /* CONFIG_NFS_V4_1 */ 79#endif /* CONFIG_NFS_V4 */
84 80
85#ifdef CONFIG_NFS_FSCACHE 81#ifdef CONFIG_NFS_FSCACHE
86 struct fscache_cookie *fscache; /* client index cache cookie */ 82 struct fscache_cookie *fscache; /* client index cache cookie */
@@ -185,7 +181,7 @@ struct nfs_server {
185/* maximum number of slots to use */ 181/* maximum number of slots to use */
186#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE 182#define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE
187 183
188#if defined(CONFIG_NFS_V4_1) 184#if defined(CONFIG_NFS_V4)
189 185
190/* Sessions */ 186/* Sessions */
191#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) 187#define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long)))
@@ -225,5 +221,5 @@ struct nfs4_session {
225 struct nfs_client *clp; 221 struct nfs_client *clp;
226}; 222};
227 223
228#endif /* CONFIG_NFS_V4_1 */ 224#endif /* CONFIG_NFS_V4 */
229#endif 225#endif
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
index 32fb81212fd1..1ca64113efe8 100644
--- a/include/linux/oprofile.h
+++ b/include/linux/oprofile.h
@@ -16,6 +16,8 @@
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/spinlock.h> 17#include <linux/spinlock.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/printk.h>
19#include <asm/atomic.h> 21#include <asm/atomic.h>
20 22
21/* Each escaped entry is prefixed by ESCAPE_CODE 23/* Each escaped entry is prefixed by ESCAPE_CODE
@@ -186,10 +188,17 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val);
186int oprofile_add_data64(struct op_entry *entry, u64 val); 188int oprofile_add_data64(struct op_entry *entry, u64 val);
187int oprofile_write_commit(struct op_entry *entry); 189int oprofile_write_commit(struct op_entry *entry);
188 190
189#ifdef CONFIG_PERF_EVENTS 191#ifdef CONFIG_HW_PERF_EVENTS
190int __init oprofile_perf_init(struct oprofile_operations *ops); 192int __init oprofile_perf_init(struct oprofile_operations *ops);
191void oprofile_perf_exit(void); 193void oprofile_perf_exit(void);
192char *op_name_from_perf_id(void); 194char *op_name_from_perf_id(void);
193#endif /* CONFIG_PERF_EVENTS */ 195#else
196static inline int __init oprofile_perf_init(struct oprofile_operations *ops)
197{
198 pr_info("oprofile: hardware counters not available\n");
199 return -ENODEV;
200}
201static inline void oprofile_perf_exit(void) { }
202#endif /* CONFIG_HW_PERF_EVENTS */
194 203
195#endif /* OPROFILE_H */ 204#endif /* OPROFILE_H */
diff --git a/include/linux/pm.h b/include/linux/pm.h
index dd9c7ab38270..21415cc91cbb 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -431,6 +431,8 @@ struct dev_pm_info {
431 struct list_head entry; 431 struct list_head entry;
432 struct completion completion; 432 struct completion completion;
433 struct wakeup_source *wakeup; 433 struct wakeup_source *wakeup;
434#else
435 unsigned int should_wakeup:1;
434#endif 436#endif
435#ifdef CONFIG_PM_RUNTIME 437#ifdef CONFIG_PM_RUNTIME
436 struct timer_list suspend_timer; 438 struct timer_list suspend_timer;
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 9cff00dd6b63..03a67db03d01 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev)
109 return dev->power.can_wakeup; 109 return dev->power.can_wakeup;
110} 110}
111 111
112static inline bool device_may_wakeup(struct device *dev)
113{
114 return false;
115}
116
117static inline struct wakeup_source *wakeup_source_create(const char *name) 112static inline struct wakeup_source *wakeup_source_create(const char *name)
118{ 113{
119 return NULL; 114 return NULL;
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {}
134 129
135static inline int device_wakeup_enable(struct device *dev) 130static inline int device_wakeup_enable(struct device *dev)
136{ 131{
137 return -EINVAL; 132 dev->power.should_wakeup = true;
133 return 0;
138} 134}
139 135
140static inline int device_wakeup_disable(struct device *dev) 136static inline int device_wakeup_disable(struct device *dev)
141{ 137{
138 dev->power.should_wakeup = false;
142 return 0; 139 return 0;
143} 140}
144 141
145static inline int device_init_wakeup(struct device *dev, bool val) 142static inline int device_set_wakeup_enable(struct device *dev, bool enable)
146{ 143{
147 dev->power.can_wakeup = val; 144 dev->power.should_wakeup = enable;
148 return val ? -EINVAL : 0; 145 return 0;
149} 146}
150 147
148static inline int device_init_wakeup(struct device *dev, bool val)
149{
150 device_set_wakeup_capable(dev, val);
151 device_set_wakeup_enable(dev, val);
152 return 0;
153}
151 154
152static inline int device_set_wakeup_enable(struct device *dev, bool enable) 155static inline bool device_may_wakeup(struct device *dev)
153{ 156{
154 return -EINVAL; 157 return dev->power.can_wakeup && dev->power.should_wakeup;
155} 158}
156 159
157static inline void __pm_stay_awake(struct wakeup_source *ws) {} 160static inline void __pm_stay_awake(struct wakeup_source *ws) {}
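
With CONFIG_PM_SLEEP disabled, the stubs now track a real should_wakeup bit (added to struct dev_pm_info in the pm.h hunk above), so device_may_wakeup() reports what the driver actually configured instead of always returning false or -EINVAL. A userspace mock of the resulting behaviour; the structure and function names follow the hunks, while device_set_wakeup_capable() and the rest of the scaffolding are filled in by assumption:

    #include <stdio.h>
    #include <stdbool.h>

    struct dev_pm_info { unsigned int can_wakeup:1; unsigned int should_wakeup:1; };
    struct device      { struct dev_pm_info power; };

    /* The !CONFIG_PM_SLEEP stubs after the patch, simplified. */
    static void device_set_wakeup_capable(struct device *dev, bool capable)
    {
        dev->power.can_wakeup = capable;
    }

    static int device_set_wakeup_enable(struct device *dev, bool enable)
    {
        dev->power.should_wakeup = enable;
        return 0;
    }

    static int device_init_wakeup(struct device *dev, bool val)
    {
        device_set_wakeup_capable(dev, val);
        device_set_wakeup_enable(dev, val);
        return 0;
    }

    static bool device_may_wakeup(struct device *dev)
    {
        return dev->power.can_wakeup && dev->power.should_wakeup;
    }

    int main(void)
    {
        struct device dev = { { 0, 0 } };

        device_init_wakeup(&dev, true);
        printf("may wakeup after init:    %d\n", device_may_wakeup(&dev)); /* 1 */

        device_set_wakeup_enable(&dev, false);
        printf("may wakeup after disable: %d\n", device_may_wakeup(&dev)); /* 0 */
        return 0;
    }
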
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 092a04f874a8..a1147e5dd245 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -102,11 +102,8 @@
102 102
103extern long arch_ptrace(struct task_struct *child, long request, 103extern long arch_ptrace(struct task_struct *child, long request,
104 unsigned long addr, unsigned long data); 104 unsigned long addr, unsigned long data);
105extern int ptrace_traceme(void);
106extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); 105extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len);
107extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); 106extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len);
108extern int ptrace_attach(struct task_struct *tsk);
109extern int ptrace_detach(struct task_struct *, unsigned int);
110extern void ptrace_disable(struct task_struct *); 107extern void ptrace_disable(struct task_struct *);
111extern int ptrace_check_attach(struct task_struct *task, int kill); 108extern int ptrace_check_attach(struct task_struct *task, int kill);
112extern int ptrace_request(struct task_struct *child, long request, 109extern int ptrace_request(struct task_struct *child, long request,
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h
index d63dcbaea169..9026b30238f3 100644
--- a/include/linux/rio_regs.h
+++ b/include/linux/rio_regs.h
@@ -14,10 +14,12 @@
14#define LINUX_RIO_REGS_H 14#define LINUX_RIO_REGS_H
15 15
16/* 16/*
17 * In RapidIO, each device has a 2MB configuration space that is 17 * In RapidIO, each device has a 16MB configuration space that is
18 * accessed via maintenance transactions. Portions of configuration 18 * accessed via maintenance transactions. Portions of configuration
19 * space are standardized and/or reserved. 19 * space are standardized and/or reserved.
20 */ 20 */
21#define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */
22
21#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ 23#define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */
22#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ 24#define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */
23#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ 25#define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index a0b639f8e805..89c3e5182991 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -203,6 +203,18 @@ struct rtc_device
203 struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ 203 struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */
204 int pie_enabled; 204 int pie_enabled;
205 struct work_struct irqwork; 205 struct work_struct irqwork;
206
207
208#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
209 struct work_struct uie_task;
210 struct timer_list uie_timer;
211 /* Those fields are protected by rtc->irq_lock */
212 unsigned int oldsecs;
213 unsigned int uie_irq_active:1;
214 unsigned int stop_uie_polling:1;
215 unsigned int uie_task_active:1;
216 unsigned int uie_timer_active:1;
217#endif
206}; 218};
207#define to_rtc_device(d) container_of(d, struct rtc_device, dev) 219#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
208 220
@@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc,
235 struct rtc_task *task, int freq); 247 struct rtc_task *task, int freq);
236extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); 248extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
237extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); 249extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
250extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
251 unsigned int enabled);
238 252
253void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode);
239void rtc_aie_update_irq(void *private); 254void rtc_aie_update_irq(void *private);
240void rtc_uie_update_irq(void *private); 255void rtc_uie_update_irq(void *private);
241enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); 256enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d747f948b34e..777d8a5ed06b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
1744#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ 1744#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
1745#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ 1745#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
1746#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ 1746#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
1747#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ 1747#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
1748#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ 1748#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
1749 1749
1750/* 1750/*
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index 88513fd8e208..d81db8012c63 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
212struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, 212struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
213 const struct rpc_call_ops *ops); 213 const struct rpc_call_ops *ops);
214void rpc_put_task(struct rpc_task *); 214void rpc_put_task(struct rpc_task *);
215void rpc_put_task_async(struct rpc_task *);
215void rpc_exit_task(struct rpc_task *); 216void rpc_exit_task(struct rpc_task *);
216void rpc_exit(struct rpc_task *, int); 217void rpc_exit(struct rpc_task *, int);
217void rpc_release_calldata(const struct rpc_call_ops *, void *); 218void rpc_release_calldata(const struct rpc_call_ops *, void *);
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 7bb5cb64f3b8..11684d9e6bd2 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -930,6 +930,7 @@ enum
930 930
931#ifdef __KERNEL__ 931#ifdef __KERNEL__
932#include <linux/list.h> 932#include <linux/list.h>
933#include <linux/rcupdate.h>
933 934
934/* For the /proc/sys support */ 935/* For the /proc/sys support */
935struct ctl_table; 936struct ctl_table;
@@ -1037,10 +1038,15 @@ struct ctl_table_root {
1037 struct ctl_table trees. */ 1038 struct ctl_table trees. */
1038struct ctl_table_header 1039struct ctl_table_header
1039{ 1040{
1040 struct ctl_table *ctl_table; 1041 union {
1041 struct list_head ctl_entry; 1042 struct {
1042 int used; 1043 struct ctl_table *ctl_table;
1043 int count; 1044 struct list_head ctl_entry;
1045 int used;
1046 int count;
1047 };
1048 struct rcu_head rcu;
1049 };
1044 struct completion *unregistering; 1050 struct completion *unregistering;
1045 struct ctl_table *ctl_table_arg; 1051 struct ctl_table *ctl_table_arg;
1046 struct ctl_table_root *root; 1052 struct ctl_table_root *root;
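
Overlaying the live bookkeeping fields with an rcu_head in an anonymous union lets a ctl_table_header be handed to call_rcu() without growing the structure: once the entry is unlinked and no new RCU reader can reach it, those fields are dead and their storage can serve as the callback linkage. A structural sketch with userspace stand-ins (the real rcu_head and list_head differ in detail):

    #include <stdio.h>

    struct rcu_head_demo {
        struct rcu_head_demo *next;
        void (*func)(struct rcu_head_demo *head);
    };

    struct list_head_demo { struct list_head_demo *next, *prev; };

    struct ctl_table_header_demo {
        union {
            struct {                       /* valid while the header is live */
                void *ctl_table;
                struct list_head_demo ctl_entry;
                int used;
                int count;
            };
            struct rcu_head_demo rcu;      /* reused only after unpublishing */
        };
        void *unregistering;
    };

    int main(void)
    {
        /* The union costs nothing: the rcu member overlays the live fields. */
        printf("header size: %zu bytes\n", sizeof(struct ctl_table_header_demo));
        return 0;
    }
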
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 8651556dbd52..d3ec89fb4122 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *);
172struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, 172struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
173 const struct thermal_cooling_device_ops *); 173 const struct thermal_cooling_device_ops *);
174void thermal_cooling_device_unregister(struct thermal_cooling_device *); 174void thermal_cooling_device_unregister(struct thermal_cooling_device *);
175
176#ifdef CONFIG_NET
175extern int generate_netlink_event(u32 orig, enum events event); 177extern int generate_netlink_event(u32 orig, enum events event);
178#else
179static inline int generate_netlink_event(u32 orig, enum events event)
180{
181 return 0;
182}
183#endif
176 184
177#endif /* __THERMAL_H__ */ 185#endif /* __THERMAL_H__ */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1ac11586a2f5..f7998a3bf020 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
250enum { 250enum {
251 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ 251 WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */
252 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ 252 WQ_UNBOUND = 1 << 1, /* not bound to any cpu */
253 WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ 253 WQ_FREEZABLE = 1 << 2, /* freeze during suspend */
254 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ 254 WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */
255 WQ_HIGHPRI = 1 << 4, /* high priority */ 255 WQ_HIGHPRI = 1 << 4, /* high priority */
256 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ 256 WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
318/** 318/**
319 * alloc_ordered_workqueue - allocate an ordered workqueue 319 * alloc_ordered_workqueue - allocate an ordered workqueue
320 * @name: name of the workqueue 320 * @name: name of the workqueue
321 * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) 321 * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
322 * 322 *
323 * Allocate an ordered workqueue. An ordered workqueue executes at 323 * Allocate an ordered workqueue. An ordered workqueue executes at
324 * most one work item at any given time in the queued order. They are 324 * most one work item at any given time in the queued order. They are
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags)
335 335
336#define create_workqueue(name) \ 336#define create_workqueue(name) \
337 alloc_workqueue((name), WQ_MEM_RECLAIM, 1) 337 alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
338#define create_freezeable_workqueue(name) \ 338#define create_freezable_workqueue(name) \
339 alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 339 alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
340#define create_singlethread_workqueue(name) \ 340#define create_singlethread_workqueue(name) \
341 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) 341 alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
342 342
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 4a3cd2cd2f5e..96e50e0ce3ca 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -89,6 +89,18 @@
89#define IPV6_ADDR_SCOPE_GLOBAL 0x0e 89#define IPV6_ADDR_SCOPE_GLOBAL 0x0e
90 90
91/* 91/*
92 * Addr flags
93 */
94#ifdef __KERNEL__
95#define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \
96 ((a)->s6_addr[1] & 0x10)
97#define IPV6_ADDR_MC_FLAG_PREFIX(a) \
98 ((a)->s6_addr[1] & 0x20)
99#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \
100 ((a)->s6_addr[1] & 0x40)
101#endif
102
103/*
92 * fragmentation header 104 * fragmentation header
93 */ 105 */
94 106
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h
index cd85b3bc8327..e505358d8999 100644
--- a/include/net/netfilter/nf_tproxy_core.h
+++ b/include/net/netfilter/nf_tproxy_core.h
@@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol,
201} 201}
202#endif 202#endif
203 203
204static inline void
205nf_tproxy_put_sock(struct sock *sk)
206{
207 /* TIME_WAIT inet sockets have to be handled differently */
208 if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT))
209 inet_twsk_put(inet_twsk(sk));
210 else
211 sock_put(sk);
212}
213
214/* assign a socket to the skb -- consumes sk */ 204/* assign a socket to the skb -- consumes sk */
215int 205void
216nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); 206nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk);
217 207
218#endif 208#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 160a407c1963..04f8556313d5 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -199,7 +199,7 @@ struct tcf_proto {
199 199
200struct qdisc_skb_cb { 200struct qdisc_skb_cb {
201 unsigned int pkt_len; 201 unsigned int pkt_len;
202 char data[]; 202 long data[];
203}; 203};
204 204
205static inline int qdisc_qlen(struct Qdisc *q) 205static inline int qdisc_qlen(struct Qdisc *q)
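
Qdiscs cast qdisc_skb_cb::data to their own private structures, so the flexible array should carry the alignment of the widest member those structures may hold; declaring it long instead of char raises both the struct's alignment and the offset of data. A quick check, with typical LP64 numbers noted in the comment:

    #include <stdio.h>
    #include <stddef.h>
    #include <stdalign.h>

    struct cb_char { unsigned int pkt_len; char data[]; };
    struct cb_long { unsigned int pkt_len; long data[]; };

    int main(void)
    {
        printf("char data[]: struct align=%zu, data offset=%zu\n",
               alignof(struct cb_char), offsetof(struct cb_char, data));
        printf("long data[]: struct align=%zu, data offset=%zu\n",
               alignof(struct cb_long), offsetof(struct cb_long, data));
        /* Typical LP64 result: char -> align 4, offset 4; long -> align 8,
         * offset 8.  With long data[] the cb area starts on a long boundary,
         * so casting it to a struct holding longs or pointers is not a
         * misaligned access. */
        return 0;
    }
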
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 8479b66c067b..3fd5064dd43a 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev);
261#define CONF_ENABLE_ESR 0x0008 261#define CONF_ENABLE_ESR 0x0008
262#define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ 262#define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ
263 * (CONF_ENABLE_IRQ) in use */ 263 * (CONF_ENABLE_IRQ) in use */
264#define CONF_ENABLE_ZVCARD 0x0020
264 265
265/* flags used by pcmcia_loop_config() autoconfiguration */ 266/* flags used by pcmcia_loop_config() autoconfiguration */
266#define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ 267#define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h
index b4a0db2307ef..1eeebd534f7e 100644
--- a/include/sound/wm8903.h
+++ b/include/sound/wm8903.h
@@ -17,13 +17,9 @@
17/* 17/*
18 * R6 (0x06) - Mic Bias Control 0 18 * R6 (0x06) - Mic Bias Control 0
19 */ 19 */
20#define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */ 20#define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */
21#define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */ 21#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */
22#define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */ 22#define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */
23#define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */
24#define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */
25#define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */
26#define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */
27#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ 23#define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */
28#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ 24#define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */
29#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ 25#define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 07fdfb6b9a9a..0828b6c8610a 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -8,7 +8,6 @@
8#include <scsi/scsi_cmnd.h> 8#include <scsi/scsi_cmnd.h>
9#include <net/sock.h> 9#include <net/sock.h>
10#include <net/tcp.h> 10#include <net/tcp.h>
11#include "target_core_mib.h"
12 11
13#define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" 12#define TARGET_CORE_MOD_VERSION "v4.0.0-rc6"
14#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) 13#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
@@ -195,6 +194,21 @@ typedef enum {
195 SAM_TASK_ATTR_EMULATED 194 SAM_TASK_ATTR_EMULATED
196} t10_task_attr_index_t; 195} t10_task_attr_index_t;
197 196
197/*
198 * Used for target SCSI statistics
199 */
200typedef enum {
201 SCSI_INST_INDEX,
202 SCSI_DEVICE_INDEX,
203 SCSI_AUTH_INTR_INDEX,
204 SCSI_INDEX_TYPE_MAX
205} scsi_index_t;
206
207struct scsi_index_table {
208 spinlock_t lock;
209 u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
210} ____cacheline_aligned;
211
198struct se_cmd; 212struct se_cmd;
199 213
200struct t10_alua { 214struct t10_alua {
@@ -578,8 +592,6 @@ struct se_node_acl {
578 spinlock_t stats_lock; 592 spinlock_t stats_lock;
579 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ 593 /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
580 atomic_t acl_pr_ref_count; 594 atomic_t acl_pr_ref_count;
581 /* Used for MIB access */
582 atomic_t mib_ref_count;
583 struct se_dev_entry *device_list; 595 struct se_dev_entry *device_list;
584 struct se_session *nacl_sess; 596 struct se_session *nacl_sess;
585 struct se_portal_group *se_tpg; 597 struct se_portal_group *se_tpg;
@@ -595,8 +607,6 @@ struct se_node_acl {
595} ____cacheline_aligned; 607} ____cacheline_aligned;
596 608
597struct se_session { 609struct se_session {
598 /* Used for MIB access */
599 atomic_t mib_ref_count;
600 u64 sess_bin_isid; 610 u64 sess_bin_isid;
601 struct se_node_acl *se_node_acl; 611 struct se_node_acl *se_node_acl;
602 struct se_portal_group *se_tpg; 612 struct se_portal_group *se_tpg;
@@ -806,7 +816,6 @@ struct se_hba {
806 /* Virtual iSCSI devices attached. */ 816 /* Virtual iSCSI devices attached. */
807 u32 dev_count; 817 u32 dev_count;
808 u32 hba_index; 818 u32 hba_index;
809 atomic_t dev_mib_access_count;
810 atomic_t load_balance_queue; 819 atomic_t load_balance_queue;
811 atomic_t left_queue_depth; 820 atomic_t left_queue_depth;
812 /* Maximum queue depth the HBA can handle. */ 821 /* Maximum queue depth the HBA can handle. */
@@ -845,6 +854,12 @@ struct se_lun {
845 854
846#define SE_LUN(c) ((struct se_lun *)(c)->se_lun) 855#define SE_LUN(c) ((struct se_lun *)(c)->se_lun)
847 856
857struct scsi_port_stats {
858 u64 cmd_pdus;
859 u64 tx_data_octets;
860 u64 rx_data_octets;
861} ____cacheline_aligned;
862
848struct se_port { 863struct se_port {
849 /* RELATIVE TARGET PORT IDENTIFER */ 864 /* RELATIVE TARGET PORT IDENTIFER */
850 u16 sep_rtpi; 865 u16 sep_rtpi;
@@ -867,6 +882,7 @@ struct se_port {
867} ____cacheline_aligned; 882} ____cacheline_aligned;
868 883
869struct se_tpg_np { 884struct se_tpg_np {
885 struct se_portal_group *tpg_np_parent;
870 struct config_group tpg_np_group; 886 struct config_group tpg_np_group;
871} ____cacheline_aligned; 887} ____cacheline_aligned;
872 888
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 66f44e56eb80..2e8ec51f0615 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -111,6 +111,8 @@ struct se_subsystem_api;
111 111
112extern int init_se_global(void); 112extern int init_se_global(void);
113extern void release_se_global(void); 113extern void release_se_global(void);
114extern void init_scsi_index_table(void);
115extern u32 scsi_get_new_index(scsi_index_t);
114extern void transport_init_queue_obj(struct se_queue_obj *); 116extern void transport_init_queue_obj(struct se_queue_obj *);
115extern int transport_subsystem_check_init(void); 117extern int transport_subsystem_check_init(void);
116extern int transport_subsystem_register(struct se_subsystem_api *); 118extern int transport_subsystem_register(struct se_subsystem_api *);
@@ -133,6 +135,8 @@ extern void transport_complete_task(struct se_task *, int);
133extern void transport_add_task_to_execute_queue(struct se_task *, 135extern void transport_add_task_to_execute_queue(struct se_task *,
134 struct se_task *, 136 struct se_task *,
135 struct se_device *); 137 struct se_device *);
138extern void transport_remove_task_from_execute_queue(struct se_task *,
139 struct se_device *);
136unsigned char *transport_dump_cmd_direction(struct se_cmd *); 140unsigned char *transport_dump_cmd_direction(struct se_cmd *);
137extern void transport_dump_dev_state(struct se_device *, char *, int *); 141extern void transport_dump_dev_state(struct se_device *, char *, int *);
138extern void transport_dump_dev_info(struct se_device *, struct se_lun *, 142extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index aba421d68f6f..78f18adb49c8 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
31 0 : blk_rq_sectors(rq); 31 0 : blk_rq_sectors(rq);
32 __entry->errors = rq->errors; 32 __entry->errors = rq->errors;
33 33
34 blk_fill_rwbs_rq(__entry->rwbs, rq); 34 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
35 blk_dump_cmd(__get_str(cmd), rq); 35 blk_dump_cmd(__get_str(cmd), rq);
36 ), 36 ),
37 37
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq,
118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 118 __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
119 blk_rq_bytes(rq) : 0; 119 blk_rq_bytes(rq) : 0;
120 120
121 blk_fill_rwbs_rq(__entry->rwbs, rq); 121 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
122 blk_dump_cmd(__get_str(cmd), rq); 122 blk_dump_cmd(__get_str(cmd), rq);
123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN); 123 memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
124 ), 124 ),
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap,
563 __entry->nr_sector = blk_rq_sectors(rq); 563 __entry->nr_sector = blk_rq_sectors(rq);
564 __entry->old_dev = dev; 564 __entry->old_dev = dev;
565 __entry->old_sector = from; 565 __entry->old_sector = from;
566 blk_fill_rwbs_rq(__entry->rwbs, rq); 566 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
567 ), 567 ),
568 568
569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", 569 TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4349935c2ad8..e92e98189032 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1575 return -ENODEV; 1575 return -ENODEV;
1576 1576
1577 trialcs = alloc_trial_cpuset(cs); 1577 trialcs = alloc_trial_cpuset(cs);
1578 if (!trialcs) 1578 if (!trialcs) {
1579 return -ENOMEM; 1579 retval = -ENOMEM;
1580 goto out;
1581 }
1580 1582
1581 switch (cft->private) { 1583 switch (cft->private) {
1582 case FILE_CPULIST: 1584 case FILE_CPULIST:
@@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft,
1591 } 1593 }
1592 1594
1593 free_trial_cpuset(trialcs); 1595 free_trial_cpuset(trialcs);
1596out:
1594 cgroup_unlock(); 1597 cgroup_unlock();
1595 return retval; 1598 return retval;
1596} 1599}
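
The cpuset hunk fixes an error path that returned -ENOMEM while still holding the cgroup lock taken a few lines earlier; the allocation failure now sets retval and jumps to a common unlock label. The idiom in isolation, as a runnable userspace sketch with a pthread mutex and malloc() standing in for cgroup_lock() and alloc_trial_cpuset():

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <pthread.h>

    static pthread_mutex_t cgroup_lock = PTHREAD_MUTEX_INITIALIZER;

    static int write_resmask(size_t alloc_size)
    {
        int retval = 0;
        void *trialcs;

        pthread_mutex_lock(&cgroup_lock);

        trialcs = malloc(alloc_size);
        if (!trialcs) {
            retval = -ENOMEM;
            goto out;              /* unlock on *every* exit path */
        }

        /* ... work with trialcs ... */

        free(trialcs);
    out:
        pthread_mutex_unlock(&cgroup_lock);
        return retval;
    }

    int main(void)
    {
        printf("retval = %d\n", write_resmask(64));
        return 0;
    }
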
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4571ae7e085a..99c3bc8a6fb4 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -3,6 +3,12 @@
3 */ 3 */
4#include <linux/irqdesc.h> 4#include <linux/irqdesc.h>
5 5
6#ifdef CONFIG_SPARSE_IRQ
7# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
8#else
9# define IRQ_BITMAP_BITS NR_IRQS
10#endif
11
6extern int noirqdebug; 12extern int noirqdebug;
7 13
8#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) 14#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 282f20230e67..2039bea31bdf 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS;
94EXPORT_SYMBOL_GPL(nr_irqs); 94EXPORT_SYMBOL_GPL(nr_irqs);
95 95
96static DEFINE_MUTEX(sparse_irq_lock); 96static DEFINE_MUTEX(sparse_irq_lock);
97static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
97static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
98 98
99#ifdef CONFIG_SPARSE_IRQ 99#ifdef CONFIG_SPARSE_IRQ
100 100
@@ -217,6 +217,15 @@ int __init early_irq_init(void)
217 initcnt = arch_probe_nr_irqs(); 217 initcnt = arch_probe_nr_irqs();
218 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); 218 printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt);
219 219
220 if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
221 nr_irqs = IRQ_BITMAP_BITS;
222
223 if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
224 initcnt = IRQ_BITMAP_BITS;
225
226 if (initcnt > nr_irqs)
227 nr_irqs = initcnt;
228
220 for (i = 0; i < initcnt; i++) { 229 for (i = 0; i < initcnt; i++) {
221 desc = alloc_desc(i, node); 230 desc = alloc_desc(i, node);
222 set_bit(i, allocated_irqs); 231 set_bit(i, allocated_irqs);
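
The hunk above clamps nr_irqs and the probed initial count to the size of the allocated_irqs bitmap, which is now IRQ_BITMAP_BITS rather than NR_IRQS. A stand-alone sketch of the same clamping, with made-up numbers in place of what the architecture code reports:

#include <stdio.h>

#define NR_IRQS         256
#define IRQ_BITMAP_BITS (NR_IRQS + 8196)	/* sparse-IRQ value from internals.h */

int main(void)
{
	int nr_irqs = 9000;	/* pretend value reported by the arch code */
	int initcnt = 12000;	/* pretend probed count */

	if (nr_irqs > IRQ_BITMAP_BITS)		/* WARN_ON() in the kernel version */
		nr_irqs = IRQ_BITMAP_BITS;
	if (initcnt > IRQ_BITMAP_BITS)
		initcnt = IRQ_BITMAP_BITS;
	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	printf("nr_irqs=%d initcnt=%d\n", nr_irqs, initcnt);
	return 0;
}
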
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0caa59f747dd..9033c1c70828 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1100,7 +1100,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1100 if (retval) 1100 if (retval)
1101 kfree(action); 1101 kfree(action);
1102 1102
1103#ifdef CONFIG_DEBUG_SHIRQ
1103#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1104 if (!retval && (irqflags & IRQF_SHARED)) { 1104 if (!retval && (irqflags & IRQF_SHARED)) {
1105 /* 1105 /*
1106 * It's a shared IRQ -- the driver ought to be prepared for it 1106 * It's a shared IRQ -- the driver ought to be prepared for it
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index 891115a929aa..dc49358b73fa 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -23,7 +23,7 @@
23#ifdef CONFIG_HARDIRQS_SW_RESEND 23#ifdef CONFIG_HARDIRQS_SW_RESEND
24 24
25/* Bitmap to handle software resend of interrupts: */ 25/* Bitmap to handle software resend of interrupts: */
26static DECLARE_BITMAP(irqs_resend, NR_IRQS);
26static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
27 27
28/* 28/*
29 * Run software resends of IRQ's 29 * Run software resends of IRQ's
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 999835b6112b..656222fcf767 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -782,6 +782,10 @@ retry:
782 raw_spin_unlock_irq(&ctx->lock); 782 raw_spin_unlock_irq(&ctx->lock);
783} 783}
784 784
785#define MAX_INTERRUPTS (~0ULL)
786
787static void perf_log_throttle(struct perf_event *event, int enable);
788
785static int 789static int
786event_sched_in(struct perf_event *event, 790event_sched_in(struct perf_event *event,
787 struct perf_cpu_context *cpuctx, 791 struct perf_cpu_context *cpuctx,
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event,
794 798
795 event->state = PERF_EVENT_STATE_ACTIVE; 799 event->state = PERF_EVENT_STATE_ACTIVE;
796 event->oncpu = smp_processor_id(); 800 event->oncpu = smp_processor_id();
801
802 /*
803 * Unthrottle events, since we scheduled we might have missed several
804 * ticks already, also for a heavily scheduling task there is little
805 * guarantee it'll get a tick in a timely manner.
806 */
807 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
808 perf_log_throttle(event, 1);
809 event->hw.interrupts = 0;
810 }
811
797 /* 812 /*
798 * The new state must be visible before we turn it on in the hardware: 813 * The new state must be visible before we turn it on in the hardware:
799 */ 814 */
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task)
1596 } 1611 }
1597} 1612}
1598 1613
1599#define MAX_INTERRUPTS (~0ULL)
1600
1601static void perf_log_throttle(struct perf_event *event, int enable);
1602
1603static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) 1614static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
1604{ 1615{
1605 u64 frequency = event->attr.sample_freq; 1616 u64 frequency = event->attr.sample_freq;
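
Moving MAX_INTERRUPTS and the perf_log_throttle() declaration up lets event_sched_in() clear a pending throttle as soon as the event is scheduled back in, instead of waiting for the next tick. A small stand-alone sketch of the counter logic; the threshold and the plain variable standing in for event->hw.interrupts are invented for illustration:

#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

int main(void)
{
	unsigned long long interrupts = 0;
	unsigned long long max_per_tick = 3;	/* invented threshold */

	/* overflow path: too many interrupts in one tick -> mark throttled */
	for (int i = 0; i < 5; i++)
		if (++interrupts > max_per_tick)
			interrupts = MAX_INTERRUPTS;

	/* sched-in path (the new code above): clear the throttle immediately */
	if (interrupts == MAX_INTERRUPTS) {
		printf("unthrottled on sched-in\n");	/* perf_log_throttle(event, 1) */
		interrupts = 0;				/* start counting again */
	}
	return 0;
}
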
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 7b5db6a8561e..701853042c28 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);
326 326
327static int __init pm_start_workqueue(void) 327static int __init pm_start_workqueue(void)
328{ 328{
329 pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); 329 pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
330 330
331 return pm_wq ? 0 : -ENOMEM; 331 return pm_wq ? 0 : -ENOMEM;
332} 332}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index d6d2a10320e0..0cf3a27a6c9d 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -22,7 +22,7 @@
22 */ 22 */
23#define TIMEOUT (20 * HZ) 23#define TIMEOUT (20 * HZ)
24 24
25static inline int freezeable(struct task_struct * p) 25static inline int freezable(struct task_struct * p)
26{ 26{
27 if ((p == current) || 27 if ((p == current) ||
28 (p->flags & PF_NOFREEZE) || 28 (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
53 todo = 0; 53 todo = 0;
54 read_lock(&tasklist_lock); 54 read_lock(&tasklist_lock);
55 do_each_thread(g, p) { 55 do_each_thread(g, p) {
56 if (frozen(p) || !freezeable(p)) 56 if (frozen(p) || !freezable(p))
57 continue; 57 continue;
58 58
59 if (!freeze_task(p, sig_only)) 59 if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)
167 167
168 read_lock(&tasklist_lock); 168 read_lock(&tasklist_lock);
169 do_each_thread(g, p) { 169 do_each_thread(g, p) {
170 if (!freezeable(p)) 170 if (!freezable(p))
171 continue; 171 continue;
172 172
173 if (nosig_only && should_send_signal(p)) 173 if (nosig_only && should_send_signal(p))
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 0dac75ea4456..64db648ff911 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1519,11 +1519,8 @@ static int
1519swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, 1519swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1520 unsigned int nr_pages, unsigned int nr_highmem) 1520 unsigned int nr_pages, unsigned int nr_highmem)
1521{ 1521{
1522 int error = 0;
1523
1524 if (nr_highmem > 0) { 1522 if (nr_highmem > 0) {
1525 error = get_highmem_buffer(PG_ANY);
1523 if (get_highmem_buffer(PG_ANY))
1526 if (error)
1527 goto err_out; 1524 goto err_out;
1528 if (nr_highmem > alloc_highmem) { 1525 if (nr_highmem > alloc_highmem) {
1529 nr_highmem -= alloc_highmem; 1526 nr_highmem -= alloc_highmem;
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm,
1546 1543
1547 err_out: 1544 err_out:
1548 swsusp_free(); 1545 swsusp_free();
1549 return error;
1546 return -ENOMEM;
1550} 1547}
1551 1548
1552asmlinkage int swsusp_save(void) 1549asmlinkage int swsusp_save(void)
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 1708b1e2972d..e2302e40b360 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
163 return !err; 163 return !err;
164} 164}
165 165
166int ptrace_attach(struct task_struct *task) 166static int ptrace_attach(struct task_struct *task)
167{ 167{
168 int retval; 168 int retval;
169 169
@@ -219,7 +219,7 @@ out:
219 * Performs checks and sets PT_PTRACED. 219 * Performs checks and sets PT_PTRACED.
220 * Should be used by all ptrace implementations for PTRACE_TRACEME. 220 * Should be used by all ptrace implementations for PTRACE_TRACEME.
221 */ 221 */
222int ptrace_traceme(void) 222static int ptrace_traceme(void)
223{ 223{
224 int ret = -EPERM; 224 int ret = -EPERM;
225 225
@@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p)
293 return false; 293 return false;
294} 294}
295 295
296int ptrace_detach(struct task_struct *child, unsigned int data) 296static int ptrace_detach(struct task_struct *child, unsigned int data)
297{ 297{
298 bool dead = false; 298 bool dead = false;
299 299
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..42eab5a8437d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key)
4213{ 4213{
4214 __wake_up_common(q, mode, 1, 0, key); 4214 __wake_up_common(q, mode, 1, 0, key);
4215} 4215}
4216EXPORT_SYMBOL_GPL(__wake_up_locked_key);
4216 4217
4217/** 4218/**
4218 * __wake_up_sync_key - wake up threads blocked on a waitqueue. 4219 * __wake_up_sync_key - wake up threads blocked on a waitqueue.
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index ad6267714c84..01f75a5f17af 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
210 210
211static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 211static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
212{ 212{
213 int this_cpu = smp_processor_id();
214 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 213 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
215 struct sched_rt_entity *rt_se; 214 struct sched_rt_entity *rt_se;
216 215
217 rt_se = rt_rq->tg->rt_se[this_cpu];
216 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
217
218 rt_se = rt_rq->tg->rt_se[cpu];
218 219
219 if (rt_rq->rt_nr_running) { 220 if (rt_rq->rt_nr_running) {
220 if (rt_se && !on_rt_rq(rt_se)) 221 if (rt_se && !on_rt_rq(rt_se))
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
226 227
227static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) 228static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
228{ 229{
229 int this_cpu = smp_processor_id();
230 struct sched_rt_entity *rt_se; 230 struct sched_rt_entity *rt_se;
231 int cpu = cpu_of(rq_of_rt_rq(rt_rq));
231 232
232 rt_se = rt_rq->tg->rt_se[this_cpu];
233 rt_se = rt_rq->tg->rt_se[cpu];
233 234
234 if (rt_se && on_rt_rq(rt_se)) 235 if (rt_se && on_rt_rq(rt_se))
235 dequeue_rt_entity(rt_se); 236 dequeue_rt_entity(rt_se);
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
565 if (rt_rq->rt_time || rt_rq->rt_nr_running) 566 if (rt_rq->rt_time || rt_rq->rt_nr_running)
566 idle = 0; 567 idle = 0;
567 raw_spin_unlock(&rt_rq->rt_runtime_lock); 568 raw_spin_unlock(&rt_rq->rt_runtime_lock);
568 } else if (rt_rq->rt_nr_running)
569 } else if (rt_rq->rt_nr_running) {
569 idle = 0; 570 idle = 0;
571 if (!rt_rq_throttled(rt_rq))
572 enqueue = 1;
573 }
570 574
571 if (enqueue) 575 if (enqueue)
572 sched_rt_rq_enqueue(rt_rq); 576 sched_rt_rq_enqueue(rt_rq);
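
The sched_rt.c change indexes the task group's per-CPU rt_se array by the CPU that owns the runqueue rather than the CPU executing the code; as the do_sched_rt_period_timer() hunk above shows, these enqueue/dequeue helpers can be run for another CPU's rt_rq, so smp_processor_id() was the wrong index. The resulting pattern, using only names that appear in the hunk:

	int cpu = cpu_of(rq_of_rt_rq(rt_rq));		/* CPU that owns this rt_rq */
	struct sched_rt_entity *rt_se = rt_rq->tg->rt_se[cpu];
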
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0f1bd83db985..4eed0af5d144 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -194,9 +194,9 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
194static struct ctl_table root_table[]; 194static struct ctl_table root_table[];
195static struct ctl_table_root sysctl_table_root; 195static struct ctl_table_root sysctl_table_root;
196static struct ctl_table_header root_table_header = { 196static struct ctl_table_header root_table_header = {
197 .count = 1,
197 {{.count = 1,
198 .ctl_table = root_table, 198 .ctl_table = root_table,
199 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),
199 .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}},
200 .root = &sysctl_table_root, 200 .root = &sysctl_table_root,
201 .set = &sysctl_table_root.default_set, 201 .set = &sysctl_table_root.default_set,
202}; 202};
@@ -1567,11 +1567,16 @@ void sysctl_head_get(struct ctl_table_header *head)
1567 spin_unlock(&sysctl_lock); 1567 spin_unlock(&sysctl_lock);
1568} 1568}
1569 1569
1570static void free_head(struct rcu_head *rcu)
1571{
1572 kfree(container_of(rcu, struct ctl_table_header, rcu));
1573}
1574
1570void sysctl_head_put(struct ctl_table_header *head) 1575void sysctl_head_put(struct ctl_table_header *head)
1571{ 1576{
1572 spin_lock(&sysctl_lock); 1577 spin_lock(&sysctl_lock);
1573 if (!--head->count) 1578 if (!--head->count)
1574 kfree(head);
1579 call_rcu(&head->rcu, free_head);
1575 spin_unlock(&sysctl_lock); 1580 spin_unlock(&sysctl_lock);
1576} 1581}
1577 1582
@@ -1948,10 +1953,10 @@ void unregister_sysctl_table(struct ctl_table_header * header)
1948 start_unregistering(header); 1953 start_unregistering(header);
1949 if (!--header->parent->count) { 1954 if (!--header->parent->count) {
1950 WARN_ON(1); 1955 WARN_ON(1);
1951 kfree(header->parent);
1956 call_rcu(&header->parent->rcu, free_head);
1952 } 1957 }
1953 if (!--header->count) 1958 if (!--header->count)
1954 kfree(header);
1959 call_rcu(&header->rcu, free_head);
1955 spin_unlock(&sysctl_lock); 1960 spin_unlock(&sysctl_lock);
1956} 1961}
1957 1962
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 48b2761b5668..a3b5aff62606 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void)
600 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; 600 return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
601} 601}
602 602
603/*
604 * Check whether the broadcast device supports oneshot.
605 */
606bool tick_broadcast_oneshot_available(void)
607{
608 struct clock_event_device *bc = tick_broadcast_device.evtdev;
609
610 return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
611}
612
603#endif 613#endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 051bc80a0c43..ed228ef6f6b8 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void)
51{ 51{
52 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); 52 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
53 53
54 return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
54 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
55 return 0;
56 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
57 return 1;
58 return tick_broadcast_oneshot_available();
55} 59}
56 60
57/* 61/*
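
tick_is_oneshot_available() now refuses oneshot mode when the per-CPU device stops in deep C-states (CLOCK_EVT_FEAT_C3STOP) unless the broadcast device can take over in oneshot mode, using the tick_broadcast_oneshot_available() helper added in tick-broadcast.c above. The decision reduced to plain booleans, purely for illustration:

#include <stdbool.h>

static bool oneshot_available(bool has_dev, bool dev_oneshot,
			      bool stops_in_c3, bool broadcast_oneshot)
{
	if (!has_dev || !dev_oneshot)
		return false;		/* no usable per-CPU oneshot device */
	if (!stops_in_c3)
		return true;		/* device keeps ticking in idle, good enough */
	return broadcast_oneshot;	/* need the broadcast device to cover deep idle */
}
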
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 290eefbc1f60..f65d3a723a64 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
36extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); 36extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
37extern int tick_broadcast_oneshot_active(void); 37extern int tick_broadcast_oneshot_active(void);
38extern void tick_check_oneshot_broadcast(int cpu); 38extern void tick_check_oneshot_broadcast(int cpu);
39bool tick_broadcast_oneshot_available(void);
39# else /* BROADCAST */ 40# else /* BROADCAST */
40static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 41static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
41{ 42{
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { }
46static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } 47static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
47static inline int tick_broadcast_oneshot_active(void) { return 0; } 48static inline int tick_broadcast_oneshot_active(void) { return 0; }
48static inline void tick_check_oneshot_broadcast(int cpu) { } 49static inline void tick_check_oneshot_broadcast(int cpu) { }
50static inline bool tick_broadcast_oneshot_available(void) { return true; }
49# endif /* !BROADCAST */ 51# endif /* !BROADCAST */
50 52
51#else /* !ONESHOT */ 53#else /* !ONESHOT */
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
76 return 0; 78 return 0;
77} 79}
78static inline int tick_broadcast_oneshot_active(void) { return 0; } 80static inline int tick_broadcast_oneshot_active(void) { return 0; }
81static inline bool tick_broadcast_oneshot_available(void) { return false; }
79#endif /* !TICK_ONESHOT */ 82#endif /* !TICK_ONESHOT */
80 83
81/* 84/*
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 32a19f9397fc..3258455549f4 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -41,7 +41,7 @@ static void print_name_offset(struct seq_file *m, void *sym)
41 char symname[KSYM_NAME_LEN]; 41 char symname[KSYM_NAME_LEN];
42 42
43 if (lookup_symbol_name((unsigned long)sym, symname) < 0) 43 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
44 SEQ_printf(m, "<%p>", sym); 44 SEQ_printf(m, "<%pK>", sym);
45 else 45 else
46 SEQ_printf(m, "%s", symname); 46 SEQ_printf(m, "%s", symname);
47} 47}
@@ -112,7 +112,7 @@ next_one:
112static void 112static void
113print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) 113print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
114{ 114{
115 SEQ_printf(m, " .base: %p\n", base); 115 SEQ_printf(m, " .base: %pK\n", base);
116 SEQ_printf(m, " .index: %d\n", 116 SEQ_printf(m, " .index: %d\n",
117 base->index); 117 base->index);
118 SEQ_printf(m, " .resolution: %Lu nsecs\n", 118 SEQ_printf(m, " .resolution: %Lu nsecs\n",
diff --git a/kernel/timer.c b/kernel/timer.c
index d53ce66daea0..d6459923d245 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -959,7 +959,7 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
959 * 959 *
960 * Synchronization rules: Callers must prevent restarting of the timer, 960 * Synchronization rules: Callers must prevent restarting of the timer,
961 * otherwise this function is meaningless. It must not be called from 961 * otherwise this function is meaningless. It must not be called from
962 * hardirq contexts. The caller must not hold locks which would prevent 962 * interrupt contexts. The caller must not hold locks which would prevent
963 * completion of the timer's handler. The timer's handler must not call 963 * completion of the timer's handler. The timer's handler must not call
964 * add_timer_on(). Upon exit the timer is not queued and the handler is 964 * add_timer_on(). Upon exit the timer is not queued and the handler is
965 * not running on any CPU. 965 * not running on any CPU.
@@ -971,12 +971,10 @@ int del_timer_sync(struct timer_list *timer)
971#ifdef CONFIG_LOCKDEP 971#ifdef CONFIG_LOCKDEP
972 unsigned long flags; 972 unsigned long flags;
973 973
974 raw_local_irq_save(flags);
974 local_irq_save(flags);
975 local_bh_disable();
976 lock_map_acquire(&timer->lockdep_map); 975 lock_map_acquire(&timer->lockdep_map);
977 lock_map_release(&timer->lockdep_map); 976 lock_map_release(&timer->lockdep_map);
978 _local_bh_enable();
977 local_irq_restore(flags);
979 raw_local_irq_restore(flags);
980#endif 978#endif
981 /* 979 /*
982 * don't use it in hardirq context, because it 980 * don't use it in hardirq context, because it
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index d95721f33702..cbafed7d4f38 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
1827 rwbs[i] = '\0'; 1827 rwbs[i] = '\0';
1828} 1828}
1829 1829
1830void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
1831{
1832 int rw = rq->cmd_flags & 0x03;
1833 int bytes;
1834
1835 if (rq->cmd_flags & REQ_DISCARD)
1836 rw |= REQ_DISCARD;
1837
1838 if (rq->cmd_flags & REQ_SECURE)
1839 rw |= REQ_SECURE;
1840
1841 bytes = blk_rq_bytes(rq);
1842
1843 blk_fill_rwbs(rwbs, rw, bytes);
1844}
1845
1846#endif /* CONFIG_EVENT_TRACING */ 1830#endif /* CONFIG_EVENT_TRACING */
1847 1831
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index f37f974aa81b..18bb15776c57 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -363,8 +363,14 @@ static int watchdog_nmi_enable(int cpu)
363 goto out_save; 363 goto out_save;
364 } 364 }
365 365
366 printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", 366
367 cpu, PTR_ERR(event)); 367 /* vary the KERN level based on the returned errno */
368 if (PTR_ERR(event) == -EOPNOTSUPP)
369 printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
370 else if (PTR_ERR(event) == -ENOENT)
371 printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
372 else
373 printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
368 return PTR_ERR(event); 374 return PTR_ERR(event);
369 375
370 /* success path */ 376 /* success path */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11869faa6819..ee6578b578ad 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@ enum {
79 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ 79 MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */
80 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ 80 IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */
81 81
82 MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */
82 MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2,
83 /* call for help after 10ms
84 (min two ticks) */
83 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ 85 MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */
84 CREATE_COOLDOWN = HZ, /* time to breath after fail */ 86 CREATE_COOLDOWN = HZ, /* time to breath after fail */
85 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ 87 TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */
@@ -2047,6 +2049,15 @@ repeat:
2047 move_linked_works(work, scheduled, &n); 2049 move_linked_works(work, scheduled, &n);
2048 2050
2049 process_scheduled_works(rescuer); 2051 process_scheduled_works(rescuer);
2052
2053 /*
2054 * Leave this gcwq. If keep_working() is %true, notify a
2055 * regular worker; otherwise, we end up with 0 concurrency
2056 * and stalling the execution.
2057 */
2058 if (keep_working(gcwq))
2059 wake_up_worker(gcwq);
2060
2050 spin_unlock_irq(&gcwq->lock); 2061 spin_unlock_irq(&gcwq->lock);
2051 } 2062 }
2052 2063
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
2956 */ 2967 */
2957 spin_lock(&workqueue_lock); 2968 spin_lock(&workqueue_lock);
2958 2969
2959 if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) 2970 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
2960 for_each_cwq_cpu(cpu, wq) 2971 for_each_cwq_cpu(cpu, wq)
2961 get_cwq(cpu, wq)->max_active = 0; 2972 get_cwq(cpu, wq)->max_active = 0;
2962 2973
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3068 3079
3069 spin_lock_irq(&gcwq->lock); 3080 spin_lock_irq(&gcwq->lock);
3070 3081
3071 if (!(wq->flags & WQ_FREEZEABLE) || 3082 if (!(wq->flags & WQ_FREEZABLE) ||
3072 !(gcwq->flags & GCWQ_FREEZING)) 3083 !(gcwq->flags & GCWQ_FREEZING))
3073 get_cwq(gcwq->cpu, wq)->max_active = max_active; 3084 get_cwq(gcwq->cpu, wq)->max_active = max_active;
3074 3085
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
3318 * want to get it over with ASAP - spam rescuers, wake up as 3329 * want to get it over with ASAP - spam rescuers, wake up as
3319 * many idlers as necessary and create new ones till the 3330 * many idlers as necessary and create new ones till the
3320 * worklist is empty. Note that if the gcwq is frozen, there 3331 * worklist is empty. Note that if the gcwq is frozen, there
3321 * may be frozen works in freezeable cwqs. Don't declare 3332 * may be frozen works in freezable cwqs. Don't declare
3322 * completion while frozen. 3333 * completion while frozen.
3323 */ 3334 */
3324 while (gcwq->nr_workers != gcwq->nr_idle || 3335 while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
3576/** 3587/**
3577 * freeze_workqueues_begin - begin freezing workqueues 3588 * freeze_workqueues_begin - begin freezing workqueues
3578 * 3589 *
3579 * Start freezing workqueues. After this function returns, all 3590 * Start freezing workqueues. After this function returns, all freezable
3580 * freezeable workqueues will queue new works to their frozen_works 3591 * workqueues will queue new works to their frozen_works list instead of
3581 * list instead of gcwq->worklist. 3592 * gcwq->worklist.
3582 * 3593 *
3583 * CONTEXT: 3594 * CONTEXT:
3584 * Grabs and releases workqueue_lock and gcwq->lock's. 3595 * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void)
3604 list_for_each_entry(wq, &workqueues, list) { 3615 list_for_each_entry(wq, &workqueues, list) {
3605 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3616 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3606 3617
3607 if (cwq && wq->flags & WQ_FREEZEABLE) 3618 if (cwq && wq->flags & WQ_FREEZABLE)
3608 cwq->max_active = 0; 3619 cwq->max_active = 0;
3609 } 3620 }
3610 3621
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void)
3615} 3626}
3616 3627
3617/** 3628/**
3618 * freeze_workqueues_busy - are freezeable workqueues still busy? 3629 * freeze_workqueues_busy - are freezable workqueues still busy?
3619 * 3630 *
3620 * Check whether freezing is complete. This function must be called 3631 * Check whether freezing is complete. This function must be called
3621 * between freeze_workqueues_begin() and thaw_workqueues(). 3632 * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void)
3624 * Grabs and releases workqueue_lock. 3635 * Grabs and releases workqueue_lock.
3625 * 3636 *
3626 * RETURNS: 3637 * RETURNS:
3627 * %true if some freezeable workqueues are still busy. %false if 3638 * %true if some freezable workqueues are still busy. %false if freezing
3628 * freezing is complete. 3639 * is complete.
3629 */ 3640 */
3630bool freeze_workqueues_busy(void) 3641bool freeze_workqueues_busy(void)
3631{ 3642{
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void)
3645 list_for_each_entry(wq, &workqueues, list) { 3656 list_for_each_entry(wq, &workqueues, list) {
3646 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3657 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3647 3658
3648 if (!cwq || !(wq->flags & WQ_FREEZEABLE)) 3659 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3649 continue; 3660 continue;
3650 3661
3651 BUG_ON(cwq->nr_active < 0); 3662 BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void)
3690 list_for_each_entry(wq, &workqueues, list) { 3701 list_for_each_entry(wq, &workqueues, list) {
3691 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); 3702 struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
3692 3703
3693 if (!cwq || !(wq->flags & WQ_FREEZEABLE)) 3704 if (!cwq || !(wq->flags & WQ_FREEZABLE))
3694 continue; 3705 continue;
3695 3706
3696 /* restore max_active and repopulate worklist */ 3707 /* restore max_active and repopulate worklist */
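
Two of the workqueue hunks above are behavioural rather than WQ_FREEZABLE renames: the rescuer now wakes a regular worker before leaving a gcwq so concurrency does not drop to zero, and MAYDAY_INITIAL_TIMEOUT is clamped to at least two jiffies because HZ/100 can round down to a timer that expires almost immediately. A stand-alone illustration of the clamp; the HZ values are just examples:

#include <stdio.h>

int main(void)
{
	int hz_values[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		int hz = hz_values[i];
		/* same expression as MAYDAY_INITIAL_TIMEOUT above */
		int timeout = hz / 100 >= 2 ? hz / 100 : 2;

		printf("HZ=%4d -> initial mayday timeout = %d jiffies\n", hz, timeout);
	}
	return 0;
}
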
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3967c2356e37..2b97418c67e2 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -805,7 +805,7 @@ config ARCH_WANT_FRAME_POINTERS
805config FRAME_POINTER 805config FRAME_POINTER
806 bool "Compile the kernel with frame pointers" 806 bool "Compile the kernel with frame pointers"
807 depends on DEBUG_KERNEL && \ 807 depends on DEBUG_KERNEL && \
808 (CRIS || M68K || M68KNOMMU || FRV || UML || \
808 (CRIS || M68K || FRV || UML || \
809 AVR32 || SUPERH || BLACKFIN || MN10300) || \ 809 AVR32 || SUPERH || BLACKFIN || MN10300) || \
810 ARCH_WANT_FRAME_POINTERS 810 ARCH_WANT_FRAME_POINTERS
811 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS 811 default y if (DEBUG_INFO && UML) || ARCH_WANT_FRAME_POINTERS
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 344c710d16ca..b8029a5583ff 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new,
35} 35}
36EXPORT_SYMBOL(__list_add); 36EXPORT_SYMBOL(__list_add);
37 37
38void __list_del_entry(struct list_head *entry)
39{
40 struct list_head *prev, *next;
41
42 prev = entry->prev;
43 next = entry->next;
44
45 if (WARN(next == LIST_POISON1,
46 "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
47 entry, LIST_POISON1) ||
48 WARN(prev == LIST_POISON2,
49 "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
50 entry, LIST_POISON2) ||
51 WARN(prev->next != entry,
52 "list_del corruption. prev->next should be %p, "
53 "but was %p\n", entry, prev->next) ||
54 WARN(next->prev != entry,
55 "list_del corruption. next->prev should be %p, "
56 "but was %p\n", entry, next->prev))
57 return;
58
59 __list_del(prev, next);
60}
61EXPORT_SYMBOL(__list_del_entry);
62
38/** 63/**
39 * list_del - deletes entry from list. 64 * list_del - deletes entry from list.
40 * @entry: the element to delete from the list. 65 * @entry: the element to delete from the list.
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add);
43 */ 68 */
44void list_del(struct list_head *entry) 69void list_del(struct list_head *entry)
45{ 70{
46 WARN(entry->next == LIST_POISON1,
71 __list_del_entry(entry);
47 "list_del corruption, next is LIST_POISON1 (%p)\n",
48 LIST_POISON1);
49 WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2,
50 "list_del corruption, prev is LIST_POISON2 (%p)\n",
51 LIST_POISON2);
52 WARN(entry->prev->next != entry,
53 "list_del corruption. prev->next should be %p, "
54 "but was %p\n", entry, entry->prev->next);
55 WARN(entry->next->prev != entry,
56 "list_del corruption. next->prev should be %p, "
57 "but was %p\n", entry, entry->next->prev);
58 __list_del(entry->prev, entry->next);
59 entry->next = LIST_POISON1; 72 entry->next = LIST_POISON1;
60 entry->prev = LIST_POISON2; 73 entry->prev = LIST_POISON2;
61} 74}
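
The lib/list_debug.c change factors the corruption checks into __list_del_entry(), which unlinks without poisoning, so the checked removal can be reused by callers that immediately re-link the entry (the likely motivation; those callers are not part of this hunk), while list_del() keeps its old behaviour by calling it and then poisoning. A hedged usage sketch, where item, node and other_list are hypothetical names and <linux/list.h> is assumed:

	__list_del_entry(&item->node);			/* checked unlink, pointers left intact */
	list_add_tail(&item->node, &other_list);	/* entry is immediately reusable */
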
diff --git a/lib/nlattr.c b/lib/nlattr.c
index 5021cbc34411..ac09f2226dc7 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n)
148{ 148{
149 int i, len = 0; 149 int i, len = 0;
150 150
151 for (i = 0; i < n; i++) {
151 for (i = 0; i < n; i++, p++) {
152 if (p->len) 152 if (p->len)
153 len += nla_total_size(p->len); 153 len += nla_total_size(p->len);
154 else if (nla_attr_minlen[p->type]) 154 else if (nla_attr_minlen[p->type])
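
The one-character nlattr.c fix matters: without the p++, nla_policy_len() summed the first policy entry n times. A stand-alone demonstration of the corrected loop shape; the struct and values are invented for the demo:

#include <stdio.h>

struct fake_policy { int len; };

static int policy_len(const struct fake_policy *p, int n)
{
	int i, len = 0;

	for (i = 0; i < n; i++, p++)	/* the fix: advance p on every iteration */
		len += p->len;
	return len;
}

int main(void)
{
	struct fake_policy pol[] = { { 4 }, { 8 }, { 16 } };

	printf("total=%d\n", policy_len(pol, 3));	/* 28, not 3 * 4 = 12 */
	return 0;
}
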
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index c47bbe11b804..93ca08b8a451 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
686 /* 686 /*
687 * Ensure that the address returned is DMA'ble 687 * Ensure that the address returned is DMA'ble
688 */ 688 */
689 if (!dma_capable(dev, dev_addr, size))
690 panic("map_single: bounce buffer is not DMA'ble");
689 if (!dma_capable(dev, dev_addr, size)) {
690 swiotlb_tbl_unmap_single(dev, map, size, dir);
691 dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer);
692 }
691 693
692 return dev_addr; 694 return dev_addr;
693} 695}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e62ddb8f24b6..113e35c47502 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag)
650 650
651static inline struct page *alloc_hugepage_vma(int defrag, 651static inline struct page *alloc_hugepage_vma(int defrag,
652 struct vm_area_struct *vma, 652 struct vm_area_struct *vma,
653 unsigned long haddr)
653 unsigned long haddr, int nd)
654{ 654{
655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), 655 return alloc_pages_vma(alloc_hugepage_gfpmask(defrag),
656 HPAGE_PMD_ORDER, vma, haddr);
656 HPAGE_PMD_ORDER, vma, haddr, nd);
657} 657}
658 658
659#ifndef CONFIG_NUMA 659#ifndef CONFIG_NUMA
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
678 if (unlikely(khugepaged_enter(vma))) 678 if (unlikely(khugepaged_enter(vma)))
679 return VM_FAULT_OOM; 679 return VM_FAULT_OOM;
680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 680 page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
681 vma, haddr);
681 vma, haddr, numa_node_id());
682 if (unlikely(!page)) 682 if (unlikely(!page))
683 goto out; 683 goto out;
684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { 684 if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
799 } 799 }
800 800
801 for (i = 0; i < HPAGE_PMD_NR; i++) { 801 for (i = 0; i < HPAGE_PMD_NR; i++) {
802 pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
803 vma, address);
802 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE,
803 vma, address, page_to_nid(page));
804 if (unlikely(!pages[i] || 804 if (unlikely(!pages[i] ||
805 mem_cgroup_newpage_charge(pages[i], mm, 805 mem_cgroup_newpage_charge(pages[i], mm,
806 GFP_KERNEL))) { 806 GFP_KERNEL))) {
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
902 if (transparent_hugepage_enabled(vma) && 902 if (transparent_hugepage_enabled(vma) &&
903 !transparent_hugepage_debug_cow()) 903 !transparent_hugepage_debug_cow())
904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), 904 new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
905 vma, haddr);
905 vma, haddr, numa_node_id());
906 else 906 else
907 new_page = NULL; 907 new_page = NULL;
908 908
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
1745static void collapse_huge_page(struct mm_struct *mm, 1745static void collapse_huge_page(struct mm_struct *mm,
1746 unsigned long address, 1746 unsigned long address,
1747 struct page **hpage, 1747 struct page **hpage,
1748 struct vm_area_struct *vma) 1748 struct vm_area_struct *vma,
1749 int node)
1749{ 1750{
1750 pgd_t *pgd; 1751 pgd_t *pgd;
1751 pud_t *pud; 1752 pud_t *pud;
@@ -1761,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm,
1761#ifndef CONFIG_NUMA 1762#ifndef CONFIG_NUMA
1762 VM_BUG_ON(!*hpage); 1763 VM_BUG_ON(!*hpage);
1763 new_page = *hpage; 1764 new_page = *hpage;
1765 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1766 up_read(&mm->mmap_sem);
1767 return;
1768 }
1764#else 1769#else
1765 VM_BUG_ON(*hpage); 1770 VM_BUG_ON(*hpage);
1766 /* 1771 /*
@@ -1773,18 +1778,19 @@ static void collapse_huge_page(struct mm_struct *mm,
1773 * mmap_sem in read mode is good idea also to allow greater 1778 * mmap_sem in read mode is good idea also to allow greater
1774 * scalability. 1779 * scalability.
1775 */ 1780 */
1776 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); 1781 new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
1782 node);
1777 if (unlikely(!new_page)) { 1783 if (unlikely(!new_page)) {
1778 up_read(&mm->mmap_sem); 1784 up_read(&mm->mmap_sem);
1779 *hpage = ERR_PTR(-ENOMEM); 1785 *hpage = ERR_PTR(-ENOMEM);
1780 return; 1786 return;
1781 } 1787 }
1782#endif
1783 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 1788 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
1784 up_read(&mm->mmap_sem); 1789 up_read(&mm->mmap_sem);
1785 put_page(new_page); 1790 put_page(new_page);
1786 return; 1791 return;
1787 } 1792 }
1793#endif
1788 1794
1789 /* after allocating the hugepage upgrade to mmap_sem write mode */ 1795 /* after allocating the hugepage upgrade to mmap_sem write mode */
1790 up_read(&mm->mmap_sem); 1796 up_read(&mm->mmap_sem);
@@ -1811,6 +1817,8 @@ static void collapse_huge_page(struct mm_struct *mm,
1811 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 1817 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
1812 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) 1818 if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
1813 goto out; 1819 goto out;
1820 if (is_vma_temporary_stack(vma))
1821 goto out;
1814 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 1822 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
1815 1823
1816 pgd = pgd_offset(mm, address); 1824 pgd = pgd_offset(mm, address);
@@ -1917,6 +1925,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
1917 struct page *page; 1925 struct page *page;
1918 unsigned long _address; 1926 unsigned long _address;
1919 spinlock_t *ptl; 1927 spinlock_t *ptl;
1928 int node = -1;
1920 1929
1921 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 1930 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1922 1931
@@ -1947,6 +1956,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
1947 page = vm_normal_page(vma, _address, pteval); 1956 page = vm_normal_page(vma, _address, pteval);
1948 if (unlikely(!page)) 1957 if (unlikely(!page))
1949 goto out_unmap; 1958 goto out_unmap;
1959 /*
1960 * Chose the node of the first page. This could
1961 * be more sophisticated and look at more pages,
1962 * but isn't for now.
1963 */
1964 if (node == -1)
1965 node = page_to_nid(page);
1950 VM_BUG_ON(PageCompound(page)); 1966 VM_BUG_ON(PageCompound(page));
1951 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) 1967 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
1952 goto out_unmap; 1968 goto out_unmap;
@@ -1963,7 +1979,7 @@ out_unmap:
1963 pte_unmap_unlock(pte, ptl); 1979 pte_unmap_unlock(pte, ptl);
1964 if (ret) 1980 if (ret)
1965 /* collapse_huge_page will return with the mmap_sem released */ 1981 /* collapse_huge_page will return with the mmap_sem released */
1966 collapse_huge_page(mm, address, hpage, vma);
1982 collapse_huge_page(mm, address, hpage, vma, node);
1967out: 1983out:
1968 return ret; 1984 return ret;
1969} 1985}
@@ -2032,32 +2048,27 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2032 if ((!(vma->vm_flags & VM_HUGEPAGE) && 2048 if ((!(vma->vm_flags & VM_HUGEPAGE) &&
2033 !khugepaged_always()) || 2049 !khugepaged_always()) ||
2034 (vma->vm_flags & VM_NOHUGEPAGE)) { 2050 (vma->vm_flags & VM_NOHUGEPAGE)) {
2051 skip:
2035 progress++; 2052 progress++;
2036 continue; 2053 continue;
2037 } 2054 }
2038
2039 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */ 2055 /* VM_PFNMAP vmas may have vm_ops null but vm_file set */
2040 if (!vma->anon_vma || vma->vm_ops || vma->vm_file) { 2056 if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
2041 khugepaged_scan.address = vma->vm_end; 2057 goto skip;
2042 progress++; 2058 if (is_vma_temporary_stack(vma))
2043 continue; 2059 goto skip;
2044 } 2060
2045 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma)); 2061 VM_BUG_ON(is_linear_pfn_mapping(vma) || is_pfn_mapping(vma));
2046 2062
2047 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2063 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2048 hend = vma->vm_end & HPAGE_PMD_MASK; 2064 hend = vma->vm_end & HPAGE_PMD_MASK;
2049 if (hstart >= hend) { 2065 if (hstart >= hend)
2050 progress++; 2066 goto skip;
2051 continue; 2067 if (khugepaged_scan.address > hend)
2052 } 2068 goto skip;
2053 if (khugepaged_scan.address < hstart) 2069 if (khugepaged_scan.address < hstart)
2054 khugepaged_scan.address = hstart; 2070 khugepaged_scan.address = hstart;
2055 if (khugepaged_scan.address > hend) { 2071 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2056 khugepaged_scan.address = hend + HPAGE_PMD_SIZE;
2057 progress++;
2058 continue;
2059 }
2060 BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2061 2072
2062 while (khugepaged_scan.address < hend) { 2073 while (khugepaged_scan.address < hend) {
2063 int ret; 2074 int ret;
@@ -2086,7 +2097,7 @@ breakouterloop:
2086breakouterloop_mmap_sem: 2097breakouterloop_mmap_sem:
2087 2098
2088 spin_lock(&khugepaged_mm_lock); 2099 spin_lock(&khugepaged_mm_lock);
2089 BUG_ON(khugepaged_scan.mm_slot != mm_slot); 2100 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2090 /* 2101 /*
2091 * Release the current mm_slot if this mm is about to die, or 2102 * Release the current mm_slot if this mm is about to die, or
2092 * if we scanned all vmas of this mm. 2103 * if we scanned all vmas of this mm.
@@ -2241,9 +2252,9 @@ static int khugepaged(void *none)
2241 2252
2242 for (;;) { 2253 for (;;) {
2243 mutex_unlock(&khugepaged_mutex); 2254 mutex_unlock(&khugepaged_mutex);
2244 BUG_ON(khugepaged_thread != current); 2255 VM_BUG_ON(khugepaged_thread != current);
2245 khugepaged_loop(); 2256 khugepaged_loop();
2246 BUG_ON(khugepaged_thread != current); 2257 VM_BUG_ON(khugepaged_thread != current);
2247 2258
2248 mutex_lock(&khugepaged_mutex); 2259 mutex_lock(&khugepaged_mutex);
2249 if (!khugepaged_enabled()) 2260 if (!khugepaged_enabled())
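
The huge_memory.c changes thread a NUMA node through alloc_hugepage_vma() so khugepaged collapses pages onto the node of the first page it scans, rather than whatever node khugepaged happens to run on. The resulting pattern, using only identifiers that appear in the hunks above:

	int node = -1;

	/* inside the pte scan loop of khugepaged_scan_pmd() */
	if (node == -1)
		node = page_to_nid(page);	/* first mapped page picks the target node */

	/* later, when collapsing */
	new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, node);
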
diff --git a/mm/memory.c b/mm/memory.c
index 8e8c18324863..5823698c2b71 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping,
2648 details.last_index = ULONG_MAX; 2648 details.last_index = ULONG_MAX;
2649 details.i_mmap_lock = &mapping->i_mmap_lock; 2649 details.i_mmap_lock = &mapping->i_mmap_lock;
2650 2650
2651 mutex_lock(&mapping->unmap_mutex);
2651 spin_lock(&mapping->i_mmap_lock); 2652 spin_lock(&mapping->i_mmap_lock);
2652 2653
2653 /* Protect against endless unmapping loops */ 2654 /* Protect against endless unmapping loops */
@@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping,
2664 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) 2665 if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
2665 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); 2666 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
2666 spin_unlock(&mapping->i_mmap_lock); 2667 spin_unlock(&mapping->i_mmap_lock);
2668 mutex_unlock(&mapping->unmap_mutex);
2667} 2669}
2668EXPORT_SYMBOL(unmap_mapping_range); 2670EXPORT_SYMBOL(unmap_mapping_range);
2669 2671
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 368fc9d23610..b53ec99f1428 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1524} 1524}
1525 1525
1526/* Return a zonelist indicated by gfp for node representing a mempolicy */ 1526/* Return a zonelist indicated by gfp for node representing a mempolicy */
1527static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy)
1527static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1528 int nd)
1528{ 1529{
1529 int nd = numa_node_id();
1530
1531 switch (policy->mode) { 1530 switch (policy->mode) {
1532 case MPOL_PREFERRED: 1531 case MPOL_PREFERRED:
1533 if (!(policy->flags & MPOL_F_LOCAL)) 1532 if (!(policy->flags & MPOL_F_LOCAL))
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1679 zl = node_zonelist(interleave_nid(*mpol, vma, addr, 1678 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1680 huge_page_shift(hstate_vma(vma))), gfp_flags); 1679 huge_page_shift(hstate_vma(vma))), gfp_flags);
1681 } else { 1680 } else {
1682 zl = policy_zonelist(gfp_flags, *mpol); 1681 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1683 if ((*mpol)->mode == MPOL_BIND) 1682 if ((*mpol)->mode == MPOL_BIND)
1684 *nodemask = &(*mpol)->v.nodes; 1683 *nodemask = &(*mpol)->v.nodes;
1685 } 1684 }
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1820 */ 1819 */
1821struct page * 1820struct page *
1822alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, 1821alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1823 unsigned long addr)
1822 unsigned long addr, int node)
1824{ 1823{
1825 struct mempolicy *pol = get_vma_policy(current, vma, addr); 1824 struct mempolicy *pol = get_vma_policy(current, vma, addr);
1826 struct zonelist *zl; 1825 struct zonelist *zl;
@@ -1830,13 +1829,13 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1830 if (unlikely(pol->mode == MPOL_INTERLEAVE)) { 1829 if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
1831 unsigned nid; 1830 unsigned nid;
1832 1831
1833 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT);
1832 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
1834 mpol_cond_put(pol); 1833 mpol_cond_put(pol);
1835 page = alloc_page_interleave(gfp, order, nid); 1834 page = alloc_page_interleave(gfp, order, nid);
1836 put_mems_allowed(); 1835 put_mems_allowed();
1837 return page; 1836 return page;
1838 } 1837 }
1839 zl = policy_zonelist(gfp, pol);
1838 zl = policy_zonelist(gfp, pol, node);
1840 if (unlikely(mpol_needs_cond_ref(pol))) { 1839 if (unlikely(mpol_needs_cond_ref(pol))) {
1841 /* 1840 /*
1842 * slow path: ref counted shared policy 1841 * slow path: ref counted shared policy
@@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
1892 page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); 1891 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
1893 else 1892 else
1894 page = __alloc_pages_nodemask(gfp, order, 1893 page = __alloc_pages_nodemask(gfp, order,
1895 policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); 1894 policy_zonelist(gfp, pol, numa_node_id()),
1895 policy_nodemask(gfp, pol));
1896 put_mems_allowed(); 1896 put_mems_allowed();
1897 return page; 1897 return page;
1898} 1898}
diff --git a/mm/migrate.c b/mm/migrate.c
index 766115253807..352de555626c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
1287 return -EPERM; 1287 return -EPERM;
1288 1288
1289 /* Find the mm_struct */ 1289 /* Find the mm_struct */
1290 read_lock(&tasklist_lock); 1290 rcu_read_lock();
1291 task = pid ? find_task_by_vpid(pid) : current; 1291 task = pid ? find_task_by_vpid(pid) : current;
1292 if (!task) { 1292 if (!task) {
1293 read_unlock(&tasklist_lock); 1293 rcu_read_unlock();
1294 return -ESRCH; 1294 return -ESRCH;
1295 } 1295 }
1296 mm = get_task_mm(task); 1296 mm = get_task_mm(task);
1297 read_unlock(&tasklist_lock); 1297 rcu_read_unlock();
1298 1298
1299 if (!mm) 1299 if (!mm)
1300 return -EINVAL; 1300 return -EINVAL;
diff --git a/mm/mremap.c b/mm/mremap.c
index 9925b6391b80..1de98d492ddc 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
94 */ 94 */
95 mapping = vma->vm_file->f_mapping; 95 mapping = vma->vm_file->f_mapping;
96 spin_lock(&mapping->i_mmap_lock); 96 spin_lock(&mapping->i_mmap_lock);
97 if (new_vma->vm_truncate_count &&
97 new_vma->vm_truncate_count = 0;
98 new_vma->vm_truncate_count != vma->vm_truncate_count)
99 new_vma->vm_truncate_count = 0;
100 } 98 }
101 99
102 /* 100 /*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index da0fe32059b3..bd7625676a64 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5371,10 +5371,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
5371 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { 5371 for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) {
5372 unsigned long check = pfn + iter; 5372 unsigned long check = pfn + iter;
5373 5373
5374 if (!pfn_valid_within(check)) {
5374 if (!pfn_valid_within(check))
5375 iter++;
5376 continue; 5375 continue;
5377 } 5376
5378 page = pfn_to_page(check); 5377 page = pfn_to_page(check);
5379 if (!page_count(page)) { 5378 if (!page_count(page)) {
5380 if (PageBuddy(page)) 5379 if (PageBuddy(page))
diff --git a/mm/rmap.c b/mm/rmap.c
index f21f4a1d6a1c..941bf82e8961 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
497 struct mm_struct *mm = vma->vm_mm; 497 struct mm_struct *mm = vma->vm_mm;
498 int referenced = 0; 498 int referenced = 0;
499 499
500 /*
501 * Don't want to elevate referenced for mlocked page that gets this far,
502 * in order that it progresses to try_to_unmap and is moved to the
503 * unevictable list.
504 */
505 if (vma->vm_flags & VM_LOCKED) {
506 *mapcount = 0; /* break early from loop */
507 *vm_flags |= VM_LOCKED;
508 goto out;
509 }
510
511 /* Pretend the page is referenced if the task has the
512 swap token and is in the middle of a page fault. */
513 if (mm != current->mm && has_swap_token(mm) &&
514 rwsem_is_locked(&mm->mmap_sem))
515 referenced++;
516
517 if (unlikely(PageTransHuge(page))) { 500 if (unlikely(PageTransHuge(page))) {
518 pmd_t *pmd; 501 pmd_t *pmd;
519 502
520 spin_lock(&mm->page_table_lock); 503 spin_lock(&mm->page_table_lock);
504 /*
505 * rmap might return false positives; we must filter
506 * these out using page_check_address_pmd().
507 */
521 pmd = page_check_address_pmd(page, mm, address, 508 pmd = page_check_address_pmd(page, mm, address,
522 PAGE_CHECK_ADDRESS_PMD_FLAG); 509 PAGE_CHECK_ADDRESS_PMD_FLAG);
523 if (pmd && !pmd_trans_splitting(*pmd) &&
524 pmdp_clear_flush_young_notify(vma, address, pmd))
510 if (!pmd) {
511 spin_unlock(&mm->page_table_lock);
512 goto out;
513 }
514
515 if (vma->vm_flags & VM_LOCKED) {
516 spin_unlock(&mm->page_table_lock);
517 *mapcount = 0; /* break early from loop */
518 *vm_flags |= VM_LOCKED;
519 goto out;
520 }
521
522 /* go ahead even if the pmd is pmd_trans_splitting() */
523 if (pmdp_clear_flush_young_notify(vma, address, pmd))
525 referenced++; 524 referenced++;
526 spin_unlock(&mm->page_table_lock); 525 spin_unlock(&mm->page_table_lock);
527 } else { 526 } else {
528 pte_t *pte; 527 pte_t *pte;
529 spinlock_t *ptl; 528 spinlock_t *ptl;
530 529
530 /*
531 * rmap might return false positives; we must filter
532 * these out using page_check_address().
533 */
531 pte = page_check_address(page, mm, address, &ptl, 0); 534 pte = page_check_address(page, mm, address, &ptl, 0);
532 if (!pte) 535 if (!pte)
533 goto out; 536 goto out;
534 537
538 if (vma->vm_flags & VM_LOCKED) {
539 pte_unmap_unlock(pte, ptl);
540 *mapcount = 0; /* break early from loop */
541 *vm_flags |= VM_LOCKED;
542 goto out;
543 }
544
535 if (ptep_clear_flush_young_notify(vma, address, pte)) { 545 if (ptep_clear_flush_young_notify(vma, address, pte)) {
536 /* 546 /*
537 * Don't treat a reference through a sequentially read 547 * Don't treat a reference through a sequentially read
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
546 pte_unmap_unlock(pte, ptl); 556 pte_unmap_unlock(pte, ptl);
547 } 557 }
548 558
559 /* Pretend the page is referenced if the task has the
560 swap token and is in the middle of a page fault. */
561 if (mm != current->mm && has_swap_token(mm) &&
562 rwsem_is_locked(&mm->mmap_sem))
563 referenced++;
564
549 (*mapcount)--; 565 (*mapcount)--;
550 566
551 if (referenced) 567 if (referenced)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 07a458d72fa8..0341c5700e34 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1940 1940
1941 error = -EINVAL; 1941 error = -EINVAL;
1942 if (S_ISBLK(inode->i_mode)) { 1942 if (S_ISBLK(inode->i_mode)) {
1943 bdev = I_BDEV(inode);
1943 bdev = bdgrab(I_BDEV(inode));
1944 error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, 1944 error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1945 sys_swapon); 1945 sys_swapon);
1946 if (error < 0) { 1946 if (error < 0) {
diff --git a/mm/truncate.c b/mm/truncate.c
index 49feb46e77b8..d64296be00d3 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
225 next = start; 225 next = start;
226 while (next <= end && 226 while (next <= end &&
227 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { 227 pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
228 mem_cgroup_uncharge_start();
228 for (i = 0; i < pagevec_count(&pvec); i++) { 229 for (i = 0; i < pagevec_count(&pvec); i++) {
229 struct page *page = pvec.pages[i]; 230 struct page *page = pvec.pages[i];
230 pgoff_t page_index = page->index; 231 pgoff_t page_index = page->index;
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
247 unlock_page(page); 248 unlock_page(page);
248 } 249 }
249 pagevec_release(&pvec); 250 pagevec_release(&pvec);
251 mem_cgroup_uncharge_end();
250 cond_resched(); 252 cond_resched();
251 } 253 }
252 254
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 17497d0cd8b9..6771ea70bfe7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
1841 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1841 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
1842 return false; 1842 return false;
1843 1843
1844 /*
1845 * If we failed to reclaim and have scanned the full list, stop.
1846 * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far
1847 * faster but obviously would be less likely to succeed
1848 * allocation. If this is desirable, use GFP_REPEAT to decide
1849 * if both reclaimed and scanned should be checked or just
1850 * reclaimed
1851 */
1844 /* Consider stopping depending on scan and reclaim activity */
1845 if (sc->gfp_mask & __GFP_REPEAT) {
1846 /*
1847 * For __GFP_REPEAT allocations, stop reclaiming if the
1848 * full LRU list has been scanned and we are still failing
1849 * to reclaim pages. This full LRU scan is potentially
1850 * expensive but a __GFP_REPEAT caller really wants to succeed
1851 */
1852 if (!nr_reclaimed && !nr_scanned) 1852 if (!nr_reclaimed && !nr_scanned)
1853 return false; 1853 return false;
1854 } else {
1855 /*
1856 * For non-__GFP_REPEAT allocations which can presumably
1857 * fail without consequence, stop if we failed to reclaim
1858 * any pages from the last SWAP_CLUSTER_MAX number of
1859 * pages that were scanned. This will return to the
1860 * caller faster at the risk reclaim/compaction and
1861 * the resulting allocation attempt fails
1862 */
1863 if (!nr_reclaimed)
1864 return false;
1865 }
1854 1866
1855 /* 1867 /*
1856 * If we have not reclaimed enough pages for compaction and the 1868 * If we have not reclaimed enough pages for compaction and the
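
The vmscan.c hunk replaces the single stop test in should_continue_reclaim() with one that depends on the allocation: __GFP_REPEAT callers only give up after a full scan reclaims nothing, everyone else stops as soon as a scan batch reclaims nothing. Restated as a stand-alone predicate, with a boolean standing in for the gfp-flag test:

#include <stdbool.h>

static bool keep_reclaiming(bool gfp_repeat,
			    unsigned long nr_reclaimed, unsigned long nr_scanned)
{
	if (gfp_repeat)
		/* stop only after a fruitless full scan */
		return nr_reclaimed || nr_scanned;
	/* stop as soon as the last batch reclaimed nothing */
	return nr_reclaimed != 0;
}
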
diff --git a/net/Makefile b/net/Makefile
index a3330ebe2c53..a51d9465e628 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/
19obj-$(CONFIG_INET) += ipv4/ 19obj-$(CONFIG_INET) += ipv4/
20obj-$(CONFIG_XFRM) += xfrm/ 20obj-$(CONFIG_XFRM) += xfrm/
21obj-$(CONFIG_UNIX) += unix/ 21obj-$(CONFIG_UNIX) += unix/
22ifneq ($(CONFIG_IPV6),)
22obj-$(CONFIG_NET) += ipv6/
23obj-y += ipv6/
24endif
25obj-$(CONFIG_PACKET) += packet/ 23obj-$(CONFIG_PACKET) += packet/
26obj-$(CONFIG_NET_KEY) += key/ 24obj-$(CONFIG_NET_KEY) += key/
27obj-$(CONFIG_BRIDGE) += bridge/ 25obj-$(CONFIG_BRIDGE) += bridge/
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 7550abb0c96a..675614e38e14 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason)
859 result = L2CAP_CR_SEC_BLOCK; 859 result = L2CAP_CR_SEC_BLOCK;
860 else 860 else
861 result = L2CAP_CR_BAD_PSM; 861 result = L2CAP_CR_BAD_PSM;
862 sk->sk_state = BT_DISCONN;
862 863
863 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); 864 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
864 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); 865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 2575c2db6404..d7b9af4703d0 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
727 break; 727 break;
728 } 728 }
729 729
730 tty_unlock();
730 schedule(); 731 schedule();
732 tty_lock();
731 } 733 }
732 set_current_state(TASK_RUNNING); 734 set_current_state(TASK_RUNNING);
733 remove_wait_queue(&dev->wait, &wait); 735 remove_wait_queue(&dev->wait, &wait);
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig
index 9190ae462cb4..6dee7bf648a9 100644
--- a/net/bridge/Kconfig
+++ b/net/bridge/Kconfig
@@ -6,6 +6,7 @@ config BRIDGE
6 tristate "802.1d Ethernet Bridging" 6 tristate "802.1d Ethernet Bridging"
7 select LLC 7 select LLC
8 select STP 8 select STP
9 depends on IPV6 || IPV6=n
9 ---help--- 10 ---help---
10 If you say Y here, then your Linux box will be able to act as an 11 If you say Y here, then your Linux box will be able to act as an
11 Ethernet bridge, which means that the different Ethernet segments it 12 Ethernet bridge, which means that the different Ethernet segments it
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 6f6d8e1b776f..88e4aa9cb1f9 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
80 if (is_multicast_ether_addr(dest)) { 80 if (is_multicast_ether_addr(dest)) {
81 mdst = br_mdb_get(br, skb); 81 mdst = br_mdb_get(br, skb);
82 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { 82 if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
83 if ((mdst && !hlist_unhashed(&mdst->mglist)) || 83 if ((mdst && mdst->mglist) ||
84 br_multicast_is_router(br)) 84 br_multicast_is_router(br))
85 skb2 = skb; 85 skb2 = skb;
86 br_multicast_forward(mdst, skb, skb2); 86 br_multicast_forward(mdst, skb, skb2);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index f701a21acb34..030a002ff8ee 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -37,10 +37,9 @@
37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) 37 rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
38 38
39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 39#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
40static inline int ipv6_is_local_multicast(const struct in6_addr *addr) 40static inline int ipv6_is_transient_multicast(const struct in6_addr *addr)
41{ 41{
42 if (ipv6_addr_is_multicast(addr) && 42 if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr))
43 IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
44 return 1; 43 return 1;
45 return 0; 44 return 0;
46} 45}
@@ -232,8 +231,7 @@ static void br_multicast_group_expired(unsigned long data)
232 if (!netif_running(br->dev) || timer_pending(&mp->timer)) 231 if (!netif_running(br->dev) || timer_pending(&mp->timer))
233 goto out; 232 goto out;
234 233
235 if (!hlist_unhashed(&mp->mglist)) 234 mp->mglist = false;
236 hlist_del_init(&mp->mglist);
237 235
238 if (mp->ports) 236 if (mp->ports)
239 goto out; 237 goto out;
@@ -276,7 +274,7 @@ static void br_multicast_del_pg(struct net_bridge *br,
276 del_timer(&p->query_timer); 274 del_timer(&p->query_timer);
277 call_rcu_bh(&p->rcu, br_multicast_free_pg); 275 call_rcu_bh(&p->rcu, br_multicast_free_pg);
278 276
279 if (!mp->ports && hlist_unhashed(&mp->mglist) && 277 if (!mp->ports && !mp->mglist &&
280 netif_running(br->dev)) 278 netif_running(br->dev))
281 mod_timer(&mp->timer, jiffies); 279 mod_timer(&mp->timer, jiffies);
282 280
@@ -436,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
436 eth = eth_hdr(skb); 434 eth = eth_hdr(skb);
437 435
438 memcpy(eth->h_source, br->dev->dev_addr, 6); 436 memcpy(eth->h_source, br->dev->dev_addr, 6);
439 ipv6_eth_mc_map(group, eth->h_dest);
440 eth->h_proto = htons(ETH_P_IPV6); 437 eth->h_proto = htons(ETH_P_IPV6);
441 skb_put(skb, sizeof(*eth)); 438 skb_put(skb, sizeof(*eth));
442 439
@@ -448,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
448 ip6h->payload_len = htons(8 + sizeof(*mldq)); 445 ip6h->payload_len = htons(8 + sizeof(*mldq));
449 ip6h->nexthdr = IPPROTO_HOPOPTS; 446 ip6h->nexthdr = IPPROTO_HOPOPTS;
450 ip6h->hop_limit = 1; 447 ip6h->hop_limit = 1;
451 ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); 448 ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
449 &ip6h->saddr);
452 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); 450 ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
451 ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
453 452
454 hopopt = (u8 *)(ip6h + 1); 453 hopopt = (u8 *)(ip6h + 1);
455 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ 454 hopopt[0] = IPPROTO_ICMPV6; /* next hdr */
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data)
528 struct net_bridge *br = mp->br; 527 struct net_bridge *br = mp->br;
529 528
530 spin_lock(&br->multicast_lock); 529 spin_lock(&br->multicast_lock);
531 if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || 530 if (!netif_running(br->dev) || !mp->mglist ||
532 mp->queries_sent >= br->multicast_last_member_count) 531 mp->queries_sent >= br->multicast_last_member_count)
533 goto out; 532 goto out;
534 533
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br,
719 goto err; 718 goto err;
720 719
721 if (!port) { 720 if (!port) {
722 hlist_add_head(&mp->mglist, &br->mglist); 721 mp->mglist = true;
723 mod_timer(&mp->timer, now + br->multicast_membership_interval); 722 mod_timer(&mp->timer, now + br->multicast_membership_interval);
724 goto out; 723 goto out;
725 } 724 }
@@ -781,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
781{ 780{
782 struct br_ip br_group; 781 struct br_ip br_group;
783 782
784 if (ipv6_is_local_multicast(group)) 783 if (!ipv6_is_transient_multicast(group))
785 return 0; 784 return 0;
786 785
787 ipv6_addr_copy(&br_group.u.ip6, group); 786 ipv6_addr_copy(&br_group.u.ip6, group);
788 br_group.proto = htons(ETH_P_IP); 787 br_group.proto = htons(ETH_P_IPV6);
789 788
790 return br_multicast_add_group(br, port, &br_group); 789 return br_multicast_add_group(br, port, &br_group);
791} 790}
@@ -1014,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1014 1013
1015 nsrcs = skb_header_pointer(skb, 1014 nsrcs = skb_header_pointer(skb,
1016 len + offsetof(struct mld2_grec, 1015 len + offsetof(struct mld2_grec,
1017 grec_mca), 1016 grec_nsrcs),
1018 sizeof(_nsrcs), &_nsrcs); 1017 sizeof(_nsrcs), &_nsrcs);
1019 if (!nsrcs) 1018 if (!nsrcs)
1020 return -EINVAL; 1019 return -EINVAL;
1021 1020
1022 if (!pskb_may_pull(skb, 1021 if (!pskb_may_pull(skb,
1023 len + sizeof(*grec) + 1022 len + sizeof(*grec) +
1024 sizeof(struct in6_addr) * (*nsrcs))) 1023 sizeof(struct in6_addr) * ntohs(*nsrcs)))
1025 return -EINVAL; 1024 return -EINVAL;
1026 1025
1027 grec = (struct mld2_grec *)(skb->data + len); 1026 grec = (struct mld2_grec *)(skb->data + len);
1028 len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); 1027 len += sizeof(*grec) +
1028 sizeof(struct in6_addr) * ntohs(*nsrcs);
1029 1029
1030 /* We treat these as MLDv1 reports for now. */ 1030 /* We treat these as MLDv1 reports for now. */
1031 switch (grec->grec_type) { 1031 switch (grec->grec_type) {
@@ -1165,7 +1165,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1165 1165
1166 max_delay *= br->multicast_last_member_count; 1166 max_delay *= br->multicast_last_member_count;
1167 1167
1168 if (!hlist_unhashed(&mp->mglist) && 1168 if (mp->mglist &&
1169 (timer_pending(&mp->timer) ? 1169 (timer_pending(&mp->timer) ?
1170 time_after(mp->timer.expires, now + max_delay) : 1170 time_after(mp->timer.expires, now + max_delay) :
1171 try_to_del_timer_sync(&mp->timer) >= 0)) 1171 try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1177,7 +1177,7 @@ static int br_ip4_multicast_query(struct net_bridge *br,
1177 if (timer_pending(&p->timer) ? 1177 if (timer_pending(&p->timer) ?
1178 time_after(p->timer.expires, now + max_delay) : 1178 time_after(p->timer.expires, now + max_delay) :
1179 try_to_del_timer_sync(&p->timer) >= 0) 1179 try_to_del_timer_sync(&p->timer) >= 0)
1180 mod_timer(&mp->timer, now + max_delay); 1180 mod_timer(&p->timer, now + max_delay);
1181 } 1181 }
1182 1182
1183out: 1183out:
@@ -1236,7 +1236,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1236 goto out; 1236 goto out;
1237 1237
1238 max_delay *= br->multicast_last_member_count; 1238 max_delay *= br->multicast_last_member_count;
1239 if (!hlist_unhashed(&mp->mglist) && 1239 if (mp->mglist &&
1240 (timer_pending(&mp->timer) ? 1240 (timer_pending(&mp->timer) ?
1241 time_after(mp->timer.expires, now + max_delay) : 1241 time_after(mp->timer.expires, now + max_delay) :
1242 try_to_del_timer_sync(&mp->timer) >= 0)) 1242 try_to_del_timer_sync(&mp->timer) >= 0))
@@ -1248,7 +1248,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1248 if (timer_pending(&p->timer) ? 1248 if (timer_pending(&p->timer) ?
1249 time_after(p->timer.expires, now + max_delay) : 1249 time_after(p->timer.expires, now + max_delay) :
1250 try_to_del_timer_sync(&p->timer) >= 0) 1250 try_to_del_timer_sync(&p->timer) >= 0)
1251 mod_timer(&mp->timer, now + max_delay); 1251 mod_timer(&p->timer, now + max_delay);
1252 } 1252 }
1253 1253
1254out: 1254out:
@@ -1283,7 +1283,7 @@ static void br_multicast_leave_group(struct net_bridge *br,
1283 br->multicast_last_member_interval; 1283 br->multicast_last_member_interval;
1284 1284
1285 if (!port) { 1285 if (!port) {
1286 if (!hlist_unhashed(&mp->mglist) && 1286 if (mp->mglist &&
1287 (timer_pending(&mp->timer) ? 1287 (timer_pending(&mp->timer) ?
1288 time_after(mp->timer.expires, time) : 1288 time_after(mp->timer.expires, time) :
1289 try_to_del_timer_sync(&mp->timer) >= 0)) { 1289 try_to_del_timer_sync(&mp->timer) >= 0)) {
@@ -1341,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
1341{ 1341{
1342 struct br_ip br_group; 1342 struct br_ip br_group;
1343 1343
1344 if (ipv6_is_local_multicast(group)) 1344 if (!ipv6_is_transient_multicast(group))
1345 return; 1345 return;
1346 1346
1347 ipv6_addr_copy(&br_group.u.ip6, group); 1347 ipv6_addr_copy(&br_group.u.ip6, group);
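Two of the br_multicast fixes above are plain offset and byte-order bugs: the MLDv2 source count is read from grec_nsrcs rather than grec_mca, and it has to pass through ntohs() before being multiplied by sizeof(struct in6_addr). A small standalone check of what the missing ntohs() costs on a little-endian host; the constant and variable names are trimmed down, not the kernel definitions.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* 16 bytes per IPv6 source address, as in struct in6_addr. */
#define IN6_ADDR_LEN 16

int main(void)
{
    /* An MLDv2 group record carrying 3 sources stores the count in
     * network byte order on the wire. */
    uint16_t nsrcs_wire = htons(3);

    size_t wrong = (size_t)nsrcs_wire * IN6_ADDR_LEN;        /* bug: raw field */
    size_t right = (size_t)ntohs(nsrcs_wire) * IN6_ADDR_LEN; /* fix: ntohs() */

    printf("without ntohs: %zu bytes\n", wrong); /* 12288 on little-endian */
    printf("with ntohs:    %zu bytes\n", right); /* 48 */
    return 0;
}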
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 84aac7734bfc..4e1b620b6be6 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -84,13 +84,13 @@ struct net_bridge_port_group {
84struct net_bridge_mdb_entry 84struct net_bridge_mdb_entry
85{ 85{
86 struct hlist_node hlist[2]; 86 struct hlist_node hlist[2];
87 struct hlist_node mglist;
88 struct net_bridge *br; 87 struct net_bridge *br;
89 struct net_bridge_port_group __rcu *ports; 88 struct net_bridge_port_group __rcu *ports;
90 struct rcu_head rcu; 89 struct rcu_head rcu;
91 struct timer_list timer; 90 struct timer_list timer;
92 struct timer_list query_timer; 91 struct timer_list query_timer;
93 struct br_ip addr; 92 struct br_ip addr;
93 bool mglist;
94 u32 queries_sent; 94 u32 queries_sent;
95}; 95};
96 96
@@ -238,7 +238,6 @@ struct net_bridge
238 spinlock_t multicast_lock; 238 spinlock_t multicast_lock;
239 struct net_bridge_mdb_htable __rcu *mdb; 239 struct net_bridge_mdb_htable __rcu *mdb;
240 struct hlist_head router_list; 240 struct hlist_head router_list;
241 struct hlist_head mglist;
242 241
243 struct timer_list multicast_router_timer; 242 struct timer_list multicast_router_timer;
244 struct timer_list multicast_querier_timer; 243 struct timer_list multicast_querier_timer;
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index dff633d62e5b..05f357828a2f 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
252{ 252{
253 struct kvec iov = {buf, len}; 253 struct kvec iov = {buf, len};
254 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 254 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
255 int r;
255 256
256 return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); 257 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
258 if (r == -EAGAIN)
259 r = 0;
260 return r;
257} 261}
258 262
259/* 263/*
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
264 size_t kvlen, size_t len, int more) 268 size_t kvlen, size_t len, int more)
265{ 269{
266 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; 270 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
271 int r;
267 272
268 if (more) 273 if (more)
269 msg.msg_flags |= MSG_MORE; 274 msg.msg_flags |= MSG_MORE;
270 else 275 else
271 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ 276 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
272 277
273 return kernel_sendmsg(sock, &msg, iov, kvlen, len); 278 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
279 if (r == -EAGAIN)
280 r = 0;
281 return r;
274} 282}
275 283
276 284
@@ -328,7 +336,6 @@ static void reset_connection(struct ceph_connection *con)
328 ceph_msg_put(con->out_msg); 336 ceph_msg_put(con->out_msg);
329 con->out_msg = NULL; 337 con->out_msg = NULL;
330 } 338 }
331 con->out_keepalive_pending = false;
332 con->in_seq = 0; 339 con->in_seq = 0;
333 con->in_seq_acked = 0; 340 con->in_seq_acked = 0;
334} 341}
@@ -847,6 +854,8 @@ static int write_partial_msg_pages(struct ceph_connection *con)
847 (msg->pages || msg->pagelist || msg->bio || in_trail)) 854 (msg->pages || msg->pagelist || msg->bio || in_trail))
848 kunmap(page); 855 kunmap(page);
849 856
857 if (ret == -EAGAIN)
858 ret = 0;
850 if (ret <= 0) 859 if (ret <= 0)
851 goto out; 860 goto out;
852 861
@@ -1238,8 +1247,6 @@ static int process_connect(struct ceph_connection *con)
1238 con->auth_retry); 1247 con->auth_retry);
1239 if (con->auth_retry == 2) { 1248 if (con->auth_retry == 2) {
1240 con->error_msg = "connect authorization failure"; 1249 con->error_msg = "connect authorization failure";
1241 reset_connection(con);
1242 set_bit(CLOSED, &con->state);
1243 return -1; 1250 return -1;
1244 } 1251 }
1245 con->auth_retry = 1; 1252 con->auth_retry = 1;
@@ -1705,14 +1712,6 @@ more:
1705 1712
1706 /* open the socket first? */ 1713 /* open the socket first? */
1707 if (con->sock == NULL) { 1714 if (con->sock == NULL) {
1708 /*
1709 * if we were STANDBY and are reconnecting _this_
1710 * connection, bump connect_seq now. Always bump
1711 * global_seq.
1712 */
1713 if (test_and_clear_bit(STANDBY, &con->state))
1714 con->connect_seq++;
1715
1716 prepare_write_banner(msgr, con); 1715 prepare_write_banner(msgr, con);
1717 prepare_write_connect(msgr, con, 1); 1716 prepare_write_connect(msgr, con, 1);
1718 prepare_read_banner(con); 1717 prepare_read_banner(con);
@@ -1737,16 +1736,12 @@ more_kvec:
1737 if (con->out_skip) { 1736 if (con->out_skip) {
1738 ret = write_partial_skip(con); 1737 ret = write_partial_skip(con);
1739 if (ret <= 0) 1738 if (ret <= 0)
1740 goto done; 1739 goto out;
1741 if (ret < 0) {
1742 dout("try_write write_partial_skip err %d\n", ret);
1743 goto done;
1744 }
1745 } 1740 }
1746 if (con->out_kvec_left) { 1741 if (con->out_kvec_left) {
1747 ret = write_partial_kvec(con); 1742 ret = write_partial_kvec(con);
1748 if (ret <= 0) 1743 if (ret <= 0)
1749 goto done; 1744 goto out;
1750 } 1745 }
1751 1746
1752 /* msg pages? */ 1747 /* msg pages? */
@@ -1761,11 +1756,11 @@ more_kvec:
1761 if (ret == 1) 1756 if (ret == 1)
1762 goto more_kvec; /* we need to send the footer, too! */ 1757 goto more_kvec; /* we need to send the footer, too! */
1763 if (ret == 0) 1758 if (ret == 0)
1764 goto done; 1759 goto out;
1765 if (ret < 0) { 1760 if (ret < 0) {
1766 dout("try_write write_partial_msg_pages err %d\n", 1761 dout("try_write write_partial_msg_pages err %d\n",
1767 ret); 1762 ret);
1768 goto done; 1763 goto out;
1769 } 1764 }
1770 } 1765 }
1771 1766
@@ -1789,10 +1784,9 @@ do_next:
1789 /* Nothing to do! */ 1784 /* Nothing to do! */
1790 clear_bit(WRITE_PENDING, &con->state); 1785 clear_bit(WRITE_PENDING, &con->state);
1791 dout("try_write nothing else to write.\n"); 1786 dout("try_write nothing else to write.\n");
1792done:
1793 ret = 0; 1787 ret = 0;
1794out: 1788out:
1795 dout("try_write done on %p\n", con); 1789 dout("try_write done on %p ret %d\n", con, ret);
1796 return ret; 1790 return ret;
1797} 1791}
1798 1792
@@ -1821,19 +1815,17 @@ more:
1821 dout("try_read connecting\n"); 1815 dout("try_read connecting\n");
1822 ret = read_partial_banner(con); 1816 ret = read_partial_banner(con);
1823 if (ret <= 0) 1817 if (ret <= 0)
1824 goto done;
1825 if (process_banner(con) < 0) {
1826 ret = -1;
1827 goto out; 1818 goto out;
1828 } 1819 ret = process_banner(con);
1820 if (ret < 0)
1821 goto out;
1829 } 1822 }
1830 ret = read_partial_connect(con); 1823 ret = read_partial_connect(con);
1831 if (ret <= 0) 1824 if (ret <= 0)
1832 goto done;
1833 if (process_connect(con) < 0) {
1834 ret = -1;
1835 goto out; 1825 goto out;
1836 } 1826 ret = process_connect(con);
1827 if (ret < 0)
1828 goto out;
1837 goto more; 1829 goto more;
1838 } 1830 }
1839 1831
@@ -1848,7 +1840,7 @@ more:
1848 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); 1840 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
1849 ret = ceph_tcp_recvmsg(con->sock, buf, skip); 1841 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
1850 if (ret <= 0) 1842 if (ret <= 0)
1851 goto done; 1843 goto out;
1852 con->in_base_pos += ret; 1844 con->in_base_pos += ret;
1853 if (con->in_base_pos) 1845 if (con->in_base_pos)
1854 goto more; 1846 goto more;
@@ -1859,7 +1851,7 @@ more:
1859 */ 1851 */
1860 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); 1852 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
1861 if (ret <= 0) 1853 if (ret <= 0)
1862 goto done; 1854 goto out;
1863 dout("try_read got tag %d\n", (int)con->in_tag); 1855 dout("try_read got tag %d\n", (int)con->in_tag);
1864 switch (con->in_tag) { 1856 switch (con->in_tag) {
1865 case CEPH_MSGR_TAG_MSG: 1857 case CEPH_MSGR_TAG_MSG:
@@ -1870,7 +1862,7 @@ more:
1870 break; 1862 break;
1871 case CEPH_MSGR_TAG_CLOSE: 1863 case CEPH_MSGR_TAG_CLOSE:
1872 set_bit(CLOSED, &con->state); /* fixme */ 1864 set_bit(CLOSED, &con->state); /* fixme */
1873 goto done; 1865 goto out;
1874 default: 1866 default:
1875 goto bad_tag; 1867 goto bad_tag;
1876 } 1868 }
@@ -1882,13 +1874,12 @@ more:
1882 case -EBADMSG: 1874 case -EBADMSG:
1883 con->error_msg = "bad crc"; 1875 con->error_msg = "bad crc";
1884 ret = -EIO; 1876 ret = -EIO;
1885 goto out; 1877 break;
1886 case -EIO: 1878 case -EIO:
1887 con->error_msg = "io error"; 1879 con->error_msg = "io error";
1888 goto out; 1880 break;
1889 default:
1890 goto done;
1891 } 1881 }
1882 goto out;
1892 } 1883 }
1893 if (con->in_tag == CEPH_MSGR_TAG_READY) 1884 if (con->in_tag == CEPH_MSGR_TAG_READY)
1894 goto more; 1885 goto more;
@@ -1898,15 +1889,13 @@ more:
1898 if (con->in_tag == CEPH_MSGR_TAG_ACK) { 1889 if (con->in_tag == CEPH_MSGR_TAG_ACK) {
1899 ret = read_partial_ack(con); 1890 ret = read_partial_ack(con);
1900 if (ret <= 0) 1891 if (ret <= 0)
1901 goto done; 1892 goto out;
1902 process_ack(con); 1893 process_ack(con);
1903 goto more; 1894 goto more;
1904 } 1895 }
1905 1896
1906done:
1907 ret = 0;
1908out: 1897out:
1909 dout("try_read done on %p\n", con); 1898 dout("try_read done on %p ret %d\n", con, ret);
1910 return ret; 1899 return ret;
1911 1900
1912bad_tag: 1901bad_tag:
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work)
1951 work.work); 1940 work.work);
1952 1941
1953 mutex_lock(&con->mutex); 1942 mutex_lock(&con->mutex);
1943 if (test_and_clear_bit(BACKOFF, &con->state)) {
1944 dout("con_work %p backing off\n", con);
1945 if (queue_delayed_work(ceph_msgr_wq, &con->work,
1946 round_jiffies_relative(con->delay))) {
1947 dout("con_work %p backoff %lu\n", con, con->delay);
1948 mutex_unlock(&con->mutex);
1949 return;
1950 } else {
1951 con->ops->put(con);
1952 dout("con_work %p FAILED to back off %lu\n", con,
1953 con->delay);
1954 }
1955 }
1954 1956
1957 if (test_bit(STANDBY, &con->state)) {
1958 dout("con_work %p STANDBY\n", con);
1959 goto done;
1960 }
1955 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ 1961 if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
1956 dout("con_work CLOSED\n"); 1962 dout("con_work CLOSED\n");
1957 con_close_socket(con); 1963 con_close_socket(con);
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con)
2008 /* Requeue anything that hasn't been acked */ 2014 /* Requeue anything that hasn't been acked */
2009 list_splice_init(&con->out_sent, &con->out_queue); 2015 list_splice_init(&con->out_sent, &con->out_queue);
2010 2016
2011 /* If there are no messages in the queue, place the connection 2017 /* If there are no messages queued or keepalive pending, place
2012 * in a STANDBY state (i.e., don't try to reconnect just yet). */ 2018 * the connection in a STANDBY state */
2013 if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { 2019 if (list_empty(&con->out_queue) &&
2014 dout("fault setting STANDBY\n"); 2020 !test_bit(KEEPALIVE_PENDING, &con->state)) {
2021 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2022 clear_bit(WRITE_PENDING, &con->state);
2015 set_bit(STANDBY, &con->state); 2023 set_bit(STANDBY, &con->state);
2016 } else { 2024 } else {
2017 /* retry after a delay. */ 2025 /* retry after a delay. */
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con)
2019 con->delay = BASE_DELAY_INTERVAL; 2027 con->delay = BASE_DELAY_INTERVAL;
2020 else if (con->delay < MAX_DELAY_INTERVAL) 2028 else if (con->delay < MAX_DELAY_INTERVAL)
2021 con->delay *= 2; 2029 con->delay *= 2;
2022 dout("fault queueing %p delay %lu\n", con, con->delay);
2023 con->ops->get(con); 2030 con->ops->get(con);
2024 if (queue_delayed_work(ceph_msgr_wq, &con->work, 2031 if (queue_delayed_work(ceph_msgr_wq, &con->work,
2025 round_jiffies_relative(con->delay)) == 0) 2032 round_jiffies_relative(con->delay))) {
2033 dout("fault queued %p delay %lu\n", con, con->delay);
2034 } else {
2026 con->ops->put(con); 2035 con->ops->put(con);
2036 dout("fault failed to queue %p delay %lu, backoff\n",
2037 con, con->delay);
2038 /*
2039 * In many cases we see a socket state change
2040 * while con_work is running and end up
2041 * queuing (non-delayed) work, such that we
2042 * can't backoff with a delay. Set a flag so
2043 * that when con_work restarts we schedule the
2044 * delay then.
2045 */
2046 set_bit(BACKOFF, &con->state);
2047 }
2027 } 2048 }
2028 2049
2029out_unlock: 2050out_unlock:
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr)
2094} 2115}
2095EXPORT_SYMBOL(ceph_messenger_destroy); 2116EXPORT_SYMBOL(ceph_messenger_destroy);
2096 2117
2118static void clear_standby(struct ceph_connection *con)
2119{
2120 /* come back from STANDBY? */
2121 if (test_and_clear_bit(STANDBY, &con->state)) {
2122 mutex_lock(&con->mutex);
2123 dout("clear_standby %p and ++connect_seq\n", con);
2124 con->connect_seq++;
2125 WARN_ON(test_bit(WRITE_PENDING, &con->state));
2126 WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state));
2127 mutex_unlock(&con->mutex);
2128 }
2129}
2130
2097/* 2131/*
2098 * Queue up an outgoing message on the given connection. 2132 * Queue up an outgoing message on the given connection.
2099 */ 2133 */
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2126 2160
2127 /* if there wasn't anything waiting to send before, queue 2161 /* if there wasn't anything waiting to send before, queue
2128 * new work */ 2162 * new work */
2163 clear_standby(con);
2129 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2164 if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2130 queue_con(con); 2165 queue_con(con);
2131} 2166}
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
2191 */ 2226 */
2192void ceph_con_keepalive(struct ceph_connection *con) 2227void ceph_con_keepalive(struct ceph_connection *con)
2193{ 2228{
2229 dout("con_keepalive %p\n", con);
2230 clear_standby(con);
2194 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && 2231 if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
2195 test_and_set_bit(WRITE_PENDING, &con->state) == 0) 2232 test_and_set_bit(WRITE_PENDING, &con->state) == 0)
2196 queue_con(con); 2233 queue_con(con);
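Several of the messenger changes above share one idea: a non-blocking send or receive that comes back with -EAGAIN has made no progress but is not a failure, so the wrappers now report 0 and let the caller retry or back off. A userspace sketch of the same wrapper around recv(), assuming the ordinary Linux socket API; the helper name and the socketpair test are mine, not part of the ceph messenger.

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <sys/types.h>

/*
 * Mirror of the ceph_tcp_recvmsg() change: treat "no data right now"
 * on a non-blocking socket as 0 bytes of progress rather than an
 * error, so callers only bail out on real failures.
 */
static ssize_t recv_some(int fd, void *buf, size_t len)
{
    ssize_t r = recv(fd, buf, len, MSG_DONTWAIT | MSG_NOSIGNAL);

    if (r < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
        return 0;       /* no progress, come back later */
    return r;           /* bytes read, 0 on EOF, or -1 with errno set */
}

int main(void)
{
    char buf[64];
    int sv[2];

    /* A connected pair with nothing queued on it yet. */
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0)
        return 1;

    printf("recv_some on empty socket -> %zd\n",
           recv_some(sv[0], buf, sizeof(buf)));
    return 0;
}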
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c
index 1a040e64c69f..cd9c21df87d1 100644
--- a/net/ceph/pagevec.c
+++ b/net/ceph/pagevec.c
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data,
16 int num_pages, bool write_page) 16 int num_pages, bool write_page)
17{ 17{
18 struct page **pages; 18 struct page **pages;
19 int rc; 19 int got = 0;
20 int rc = 0;
20 21
21 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); 22 pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
22 if (!pages) 23 if (!pages)
23 return ERR_PTR(-ENOMEM); 24 return ERR_PTR(-ENOMEM);
24 25
25 down_read(&current->mm->mmap_sem); 26 down_read(&current->mm->mmap_sem);
26 rc = get_user_pages(current, current->mm, (unsigned long)data, 27 while (got < num_pages) {
27 num_pages, write_page, 0, pages, NULL); 28 rc = get_user_pages(current, current->mm,
29 (unsigned long)data + ((unsigned long)got * PAGE_SIZE),
30 num_pages - got, write_page, 0, pages + got, NULL);
31 if (rc < 0)
32 break;
33 BUG_ON(rc == 0);
34 got += rc;
35 }
28 up_read(&current->mm->mmap_sem); 36 up_read(&current->mm->mmap_sem);
29 if (rc < num_pages) 37 if (rc < 0)
30 goto fail; 38 goto fail;
31 return pages; 39 return pages;
32 40
33fail: 41fail:
34 ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); 42 ceph_put_page_vector(pages, got, false);
35 return ERR_PTR(rc); 43 return ERR_PTR(rc);
36} 44}
37EXPORT_SYMBOL(ceph_get_direct_page_vector); 45EXPORT_SYMBOL(ceph_get_direct_page_vector);
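The pagevec fix replaces a single get_user_pages() call with a loop that keeps asking for the remaining pages, since the call is allowed to pin fewer pages than requested. The same "retry until the whole request is satisfied" shape, shown here on read(), which has an identical partial-result contract; this is an analogy of mine, not kernel code.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Same shape as the ceph_get_direct_page_vector() loop above: the
 * primitive may legally return fewer items than asked for, so call it
 * again for the remainder and only treat a negative return as failure.
 */
static ssize_t read_full(int fd, void *buf, size_t len)
{
    size_t got = 0;

    while (got < len) {
        ssize_t rc = read(fd, (char *)buf + got, len - got);

        if (rc < 0) {
            if (errno == EINTR)
                continue;       /* interrupted, retry */
            return -1;          /* real error */
        }
        if (rc == 0)
            break;              /* EOF before the request was filled */
        got += rc;
    }
    return (ssize_t)got;
}

int main(void)
{
    char buf[4096];
    int fd = open("/etc/hostname", O_RDONLY);

    if (fd < 0)
        return 1;
    printf("read %zd bytes\n", read_full(fd, buf, sizeof(buf)));
    close(fd);
    return 0;
}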
diff --git a/net/core/dev.c b/net/core/dev.c
index 8e726cb47ed7..6561021d22d1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change);
1114void dev_load(struct net *net, const char *name) 1114void dev_load(struct net *net, const char *name)
1115{ 1115{
1116 struct net_device *dev; 1116 struct net_device *dev;
1117 int no_module;
1117 1118
1118 rcu_read_lock(); 1119 rcu_read_lock();
1119 dev = dev_get_by_name_rcu(net, name); 1120 dev = dev_get_by_name_rcu(net, name);
1120 rcu_read_unlock(); 1121 rcu_read_unlock();
1121 1122
1122 if (!dev && capable(CAP_NET_ADMIN)) 1123 no_module = !dev;
1123 request_module("%s", name); 1124 if (no_module && capable(CAP_NET_ADMIN))
1125 no_module = request_module("netdev-%s", name);
1126 if (no_module && capable(CAP_SYS_MODULE)) {
1127 if (!request_module("%s", name))
1128 pr_err("Loading kernel module for a network device "
1129"with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
1130"instead\n", name);
1131 }
1124} 1132}
1125EXPORT_SYMBOL(dev_load); 1133EXPORT_SYMBOL(dev_load);
1126 1134
@@ -1280,10 +1288,13 @@ static int __dev_close_many(struct list_head *head)
1280 1288
1281static int __dev_close(struct net_device *dev) 1289static int __dev_close(struct net_device *dev)
1282{ 1290{
1291 int retval;
1283 LIST_HEAD(single); 1292 LIST_HEAD(single);
1284 1293
1285 list_add(&dev->unreg_list, &single); 1294 list_add(&dev->unreg_list, &single);
1286 return __dev_close_many(&single); 1295 retval = __dev_close_many(&single);
1296 list_del(&single);
1297 return retval;
1287} 1298}
1288 1299
1289int dev_close_many(struct list_head *head) 1300int dev_close_many(struct list_head *head)
@@ -1325,7 +1336,7 @@ int dev_close(struct net_device *dev)
1325 1336
1326 list_add(&dev->unreg_list, &single); 1337 list_add(&dev->unreg_list, &single);
1327 dev_close_many(&single); 1338 dev_close_many(&single);
1328 1339 list_del(&single);
1329 return 0; 1340 return 0;
1330} 1341}
1331EXPORT_SYMBOL(dev_close); 1342EXPORT_SYMBOL(dev_close);
@@ -5063,6 +5074,7 @@ static void rollback_registered(struct net_device *dev)
5063 5074
5064 list_add(&dev->unreg_list, &single); 5075 list_add(&dev->unreg_list, &single);
5065 rollback_registered_many(&single); 5076 rollback_registered_many(&single);
5077 list_del(&single);
5066} 5078}
5067 5079
5068unsigned long netdev_fix_features(unsigned long features, const char *name) 5080unsigned long netdev_fix_features(unsigned long features, const char *name)
@@ -6216,6 +6228,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
6216 } 6228 }
6217 } 6229 }
6218 unregister_netdevice_many(&dev_kill_list); 6230 unregister_netdevice_many(&dev_kill_list);
6231 list_del(&dev_kill_list);
6219 rtnl_unlock(); 6232 rtnl_unlock();
6220} 6233}
6221 6234
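dev_load() now tries the restricted "netdev-%s" module alias first and only falls back to the bare device name for CAP_SYS_MODULE callers, which is presumably why the tunnel drivers later in this diff (gre0, tunl0, sit0, ip6tnl0) switch from MODULE_ALIAS() to MODULE_ALIAS_NETDEV(). A standalone sketch of that two-step lookup; request_module() exists only in the kernel, so it is replaced here by a stub, and all other names are mine.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for request_module(): report success only for the
 * namespaced "netdev-" alias, as a module built with
 * MODULE_ALIAS_NETDEV() would provide. */
static int fake_request_module(const char *alias)
{
    return strncmp(alias, "netdev-", 7) == 0 ? 0 : -1;
}

/*
 * Same ordering as the dev_load() hunk above: try the restricted
 * "netdev-<name>" alias first, and only fall back to the raw name for
 * callers privileged enough to load arbitrary modules anyway.
 */
static void load_net_driver(const char *name, bool cap_net_admin,
                            bool cap_sys_module)
{
    char alias[64];
    int missing = 1;

    snprintf(alias, sizeof(alias), "netdev-%s", name);
    if (cap_net_admin)
        missing = fake_request_module(alias);
    if (!missing) {
        printf("%s: loaded via %s\n", name, alias);
        return;
    }
    if (cap_sys_module && !fake_request_module(name))
        printf("%s: loaded via deprecated bare alias\n", name);
}

int main(void)
{
    load_net_driver("gre0", true, false);
    return 0;
}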
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index 508f9c18992f..133fd22ea287 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
144 144
145 list_for_each_entry(ha, &from_list->list, list) { 145 list_for_each_entry(ha, &from_list->list, list) {
146 type = addr_type ? addr_type : ha->type; 146 type = addr_type ? addr_type : ha->type;
147 __hw_addr_del(to_list, ha->addr, addr_len, addr_type); 147 __hw_addr_del(to_list, ha->addr, addr_len, type);
148 } 148 }
149} 149}
150EXPORT_SYMBOL(__hw_addr_del_multiple); 150EXPORT_SYMBOL(__hw_addr_del_multiple);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a9e7fc4c461f..b5bada92f637 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags)
3321 pkt_dev->started_at); 3321 pkt_dev->started_at);
3322 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); 3322 ktime_t idle = ns_to_ktime(pkt_dev->idle_acc);
3323 3323
3324 p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n", 3324 p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n",
3325 (unsigned long long)ktime_to_us(elapsed), 3325 (unsigned long long)ktime_to_us(elapsed),
3326 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), 3326 (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)),
3327 (unsigned long long)ktime_to_us(idle), 3327 (unsigned long long)ktime_to_us(idle),
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index 6b03f561caec..c44348adba3b 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
626 dcb->cmd = DCB_CMD_GAPP; 626 dcb->cmd = DCB_CMD_GAPP;
627 627
628 app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); 628 app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
629 if (!app_nest)
630 goto out_cancel;
631
629 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); 632 ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
630 if (ret) 633 if (ret)
631 goto out_cancel; 634 goto out_cancel;
@@ -1190,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
1190 goto err; 1193 goto err;
1191 } 1194 }
1192 1195
1193 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { 1196 if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1194 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); 1197 struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1195 err = ops->ieee_setpfc(netdev, pfc); 1198 err = ops->ieee_setpfc(netdev, pfc);
1196 if (err) 1199 if (err)
@@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp);
1613u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) 1616u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
1614{ 1617{
1615 struct dcb_app_type *itr; 1618 struct dcb_app_type *itr;
1619 struct dcb_app_type event;
1620
1621 memcpy(&event.name, dev->name, sizeof(event.name));
1622 memcpy(&event.app, new, sizeof(event.app));
1616 1623
1617 spin_lock(&dcb_lock); 1624 spin_lock(&dcb_lock);
1618 /* Search for existing match and replace */ 1625 /* Search for existing match and replace */
@@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new)
1644 } 1651 }
1645out: 1652out:
1646 spin_unlock(&dcb_lock); 1653 spin_unlock(&dcb_lock);
1647 call_dcbevent_notifiers(DCB_APP_EVENT, new); 1654 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1648 return 0; 1655 return 0;
1649} 1656}
1650EXPORT_SYMBOL(dcb_setapp); 1657EXPORT_SYMBOL(dcb_setapp);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 8cde009e8b85..4222e7a654b0 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
614 /* Caller (dccp_v4_do_rcv) will send Reset */ 614 /* Caller (dccp_v4_do_rcv) will send Reset */
615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; 615 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
616 return 1; 616 return 1;
617 } else if (sk->sk_state == DCCP_CLOSED) {
618 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
619 return 1;
617 } 620 }
618 621
619 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { 622 if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) {
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
668 } 671 }
669 672
670 switch (sk->sk_state) { 673 switch (sk->sk_state) {
671 case DCCP_CLOSED:
672 dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION;
673 return 1;
674
675 case DCCP_REQUESTING: 674 case DCCP_REQUESTING:
676 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); 675 queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len);
677 if (queued >= 0) 676 if (queued >= 0)
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 739435a6af39..cfa7a5e1c5c9 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen)
67 size_t result_len = 0; 67 size_t result_len = 0;
68 const char *data = _data, *end, *opt; 68 const char *data = _data, *end, *opt;
69 69
70 kenter("%%%d,%s,'%s',%zu", 70 kenter("%%%d,%s,'%*.*s',%zu",
71 key->serial, key->description, data, datalen); 71 key->serial, key->description,
72 (int)datalen, (int)datalen, data, datalen);
72 73
73 if (datalen <= 1 || !data || data[datalen - 1] != '\0') 74 if (datalen <= 1 || !data || data[datalen - 1] != '\0')
74 return -EINVAL; 75 return -EINVAL;
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m)
217 seq_printf(m, ": %u", key->datalen); 218 seq_printf(m, ": %u", key->datalen);
218} 219}
219 220
221/*
222 * read the DNS data
223 * - the key's semaphore is read-locked
224 */
225static long dns_resolver_read(const struct key *key,
226 char __user *buffer, size_t buflen)
227{
228 if (key->type_data.x[0])
229 return key->type_data.x[0];
230
231 return user_read(key, buffer, buflen);
232}
233
220struct key_type key_type_dns_resolver = { 234struct key_type key_type_dns_resolver = {
221 .name = "dns_resolver", 235 .name = "dns_resolver",
222 .instantiate = dns_resolver_instantiate, 236 .instantiate = dns_resolver_instantiate,
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = {
224 .revoke = user_revoke, 238 .revoke = user_revoke,
225 .destroy = user_destroy, 239 .destroy = user_destroy,
226 .describe = dns_resolver_describe, 240 .describe = dns_resolver_describe,
227 .read = user_read, 241 .read = dns_resolver_read,
228}; 242};
229 243
230static int __init init_dns_resolver(void) 244static int __init init_dns_resolver(void)
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 748cb5b337bd..036652c8166d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg)
670 ifap = &ifa->ifa_next) { 670 ifap = &ifa->ifa_next) {
671 if (!strcmp(ifr.ifr_name, ifa->ifa_label) && 671 if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
672 sin_orig.sin_addr.s_addr == 672 sin_orig.sin_addr.s_addr ==
673 ifa->ifa_address) { 673 ifa->ifa_local) {
674 break; /* found */ 674 break; /* found */
675 } 675 }
676 } 676 }
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu)
1030 return mtu >= 68; 1030 return mtu >= 68;
1031} 1031}
1032 1032
1033static void inetdev_send_gratuitous_arp(struct net_device *dev,
1034 struct in_device *in_dev)
1035
1036{
1037 struct in_ifaddr *ifa = in_dev->ifa_list;
1038
1039 if (!ifa)
1040 return;
1041
1042 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1043 ifa->ifa_local, dev,
1044 ifa->ifa_local, NULL,
1045 dev->dev_addr, NULL);
1046}
1047
1033/* Called only under RTNL semaphore */ 1048/* Called only under RTNL semaphore */
1034 1049
1035static int inetdev_event(struct notifier_block *this, unsigned long event, 1050static int inetdev_event(struct notifier_block *this, unsigned long event,
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1082 } 1097 }
1083 ip_mc_up(in_dev); 1098 ip_mc_up(in_dev);
1084 /* fall through */ 1099 /* fall through */
1085 case NETDEV_NOTIFY_PEERS:
1086 case NETDEV_CHANGEADDR: 1100 case NETDEV_CHANGEADDR:
1101 if (!IN_DEV_ARP_NOTIFY(in_dev))
1102 break;
1103 /* fall through */
1104 case NETDEV_NOTIFY_PEERS:
1087 /* Send gratuitous ARP to notify of link change */ 1105 /* Send gratuitous ARP to notify of link change */
1088 if (IN_DEV_ARP_NOTIFY(in_dev)) { 1106 inetdev_send_gratuitous_arp(dev, in_dev);
1089 struct in_ifaddr *ifa = in_dev->ifa_list;
1090
1091 if (ifa)
1092 arp_send(ARPOP_REQUEST, ETH_P_ARP,
1093 ifa->ifa_address, dev,
1094 ifa->ifa_address, NULL,
1095 dev->dev_addr, NULL);
1096 }
1097 break; 1107 break;
1098 case NETDEV_DOWN: 1108 case NETDEV_DOWN:
1099 ip_mc_down(in_dev); 1109 ip_mc_down(in_dev);
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c5af909cf701..3c8dfa16614d 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -505,7 +505,9 @@ restart:
505 } 505 }
506 506
507 rcu_read_unlock(); 507 rcu_read_unlock();
508 local_bh_disable();
508 inet_twsk_deschedule(tw, twdr); 509 inet_twsk_deschedule(tw, twdr);
510 local_bh_enable();
509 inet_twsk_put(tw); 511 inet_twsk_put(tw);
510 goto restart_rcu; 512 goto restart_rcu;
511 } 513 }
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index eb68a0e34e49..d1d0e2c256fc 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev
775 .fl4_dst = dst, 775 .fl4_dst = dst,
776 .fl4_src = tiph->saddr, 776 .fl4_src = tiph->saddr,
777 .fl4_tos = RT_TOS(tos), 777 .fl4_tos = RT_TOS(tos),
778 .proto = IPPROTO_GRE,
778 .fl_gre_key = tunnel->parms.o_key 779 .fl_gre_key = tunnel->parms.o_key
779 }; 780 };
780 if (ip_route_output_key(dev_net(dev), &rt, &fl)) { 781 if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -1764,4 +1765,4 @@ module_exit(ipgre_fini);
1764MODULE_LICENSE("GPL"); 1765MODULE_LICENSE("GPL");
1765MODULE_ALIAS_RTNL_LINK("gre"); 1766MODULE_ALIAS_RTNL_LINK("gre");
1766MODULE_ALIAS_RTNL_LINK("gretap"); 1767MODULE_ALIAS_RTNL_LINK("gretap");
1767MODULE_ALIAS("gre0"); 1768MODULE_ALIAS_NETDEV("gre0");
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index 988f52fba54a..a5f58e7cbb26 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void)
913module_init(ipip_init); 913module_init(ipip_init);
914module_exit(ipip_fini); 914module_exit(ipip_fini);
915MODULE_LICENSE("GPL"); 915MODULE_LICENSE("GPL");
916MODULE_ALIAS("tunl0"); 916MODULE_ALIAS_NETDEV("tunl0");
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 788a3e74834e..6ed6603c2f6d 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
2722 .destroy = ipv4_dst_destroy, 2722 .destroy = ipv4_dst_destroy,
2723 .check = ipv4_blackhole_dst_check, 2723 .check = ipv4_blackhole_dst_check,
2724 .default_mtu = ipv4_blackhole_default_mtu, 2724 .default_mtu = ipv4_blackhole_default_mtu,
2725 .default_advmss = ipv4_default_advmss,
2725 .update_pmtu = ipv4_rt_blackhole_update_pmtu, 2726 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2726}; 2727};
2727 2728
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eb7f82ebf4a3..65f6c0406245 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb,
1222 } 1222 }
1223 1223
1224 /* D-SACK for already forgotten data... Do dumb counting. */ 1224 /* D-SACK for already forgotten data... Do dumb counting. */
1225 if (dup_sack && 1225 if (dup_sack && tp->undo_marker && tp->undo_retrans &&
1226 !after(end_seq_0, prior_snd_una) && 1226 !after(end_seq_0, prior_snd_una) &&
1227 after(end_seq_0, tp->undo_marker)) 1227 after(end_seq_0, tp->undo_marker))
1228 tp->undo_retrans--; 1228 tp->undo_retrans--;
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1299 1299
1300 /* Account D-SACK for retransmitted packet. */ 1300 /* Account D-SACK for retransmitted packet. */
1301 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1301 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1302 if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) 1302 if (tp->undo_marker && tp->undo_retrans &&
1303 after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
1303 tp->undo_retrans--; 1304 tp->undo_retrans--;
1304 if (sacked & TCPCB_SACKED_ACKED) 1305 if (sacked & TCPCB_SACKED_ACKED)
1305 state->reord = min(fack_count, state->reord); 1306 state->reord = min(fack_count, state->reord);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 406f320336e6..dfa5beb0c1c8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2162 if (!tp->retrans_stamp) 2162 if (!tp->retrans_stamp)
2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2163 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2164 2164
2165 tp->undo_retrans++; 2165 tp->undo_retrans += tcp_skb_pcount(skb);
2166 2166
2167 /* snd_nxt is stored to detect loss of retransmitted segment, 2167 /* snd_nxt is stored to detect loss of retransmitted segment,
2168 * see tcp_input.c tcp_sacktag_write_queue(). 2168 * see tcp_input.c tcp_sacktag_write_queue().
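The TCP hunks above tighten the undo_retrans bookkeeping from both sides: tcp_output.c adds the number of segments in the retransmitted skb rather than 1, and tcp_input.c only subtracts for a D-SACK while an undo is actually in progress and the counter is still positive, so stray D-SACKs can no longer drive it toward zero early. A simplified, purely illustrative view of that accounting; only the field names follow the patch.

#include <stdio.h>

struct undo_state {
    unsigned int undo_marker;   /* nonzero while recovery can be undone */
    int undo_retrans;           /* retransmitted segments not yet proven spurious */
};

static void note_retransmit(struct undo_state *tp, int skb_pcount)
{
    tp->undo_retrans += skb_pcount;     /* was a bare "++" before the fix */
}

static void note_dsack(struct undo_state *tp)
{
    if (tp->undo_marker && tp->undo_retrans)
        tp->undo_retrans--;             /* guarded decrement */
}

int main(void)
{
    struct undo_state tp = { .undo_marker = 1, .undo_retrans = 0 };

    note_retransmit(&tp, 3);    /* one TSO skb carrying 3 segments */
    note_dsack(&tp);
    printf("undo_retrans = %d\n", tp.undo_retrans);   /* 2 */

    tp.undo_marker = 0;         /* recovery over, nothing to undo */
    note_dsack(&tp);            /* stray D-SACK is no longer counted */
    printf("undo_retrans = %d\n", tp.undo_retrans);   /* still 2 */
    return 0;
}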
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 4f4483e697bd..e528a42a52be 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -57,6 +57,7 @@
57MODULE_AUTHOR("Ville Nuorvala"); 57MODULE_AUTHOR("Ville Nuorvala");
58MODULE_DESCRIPTION("IPv6 tunneling device"); 58MODULE_DESCRIPTION("IPv6 tunneling device");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_ALIAS_NETDEV("ip6tnl0");
60 61
61#ifdef IP6_TNL_DEBUG 62#ifdef IP6_TNL_DEBUG
62#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) 63#define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c
index 09c88891a753..de338037a736 100644
--- a/net/ipv6/netfilter/ip6t_LOG.c
+++ b/net/ipv6/netfilter/ip6t_LOG.c
@@ -410,7 +410,7 @@ fallback:
410 if (p != NULL) { 410 if (p != NULL) {
411 sb_add(m, "%02x", *p++); 411 sb_add(m, "%02x", *p++);
412 for (i = 1; i < len; i++) 412 for (i = 1; i < len; i++)
413 sb_add(m, ":%02x", p[i]); 413 sb_add(m, ":%02x", *p++);
414 } 414 }
415 sb_add(m, " "); 415 sb_add(m, " ");
416 416
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 1c29f95695de..e7db7014e89f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = {
128 .destroy = ip6_dst_destroy, 128 .destroy = ip6_dst_destroy,
129 .check = ip6_dst_check, 129 .check = ip6_dst_check,
130 .default_mtu = ip6_blackhole_default_mtu, 130 .default_mtu = ip6_blackhole_default_mtu,
131 .default_advmss = ip6_default_advmss,
131 .update_pmtu = ip6_rt_blackhole_update_pmtu, 132 .update_pmtu = ip6_rt_blackhole_update_pmtu,
132}; 133};
133 134
@@ -738,8 +739,10 @@ restart:
738 739
739 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) 740 if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP))
740 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); 741 nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src);
741 else 742 else if (!(rt->dst.flags & DST_HOST))
742 nrt = rt6_alloc_clone(rt, &fl->fl6_dst); 743 nrt = rt6_alloc_clone(rt, &fl->fl6_dst);
744 else
745 goto out2;
743 746
744 dst_release(&rt->dst); 747 dst_release(&rt->dst);
745 rt = nrt ? : net->ipv6.ip6_null_entry; 748 rt = nrt ? : net->ipv6.ip6_null_entry;
@@ -2556,14 +2559,16 @@ static
2556int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, 2559int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write,
2557 void __user *buffer, size_t *lenp, loff_t *ppos) 2560 void __user *buffer, size_t *lenp, loff_t *ppos)
2558{ 2561{
2559 struct net *net = current->nsproxy->net_ns; 2562 struct net *net;
2560 int delay = net->ipv6.sysctl.flush_delay; 2563 int delay;
2561 if (write) { 2564 if (!write)
2562 proc_dointvec(ctl, write, buffer, lenp, ppos);
2563 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2564 return 0;
2565 } else
2566 return -EINVAL; 2565 return -EINVAL;
2566
2567 net = (struct net *)ctl->extra1;
2568 delay = net->ipv6.sysctl.flush_delay;
2569 proc_dointvec(ctl, write, buffer, lenp, ppos);
2570 fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net);
2571 return 0;
2567} 2572}
2568 2573
2569ctl_table ipv6_route_table_template[] = { 2574ctl_table ipv6_route_table_template[] = {
@@ -2650,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
2650 2655
2651 if (table) { 2656 if (table) {
2652 table[0].data = &net->ipv6.sysctl.flush_delay; 2657 table[0].data = &net->ipv6.sysctl.flush_delay;
2658 table[0].extra1 = net;
2653 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; 2659 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
2654 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; 2660 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
2655 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; 2661 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
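The ipv6 route-cache sysctl handler above stops deriving its namespace from the current process and instead reads the struct net pointer that ipv6_route_sysctl_init() now stores in the table entry's extra1 field. A stripped-down illustration of carrying per-instance context through a table entry; the fake_net/fake_ctl types below are stand-ins of mine for struct net and ctl_table.

#include <stdio.h>

struct fake_net {
    const char *name;
    int flush_delay;
};

struct fake_ctl {
    int *data;          /* value the sysctl reads or writes */
    void *extra1;       /* per-instance context, set at registration */
};

/* Same shape as the fixed handler: writes only, and the namespace
 * comes from the entry the handler was invoked on. */
static int flush_handler(struct fake_ctl *ctl, int write, int value)
{
    struct fake_net *net = ctl->extra1;

    if (!write)
        return -1;
    *ctl->data = value;
    printf("flushing routes for %s, delay %d\n", net->name, net->flush_delay);
    return 0;
}

int main(void)
{
    struct fake_net net = { .name = "netns0", .flush_delay = 0 };
    struct fake_ctl ctl = { .data = &net.flush_delay, .extra1 = &net };

    return flush_handler(&ctl, 1, 5) ? 1 : 0;
}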
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 8ce38f10a547..d2c16e10f650 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1290,4 +1290,4 @@ static int __init sit_init(void)
1290module_init(sit_init); 1290module_init(sit_init);
1291module_exit(sit_cleanup); 1291module_exit(sit_cleanup);
1292MODULE_LICENSE("GPL"); 1292MODULE_LICENSE("GPL");
1293MODULE_ALIAS("sit0"); 1293MODULE_ALIAS_NETDEV("sit0");
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 8acba456744e..7a10a8d1b2d0 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1229 } 1229 }
1230 mutex_unlock(&local->iflist_mtx); 1230 mutex_unlock(&local->iflist_mtx);
1231 unregister_netdevice_many(&unreg_list); 1231 unregister_netdevice_many(&unreg_list);
1232 list_del(&unreg_list);
1232} 1233}
1233 1234
1234static u32 ieee80211_idle_off(struct ieee80211_local *local, 1235static u32 ieee80211_idle_off(struct ieee80211_local *local,
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 45fbb9e33746..c9ceb4d57ab0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1033 if (is_multicast_ether_addr(hdr->addr1)) 1033 if (is_multicast_ether_addr(hdr->addr1))
1034 return; 1034 return;
1035 1035
1036 /*
1037 * In case we receive frames after disassociation.
1038 */
1039 if (!sdata->u.mgd.associated)
1040 return;
1041
1036 ieee80211_sta_reset_conn_monitor(sdata); 1042 ieee80211_sta_reset_conn_monitor(sdata);
1037} 1043}
1038 1044
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index cf68700abffa..d036597aabbe 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1210 switch (sdata->vif.type) { 1210 switch (sdata->vif.type) {
1211 case NL80211_IFTYPE_STATION: 1211 case NL80211_IFTYPE_STATION:
1212 changed |= BSS_CHANGED_ASSOC; 1212 changed |= BSS_CHANGED_ASSOC;
1213 mutex_lock(&sdata->u.mgd.mtx);
1213 ieee80211_bss_info_change_notify(sdata, changed); 1214 ieee80211_bss_info_change_notify(sdata, changed);
1215 mutex_unlock(&sdata->u.mgd.mtx);
1214 break; 1216 break;
1215 case NL80211_IFTYPE_ADHOC: 1217 case NL80211_IFTYPE_ADHOC:
1216 changed |= BSS_CHANGED_IBSS; 1218 changed |= BSS_CHANGED_IBSS;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 32fcbe290c04..4aa614b8a96a 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head,
133 133
134 /* Optimization: we don't need to hold module 134 /* Optimization: we don't need to hold module
135 reference here, since function can't sleep. --RR */ 135 reference here, since function can't sleep. --RR */
136repeat:
136 verdict = elem->hook(hook, skb, indev, outdev, okfn); 137 verdict = elem->hook(hook, skb, indev, outdev, okfn);
137 if (verdict != NF_ACCEPT) { 138 if (verdict != NF_ACCEPT) {
138#ifdef CONFIG_NETFILTER_DEBUG 139#ifdef CONFIG_NETFILTER_DEBUG
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head,
145#endif 146#endif
146 if (verdict != NF_REPEAT) 147 if (verdict != NF_REPEAT)
147 return verdict; 148 return verdict;
148 *i = (*i)->prev; 149 goto repeat;
149 } 150 }
150 } 151 }
151 return NF_ACCEPT; 152 return NF_ACCEPT;
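The nf_iterate() hunk stops rewinding the list iterator to honour NF_REPEAT and instead re-invokes the same hook directly with a goto. The control flow, reduced to a self-contained program; the verdict names echo netfilter's, everything else is illustrative.

#include <stdio.h>

enum verdict { ACCEPT, DROP, REPEAT };

/* A hook that asks to be re-run once: first call REPEAT, then ACCEPT. */
static enum verdict sample_hook(int *calls)
{
    return ++(*calls) < 2 ? REPEAT : ACCEPT;
}

/*
 * Same control flow as the fixed nf_iterate(): when a hook returns
 * REPEAT, call that same hook again instead of stepping the list
 * iterator backwards.
 */
static enum verdict run_hooks(int nhooks)
{
    for (int i = 0; i < nhooks; i++) {
        int calls = 0;
        enum verdict v;
repeat:
        v = sample_hook(&calls);
        if (v != ACCEPT) {
            if (v != REPEAT)
                return v;       /* DROP and friends end the traversal */
            goto repeat;        /* re-run only this hook */
        }
    }
    return ACCEPT;
}

int main(void)
{
    printf("final verdict: %d\n", run_hooks(3));
    return 0;
}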
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 22f7ad5101ab..ba98e1308f3c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
808 dest->u_threshold = udest->u_threshold; 808 dest->u_threshold = udest->u_threshold;
809 dest->l_threshold = udest->l_threshold; 809 dest->l_threshold = udest->l_threshold;
810 810
811 spin_lock(&dest->dst_lock); 811 spin_lock_bh(&dest->dst_lock);
812 ip_vs_dst_reset(dest); 812 ip_vs_dst_reset(dest);
813 spin_unlock(&dest->dst_lock); 813 spin_unlock_bh(&dest->dst_lock);
814 814
815 if (add) 815 if (add)
816 ip_vs_new_estimator(&dest->stats); 816 ip_vs_new_estimator(&dest->stats);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index b07393eab88e..91816998ed86 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister);
85 85
86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) 86int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger)
87{ 87{
88 if (pf >= ARRAY_SIZE(nf_loggers))
89 return -EINVAL;
88 mutex_lock(&nf_log_mutex); 90 mutex_lock(&nf_log_mutex);
89 if (__find_logger(pf, logger->name) == NULL) { 91 if (__find_logger(pf, logger->name) == NULL) {
90 mutex_unlock(&nf_log_mutex); 92 mutex_unlock(&nf_log_mutex);
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf);
98 100
99void nf_log_unbind_pf(u_int8_t pf) 101void nf_log_unbind_pf(u_int8_t pf)
100{ 102{
103 if (pf >= ARRAY_SIZE(nf_loggers))
104 return;
101 mutex_lock(&nf_log_mutex); 105 mutex_lock(&nf_log_mutex);
102 rcu_assign_pointer(nf_loggers[pf], NULL); 106 rcu_assign_pointer(nf_loggers[pf], NULL);
103 mutex_unlock(&nf_log_mutex); 107 mutex_unlock(&nf_log_mutex);
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c
index 4d87befb04c0..474d621cbc2e 100644
--- a/net/netfilter/nf_tproxy_core.c
+++ b/net/netfilter/nf_tproxy_core.c
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb)
28 skb->destructor = NULL; 28 skb->destructor = NULL;
29 29
30 if (sk) 30 if (sk)
31 nf_tproxy_put_sock(sk); 31 sock_put(sk);
32} 32}
33 33
34/* consumes sk */ 34/* consumes sk */
35int 35void
36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) 36nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
37{ 37{
38 bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? 38 /* assigning tw sockets complicates things; most
39 inet_twsk(sk)->tw_transparent : 39 * skb->sk->X checks would have to test sk->sk_state first */
40 inet_sk(sk)->transparent; 40 if (sk->sk_state == TCP_TIME_WAIT) {
41 41 inet_twsk_put(inet_twsk(sk));
42 if (transparent) { 42 return;
43 skb_orphan(skb); 43 }
44 skb->sk = sk; 44
45 skb->destructor = nf_tproxy_destructor; 45 skb_orphan(skb);
46 return 1; 46 skb->sk = sk;
47 } else 47 skb->destructor = nf_tproxy_destructor;
48 nf_tproxy_put_sock(sk);
49
50 return 0;
51} 48}
52EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); 49EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock);
53 50
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 640678f47a2a..dcfd57eb9d02 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -33,6 +33,20 @@
33#include <net/netfilter/nf_tproxy_core.h> 33#include <net/netfilter/nf_tproxy_core.h>
34#include <linux/netfilter/xt_TPROXY.h> 34#include <linux/netfilter/xt_TPROXY.h>
35 35
36static bool tproxy_sk_is_transparent(struct sock *sk)
37{
38 if (sk->sk_state != TCP_TIME_WAIT) {
39 if (inet_sk(sk)->transparent)
40 return true;
41 sock_put(sk);
42 } else {
43 if (inet_twsk(sk)->tw_transparent)
44 return true;
45 inet_twsk_put(inet_twsk(sk));
46 }
47 return false;
48}
49
36static inline __be32 50static inline __be32
37tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) 51tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
38{ 52{
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
141 skb->dev, NFT_LOOKUP_LISTENER); 155 skb->dev, NFT_LOOKUP_LISTENER);
142 156
143 /* NOTE: assign_sock consumes our sk reference */ 157 /* NOTE: assign_sock consumes our sk reference */
144 if (sk && nf_tproxy_assign_sock(skb, sk)) { 158 if (sk && tproxy_sk_is_transparent(sk)) {
145 /* This should be in a separate target, but we don't do multiple 159 /* This should be in a separate target, but we don't do multiple
146 targets on the same rule yet */ 160 targets on the same rule yet */
147 skb->mark = (skb->mark & ~mark_mask) ^ mark_value; 161 skb->mark = (skb->mark & ~mark_mask) ^ mark_value;
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport,
149 pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", 163 pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n",
150 iph->protocol, &iph->daddr, ntohs(hp->dest), 164 iph->protocol, &iph->daddr, ntohs(hp->dest),
151 &laddr, ntohs(lport), skb->mark); 165 &laddr, ntohs(lport), skb->mark);
166
167 nf_tproxy_assign_sock(skb, sk);
152 return NF_ACCEPT; 168 return NF_ACCEPT;
153 } 169 }
154 170
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
306 par->in, NFT_LOOKUP_LISTENER); 322 par->in, NFT_LOOKUP_LISTENER);
307 323
308 /* NOTE: assign_sock consumes our sk reference */ 324 /* NOTE: assign_sock consumes our sk reference */
309 if (sk && nf_tproxy_assign_sock(skb, sk)) { 325 if (sk && tproxy_sk_is_transparent(sk)) {
310 /* This should be in a separate target, but we don't do multiple 326 /* This should be in a separate target, but we don't do multiple
311 targets on the same rule yet */ 327 targets on the same rule yet */
312 skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; 328 skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value;
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
314 pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", 330 pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n",
315 tproto, &iph->saddr, ntohs(hp->source), 331 tproto, &iph->saddr, ntohs(hp->source),
316 laddr, ntohs(lport), skb->mark); 332 laddr, ntohs(lport), skb->mark);
333
334 nf_tproxy_assign_sock(skb, sk);
317 return NF_ACCEPT; 335 return NF_ACCEPT;
318 } 336 }
319 337
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 00d6ae838303..9cc46356b577 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -35,6 +35,15 @@
35#include <net/netfilter/nf_conntrack.h> 35#include <net/netfilter/nf_conntrack.h>
36#endif 36#endif
37 37
38static void
39xt_socket_put_sk(struct sock *sk)
40{
41 if (sk->sk_state == TCP_TIME_WAIT)
42 inet_twsk_put(inet_twsk(sk));
43 else
44 sock_put(sk);
45}
46
38static int 47static int
39extract_icmp4_fields(const struct sk_buff *skb, 48extract_icmp4_fields(const struct sk_buff *skb,
40 u8 *protocol, 49 u8 *protocol,
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
164 (sk->sk_state == TCP_TIME_WAIT && 173 (sk->sk_state == TCP_TIME_WAIT &&
165 inet_twsk(sk)->tw_transparent)); 174 inet_twsk(sk)->tw_transparent));
166 175
167 nf_tproxy_put_sock(sk); 176 xt_socket_put_sk(sk);
168 177
169 if (wildcard || !transparent) 178 if (wildcard || !transparent)
170 sk = NULL; 179 sk = NULL;
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par)
298 (sk->sk_state == TCP_TIME_WAIT && 307 (sk->sk_state == TCP_TIME_WAIT &&
299 inet_twsk(sk)->tw_transparent)); 308 inet_twsk(sk)->tw_transparent));
300 309
301 nf_tproxy_put_sock(sk); 310 xt_socket_put_sk(sk);
302 311
303 if (wildcard || !transparent) 312 if (wildcard || !transparent)
304 sk = NULL; 313 sk = NULL;
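Both the TPROXY target and the socket match above now drop their socket reference according to the socket's state: a TIME_WAIT entry is a smaller minisocket with its own reference counting and goes back through inet_twsk_put(), while everything else uses sock_put(). A compile-only sketch of that dispatch with the two kernel put functions replaced by stubs; every name below is a stand-in.

#include <stdio.h>

enum { ESTABLISHED, TIME_WAIT };

struct fake_sock {
    int state;
};

static void full_sock_put(struct fake_sock *sk)
{
    printf("sock_put() for state %d\n", sk->state);
}

static void timewait_put(struct fake_sock *sk)
{
    printf("inet_twsk_put() for state %d\n", sk->state);
}

/* Same shape as xt_socket_put_sk() in the hunk above. */
static void put_sk(struct fake_sock *sk)
{
    if (sk->state == TIME_WAIT)
        timewait_put(sk);       /* minisocket: separate refcount */
    else
        full_sock_put(sk);      /* ordinary struct sock */
}

int main(void)
{
    struct fake_sock est = { ESTABLISHED }, tw = { TIME_WAIT };

    put_sk(&est);
    put_sk(&tw);
    return 0;
}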
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 478181d53c55..1f924595bdef 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1407 int noblock = flags&MSG_DONTWAIT; 1407 int noblock = flags&MSG_DONTWAIT;
1408 size_t copied; 1408 size_t copied;
1409 struct sk_buff *skb, *data_skb; 1409 struct sk_buff *skb, *data_skb;
1410 int err; 1410 int err, ret;
1411 1411
1412 if (flags&MSG_OOB) 1412 if (flags&MSG_OOB)
1413 return -EOPNOTSUPP; 1413 return -EOPNOTSUPP;
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
1470 1470
1471 skb_free_datagram(sk, skb); 1471 skb_free_datagram(sk, skb);
1472 1472
1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) 1473 if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
1474 netlink_dump(sk); 1474 ret = netlink_dump(sk);
1475 if (ret) {
1476 sk->sk_err = ret;
1477 sk->sk_error_report(sk);
1478 }
1479 }
1475 1480
1476 scm_recv(sock, msg, siocb->scm, flags); 1481 scm_recv(sock, msg, siocb->scm, flags);
1477out: 1482out:
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1736 struct netlink_callback *cb; 1741 struct netlink_callback *cb;
1737 struct sock *sk; 1742 struct sock *sk;
1738 struct netlink_sock *nlk; 1743 struct netlink_sock *nlk;
1744 int ret;
1739 1745
1740 cb = kzalloc(sizeof(*cb), GFP_KERNEL); 1746 cb = kzalloc(sizeof(*cb), GFP_KERNEL);
1741 if (cb == NULL) 1747 if (cb == NULL)
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1764 nlk->cb = cb; 1770 nlk->cb = cb;
1765 mutex_unlock(nlk->cb_mutex); 1771 mutex_unlock(nlk->cb_mutex);
1766 1772
1767 netlink_dump(sk); 1773 ret = netlink_dump(sk);
1774
1768 sock_put(sk); 1775 sock_put(sk);
1769 1776
1777 if (ret)
1778 return ret;
1779
1770 /* We successfully started a dump, by returning -EINTR we 1780 /* We successfully started a dump, by returning -EINTR we
1771 * signal not to send ACK even if it was requested. 1781 * signal not to send ACK even if it was requested.
1772 */ 1782 */
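
The af_netlink.c hunks above stop ignoring netlink_dump()'s return value: the recvmsg path records the failure in sk->sk_err and calls sk_error_report(), and netlink_dump_start() returns the failure to its caller instead of the usual "dump started, suppress the ACK" result. A small userspace sketch of that error-propagation pattern follows; names are hypothetical and sign handling is simplified.

#include <errno.h>
#include <stdio.h>

struct fake_sock {
        int sk_err;             /* pending asynchronous error, as on struct sock */
};

/* Stand-in for netlink_dump(): may fail, e.g. with -ENOBUFS. */
static int do_dump(int simulate_failure)
{
        return simulate_failure ? -ENOBUFS : 0;
}

/* recvmsg-style caller: record the failure so the socket owner sees it. */
static void recv_path(struct fake_sock *sk)
{
        int ret = do_dump(1);

        if (ret) {
                sk->sk_err = ret;
                printf("recorded dump error %d on the socket\n", sk->sk_err);
        }
}

/* dump_start-style caller: propagate the failure instead of always
 * reporting that the dump was started. */
static int start_path(void)
{
        int ret = do_dump(0);

        if (ret)
                return ret;
        return -EINTR;          /* "started, don't send an ACK" */
}

int main(void)
{
        struct fake_sock sk = { 0 };

        recv_path(&sk);
        printf("start_path() -> %d\n", start_path());
        return 0;
}
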
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index 71f373c421bc..c47a511f203d 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
551 if (conn->c_loopback 551 if (conn->c_loopback
552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 552 && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 553 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
554 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 554 scat = &rm->data.op_sg[sg];
555 ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
556 ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
557 return ret;
555 } 558 }
556 559
557 /* FIXME we may overallocate here */ 560 /* FIXME we may overallocate here */
diff --git a/net/rds/loop.c b/net/rds/loop.c
index aeec1d483b17..bca6761a3ca2 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
61 unsigned int hdr_off, unsigned int sg, 61 unsigned int hdr_off, unsigned int sg,
62 unsigned int off) 62 unsigned int off)
63{ 63{
64 struct scatterlist *sgp = &rm->data.op_sg[sg];
65 int ret = sizeof(struct rds_header) +
66 be32_to_cpu(rm->m_inc.i_hdr.h_len);
67
64 /* Do not send cong updates to loopback */ 68 /* Do not send cong updates to loopback */
65 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { 69 if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
66 rds_cong_map_updated(conn->c_fcong, ~(u64) 0); 70 rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
67 return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; 71 ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off);
72 goto out;
68 } 73 }
69 74
70 BUG_ON(hdr_off || sg || off); 75 BUG_ON(hdr_off || sg || off);
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
80 NULL); 85 NULL);
81 86
82 rds_inc_put(&rm->m_inc); 87 rds_inc_put(&rm->m_inc);
83 88out:
84 return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); 89 return ret;
85} 90}
86 91
87/* 92/*
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
index 89315009bab1..1a2b0633fece 100644
--- a/net/rxrpc/ar-input.c
+++ b/net/rxrpc/ar-input.c
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
423 goto protocol_error; 423 goto protocol_error;
424 } 424 }
425 425
426 case RXRPC_PACKET_TYPE_ACKALL:
426 case RXRPC_PACKET_TYPE_ACK: 427 case RXRPC_PACKET_TYPE_ACK:
427 /* ACK processing is done in process context */ 428 /* ACK processing is done in process context */
428 read_lock_bh(&call->state_lock); 429 read_lock_bh(&call->state_lock);
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c
index 5ee16f0353fe..d763793d39de 100644
--- a/net/rxrpc/ar-key.c
+++ b/net/rxrpc/ar-key.c
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr,
89 return ret; 89 return ret;
90 90
91 plen -= sizeof(*token); 91 plen -= sizeof(*token);
92 token = kmalloc(sizeof(*token), GFP_KERNEL); 92 token = kzalloc(sizeof(*token), GFP_KERNEL);
93 if (!token) 93 if (!token)
94 return -ENOMEM; 94 return -ENOMEM;
95 95
96 token->kad = kmalloc(plen, GFP_KERNEL); 96 token->kad = kzalloc(plen, GFP_KERNEL);
97 if (!token->kad) { 97 if (!token->kad) {
98 kfree(token); 98 kfree(token);
99 return -ENOMEM; 99 return -ENOMEM;
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen)
731 goto error; 731 goto error;
732 732
733 ret = -ENOMEM; 733 ret = -ENOMEM;
734 token = kmalloc(sizeof(*token), GFP_KERNEL); 734 token = kzalloc(sizeof(*token), GFP_KERNEL);
735 if (!token) 735 if (!token)
736 goto error; 736 goto error;
737 token->kad = kmalloc(plen, GFP_KERNEL); 737 token->kad = kzalloc(plen, GFP_KERNEL);
738 if (!token->kad) 738 if (!token->kad)
739 goto error_free; 739 goto error_free;
740 740
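
The ar-key.c hunks above switch the token allocations from kmalloc() to kzalloc(), so any token field the instantiation code does not explicitly set starts out zeroed rather than holding stale memory. A userspace analogue of the same idea, using calloc() in place of kzalloc(); the struct and names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct token {
        int kvno;
        void *kad;      /* must be NULL until a payload is attached */
};

static struct token *alloc_token_zeroed(void)
{
        /* calloc() plays the role of kzalloc(): memory arrives zero-filled. */
        return calloc(1, sizeof(struct token));
}

int main(void)
{
        struct token *tok = alloc_token_zeroed();

        if (!tok)
                return 1;
        /* Safe even before anything is filled in: kad is NULL, not junk. */
        printf("kvno=%d kad=%p\n", tok->kvno, tok->kad);
        free(tok);
        return 0;
}
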
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 34dc598440a2..1bc698039ae2 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev)
839 839
840 list_add(&dev->unreg_list, &single); 840 list_add(&dev->unreg_list, &single);
841 dev_deactivate_many(&single); 841 dev_deactivate_many(&single);
842 list_del(&single);
842} 843}
843 844
844static void dev_init_scheduler_queue(struct net_device *dev, 845static void dev_init_scheduler_queue(struct net_device *dev,
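
The dev_deactivate() hunk above adds list_del(&single): the device was linked onto a list head living on this function's stack, so the list must be torn down again before the function returns or dev->unreg_list is left pointing at a dead stack frame. A minimal intrusive-list sketch of the same pitfall; this is a userspace analogue with hypothetical names, and unlike the kernel's list_del() it re-initialises the removed entry instead of poisoning it.

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

static void list_del(struct list_head *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
        list_init(entry);
}

struct device {
        struct list_head unreg_list;
};

static struct device dev;               /* outlives the function below */

static void deactivate_one(void)
{
        struct list_head single;        /* stack-scoped list head */

        list_init(&single);
        list_add(&dev.unreg_list, &single);
        /* ... operate on every entry of "single" here ... */
        list_del(&single);              /* the fix: unlink before "single" dies */
}

int main(void)
{
        list_init(&dev.unreg_list);
        deactivate_one();
        /* Without the list_del() above, these pointers would still reference
         * the now-dead stack frame of deactivate_one(). */
        printf("unreg_list self-linked: %d\n",
               dev.unreg_list.next == &dev.unreg_list);
        return 0;
}
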
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 2cc46f0962ca..b23428f3c0dd 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc,
2029 *errp = sctp_make_op_error_fixed(asoc, chunk); 2029 *errp = sctp_make_op_error_fixed(asoc, chunk);
2030 2030
2031 if (*errp) { 2031 if (*errp) {
2032 sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, 2032 if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM,
2033 WORD_ROUND(ntohs(param.p->length))); 2033 WORD_ROUND(ntohs(param.p->length))))
2034 sctp_addto_chunk_fixed(*errp, 2034 sctp_addto_chunk_fixed(*errp,
2035 WORD_ROUND(ntohs(param.p->length)), 2035 WORD_ROUND(ntohs(param.p->length)),
2036 param.v); 2036 param.v);
2037 } else { 2037 } else {
2038 /* If there is no memory for generating the ERROR 2038 /* If there is no memory for generating the ERROR
2039 * report as specified, an ABORT will be triggered 2039 * report as specified, an ABORT will be triggered
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 243fc09b164e..59e599498e37 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task)
252 252
253/* 253/*
254 * Mark an RPC call as having completed by clearing the 'active' bit 254 * Mark an RPC call as having completed by clearing the 'active' bit
255 * and then waking up all tasks that were sleeping.
255 */ 256 */
256static void rpc_mark_complete_task(struct rpc_task *task) 257static int rpc_complete_task(struct rpc_task *task)
257{ 258{
258 smp_mb__before_clear_bit(); 259 void *m = &task->tk_runstate;
260 wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
261 struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
262 unsigned long flags;
263 int ret;
264
265 spin_lock_irqsave(&wq->lock, flags);
259 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); 266 clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
260 smp_mb__after_clear_bit(); 267 ret = atomic_dec_and_test(&task->tk_count);
261 wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); 268 if (waitqueue_active(wq))
269 __wake_up_locked_key(wq, TASK_NORMAL, &k);
270 spin_unlock_irqrestore(&wq->lock, flags);
271 return ret;
262} 272}
263 273
264/* 274/*
265 * Allow callers to wait for completion of an RPC call 275 * Allow callers to wait for completion of an RPC call
276 *
277 * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
278 * to enforce taking of the wq->lock and hence avoid races with
279 * rpc_complete_task().
266 */ 280 */
267int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) 281int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *))
268{ 282{
269 if (action == NULL) 283 if (action == NULL)
270 action = rpc_wait_bit_killable; 284 action = rpc_wait_bit_killable;
271 return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, 285 return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
272 action, TASK_KILLABLE); 286 action, TASK_KILLABLE);
273} 287}
274EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); 288EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work)
857 rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); 871 rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
858} 872}
859 873
860void rpc_put_task(struct rpc_task *task) 874static void rpc_release_resources_task(struct rpc_task *task)
861{ 875{
862 if (!atomic_dec_and_test(&task->tk_count))
863 return;
864 /* Release resources */
865 if (task->tk_rqstp) 876 if (task->tk_rqstp)
866 xprt_release(task); 877 xprt_release(task);
867 if (task->tk_msg.rpc_cred) 878 if (task->tk_msg.rpc_cred)
868 put_rpccred(task->tk_msg.rpc_cred); 879 put_rpccred(task->tk_msg.rpc_cred);
869 rpc_task_release_client(task); 880 rpc_task_release_client(task);
870 if (task->tk_workqueue != NULL) { 881}
882
883static void rpc_final_put_task(struct rpc_task *task,
884 struct workqueue_struct *q)
885{
886 if (q != NULL) {
871 INIT_WORK(&task->u.tk_work, rpc_async_release); 887 INIT_WORK(&task->u.tk_work, rpc_async_release);
872 queue_work(task->tk_workqueue, &task->u.tk_work); 888 queue_work(q, &task->u.tk_work);
873 } else 889 } else
874 rpc_free_task(task); 890 rpc_free_task(task);
875} 891}
892
893static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
894{
895 if (atomic_dec_and_test(&task->tk_count)) {
896 rpc_release_resources_task(task);
897 rpc_final_put_task(task, q);
898 }
899}
900
901void rpc_put_task(struct rpc_task *task)
902{
903 rpc_do_put_task(task, NULL);
904}
876EXPORT_SYMBOL_GPL(rpc_put_task); 905EXPORT_SYMBOL_GPL(rpc_put_task);
877 906
907void rpc_put_task_async(struct rpc_task *task)
908{
909 rpc_do_put_task(task, task->tk_workqueue);
910}
911EXPORT_SYMBOL_GPL(rpc_put_task_async);
912
878static void rpc_release_task(struct rpc_task *task) 913static void rpc_release_task(struct rpc_task *task)
879{ 914{
880 dprintk("RPC: %5u release task\n", task->tk_pid); 915 dprintk("RPC: %5u release task\n", task->tk_pid);
881 916
882 BUG_ON (RPC_IS_QUEUED(task)); 917 BUG_ON (RPC_IS_QUEUED(task));
883 918
884 /* Wake up anyone who is waiting for task completion */ 919 rpc_release_resources_task(task);
885 rpc_mark_complete_task(task);
886 920
887 rpc_put_task(task); 921 /*
922 * Note: at this point we have been removed from rpc_clnt->cl_tasks,
923 * so it should be safe to use task->tk_count as a test for whether
924 * or not any other processes still hold references to our rpc_task.
925 */
926 if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
927 /* Wake up anyone who may be waiting for task completion */
928 if (!rpc_complete_task(task))
929 return;
930 } else {
931 if (!atomic_dec_and_test(&task->tk_count))
932 return;
933 }
934 rpc_final_put_task(task, task->tk_workqueue);
888} 935}
889 936
890int rpciod_up(void) 937int rpciod_up(void)
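
The sched.c hunks above replace the lock-free wake_up_bit() completion with rpc_complete_task(), which clears RPC_TASK_ACTIVE, drops the reference and wakes waiters while holding the wait queue's own lock, and makes waiters go through out_of_line_wait_on_bit() so they take that same lock. The sketch below is a loose pthread analogue of doing the state change, refcount drop and wakeup under the lock the waiters sleep on; names are hypothetical and the demonstration is single-threaded only.

#include <pthread.h>
#include <stdio.h>

struct task {
        pthread_mutex_t lock;   /* plays the role of the wait-queue lock */
        pthread_cond_t  done;
        int active;             /* RPC_TASK_ACTIVE analogue */
        int refcount;
};

/* Completion side: flip the state and decide "last reference" atomically
 * with respect to the waiters. */
static int complete_task(struct task *t)
{
        int last;

        pthread_mutex_lock(&t->lock);
        t->active = 0;
        last = (--t->refcount == 0);
        pthread_cond_broadcast(&t->done);
        pthread_mutex_unlock(&t->lock);
        return last;
}

/* Waiter side: sleep under the same lock, so the wakeup cannot be missed
 * and the task cannot be freed underneath the waiter. */
static void wait_for_completion(struct task *t)
{
        pthread_mutex_lock(&t->lock);
        while (t->active)
                pthread_cond_wait(&t->done, &t->lock);
        pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct task t = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
                .done = PTHREAD_COND_INITIALIZER,
                .active = 1,
                .refcount = 2,  /* one for the runner, one for the waiter */
        };

        printf("last reference dropped: %d\n", complete_task(&t));
        wait_for_completion(&t);
        printf("refcount now %d\n", t.refcount);
        return 0;
}
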
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 9df1eadc912a..1a10dcd999ea 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1335 p, 0, length, DMA_FROM_DEVICE); 1335 p, 0, length, DMA_FROM_DEVICE);
1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { 1336 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1337 put_page(p); 1337 put_page(p);
1338 svc_rdma_put_context(ctxt, 1);
1338 return; 1339 return;
1339 } 1340 }
1340 atomic_inc(&xprt->sc_dma_used); 1341 atomic_inc(&xprt->sc_dma_used);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c431f5a57960..be96d429b475 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt,
1631 } 1631 }
1632 xs_reclassify_socket(family, sock); 1632 xs_reclassify_socket(family, sock);
1633 1633
1634 if (xs_bind(transport, sock)) { 1634 err = xs_bind(transport, sock);
1635 if (err) {
1635 sock_release(sock); 1636 sock_release(sock);
1636 goto out; 1637 goto out;
1637 } 1638 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index dd419d286204..437a99e560e1 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
1724 1724
1725 msg->msg_namelen = 0; 1725 msg->msg_namelen = 0;
1726 1726
1727 mutex_lock(&u->readlock); 1727 err = mutex_lock_interruptible(&u->readlock);
1728 if (err) {
1729 err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
1730 goto out;
1731 }
1728 1732
1729 skb = skb_recv_datagram(sk, flags, noblock, &err); 1733 skb = skb_recv_datagram(sk, flags, noblock, &err);
1730 if (!skb) { 1734 if (!skb) {
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1864 memset(&tmp_scm, 0, sizeof(tmp_scm)); 1868 memset(&tmp_scm, 0, sizeof(tmp_scm));
1865 } 1869 }
1866 1870
1867 mutex_lock(&u->readlock); 1871 err = mutex_lock_interruptible(&u->readlock);
1872 if (err) {
1873 err = sock_intr_errno(timeo);
1874 goto out;
1875 }
1868 1876
1869 do { 1877 do {
1870 int chunk; 1878 int chunk;
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
1895 1903
1896 timeo = unix_stream_data_wait(sk, timeo); 1904 timeo = unix_stream_data_wait(sk, timeo);
1897 1905
1898 if (signal_pending(current)) { 1906 if (signal_pending(current)
1907 || mutex_lock_interruptible(&u->readlock)) {
1899 err = sock_intr_errno(timeo); 1908 err = sock_intr_errno(timeo);
1900 goto out; 1909 goto out;
1901 } 1910 }
1902 mutex_lock(&u->readlock); 1911
1903 continue; 1912 continue;
1904 unlock: 1913 unlock:
1905 unix_state_unlock(sk); 1914 unix_state_unlock(sk);
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c
index 3e5dbd4e4cd5..d112f038edf0 100644
--- a/net/wireless/wext-compat.c
+++ b/net/wireless/wext-compat.c
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev,
802 return freq; 802 return freq;
803 if (freq == 0) 803 if (freq == 0)
804 return -EINVAL; 804 return -EINVAL;
805 wdev_lock(wdev);
806 mutex_lock(&rdev->devlist_mtx); 805 mutex_lock(&rdev->devlist_mtx);
806 wdev_lock(wdev);
807 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); 807 err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT);
808 mutex_unlock(&rdev->devlist_mtx);
809 wdev_unlock(wdev); 808 wdev_unlock(wdev);
809 mutex_unlock(&rdev->devlist_mtx);
810 return err; 810 return err;
811 default: 811 default:
812 return -EOPNOTSUPP; 812 return -EOPNOTSUPP;
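
The cfg80211_wext_siwfreq() hunk above takes rdev->devlist_mtx before wdev_lock() and releases them in the reverse order, presumably so every path acquires the two locks in one consistent order and an ABBA deadlock cannot form. A generic pthread sketch of that fixed-ordering discipline; all names are hypothetical, this is not cfg80211 code.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlist_mtx = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t wdev_mtx    = PTHREAD_MUTEX_INITIALIZER; /* inner */

/* Every path that needs both locks takes them outer-then-inner and releases
 * them inner-then-outer, so two threads can never wait on each other. */
static void set_freq(int freq)
{
        pthread_mutex_lock(&devlist_mtx);
        pthread_mutex_lock(&wdev_mtx);
        printf("setting frequency %d under both locks\n", freq);
        pthread_mutex_unlock(&wdev_mtx);
        pthread_mutex_unlock(&devlist_mtx);
}

int main(void)
{
        set_freq(2412);
        return 0;
}
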
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 8b3ef404c794..6459588befc3 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1340 default: 1340 default:
1341 BUG(); 1341 BUG();
1342 } 1342 }
1343 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); 1343 xdst = dst_alloc(dst_ops);
1344 xfrm_policy_put_afinfo(afinfo); 1344 xfrm_policy_put_afinfo(afinfo);
1345 1345
1346 xdst->flo.ops = &xfrm_bundle_fc_ops; 1346 if (likely(xdst))
1347 xdst->flo.ops = &xfrm_bundle_fc_ops;
1348 else
1349 xdst = ERR_PTR(-ENOBUFS);
1347 1350
1348 return xdst; 1351 return xdst;
1349} 1352}
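
The xfrm_alloc_dst() hunk above fixes a path where the old code substituted ERR_PTR(-ENOBUFS) for a failed dst_alloc() and then dereferenced the result anyway; the bundle is now only initialised when the allocation actually succeeded. Below is a userspace sketch of that check-before-dereference pattern with a simplified error-pointer scheme; the names are hypothetical.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define IS_ERR(p)       ((uintptr_t)(void *)(p) >= (uintptr_t)-4095)

struct bundle {
        const char *ops;
};

static struct bundle *alloc_bundle(int fail)
{
        return fail ? NULL : calloc(1, sizeof(struct bundle));
}

static struct bundle *make_bundle(int fail)
{
        struct bundle *b = alloc_bundle(fail);

        /* Only initialise the object if we actually got one. */
        if (b)
                b->ops = "bundle-flow-cache-ops";
        else
                b = ERR_PTR(-ENOBUFS);
        return b;
}

int main(void)
{
        struct bundle *b = make_bundle(1);

        if (IS_ERR(b)) {
                printf("allocation failed: %ld\n", (long)(intptr_t)b);
        } else {
                printf("ops = %s\n", b->ops);
                free(b);
        }
        return 0;
}
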
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
index c9a16abacab4..291228e25984 100644
--- a/scripts/basic/fixdep.c
+++ b/scripts/basic/fixdep.c
@@ -309,12 +309,18 @@ static void do_config_file(const char *filename)
309 close(fd); 309 close(fd);
310} 310}
311 311
312/*
313 * Important: The below generated source_foo.o and deps_foo.o variable
314 * assignments are parsed not only by make, but also by the rather simple
315 * parser in scripts/mod/sumversion.c.
316 */
312static void parse_dep_file(void *map, size_t len) 317static void parse_dep_file(void *map, size_t len)
313{ 318{
314 char *m = map; 319 char *m = map;
315 char *end = m + len; 320 char *end = m + len;
316 char *p; 321 char *p;
317 char s[PATH_MAX]; 322 char s[PATH_MAX];
323 int first;
318 324
319 p = strchr(m, ':'); 325 p = strchr(m, ':');
320 if (!p) { 326 if (!p) {
@@ -322,11 +328,11 @@ static void parse_dep_file(void *map, size_t len)
322 exit(1); 328 exit(1);
323 } 329 }
324 memcpy(s, m, p-m); s[p-m] = 0; 330 memcpy(s, m, p-m); s[p-m] = 0;
325 printf("deps_%s := \\\n", target);
326 m = p+1; 331 m = p+1;
327 332
328 clear_config(); 333 clear_config();
329 334
335 first = 1;
330 while (m < end) { 336 while (m < end) {
331 while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) 337 while (m < end && (*m == ' ' || *m == '\\' || *m == '\n'))
332 m++; 338 m++;
@@ -340,9 +346,20 @@ static void parse_dep_file(void *map, size_t len)
340 if (strrcmp(s, "include/generated/autoconf.h") && 346 if (strrcmp(s, "include/generated/autoconf.h") &&
341 strrcmp(s, "arch/um/include/uml-config.h") && 347 strrcmp(s, "arch/um/include/uml-config.h") &&
342 strrcmp(s, ".ver")) { 348 strrcmp(s, ".ver")) {
343 printf(" %s \\\n", s); 349 /*
350 * Do not list the source file as dependency, so that
351 * kbuild is not confused if a .c file is rewritten
352 * into .S or vice versa. Storing it in source_* is
353 * needed for modpost to compute srcversions.
354 */
355 if (first) {
356 printf("source_%s := %s\n\n", target, s);
357 printf("deps_%s := \\\n", target);
358 } else
359 printf(" %s \\\n", s);
344 do_config_file(s); 360 do_config_file(s);
345 } 361 }
362 first = 0;
346 m = p + 1; 363 m = p + 1;
347 } 364 }
348 printf("\n%s: $(deps_%s)\n\n", target, target); 365 printf("\n%s: $(deps_%s)\n\n", target, target);
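
The parse_dep_file() hunks above change the generated fragment so the first prerequisite (the source file itself) is emitted as source_<target> and kept out of deps_<target>, which is what the sumversion.c change below starts parsing. A tiny standalone program that reproduces the new output shape for a made-up target; the target and dependency names are invented for illustration.

#include <stdio.h>

int main(void)
{
        const char *target = "kernel/foo.o";            /* hypothetical */
        const char *deps[] = {
                "kernel/foo.c",                         /* the source file */
                "include/linux/kernel.h",
                "include/linux/module.h",
        };
        int i, first = 1;
        int n = (int)(sizeof(deps) / sizeof(deps[0]));

        for (i = 0; i < n; i++) {
                if (first) {
                        printf("source_%s := %s\n\n", target, deps[i]);
                        printf("deps_%s := \\\n", target);
                } else {
                        printf(" %s \\\n", deps[i]);
                }
                first = 0;
        }
        printf("\n%s: $(deps_%s)\n\n", target, target);
        return 0;
}
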
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
index ecf9c7dc1825..9dfcd6d988da 100644
--- a/scripts/mod/sumversion.c
+++ b/scripts/mod/sumversion.c
@@ -300,8 +300,8 @@ static int is_static_library(const char *objfile)
300 return 0; 300 return 0;
301} 301}
302 302
303/* We have dir/file.o. Open dir/.file.o.cmd, look for deps_ line to 303/* We have dir/file.o. Open dir/.file.o.cmd, look for source_ and deps_ line
304 * figure out source file. */ 304 * to figure out source files. */
305static int parse_source_files(const char *objfile, struct md4_ctx *md) 305static int parse_source_files(const char *objfile, struct md4_ctx *md)
306{ 306{
307 char *cmd, *file, *line, *dir; 307 char *cmd, *file, *line, *dir;
@@ -340,6 +340,21 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md)
340 */ 340 */
341 while ((line = get_next_line(&pos, file, flen)) != NULL) { 341 while ((line = get_next_line(&pos, file, flen)) != NULL) {
342 char* p = line; 342 char* p = line;
343
344 if (strncmp(line, "source_", sizeof("source_")-1) == 0) {
345 p = strrchr(line, ' ');
346 if (!p) {
347 warn("malformed line: %s\n", line);
348 goto out_file;
349 }
350 p++;
351 if (!parse_file(p, md)) {
352 warn("could not open %s: %s\n",
353 p, strerror(errno));
354 goto out_file;
355 }
356 continue;
357 }
343 if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) { 358 if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) {
344 check_files = 1; 359 check_files = 1;
345 continue; 360 continue;
diff --git a/sound/core/jack.c b/sound/core/jack.c
index 4902ae568730..53b53e97c896 100644
--- a/sound/core/jack.c
+++ b/sound/core/jack.c
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
141 141
142fail_input: 142fail_input:
143 input_free_device(jack->input_dev); 143 input_free_device(jack->input_dev);
144 kfree(jack->id);
144 kfree(jack); 145 kfree(jack);
145 return err; 146 return err;
146} 147}
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c
index 23f49f356e0f..16c0bdfbb164 100644
--- a/sound/pci/au88x0/au88x0_core.c
+++ b/sound/pci/au88x0/au88x0_core.c
@@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) {
1252static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) 1252static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma)
1253{ 1253{
1254 stream_t *dma = &vortex->dma_adb[adbdma]; 1254 stream_t *dma = &vortex->dma_adb[adbdma];
1255 int temp; 1255 int temp, page, delta;
1256 1256
1257 temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); 1257 temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2));
1258 temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); 1258 page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
1259 return temp; 1259 if (dma->nr_periods >= 4)
1260 delta = (page - dma->period_real) & 3;
1261 else {
1262 delta = (page - dma->period_real);
1263 if (delta < 0)
1264 delta += dma->nr_periods;
1265 }
1266 return (dma->period_virt + delta) * dma->period_bytes
1267 + (temp & (dma->period_bytes - 1));
1260} 1268}
1261 1269
1262static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) 1270static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 0baffcdee8f9..fcedad9a5fef 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
2308 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), 2308 SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
2309 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), 2309 SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
2310 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), 2310 SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
2311 SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB),
2311 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), 2312 SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
2312 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), 2313 SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
2313 SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), 2314 SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index a07b031090d8..067982f4f182 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = {
1039 {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, 1039 {0x11, AC_VERB_SET_PROC_COEF, 0x0008},
1040 {0x11, AC_VERB_SET_PROC_STATE, 0x00}, 1040 {0x11, AC_VERB_SET_PROC_STATE, 0x00},
1041 1041
1042#if 0 /* Don't to set to D3 as we are in power-up sequence */
1042 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ 1043 {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */
1043 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ 1044 {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */
1044 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ 1045 /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */
1046#endif
1045 1047
1046 {} /* terminator */ 1048 {} /* terminator */
1047}; 1049};
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fbe97d32140d..4d5004e693f0 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = {
3114 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), 3114 SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
3115 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), 3115 SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
3116 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), 3116 SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
3117 SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
3118 SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
3117 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), 3119 SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
3118 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), 3120 SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
3119 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), 3121 SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
@@ -3410,7 +3412,7 @@ static void cx_auto_parse_output(struct hda_codec *codec)
3410 } 3412 }
3411 } 3413 }
3412 spec->multiout.dac_nids = spec->private_dac_nids; 3414 spec->multiout.dac_nids = spec->private_dac_nids;
3413 spec->multiout.max_channels = nums * 2; 3415 spec->multiout.max_channels = spec->multiout.num_dacs * 2;
3414 3416
3415 if (cfg->hp_outs > 0) 3417 if (cfg->hp_outs > 0)
3416 spec->auto_mute = 1; 3418 spec->auto_mute = 1;
@@ -3729,9 +3731,9 @@ static int cx_auto_init(struct hda_codec *codec)
3729 return 0; 3731 return 0;
3730} 3732}
3731 3733
3732static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, 3734static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename,
3733 const char *dir, int cidx, 3735 const char *dir, int cidx,
3734 hda_nid_t nid, int hda_dir) 3736 hda_nid_t nid, int hda_dir, int amp_idx)
3735{ 3737{
3736 static char name[32]; 3738 static char name[32];
3737 static struct snd_kcontrol_new knew[] = { 3739 static struct snd_kcontrol_new knew[] = {
@@ -3743,7 +3745,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
3743 3745
3744 for (i = 0; i < 2; i++) { 3746 for (i = 0; i < 2; i++) {
3745 struct snd_kcontrol *kctl; 3747 struct snd_kcontrol *kctl;
3746 knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); 3748 knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx,
3749 hda_dir);
3747 knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; 3750 knew[i].subdevice = HDA_SUBDEV_AMP_FLAG;
3748 knew[i].index = cidx; 3751 knew[i].index = cidx;
3749 snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); 3752 snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]);
@@ -3759,6 +3762,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename,
3759 return 0; 3762 return 0;
3760} 3763}
3761 3764
3765#define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \
3766 cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0)
3767
3762#define cx_auto_add_pb_volume(codec, nid, str, idx) \ 3768#define cx_auto_add_pb_volume(codec, nid, str, idx) \
3763 cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) 3769 cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT)
3764 3770
@@ -3808,29 +3814,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec)
3808 struct conexant_spec *spec = codec->spec; 3814 struct conexant_spec *spec = codec->spec;
3809 struct auto_pin_cfg *cfg = &spec->autocfg; 3815 struct auto_pin_cfg *cfg = &spec->autocfg;
3810 static const char *prev_label; 3816 static const char *prev_label;
3811 int i, err, cidx; 3817 int i, err, cidx, conn_len;
3818 hda_nid_t conn[HDA_MAX_CONNECTIONS];
3819
3820 int multi_adc_volume = 0; /* If the ADC nid has several input volumes */
3821 int adc_nid = spec->adc_nids[0];
3822
3823 conn_len = snd_hda_get_connections(codec, adc_nid, conn,
3824 HDA_MAX_CONNECTIONS);
3825 if (conn_len < 0)
3826 return conn_len;
3827
3828 multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1;
3829 if (!multi_adc_volume) {
3830 err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid,
3831 HDA_INPUT);
3832 if (err < 0)
3833 return err;
3834 }
3812 3835
3813 err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0],
3814 HDA_INPUT);
3815 if (err < 0)
3816 return err;
3817 prev_label = NULL; 3836 prev_label = NULL;
3818 cidx = 0; 3837 cidx = 0;
3819 for (i = 0; i < cfg->num_inputs; i++) { 3838 for (i = 0; i < cfg->num_inputs; i++) {
3820 hda_nid_t nid = cfg->inputs[i].pin; 3839 hda_nid_t nid = cfg->inputs[i].pin;
3821 const char *label; 3840 const char *label;
3822 if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) 3841 int j;
3842 int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP;
3843 if (!pin_amp && !multi_adc_volume)
3823 continue; 3844 continue;
3845
3824 label = hda_get_autocfg_input_label(codec, cfg, i); 3846 label = hda_get_autocfg_input_label(codec, cfg, i);
3825 if (label == prev_label) 3847 if (label == prev_label)
3826 cidx++; 3848 cidx++;
3827 else 3849 else
3828 cidx = 0; 3850 cidx = 0;
3829 prev_label = label; 3851 prev_label = label;
3830 err = cx_auto_add_volume(codec, label, " Capture", cidx, 3852
3831 nid, HDA_INPUT); 3853 if (pin_amp) {
3832 if (err < 0) 3854 err = cx_auto_add_volume(codec, label, " Boost", cidx,
3833 return err; 3855 nid, HDA_INPUT);
3856 if (err < 0)
3857 return err;
3858 }
3859
3860 if (!multi_adc_volume)
3861 continue;
3862 for (j = 0; j < conn_len; j++) {
3863 if (conn[j] == nid) {
3864 err = cx_auto_add_volume_idx(codec, label,
3865 " Capture", cidx, adc_nid, HDA_INPUT, j);
3866 if (err < 0)
3867 return err;
3868 break;
3869 }
3870 }
3834 } 3871 }
3835 return 0; 3872 return 0;
3836} 3873}
@@ -3902,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = {
3902 .patch = patch_cxt5066 }, 3939 .patch = patch_cxt5066 },
3903 { .id = 0x14f15069, .name = "CX20585", 3940 { .id = 0x14f15069, .name = "CX20585",
3904 .patch = patch_cxt5066 }, 3941 .patch = patch_cxt5066 },
3942 { .id = 0x14f1506e, .name = "CX20590",
3943 .patch = patch_cxt5066 },
3905 { .id = 0x14f15097, .name = "CX20631", 3944 { .id = 0x14f15097, .name = "CX20631",
3906 .patch = patch_conexant_auto }, 3945 .patch = patch_conexant_auto },
3907 { .id = 0x14f15098, .name = "CX20632", 3946 { .id = 0x14f15098, .name = "CX20632",
@@ -3928,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066");
3928MODULE_ALIAS("snd-hda-codec-id:14f15067"); 3967MODULE_ALIAS("snd-hda-codec-id:14f15067");
3929MODULE_ALIAS("snd-hda-codec-id:14f15068"); 3968MODULE_ALIAS("snd-hda-codec-id:14f15068");
3930MODULE_ALIAS("snd-hda-codec-id:14f15069"); 3969MODULE_ALIAS("snd-hda-codec-id:14f15069");
3970MODULE_ALIAS("snd-hda-codec-id:14f1506e");
3931MODULE_ALIAS("snd-hda-codec-id:14f15097"); 3971MODULE_ALIAS("snd-hda-codec-id:14f15097");
3932MODULE_ALIAS("snd-hda-codec-id:14f15098"); 3972MODULE_ALIAS("snd-hda-codec-id:14f15098");
3933MODULE_ALIAS("snd-hda-codec-id:14f150a1"); 3973MODULE_ALIAS("snd-hda-codec-id:14f150a1");
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index a58767736727..ec0fa2dd0a27 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1634,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = {
1634{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1634{ .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1635{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1635{ .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1636{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1636{ .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1637{ .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1638{ .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1639/* 17 is known to be absent */
1637{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1640{ .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1638{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1641{ .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
1639{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, 1642{ .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 },
@@ -1676,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011");
1676MODULE_ALIAS("snd-hda-codec-id:10de0012"); 1679MODULE_ALIAS("snd-hda-codec-id:10de0012");
1677MODULE_ALIAS("snd-hda-codec-id:10de0013"); 1680MODULE_ALIAS("snd-hda-codec-id:10de0013");
1678MODULE_ALIAS("snd-hda-codec-id:10de0014"); 1681MODULE_ALIAS("snd-hda-codec-id:10de0014");
1682MODULE_ALIAS("snd-hda-codec-id:10de0015");
1683MODULE_ALIAS("snd-hda-codec-id:10de0016");
1679MODULE_ALIAS("snd-hda-codec-id:10de0018"); 1684MODULE_ALIAS("snd-hda-codec-id:10de0018");
1680MODULE_ALIAS("snd-hda-codec-id:10de0019"); 1685MODULE_ALIAS("snd-hda-codec-id:10de0019");
1681MODULE_ALIAS("snd-hda-codec-id:10de001a"); 1686MODULE_ALIAS("snd-hda-codec-id:10de001a");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 3328a259a242..4261bb8eec1d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl)
1133 nid = spec->autocfg.hp_pins[i]; 1133 nid = spec->autocfg.hp_pins[i];
1134 if (!nid) 1134 if (!nid)
1135 break; 1135 break;
1136 if (snd_hda_jack_detect(codec, nid)) { 1136 alc_report_jack(codec, nid);
1137 spec->jack_present = 1; 1137 spec->jack_present |= snd_hda_jack_detect(codec, nid);
1138 break;
1139 }
1140 alc_report_jack(codec, spec->autocfg.hp_pins[i]);
1141 } 1138 }
1142 1139
1143 mute = spec->jack_present ? HDA_AMP_MUTE : 0; 1140 mute = spec->jack_present ? HDA_AMP_MUTE : 0;
@@ -15015,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = {
15015 SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), 15012 SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC),
15016 SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), 15013 SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC),
15017 SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), 15014 SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC),
15018 SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC), 15015 SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC),
15019 SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), 15016 SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC),
15020 SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), 15017 SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC),
15021 SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC), 15018 SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 9ea48b425d0b..bd7b123f6440 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = {
586 0x0f, 0x10, 0x11, 0x1f, 0x20, 586 0x0f, 0x10, 0x11, 0x1f, 0x20,
587}; 587};
588 588
589static hda_nid_t stac92hd88xxx_pin_nids[10] = { 589static hda_nid_t stac92hd87xxx_pin_nids[6] = {
590 0x0a, 0x0b, 0x0c, 0x0d,
591 0x0f, 0x11,
592};
593
594static hda_nid_t stac92hd88xxx_pin_nids[8] = {
590 0x0a, 0x0b, 0x0c, 0x0d, 595 0x0a, 0x0b, 0x0c, 0x0d,
591 0x0f, 0x11, 0x1f, 0x20, 596 0x0f, 0x11, 0x1f, 0x20,
592}; 597};
@@ -5430,12 +5435,13 @@ again:
5430 switch (codec->vendor_id) { 5435 switch (codec->vendor_id) {
5431 case 0x111d76d1: 5436 case 0x111d76d1:
5432 case 0x111d76d9: 5437 case 0x111d76d9:
5438 case 0x111d76e5:
5433 spec->dmic_nids = stac92hd87b_dmic_nids; 5439 spec->dmic_nids = stac92hd87b_dmic_nids;
5434 spec->num_dmics = stac92xx_connected_ports(codec, 5440 spec->num_dmics = stac92xx_connected_ports(codec,
5435 stac92hd87b_dmic_nids, 5441 stac92hd87b_dmic_nids,
5436 STAC92HD87B_NUM_DMICS); 5442 STAC92HD87B_NUM_DMICS);
5437 spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); 5443 spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids);
5438 spec->pin_nids = stac92hd88xxx_pin_nids; 5444 spec->pin_nids = stac92hd87xxx_pin_nids;
5439 spec->mono_nid = 0; 5445 spec->mono_nid = 0;
5440 spec->num_pwrs = 0; 5446 spec->num_pwrs = 0;
5441 break; 5447 break;
@@ -5443,6 +5449,7 @@ again:
5443 case 0x111d7667: 5449 case 0x111d7667:
5444 case 0x111d7668: 5450 case 0x111d7668:
5445 case 0x111d7669: 5451 case 0x111d7669:
5452 case 0x111d76e3:
5446 spec->num_dmics = stac92xx_connected_ports(codec, 5453 spec->num_dmics = stac92xx_connected_ports(codec,
5447 stac92hd88xxx_dmic_nids, 5454 stac92hd88xxx_dmic_nids,
5448 STAC92HD88XXX_NUM_DMICS); 5455 STAC92HD88XXX_NUM_DMICS);
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = {
6387 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, 6394 { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx },
6388 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, 6395 { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx },
6389 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, 6396 { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx},
6397 { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx},
6398 { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx},
6390 { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, 6399 { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx},
6391 {} /* terminator */ 6400 {} /* terminator */
6392}; 6401};
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
index a76c3260d941..63b0054200a8 100644
--- a/sound/pci/hda/patch_via.c
+++ b/sound/pci/hda/patch_via.c
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
567 hda_nid_t nid = cfg->inputs[i].pin; 567 hda_nid_t nid = cfg->inputs[i].pin;
568 if (spec->smart51_enabled && is_smart51_pins(spec, nid)) 568 if (spec->smart51_enabled && is_smart51_pins(spec, nid))
569 ctl = PIN_OUT; 569 ctl = PIN_OUT;
570 else if (i == AUTO_PIN_MIC) 570 else if (cfg->inputs[i].type == AUTO_PIN_MIC)
571 ctl = PIN_VREF50; 571 ctl = PIN_VREF50;
572 else 572 else
573 ctl = PIN_IN; 573 ctl = PIN_IN;
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c
index bb4bf65b9e7e..0bb424af956f 100644
--- a/sound/soc/codecs/cx20442.c
+++ b/sound/soc/codecs/cx20442.c
@@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec)
367 return 0; 367 return 0;
368} 368}
369 369
370static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC; 370static const u8 cx20442_reg;
371 371
372static struct snd_soc_codec_driver cx20442_codec_dev = { 372static struct snd_soc_codec_driver cx20442_codec_dev = {
373 .probe = cx20442_codec_probe, 373 .probe = cx20442_codec_probe,
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index 987476a5895f..017d99ceb42e 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
1482 WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, 1482 WM8903_MICDET_EINT | WM8903_MICSHRT_EINT,
1483 irq_mask); 1483 irq_mask);
1484 1484
1485 if (det && shrt) { 1485 if (det || shrt) {
1486 /* Enable mic detection, this may not have been set through 1486 /* Enable mic detection, this may not have been set through
1487 * platform data (eg, if the defaults are OK). */ 1487 * platform data (eg, if the defaults are OK). */
1488 snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, 1488 snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0,
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h
index e8490f3edd03..e3ec2433b215 100644
--- a/sound/soc/codecs/wm8903.h
+++ b/sound/soc/codecs/wm8903.h
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec,
165 165
166#define WM8903_VMID_RES_50K 2 166#define WM8903_VMID_RES_50K 2
167#define WM8903_VMID_RES_250K 3 167#define WM8903_VMID_RES_250K 3
168#define WM8903_VMID_RES_5K 4 168#define WM8903_VMID_RES_5K 6
169 169
170/* 170/*
171 * R8 (0x08) - Analogue DAC 0 171 * R8 (0x08) - Analogue DAC 0
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c
index 4bbc3442703f..8dfb0a0da673 100644
--- a/sound/soc/codecs/wm8978.c
+++ b/sound/soc/codecs/wm8978.c
@@ -145,18 +145,18 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = {
145 SOC_SINGLE("DAC Playback Limiter Threshold", 145 SOC_SINGLE("DAC Playback Limiter Threshold",
146 WM8978_DAC_LIMITER_2, 4, 7, 0), 146 WM8978_DAC_LIMITER_2, 4, 7, 0),
147 SOC_SINGLE("DAC Playback Limiter Boost", 147 SOC_SINGLE("DAC Playback Limiter Boost",
148 WM8978_DAC_LIMITER_2, 0, 15, 0), 148 WM8978_DAC_LIMITER_2, 0, 12, 0),
149 149
150 SOC_ENUM("ALC Enable Switch", alc1), 150 SOC_ENUM("ALC Enable Switch", alc1),
151 SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0), 151 SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0),
152 SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0), 152 SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0),
153 153
154 SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0), 154 SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0),
155 SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0), 155 SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0),
156 156
157 SOC_ENUM("ALC Capture Mode", alc3), 157 SOC_ENUM("ALC Capture Mode", alc3),
158 SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0), 158 SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0),
159 SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0), 159 SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0),
160 160
161 SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0), 161 SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0),
162 SOC_SINGLE("ALC Capture Noise Gate Threshold", 162 SOC_SINGLE("ALC Capture Noise Gate Threshold",
@@ -211,8 +211,10 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = {
211 WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1), 211 WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1),
212 212
213 /* DAC / ADC oversampling */ 213 /* DAC / ADC oversampling */
214 SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0), 214 SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL,
215 SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0), 215 5, 1, 0),
216 SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL,
217 5, 1, 0),
216}; 218};
217 219
218/* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */ 220/* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 37b8aa8a680f..c6c958ee5d59 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -107,6 +107,12 @@ struct wm8994_priv {
107 107
108 int revision; 108 int revision;
109 struct wm8994_pdata *pdata; 109 struct wm8994_pdata *pdata;
110
111 unsigned int aif1clk_enable:1;
112 unsigned int aif2clk_enable:1;
113
114 unsigned int aif1clk_disable:1;
115 unsigned int aif2clk_disable:1;
110}; 116};
111 117
112static int wm8994_readable(unsigned int reg) 118static int wm8994_readable(unsigned int reg)
@@ -1004,6 +1010,110 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec)
1004 } 1010 }
1005} 1011}
1006 1012
1013static int late_enable_ev(struct snd_soc_dapm_widget *w,
1014 struct snd_kcontrol *kcontrol, int event)
1015{
1016 struct snd_soc_codec *codec = w->codec;
1017 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1018
1019 switch (event) {
1020 case SND_SOC_DAPM_PRE_PMU:
1021 if (wm8994->aif1clk_enable) {
1022 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1023 WM8994_AIF1CLK_ENA_MASK,
1024 WM8994_AIF1CLK_ENA);
1025 wm8994->aif1clk_enable = 0;
1026 }
1027 if (wm8994->aif2clk_enable) {
1028 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1029 WM8994_AIF2CLK_ENA_MASK,
1030 WM8994_AIF2CLK_ENA);
1031 wm8994->aif2clk_enable = 0;
1032 }
1033 break;
1034 }
1035
1036 return 0;
1037}
1038
1039static int late_disable_ev(struct snd_soc_dapm_widget *w,
1040 struct snd_kcontrol *kcontrol, int event)
1041{
1042 struct snd_soc_codec *codec = w->codec;
1043 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1044
1045 switch (event) {
1046 case SND_SOC_DAPM_POST_PMD:
1047 if (wm8994->aif1clk_disable) {
1048 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1049 WM8994_AIF1CLK_ENA_MASK, 0);
1050 wm8994->aif1clk_disable = 0;
1051 }
1052 if (wm8994->aif2clk_disable) {
1053 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1054 WM8994_AIF2CLK_ENA_MASK, 0);
1055 wm8994->aif2clk_disable = 0;
1056 }
1057 break;
1058 }
1059
1060 return 0;
1061}
1062
1063static int aif1clk_ev(struct snd_soc_dapm_widget *w,
1064 struct snd_kcontrol *kcontrol, int event)
1065{
1066 struct snd_soc_codec *codec = w->codec;
1067 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1068
1069 switch (event) {
1070 case SND_SOC_DAPM_PRE_PMU:
1071 wm8994->aif1clk_enable = 1;
1072 break;
1073 case SND_SOC_DAPM_POST_PMD:
1074 wm8994->aif1clk_disable = 1;
1075 break;
1076 }
1077
1078 return 0;
1079}
1080
1081static int aif2clk_ev(struct snd_soc_dapm_widget *w,
1082 struct snd_kcontrol *kcontrol, int event)
1083{
1084 struct snd_soc_codec *codec = w->codec;
1085 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
1086
1087 switch (event) {
1088 case SND_SOC_DAPM_PRE_PMU:
1089 wm8994->aif2clk_enable = 1;
1090 break;
1091 case SND_SOC_DAPM_POST_PMD:
1092 wm8994->aif2clk_disable = 1;
1093 break;
1094 }
1095
1096 return 0;
1097}
1098
1099static int adc_mux_ev(struct snd_soc_dapm_widget *w,
1100 struct snd_kcontrol *kcontrol, int event)
1101{
1102 late_enable_ev(w, kcontrol, event);
1103 return 0;
1104}
1105
1106static int dac_ev(struct snd_soc_dapm_widget *w,
1107 struct snd_kcontrol *kcontrol, int event)
1108{
1109 struct snd_soc_codec *codec = w->codec;
1110 unsigned int mask = 1 << w->shift;
1111
1112 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
1113 mask, mask);
1114 return 0;
1115}
1116
1007static const char *hp_mux_text[] = { 1117static const char *hp_mux_text[] = {
1008 "Mixer", 1118 "Mixer",
1009 "DAC", 1119 "DAC",
@@ -1272,6 +1382,59 @@ static const struct soc_enum aif2dacr_src_enum =
1272static const struct snd_kcontrol_new aif2dacr_src_mux = 1382static const struct snd_kcontrol_new aif2dacr_src_mux =
1273 SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); 1383 SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
1274 1384
1385static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = {
1386SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev,
1387 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
1388SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev,
1389 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
1390
1391SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1392 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1393SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1394 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1395SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1396 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1397SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0,
1398 late_enable_ev, SND_SOC_DAPM_PRE_PMU),
1399
1400SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
1401};
1402
1403static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
1404SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
1405SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0)
1406};
1407
1408static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = {
1409SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0,
1410 dac_ev, SND_SOC_DAPM_PRE_PMU),
1411SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0,
1412 dac_ev, SND_SOC_DAPM_PRE_PMU),
1413SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0,
1414 dac_ev, SND_SOC_DAPM_PRE_PMU),
1415SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0,
1416 dac_ev, SND_SOC_DAPM_PRE_PMU),
1417};
1418
1419static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = {
1420SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
1421SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
1422SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
1423SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
1424};
1425
1426static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
1427SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
1428 adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
1429SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
1430 adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
1431};
1432
1433static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
1434SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
1435SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
1436};
1437
1275static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { 1438static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
1276SND_SOC_DAPM_INPUT("DMIC1DAT"), 1439SND_SOC_DAPM_INPUT("DMIC1DAT"),
1277SND_SOC_DAPM_INPUT("DMIC2DAT"), 1440SND_SOC_DAPM_INPUT("DMIC2DAT"),
@@ -1284,9 +1447,6 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0),
1284SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), 1447SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0),
1285SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), 1448SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0),
1286 1449
1287SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0),
1288SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0),
1289
1290SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, 1450SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL,
1291 0, WM8994_POWER_MANAGEMENT_4, 9, 0), 1451 0, WM8994_POWER_MANAGEMENT_4, 9, 0),
1292SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, 1452SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL,
@@ -1369,14 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0),
1369SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), 1529SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0),
1370SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), 1530SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0),
1371 1531
1372SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
1373SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
1374
1375SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0),
1376SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0),
1377SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0),
1378SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0),
1379
1380SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), 1532SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux),
1381SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), 1533SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux),
1382 1534
@@ -1516,14 +1668,12 @@ static const struct snd_soc_dapm_route intercon[] = {
1516 { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, 1668 { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" },
1517 1669
1518 /* DAC1 inputs */ 1670 /* DAC1 inputs */
1519 { "DAC1L", NULL, "DAC1L Mixer" },
1520 { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, 1671 { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" },
1521 { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, 1672 { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
1522 { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, 1673 { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
1523 { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, 1674 { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" },
1524 { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, 1675 { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" },
1525 1676
1526 { "DAC1R", NULL, "DAC1R Mixer" },
1527 { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, 1677 { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" },
1528 { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, 1678 { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
1529 { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, 1679 { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1532,7 +1682,6 @@ static const struct snd_soc_dapm_route intercon[] = {
1532 1682
1533 /* DAC2/AIF2 outputs */ 1683 /* DAC2/AIF2 outputs */
1534 { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, 1684 { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" },
1535 { "DAC2L", NULL, "AIF2DAC2L Mixer" },
1536 { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, 1685 { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" },
1537 { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, 1686 { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" },
1538 { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, 1687 { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" },
@@ -1540,7 +1689,6 @@ static const struct snd_soc_dapm_route intercon[] = {
1540 { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, 1689 { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" },
1541 1690
1542 { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, 1691 { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" },
1543 { "DAC2R", NULL, "AIF2DAC2R Mixer" },
1544 { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, 1692 { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" },
1545 { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, 1693 { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" },
1546 { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, 1694 { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" },
@@ -1584,6 +1732,24 @@ static const struct snd_soc_dapm_route intercon[] = {
1584 { "Right Headphone Mux", "DAC", "DAC1R" }, 1732 { "Right Headphone Mux", "DAC", "DAC1R" },
1585}; 1733};
1586 1734
1735static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = {
1736 { "DAC1L", NULL, "Late DAC1L Enable PGA" },
1737 { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" },
1738 { "DAC1R", NULL, "Late DAC1R Enable PGA" },
1739 { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" },
1740 { "DAC2L", NULL, "Late DAC2L Enable PGA" },
1741 { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" },
1742 { "DAC2R", NULL, "Late DAC2R Enable PGA" },
1743 { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" }
1744};
1745
1746static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = {
1747 { "DAC1L", NULL, "DAC1L Mixer" },
1748 { "DAC1R", NULL, "DAC1R Mixer" },
1749 { "DAC2L", NULL, "AIF2DAC2L Mixer" },
1750 { "DAC2R", NULL, "AIF2DAC2R Mixer" },
1751};
1752
1587static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { 1753static const struct snd_soc_dapm_route wm8994_revd_intercon[] = {
1588 { "AIF1DACDAT", NULL, "AIF2DACDAT" }, 1754 { "AIF1DACDAT", NULL, "AIF2DACDAT" },
1589 { "AIF2DACDAT", NULL, "AIF1DACDAT" }, 1755 { "AIF2DACDAT", NULL, "AIF1DACDAT" },
@@ -2514,6 +2680,22 @@ static int wm8994_resume(struct snd_soc_codec *codec)
2514{ 2680{
2515 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 2681 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
2516 int i, ret; 2682 int i, ret;
2683 unsigned int val, mask;
2684
2685 if (wm8994->revision < 4) {
2686 /* force a HW read */
2687 val = wm8994_reg_read(codec->control_data,
2688 WM8994_POWER_MANAGEMENT_5);
2689
2690 /* modify the cache only */
2691 codec->cache_only = 1;
2692 mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA |
2693 WM8994_DAC2R_ENA | WM8994_DAC2L_ENA;
2694 val &= mask;
2695 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
2696 mask, val);
2697 codec->cache_only = 0;
2698 }
2517 2699
2518 /* Restore the registers */ 2700 /* Restore the registers */
2519 ret = snd_soc_cache_sync(codec); 2701 ret = snd_soc_cache_sync(codec);
@@ -2847,11 +3029,10 @@ static void wm8958_default_micdet(u16 status, void *data)
2847 report |= SND_JACK_BTN_5; 3029 report |= SND_JACK_BTN_5;
2848 3030
2849done: 3031done:
2850 snd_soc_jack_report(wm8994->micdet[0].jack, 3032 snd_soc_jack_report(wm8994->micdet[0].jack, report,
2851 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | 3033 SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 |
2852 SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | 3034 SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 |
2853 SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, 3035 SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT);
2854 report);
2855} 3036}
2856 3037
2857/** 3038/**
@@ -3125,10 +3306,31 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3125 case WM8994: 3306 case WM8994:
3126 snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, 3307 snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets,
3127 ARRAY_SIZE(wm8994_specific_dapm_widgets)); 3308 ARRAY_SIZE(wm8994_specific_dapm_widgets));
3309 if (wm8994->revision < 4) {
3310 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets,
3311 ARRAY_SIZE(wm8994_lateclk_revd_widgets));
3312 snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets,
3313 ARRAY_SIZE(wm8994_adc_revd_widgets));
3314 snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets,
3315 ARRAY_SIZE(wm8994_dac_revd_widgets));
3316 } else {
3317 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
3318 ARRAY_SIZE(wm8994_lateclk_widgets));
3319 snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
3320 ARRAY_SIZE(wm8994_adc_widgets));
3321 snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
3322 ARRAY_SIZE(wm8994_dac_widgets));
3323 }
3128 break; 3324 break;
3129 case WM8958: 3325 case WM8958:
3130 snd_soc_add_controls(codec, wm8958_snd_controls, 3326 snd_soc_add_controls(codec, wm8958_snd_controls,
3131 ARRAY_SIZE(wm8958_snd_controls)); 3327 ARRAY_SIZE(wm8958_snd_controls));
3328 snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets,
3329 ARRAY_SIZE(wm8994_lateclk_widgets));
3330 snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets,
3331 ARRAY_SIZE(wm8994_adc_widgets));
3332 snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets,
3333 ARRAY_SIZE(wm8994_dac_widgets));
3132 snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets, 3334 snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets,
3133 ARRAY_SIZE(wm8958_dapm_widgets)); 3335 ARRAY_SIZE(wm8958_dapm_widgets));
3134 break; 3336 break;
@@ -3143,12 +3345,19 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3143 snd_soc_dapm_add_routes(dapm, wm8994_intercon, 3345 snd_soc_dapm_add_routes(dapm, wm8994_intercon,
3144 ARRAY_SIZE(wm8994_intercon)); 3346 ARRAY_SIZE(wm8994_intercon));
3145 3347
3146 if (wm8994->revision < 4) 3348 if (wm8994->revision < 4) {
3147 snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, 3349 snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon,
3148 ARRAY_SIZE(wm8994_revd_intercon)); 3350 ARRAY_SIZE(wm8994_revd_intercon));
3149 3351 snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon,
3352 ARRAY_SIZE(wm8994_lateclk_revd_intercon));
3353 } else {
3354 snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
3355 ARRAY_SIZE(wm8994_lateclk_intercon));
3356 }
3150 break; 3357 break;
3151 case WM8958: 3358 case WM8958:
3359 snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon,
3360 ARRAY_SIZE(wm8994_lateclk_intercon));
3152 snd_soc_dapm_add_routes(dapm, wm8958_intercon, 3361 snd_soc_dapm_add_routes(dapm, wm8958_intercon,
3153 ARRAY_SIZE(wm8958_intercon)); 3362 ARRAY_SIZE(wm8958_intercon));
3154 break; 3363 break;
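
The wm8994_resume() hunk above forces a hardware read of WM8994_POWER_MANAGEMENT_5 on pre-rev-D parts and, with cache_only temporarily set, folds just the DAC enable bits back into the register cache before snd_soc_cache_sync() runs, so the sync does not overwrite DAC state the chip is already holding. A minimal standalone sketch of that idea, using made-up register names and a plain array for the cache rather than any ASoC API:

    #include <stdio.h>

    #define PM5        0x05              /* hypothetical register index */
    #define DAC1L_ENA  (1u << 1)
    #define DAC1R_ENA  (1u << 0)

    static unsigned int hw[8] = { [PM5] = DAC1R_ENA };  /* what the chip holds */
    static unsigned int cache[8];                       /* driver's shadow copy */

    /* update_bits() against the cache only, mirroring the cache_only trick */
    static void cache_update_bits(unsigned int reg, unsigned int mask,
                                  unsigned int val)
    {
        cache[reg] = (cache[reg] & ~mask) | (val & mask);
    }

    int main(void)
    {
        unsigned int mask = DAC1L_ENA | DAC1R_ENA;
        unsigned int val  = hw[PM5] & mask;   /* forced hardware read, masked */

        cache_update_bits(PM5, mask, val);    /* seed the cache before sync */
        printf("cache[PM5] = %#x\n", cache[PM5]);
        return 0;
    }
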
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c
index 43825b2102a5..cce704c275c6 100644
--- a/sound/soc/codecs/wm9081.c
+++ b/sound/soc/codecs/wm9081.c
@@ -15,6 +15,7 @@
15#include <linux/moduleparam.h> 15#include <linux/moduleparam.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/device.h>
18#include <linux/pm.h> 19#include <linux/pm.h>
19#include <linux/i2c.h> 20#include <linux/i2c.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
@@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c,
1341 wm9081->control_type = SND_SOC_I2C; 1342 wm9081->control_type = SND_SOC_I2C;
1342 wm9081->control_data = i2c; 1343 wm9081->control_data = i2c;
1343 1344
1345 if (dev_get_platdata(&i2c->dev))
1346 memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev),
1347 sizeof(wm9081->retune));
1348
1344 ret = snd_soc_register_codec(&i2c->dev, 1349 ret = snd_soc_register_codec(&i2c->dev,
1345 &soc_codec_dev_wm9081, &wm9081_dai, 1); 1350 &soc_codec_dev_wm9081, &wm9081_dai, 1);
1346 if (ret < 0) 1351 if (ret < 0)
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 613df5db0b32..516892706063 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"),
674}; 674};
675 675
676static const struct snd_soc_dapm_route analogue_routes[] = { 676static const struct snd_soc_dapm_route analogue_routes[] = {
677 { "MICBIAS1", NULL, "CLK_SYS" },
678 { "MICBIAS2", NULL, "CLK_SYS" },
679
677 { "IN1L PGA", "IN1LP Switch", "IN1LP" }, 680 { "IN1L PGA", "IN1LP Switch", "IN1LP" },
678 { "IN1L PGA", "IN1LN Switch", "IN1LN" }, 681 { "IN1L PGA", "IN1LN Switch", "IN1LN" },
679 682
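
The new { "MICBIAS1", NULL, "CLK_SYS" } entries above use the same triple layout as every other route in these tables: sink first, then an optional control name, then source, with a NULL control meaning the connection is unconditional (here, a supply dependency that keeps CLK_SYS powered while a MICBIAS is in use). A small plain-C sketch of how such a triple reads, with no ASoC headers involved:

    #include <stdio.h>

    /* Same field order as ASoC's snd_soc_dapm_route: sink, control, source. */
    struct route { const char *sink, *control, *source; };

    static const struct route example[] = {
        { "MICBIAS1", NULL, "CLK_SYS" },          /* unconditional supply dep */
        { "IN1L PGA", "IN1LP Switch", "IN1LP" },  /* gated by a mixer control */
    };

    int main(void)
    {
        for (size_t i = 0; i < sizeof(example) / sizeof(example[0]); i++) {
            if (example[i].control)
                printf("%s feeds %s when \"%s\" is on\n",
                       example[i].source, example[i].sink, example[i].control);
            else
                printf("%s always feeds %s\n",
                       example[i].source, example[i].sink);
        }
        return 0;
    }
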
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c
index e20c9e1457c0..1e9bccae4e80 100644
--- a/sound/soc/imx/eukrea-tlv320.c
+++ b/sound/soc/imx/eukrea-tlv320.c
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = {
79 .name = "tlv320aic23", 79 .name = "tlv320aic23",
80 .stream_name = "TLV320AIC23", 80 .stream_name = "TLV320AIC23",
81 .codec_dai_name = "tlv320aic23-hifi", 81 .codec_dai_name = "tlv320aic23-hifi",
82 .platform_name = "imx-pcm-audio.0", 82 .platform_name = "imx-fiq-pcm-audio.0",
83 .codec_name = "tlv320aic23-codec.0-001a", 83 .codec_name = "tlv320aic23-codec.0-001a",
84 .cpu_dai_name = "imx-ssi.0", 84 .cpu_dai_name = "imx-ssi.0",
85 .ops = &eukrea_tlv320_snd_ops, 85 .ops = &eukrea_tlv320_snd_ops,
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c
index 161750443ebc..73dde4a1adc3 100644
--- a/sound/soc/omap/am3517evm.c
+++ b/sound/soc/omap/am3517evm.c
@@ -139,7 +139,7 @@ static struct snd_soc_dai_link am3517evm_dai = {
139 .cpu_dai_name ="omap-mcbsp-dai.0", 139 .cpu_dai_name ="omap-mcbsp-dai.0",
140 .codec_dai_name = "tlv320aic23-hifi", 140 .codec_dai_name = "tlv320aic23-hifi",
141 .platform_name = "omap-pcm-audio", 141 .platform_name = "omap-pcm-audio",
142 .codec_name = "tlv320aic23-codec", 142 .codec_name = "tlv320aic23-codec.2-001a",
143 .init = am3517evm_aic23_init, 143 .init = am3517evm_aic23_init,
144 .ops = &am3517evm_ops, 144 .ops = &am3517evm_ops,
145}; 145};
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c
index 28333e7d9c50..dc65650a6fa1 100644
--- a/sound/soc/pxa/e740_wm9705.c
+++ b/sound/soc/pxa/e740_wm9705.c
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = {
117 { 117 {
118 .name = "AC97", 118 .name = "AC97",
119 .stream_name = "AC97 HiFi", 119 .stream_name = "AC97 HiFi",
120 .cpu_dai_name = "pxa-ac97.0", 120 .cpu_dai_name = "pxa2xx-ac97",
121 .codec_dai_name = "wm9705-hifi", 121 .codec_dai_name = "wm9705-hifi",
122 .platform_name = "pxa-pcm-audio", 122 .platform_name = "pxa-pcm-audio",
123 .codec_name = "wm9705-codec", 123 .codec_name = "wm9705-codec",
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = {
126 { 126 {
127 .name = "AC97 Aux", 127 .name = "AC97 Aux",
128 .stream_name = "AC97 Aux", 128 .stream_name = "AC97 Aux",
129 .cpu_dai_name = "pxa-ac97.1", 129 .cpu_dai_name = "pxa2xx-ac97-aux",
130 .codec_dai_name = "wm9705-aux", 130 .codec_dai_name = "wm9705-aux",
131 .platform_name = "pxa-pcm-audio", 131 .platform_name = "pxa-pcm-audio",
132 .codec_name = "wm9705-codec", 132 .codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c
index 01bf31675c55..51897fcd911b 100644
--- a/sound/soc/pxa/e750_wm9705.c
+++ b/sound/soc/pxa/e750_wm9705.c
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = {
99 { 99 {
100 .name = "AC97", 100 .name = "AC97",
101 .stream_name = "AC97 HiFi", 101 .stream_name = "AC97 HiFi",
102 .cpu_dai_name = "pxa-ac97.0", 102 .cpu_dai_name = "pxa2xx-ac97",
103 .codec_dai_name = "wm9705-hifi", 103 .codec_dai_name = "wm9705-hifi",
104 .platform_name = "pxa-pcm-audio", 104 .platform_name = "pxa-pcm-audio",
105 .codec_name = "wm9705-codec", 105 .codec_name = "wm9705-codec",
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = {
109 { 109 {
110 .name = "AC97 Aux", 110 .name = "AC97 Aux",
111 .stream_name = "AC97 Aux", 111 .stream_name = "AC97 Aux",
112 .cpu_dai_name = "pxa-ac97.1", 112 .cpu_dai_name = "pxa2xx-ac97-aux",
113 .codec_dai_name ="wm9705-aux", 113 .codec_dai_name ="wm9705-aux",
114 .platform_name = "pxa-pcm-audio", 114 .platform_name = "pxa-pcm-audio",
115 .codec_name = "wm9705-codec", 115 .codec_name = "wm9705-codec",
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c
index c6a37c6ef23b..053ed208e59f 100644
--- a/sound/soc/pxa/e800_wm9712.c
+++ b/sound/soc/pxa/e800_wm9712.c
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = {
89 { 89 {
90 .name = "AC97", 90 .name = "AC97",
91 .stream_name = "AC97 HiFi", 91 .stream_name = "AC97 HiFi",
92 .cpu_dai_name = "pxa-ac97.0", 92 .cpu_dai_name = "pxa2xx-ac97",
93 .codec_dai_name = "wm9712-hifi", 93 .codec_dai_name = "wm9712-hifi",
94 .platform_name = "pxa-pcm-audio", 94 .platform_name = "pxa-pcm-audio",
95 .codec_name = "wm9712-codec", 95 .codec_name = "wm9712-codec",
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = {
98 { 98 {
99 .name = "AC97 Aux", 99 .name = "AC97 Aux",
100 .stream_name = "AC97 Aux", 100 .stream_name = "AC97 Aux",
101 .cpu_dai_name = "pxa-ac97.1", 101 .cpu_dai_name = "pxa2xx-ac97-aux",
102 .codec_dai_name ="wm9712-aux", 102 .codec_dai_name ="wm9712-aux",
103 .platform_name = "pxa-pcm-audio", 103 .platform_name = "pxa-pcm-audio",
104 .codec_name = "wm9712-codec", 104 .codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c
index fc22e6eefc98..b13a4252812d 100644
--- a/sound/soc/pxa/em-x270.c
+++ b/sound/soc/pxa/em-x270.c
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
37 { 37 {
38 .name = "AC97", 38 .name = "AC97",
39 .stream_name = "AC97 HiFi", 39 .stream_name = "AC97 HiFi",
40 .cpu_dai_name = "pxa-ac97.0", 40 .cpu_dai_name = "pxa2xx-ac97",
41 .codec_dai_name = "wm9712-hifi", 41 .codec_dai_name = "wm9712-hifi",
42 .platform_name = "pxa-pcm-audio", 42 .platform_name = "pxa-pcm-audio",
43 .codec_name = "wm9712-codec", 43 .codec_name = "wm9712-codec",
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = {
45 { 45 {
46 .name = "AC97 Aux", 46 .name = "AC97 Aux",
47 .stream_name = "AC97 Aux", 47 .stream_name = "AC97 Aux",
48 .cpu_dai_name = "pxa-ac97.1", 48 .cpu_dai_name = "pxa2xx-ac97-aux",
49 .codec_dai_name ="wm9712-aux", 49 .codec_dai_name ="wm9712-aux",
50 .platform_name = "pxa-pcm-audio", 50 .platform_name = "pxa-pcm-audio",
51 .codec_name = "wm9712-codec", 51 .codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c
index 0d70fc8c12bd..38ca6759907e 100644
--- a/sound/soc/pxa/mioa701_wm9713.c
+++ b/sound/soc/pxa/mioa701_wm9713.c
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
162 { 162 {
163 .name = "AC97", 163 .name = "AC97",
164 .stream_name = "AC97 HiFi", 164 .stream_name = "AC97 HiFi",
165 .cpu_dai_name = "pxa-ac97.0", 165 .cpu_dai_name = "pxa2xx-ac97",
166 .codec_dai_name = "wm9713-hifi", 166 .codec_dai_name = "wm9713-hifi",
167 .codec_name = "wm9713-codec", 167 .codec_name = "wm9713-codec",
168 .init = mioa701_wm9713_init, 168 .init = mioa701_wm9713_init,
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = {
172 { 172 {
173 .name = "AC97 Aux", 173 .name = "AC97 Aux",
174 .stream_name = "AC97 Aux", 174 .stream_name = "AC97 Aux",
175 .cpu_dai_name = "pxa-ac97.1", 175 .cpu_dai_name = "pxa2xx-ac97-aux",
176 .codec_dai_name ="wm9713-aux", 176 .codec_dai_name ="wm9713-aux",
177 .codec_name = "wm9713-codec", 177 .codec_name = "wm9713-codec",
178 .platform_name = "pxa-pcm-audio", 178 .platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c
index 857db96d4a4f..504e4004f004 100644
--- a/sound/soc/pxa/palm27x.c
+++ b/sound/soc/pxa/palm27x.c
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
132{ 132{
133 .name = "AC97 HiFi", 133 .name = "AC97 HiFi",
134 .stream_name = "AC97 HiFi", 134 .stream_name = "AC97 HiFi",
135 .cpu_dai_name = "pxa-ac97.0", 135 .cpu_dai_name = "pxa2xx-ac97",
136 .codec_dai_name = "wm9712-hifi", 136 .codec_dai_name = "wm9712-hifi",
137 .codec_name = "wm9712-codec", 137 .codec_name = "wm9712-codec",
138 .platform_name = "pxa-pcm-audio", 138 .platform_name = "pxa-pcm-audio",
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = {
141{ 141{
142 .name = "AC97 Aux", 142 .name = "AC97 Aux",
143 .stream_name = "AC97 Aux", 143 .stream_name = "AC97 Aux",
144 .cpu_dai_name = "pxa-ac97.1", 144 .cpu_dai_name = "pxa2xx-ac97-aux",
145 .codec_dai_name = "wm9712-aux", 145 .codec_dai_name = "wm9712-aux",
146 .codec_name = "wm9712-codec", 146 .codec_name = "wm9712-codec",
147 .platform_name = "pxa-pcm-audio", 147 .platform_name = "pxa-pcm-audio",
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index f75804ef0897..4b6e5d608b42 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
219{ 219{
220 .name = "AC97", 220 .name = "AC97",
221 .stream_name = "AC97 HiFi", 221 .stream_name = "AC97 HiFi",
222 .cpu_dai_name = "pxa-ac97.0", 222 .cpu_dai_name = "pxa2xx-ac97",
223 .codec_dai_name = "wm9712-hifi", 223 .codec_dai_name = "wm9712-hifi",
224 .platform_name = "pxa-pcm-audio", 224 .platform_name = "pxa-pcm-audio",
225 .codec_name = "wm9712-codec", 225 .codec_name = "wm9712-codec",
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = {
229{ 229{
230 .name = "AC97 Aux", 230 .name = "AC97 Aux",
231 .stream_name = "AC97 Aux", 231 .stream_name = "AC97 Aux",
232 .cpu_dai_name = "pxa-ac97.1", 232 .cpu_dai_name = "pxa2xx-ac97-aux",
233 .codec_dai_name = "wm9712-aux", 233 .codec_dai_name = "wm9712-aux",
234 .platform_name = "pxa-pcm-audio", 234 .platform_name = "pxa-pcm-audio",
235 .codec_name = "wm9712-codec", 235 .codec_name = "wm9712-codec",
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c
index b222a7d72027..25bba108fea3 100644
--- a/sound/soc/pxa/zylonite.c
+++ b/sound/soc/pxa/zylonite.c
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
166 .stream_name = "AC97 HiFi", 166 .stream_name = "AC97 HiFi",
167 .codec_name = "wm9713-codec", 167 .codec_name = "wm9713-codec",
168 .platform_name = "pxa-pcm-audio", 168 .platform_name = "pxa-pcm-audio",
169 .cpu_dai_name = "pxa-ac97.0", 169 .cpu_dai_name = "pxa2xx-ac97",
170 .codec_name = "wm9713-hifi", 170 .codec_name = "wm9713-hifi",
171 .init = zylonite_wm9713_init, 171 .init = zylonite_wm9713_init,
172}, 172},
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = {
175 .stream_name = "AC97 Aux", 175 .stream_name = "AC97 Aux",
176 .codec_name = "wm9713-codec", 176 .codec_name = "wm9713-codec",
177 .platform_name = "pxa-pcm-audio", 177 .platform_name = "pxa-pcm-audio",
178 .cpu_dai_name = "pxa-ac97.1", 178 .cpu_dai_name = "pxa2xx-ac97-aux",
179 .codec_name = "wm9713-aux", 179 .codec_name = "wm9713-aux",
180}, 180},
181{ 181{
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 8194f150bab7..1790f83ee665 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
712 !path->connected(path->source, path->sink)) 712 !path->connected(path->source, path->sink))
713 continue; 713 continue;
714 714
715 if (path->sink && path->sink->power_check && 715 if (!path->sink)
716 continue;
717
718 if (path->sink->force) {
719 power = 1;
720 break;
721 }
722
723 if (path->sink->power_check &&
716 path->sink->power_check(path->sink)) { 724 path->sink->power_check(path->sink)) {
717 power = 1; 725 power = 1;
718 break; 726 break;
@@ -933,7 +941,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm,
933 } 941 }
934 942
935 if (!list_empty(&pending)) 943 if (!list_empty(&pending))
936 dapm_seq_run_coalesced(dapm, &pending); 944 dapm_seq_run_coalesced(cur_dapm, &pending);
937} 945}
938 946
939static void dapm_widget_update(struct snd_soc_dapm_context *dapm) 947static void dapm_widget_update(struct snd_soc_dapm_context *dapm)
@@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes);
1627int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) 1635int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
1628{ 1636{
1629 struct snd_soc_dapm_widget *w; 1637 struct snd_soc_dapm_widget *w;
1638 unsigned int val;
1630 1639
1631 list_for_each_entry(w, &dapm->card->widgets, list) 1640 list_for_each_entry(w, &dapm->card->widgets, list)
1632 { 1641 {
@@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm)
1675 case snd_soc_dapm_post: 1684 case snd_soc_dapm_post:
1676 break; 1685 break;
1677 } 1686 }
1687
1688 /* Read the initial power state from the device */
1689 if (w->reg >= 0) {
1690 val = snd_soc_read(w->codec, w->reg);
1691 val &= 1 << w->shift;
1692 if (w->invert)
1693 val = !val;
1694
1695 if (val)
1696 w->power = 1;
1697 }
1698
1678 w->new = 1; 1699 w->new = 1;
1679 } 1700 }
1680 1701
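
The snd_soc_dapm_new_widgets() hunk above seeds each widget's power flag from the register bit it controls, honouring the invert flag for active-low enables. A standalone sketch of just that bit test, with a hypothetical helper and no ASoC types:

    #include <stdio.h>

    /* Decide a widget's initial power from a register snapshot: test the bit
     * at 'shift', and honour 'invert' for active-low enable bits. */
    static int initial_power(unsigned int regval, unsigned int shift, int invert)
    {
        unsigned int val = regval & (1u << shift);

        return invert ? !val : !!val;
    }

    int main(void)
    {
        printf("%d\n", initial_power(0x08, 3, 0)); /* bit set, active high -> 1 */
        printf("%d\n", initial_power(0x08, 3, 1)); /* bit set, active low  -> 0 */
        return 0;
    }
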
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 68b97477577b..66eabafb1c24 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev)
785 } 785 }
786 786
787 dev->pcm->private_data = dev; 787 dev->pcm->private_data = dev;
788 strcpy(dev->pcm->name, dev->product_name); 788 strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name));
789 789
790 memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); 790 memset(dev->sub_playback, 0, sizeof(dev->sub_playback));
791 memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); 791 memset(dev->sub_capture, 0, sizeof(dev->sub_capture));
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c
index 2f218c77fff2..a1a47088fd0c 100644
--- a/sound/usb/caiaq/midi.c
+++ b/sound/usb/caiaq/midi.c
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device)
136 if (ret < 0) 136 if (ret < 0)
137 return ret; 137 return ret;
138 138
139 strcpy(rmidi->name, device->product_name); 139 strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name));
140 140
141 rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; 141 rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX;
142 rmidi->private_data = device; 142 rmidi->private_data = device;
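
Both caiaq hunks above swap strcpy() for strlcpy() when copying the USB product name into fixed-size name buffers. strlcpy() is a kernel/BSD helper rather than standard C, so this userspace stand-in (hypothetical name, hypothetical product string) shows the behaviour being relied on: copy at most size - 1 bytes and always NUL-terminate, truncating instead of overflowing.

    #include <stdio.h>
    #include <string.h>

    /* Userspace stand-in for the kernel's strlcpy() semantics. */
    static size_t bounded_copy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;     /* like strlcpy: length the caller wanted to copy */
    }

    int main(void)
    {
        char name[8];

        bounded_copy(name, "Traktor Kontrol X1", sizeof(name));
        printf("%s\n", name);   /* "Traktor" - truncated, but no overflow */
        return 0;
    }
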
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 800f7cb4f251..c0f8270bc199 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx,
323 return -ENOMEM; 323 return -ENOMEM;
324 } 324 }
325 325
326 mutex_init(&chip->shutdown_mutex);
326 chip->index = idx; 327 chip->index = idx;
327 chip->dev = dev; 328 chip->dev = dev;
328 chip->card = card; 329 chip->card = card;
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
531 chip = ptr; 532 chip = ptr;
532 card = chip->card; 533 card = chip->card;
533 mutex_lock(&register_mutex); 534 mutex_lock(&register_mutex);
535 mutex_lock(&chip->shutdown_mutex);
534 chip->shutdown = 1; 536 chip->shutdown = 1;
535 chip->num_interfaces--; 537 chip->num_interfaces--;
536 if (chip->num_interfaces <= 0) { 538 if (chip->num_interfaces <= 0) {
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr)
548 snd_usb_mixer_disconnect(p); 550 snd_usb_mixer_disconnect(p);
549 } 551 }
550 usb_chip[chip->index] = NULL; 552 usb_chip[chip->index] = NULL;
553 mutex_unlock(&chip->shutdown_mutex);
551 mutex_unlock(&register_mutex); 554 mutex_unlock(&register_mutex);
552 snd_card_free_when_closed(card); 555 snd_card_free_when_closed(card);
553 } else { 556 } else {
557 mutex_unlock(&chip->shutdown_mutex);
554 mutex_unlock(&register_mutex); 558 mutex_unlock(&register_mutex);
555 } 559 }
556} 560}
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 4132522ac90f..e3f680526cb5 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
361 } 361 }
362 362
363 if (changed) { 363 if (changed) {
364 mutex_lock(&subs->stream->chip->shutdown_mutex);
364 /* format changed */ 365 /* format changed */
365 snd_usb_release_substream_urbs(subs, 0); 366 snd_usb_release_substream_urbs(subs, 0);
366 /* influenced: period_bytes, channels, rate, format, */ 367 /* influenced: period_bytes, channels, rate, format, */
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream,
368 params_rate(hw_params), 369 params_rate(hw_params),
369 snd_pcm_format_physical_width(params_format(hw_params)) * 370 snd_pcm_format_physical_width(params_format(hw_params)) *
370 params_channels(hw_params)); 371 params_channels(hw_params));
372 mutex_unlock(&subs->stream->chip->shutdown_mutex);
371 } 373 }
372 374
373 return ret; 375 return ret;
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream)
385 subs->cur_audiofmt = NULL; 387 subs->cur_audiofmt = NULL;
386 subs->cur_rate = 0; 388 subs->cur_rate = 0;
387 subs->period_bytes = 0; 389 subs->period_bytes = 0;
388 if (!subs->stream->chip->shutdown) 390 mutex_lock(&subs->stream->chip->shutdown_mutex);
389 snd_usb_release_substream_urbs(subs, 0); 391 snd_usb_release_substream_urbs(subs, 0);
392 mutex_unlock(&subs->stream->chip->shutdown_mutex);
390 return snd_pcm_lib_free_vmalloc_buffer(substream); 393 return snd_pcm_lib_free_vmalloc_buffer(substream);
391} 394}
392 395
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index db3eb21627ee..6e66fffe87f5 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -36,6 +36,7 @@ struct snd_usb_audio {
36 struct snd_card *card; 36 struct snd_card *card;
37 u32 usb_id; 37 u32 usb_id;
38 int shutdown; 38 int shutdown;
39 struct mutex shutdown_mutex;
39 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ 40 unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */
40 int num_interfaces; 41 int num_interfaces;
41 int num_suspended_intf; 42 int num_suspended_intf;
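
The card.c, pcm.c and usbaudio.h hunks above add chip->shutdown_mutex so that USB disconnect and the PCM hw_params()/hw_free() paths can no longer release substream URBs concurrently; hw_free() also drops its unlocked shutdown check in favour of always releasing under the lock. A minimal userspace analogue of that locking shape, using pthreads and hypothetical function names rather than any ALSA or USB code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t shutdown_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int shutdown_flag;

    static void release_substream_urbs(void)
    {
        printf("releasing URBs (shutdown=%d)\n", shutdown_flag);
    }

    static void hw_free_path(void)
    {
        /* teardown always happens under the same lock as disconnect */
        pthread_mutex_lock(&shutdown_mutex);
        release_substream_urbs();
        pthread_mutex_unlock(&shutdown_mutex);
    }

    static void disconnect_path(void)
    {
        pthread_mutex_lock(&shutdown_mutex);
        shutdown_flag = 1;
        /* ...tear down mixers and PCM streams... */
        pthread_mutex_unlock(&shutdown_mutex);
    }

    int main(void)
    {
        disconnect_path();
        hw_free_path();
        return 0;
    }
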
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b2f729fdb317..60cac6f92e8b 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -759,8 +759,8 @@ static int __cmd_record(int argc, const char **argv)
759 perf_session__process_machines(session, event__synthesize_guest_os); 759 perf_session__process_machines(session, event__synthesize_guest_os);
760 760
761 if (!system_wide) 761 if (!system_wide)
762 event__synthesize_thread(target_tid, process_synthesized_event, 762 event__synthesize_thread_map(threads, process_synthesized_event,
763 session); 763 session);
764 else 764 else
765 event__synthesize_threads(process_synthesized_event, session); 765 event__synthesize_threads(process_synthesized_event, session);
766 766
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 746cf03cb05d..0ace786e83e0 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
264 c->start_time = start; 264 c->start_time = start;
265 if (p->start_time == 0 || p->start_time > start) 265 if (p->start_time == 0 || p->start_time > start)
266 p->start_time = start; 266 p->start_time = start;
267
268 if (cpu > numcpus)
269 numcpus = cpu;
270} 267}
271 268
272#define MAX_CPUS 4096 269#define MAX_CPUS 4096
@@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used,
511 if (!event_str) 508 if (!event_str)
512 return 0; 509 return 0;
513 510
511 if (sample->cpu > numcpus)
512 numcpus = sample->cpu;
513
514 if (strcmp(event_str, "power:cpu_idle") == 0) { 514 if (strcmp(event_str, "power:cpu_idle") == 0) {
515 struct power_processor_entry *ppe = (void *)te; 515 struct power_processor_entry *ppe = (void *)te;
516 if (ppe->state == (u32)PWR_EVENT_EXIT) 516 if (ppe->state == (u32)PWR_EVENT_EXIT)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b6998e055767..5a29d9cd9486 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -1306,7 +1306,7 @@ static int __cmd_top(void)
1306 return -ENOMEM; 1306 return -ENOMEM;
1307 1307
1308 if (target_tid != -1) 1308 if (target_tid != -1)
1309 event__synthesize_thread(target_tid, event__process, session); 1309 event__synthesize_thread_map(threads, event__process, session);
1310 else 1310 else
1311 event__synthesize_threads(event__process, session); 1311 event__synthesize_threads(event__process, session);
1312 1312
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1478ab4ee222..50d0a931497a 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -263,11 +263,12 @@ static int __event__synthesize_thread(event_t *comm_event, event_t *mmap_event,
263 process, session); 263 process, session);
264} 264}
265 265
266int event__synthesize_thread(pid_t pid, event__handler_t process, 266int event__synthesize_thread_map(struct thread_map *threads,
267 struct perf_session *session) 267 event__handler_t process,
268 struct perf_session *session)
268{ 269{
269 event_t *comm_event, *mmap_event; 270 event_t *comm_event, *mmap_event;
270 int err = -1; 271 int err = -1, thread;
271 272
272 comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size); 273 comm_event = malloc(sizeof(comm_event->comm) + session->id_hdr_size);
273 if (comm_event == NULL) 274 if (comm_event == NULL)
@@ -277,8 +278,15 @@ int event__synthesize_thread(pid_t pid, event__handler_t process,
277 if (mmap_event == NULL) 278 if (mmap_event == NULL)
278 goto out_free_comm; 279 goto out_free_comm;
279 280
280 err = __event__synthesize_thread(comm_event, mmap_event, pid, 281 err = 0;
281 process, session); 282 for (thread = 0; thread < threads->nr; ++thread) {
283 if (__event__synthesize_thread(comm_event, mmap_event,
284 threads->map[thread],
285 process, session)) {
286 err = -1;
287 break;
288 }
289 }
282 free(mmap_event); 290 free(mmap_event);
283out_free_comm: 291out_free_comm:
284 free(comm_event); 292 free(comm_event);
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 2b7e91902f10..cc7b52f9b492 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -135,14 +135,16 @@ typedef union event_union {
135void event__print_totals(void); 135void event__print_totals(void);
136 136
137struct perf_session; 137struct perf_session;
138struct thread_map;
138 139
139typedef int (*event__handler_synth_t)(event_t *event, 140typedef int (*event__handler_synth_t)(event_t *event,
140 struct perf_session *session); 141 struct perf_session *session);
141typedef int (*event__handler_t)(event_t *event, struct sample_data *sample, 142typedef int (*event__handler_t)(event_t *event, struct sample_data *sample,
142 struct perf_session *session); 143 struct perf_session *session);
143 144
144int event__synthesize_thread(pid_t pid, event__handler_t process, 145int event__synthesize_thread_map(struct thread_map *threads,
145 struct perf_session *session); 146 event__handler_t process,
147 struct perf_session *session);
146int event__synthesize_threads(event__handler_t process, 148int event__synthesize_threads(event__handler_t process,
147 struct perf_session *session); 149 struct perf_session *session);
148int event__synthesize_kernel_mmap(event__handler_t process, 150int event__synthesize_kernel_mmap(event__handler_t process,
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index f6a929e74981..0866bcdb5e8e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -270,11 +270,15 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
270 const char *name, bool is_kallsyms) 270 const char *name, bool is_kallsyms)
271{ 271{
272 const size_t size = PATH_MAX; 272 const size_t size = PATH_MAX;
273 char *realname = realpath(name, NULL), 273 char *realname, *filename = malloc(size),
274 *filename = malloc(size),
275 *linkname = malloc(size), *targetname; 274 *linkname = malloc(size), *targetname;
276 int len, err = -1; 275 int len, err = -1;
277 276
277 if (is_kallsyms)
278 realname = (char *)name;
279 else
280 realname = realpath(name, NULL);
281
278 if (realname == NULL || filename == NULL || linkname == NULL) 282 if (realname == NULL || filename == NULL || linkname == NULL)
279 goto out_free; 283 goto out_free;
280 284
@@ -306,7 +310,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
306 if (symlink(targetname, linkname) == 0) 310 if (symlink(targetname, linkname) == 0)
307 err = 0; 311 err = 0;
308out_free: 312out_free:
309 free(realname); 313 if (!is_kallsyms)
314 free(realname);
310 free(filename); 315 free(filename);
311 free(linkname); 316 free(linkname);
312 return err; 317 return err;
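
The build_id_cache__add_s() change above stops calling realpath() for kallsyms entries: realpath(name, NULL) returns a freshly malloc()ed canonical path only when the name resolves on disk, so a pseudo-name such as "[kernel.kallsyms]" made it return NULL and the whole function bail out. A short standalone demonstration of that behaviour:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *ok  = realpath("/", NULL);                  /* resolves */
        char *bad = realpath("[kernel.kallsyms]", NULL);  /* not a real path */

        printf("\"/\" -> %s\n", ok ? ok : "(null)");
        printf("\"[kernel.kallsyms]\" -> %s\n", bad ? bad : "(null)");

        free(ok);   /* only free what realpath() actually allocated */
        free(bad);  /* free(NULL) is a no-op */
        return 0;
    }
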
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 32f4f1f2f6e4..df51560f16f7 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
585{ 585{
586 struct sort_entry *se; 586 struct sort_entry *se;
587 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; 587 u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us;
588 u64 nr_events;
588 const char *sep = symbol_conf.field_sep; 589 const char *sep = symbol_conf.field_sep;
589 int ret; 590 int ret;
590 591
@@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
593 594
594 if (pair_hists) { 595 if (pair_hists) {
595 period = self->pair ? self->pair->period : 0; 596 period = self->pair ? self->pair->period : 0;
597 nr_events = self->pair ? self->pair->nr_events : 0;
596 total = pair_hists->stats.total_period; 598 total = pair_hists->stats.total_period;
597 period_sys = self->pair ? self->pair->period_sys : 0; 599 period_sys = self->pair ? self->pair->period_sys : 0;
598 period_us = self->pair ? self->pair->period_us : 0; 600 period_us = self->pair ? self->pair->period_us : 0;
@@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
600 period_guest_us = self->pair ? self->pair->period_guest_us : 0; 602 period_guest_us = self->pair ? self->pair->period_guest_us : 0;
601 } else { 603 } else {
602 period = self->period; 604 period = self->period;
605 nr_events = self->nr_events;
603 total = session_total; 606 total = session_total;
604 period_sys = self->period_sys; 607 period_sys = self->period_sys;
605 period_us = self->period_us; 608 period_us = self->period_us;
@@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size,
640 643
641 if (symbol_conf.show_nr_samples) { 644 if (symbol_conf.show_nr_samples) {
642 if (sep) 645 if (sep)
643 ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); 646 ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events);
644 else 647 else
645 ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); 648 ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events);
646 } 649 }
647 650
648 if (pair_hists) { 651 if (pair_hists) {
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
index fb737fe9be91..96c866045d60 100644
--- a/tools/perf/util/svghelper.c
+++ b/tools/perf/util/svghelper.c
@@ -456,9 +456,9 @@ void svg_legenda(void)
456 return; 456 return;
457 457
458 svg_legenda_box(0, "Running", "sample"); 458 svg_legenda_box(0, "Running", "sample");
459 svg_legenda_box(100, "Idle","rect.c1"); 459 svg_legenda_box(100, "Idle","c1");
460 svg_legenda_box(200, "Deeper Idle", "rect.c3"); 460 svg_legenda_box(200, "Deeper Idle", "c3");
461 svg_legenda_box(350, "Deepest Idle", "rect.c6"); 461 svg_legenda_box(350, "Deepest Idle", "c6");
462 svg_legenda_box(550, "Sleeping", "process2"); 462 svg_legenda_box(550, "Sleeping", "process2");
463 svg_legenda_box(650, "Waiting for cpu", "waiting"); 463 svg_legenda_box(650, "Waiting for cpu", "waiting");
464 svg_legenda_box(800, "Blocked on IO", "blocked"); 464 svg_legenda_box(800, "Blocked on IO", "blocked");
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 7821d0e6866f..b1bf490aff88 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -1836,7 +1836,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map,
1836 int err = -1, fd; 1836 int err = -1, fd;
1837 char symfs_vmlinux[PATH_MAX]; 1837 char symfs_vmlinux[PATH_MAX];
1838 1838
1839 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s", 1839 snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s",
1840 symbol_conf.symfs, vmlinux); 1840 symbol_conf.symfs, vmlinux);
1841 fd = open(symfs_vmlinux, O_RDONLY); 1841 fd = open(symfs_vmlinux, O_RDONLY);
1842 if (fd < 0) 1842 if (fd < 0)
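
The dso__load_vmlinux() change above drops the '/' from the format string: with the default empty symbol_conf.symfs prefix, "%s/%s" turns a relative vmlinux path into one rooted at '/', so the file is opened from the wrong place. A two-printf demonstration:

    #include <stdio.h>

    int main(void)
    {
        char buf[64];
        const char *symfs   = "";          /* default: no --symfs prefix */
        const char *vmlinux = "vmlinux";   /* e.g. a relative path */

        snprintf(buf, sizeof(buf), "%s/%s", symfs, vmlinux);
        printf("old format: %s\n", buf);   /* "/vmlinux" - wrong location */

        snprintf(buf, sizeof(buf), "%s%s", symfs, vmlinux);
        printf("new format: %s\n", buf);   /* "vmlinux"  - as given */
        return 0;
    }
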
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index 4c6983de6fd9..362a0cb448db 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -72,7 +72,7 @@ int need_reinitialize;
72 72
73int num_cpus; 73int num_cpus;
74 74
75typedef struct per_cpu_counters { 75struct counters {
76 unsigned long long tsc; /* per thread */ 76 unsigned long long tsc; /* per thread */
77 unsigned long long aperf; /* per thread */ 77 unsigned long long aperf; /* per thread */
78 unsigned long long mperf; /* per thread */ 78 unsigned long long mperf; /* per thread */
@@ -88,13 +88,13 @@ typedef struct per_cpu_counters {
88 int pkg; 88 int pkg;
89 int core; 89 int core;
90 int cpu; 90 int cpu;
91 struct per_cpu_counters *next; 91 struct counters *next;
92} PCC; 92};
93 93
94PCC *pcc_even; 94struct counters *cnt_even;
95PCC *pcc_odd; 95struct counters *cnt_odd;
96PCC *pcc_delta; 96struct counters *cnt_delta;
97PCC *pcc_average; 97struct counters *cnt_average;
98struct timeval tv_even; 98struct timeval tv_even;
99struct timeval tv_odd; 99struct timeval tv_odd;
100struct timeval tv_delta; 100struct timeval tv_delta;
@@ -125,7 +125,7 @@ unsigned long long get_msr(int cpu, off_t offset)
125 return msr; 125 return msr;
126} 126}
127 127
128void print_header() 128void print_header(void)
129{ 129{
130 if (show_pkg) 130 if (show_pkg)
131 fprintf(stderr, "pkg "); 131 fprintf(stderr, "pkg ");
@@ -160,39 +160,39 @@ void print_header()
160 putc('\n', stderr); 160 putc('\n', stderr);
161} 161}
162 162
163void dump_pcc(PCC *pcc) 163void dump_cnt(struct counters *cnt)
164{ 164{
165 fprintf(stderr, "package: %d ", pcc->pkg); 165 fprintf(stderr, "package: %d ", cnt->pkg);
166 fprintf(stderr, "core:: %d ", pcc->core); 166 fprintf(stderr, "core:: %d ", cnt->core);
167 fprintf(stderr, "CPU: %d ", pcc->cpu); 167 fprintf(stderr, "CPU: %d ", cnt->cpu);
168 fprintf(stderr, "TSC: %016llX\n", pcc->tsc); 168 fprintf(stderr, "TSC: %016llX\n", cnt->tsc);
169 fprintf(stderr, "c3: %016llX\n", pcc->c3); 169 fprintf(stderr, "c3: %016llX\n", cnt->c3);
170 fprintf(stderr, "c6: %016llX\n", pcc->c6); 170 fprintf(stderr, "c6: %016llX\n", cnt->c6);
171 fprintf(stderr, "c7: %016llX\n", pcc->c7); 171 fprintf(stderr, "c7: %016llX\n", cnt->c7);
172 fprintf(stderr, "aperf: %016llX\n", pcc->aperf); 172 fprintf(stderr, "aperf: %016llX\n", cnt->aperf);
173 fprintf(stderr, "pc2: %016llX\n", pcc->pc2); 173 fprintf(stderr, "pc2: %016llX\n", cnt->pc2);
174 fprintf(stderr, "pc3: %016llX\n", pcc->pc3); 174 fprintf(stderr, "pc3: %016llX\n", cnt->pc3);
175 fprintf(stderr, "pc6: %016llX\n", pcc->pc6); 175 fprintf(stderr, "pc6: %016llX\n", cnt->pc6);
176 fprintf(stderr, "pc7: %016llX\n", pcc->pc7); 176 fprintf(stderr, "pc7: %016llX\n", cnt->pc7);
177 fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, pcc->extra_msr); 177 fprintf(stderr, "msr0x%x: %016llX\n", extra_msr_offset, cnt->extra_msr);
178} 178}
179 179
180void dump_list(PCC *pcc) 180void dump_list(struct counters *cnt)
181{ 181{
182 printf("dump_list 0x%p\n", pcc); 182 printf("dump_list 0x%p\n", cnt);
183 183
184 for (; pcc; pcc = pcc->next) 184 for (; cnt; cnt = cnt->next)
185 dump_pcc(pcc); 185 dump_cnt(cnt);
186} 186}
187 187
188void print_pcc(PCC *p) 188void print_cnt(struct counters *p)
189{ 189{
190 double interval_float; 190 double interval_float;
191 191
192 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0; 192 interval_float = tv_delta.tv_sec + tv_delta.tv_usec/1000000.0;
193 193
194 /* topology columns, print blanks on 1st (average) line */ 194 /* topology columns, print blanks on 1st (average) line */
195 if (p == pcc_average) { 195 if (p == cnt_average) {
196 if (show_pkg) 196 if (show_pkg)
197 fprintf(stderr, " "); 197 fprintf(stderr, " ");
198 if (show_core) 198 if (show_core)
@@ -262,24 +262,24 @@ void print_pcc(PCC *p)
262 putc('\n', stderr); 262 putc('\n', stderr);
263} 263}
264 264
265void print_counters(PCC *cnt) 265void print_counters(struct counters *counters)
266{ 266{
267 PCC *pcc; 267 struct counters *cnt;
268 268
269 print_header(); 269 print_header();
270 270
271 if (num_cpus > 1) 271 if (num_cpus > 1)
272 print_pcc(pcc_average); 272 print_cnt(cnt_average);
273 273
274 for (pcc = cnt; pcc != NULL; pcc = pcc->next) 274 for (cnt = counters; cnt != NULL; cnt = cnt->next)
275 print_pcc(pcc); 275 print_cnt(cnt);
276 276
277} 277}
278 278
279#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after)) 279#define SUBTRACT_COUNTER(after, before, delta) (delta = (after - before), (before > after))
280 280
281 281int compute_delta(struct counters *after,
282int compute_delta(PCC *after, PCC *before, PCC *delta) 282 struct counters *before, struct counters *delta)
283{ 283{
284 int errors = 0; 284 int errors = 0;
285 int perf_err = 0; 285 int perf_err = 0;
@@ -391,20 +391,20 @@ int compute_delta(PCC *after, PCC *before, PCC *delta)
391 delta->extra_msr = after->extra_msr; 391 delta->extra_msr = after->extra_msr;
392 if (errors) { 392 if (errors) {
393 fprintf(stderr, "ERROR cpu%d before:\n", before->cpu); 393 fprintf(stderr, "ERROR cpu%d before:\n", before->cpu);
394 dump_pcc(before); 394 dump_cnt(before);
395 fprintf(stderr, "ERROR cpu%d after:\n", before->cpu); 395 fprintf(stderr, "ERROR cpu%d after:\n", before->cpu);
396 dump_pcc(after); 396 dump_cnt(after);
397 errors = 0; 397 errors = 0;
398 } 398 }
399 } 399 }
400 return 0; 400 return 0;
401} 401}
402 402
403void compute_average(PCC *delta, PCC *avg) 403void compute_average(struct counters *delta, struct counters *avg)
404{ 404{
405 PCC *sum; 405 struct counters *sum;
406 406
407 sum = calloc(1, sizeof(PCC)); 407 sum = calloc(1, sizeof(struct counters));
408 if (sum == NULL) { 408 if (sum == NULL) {
409 perror("calloc sum"); 409 perror("calloc sum");
410 exit(1); 410 exit(1);
@@ -438,35 +438,34 @@ void compute_average(PCC *delta, PCC *avg)
438 free(sum); 438 free(sum);
439} 439}
440 440
441void get_counters(PCC *pcc) 441void get_counters(struct counters *cnt)
442{ 442{
443 for ( ; pcc; pcc = pcc->next) { 443 for ( ; cnt; cnt = cnt->next) {
444 pcc->tsc = get_msr(pcc->cpu, MSR_TSC); 444 cnt->tsc = get_msr(cnt->cpu, MSR_TSC);
445 if (do_nhm_cstates) 445 if (do_nhm_cstates)
446 pcc->c3 = get_msr(pcc->cpu, MSR_CORE_C3_RESIDENCY); 446 cnt->c3 = get_msr(cnt->cpu, MSR_CORE_C3_RESIDENCY);
447 if (do_nhm_cstates) 447 if (do_nhm_cstates)
448 pcc->c6 = get_msr(pcc->cpu, MSR_CORE_C6_RESIDENCY); 448 cnt->c6 = get_msr(cnt->cpu, MSR_CORE_C6_RESIDENCY);
449 if (do_snb_cstates) 449 if (do_snb_cstates)
450 pcc->c7 = get_msr(pcc->cpu, MSR_CORE_C7_RESIDENCY); 450 cnt->c7 = get_msr(cnt->cpu, MSR_CORE_C7_RESIDENCY);
451 if (has_aperf) 451 if (has_aperf)
452 pcc->aperf = get_msr(pcc->cpu, MSR_APERF); 452 cnt->aperf = get_msr(cnt->cpu, MSR_APERF);
453 if (has_aperf) 453 if (has_aperf)
454 pcc->mperf = get_msr(pcc->cpu, MSR_MPERF); 454 cnt->mperf = get_msr(cnt->cpu, MSR_MPERF);
455 if (do_snb_cstates) 455 if (do_snb_cstates)
456 pcc->pc2 = get_msr(pcc->cpu, MSR_PKG_C2_RESIDENCY); 456 cnt->pc2 = get_msr(cnt->cpu, MSR_PKG_C2_RESIDENCY);
457 if (do_nhm_cstates) 457 if (do_nhm_cstates)
458 pcc->pc3 = get_msr(pcc->cpu, MSR_PKG_C3_RESIDENCY); 458 cnt->pc3 = get_msr(cnt->cpu, MSR_PKG_C3_RESIDENCY);
459 if (do_nhm_cstates) 459 if (do_nhm_cstates)
460 pcc->pc6 = get_msr(pcc->cpu, MSR_PKG_C6_RESIDENCY); 460 cnt->pc6 = get_msr(cnt->cpu, MSR_PKG_C6_RESIDENCY);
461 if (do_snb_cstates) 461 if (do_snb_cstates)
462 pcc->pc7 = get_msr(pcc->cpu, MSR_PKG_C7_RESIDENCY); 462 cnt->pc7 = get_msr(cnt->cpu, MSR_PKG_C7_RESIDENCY);
463 if (extra_msr_offset) 463 if (extra_msr_offset)
464 pcc->extra_msr = get_msr(pcc->cpu, extra_msr_offset); 464 cnt->extra_msr = get_msr(cnt->cpu, extra_msr_offset);
465 } 465 }
466} 466}
467 467
468 468void print_nehalem_info(void)
469void print_nehalem_info()
470{ 469{
471 unsigned long long msr; 470 unsigned long long msr;
472 unsigned int ratio; 471 unsigned int ratio;
@@ -514,38 +513,38 @@ void print_nehalem_info()
514 513
515} 514}
516 515
517void free_counter_list(PCC *list) 516void free_counter_list(struct counters *list)
518{ 517{
519 PCC *p; 518 struct counters *p;
520 519
521 for (p = list; p; ) { 520 for (p = list; p; ) {
522 PCC *free_me; 521 struct counters *free_me;
523 522
524 free_me = p; 523 free_me = p;
525 p = p->next; 524 p = p->next;
526 free(free_me); 525 free(free_me);
527 } 526 }
528 return;
529} 527}
530 528
531void free_all_counters(void) 529void free_all_counters(void)
532{ 530{
533 free_counter_list(pcc_even); 531 free_counter_list(cnt_even);
534 pcc_even = NULL; 532 cnt_even = NULL;
535 533
536 free_counter_list(pcc_odd); 534 free_counter_list(cnt_odd);
537 pcc_odd = NULL; 535 cnt_odd = NULL;
538 536
539 free_counter_list(pcc_delta); 537 free_counter_list(cnt_delta);
540 pcc_delta = NULL; 538 cnt_delta = NULL;
541 539
542 free_counter_list(pcc_average); 540 free_counter_list(cnt_average);
543 pcc_average = NULL; 541 cnt_average = NULL;
544} 542}
545 543
546void insert_cpu_counters(PCC **list, PCC *new) 544void insert_counters(struct counters **list,
545 struct counters *new)
547{ 546{
548 PCC *prev; 547 struct counters *prev;
549 548
550 /* 549 /*
551 * list was empty 550 * list was empty
@@ -594,18 +593,16 @@ void insert_cpu_counters(PCC **list, PCC *new)
594 */ 593 */
595 new->next = prev->next; 594 new->next = prev->next;
596 prev->next = new; 595 prev->next = new;
597
598 return;
599} 596}
600 597
601void alloc_new_cpu_counters(int pkg, int core, int cpu) 598void alloc_new_counters(int pkg, int core, int cpu)
602{ 599{
603 PCC *new; 600 struct counters *new;
604 601
605 if (verbose > 1) 602 if (verbose > 1)
606 printf("pkg%d core%d, cpu%d\n", pkg, core, cpu); 603 printf("pkg%d core%d, cpu%d\n", pkg, core, cpu);
607 604
608 new = (PCC *)calloc(1, sizeof(PCC)); 605 new = (struct counters *)calloc(1, sizeof(struct counters));
609 if (new == NULL) { 606 if (new == NULL) {
610 perror("calloc"); 607 perror("calloc");
611 exit(1); 608 exit(1);
@@ -613,9 +610,10 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
613 new->pkg = pkg; 610 new->pkg = pkg;
614 new->core = core; 611 new->core = core;
615 new->cpu = cpu; 612 new->cpu = cpu;
616 insert_cpu_counters(&pcc_odd, new); 613 insert_counters(&cnt_odd, new);
617 614
618 new = (PCC *)calloc(1, sizeof(PCC)); 615 new = (struct counters *)calloc(1,
616 sizeof(struct counters));
619 if (new == NULL) { 617 if (new == NULL) {
620 perror("calloc"); 618 perror("calloc");
621 exit(1); 619 exit(1);
@@ -623,9 +621,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
623 new->pkg = pkg; 621 new->pkg = pkg;
624 new->core = core; 622 new->core = core;
625 new->cpu = cpu; 623 new->cpu = cpu;
626 insert_cpu_counters(&pcc_even, new); 624 insert_counters(&cnt_even, new);
627 625
628 new = (PCC *)calloc(1, sizeof(PCC)); 626 new = (struct counters *)calloc(1, sizeof(struct counters));
629 if (new == NULL) { 627 if (new == NULL) {
630 perror("calloc"); 628 perror("calloc");
631 exit(1); 629 exit(1);
@@ -633,9 +631,9 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
633 new->pkg = pkg; 631 new->pkg = pkg;
634 new->core = core; 632 new->core = core;
635 new->cpu = cpu; 633 new->cpu = cpu;
636 insert_cpu_counters(&pcc_delta, new); 634 insert_counters(&cnt_delta, new);
637 635
638 new = (PCC *)calloc(1, sizeof(PCC)); 636 new = (struct counters *)calloc(1, sizeof(struct counters));
639 if (new == NULL) { 637 if (new == NULL) {
640 perror("calloc"); 638 perror("calloc");
641 exit(1); 639 exit(1);
@@ -643,7 +641,7 @@ void alloc_new_cpu_counters(int pkg, int core, int cpu)
643 new->pkg = pkg; 641 new->pkg = pkg;
644 new->core = core; 642 new->core = core;
645 new->cpu = cpu; 643 new->cpu = cpu;
646 pcc_average = new; 644 cnt_average = new;
647} 645}
648 646
649int get_physical_package_id(int cpu) 647int get_physical_package_id(int cpu)
@@ -719,7 +717,7 @@ void re_initialize(void)
719{ 717{
720 printf("turbostat: topology changed, re-initializing.\n"); 718 printf("turbostat: topology changed, re-initializing.\n");
721 free_all_counters(); 719 free_all_counters();
722 num_cpus = for_all_cpus(alloc_new_cpu_counters); 720 num_cpus = for_all_cpus(alloc_new_counters);
723 need_reinitialize = 0; 721 need_reinitialize = 0;
724 printf("num_cpus is now %d\n", num_cpus); 722 printf("num_cpus is now %d\n", num_cpus);
725} 723}
@@ -728,7 +726,7 @@ void dummy(int pkg, int core, int cpu) { return; }
728/* 726/*
729 * check to see if a cpu came on-line 727 * check to see if a cpu came on-line
730 */ 728 */
731void verify_num_cpus() 729void verify_num_cpus(void)
732{ 730{
733 int new_num_cpus; 731 int new_num_cpus;
734 732
@@ -740,14 +738,12 @@ void verify_num_cpus()
740 num_cpus, new_num_cpus); 738 num_cpus, new_num_cpus);
741 need_reinitialize = 1; 739 need_reinitialize = 1;
742 } 740 }
743
744 return;
745} 741}
746 742
747void turbostat_loop() 743void turbostat_loop()
748{ 744{
749restart: 745restart:
750 get_counters(pcc_even); 746 get_counters(cnt_even);
751 gettimeofday(&tv_even, (struct timezone *)NULL); 747 gettimeofday(&tv_even, (struct timezone *)NULL);
752 748
753 while (1) { 749 while (1) {
@@ -757,24 +753,24 @@ restart:
757 goto restart; 753 goto restart;
758 } 754 }
759 sleep(interval_sec); 755 sleep(interval_sec);
760 get_counters(pcc_odd); 756 get_counters(cnt_odd);
761 gettimeofday(&tv_odd, (struct timezone *)NULL); 757 gettimeofday(&tv_odd, (struct timezone *)NULL);
762 758
763 compute_delta(pcc_odd, pcc_even, pcc_delta); 759 compute_delta(cnt_odd, cnt_even, cnt_delta);
764 timersub(&tv_odd, &tv_even, &tv_delta); 760 timersub(&tv_odd, &tv_even, &tv_delta);
765 compute_average(pcc_delta, pcc_average); 761 compute_average(cnt_delta, cnt_average);
766 print_counters(pcc_delta); 762 print_counters(cnt_delta);
767 if (need_reinitialize) { 763 if (need_reinitialize) {
768 re_initialize(); 764 re_initialize();
769 goto restart; 765 goto restart;
770 } 766 }
771 sleep(interval_sec); 767 sleep(interval_sec);
772 get_counters(pcc_even); 768 get_counters(cnt_even);
773 gettimeofday(&tv_even, (struct timezone *)NULL); 769 gettimeofday(&tv_even, (struct timezone *)NULL);
774 compute_delta(pcc_even, pcc_odd, pcc_delta); 770 compute_delta(cnt_even, cnt_odd, cnt_delta);
775 timersub(&tv_even, &tv_odd, &tv_delta); 771 timersub(&tv_even, &tv_odd, &tv_delta);
776 compute_average(pcc_delta, pcc_average); 772 compute_average(cnt_delta, cnt_average);
777 print_counters(pcc_delta); 773 print_counters(cnt_delta);
778 } 774 }
779} 775}
780 776
@@ -892,7 +888,7 @@ void check_cpuid()
892 * this check is valid for both Intel and AMD 888 * this check is valid for both Intel and AMD
893 */ 889 */
894 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007)); 890 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x80000007));
895 has_invariant_tsc = edx && (1 << 8); 891 has_invariant_tsc = edx & (1 << 8);
896 892
897 if (!has_invariant_tsc) { 893 if (!has_invariant_tsc) {
898 fprintf(stderr, "No invariant TSC\n"); 894 fprintf(stderr, "No invariant TSC\n");
@@ -905,7 +901,7 @@ void check_cpuid()
905 */ 901 */
906 902
907 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6)); 903 asm("cpuid" : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx) : "a" (0x6));
908 has_aperf = ecx && (1 << 0); 904 has_aperf = ecx & (1 << 0);
909 if (!has_aperf) { 905 if (!has_aperf) {
910 fprintf(stderr, "No APERF MSR\n"); 906 fprintf(stderr, "No APERF MSR\n");
911 exit(1); 907 exit(1);
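
The check_cpuid() hunk above replaces logical AND with bitwise AND when testing CPUID feature bits: edx && (1 << 8) evaluates to 1 whenever edx is non-zero at all, so the invariant-TSC and APERF checks could never report a missing feature. A tiny demonstration of the difference:

    #include <stdio.h>

    int main(void)
    {
        unsigned int edx = 0x00000001;  /* invariant-TSC bit (bit 8) NOT set */

        /* Logical AND: any non-zero edx makes this 1, regardless of bit 8. */
        printf("edx && (1 << 8) = %d\n", edx && (1 << 8));

        /* Bitwise AND: actually tests bit 8, as the fix above intends. */
        printf("edx &  (1 << 8) = %d\n", (edx & (1 << 8)) != 0);
        return 0;
    }
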
@@ -952,7 +948,7 @@ void turbostat_init()
952 check_dev_msr(); 948 check_dev_msr();
953 check_super_user(); 949 check_super_user();
954 950
955 num_cpus = for_all_cpus(alloc_new_cpu_counters); 951 num_cpus = for_all_cpus(alloc_new_counters);
956 952
957 if (verbose) 953 if (verbose)
958 print_nehalem_info(); 954 print_nehalem_info();
@@ -962,7 +958,7 @@ int fork_it(char **argv)
962{ 958{
963 int retval; 959 int retval;
964 pid_t child_pid; 960 pid_t child_pid;
965 get_counters(pcc_even); 961 get_counters(cnt_even);
966 gettimeofday(&tv_even, (struct timezone *)NULL); 962 gettimeofday(&tv_even, (struct timezone *)NULL);
967 963
968 child_pid = fork(); 964 child_pid = fork();
@@ -985,14 +981,14 @@ int fork_it(char **argv)
985 exit(1); 981 exit(1);
986 } 982 }
987 } 983 }
988 get_counters(pcc_odd); 984 get_counters(cnt_odd);
989 gettimeofday(&tv_odd, (struct timezone *)NULL); 985 gettimeofday(&tv_odd, (struct timezone *)NULL);
990 retval = compute_delta(pcc_odd, pcc_even, pcc_delta); 986 retval = compute_delta(cnt_odd, cnt_even, cnt_delta);
991 987
992 timersub(&tv_odd, &tv_even, &tv_delta); 988 timersub(&tv_odd, &tv_even, &tv_delta);
993 compute_average(pcc_delta, pcc_average); 989 compute_average(cnt_delta, cnt_average);
994 if (!retval) 990 if (!retval)
995 print_counters(pcc_delta); 991 print_counters(cnt_delta);
996 992
997 fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);; 993 fprintf(stderr, "%.6f sec\n", tv_delta.tv_sec + tv_delta.tv_usec/1000000.0);;
998 994