-rw-r--r--Documentation/devicetree/bindings/net/macb.txt2
-rw-r--r--Documentation/devicetree/bindings/video/exynos_hdmi.txt (renamed from Documentation/devicetree/bindings/drm/exynos/hdmi.txt)0
-rw-r--r--Documentation/devicetree/bindings/video/exynos_hdmiddc.txt (renamed from Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt)0
-rw-r--r--Documentation/devicetree/bindings/video/exynos_hdmiphy.txt (renamed from Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt)0
-rw-r--r--Documentation/devicetree/bindings/video/exynos_mixer.txt (renamed from Documentation/devicetree/bindings/drm/exynos/mixer.txt)0
-rw-r--r--Documentation/devicetree/bindings/video/simple-framebuffer.txt25
-rw-r--r--Documentation/devicetree/usage-model.txt8
-rw-r--r--Documentation/kernel-parameters.txt21
-rw-r--r--Documentation/kernel-per-CPU-kthreads.txt202
-rw-r--r--Documentation/power/devices.txt15
-rw-r--r--Documentation/power/interface.txt4
-rw-r--r--Documentation/power/notifiers.txt6
-rw-r--r--Documentation/power/states.txt30
-rw-r--r--Documentation/rapidio/rapidio.txt128
-rw-r--r--Documentation/rapidio/sysfs.txt17
-rw-r--r--MAINTAINERS43
-rw-r--r--Makefile2
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/arc/boot/dts/abilis_tb100_dvk.dts2
-rw-r--r--arch/arc/boot/dts/abilis_tb101_dvk.dts2
-rw-r--r--arch/arc/boot/dts/abilis_tb10x.dtsi6
-rw-r--r--arch/arc/include/asm/cacheflush.h6
-rw-r--r--arch/arc/include/asm/page.h9
-rw-r--r--arch/arc/include/asm/pgtable.h26
-rw-r--r--arch/arc/include/asm/tlb.h2
-rw-r--r--arch/arc/mm/cache_arc700.c23
-rw-r--r--arch/arc/mm/tlb.c3
-rw-r--r--arch/arc/mm/tlbex.S6
-rw-r--r--arch/arc/plat-tb10x/tb10x.c26
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/Makefile2
-rw-r--r--arch/arm/boot/dts/Makefile4
-rw-r--r--arch/arm/boot/dts/armada-370-xp.dtsi3
-rw-r--r--arch/arm/boot/dts/armada-370.dtsi7
-rw-r--r--arch/arm/boot/dts/armada-xp-gp.dts3
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts3
-rw-r--r--arch/arm/boot/dts/armada-xp.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9260.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi1
-rw-r--r--arch/arm/boot/dts/at91sam9x25ek.dts2
-rw-r--r--arch/arm/boot/dts/omap3.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi12
-rw-r--r--arch/arm/boot/dts/sama5d3xcm.dtsi4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts12
-rw-r--r--arch/arm/boot/dts/sun4i-a10-mini-xplus.dts4
-rw-r--r--arch/arm/common/mcpm_platsmp.c3
-rw-r--r--arch/arm/configs/omap1_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/configs/tegra_defconfig3
-rw-r--r--arch/arm/crypto/sha1-armv4-large.S2
-rw-r--r--arch/arm/include/asm/cmpxchg.h8
-rw-r--r--arch/arm/include/debug/ux500.S6
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/kernel/smp.c2
-rw-r--r--arch/arm/mach-at91/at91rm9200_time.c7
-rw-r--r--arch/arm/mach-at91/at91sam9n12.c6
-rw-r--r--arch/arm/mach-at91/include/mach/at91_pmc.h6
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c11
-rw-r--r--arch/arm/mach-imx/headsmp.S12
-rw-r--r--arch/arm/mach-imx/platsmp.c14
-rw-r--r--arch/arm/mach-kirkwood/common.c6
-rw-r--r--arch/arm/mach-kirkwood/ts219-setup.c2
-rw-r--r--arch/arm/mach-mvebu/Kconfig1
-rw-r--r--arch/arm/mach-mvebu/armada-370-xp.c7
-rw-r--r--arch/arm/mach-omap1/dma.c1
-rw-r--r--arch/arm/mach-omap2/cclock33xx_data.c26
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c113
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h7
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_33xx_data.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c6
-rw-r--r--arch/arm/mach-omap2/serial.c31
-rw-r--r--arch/arm/mach-orion5x/common.c7
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c6
-rw-r--r--arch/arm/mach-sunxi/Kconfig1
-rw-r--r--arch/arm/mach-tegra/tegra2_emc.c5
-rw-r--r--arch/arm/mach-ux500/Kconfig1
-rw-r--r--arch/arm/mach-ux500/board-mop500.c6
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c6
-rw-r--r--arch/arm/mach-ux500/setup.h2
-rw-r--r--arch/arm/mach-vt8500/vt8500.c1
-rw-r--r--arch/arm/plat-orion/common.c12
-rw-r--r--arch/arm/plat-orion/include/plat/common.h1
-rw-r--r--arch/arm/plat-samsung/adc.c5
-rw-r--r--arch/arm/vfp/entry.S2
-rw-r--r--arch/arm/xen/enlighten.c33
-rw-r--r--arch/arm64/Kconfig2
-rw-r--r--arch/arm64/include/asm/assembler.h2
-rw-r--r--arch/arm64/kernel/debug-monitors.c2
-rw-r--r--arch/arm64/kernel/early_printk.c5
-rw-r--r--arch/arm64/kernel/setup.c12
-rw-r--r--arch/arm64/mm/cache.S2
-rw-r--r--arch/arm64/mm/proc.S3
-rw-r--r--arch/avr32/Kconfig5
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/numnodes.h7
-rw-r--r--arch/avr32/include/asm/param.h9
-rw-r--r--arch/avr32/include/uapi/asm/Kbuild1
-rw-r--r--arch/avr32/include/uapi/asm/param.h18
-rw-r--r--arch/avr32/kernel/module.c2
-rw-r--r--arch/mips/alchemy/board-gpr.c1
-rw-r--r--arch/mips/alchemy/common/time.c1
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/cobalt/reset.c1
-rw-r--r--arch/mips/configs/db1000_defconfig1
-rw-r--r--arch/mips/configs/db1235_defconfig1
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/include/asm/clock.h2
-rw-r--r--arch/mips/include/asm/idle.h23
-rw-r--r--arch/mips/include/asm/io.h2
-rw-r--r--arch/mips/include/asm/kvm_host.h2
-rw-r--r--arch/mips/include/asm/mmu_context.h95
-rw-r--r--arch/mips/include/asm/page.h17
-rw-r--r--arch/mips/include/asm/processor.h1
-rw-r--r--arch/mips/include/uapi/asm/kvm.h (renamed from arch/mips/include/asm/kvm.h)0
-rw-r--r--arch/mips/include/uapi/asm/unistd.h5
-rw-r--r--arch/mips/kernel/Makefile2
-rw-r--r--arch/mips/kernel/cpu-probe.c198
-rw-r--r--arch/mips/kernel/crash_dump.c1
-rw-r--r--arch/mips/kernel/genex.S8
-rw-r--r--arch/mips/kernel/idle.c244
-rw-r--r--arch/mips/kernel/kprobes.c5
-rw-r--r--arch/mips/kernel/proc.c1
-rw-r--r--arch/mips/kernel/process.c53
-rw-r--r--arch/mips/kernel/scall64-64.S1
-rw-r--r--arch/mips/kernel/smp.c1
-rw-r--r--arch/mips/kernel/smtc.c15
-rw-r--r--arch/mips/kernel/traps.c15
-rw-r--r--arch/mips/kvm/kvm_mips_emul.c29
-rw-r--r--arch/mips/kvm/kvm_tlb.c61
-rw-r--r--arch/mips/lantiq/xway/gptu.c6
-rw-r--r--arch/mips/lib/dump_tlb.c5
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c7
-rw-r--r--arch/mips/loongson/common/reset.c1
-rw-r--r--arch/mips/loongson1/common/reset.c1
-rw-r--r--arch/mips/mm/tlb-r3k.c20
-rw-r--r--arch/mips/mm/tlb-r4k.c2
-rw-r--r--arch/mips/mm/tlb-r8k.c2
-rw-r--r--arch/mips/mm/tlbex.c79
-rw-r--r--arch/mips/netlogic/xlp/setup.c1
-rw-r--r--arch/mips/netlogic/xlr/setup.c1
-rw-r--r--arch/mips/pmcs-msp71xx/msp_prom.c2
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c1
-rw-r--r--arch/mips/ralink/dts/rt3050.dtsi10
-rw-r--r--arch/mips/ralink/dts/rt3052_eval.dts4
-rw-r--r--arch/mips/txx9/generic/setup.c1
-rw-r--r--arch/mips/vr41xx/common/pmu.c1
-rw-r--r--arch/mips/wrppmc/reset.c1
-rw-r--r--arch/parisc/Kconfig2
-rw-r--r--arch/parisc/Makefile21
-rw-r--r--arch/parisc/include/asm/assembly.h1
-rw-r--r--arch/parisc/include/asm/hardirq.h10
-rw-r--r--arch/parisc/include/asm/processor.h18
-rw-r--r--arch/parisc/kernel/entry.S174
-rw-r--r--arch/parisc/kernel/hardware.c1
-rw-r--r--arch/parisc/kernel/irq.c122
-rw-r--r--arch/parisc/kernel/pacache.S12
-rw-r--r--arch/parisc/kernel/traps.c1
-rw-r--r--arch/parisc/kernel/unaligned.c3
-rw-r--r--arch/parisc/mm/init.c4
-rw-r--r--arch/powerpc/Kconfig.debug23
-rw-r--r--arch/powerpc/configs/ps3_defconfig1
-rw-r--r--arch/powerpc/include/asm/context_tracking.h10
-rw-r--r--arch/powerpc/include/asm/firmware.h4
-rw-r--r--arch/powerpc/include/asm/hw_irq.h5
-rw-r--r--arch/powerpc/include/asm/opal.h5
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h2
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h2
-rw-r--r--arch/powerpc/include/asm/processor.h6
-rw-r--r--arch/powerpc/include/asm/pte-hash64-64k.h2
-rw-r--r--arch/powerpc/include/asm/rtas.h2
-rw-r--r--arch/powerpc/include/asm/thread_info.h7
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/kernel/asm-offsets.c6
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S8
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S33
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S8
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c4
-rw-r--r--arch/powerpc/kernel/misc_32.S11
-rw-r--r--arch/powerpc/kernel/misc_64.S11
-rw-r--r--arch/powerpc/kernel/pci-common.c12
-rw-r--r--arch/powerpc/kernel/pci_64.c10
-rw-r--r--arch/powerpc/kernel/pci_dn.c8
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c3
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/kernel/ptrace.c5
-rw-r--r--arch/powerpc/kernel/rtas.c113
-rw-r--r--arch/powerpc/kernel/rtas_flash.c10
-rw-r--r--arch/powerpc/kernel/signal.c7
-rw-r--r--arch/powerpc/kernel/traps.c80
-rw-r--r--arch/powerpc/kernel/udbg.c3
-rw-r--r--arch/powerpc/mm/fault.c41
-rw-r--r--arch/powerpc/mm/hash_utils_64.c36
-rw-r--r--arch/powerpc/mm/init_64.c3
-rw-r--r--arch/powerpc/perf/core-book3s.c280
-rw-r--r--arch/powerpc/platforms/Kconfig2
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig1
-rw-r--r--arch/powerpc/platforms/powernv/opal.c30
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c36
-rw-r--r--arch/powerpc/platforms/powernv/pci.c18
-rw-r--r--arch/powerpc/platforms/powernv/pci.h2
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c16
-rw-r--r--arch/powerpc/platforms/powernv/smp.c62
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/msi.c75
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c22
-rw-r--r--arch/powerpc/platforms/wsp/ics.c2
-rw-r--r--arch/powerpc/sysdev/Makefile2
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c2
-rw-r--r--arch/powerpc/sysdev/mpic.c2
-rw-r--r--arch/powerpc/sysdev/udbg_memcons.c105
-rw-r--r--arch/powerpc/sysdev/xics/ics-opal.c2
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/ftrace.h12
-rw-r--r--arch/s390/include/asm/page.h20
-rw-r--r--arch/s390/include/asm/pgtable.h4
-rw-r--r--arch/s390/kernel/dis.c2
-rw-r--r--arch/s390/kernel/ftrace.c9
-rw-r--r--arch/s390/kernel/mcount.S2
-rw-r--r--arch/s390/kernel/mcount64.S2
-rw-r--r--arch/s390/kernel/smp.c2
-rw-r--r--arch/s390/mm/pgtable.c3
-rw-r--r--arch/score/mm/init.c2
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/microcode_intel_early.c5
-rw-r--r--arch/x86/kernel/process.c5
-rw-r--r--arch/x86/mm/init.c19
-rw-r--r--arch/x86/pci/mrst.c10
-rw-r--r--drivers/acpi/Makefile3
-rw-r--r--drivers/acpi/ac.c33
-rw-r--r--drivers/acpi/acpi_lpss.c26
-rw-r--r--drivers/acpi/csrt.c159
-rw-r--r--drivers/acpi/device_pm.c126
-rw-r--r--drivers/acpi/ec.c4
-rw-r--r--drivers/acpi/internal.h1
-rw-r--r--drivers/acpi/pci_root.c4
-rw-r--r--drivers/acpi/processor_driver.c8
-rw-r--r--drivers/acpi/processor_idle.c29
-rw-r--r--drivers/acpi/scan.c4
-rw-r--r--drivers/acpi/video.c8
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/pata_ep93xx.c5
-rw-r--r--drivers/base/bus.c1
-rw-r--r--drivers/base/core.c6
-rw-r--r--drivers/base/power/common.c12
-rw-r--r--drivers/bcma/scan.c2
-rw-r--r--drivers/block/brd.c4
-rw-r--r--drivers/block/rbd.c935
-rw-r--r--drivers/block/xsysace.c3
-rw-r--r--drivers/char/hw_random/mxc-rnga.c6
-rw-r--r--drivers/char/hw_random/omap-rng.c5
-rw-r--r--drivers/char/ipmi/ipmi_bt_sm.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c14
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c3
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c16
-rw-r--r--drivers/char/lp.c3
-rw-r--r--drivers/char/random.c54
-rw-r--r--drivers/char/ttyprintk.c2
-rw-r--r--drivers/clk/tegra/clk-tegra20.c11
-rw-r--r--drivers/clk/x86/clk-lpt.c15
-rw-r--r--drivers/cpufreq/Kconfig2
-rw-r--r--drivers/cpufreq/Kconfig.arm15
-rw-r--r--drivers/cpufreq/Kconfig.x862
-rw-r--r--drivers/cpufreq/arm_big_little.c7
-rw-r--r--drivers/cpufreq/arm_big_little.h5
-rw-r--r--drivers/cpufreq/arm_big_little_dt.c94
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c27
-rw-r--r--drivers/cpufreq/cpufreq.c19
-rw-r--r--drivers/cpufreq/cpufreq_governor.c11
-rw-r--r--drivers/cpufreq/cpufreq_governor.h1
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c1
-rw-r--r--drivers/cpufreq/cpufreq_stats.c7
-rw-r--r--drivers/cpufreq/intel_pstate.c123
-rw-r--r--drivers/cpufreq/kirkwood-cpufreq.c4
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c2
-rw-r--r--drivers/crypto/nx/nx-aes-cbc.c1
-rw-r--r--drivers/crypto/nx/nx-aes-ecb.c1
-rw-r--r--drivers/crypto/nx/nx-aes-gcm.c2
-rw-r--r--drivers/crypto/nx/nx-sha256.c8
-rw-r--r--drivers/crypto/nx/nx-sha512.c7
-rw-r--r--drivers/crypto/nx/nx.c38
-rw-r--r--drivers/dma/acpi-dma.c172
-rw-r--r--drivers/dma/tegra20-apb-dma.c5
-rw-r--r--drivers/edac/amd64_edac_inj.c4
-rw-r--r--drivers/gpio/Kconfig2
-rw-r--r--drivers/gpio/gpio-langwell.c17
-rw-r--r--drivers/gpio/gpio-ml-ioh.c3
-rw-r--r--drivers/gpio/gpio-mvebu.c5
-rw-r--r--drivers/gpio/gpio-mxs.c3
-rw-r--r--drivers/gpio/gpio-omap.c48
-rw-r--r--drivers/gpio/gpio-pch.c3
-rw-r--r--drivers/gpio/gpio-sch.c6
-rw-r--r--drivers/gpio/gpio-tegra.c5
-rw-r--r--drivers/gpio/gpio-viperboard.c3
-rw-r--r--drivers/gpu/drm/drm_crtc.c5
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c27
-rw-r--r--drivers/gpu/drm/drm_drv.c20
-rw-r--r--drivers/gpu/drm/drm_encoder_slave.c6
-rw-r--r--drivers/gpu/drm/drm_mm.c34
-rw-r--r--drivers/gpu/drm/drm_modes.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c15
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c5
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c77
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c16
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c44
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c90
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c25
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c29
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c17
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.h7
-rw-r--r--drivers/gpu/drm/qxl/qxl_ioctl.c1
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c31
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c7
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c2
-rw-r--r--drivers/gpu/drm/radeon/si.c370
-rw-r--r--drivers/gpu/drm/radeon/sid.h1
-rw-r--r--drivers/gpu/host1x/drm/dc.c5
-rw-r--r--drivers/hv/channel_mgmt.c2
-rw-r--r--drivers/hwmon/abituguru.c16
-rw-r--r--drivers/hwmon/iio_hwmon.c8
-rw-r--r--drivers/hwmon/nct6775.c6
-rw-r--r--drivers/hwmon/tmp401.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c14
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h2
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c6
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c8
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c5
-rw-r--r--drivers/i2c/busses/i2c-sirf.c6
-rw-r--r--drivers/i2c/busses/i2c-tegra.c5
-rw-r--r--drivers/i2c/i2c-core.c3
-rw-r--r--drivers/iio/adc/exynos_adc.c8
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c2
-rw-r--r--drivers/iio/dac/Kconfig6
-rw-r--r--drivers/input/tablet/wacom_wac.c89
-rw-r--r--drivers/input/tablet/wacom_wac.h1
-rw-r--r--drivers/input/touchscreen/egalax_ts.c2
-rw-r--r--drivers/isdn/capi/kcapi.c6
-rw-r--r--drivers/leds/leds-gpio.c9
-rw-r--r--drivers/leds/leds-ot200.c14
-rw-r--r--drivers/lguest/page_tables.c1
-rw-r--r--drivers/md/dm-thin.c4
-rw-r--r--drivers/memory/emif.c6
-rw-r--r--drivers/mfd/Kconfig3
-rw-r--r--drivers/mfd/ab8500-core.c24
-rw-r--r--drivers/mfd/ab8500-debugfs.c16
-rw-r--r--drivers/mfd/ab8500-gpadc.c7
-rw-r--r--drivers/mfd/ab8500-sysctrl.c15
-rw-r--r--drivers/mfd/abx500-core.c2
-rw-r--r--drivers/mfd/cros_ec_spi.c6
-rw-r--r--drivers/mfd/db8500-prcmu.c3
-rw-r--r--drivers/mfd/intel_msic.c5
-rw-r--r--drivers/mfd/si476x-cmd.c152
-rw-r--r--drivers/misc/dummy-irq.c6
-rw-r--r--drivers/misc/mei/bus.c2
-rw-r--r--drivers/misc/vmw_vmci/Kconfig2
-rw-r--r--drivers/misc/vmw_vmci/vmci_queue_pair.c2
-rw-r--r--drivers/mmc/host/mmci.c9
-rw-r--r--drivers/mtd/nand/lpc32xx_mlc.c5
-rw-r--r--drivers/net/bonding/bond_3ad.c21
-rw-r--r--drivers/net/bonding/bond_3ad.h2
-rw-r--r--drivers/net/bonding/bond_main.c26
-rw-r--r--drivers/net/bonding/bond_procfs.c2
-rw-r--r--drivers/net/bonding/bond_sysfs.c13
-rw-r--r--drivers/net/caif/Kconfig2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c25
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c9
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c61
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c5
-rw-r--r--drivers/net/ethernet/cadence/Kconfig3
-rw-r--r--drivers/net/ethernet/cadence/macb.c18
-rw-r--r--drivers/net/ethernet/cadence/macb.h7
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c13
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c18
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c34
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ptp.c1
-rw-r--r--drivers/net/ethernet/ibm/emac/core.c36
-rw-r--r--drivers/net/ethernet/icplus/ipg.h86
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_resources.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c29
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c95
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c24
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c54
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c49
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c111
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c9
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c7
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c1
-rw-r--r--drivers/net/ethernet/realtek/r8169.c41
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c5
-rw-r--r--drivers/net/ethernet/sfc/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h31
-rw-r--r--drivers/net/ethernet/sfc/rx.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig2
-rw-r--r--drivers/net/macvlan.c7
-rw-r--r--drivers/net/ntb_netdev.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/rtl8150.c100
-rw-r--r--drivers/net/usb/usbnet.c2
-rw-r--r--drivers/net/virtio_net.c11
-rw-r--r--drivers/net/vxlan.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h138
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c10
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c18
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c74
-rw-r--r--drivers/net/wireless/b43/dma.c19
-rw-r--r--drivers/net/wireless/b43/dma.h4
-rw-r--r--drivers/net/wireless/b43/main.c43
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c7
-rw-r--r--drivers/net/wireless/iwlegacy/4965-mac.c3
-rw-r--r--drivers/net/wireless/iwlegacy/common.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h27
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c13
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c48
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c18
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c3
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/mwifiex/main.c1
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c21
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.h4
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192cu/sw.c1
-rw-r--r--drivers/ntb/ntb_hw.c10
-rw-r--r--drivers/ntb/ntb_transport.c175
-rw-r--r--drivers/of/base.c4
-rw-r--r--drivers/parisc/superio.c13
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c14
-rw-r--r--drivers/pinctrl/pinctrl-abx500.c30
-rw-r--r--drivers/pinctrl/pinctrl-coh901.c5
-rw-r--r--drivers/pinctrl/pinctrl-exynos5440.c5
-rw-r--r--drivers/pinctrl/pinctrl-lantiq.c3
-rw-r--r--drivers/pinctrl/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/pinctrl-xway.c4
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wm8750.c2
-rw-r--r--drivers/power/Kconfig2
-rw-r--r--drivers/power/pm2301_charger.c2
-rw-r--r--drivers/power/wm831x_backup.c1
-rw-r--r--drivers/pwm/pwm-imx.c5
-rw-r--r--drivers/pwm/pwm-puv3.c5
-rw-r--r--drivers/pwm/pwm-pxa.c5
-rw-r--r--drivers/pwm/pwm-tegra.c5
-rw-r--r--drivers/pwm/pwm-tiecap.c5
-rw-r--r--drivers/pwm/pwm-tiehrpwm.c5
-rw-r--r--drivers/pwm/pwm-tipwmss.c5
-rw-r--r--drivers/pwm/pwm-vt8500.c5
-rw-r--r--drivers/rapidio/Kconfig20
-rw-r--r--drivers/rapidio/Makefile3
-rw-r--r--drivers/rapidio/devices/tsi721.c12
-rw-r--r--drivers/rapidio/rio-driver.c8
-rw-r--r--drivers/rapidio/rio-scan.c190
-rw-r--r--drivers/rapidio/rio-sysfs.c45
-rw-r--r--drivers/rapidio/rio.c246
-rw-r--r--drivers/rapidio/rio.h13
-rw-r--r--drivers/rtc/Kconfig2
-rw-r--r--drivers/rtc/rtc-max8998.c2
-rw-r--r--drivers/rtc/rtc-nuc900.c5
-rw-r--r--drivers/rtc/rtc-omap.c5
-rw-r--r--drivers/rtc/rtc-pl031.c2
-rw-r--r--drivers/rtc/rtc-s3c.c5
-rw-r--r--drivers/rtc/rtc-tegra.c6
-rw-r--r--drivers/s390/block/xpram.c1
-rw-r--r--drivers/s390/cio/chp.c36
-rw-r--r--drivers/s390/cio/chsc.h4
-rw-r--r--drivers/spi/spi-atmel.c51
-rw-r--r--drivers/spi/spi-davinci.c2
-rw-r--r--drivers/spi/spi-tegra20-sflash.c5
-rw-r--r--drivers/spi/spi.c9
-rw-r--r--drivers/staging/Kconfig4
-rw-r--r--drivers/staging/android/logger.c4
-rw-r--r--drivers/staging/android/logger.h2
-rw-r--r--drivers/staging/comedi/Kconfig9
-rw-r--r--drivers/staging/comedi/comedi_buf.c12
-rw-r--r--drivers/staging/comedi/comedi_fops.c3
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.c8
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h1
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c20
-rw-r--r--drivers/staging/dwc2/Kconfig2
-rw-r--r--drivers/staging/dwc2/hcd_intr.c2
-rw-r--r--drivers/staging/dwc2/platform.c13
-rw-r--r--drivers/staging/gdm72xx/Kconfig2
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c5
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c9
-rw-r--r--drivers/staging/imx-drm/Kconfig5
-rw-r--r--drivers/staging/imx-drm/imx-tve.c4
-rw-r--r--drivers/staging/media/solo6x10/Kconfig1
-rw-r--r--drivers/staging/nvec/nvec.c26
-rw-r--r--drivers/staging/nvec/nvec.h5
-rw-r--r--drivers/staging/nvec/nvec_kbd.c10
-rw-r--r--drivers/staging/nvec/nvec_power.c1
-rw-r--r--drivers/staging/nvec/nvec_ps2.c8
-rw-r--r--drivers/staging/sep/Kconfig2
-rw-r--r--drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c6
-rw-r--r--drivers/staging/vt6656/hostap.c2
-rw-r--r--drivers/staging/vt6656/iwctl.c6
-rw-r--r--drivers/staging/zcache/ramster/ramster-howto.txt366
-rw-r--r--drivers/staging/zcache/zcache-main.c8
-rw-r--r--drivers/target/iscsi/iscsi_target.c63
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c7
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.c8
-rw-r--r--drivers/target/iscsi/iscsi_target_parameters.h4
-rw-r--r--drivers/target/target_core_configfs.c11
-rw-r--r--drivers/target/target_core_device.c14
-rw-r--r--drivers/target/target_core_file.c9
-rw-r--r--drivers/target/target_core_iblock.c2
-rw-r--r--drivers/target/target_core_internal.h1
-rw-r--r--drivers/target/target_core_rd.c21
-rw-r--r--drivers/target/target_core_rd.h1
-rw-r--r--drivers/target/target_core_transport.c13
-rw-r--r--drivers/thermal/armada_thermal.c10
-rw-r--r--drivers/thermal/dove_thermal.c4
-rw-r--r--drivers/thermal/exynos_thermal.c5
-rw-r--r--drivers/tty/ehv_bytechan.c1
-rw-r--r--drivers/tty/mxser.c11
-rw-r--r--drivers/tty/n_tty.c8
-rw-r--r--drivers/tty/rocket.c288
-rw-r--r--drivers/tty/serial/8250/8250_dw.c7
-rw-r--r--drivers/tty/serial/amba-pl011.c2
-rw-r--r--drivers/tty/serial/mcf.c4
-rw-r--r--drivers/tty/serial/mpc52xx_uart.c11
-rw-r--r--drivers/tty/serial/nwpserial.c2
-rw-r--r--drivers/tty/serial/omap-serial.c23
-rw-r--r--drivers/tty/serial/samsung.c1
-rw-r--r--drivers/tty/vt/vt.c14
-rw-r--r--drivers/tty/vt/vt_ioctl.c67
-rw-r--r--drivers/uio/Kconfig1
-rw-r--r--drivers/usb/atm/cxacru.c3
-rw-r--r--drivers/usb/chipidea/Kconfig2
-rw-r--r--drivers/usb/chipidea/ci13xxx_imx.c15
-rw-r--r--drivers/usb/chipidea/core.c5
-rw-r--r--drivers/usb/core/Kconfig2
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/Kconfig6
-rw-r--r--drivers/usb/dwc3/dwc3-exynos.c6
-rw-r--r--drivers/usb/gadget/Kconfig1
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c2
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c11
-rw-r--r--drivers/usb/gadget/configfs.c8
-rw-r--r--drivers/usb/gadget/dummy_hcd.c5
-rw-r--r--drivers/usb/gadget/f_ecm.c1
-rw-r--r--drivers/usb/gadget/f_subset.c1
-rw-r--r--drivers/usb/gadget/f_uac2.c2
-rw-r--r--drivers/usb/gadget/fusb300_udc.c4
-rw-r--r--drivers/usb/gadget/imx_udc.c2
-rw-r--r--drivers/usb/gadget/m66592-udc.c4
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c1
-rw-r--r--drivers/usb/gadget/r8a66597-udc.c4
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c3
-rw-r--r--drivers/usb/gadget/zero.c8
-rw-r--r--drivers/usb/host/Kconfig17
-rw-r--r--drivers/usb/host/ehci-atmel.c6
-rw-r--r--drivers/usb/host/ehci-hcd.c17
-rw-r--r--drivers/usb/host/ehci-omap.c8
-rw-r--r--drivers/usb/host/ehci-orion.c6
-rw-r--r--drivers/usb/host/ehci-s5p.c5
-rw-r--r--drivers/usb/host/ehci-spear.c6
-rw-r--r--drivers/usb/host/ehci-tegra.c6
-rw-r--r--drivers/usb/host/isp1760-hcd.c2
-rw-r--r--drivers/usb/host/isp1760-if.c4
-rw-r--r--drivers/usb/host/ohci-at91.c6
-rw-r--r--drivers/usb/host/ohci-exynos.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c34
-rw-r--r--drivers/usb/host/ohci-nxp.c50
-rw-r--r--drivers/usb/host/ohci-omap3.c8
-rw-r--r--drivers/usb/host/ohci-pxa27x.c6
-rw-r--r--drivers/usb/host/ohci-spear.c6
-rw-r--r--drivers/usb/host/oxu210hp-hcd.c2
-rw-r--r--drivers/usb/host/sl811-hcd.c2
-rw-r--r--drivers/usb/host/uhci-hub.c3
-rw-r--r--drivers/usb/host/uhci-platform.c6
-rw-r--r--drivers/usb/host/uhci-q.c2
-rw-r--r--drivers/usb/host/xhci-mem.c17
-rw-r--r--drivers/usb/musb/musb_dsps.c1
-rw-r--r--drivers/usb/musb/omap2430.c3
-rw-r--r--drivers/usb/phy/Kconfig5
-rw-r--r--drivers/usb/phy/phy-ab8500-usb.c2
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c1
-rw-r--r--drivers/usb/phy/phy-gpio-vbus-usb.c3
-rw-r--r--drivers/usb/phy/phy-isp1301.c1
-rw-r--r--drivers/usb/phy/phy-mv-u3d-usb.c5
-rw-r--r--drivers/usb/phy/phy-mv-usb.c3
-rw-r--r--drivers/usb/phy/phy-mxs-usb.c8
-rw-r--r--drivers/usb/phy/phy-nop.c2
-rw-r--r--drivers/usb/phy/phy-samsung-usb2.c5
-rw-r--r--drivers/usb/phy/phy-samsung-usb3.c5
-rw-r--r--drivers/usb/serial/ftdi_sio.c30
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h2
-rw-r--r--drivers/usb/serial/generic.c31
-rw-r--r--drivers/usb/serial/io_ti.c22
-rw-r--r--drivers/usb/serial/option.c13
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c23
-rw-r--r--drivers/usb/serial/usb-serial.c30
-rw-r--r--drivers/usb/storage/realtek_cr.c8
-rw-r--r--drivers/vhost/vringh.c3
-rw-r--r--drivers/video/Kconfig19
-rw-r--r--drivers/video/Makefile1
-rw-r--r--drivers/video/console/Makefile2
-rw-r--r--drivers/video/omap2/dss/hdmi.c4
-rw-r--r--drivers/video/omap2/vrfb.c5
-rw-r--r--drivers/video/simplefb.c234
-rw-r--r--drivers/w1/masters/omap_hdq.c5
-rw-r--r--drivers/watchdog/ath79_wdt.c5
-rw-r--r--drivers/watchdog/davinci_wdt.c5
-rw-r--r--drivers/watchdog/imx2_wdt.c5
-rw-r--r--drivers/xen/Kconfig7
-rw-r--r--drivers/xen/balloon.c3
-rw-r--r--drivers/xen/privcmd.c2
-rw-r--r--drivers/xen/tmem.c87
-rw-r--r--drivers/xen/xen-selfballoon.c47
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c21
-rw-r--r--fs/aio.c7
-rw-r--r--fs/btrfs/backref.c3
-rw-r--r--fs/btrfs/check-integrity.c2
-rw-r--r--fs/btrfs/ctree.c4
-rw-r--r--fs/btrfs/ctree.h8
-rw-r--r--fs/btrfs/delayed-ref.h1
-rw-r--r--fs/btrfs/dev-replace.c5
-rw-r--r--fs/btrfs/disk-io.c52
-rw-r--r--fs/btrfs/extent-tree.c94
-rw-r--r--fs/btrfs/extent_io.c138
-rw-r--r--fs/btrfs/extent_io.h2
-rw-r--r--fs/btrfs/free-space-cache.c43
-rw-r--r--fs/btrfs/free-space-cache.h2
-rw-r--r--fs/btrfs/inode-map.c8
-rw-r--r--fs/btrfs/inode.c81
-rw-r--r--fs/btrfs/ioctl.c10
-rw-r--r--fs/btrfs/raid56.c2
-rw-r--r--fs/btrfs/relocation.c7
-rw-r--r--fs/btrfs/scrub.c10
-rw-r--r--fs/btrfs/super.c1
-rw-r--r--fs/btrfs/volumes.c54
-rw-r--r--fs/btrfs/volumes.h20
-rw-r--r--fs/cifs/inode.c3
-rw-r--r--fs/ext4/ext4.h8
-rw-r--r--fs/ext4/extents.c9
-rw-r--r--fs/ext4/extents_status.c17
-rw-r--r--fs/ext4/extents_status.h3
-rw-r--r--fs/ext4/file.c4
-rw-r--r--fs/ext4/inode.c85
-rw-r--r--fs/ext4/mballoc.c6
-rw-r--r--fs/ext4/page-io.c121
-rw-r--r--fs/fat/inode.c15
-rw-r--r--fs/gfs2/Kconfig2
-rw-r--r--fs/gfs2/lops.c2
-rw-r--r--fs/gfs2/quota.c4
-rw-r--r--fs/gfs2/rgrp.c9
-rw-r--r--fs/hfs/bnode.c6
-rw-r--r--fs/nfs/callback_proc.c2
-rw-r--r--fs/nfs/callback_xdr.c2
-rw-r--r--fs/nfs/nfs4client.c2
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/nfs4session.c4
-rw-r--r--fs/nfs/nfs4session.h13
-rw-r--r--fs/nfs/nfs4state.c15
-rw-r--r--fs/nilfs2/inode.c27
-rw-r--r--fs/ocfs2/extent_map.c2
-rw-r--r--fs/ocfs2/file.c2
-rw-r--r--fs/xfs/xfs_aops.c19
-rw-r--r--fs/xfs/xfs_attr_leaf.c27
-rw-r--r--fs/xfs/xfs_buf.c2
-rw-r--r--fs/xfs/xfs_da_btree.c7
-rw-r--r--fs/xfs/xfs_dir2_leaf.c2
-rw-r--r--fs/xfs/xfs_extfree_item.c5
-rw-r--r--fs/xfs/xfs_log_cil.c2
-rw-r--r--fs/xfs/xfs_vnodeops.c4
-rw-r--r--include/acpi/acpi_bus.h40
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/processor.h10
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_fb_helper.h15
-rw-r--r--include/drm/drm_os_linux.h9
-rw-r--r--include/drm/drm_pciids.h6
-rw-r--r--include/linux/acpi_dma.h4
-rw-r--r--include/linux/bcma/bcma.h5
-rw-r--r--include/linux/brcmphy.h5
-rw-r--r--include/linux/journal-head.h8
-rw-r--r--include/linux/kernel.h5
-rw-r--r--include/linux/kref.h33
-rw-r--r--include/linux/mfd/abx500/ab8500.h2
-rw-r--r--include/linux/mlx4/qp.h29
-rw-r--r--include/linux/netdevice.h11
-rw-r--r--include/linux/of_platform.h5
-rw-r--r--include/linux/pci-acpi.h2
-rw-r--r--include/linux/pinctrl/pinconf-generic.h12
-rw-r--r--include/linux/platform_data/clk-lpss.h5
-rw-r--r--include/linux/platform_data/serial-omap.h2
-rw-r--r--include/linux/printk.h1
-rw-r--r--include/linux/rio.h18
-rw-r--r--include/linux/rio_drv.h1
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/spi/spi.h4
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/uio.h3
-rw-r--r--include/linux/usb/gadget.h5
-rw-r--r--include/linux/usb/serial.h4
-rw-r--r--include/linux/vt_kern.h2
-rw-r--r--include/linux/wait.h16
-rw-r--r--include/net/mac80211.h12
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nfnetlink_log.h3
-rw-r--r--include/net/sock.h12
-rw-r--r--include/target/target_core_base.h5
-rw-r--r--include/trace/events/ext4.h4
-rw-r--r--include/uapi/linux/virtio_console.h2
-rw-r--r--ipc/sem.c27
-rw-r--r--kernel/auditfilter.c3
-rw-r--r--kernel/cpu/idle.c2
-rw-r--r--kernel/events/core.c240
-rw-r--r--kernel/kmod.c5
-rw-r--r--kernel/module.c21
-rw-r--r--kernel/rcutree_plugin.h4
-rw-r--r--kernel/time/Kconfig5
-rw-r--r--kernel/time/tick-broadcast.c10
-rw-r--r--kernel/time/tick-sched.c3
-rw-r--r--kernel/timer.c2
-rw-r--r--kernel/trace/trace_events.c4
-rw-r--r--kernel/trace/trace_events_filter.c4
-rw-r--r--kernel/trace/trace_kprobe.c53
-rw-r--r--kernel/workqueue.c19
-rw-r--r--lib/Makefile2
-rw-r--r--lib/iovec.c53
-rw-r--r--lib/klist.c2
-rw-r--r--lib/mpi/longlong.h5
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/memcontrol.c14
-rw-r--r--mm/memory_hotplug.c9
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/mmu_notifier.c79
-rw-r--r--mm/page_alloc.c2
-rw-r--r--mm/pagewalk.c70
-rw-r--r--net/802/mrp.c4
-rw-r--r--net/batman-adv/distributed-arp-table.c13
-rw-r--r--net/batman-adv/main.c19
-rw-r--r--net/batman-adv/network-coding.c8
-rw-r--r--net/batman-adv/originator.c16
-rw-r--r--net/batman-adv/originator.h1
-rw-r--r--net/batman-adv/soft-interface.c1
-rw-r--r--net/batman-adv/translation-table.c7
-rw-r--r--net/bridge/netfilter/ebt_log.c11
-rw-r--r--net/bridge/netfilter/ebt_ulog.c18
-rw-r--r--net/ceph/osd_client.c5
-rw-r--r--net/core/iovec.c50
-rw-r--r--net/core/sock.c12
-rw-r--r--net/ipv4/ip_gre.c3
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/netfilter/ipt_ULOG.c13
-rw-r--r--net/ipv4/tcp.c29
-rw-r--r--net/ipv4/tcp_input.c23
-rw-r--r--net/ipv4/tcp_output.c10
-rw-r--r--net/ipv6/ip6_gre.c2
-rw-r--r--net/ipv6/ip6_output.c2
-rw-r--r--net/ipv6/tcp_ipv6.c12
-rw-r--r--net/ipv6/udp.c13
-rw-r--r--net/ipv6/udp_impl.h2
-rw-r--r--net/ipv6/udplite.c2
-rw-r--r--net/ipv6/xfrm6_policy.c4
-rw-r--r--net/irda/irlap_frame.c2
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/mlme.c61
-rw-r--r--net/mac80211/rate.c9
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/tkip.c4
-rw-r--r--net/mac80211/util.c7
-rw-r--r--net/netfilter/nf_log.c7
-rw-r--r--net/netfilter/nfnetlink_log.c6
-rw-r--r--net/netfilter/nfnetlink_queue_core.c2
-rw-r--r--net/netfilter/xt_LOG.c13
-rw-r--r--net/netfilter/xt_NFLOG.c3
-rw-r--r--net/netfilter/xt_TCPOPTSTRIP.c17
-rw-r--r--net/netlabel/netlabel_domainhash.c69
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c62
-rw-r--r--net/sunrpc/netns.h4
-rw-r--r--net/sunrpc/rpc_pipe.c5
-rw-r--r--net/sunrpc/sched.c8
-rw-r--r--net/wireless/core.c17
-rw-r--r--net/wireless/nl80211.c4
-rw-r--r--net/wireless/sme.c3
-rw-r--r--net/wireless/trace.h23
-rw-r--r--net/xfrm/xfrm_output.c1
-rw-r--r--scripts/package/Makefile2
-rw-r--r--sound/aoa/fabrics/layout.c8
-rw-r--r--sound/aoa/soundbus/i2sbus/core.c3
-rw-r--r--sound/oss/Kconfig2
-rw-r--r--sound/pci/hda/hda_generic.c9
-rw-r--r--sound/pci/hda/patch_realtek.c1
-rw-r--r--sound/soc/codecs/ab8500-codec.h36
-rw-r--r--sound/soc/codecs/da7213.c8
-rw-r--r--sound/soc/codecs/wm0010.c1
-rw-r--r--sound/soc/fsl/imx-ssi.c6
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c5
-rw-r--r--sound/usb/proc.c22
-rwxr-xr-xtools/perf/scripts/python/net_dropmonitor.py39
-rw-r--r--tools/testing/selftests/Makefile1
-rw-r--r--tools/testing/selftests/soft-dirty/Makefile10
-rw-r--r--tools/testing/selftests/soft-dirty/soft-dirty.c114
841 files changed, 9115 insertions, 5727 deletions
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 44afa0e5057d..4ff65047bb9a 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -4,7 +4,7 @@ Required properties:
 - compatible: Should be "cdns,[<chip>-]{macb|gem}"
 	Use "cdns,at91sam9260-macb" Atmel at91sam9260 and at91sam9263 SoCs.
 	Use "cdns,at32ap7000-macb" for other 10/100 usage or use the generic form: "cdns,macb".
-	Use "cnds,pc302-gem" for Picochip picoXcell pc302 and later devices based on
+	Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
 	the Cadence GEM, or the generic form: "cdns,gem".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmi.txt b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
index 589edee37394..589edee37394 100644
--- a/Documentation/devicetree/bindings/drm/exynos/hdmi.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmi.txt
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt b/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt
index fa166d945809..fa166d945809 100644
--- a/Documentation/devicetree/bindings/drm/exynos/hdmiddc.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmiddc.txt
diff --git a/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt b/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt
index 858f4f9b902f..858f4f9b902f 100644
--- a/Documentation/devicetree/bindings/drm/exynos/hdmiphy.txt
+++ b/Documentation/devicetree/bindings/video/exynos_hdmiphy.txt
diff --git a/Documentation/devicetree/bindings/drm/exynos/mixer.txt b/Documentation/devicetree/bindings/video/exynos_mixer.txt
index 9b2ea0343566..9b2ea0343566 100644
--- a/Documentation/devicetree/bindings/drm/exynos/mixer.txt
+++ b/Documentation/devicetree/bindings/video/exynos_mixer.txt
diff --git a/Documentation/devicetree/bindings/video/simple-framebuffer.txt b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
new file mode 100644
index 000000000000..3ea460583111
--- /dev/null
+++ b/Documentation/devicetree/bindings/video/simple-framebuffer.txt
@@ -0,0 +1,25 @@
+Simple Framebuffer
+
+A simple frame-buffer describes a raw memory region that may be rendered to,
+with the assumption that the display hardware has already been set up to scan
+out from that buffer.
+
+Required properties:
+- compatible: "simple-framebuffer"
+- reg: Should contain the location and size of the framebuffer memory.
+- width: The width of the framebuffer in pixels.
+- height: The height of the framebuffer in pixels.
+- stride: The number of bytes in each line of the framebuffer.
+- format: The format of the framebuffer surface. Valid values are:
+  - r5g6b5 (16-bit pixels, d[15:11]=r, d[10:5]=g, d[4:0]=b).
+
+Example:
+
+	framebuffer {
+		compatible = "simple-framebuffer";
+		reg = <0x1d385000 (1600 * 1200 * 2)>;
+		width = <1600>;
+		height = <1200>;
+		stride = <(1600 * 2)>;
+		format = "r5g6b5";
+	};
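For illustration only (not part of the binding or the patch above): a minimal C sketch of the r5g6b5 packing the binding describes; the helper names are invented.

	#include <stdint.h>

	/* Pack 8-bit RGB into r5g6b5: d[15:11]=r, d[10:5]=g, d[4:0]=b. */
	static inline uint16_t pack_r5g6b5(uint8_t r, uint8_t g, uint8_t b)
	{
		return (uint16_t)(((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3));
	}

	/* Write one pixel, given a mapped framebuffer base and the stride
	 * (bytes per line) from the node above; 2 bytes per pixel. */
	static void put_pixel(volatile uint8_t *fb, unsigned int stride,
			      unsigned int x, unsigned int y, uint16_t color)
	{
		*(volatile uint16_t *)(fb + y * stride + x * 2) = color;
	}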
diff --git a/Documentation/devicetree/usage-model.txt b/Documentation/devicetree/usage-model.txt
index ef9d06c9f8fd..0efedaad5165 100644
--- a/Documentation/devicetree/usage-model.txt
+++ b/Documentation/devicetree/usage-model.txt
@@ -191,9 +191,11 @@ Linux it will look something like this:
 	};
 
 The bootargs property contains the kernel arguments, and the initrd-*
-properties define the address and size of an initrd blob. The
-chosen node may also optionally contain an arbitrary number of
-additional properties for platform-specific configuration data.
+properties define the address and size of an initrd blob. Note that
+initrd-end is the first address after the initrd image, so this doesn't
+match the usual semantic of struct resource. The chosen node may also
+optionally contain an arbitrary number of additional properties for
+platform-specific configuration data.
 
 During early boot, the architecture setup code calls of_scan_flat_dt()
 several times with different helper callbacks to parse device tree
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c3bfacb92910..6e3b18a8afc6 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Force threading of all interrupt handlers except those
 			marked explicitly IRQF_NO_THREAD.
 
+	tmem		[KNL,XEN]
+			Enable the Transcendent memory driver if built-in.
+
+	tmem.cleancache=0|1 [KNL, XEN]
+			Default is on (1). Disable the usage of the cleancache
+			API to send anonymous pages to the hypervisor.
+
+	tmem.frontswap=0|1 [KNL, XEN]
+			Default is on (1). Disable the usage of the frontswap
+			API to send swap pages to the hypervisor. If disabled
+			the selfballooning and selfshrinking are force disabled.
+
+	tmem.selfballooning=0|1 [KNL, XEN]
+			Default is on (1). Disable the driving of swap pages
+			to the hypervisor.
+
+	tmem.selfshrinking=0|1 [KNL, XEN]
+			Default is on (1). Partial swapoff that immediately
+			transfers pages from Xen hypervisor back to the
+			kernel based on different criteria.
+
 	topology=	[S390]
 			Format: {off | on}
 			Specify if the kernel should make use of the cpu
diff --git a/Documentation/kernel-per-CPU-kthreads.txt b/Documentation/kernel-per-CPU-kthreads.txt
new file mode 100644
index 000000000000..cbf7ae412da4
--- /dev/null
+++ b/Documentation/kernel-per-CPU-kthreads.txt
@@ -0,0 +1,202 @@
+REDUCING OS JITTER DUE TO PER-CPU KTHREADS
+
+This document lists per-CPU kthreads in the Linux kernel and presents
+options to control their OS jitter. Note that non-per-CPU kthreads are
+not listed here. To reduce OS jitter from non-per-CPU kthreads, bind
+them to a "housekeeping" CPU dedicated to such work.
+
+
+REFERENCES
+
+o	Documentation/IRQ-affinity.txt: Binding interrupts to sets of CPUs.
+
+o	Documentation/cgroups: Using cgroups to bind tasks to sets of CPUs.
+
+o	man taskset: Using the taskset command to bind tasks to sets
+	of CPUs.
+
+o	man sched_setaffinity: Using the sched_setaffinity() system
+	call to bind tasks to sets of CPUs.
+
+o	/sys/devices/system/cpu/cpuN/online: Control CPU N's hotplug state,
+	writing "0" to offline and "1" to online.
+
+o	In order to locate kernel-generated OS jitter on CPU N:
+
+	cd /sys/kernel/debug/tracing
+	echo 1 > max_graph_depth # Increase the "1" for more detail
+	echo function_graph > current_tracer
+	# run workload
+	cat per_cpu/cpuN/trace
+
+
+KTHREADS
+
+Name: ehca_comp/%u
+Purpose: Periodically process Infiniband-related work.
+To reduce its OS jitter, do any of the following:
+1.	Don't use eHCA Infiniband hardware, instead choosing hardware
+	that does not require per-CPU kthreads. This will prevent these
+	kthreads from being created in the first place. (This will
+	work for most people, as this hardware, though important, is
+	relatively old and is produced in relatively low unit volumes.)
+2.	Do all eHCA-Infiniband-related work on other CPUs, including
+	interrupts.
+3.	Rework the eHCA driver so that its per-CPU kthreads are
+	provisioned only on selected CPUs.
+
+
+Name: irq/%d-%s
+Purpose: Handle threaded interrupts.
+To reduce its OS jitter, do the following:
+1.	Use irq affinity to force the irq threads to execute on
+	some other CPU.
+
+Name: kcmtpd_ctr_%d
+Purpose: Handle Bluetooth work.
+To reduce its OS jitter, do one of the following:
+1.	Don't use Bluetooth, in which case these kthreads won't be
+	created in the first place.
+2.	Use irq affinity to force Bluetooth-related interrupts to
+	occur on some other CPU and furthermore initiate all
+	Bluetooth activity on some other CPU.
+
+Name: ksoftirqd/%u
+Purpose: Execute softirq handlers when threaded or when under heavy load.
+To reduce its OS jitter, each softirq vector must be handled
+separately as follows:
+TIMER_SOFTIRQ: Do all of the following:
+1.	To the extent possible, keep the CPU out of the kernel when it
+	is non-idle, for example, by avoiding system calls and by forcing
+	both kernel threads and interrupts to execute elsewhere.
+2.	Build with CONFIG_HOTPLUG_CPU=y. After boot completes, force
+	the CPU offline, then bring it back online. This forces
+	recurring timers to migrate elsewhere. If you are concerned
+	with multiple CPUs, force them all offline before bringing the
+	first one back online. Once you have onlined the CPUs in question,
+	do not offline any other CPUs, because doing so could force the
+	timer back onto one of the CPUs in question.
+NET_TX_SOFTIRQ and NET_RX_SOFTIRQ: Do all of the following:
+1.	Force networking interrupts onto other CPUs.
+2.	Initiate any network I/O on other CPUs.
+3.	Once your application has started, prevent CPU-hotplug operations
+	from being initiated from tasks that might run on the CPU to
+	be de-jittered. (It is OK to force this CPU offline and then
+	bring it back online before you start your application.)
+BLOCK_SOFTIRQ: Do all of the following:
+1.	Force block-device interrupts onto some other CPU.
+2.	Initiate any block I/O on other CPUs.
+3.	Once your application has started, prevent CPU-hotplug operations
+	from being initiated from tasks that might run on the CPU to
+	be de-jittered. (It is OK to force this CPU offline and then
+	bring it back online before you start your application.)
+BLOCK_IOPOLL_SOFTIRQ: Do all of the following:
+1.	Force block-device interrupts onto some other CPU.
+2.	Initiate any block I/O and block-I/O polling on other CPUs.
+3.	Once your application has started, prevent CPU-hotplug operations
+	from being initiated from tasks that might run on the CPU to
+	be de-jittered. (It is OK to force this CPU offline and then
+	bring it back online before you start your application.)
+TASKLET_SOFTIRQ: Do one or more of the following:
+1.	Avoid use of drivers that use tasklets. (Such drivers will contain
+	calls to things like tasklet_schedule().)
+2.	Convert all drivers that you must use from tasklets to workqueues.
+3.	Force interrupts for drivers using tasklets onto other CPUs,
+	and also do I/O involving these drivers on other CPUs.
+SCHED_SOFTIRQ: Do all of the following:
+1.	Avoid sending scheduler IPIs to the CPU to be de-jittered,
+	for example, ensure that at most one runnable kthread is present
+	on that CPU. If a thread that expects to run on the de-jittered
+	CPU awakens, the scheduler will send an IPI that can result in
+	a subsequent SCHED_SOFTIRQ.
+2.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+	CONFIG_NO_HZ_FULL=y, and, in addition, ensure that the CPU
+	to be de-jittered is marked as an adaptive-ticks CPU using the
+	"nohz_full=" boot parameter. This reduces the number of
+	scheduler-clock interrupts that the de-jittered CPU receives,
+	minimizing its chances of being selected to do the load balancing
+	work that runs in SCHED_SOFTIRQ context.
+3.	To the extent possible, keep the CPU out of the kernel when it
+	is non-idle, for example, by avoiding system calls and by
+	forcing both kernel threads and interrupts to execute elsewhere.
+	This further reduces the number of scheduler-clock interrupts
+	received by the de-jittered CPU.
+HRTIMER_SOFTIRQ: Do all of the following:
+1.	To the extent possible, keep the CPU out of the kernel when it
+	is non-idle. For example, avoid system calls and force both
+	kernel threads and interrupts to execute elsewhere.
+2.	Build with CONFIG_HOTPLUG_CPU=y. Once boot completes, force the
+	CPU offline, then bring it back online. This forces recurring
+	timers to migrate elsewhere. If you are concerned with multiple
+	CPUs, force them all offline before bringing the first one
+	back online. Once you have onlined the CPUs in question, do not
+	offline any other CPUs, because doing so could force the timer
+	back onto one of the CPUs in question.
+RCU_SOFTIRQ: Do at least one of the following:
+1.	Offload callbacks and keep the CPU in either dyntick-idle or
+	adaptive-ticks state by doing all of the following:
+	a.	Build with CONFIG_RCU_NOCB_CPU=y, CONFIG_RCU_NOCB_CPU_ALL=y,
+		CONFIG_NO_HZ_FULL=y, and, in addition ensure that the CPU
+		to be de-jittered is marked as an adaptive-ticks CPU using
+		the "nohz_full=" boot parameter. Bind the rcuo kthreads
+		to housekeeping CPUs, which can tolerate OS jitter.
+	b.	To the extent possible, keep the CPU out of the kernel
+		when it is non-idle, for example, by avoiding system
+		calls and by forcing both kernel threads and interrupts
+		to execute elsewhere.
+2.	Enable RCU to do its processing remotely via dyntick-idle by
+	doing all of the following:
+	a.	Build with CONFIG_NO_HZ=y and CONFIG_RCU_FAST_NO_HZ=y.
+	b.	Ensure that the CPU goes idle frequently, allowing other
+		CPUs to detect that it has passed through an RCU quiescent
+		state. If the kernel is built with CONFIG_NO_HZ_FULL=y,
+		userspace execution also allows other CPUs to detect that
+		the CPU in question has passed through a quiescent state.
+	c.	To the extent possible, keep the CPU out of the kernel
+		when it is non-idle, for example, by avoiding system
+		calls and by forcing both kernel threads and interrupts
+		to execute elsewhere.
+
+Name: rcuc/%u
+Purpose: Execute RCU callbacks in CONFIG_RCU_BOOST=y kernels.
+To reduce its OS jitter, do at least one of the following:
+1.	Build the kernel with CONFIG_PREEMPT=n. This prevents these
+	kthreads from being created in the first place, and also obviates
+	the need for RCU priority boosting. This approach is feasible
+	for workloads that do not require high degrees of responsiveness.
+2.	Build the kernel with CONFIG_RCU_BOOST=n. This prevents these
+	kthreads from being created in the first place. This approach
+	is feasible only if your workload never requires RCU priority
+	boosting, for example, if you ensure frequent idle time on all
+	CPUs that might execute within the kernel.
+3.	Build with CONFIG_RCU_NOCB_CPU=y and CONFIG_RCU_NOCB_CPU_ALL=y,
+	which offloads all RCU callbacks to kthreads that can be moved
+	off of CPUs susceptible to OS jitter. This approach prevents the
+	rcuc/%u kthreads from having any work to do, so that they are
+	never awakened.
+4.	Ensure that the CPU never enters the kernel, and, in particular,
+	avoid initiating any CPU hotplug operations on this CPU. This is
+	another way of preventing any callbacks from being queued on the
+	CPU, again preventing the rcuc/%u kthreads from having any work
+	to do.
+
+Name: rcuob/%d, rcuop/%d, and rcuos/%d
+Purpose: Offload RCU callbacks from the corresponding CPU.
+To reduce its OS jitter, do at least one of the following:
+1.	Use affinity, cgroups, or other mechanism to force these kthreads
+	to execute on some other CPU.
+2.	Build with CONFIG_RCU_NOCB_CPUS=n, which will prevent these
+	kthreads from being created in the first place. However, please
+	note that this will not eliminate OS jitter, but will instead
+	shift it to RCU_SOFTIRQ.
+
+Name: watchdog/%u
+Purpose: Detect software lockups on each CPU.
+To reduce its OS jitter, do at least one of the following:
+1.	Build with CONFIG_LOCKUP_DETECTOR=n, which will prevent these
+	kthreads from being created in the first place.
+2.	Echo a zero to /proc/sys/kernel/watchdog to disable the
+	watchdog timer.
+3.	Echo a large number of /proc/sys/kernel/watchdog_thresh in
+	order to reduce the frequency of OS jitter due to the watchdog
+	timer down to a level that is acceptable for your workload.
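The sched_setaffinity() reference above can be exercised from userspace with a short sketch like the one below; it is illustrative only, and the choice of CPU 0 as the housekeeping CPU is an assumption.

	#define _GNU_SOURCE
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		cpu_set_t mask;

		CPU_ZERO(&mask);
		CPU_SET(0, &mask);	/* assume CPU 0 is the housekeeping CPU */

		/* pid 0 means "the calling task" */
		if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
			perror("sched_setaffinity");
			return EXIT_FAILURE;
		}
		printf("now bound to CPU 0\n");
		return EXIT_SUCCESS;
	}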
diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt
index 504dfe4d52eb..a66c9821b5ce 100644
--- a/Documentation/power/devices.txt
+++ b/Documentation/power/devices.txt
@@ -268,7 +268,7 @@ situations.
 System Power Management Phases
 ------------------------------
 Suspending or resuming the system is done in several phases. Different phases
-are used for standby or memory sleep states ("suspend-to-RAM") and the
+are used for freeze, standby, and memory sleep states ("suspend-to-RAM") and the
 hibernation state ("suspend-to-disk"). Each phase involves executing callbacks
 for every device before the next phase begins. Not all busses or classes
 support all these callbacks and not all drivers use all the callbacks. The
@@ -309,7 +309,8 @@ execute the corresponding method from dev->driver->pm instead if there is one.
 
 Entering System Suspend
 -----------------------
-When the system goes into the standby or memory sleep state, the phases are:
+When the system goes into the freeze, standby or memory sleep state,
+the phases are:
 
 	prepare, suspend, suspend_late, suspend_noirq.
 
@@ -368,7 +369,7 @@ the devices that were suspended.
 
 Leaving System Suspend
 ----------------------
-When resuming from standby or memory sleep, the phases are:
+When resuming from freeze, standby or memory sleep, the phases are:
 
 	resume_noirq, resume_early, resume, complete.
 
@@ -433,8 +434,8 @@ the system log.
 
 Entering Hibernation
 --------------------
-Hibernating the system is more complicated than putting it into the standby or
-memory sleep state, because it involves creating and saving a system image.
+Hibernating the system is more complicated than putting it into the other
+sleep states, because it involves creating and saving a system image.
 Therefore there are more phases for hibernation, with a different set of
 callbacks. These phases always run after tasks have been frozen and memory has
 been freed.
@@ -485,8 +486,8 @@ image forms an atomic snapshot of the system state.
 
 At this point the system image is saved, and the devices then need to be
 prepared for the upcoming system shutdown. This is much like suspending them
-before putting the system into the standby or memory sleep state, and the phases
-are similar.
+before putting the system into the freeze, standby or memory sleep state,
+and the phases are similar.
 
 	9. The prepare phase is discussed above.
 
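For context on the phase names used in this file, a hedged sketch of how a driver typically wires them into struct dev_pm_ops; the foo_* callbacks are placeholders and are not taken from this patch.

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Placeholder callbacks; each runs in the correspondingly named phase. */
	static int foo_suspend(struct device *dev)       { return 0; }
	static int foo_suspend_late(struct device *dev)  { return 0; }
	static int foo_suspend_noirq(struct device *dev) { return 0; }
	static int foo_resume_noirq(struct device *dev)  { return 0; }
	static int foo_resume_early(struct device *dev)  { return 0; }
	static int foo_resume(struct device *dev)        { return 0; }

	static const struct dev_pm_ops foo_pm_ops = {
		.suspend	= foo_suspend,
		.suspend_late	= foo_suspend_late,
		.suspend_noirq	= foo_suspend_noirq,
		.resume_noirq	= foo_resume_noirq,
		.resume_early	= foo_resume_early,
		.resume		= foo_resume,
	};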
diff --git a/Documentation/power/interface.txt b/Documentation/power/interface.txt
index c537834af005..f1f0f59a7c47 100644
--- a/Documentation/power/interface.txt
+++ b/Documentation/power/interface.txt
@@ -7,8 +7,8 @@ running. The interface exists in /sys/power/ directory (assuming sysfs
 is mounted at /sys).
 
 /sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'standby'
-(Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
+returns what states are supported, which is hard-coded to 'freeze',
+'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
 (Suspend-to-Disk).
 
 Writing to this file one of those strings causes the system to
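As a rough illustration (not part of the patch) of how userspace drives this interface, the sketch below reads the supported states and, if "freeze" is listed, requests it; running it actually suspends the machine and requires root.

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char states[128] = "";
		FILE *f = fopen("/sys/power/state", "r");

		if (f) {
			if (fgets(states, sizeof(states), f))
				printf("supported states: %s", states);
			fclose(f);
		}

		if (strstr(states, "freeze")) {
			f = fopen("/sys/power/state", "w");
			if (f) {
				fputs("freeze\n", f);	/* returns after wakeup */
				fclose(f);
			}
		}
		return 0;
	}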
diff --git a/Documentation/power/notifiers.txt b/Documentation/power/notifiers.txt
index c2a4a346c0d9..a81fa254303d 100644
--- a/Documentation/power/notifiers.txt
+++ b/Documentation/power/notifiers.txt
@@ -15,8 +15,10 @@ A suspend/hibernation notifier may be used for this purpose.
15The subsystems or drivers having such needs can register suspend notifiers that 15The subsystems or drivers having such needs can register suspend notifiers that
16will be called upon the following events by the PM core: 16will be called upon the following events by the PM core:
17 17
18PM_HIBERNATION_PREPARE The system is going to hibernate or suspend, tasks will 18PM_HIBERNATION_PREPARE The system is going to hibernate, tasks will be frozen
19 be frozen immediately. 19 immediately. This is different from PM_SUSPEND_PREPARE
 20 below because additional work is done here between the notifiers
 21 and the freezing of drivers.
20 22
21PM_POST_HIBERNATION The system memory state has been restored from a 23PM_POST_HIBERNATION The system memory state has been restored from a
22 hibernation image or an error occurred during 24 hibernation image or an error occurred during
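
A subsystem that needs to react to these events can hook them with a PM
notifier; a minimal sketch using the standard register_pm_notifier()
interface (the foo_* names are hypothetical placeholders) might look like:

	#include <linux/notifier.h>
	#include <linux/suspend.h>

	static int foo_pm_notify(struct notifier_block *nb, unsigned long event,
				 void *unused)
	{
		switch (event) {
		case PM_HIBERNATION_PREPARE:
			/* e.g. preallocate resources needed while tasks are frozen */
			break;
		case PM_POST_HIBERNATION:
			/* undo whatever PM_HIBERNATION_PREPARE set up */
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block foo_pm_nb = {
		.notifier_call = foo_pm_notify,
	};

	/* From the subsystem's init code: register_pm_notifier(&foo_pm_nb); */
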
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 4416b28630df..442d43df9b25 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -2,12 +2,26 @@
2System Power Management States 2System Power Management States
3 3
4 4
5The kernel supports three power management states generically, though 5The kernel supports four power management states generically, though
6each is dependent on platform support code to implement the low-level 6one is generic and the other three are dependent on platform support
7details for each state. This file describes each state, what they are 7code to implement the low-level details for each state.
8This file describes each state, what they are
8commonly called, what ACPI state they map to, and what string to write 9commonly called, what ACPI state they map to, and what string to write
9to /sys/power/state to enter that state 10to /sys/power/state to enter that state
10 11
12state: Freeze / Low-Power Idle
13ACPI state: S0
14String: "freeze"
15
16This state is a generic, pure software, light-weight, low-power state.
17It allows more energy to be saved relative to idle by freezing user
18space and putting all I/O devices into low-power states (possibly
19lower-power than available at run time), such that the processors can
20spend more time in their idle states.
21This state can be used for platforms without Standby/Suspend-to-RAM
22support, or it can be used in addition to Suspend-to-RAM (memory sleep)
23to provide reduced resume latency.
24
11 25
12State: Standby / Power-On Suspend 26State: Standby / Power-On Suspend
13ACPI State: S1 27ACPI State: S1
@@ -22,9 +36,6 @@ We try to put devices in a low-power state equivalent to D1, which
22also offers low power savings, but low resume latency. Not all devices 36also offers low power savings, but low resume latency. Not all devices
23support D1, and those that don't are left on. 37support D1, and those that don't are left on.
24 38
25A transition from Standby to the On state should take about 1-2
26seconds.
27
28 39
29State: Suspend-to-RAM 40State: Suspend-to-RAM
30ACPI State: S3 41ACPI State: S3
@@ -42,9 +53,6 @@ transition back to the On state.
42For at least ACPI, STR requires some minimal boot-strapping code to 53For at least ACPI, STR requires some minimal boot-strapping code to
43resume the system from STR. This may be true on other platforms. 54resume the system from STR. This may be true on other platforms.
44 55
45A transition from Suspend-to-RAM to the On state should take about
463-5 seconds.
47
48 56
49State: Suspend-to-disk 57State: Suspend-to-disk
50ACPI State: S4 58ACPI State: S4
@@ -74,7 +82,3 @@ low-power state (like ACPI S4), or it may simply power down. Powering
74down offers greater savings, and allows this mechanism to work on any 82down offers greater savings, and allows this mechanism to work on any
75system. However, entering a real low-power state allows the user to 83system. However, entering a real low-power state allows the user to
76trigger wake up events (e.g. pressing a key or opening a laptop lid). 84trigger wake up events (e.g. pressing a key or opening a laptop lid).
77
78A transition from Suspend-to-Disk to the On state should take about 30
79seconds, though it's typically a bit more with the current
80implementation.
diff --git a/Documentation/rapidio/rapidio.txt b/Documentation/rapidio/rapidio.txt
index c75694b35d08..a9c16c979da2 100644
--- a/Documentation/rapidio/rapidio.txt
+++ b/Documentation/rapidio/rapidio.txt
@@ -79,20 +79,63 @@ master port that is used to communicate with devices within the network.
79In order to initialize the RapidIO subsystem, a platform must initialize and 79In order to initialize the RapidIO subsystem, a platform must initialize and
80register at least one master port within the RapidIO network. To register mport 80register at least one master port within the RapidIO network. To register mport
81within the subsystem controller driver initialization code calls function 81within the subsystem controller driver initialization code calls function
82rio_register_mport() for each available master port. After all active master 82rio_register_mport() for each available master port.
83ports are registered with a RapidIO subsystem, the rio_init_mports() routine
84is called to perform enumeration and discovery.
85 83
86In the current PowerPC-based implementation a subsys_initcall() is specified to 84RapidIO subsystem uses subsys_initcall() or device_initcall() to perform
87perform controller initialization and mport registration. At the end it directly 85controller initialization (depending on controller device type).
88calls rio_init_mports() to execute RapidIO enumeration and discovery. 86
87After all active master ports are registered with a RapidIO subsystem,
88an enumeration and/or discovery routine may be called automatically or
89by user-space command.
89 90
904. Enumeration and Discovery 914. Enumeration and Discovery
91---------------------------- 92----------------------------
92 93
93When rio_init_mports() is called it scans a list of registered master ports and 944.1 Overview
94calls an enumeration or discovery routine depending on the configured role of a 95------------
95master port: host or agent. 96
97RapidIO subsystem configuration options allow users to specify enumeration and
98discovery methods as statically linked components or loadable modules.
99An enumeration/discovery method implementation and available input parameters
100define how any given method can be attached to available RapidIO mports:
101simply to all available mports OR individually to the specified mport device.
102
103Depending on selected enumeration/discovery build configuration, there are
104several methods to initiate an enumeration and/or discovery process:
105
 106 (a) Statically linked enumeration and discovery process can be started
 107 automatically during kernel initialization using the corresponding module
 108 parameters. This was the original method used since the introduction of the
 109 RapidIO subsystem. Now this method relies on an enumerator module parameter,
 110 which is 'rio-scan.scan' for the existing basic enumeration/discovery method.
 111 When automatic start of enumeration/discovery is used, a user has to ensure
 112 that all discovering endpoints are started before the enumerating endpoint
 113 and are waiting for enumeration to be completed.
 114 The configuration option CONFIG_RAPIDIO_DISC_TIMEOUT defines the time that a
 115 discovering endpoint waits for enumeration to be completed. If the specified
 116 timeout expires, the discovery process is terminated without obtaining RapidIO
 117 network information. NOTE: a timed-out discovery process may be restarted
 118 later using a user-space command (described below) if the given endpoint was
 119 enumerated successfully.
120
121 (b) Statically linked enumeration and discovery process can be started by
122 a command from user space. This initiation method provides more flexibility
 123 for a system startup compared to option (a) above. After all participating
 124 endpoints have been successfully booted, an enumeration process shall be
 125 started first by issuing a user-space command; after the enumeration is
 126 completed, a discovery process can be started on all remaining endpoints.
127
128 (c) Modular enumeration and discovery process can be started by a command from
129 user space. After an enumeration/discovery module is loaded, a network scan
130 process can be started by issuing a user-space command.
 131 Similar to option (b) above, an enumerator has to be started first.
132
133 (d) Modular enumeration and discovery process can be started by a module
134 initialization routine. In this case an enumerating module shall be loaded
135 first.
136
 137When a network scan process is started, it calls an enumeration or discovery
138routine depending on the configured role of a master port: host or agent.
96 139
97Enumeration is performed by a master port if it is configured as a host port by 140Enumeration is performed by a master port if it is configured as a host port by
98assigning a host device ID greater than or equal to zero. A host device ID is 141assigning a host device ID greater than or equal to zero. A host device ID is
@@ -104,8 +147,58 @@ for it.
104The enumeration and discovery routines use RapidIO maintenance transactions 147The enumeration and discovery routines use RapidIO maintenance transactions
105to access the configuration space of devices. 148to access the configuration space of devices.
106 149
107The enumeration process is implemented according to the enumeration algorithm 1504.2 Automatic Start of Enumeration and Discovery
108outlined in the RapidIO Interconnect Specification: Annex I [1]. 151------------------------------------------------
152
 153The automatic enumeration/discovery start method is applicable only to the
 154built-in enumeration/discovery RapidIO configuration. To enable automatic
 155enumeration/discovery start by the existing basic enumerator method, use the
 156boot command line parameter "rio-scan.scan=1".
157
158This configuration requires synchronized start of all RapidIO endpoints that
159form a network which will be enumerated/discovered. Discovering endpoints have
160to be started before an enumeration starts to ensure that all RapidIO
161controllers have been initialized and are ready to be discovered. Configuration
 162parameter CONFIG_RAPIDIO_DISC_TIMEOUT defines the time (in seconds) for which
 163a discovering endpoint will wait for enumeration to be completed.
164
 165When automatic enumeration/discovery start is selected, the basic method's
166initialization routine calls rio_init_mports() to perform enumeration or
167discovery for all known mport devices.
168
 169Depending on the RapidIO network size and configuration, this automatic
170enumeration/discovery start method may be difficult to use due to the
171requirement for synchronized start of all endpoints.
172
1734.3 User-space Start of Enumeration and Discovery
174-------------------------------------------------
175
176User-space start of enumeration and discovery can be used with built-in and
 177modular build configurations. For user-space controlled start, the RapidIO
 178subsystem creates the write-only sysfs attribute file '/sys/bus/rapidio/scan'.
 179To initiate an enumeration or discovery process on a specific mport device, a
 180user needs to write the mport_ID (not the RapidIO destination ID) into that
 181file. The mport_ID is a sequential number (0 ... RIO_MAX_MPORTS) assigned
 182during mport device registration. For example, for a machine with a single
 183RapidIO controller, the mport_ID for that controller will always be 0.
184
185To initiate RapidIO enumeration/discovery on all available mports a user may
186write '-1' (or RIO_MPORT_ANY) into the scan attribute file.
187
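
For illustration, the same write could be issued from a small user-space
helper instead of a shell redirection (a sketch only; the mport number shown
is just an example):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/bus/rapidio/scan", "w");

		if (!f) {
			perror("/sys/bus/rapidio/scan");
			return 1;
		}
		fputs("0", f);	/* mport_ID 0; "-1" would scan all mports */
		fclose(f);
		return 0;
	}
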
1884.4 Basic Enumeration Method
189----------------------------
190
 191This is the original enumeration/discovery method, available since the
 192first release of the RapidIO subsystem code. The enumeration process is
193implemented according to the enumeration algorithm outlined in the RapidIO
194Interconnect Specification: Annex I [1].
195
196This method can be configured as statically linked or loadable module.
 197The method's single parameter "scan" allows triggering the enumeration/discovery
 198process from the module initialization routine.
199
200This enumeration/discovery method can be started only once and does not support
201unloading if it is built as a module.
109 202
110The enumeration process traverses the network using a recursive depth-first 203The enumeration process traverses the network using a recursive depth-first
111algorithm. When a new device is found, the enumerator takes ownership of that 204algorithm. When a new device is found, the enumerator takes ownership of that
@@ -160,6 +253,19 @@ time period. If this wait time period expires before enumeration is completed,
160an agent skips RapidIO discovery and continues with remaining kernel 253an agent skips RapidIO discovery and continues with remaining kernel
161initialization. 254initialization.
162 255
2564.5 Adding New Enumeration/Discovery Method
257-------------------------------------------
258
259RapidIO subsystem code organization allows addition of new enumeration/discovery
 260methods as new configuration options without significant impact to the core
261RapidIO code.
262
263A new enumeration/discovery method has to be attached to one or more mport
264devices before an enumeration/discovery process can be started. Normally,
 265the method's module initialization routine calls rio_register_scan() to attach
266an enumerator to a specified mport device (or devices). The basic enumerator
267implementation demonstrates this process.
268
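
As a rough sketch of that attach step (assuming the struct rio_scan /
rio_register_scan() interface introduced here; the my_* names are
hypothetical and the exact fields should be checked against
include/linux/rio.h), a new method's module init might do:

	#include <linux/module.h>
	#include <linux/rio.h>

	static int my_enumerate(struct rio_mport *mport, u32 flags)
	{
		/* active side: walk the network and assign device IDs */
		return 0;
	}

	static int my_discover(struct rio_mport *mport, u32 flags)
	{
		/* passive side: pick up the configuration set by the enumerator */
		return 0;
	}

	static struct rio_scan my_scan_ops = {
		.enumerate = my_enumerate,
		.discover  = my_discover,
	};

	static int __init my_scan_init(void)
	{
		/* attach to all mports; a specific mport_ID may be used instead */
		return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
	}
	module_init(my_scan_init);
	MODULE_LICENSE("GPL");
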
1635. References 2695. References
164------------- 270-------------
165 271
diff --git a/Documentation/rapidio/sysfs.txt b/Documentation/rapidio/sysfs.txt
index 97f71ce575d6..19878179da4c 100644
--- a/Documentation/rapidio/sysfs.txt
+++ b/Documentation/rapidio/sysfs.txt
@@ -88,3 +88,20 @@ that exports additional attributes.
88 88
89IDT_GEN2: 89IDT_GEN2:
90 errlog - reads contents of device error log until it is empty. 90 errlog - reads contents of device error log until it is empty.
91
92
935. RapidIO Bus Attributes
94-------------------------
95
96RapidIO bus subdirectory /sys/bus/rapidio implements the following bus-specific
97attribute:
98
 99 scan - allows triggering the enumeration/discovery process from user space. This
100 is a write-only attribute. To initiate an enumeration or discovery
101 process on a specific mport device, a user needs to write mport_ID (not
102 RapidIO destination ID) into this file. The mport_ID is a sequential
103 number (0 ... RIO_MAX_MPORTS) assigned to the mport device.
104 For example, for a machine with a single RapidIO controller, mport_ID
105 for that controller will always be 0.
106 To initiate RapidIO enumeration/discovery on all available mports
107 a user must write '-1' (or RIO_MPORT_ANY) into this attribute file.
diff --git a/MAINTAINERS b/MAINTAINERS
index 3d7782b9f90d..fd3a495a0005 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3865,9 +3865,16 @@ M: K. Y. Srinivasan <kys@microsoft.com>
3865M: Haiyang Zhang <haiyangz@microsoft.com> 3865M: Haiyang Zhang <haiyangz@microsoft.com>
3866L: devel@linuxdriverproject.org 3866L: devel@linuxdriverproject.org
3867S: Maintained 3867S: Maintained
3868F: drivers/hv/ 3868F: arch/x86/include/asm/mshyperv.h
3869F: arch/x86/include/uapi/asm/hyperv.h
3870F: arch/x86/kernel/cpu/mshyperv.c
3869F: drivers/hid/hid-hyperv.c 3871F: drivers/hid/hid-hyperv.c
3872F: drivers/hv/
3870F: drivers/net/hyperv/ 3873F: drivers/net/hyperv/
3874F: drivers/scsi/storvsc_drv.c
3875F: drivers/video/hyperv_fb.c
3876F: include/linux/hyperv.h
3877F: tools/hv/
3871 3878
3872I2C OVER PARALLEL PORT 3879I2C OVER PARALLEL PORT
3873M: Jean Delvare <khali@linux-fr.org> 3880M: Jean Delvare <khali@linux-fr.org>
@@ -4641,12 +4648,13 @@ F: include/linux/sunrpc/
4641F: include/uapi/linux/sunrpc/ 4648F: include/uapi/linux/sunrpc/
4642 4649
4643KERNEL VIRTUAL MACHINE (KVM) 4650KERNEL VIRTUAL MACHINE (KVM)
4644M: Marcelo Tosatti <mtosatti@redhat.com>
4645M: Gleb Natapov <gleb@redhat.com> 4651M: Gleb Natapov <gleb@redhat.com>
4652M: Paolo Bonzini <pbonzini@redhat.com>
4646L: kvm@vger.kernel.org 4653L: kvm@vger.kernel.org
4647W: http://kvm.qumranet.com 4654W: http://linux-kvm.org
4648S: Supported 4655S: Supported
4649F: Documentation/*/kvm.txt 4656F: Documentation/*/kvm*.txt
4657F: Documentation/virtual/kvm/
4650F: arch/*/kvm/ 4658F: arch/*/kvm/
4651F: arch/*/include/asm/kvm* 4659F: arch/*/include/asm/kvm*
4652F: include/linux/kvm* 4660F: include/linux/kvm*
@@ -4976,6 +4984,13 @@ S: Maintained
4976F: Documentation/hwmon/lm90 4984F: Documentation/hwmon/lm90
4977F: drivers/hwmon/lm90.c 4985F: drivers/hwmon/lm90.c
4978 4986
4987LM95234 HARDWARE MONITOR DRIVER
4988M: Guenter Roeck <linux@roeck-us.net>
4989L: lm-sensors@lm-sensors.org
4990S: Maintained
4991F: Documentation/hwmon/lm95234
4992F: drivers/hwmon/lm95234.c
4993
4979LME2510 MEDIA DRIVER 4994LME2510 MEDIA DRIVER
4980M: Malcolm Priestley <tvboxspy@gmail.com> 4995M: Malcolm Priestley <tvboxspy@gmail.com>
4981L: linux-media@vger.kernel.org 4996L: linux-media@vger.kernel.org
@@ -5509,18 +5524,18 @@ F: Documentation/networking/s2io.txt
5509F: Documentation/networking/vxge.txt 5524F: Documentation/networking/vxge.txt
5510F: drivers/net/ethernet/neterion/ 5525F: drivers/net/ethernet/neterion/
5511 5526
5512NETFILTER/IPTABLES/IPCHAINS 5527NETFILTER/IPTABLES
5513P: Harald Welte
5514P: Jozsef Kadlecsik
5515M: Pablo Neira Ayuso <pablo@netfilter.org> 5528M: Pablo Neira Ayuso <pablo@netfilter.org>
5516M: Patrick McHardy <kaber@trash.net> 5529M: Patrick McHardy <kaber@trash.net>
5530M: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
5517L: netfilter-devel@vger.kernel.org 5531L: netfilter-devel@vger.kernel.org
5518L: netfilter@vger.kernel.org 5532L: netfilter@vger.kernel.org
5519L: coreteam@netfilter.org 5533L: coreteam@netfilter.org
5520W: http://www.netfilter.org/ 5534W: http://www.netfilter.org/
5521W: http://www.iptables.org/ 5535W: http://www.iptables.org/
5522T: git git://1984.lsi.us.es/nf 5536Q: http://patchwork.ozlabs.org/project/netfilter-devel/list/
5523T: git git://1984.lsi.us.es/nf-next 5537T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
5538T: git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
5524S: Supported 5539S: Supported
5525F: include/linux/netfilter* 5540F: include/linux/netfilter*
5526F: include/linux/netfilter/ 5541F: include/linux/netfilter/
@@ -6069,6 +6084,7 @@ L: linux-parisc@vger.kernel.org
6069W: http://www.parisc-linux.org/ 6084W: http://www.parisc-linux.org/
6070Q: http://patchwork.kernel.org/project/linux-parisc/list/ 6085Q: http://patchwork.kernel.org/project/linux-parisc/list/
6071T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git 6086T: git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/parisc-2.6.git
6087T: git git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux.git
6072S: Maintained 6088S: Maintained
6073F: arch/parisc/ 6089F: arch/parisc/
6074F: drivers/parisc/ 6090F: drivers/parisc/
@@ -7854,7 +7870,7 @@ L: linux-scsi@vger.kernel.org
7854L: target-devel@vger.kernel.org 7870L: target-devel@vger.kernel.org
7855L: http://groups.google.com/group/linux-iscsi-target-dev 7871L: http://groups.google.com/group/linux-iscsi-target-dev
7856W: http://www.linux-iscsi.org 7872W: http://www.linux-iscsi.org
7857T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/lio-core.git master 7873T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
7858S: Supported 7874S: Supported
7859F: drivers/target/ 7875F: drivers/target/
7860F: include/target/ 7876F: include/target/
@@ -8182,6 +8198,13 @@ F: drivers/mmc/host/sh_mobile_sdhi.c
8182F: include/linux/mmc/tmio.h 8198F: include/linux/mmc/tmio.h
8183F: include/linux/mmc/sh_mobile_sdhi.h 8199F: include/linux/mmc/sh_mobile_sdhi.h
8184 8200
8201TMP401 HARDWARE MONITOR DRIVER
8202M: Guenter Roeck <linux@roeck-us.net>
8203L: lm-sensors@lm-sensors.org
8204S: Maintained
8205F: Documentation/hwmon/tmp401
8206F: drivers/hwmon/tmp401.c
8207
8185TMPFS (SHMEM FILESYSTEM) 8208TMPFS (SHMEM FILESYSTEM)
8186M: Hugh Dickins <hughd@google.com> 8209M: Hugh Dickins <hughd@google.com>
8187L: linux-mm@kvack.org 8210L: linux-mm@kvack.org
diff --git a/Makefile b/Makefile
index cd11e8857604..73e20dba55c1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 10 2PATCHLEVEL = 10
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION = -rc3
5NAME = Unicycling Gorilla 5NAME = Unicycling Gorilla
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
diff --git a/arch/Kconfig b/arch/Kconfig
index dd0e8eb8042f..a4429bcd609e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -213,6 +213,9 @@ config USE_GENERIC_SMP_HELPERS
213config GENERIC_SMP_IDLE_THREAD 213config GENERIC_SMP_IDLE_THREAD
214 bool 214 bool
215 215
216config GENERIC_IDLE_POLL_SETUP
217 bool
218
216# Select if arch init_task initializer is different to init/init_task.c 219# Select if arch init_task initializer is different to init/init_task.c
217config ARCH_INIT_TASK 220config ARCH_INIT_TASK
218 bool 221 bool
diff --git a/arch/arc/boot/dts/abilis_tb100_dvk.dts b/arch/arc/boot/dts/abilis_tb100_dvk.dts
index c0fd3623c393..0fa0d4abe795 100644
--- a/arch/arc/boot/dts/abilis_tb100_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb100_dvk.dts
@@ -37,7 +37,7 @@
37 37
38 soc100 { 38 soc100 {
39 uart@FF100000 { 39 uart@FF100000 {
40 pinctrl-names = "abilis,simple-default"; 40 pinctrl-names = "default";
41 pinctrl-0 = <&pctl_uart0>; 41 pinctrl-0 = <&pctl_uart0>;
42 }; 42 };
43 ethernet@FE100000 { 43 ethernet@FE100000 {
diff --git a/arch/arc/boot/dts/abilis_tb101_dvk.dts b/arch/arc/boot/dts/abilis_tb101_dvk.dts
index 6f8c381f6268..a4d80ce283ae 100644
--- a/arch/arc/boot/dts/abilis_tb101_dvk.dts
+++ b/arch/arc/boot/dts/abilis_tb101_dvk.dts
@@ -37,7 +37,7 @@
37 37
38 soc100 { 38 soc100 {
39 uart@FF100000 { 39 uart@FF100000 {
40 pinctrl-names = "abilis,simple-default"; 40 pinctrl-names = "default";
41 pinctrl-0 = <&pctl_uart0>; 41 pinctrl-0 = <&pctl_uart0>;
42 }; 42 };
43 ethernet@FE100000 { 43 ethernet@FE100000 {
diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi
index a6139fc5aaa3..b97e3051ba4b 100644
--- a/arch/arc/boot/dts/abilis_tb10x.dtsi
+++ b/arch/arc/boot/dts/abilis_tb10x.dtsi
@@ -88,8 +88,7 @@
88 }; 88 };
89 89
90 uart@FF100000 { 90 uart@FF100000 {
91 compatible = "snps,dw-apb-uart", 91 compatible = "snps,dw-apb-uart";
92 "abilis,simple-pinctrl";
93 reg = <0xFF100000 0x100>; 92 reg = <0xFF100000 0x100>;
94 clock-frequency = <166666666>; 93 clock-frequency = <166666666>;
95 interrupts = <25 1>; 94 interrupts = <25 1>;
@@ -184,8 +183,7 @@
184 #address-cells = <1>; 183 #address-cells = <1>;
185 #size-cells = <0>; 184 #size-cells = <0>;
186 cell-index = <1>; 185 cell-index = <1>;
187 compatible = "abilis,tb100-spi", 186 compatible = "abilis,tb100-spi";
188 "abilis,simple-pinctrl";
189 num-cs = <2>; 187 num-cs = <2>;
190 reg = <0xFE011000 0x20>; 188 reg = <0xFE011000 0x20>;
191 interrupt-parent = <&tb10x_ictl>; 189 interrupt-parent = <&tb10x_ictl>;
diff --git a/arch/arc/include/asm/cacheflush.h b/arch/arc/include/asm/cacheflush.h
index 9f841af41092..ef62682e8d95 100644
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -93,14 +93,16 @@ static inline int cache_is_vipt_aliasing(void)
93#endif 93#endif
94} 94}
95 95
96#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 3) 96#define CACHE_COLOR(addr) (((unsigned long)(addr) >> (PAGE_SHIFT)) & 1)
97 97
98/* 98/*
99 * checks if two addresses (after page aligning) index into same cache set 99 * checks if two addresses (after page aligning) index into same cache set
100 */ 100 */
101#define addr_not_cache_congruent(addr1, addr2) \ 101#define addr_not_cache_congruent(addr1, addr2) \
102({ \
102 cache_is_vipt_aliasing() ? \ 103 cache_is_vipt_aliasing() ? \
103 (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0 \ 104 (CACHE_COLOR(addr1) != CACHE_COLOR(addr2)) : 0; \
105})
104 106
105#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 107#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
106do { \ 108do { \
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 374a35514116..ab84bf131fe1 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -19,13 +19,6 @@
19#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE) 19#define clear_page(paddr) memset((paddr), 0, PAGE_SIZE)
20#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) 20#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
21 21
22#ifndef CONFIG_ARC_CACHE_VIPT_ALIASING
23
24#define clear_user_page(addr, vaddr, pg) clear_page(addr)
25#define copy_user_page(vto, vfrom, vaddr, pg) copy_page(vto, vfrom)
26
27#else /* VIPT aliasing dcache */
28
29struct vm_area_struct; 22struct vm_area_struct;
30struct page; 23struct page;
31 24
@@ -35,8 +28,6 @@ void copy_user_highpage(struct page *to, struct page *from,
35 unsigned long u_vaddr, struct vm_area_struct *vma); 28 unsigned long u_vaddr, struct vm_area_struct *vma);
36void clear_user_page(void *to, unsigned long u_vaddr, struct page *page); 29void clear_user_page(void *to, unsigned long u_vaddr, struct page *page);
37 30
38#endif /* CONFIG_ARC_CACHE_VIPT_ALIASING */
39
40#undef STRICT_MM_TYPECHECKS 31#undef STRICT_MM_TYPECHECKS
41 32
42#ifdef STRICT_MM_TYPECHECKS 33#ifdef STRICT_MM_TYPECHECKS
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 1cc4720faccb..95b1522212a7 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -57,9 +57,9 @@
57 57
58#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */ 58#define _PAGE_ACCESSED (1<<1) /* Page is accessed (S) */
59#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */ 59#define _PAGE_CACHEABLE (1<<2) /* Page is cached (H) */
60#define _PAGE_EXECUTE (1<<3) /* Page has user execute perm (H) */ 60#define _PAGE_U_EXECUTE (1<<3) /* Page has user execute perm (H) */
61#define _PAGE_WRITE (1<<4) /* Page has user write perm (H) */ 61#define _PAGE_U_WRITE (1<<4) /* Page has user write perm (H) */
62#define _PAGE_READ (1<<5) /* Page has user read perm (H) */ 62#define _PAGE_U_READ (1<<5) /* Page has user read perm (H) */
63#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */ 63#define _PAGE_K_EXECUTE (1<<6) /* Page has kernel execute perm (H) */
64#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */ 64#define _PAGE_K_WRITE (1<<7) /* Page has kernel write perm (H) */
65#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */ 65#define _PAGE_K_READ (1<<8) /* Page has kernel perm (H) */
@@ -72,9 +72,9 @@
72 72
73/* PD1 */ 73/* PD1 */
74#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */ 74#define _PAGE_CACHEABLE (1<<0) /* Page is cached (H) */
75#define _PAGE_EXECUTE (1<<1) /* Page has user execute perm (H) */ 75#define _PAGE_U_EXECUTE (1<<1) /* Page has user execute perm (H) */
76#define _PAGE_WRITE (1<<2) /* Page has user write perm (H) */ 76#define _PAGE_U_WRITE (1<<2) /* Page has user write perm (H) */
77#define _PAGE_READ (1<<3) /* Page has user read perm (H) */ 77#define _PAGE_U_READ (1<<3) /* Page has user read perm (H) */
78#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */ 78#define _PAGE_K_EXECUTE (1<<4) /* Page has kernel execute perm (H) */
79#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */ 79#define _PAGE_K_WRITE (1<<5) /* Page has kernel write perm (H) */
80#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */ 80#define _PAGE_K_READ (1<<6) /* Page has kernel perm (H) */
@@ -93,7 +93,8 @@
93#endif 93#endif
94 94
95/* Kernel allowed all permissions for all pages */ 95/* Kernel allowed all permissions for all pages */
96#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) 96#define _K_PAGE_PERMS (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
97 _PAGE_GLOBAL | _PAGE_PRESENT)
97 98
98#ifdef CONFIG_ARC_CACHE_PAGES 99#ifdef CONFIG_ARC_CACHE_PAGES
99#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE 100#define _PAGE_DEF_CACHEABLE _PAGE_CACHEABLE
@@ -106,7 +107,11 @@
106 * -by default cached, unless config otherwise 107 * -by default cached, unless config otherwise
107 * -present in memory 108 * -present in memory
108 */ 109 */
109#define ___DEF (_PAGE_PRESENT | _K_PAGE_PERMS | _PAGE_DEF_CACHEABLE) 110#define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
111
112#define _PAGE_READ (_PAGE_U_READ | _PAGE_K_READ)
113#define _PAGE_WRITE (_PAGE_U_WRITE | _PAGE_K_WRITE)
114#define _PAGE_EXECUTE (_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
110 115
111/* Set of bits not changed in pte_modify */ 116/* Set of bits not changed in pte_modify */
112#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED) 117#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
@@ -125,11 +130,10 @@
125 * kernel vaddr space - visible in all addr spaces, but kernel mode only 130 * kernel vaddr space - visible in all addr spaces, but kernel mode only
126 * Thus Global, all-kernel-access, no-user-access, cached 131 * Thus Global, all-kernel-access, no-user-access, cached
127 */ 132 */
128#define PAGE_KERNEL __pgprot(___DEF | _PAGE_GLOBAL) 133#define PAGE_KERNEL __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
129 134
130/* ioremap */ 135/* ioremap */
131#define PAGE_KERNEL_NO_CACHE __pgprot(_PAGE_PRESENT | _K_PAGE_PERMS | \ 136#define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
132 _PAGE_GLOBAL)
133 137
134/************************************************************************** 138/**************************************************************************
135 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific) 139 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
diff --git a/arch/arc/include/asm/tlb.h b/arch/arc/include/asm/tlb.h
index 85b6df839bd7..cb0c708ca665 100644
--- a/arch/arc/include/asm/tlb.h
+++ b/arch/arc/include/asm/tlb.h
@@ -16,7 +16,7 @@
16/* Masks for actual TLB "PD"s */ 16/* Masks for actual TLB "PD"s */
17#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT) 17#define PTE_BITS_IN_PD0 (_PAGE_GLOBAL | _PAGE_PRESENT)
18#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \ 18#define PTE_BITS_IN_PD1 (PAGE_MASK | _PAGE_CACHEABLE | \
19 _PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \ 19 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
20 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ) 20 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
21 21
22#ifndef __ASSEMBLY__ 22#ifndef __ASSEMBLY__
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 2f12bca8aef3..aedce1905441 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -610,7 +610,7 @@ void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
610 610
611 local_irq_save(flags); 611 local_irq_save(flags);
612 __ic_line_inv_vaddr(paddr, vaddr, len); 612 __ic_line_inv_vaddr(paddr, vaddr, len);
613 __dc_line_op(paddr, vaddr, len, OP_FLUSH); 613 __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
614 local_irq_restore(flags); 614 local_irq_restore(flags);
615} 615}
616 616
@@ -676,6 +676,17 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
676 flush_cache_all(); 676 flush_cache_all();
677} 677}
678 678
679void flush_anon_page(struct vm_area_struct *vma, struct page *page,
680 unsigned long u_vaddr)
681{
682 /* TBD: do we really need to clear the kernel mapping */
683 __flush_dcache_page(page_address(page), u_vaddr);
684 __flush_dcache_page(page_address(page), page_address(page));
685
686}
687
688#endif
689
679void copy_user_highpage(struct page *to, struct page *from, 690void copy_user_highpage(struct page *to, struct page *from,
680 unsigned long u_vaddr, struct vm_area_struct *vma) 691 unsigned long u_vaddr, struct vm_area_struct *vma)
681{ 692{
@@ -725,16 +736,6 @@ void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
725 set_bit(PG_arch_1, &page->flags); 736 set_bit(PG_arch_1, &page->flags);
726} 737}
727 738
728void flush_anon_page(struct vm_area_struct *vma, struct page *page,
729 unsigned long u_vaddr)
730{
731 /* TBD: do we really need to clear the kernel mapping */
732 __flush_dcache_page(page_address(page), u_vaddr);
733 __flush_dcache_page(page_address(page), page_address(page));
734
735}
736
737#endif
738 739
739/********************************************************************** 740/**********************************************************************
740 * Explicit Cache flush request from user space via syscall 741 * Explicit Cache flush request from user space via syscall
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 066145b5f348..fe1c5a073afe 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -444,7 +444,8 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long vaddr_unaligned,
444 * so userspace sees the right data. 444 * so userspace sees the right data.
445 * (Avoids the flush for Non-exec + congruent mapping case) 445 * (Avoids the flush for Non-exec + congruent mapping case)
446 */ 446 */
447 if (vma->vm_flags & VM_EXEC || addr_not_cache_congruent(paddr, vaddr)) { 447 if ((vma->vm_flags & VM_EXEC) ||
448 addr_not_cache_congruent(paddr, vaddr)) {
448 struct page *page = pfn_to_page(pte_pfn(*ptep)); 449 struct page *page = pfn_to_page(pte_pfn(*ptep));
449 450
450 int dirty = test_and_clear_bit(PG_arch_1, &page->flags); 451 int dirty = test_and_clear_bit(PG_arch_1, &page->flags);
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 9df765dc7c3a..3357d26ffe54 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -277,7 +277,7 @@ ARC_ENTRY EV_TLBMissI
277 ;---------------------------------------------------------------- 277 ;----------------------------------------------------------------
278 ; VERIFY_PTE: Check if PTE permissions approp for executing code 278 ; VERIFY_PTE: Check if PTE permissions approp for executing code
279 cmp_s r2, VMALLOC_START 279 cmp_s r2, VMALLOC_START
280 mov.lo r2, (_PAGE_PRESENT | _PAGE_READ | _PAGE_EXECUTE) 280 mov.lo r2, (_PAGE_PRESENT | _PAGE_U_READ | _PAGE_U_EXECUTE)
281 mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE) 281 mov.hs r2, (_PAGE_PRESENT | _PAGE_K_READ | _PAGE_K_EXECUTE)
282 282
283 and r3, r0, r2 ; Mask out NON Flag bits from PTE 283 and r3, r0, r2 ; Mask out NON Flag bits from PTE
@@ -320,9 +320,9 @@ ARC_ENTRY EV_TLBMissD
320 mov_s r2, 0 320 mov_s r2, 0
321 lr r3, [ecr] 321 lr r3, [ecr]
322 btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access 322 btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access
323 or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE 323 or.nz r2, r2, _PAGE_U_READ ; chk for Read flag in PTE
324 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access 324 btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access
325 or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE 325 or.nz r2, r2, _PAGE_U_WRITE ; chk for Write flag in PTE
326 ; Above laddering takes care of XCHG access 326 ; Above laddering takes care of XCHG access
327 ; which is both Read and Write 327 ; which is both Read and Write
328 328
diff --git a/arch/arc/plat-tb10x/tb10x.c b/arch/arc/plat-tb10x/tb10x.c
index d3567691c7e1..06cb30929460 100644
--- a/arch/arc/plat-tb10x/tb10x.c
+++ b/arch/arc/plat-tb10x/tb10x.c
@@ -34,31 +34,6 @@ static void __init tb10x_platform_init(void)
34 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 34 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
35} 35}
36 36
37static void __init tb10x_platform_late_init(void)
38{
39 struct device_node *dn;
40
41 /*
42 * Pinctrl documentation recommends setting up the iomux here for
43 * all modules which don't require control over the pins themselves.
44 * Modules which need this kind of assistance are compatible with
45 * "abilis,simple-pinctrl", i.e. we can easily iterate over them.
46 * TODO: Does this recommended method work cleanly with pins required
47 * by modules?
48 */
49 for_each_compatible_node(dn, NULL, "abilis,simple-pinctrl") {
50 struct platform_device *pd = of_find_device_by_node(dn);
51 struct pinctrl *pctl;
52
53 pctl = pinctrl_get_select(&pd->dev, "abilis,simple-default");
54 if (IS_ERR(pctl)) {
55 int ret = PTR_ERR(pctl);
56 dev_err(&pd->dev, "Could not set up pinctrl: %d\n",
57 ret);
58 }
59 }
60}
61
62static const char *tb10x_compat[] __initdata = { 37static const char *tb10x_compat[] __initdata = {
63 "abilis,arc-tb10x", 38 "abilis,arc-tb10x",
64 NULL, 39 NULL,
@@ -67,5 +42,4 @@ static const char *tb10x_compat[] __initdata = {
67MACHINE_START(TB10x, "tb10x") 42MACHINE_START(TB10x, "tb10x")
68 .dt_compat = tb10x_compat, 43 .dt_compat = tb10x_compat,
69 .init_machine = tb10x_platform_init, 44 .init_machine = tb10x_platform_init,
70 .init_late = tb10x_platform_late_init,
71MACHINE_END 45MACHINE_END
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index d423d58f938d..49d993cee512 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -38,6 +38,7 @@ config ARM
38 select HAVE_GENERIC_HARDIRQS 38 select HAVE_GENERIC_HARDIRQS
39 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)) 39 select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
40 select HAVE_IDE if PCI || ISA || PCMCIA 40 select HAVE_IDE if PCI || ISA || PCMCIA
41 select HAVE_IRQ_TIME_ACCOUNTING
41 select HAVE_KERNEL_GZIP 42 select HAVE_KERNEL_GZIP
42 select HAVE_KERNEL_LZMA 43 select HAVE_KERNEL_LZMA
43 select HAVE_KERNEL_LZO 44 select HAVE_KERNEL_LZO
@@ -488,7 +489,7 @@ config ARCH_IXP4XX
488config ARCH_DOVE 489config ARCH_DOVE
489 bool "Marvell Dove" 490 bool "Marvell Dove"
490 select ARCH_REQUIRE_GPIOLIB 491 select ARCH_REQUIRE_GPIOLIB
491 select CPU_V7 492 select CPU_PJ4
492 select GENERIC_CLOCKEVENTS 493 select GENERIC_CLOCKEVENTS
493 select MIGHT_HAVE_PCI 494 select MIGHT_HAVE_PCI
494 select PINCTRL 495 select PINCTRL
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 47374085befd..1ba358ba16b8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -309,7 +309,7 @@ define archhelp
309 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' 309 echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
310 echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)' 310 echo '* xipImage - XIP kernel image, if configured (arch/$(ARCH)/boot/xipImage)'
311 echo ' uImage - U-Boot wrapped zImage' 311 echo ' uImage - U-Boot wrapped zImage'
312 echo ' bootpImage - Combined zImage and initial RAM disk' 312 echo ' bootpImage - Combined zImage and initial RAM disk'
313 echo ' (supply initrd image via make variable INITRD=<path>)' 313 echo ' (supply initrd image via make variable INITRD=<path>)'
314 echo '* dtbs - Build device tree blobs for enabled boards' 314 echo '* dtbs - Build device tree blobs for enabled boards'
315 echo ' install - Install uncompressed kernel' 315 echo ' install - Install uncompressed kernel'
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b9f7121e6ecf..f0895c581a89 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -177,7 +177,9 @@ dtb-$(CONFIG_ARCH_SPEAR3XX)+= spear300-evb.dtb \
177 spear320-evb.dtb \ 177 spear320-evb.dtb \
178 spear320-hmi.dtb 178 spear320-hmi.dtb
179dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb 179dtb-$(CONFIG_ARCH_SPEAR6XX)+= spear600-evb.dtb
180dtb-$(CONFIG_ARCH_SUNXI) += sun4i-a10-cubieboard.dtb \ 180dtb-$(CONFIG_ARCH_SUNXI) += \
181 sun4i-a10-cubieboard.dtb \
182 sun4i-a10-mini-xplus.dtb \
181 sun4i-a10-hackberry.dtb \ 183 sun4i-a10-hackberry.dtb \
182 sun5i-a13-olinuxino.dtb 184 sun5i-a13-olinuxino.dtb
183dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \ 185dtb-$(CONFIG_ARCH_TEGRA) += tegra20-harmony.dtb \
diff --git a/arch/arm/boot/dts/armada-370-xp.dtsi b/arch/arm/boot/dts/armada-370-xp.dtsi
index 272bbc65fab0..550eb772c30e 100644
--- a/arch/arm/boot/dts/armada-370-xp.dtsi
+++ b/arch/arm/boot/dts/armada-370-xp.dtsi
@@ -33,7 +33,8 @@
33 #size-cells = <1>; 33 #size-cells = <1>;
34 compatible = "simple-bus"; 34 compatible = "simple-bus";
35 interrupt-parent = <&mpic>; 35 interrupt-parent = <&mpic>;
36 ranges = <0 0 0xd0000000 0x100000>; 36 ranges = <0 0 0xd0000000 0x0100000 /* internal registers */
37 0xe0000000 0 0xe0000000 0x8100000 /* PCIe */>;
37 38
38 internal-regs { 39 internal-regs {
39 compatible = "simple-bus"; 40 compatible = "simple-bus";
diff --git a/arch/arm/boot/dts/armada-370.dtsi b/arch/arm/boot/dts/armada-370.dtsi
index b2c1b5af9749..aee2b1866ce2 100644
--- a/arch/arm/boot/dts/armada-370.dtsi
+++ b/arch/arm/boot/dts/armada-370.dtsi
@@ -29,7 +29,8 @@
29 }; 29 };
30 30
31 soc { 31 soc {
32 ranges = <0 0xd0000000 0x100000>; 32 ranges = <0 0xd0000000 0x0100000 /* internal registers */
33 0xe0000000 0xe0000000 0x8100000 /* PCIe */>;
33 internal-regs { 34 internal-regs {
34 system-controller@18200 { 35 system-controller@18200 {
35 compatible = "marvell,armada-370-xp-system-controller"; 36 compatible = "marvell,armada-370-xp-system-controller";
@@ -38,12 +39,12 @@
38 39
39 L2: l2-cache { 40 L2: l2-cache {
40 compatible = "marvell,aurora-outer-cache"; 41 compatible = "marvell,aurora-outer-cache";
41 reg = <0xd0008000 0x1000>; 42 reg = <0x08000 0x1000>;
42 cache-id-part = <0x100>; 43 cache-id-part = <0x100>;
43 wt-override; 44 wt-override;
44 }; 45 };
45 46
46 mpic: interrupt-controller@20000 { 47 interrupt-controller@20000 {
47 reg = <0x20a00 0x1d0>, <0x21870 0x58>; 48 reg = <0x20a00 0x1d0>, <0x21870 0x58>;
48 }; 49 };
49 50
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 26ad06fc147e..3ee63d128e27 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,6 +39,9 @@
39 }; 39 };
40 40
41 soc { 41 soc {
42 ranges = <0 0 0xd0000000 0x100000
43 0xf0000000 0 0xf0000000 0x1000000>;
44
42 internal-regs { 45 internal-regs {
43 serial@12000 { 46 serial@12000 {
44 clock-frequency = <250000000>; 47 clock-frequency = <250000000>;
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index f14d36c46159..46b785064dd8 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,6 +27,9 @@
27 }; 27 };
28 28
29 soc { 29 soc {
30 ranges = <0 0 0xd0000000 0x100000
31 0xf0000000 0 0xf0000000 0x8000000>;
32
30 internal-regs { 33 internal-regs {
31 serial@12000 { 34 serial@12000 {
32 clock-frequency = <250000000>; 35 clock-frequency = <250000000>;
diff --git a/arch/arm/boot/dts/armada-xp.dtsi b/arch/arm/boot/dts/armada-xp.dtsi
index bacab11c10dc..5b902f9a3af2 100644
--- a/arch/arm/boot/dts/armada-xp.dtsi
+++ b/arch/arm/boot/dts/armada-xp.dtsi
@@ -31,7 +31,7 @@
31 wt-override; 31 wt-override;
32 }; 32 };
33 33
34 mpic: interrupt-controller@20000 { 34 interrupt-controller@20000 {
35 reg = <0x20a00 0x2d0>, <0x21070 0x58>; 35 reg = <0x20a00 0x2d0>, <0x21070 0x58>;
36 }; 36 };
37 37
diff --git a/arch/arm/boot/dts/at91sam9260.dtsi b/arch/arm/boot/dts/at91sam9260.dtsi
index 70b5ccbac234..84c4bef2d726 100644
--- a/arch/arm/boot/dts/at91sam9260.dtsi
+++ b/arch/arm/boot/dts/at91sam9260.dtsi
@@ -264,7 +264,7 @@
264 atmel,pins = 264 atmel,pins =
265 <0 10 0x2 0x0 /* PA10 periph B */ 265 <0 10 0x2 0x0 /* PA10 periph B */
266 0 11 0x2 0x0 /* PA11 periph B */ 266 0 11 0x2 0x0 /* PA11 periph B */
267 0 24 0x2 0x0 /* PA24 periph B */ 267 0 22 0x2 0x0 /* PA22 periph B */
268 0 25 0x2 0x0 /* PA25 periph B */ 268 0 25 0x2 0x0 /* PA25 periph B */
269 0 26 0x2 0x0 /* PA26 periph B */ 269 0 26 0x2 0x0 /* PA26 periph B */
270 0 27 0x2 0x0 /* PA27 periph B */ 270 0 27 0x2 0x0 /* PA27 periph B */
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index 3de8e6dfbcb1..8d25f889928e 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -57,6 +57,7 @@
57 compatible = "atmel,at91rm9200-aic"; 57 compatible = "atmel,at91rm9200-aic";
58 interrupt-controller; 58 interrupt-controller;
59 reg = <0xfffff000 0x200>; 59 reg = <0xfffff000 0x200>;
60 atmel,external-irqs = <31>;
60 }; 61 };
61 62
62 ramc0: ramc@ffffe800 { 63 ramc0: ramc@ffffe800 {
diff --git a/arch/arm/boot/dts/at91sam9x25ek.dts b/arch/arm/boot/dts/at91sam9x25ek.dts
index 3b40d11d65e7..315250b4995e 100644
--- a/arch/arm/boot/dts/at91sam9x25ek.dts
+++ b/arch/arm/boot/dts/at91sam9x25ek.dts
@@ -11,7 +11,7 @@
11/include/ "at91sam9x5ek.dtsi" 11/include/ "at91sam9x5ek.dtsi"
12 12
13/ { 13/ {
14 model = "Atmel AT91SAM9G25-EK"; 14 model = "Atmel AT91SAM9X25-EK";
15 compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9"; 15 compatible = "atmel,at91sam9x25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
16 16
17 ahb { 17 ahb {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index 82a404da1c0d..99ba6e14ebf3 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -516,7 +516,7 @@
516 usb_otg_hs: usb_otg_hs@480ab000 { 516 usb_otg_hs: usb_otg_hs@480ab000 {
517 compatible = "ti,omap3-musb"; 517 compatible = "ti,omap3-musb";
518 reg = <0x480ab000 0x1000>; 518 reg = <0x480ab000 0x1000>;
519 interrupts = <0 92 0x4>, <0 93 0x4>; 519 interrupts = <92>, <93>;
520 interrupt-names = "mc", "dma"; 520 interrupt-names = "mc", "dma";
521 ti,hwmods = "usb_otg_hs"; 521 ti,hwmods = "usb_otg_hs";
522 multipoint = <1>; 522 multipoint = <1>;
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 2e643ea51cce..5000e0d42849 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -75,11 +75,6 @@
75 compatible = "atmel,at91sam9x5-spi"; 75 compatible = "atmel,at91sam9x5-spi";
76 reg = <0xf0004000 0x100>; 76 reg = <0xf0004000 0x100>;
77 interrupts = <24 4 3>; 77 interrupts = <24 4 3>;
78 cs-gpios = <&pioD 13 0
79 &pioD 14 0 /* conflicts with SCK0 and CANRX0 */
80 &pioD 15 0 /* conflicts with CTS0 and CANTX0 */
81 &pioD 16 0 /* conflicts with RTS0 and PWMFI3 */
82 >;
83 pinctrl-names = "default"; 78 pinctrl-names = "default";
84 pinctrl-0 = <&pinctrl_spi0>; 79 pinctrl-0 = <&pinctrl_spi0>;
85 status = "disabled"; 80 status = "disabled";
@@ -156,7 +151,7 @@
156 }; 151 };
157 152
158 macb0: ethernet@f0028000 { 153 macb0: ethernet@f0028000 {
159 compatible = "cnds,pc302-gem", "cdns,gem"; 154 compatible = "cdns,pc302-gem", "cdns,gem";
160 reg = <0xf0028000 0x100>; 155 reg = <0xf0028000 0x100>;
161 interrupts = <34 4 3>; 156 interrupts = <34 4 3>;
162 pinctrl-names = "default"; 157 pinctrl-names = "default";
@@ -203,11 +198,6 @@
203 compatible = "atmel,at91sam9x5-spi"; 198 compatible = "atmel,at91sam9x5-spi";
204 reg = <0xf8008000 0x100>; 199 reg = <0xf8008000 0x100>;
205 interrupts = <25 4 3>; 200 interrupts = <25 4 3>;
206 cs-gpios = <&pioC 25 0
207 &pioC 26 0 /* conflitcs with TWD1 and ISI_D11 */
208 &pioC 27 0 /* conflitcs with TWCK1 and ISI_D10 */
209 &pioC 28 0 /* conflitcs with PWMFI0 and ISI_D9 */
210 >;
211 pinctrl-names = "default"; 201 pinctrl-names = "default";
212 pinctrl-0 = <&pinctrl_spi1>; 202 pinctrl-0 = <&pinctrl_spi1>;
213 status = "disabled"; 203 status = "disabled";
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 1f8ed404626c..b336e7787cb3 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -32,6 +32,10 @@
32 32
33 ahb { 33 ahb {
34 apb { 34 apb {
35 spi0: spi@f0004000 {
36 cs-gpios = <&pioD 13 0>, <0>, <0>, <0>;
37 };
38
35 macb0: ethernet@f0028000 { 39 macb0: ethernet@f0028000 {
36 phy-mode = "rgmii"; 40 phy-mode = "rgmii";
37 }; 41 };
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index b28fbf3408e3..6f82d9368948 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -14,13 +14,19 @@
14 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; 14 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
15 }; 15 };
16 16
17 /* This is where the interrupt is routed on the S8815 board */
18 external-bus@34000000 {
19 ethernet@300 {
20 interrupt-parent = <&gpio3>;
21 interrupts = <8 0x1>;
22 };
23 };
24
17 /* Custom board node with GPIO pins to active etc */ 25 /* Custom board node with GPIO pins to active etc */
18 usb-s8815 { 26 usb-s8815 {
19 /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */ 27 /* The S8815 is using this very GPIO pin for the SMSC91x IRQs */
20 ethernet-gpio { 28 ethernet-gpio {
21 gpios = <&gpio3 19 0x1>; 29 gpios = <&gpio3 8 0x1>;
22 interrupts = <19 0x1>;
23 interrupt-parent = <&gpio3>;
24 }; 30 };
25 /* This will bias the MMC/SD card detect line */ 31 /* This will bias the MMC/SD card detect line */
26 mmcsd-gpio { 32 mmcsd-gpio {
diff --git a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
index 4a7c35d6726a..078ed7f618d7 100644
--- a/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
+++ b/arch/arm/boot/dts/sun4i-a10-mini-xplus.dts
@@ -22,8 +22,8 @@
22 bootargs = "earlyprintk console=ttyS0,115200"; 22 bootargs = "earlyprintk console=ttyS0,115200";
23 }; 23 };
24 24
25 soc { 25 soc@01c20000 {
26 uart0: uart@01c28000 { 26 uart0: serial@01c28000 {
27 pinctrl-names = "default"; 27 pinctrl-names = "default";
28 pinctrl-0 = <&uart0_pins_a>; 28 pinctrl-0 = <&uart0_pins_a>;
29 status = "okay"; 29 status = "okay";
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 52b88d81b7bb..3caed0db6986 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -15,8 +15,6 @@
15#include <linux/smp.h> 15#include <linux/smp.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17 17
18#include <linux/irqchip/arm-gic.h>
19
20#include <asm/mcpm.h> 18#include <asm/mcpm.h>
21#include <asm/smp.h> 19#include <asm/smp.h>
22#include <asm/smp_plat.h> 20#include <asm/smp_plat.h>
@@ -49,7 +47,6 @@ static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *i
49static void __cpuinit mcpm_secondary_init(unsigned int cpu) 47static void __cpuinit mcpm_secondary_init(unsigned int cpu)
50{ 48{
51 mcpm_cpu_powered_up(); 49 mcpm_cpu_powered_up();
52 gic_secondary_init(0);
53} 50}
54 51
55#ifdef CONFIG_HOTPLUG_CPU 52#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 7e0ebb64a7f9..9940f7b4e438 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -199,7 +199,6 @@ CONFIG_USB_PHY=y
199CONFIG_USB_DEBUG=y 199CONFIG_USB_DEBUG=y
200CONFIG_USB_DEVICEFS=y 200CONFIG_USB_DEVICEFS=y
201# CONFIG_USB_DEVICE_CLASS is not set 201# CONFIG_USB_DEVICE_CLASS is not set
202CONFIG_USB_SUSPEND=y
203CONFIG_USB_MON=y 202CONFIG_USB_MON=y
204CONFIG_USB_OHCI_HCD=y 203CONFIG_USB_OHCI_HCD=y
205CONFIG_USB_STORAGE=y 204CONFIG_USB_STORAGE=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c1ef64bc5abd..abbe31937c65 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -20,6 +20,7 @@ CONFIG_MODULE_FORCE_UNLOAD=y
20CONFIG_MODVERSIONS=y 20CONFIG_MODVERSIONS=y
21CONFIG_MODULE_SRCVERSION_ALL=y 21CONFIG_MODULE_SRCVERSION_ALL=y
22# CONFIG_BLK_DEV_BSG is not set 22# CONFIG_BLK_DEV_BSG is not set
23CONFIG_ARCH_MULTI_V6=y
23CONFIG_ARCH_OMAP2PLUS=y 24CONFIG_ARCH_OMAP2PLUS=y
24CONFIG_OMAP_RESET_CLOCKS=y 25CONFIG_OMAP_RESET_CLOCKS=y
25CONFIG_OMAP_MUX_DEBUG=y 26CONFIG_OMAP_MUX_DEBUG=y
@@ -204,7 +205,6 @@ CONFIG_USB=y
204CONFIG_USB_DEBUG=y 205CONFIG_USB_DEBUG=y
205CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 206CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
206CONFIG_USB_DEVICEFS=y 207CONFIG_USB_DEVICEFS=y
207CONFIG_USB_SUSPEND=y
208CONFIG_USB_MON=y 208CONFIG_USB_MON=y
209CONFIG_USB_WDM=y 209CONFIG_USB_WDM=y
210CONFIG_USB_STORAGE=y 210CONFIG_USB_STORAGE=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index a5f0485133cf..f7ba316164d4 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -153,6 +153,7 @@ CONFIG_MEDIA_CAMERA_SUPPORT=y
153CONFIG_MEDIA_USB_SUPPORT=y 153CONFIG_MEDIA_USB_SUPPORT=y
154CONFIG_USB_VIDEO_CLASS=m 154CONFIG_USB_VIDEO_CLASS=m
155CONFIG_DRM=y 155CONFIG_DRM=y
156CONFIG_TEGRA_HOST1X=y
156CONFIG_DRM_TEGRA=y 157CONFIG_DRM_TEGRA=y
157CONFIG_BACKLIGHT_LCD_SUPPORT=y 158CONFIG_BACKLIGHT_LCD_SUPPORT=y
158# CONFIG_LCD_CLASS_DEVICE is not set 159# CONFIG_LCD_CLASS_DEVICE is not set
@@ -202,7 +203,7 @@ CONFIG_TEGRA20_APB_DMA=y
202CONFIG_STAGING=y 203CONFIG_STAGING=y
203CONFIG_SENSORS_ISL29018=y 204CONFIG_SENSORS_ISL29018=y
204CONFIG_SENSORS_ISL29028=y 205CONFIG_SENSORS_ISL29028=y
205CONFIG_SENSORS_AK8975=y 206CONFIG_AK8975=y
206CONFIG_MFD_NVEC=y 207CONFIG_MFD_NVEC=y
207CONFIG_KEYBOARD_NVEC=y 208CONFIG_KEYBOARD_NVEC=y
208CONFIG_SERIO_NVEC_PS2=y 209CONFIG_SERIO_NVEC_PS2=y
diff --git a/arch/arm/crypto/sha1-armv4-large.S b/arch/arm/crypto/sha1-armv4-large.S
index 92c6eed7aac9..99207c45ec10 100644
--- a/arch/arm/crypto/sha1-armv4-large.S
+++ b/arch/arm/crypto/sha1-armv4-large.S
@@ -195,6 +195,7 @@ ENTRY(sha1_block_data_order)
195 add r3,r3,r10 @ E+=F_00_19(B,C,D) 195 add r3,r3,r10 @ E+=F_00_19(B,C,D)
196 cmp r14,sp 196 cmp r14,sp
197 bne .L_00_15 @ [((11+4)*5+2)*3] 197 bne .L_00_15 @ [((11+4)*5+2)*3]
198 sub sp,sp,#25*4
198#if __ARM_ARCH__<7 199#if __ARM_ARCH__<7
199 ldrb r10,[r1,#2] 200 ldrb r10,[r1,#2]
200 ldrb r9,[r1,#3] 201 ldrb r9,[r1,#3]
@@ -290,7 +291,6 @@ ENTRY(sha1_block_data_order)
290 add r3,r3,r10 @ E+=F_00_19(B,C,D) 291 add r3,r3,r10 @ E+=F_00_19(B,C,D)
291 292
292 ldr r8,.LK_20_39 @ [+15+16*4] 293 ldr r8,.LK_20_39 @ [+15+16*4]
293 sub sp,sp,#25*4
294 cmn sp,#0 @ [+3], clear carry to denote 20_39 294 cmn sp,#0 @ [+3], clear carry to denote 20_39
295.L_20_39_or_60_79: 295.L_20_39_or_60_79:
296 ldr r9,[r14,#15*4] 296 ldr r9,[r14,#15*4]
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 7eb18c1d8d6c..4f009c10540d 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -233,15 +233,15 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
233 ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ 233 ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \
234 atomic64_t, \ 234 atomic64_t, \
235 counter), \ 235 counter), \
236 (unsigned long)(o), \ 236 (unsigned long long)(o), \
237 (unsigned long)(n))) 237 (unsigned long long)(n)))
238 238
239#define cmpxchg64_local(ptr, o, n) \ 239#define cmpxchg64_local(ptr, o, n) \
240 ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ 240 ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \
241 local64_t, \ 241 local64_t, \
242 a), \ 242 a), \
243 (unsigned long)(o), \ 243 (unsigned long long)(o), \
244 (unsigned long)(n))) 244 (unsigned long long)(n)))
245 245
246#endif /* __LINUX_ARM_ARCH__ >= 6 */ 246#endif /* __LINUX_ARM_ARCH__ >= 6 */
247 247
diff --git a/arch/arm/include/debug/ux500.S b/arch/arm/include/debug/ux500.S
index 2848857f5b62..fbd24beeb1fa 100644
--- a/arch/arm/include/debug/ux500.S
+++ b/arch/arm/include/debug/ux500.S
@@ -24,9 +24,9 @@
24#define U8500_UART0_PHYS_BASE (0x80120000) 24#define U8500_UART0_PHYS_BASE (0x80120000)
25#define U8500_UART1_PHYS_BASE (0x80121000) 25#define U8500_UART1_PHYS_BASE (0x80121000)
26#define U8500_UART2_PHYS_BASE (0x80007000) 26#define U8500_UART2_PHYS_BASE (0x80007000)
27#define U8500_UART0_VIRT_BASE (0xa8120000) 27#define U8500_UART0_VIRT_BASE (0xf8120000)
28#define U8500_UART1_VIRT_BASE (0xa8121000) 28#define U8500_UART1_VIRT_BASE (0xf8121000)
29#define U8500_UART2_VIRT_BASE (0xa8007000) 29#define U8500_UART2_VIRT_BASE (0xf8007000)
30#define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE 30#define __UX500_PHYS_UART(n) U8500_UART##n##_PHYS_BASE
31#define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE 31#define __UX500_VIRT_UART(n) U8500_UART##n##_VIRT_BASE
32#endif 32#endif
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index f21970316836..282de4826abb 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -411,7 +411,6 @@ static struct vm_area_struct gate_vma = {
411 .vm_start = 0xffff0000, 411 .vm_start = 0xffff0000,
412 .vm_end = 0xffff0000 + PAGE_SIZE, 412 .vm_end = 0xffff0000 + PAGE_SIZE,
413 .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC, 413 .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
414 .vm_mm = &init_mm,
415}; 414};
416 415
417static int __init gate_vma_init(void) 416static int __init gate_vma_init(void)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 47ab90563bf4..550d63cef68e 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -251,7 +251,7 @@ void __ref cpu_die(void)
 	 * this returns, power and/or clocks can be removed at any point
 	 * from this CPU and its cache by platform_cpu_kill().
 	 */
-	RCU_NONIDLE(complete(&cpu_died));
+	complete(&cpu_died);
 
 	/*
 	 * Ensure that the cache lines associated with that completion are
diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c
index 2acdff4c1dfe..180b3024bec3 100644
--- a/arch/arm/mach-at91/at91rm9200_time.c
+++ b/arch/arm/mach-at91/at91rm9200_time.c
@@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned long delta, struct clock_event_device *dev)
 static struct clock_event_device clkevt = {
 	.name		= "at91_tick",
 	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.shift		= 32,
 	.rating		= 150,
 	.set_next_event	= clkevt32k_next_event,
 	.set_mode	= clkevt32k_mode,
@@ -264,9 +265,11 @@ void __init at91rm9200_timer_init(void)
 	at91_st_write(AT91_ST_RTMR, 1);
 
 	/* Setup timer clockevent, with minimum of two ticks (important!!) */
+	clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift);
+	clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt);
+	clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1;
 	clkevt.cpumask = cpumask_of(0);
-	clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK,
-					2, AT91_ST_ALMV);
+	clockevents_register_device(&clkevt);
 
 	/* register clocksource */
 	clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK);
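The hunk above goes back to filling in the clockevent's mult/shift and delta limits by hand before registering it. The arithmetic behind div_sc() and clockevent_delta2ns() is simple fixed-point scaling; a stand-alone sketch (the 32768 Hz rate matches the slow clock, the maximum-ticks value is only a stand-in for AT91_ST_ALMV):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define SLOW_CLOCK	32768ULL	/* 32.768 kHz slow clock */
#define SHIFT		32
#define MAX_TICKS	0x7fffULL	/* illustrative alarm range */

int main(void)
{
	/* div_sc(): mult = (clock_hz << shift) / NSEC_PER_SEC */
	uint64_t mult = (SLOW_CLOCK << SHIFT) / NSEC_PER_SEC;

	/* clockevent_delta2ns(): ns ~= (ticks << shift) / mult */
	uint64_t max_ns = (MAX_TICKS << SHIFT) / mult;
	uint64_t min_ns = (2ULL << SHIFT) / mult + 1;

	printf("mult=%llu max_ns=%llu min_ns=%llu\n",
	       (unsigned long long)mult, (unsigned long long)max_ns,
	       (unsigned long long)min_ns);
	return 0;
}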
diff --git a/arch/arm/mach-at91/at91sam9n12.c b/arch/arm/mach-at91/at91sam9n12.c
index 13cdbcd48f51..c7d670d11802 100644
--- a/arch/arm/mach-at91/at91sam9n12.c
+++ b/arch/arm/mach-at91/at91sam9n12.c
@@ -223,13 +223,7 @@ static void __init at91sam9n12_map_io(void)
223 at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE); 223 at91_init_sram(0, AT91SAM9N12_SRAM_BASE, AT91SAM9N12_SRAM_SIZE);
224} 224}
225 225
226void __init at91sam9n12_initialize(void)
227{
228 at91_extern_irq = (1 << AT91SAM9N12_ID_IRQ0);
229}
230
231AT91_SOC_START(at91sam9n12) 226AT91_SOC_START(at91sam9n12)
232 .map_io = at91sam9n12_map_io, 227 .map_io = at91sam9n12_map_io,
233 .register_clocks = at91sam9n12_register_clocks, 228 .register_clocks = at91sam9n12_register_clocks,
234 .init = at91sam9n12_initialize,
235AT91_SOC_END 229AT91_SOC_END
diff --git a/arch/arm/mach-at91/include/mach/at91_pmc.h b/arch/arm/mach-at91/include/mach/at91_pmc.h
index 31df12029c4e..2bd7f51b0b82 100644
--- a/arch/arm/mach-at91/include/mach/at91_pmc.h
+++ b/arch/arm/mach-at91/include/mach/at91_pmc.h
@@ -179,9 +179,9 @@ extern void __iomem *at91_pmc_base;
 #define		AT91_PMC_PCR_CMD	(0x1  <<  12)	/* Command (read=0, write=1) */
 #define		AT91_PMC_PCR_DIV(n)	((n)  <<  16)	/* Divisor Value */
 #define			AT91_PMC_PCR_DIV0	0x0	/* Peripheral clock is MCK */
-#define			AT91_PMC_PCR_DIV2	0x2	/* Peripheral clock is MCK/2 */
-#define			AT91_PMC_PCR_DIV4	0x4	/* Peripheral clock is MCK/4 */
-#define			AT91_PMC_PCR_DIV8	0x8	/* Peripheral clock is MCK/8 */
+#define			AT91_PMC_PCR_DIV2	0x1	/* Peripheral clock is MCK/2 */
+#define			AT91_PMC_PCR_DIV4	0x2	/* Peripheral clock is MCK/4 */
+#define			AT91_PMC_PCR_DIV8	0x3	/* Peripheral clock is MCK/8 */
 #define		AT91_PMC_PCR_EN		(0x1  <<  28)	/* Enable */
 
 #endif
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 151259003086..dda9a2bd3acb 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -177,7 +177,8 @@ int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
 static const char *step_sels[]		= { "osc", "pll2_pfd2_396m", };
 static const char *pll1_sw_sels[]	= { "pll1_sys", "step", };
 static const char *periph_pre_sels[]	= { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
-static const char *periph_clk2_sels[]	= { "pll3_usb_otg", "osc", };
+static const char *periph_clk2_sels[]	= { "pll3_usb_otg", "osc", "osc", "dummy", };
+static const char *periph2_clk2_sels[]	= { "pll3_usb_otg", "pll2_bus", };
 static const char *periph_sels[]	= { "periph_pre", "periph_clk2", };
 static const char *periph2_sels[]	= { "periph2_pre", "periph2_clk2", };
 static const char *axi_sels[]		= { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
@@ -185,7 +186,7 @@ static const char *audio_sels[] = { "pll4_post_div", "pll3_pfd2_508m", "pll3_pfd
 static const char *gpu_axi_sels[]	= { "axi", "ahb", };
 static const char *gpu2d_core_sels[]	= { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
 static const char *gpu3d_core_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
-static const char *gpu3d_shader_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
+static const char *gpu3d_shader_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll3_pfd0_720m", };
 static const char *ipu_sels[]		= { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
 static const char *ldb_di_sels[]	= { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
 static const char *ipu_di_pre_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video_div", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
@@ -369,8 +370,8 @@ int __init mx6q_clocks_init(void)
 	clk[pll1_sw]          = imx_clk_mux("pll1_sw",          base + 0xc,  2,  1, pll1_sw_sels,        ARRAY_SIZE(pll1_sw_sels));
 	clk[periph_pre]       = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels,     ARRAY_SIZE(periph_pre_sels));
 	clk[periph2_pre]      = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph_pre_sels,     ARRAY_SIZE(periph_pre_sels));
-	clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 1, periph_clk2_sels,    ARRAY_SIZE(periph_clk2_sels));
-	clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels,    ARRAY_SIZE(periph_clk2_sels));
+	clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 2, periph_clk2_sels,    ARRAY_SIZE(periph_clk2_sels));
+	clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph2_clk2_sels,   ARRAY_SIZE(periph2_clk2_sels));
 	clk[axi_sel]          = imx_clk_mux("axi_sel",          base + 0x14, 6,  2, axi_sels,            ARRAY_SIZE(axi_sels));
 	clk[esai_sel]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,          ARRAY_SIZE(audio_sels));
 	clk[asrc_sel]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,          ARRAY_SIZE(audio_sels));
@@ -498,7 +499,7 @@ int __init mx6q_clocks_init(void)
 	clk[ldb_di1]      = imx_clk_gate2("ldb_di1",       "ldb_di1_podf",      base + 0x74, 14);
 	clk[ipu2_di1]     = imx_clk_gate2("ipu2_di1",      "ipu2_di1_sel",      base + 0x74, 10);
 	clk[hsi_tx]       = imx_clk_gate2("hsi_tx",        "hsi_tx_podf",       base + 0x74, 16);
-	clk[mlb]          = imx_clk_gate2("mlb",           "pll8_mlb",          base + 0x74, 18);
+	clk[mlb]          = imx_clk_gate2("mlb",           "axi",               base + 0x74, 18);
 	clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi",  "mmdc_ch0_axi_podf", base + 0x74, 20);
 	clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi",  "mmdc_ch1_axi_podf", base + 0x74, 22);
 	clk[ocram]        = imx_clk_gate2("ocram",         "ahb",               base + 0x74, 28);
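Two of the clk-imx6q changes above widen the periph_clk2 mux from 1 to 2 bits and give periph2_clk2 its own parent table. The width has to cover every entry in the parent list, otherwise the higher selector values can never be chosen or decoded. A small sketch of that relationship (plain C, not the clk framework):

#include <stdio.h>

int main(void)
{
	const char *parents[] = { "pll3_usb_otg", "osc", "osc", "dummy" };
	unsigned int width = 2;				/* bits in the mux field */
	unsigned int max_sel = (1u << width) - 1;	/* width 1 only reaches 0..1 */

	for (unsigned int sel = 0; sel <= max_sel; sel++)
		printf("sel %u -> %s\n", sel, parents[sel]);
	return 0;
}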
diff --git a/arch/arm/mach-imx/headsmp.S b/arch/arm/mach-imx/headsmp.S
index 67b9c48dcafe..627f16f0e9d1 100644
--- a/arch/arm/mach-imx/headsmp.S
+++ b/arch/arm/mach-imx/headsmp.S
@@ -18,8 +18,20 @@
18 .section ".text.head", "ax" 18 .section ".text.head", "ax"
19 19
20#ifdef CONFIG_SMP 20#ifdef CONFIG_SMP
21diag_reg_offset:
22 .word g_diag_reg - .
23
24 .macro set_diag_reg
25 adr r0, diag_reg_offset
26 ldr r1, [r0]
27 add r1, r1, r0 @ r1 = physical &g_diag_reg
28 ldr r0, [r1]
29 mcr p15, 0, r0, c15, c0, 1 @ write diagnostic register
30 .endm
31
21ENTRY(v7_secondary_startup) 32ENTRY(v7_secondary_startup)
22 bl v7_invalidate_l1 33 bl v7_invalidate_l1
34 set_diag_reg
23 b secondary_startup 35 b secondary_startup
24ENDPROC(v7_secondary_startup) 36ENDPROC(v7_secondary_startup)
25#endif 37#endif
diff --git a/arch/arm/mach-imx/platsmp.c b/arch/arm/mach-imx/platsmp.c
index 4a69305db65e..c6e1ab544882 100644
--- a/arch/arm/mach-imx/platsmp.c
+++ b/arch/arm/mach-imx/platsmp.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <asm/cacheflush.h>
15#include <asm/page.h> 16#include <asm/page.h>
16#include <asm/smp_scu.h> 17#include <asm/smp_scu.h>
17#include <asm/mach/map.h> 18#include <asm/mach/map.h>
@@ -21,6 +22,7 @@
21 22
22#define SCU_STANDBY_ENABLE (1 << 5) 23#define SCU_STANDBY_ENABLE (1 << 5)
23 24
25u32 g_diag_reg;
24static void __iomem *scu_base; 26static void __iomem *scu_base;
25 27
26static struct map_desc scu_io_desc __initdata = { 28static struct map_desc scu_io_desc __initdata = {
@@ -80,6 +82,18 @@ void imx_smp_prepare(void)
 static void __init imx_smp_prepare_cpus(unsigned int max_cpus)
 {
 	imx_smp_prepare();
+
+	/*
+	 * The diagnostic register holds the errata bits. Usually the
+	 * bootloader does not bring up secondary cores, so when errata
+	 * bits are set there, they are set only for the boot cpu. On an
+	 * SMP configuration they must be set on every single core.
+	 * Read the register from the boot cpu here, and replicate it
+	 * into the secondary cores when booting them.
+	 */
+	asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
+	__cpuc_flush_dcache_area(&g_diag_reg, sizeof(g_diag_reg));
+	outer_clean_range(__pa(&g_diag_reg), __pa(&g_diag_reg + 1));
 }
 
 struct smp_operations  imx_smp_ops __initdata = {
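Together with the headsmp.S hunk, the idea is: snapshot the boot CPU's diagnostic register (which carries errata chicken bits) into g_diag_reg, push it out to RAM so a secondary core running with caches off can read it, and have each secondary write it back before entering the normal startup path. A condensed C sketch of the two halves (cache-maintenance calls elided; not a drop-in implementation):

#include <linux/types.h>

static u32 g_diag_reg;

/* Boot CPU: capture the diagnostic register once. */
static void capture_diag_reg(void)
{
	asm("mrc p15, 0, %0, c15, c0, 1" : "=r" (g_diag_reg) : : "cc");
	/* the patch then cleans &g_diag_reg to RAM so an uncached
	 * secondary CPU sees the stored value */
}

/* Secondary CPU (done in assembly in headsmp.S): write it back. */
static void restore_diag_reg(void)
{
	asm volatile("mcr p15, 0, %0, c15, c0, 1" : : "r" (g_diag_reg));
}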
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index c2cae69e6d2b..f38922897563 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -528,12 +528,6 @@ void __init kirkwood_init_early(void)
528{ 528{
529 orion_time_set_base(TIMER_VIRT_BASE); 529 orion_time_set_base(TIMER_VIRT_BASE);
530 530
531 /*
532 * Some Kirkwood devices allocate their coherent buffers from atomic
533 * context. Increase size of atomic coherent pool to make sure such
534 * the allocations won't fail.
535 */
536 init_dma_coherent_pool_size(SZ_1M);
537 mvebu_mbus_init("marvell,kirkwood-mbus", 531 mvebu_mbus_init("marvell,kirkwood-mbus",
538 BRIDGE_WINS_BASE, BRIDGE_WINS_SZ, 532 BRIDGE_WINS_BASE, BRIDGE_WINS_SZ,
539 DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ); 533 DDR_WINDOW_CPU_BASE, DDR_WINDOW_CPU_SZ);
diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c
index 283abff90228..e1267d6b468f 100644
--- a/arch/arm/mach-kirkwood/ts219-setup.c
+++ b/arch/arm/mach-kirkwood/ts219-setup.c
@@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void)
 static int __init ts219_pci_init(void)
 {
 	if (machine_is_ts219())
-		kirkwood_pcie_init(KW_PCIE0);
+		kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0);
 
 	return 0;
 }
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index e11acbb0a46d..80a8bcacd9d5 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -15,6 +15,7 @@ config ARCH_MVEBU
15 select MVEBU_CLK_GATING 15 select MVEBU_CLK_GATING
16 select MVEBU_MBUS 16 select MVEBU_MBUS
17 select ZONE_DMA if ARM_LPAE 17 select ZONE_DMA if ARM_LPAE
18 select ARCH_REQUIRE_GPIOLIB
18 19
19if ARCH_MVEBU 20if ARCH_MVEBU
20 21
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
index 42a4cb3087e2..1c48890bb72b 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.c
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -54,13 +54,6 @@ void __init armada_370_xp_init_early(void)
54 char *mbus_soc_name; 54 char *mbus_soc_name;
55 55
56 /* 56 /*
57 * Some Armada 370/XP devices allocate their coherent buffers
58 * from atomic context. Increase size of atomic coherent pool
59 * to make sure such the allocations won't fail.
60 */
61 init_dma_coherent_pool_size(SZ_1M);
62
63 /*
64 * This initialization will be replaced by a DT-based 57 * This initialization will be replaced by a DT-based
65 * initialization once the mvebu-mbus driver gains DT support. 58 * initialization once the mvebu-mbus driver gains DT support.
66 */ 59 */
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index 68ab858e27b7..a94b3a718d1a 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -345,6 +345,7 @@ static int __init omap1_system_dma_init(void)
 		dev_err(&pdev->dev,
 			"%s: Memory allocation failed for d->chan!\n",
 			__func__);
+		ret = -ENOMEM;
 		goto exit_release_d;
 	}
 
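Without the added assignment, the failed allocation would jump to the cleanup label with whatever value happened to be in ret (often 0) and report success. A stripped-down illustration of the goto-cleanup pattern (hypothetical helper, not the omap1 code):

#include <errno.h>
#include <stdlib.h>

struct chan { int id; };

/* pick_channels() is a stand-in for the allocation path in the hunk above */
static int pick_channels(struct chan **out, size_t n)
{
	int ret = 0;
	struct chan *c = calloc(n, sizeof(*c));

	if (!c) {
		ret = -ENOMEM;	/* record the error before taking the exit path */
		goto exit_release;
	}

	*out = c;
	return 0;

exit_release:
	/* undo anything acquired earlier, then propagate the failure */
	return ret;
}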
diff --git a/arch/arm/mach-omap2/cclock33xx_data.c b/arch/arm/mach-omap2/cclock33xx_data.c
index 6ebc7803bc3e..af3544ce4f02 100644
--- a/arch/arm/mach-omap2/cclock33xx_data.c
+++ b/arch/arm/mach-omap2/cclock33xx_data.c
@@ -454,9 +454,29 @@ DEFINE_CLK_GATE(cefuse_fck, "sys_clkin_ck", &sys_clkin_ck, 0x0,
454 */ 454 */
455DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732); 455DEFINE_CLK_FIXED_FACTOR(clkdiv32k_ck, "clk_24mhz", &clk_24mhz, 0x0, 1, 732);
456 456
457DEFINE_CLK_GATE(clkdiv32k_ick, "clkdiv32k_ck", &clkdiv32k_ck, 0x0, 457static struct clk clkdiv32k_ick;
458 AM33XX_CM_PER_CLKDIV32K_CLKCTRL, AM33XX_MODULEMODE_SWCTRL_SHIFT, 458
459 0x0, NULL); 459static const char *clkdiv32k_ick_parent_names[] = {
460 "clkdiv32k_ck",
461};
462
463static const struct clk_ops clkdiv32k_ick_ops = {
464 .enable = &omap2_dflt_clk_enable,
465 .disable = &omap2_dflt_clk_disable,
466 .is_enabled = &omap2_dflt_clk_is_enabled,
467 .init = &omap2_init_clk_clkdm,
468};
469
470static struct clk_hw_omap clkdiv32k_ick_hw = {
471 .hw = {
472 .clk = &clkdiv32k_ick,
473 },
474 .enable_reg = AM33XX_CM_PER_CLKDIV32K_CLKCTRL,
475 .enable_bit = AM33XX_MODULEMODE_SWCTRL_SHIFT,
476 .clkdm_name = "clk_24mhz_clkdm",
477};
478
479DEFINE_STRUCT_CLK(clkdiv32k_ick, clkdiv32k_ick_parent_names, clkdiv32k_ick_ops);
460 480
461/* "usbotg_fck" is an additional clock and not really a modulemode */ 481/* "usbotg_fck" is an additional clock and not really a modulemode */
462DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0, 482DEFINE_CLK_GATE(usbotg_fck, "dpll_per_ck", &dpll_per_ck, 0x0,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d25a95fe9921..7341eff63f56 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1356,13 +1356,27 @@ static void _enable_sysc(struct omap_hwmod *oh)
 
 	clkdm = _get_clkdm(oh);
 	if (sf & SYSC_HAS_SIDLEMODE) {
+		if (oh->flags & HWMOD_SWSUP_SIDLE ||
+		    oh->flags & HWMOD_SWSUP_SIDLE_ACT) {
+			idlemode = HWMOD_IDLEMODE_NO;
+		} else {
+			if (sf & SYSC_HAS_ENAWAKEUP)
+				_enable_wakeup(oh, &v);
+			if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+				idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+			else
+				idlemode = HWMOD_IDLEMODE_SMART;
+		}
+
+		/*
+		 * This is special handling for some IPs like
+		 * 32k sync timer. Force them to idle!
+		 */
 		clkdm_act = (clkdm && clkdm->flags & CLKDM_ACTIVE_WITH_MPU);
 		if (clkdm_act && !(oh->class->sysc->idlemodes &
 				   (SIDLE_SMART | SIDLE_SMART_WKUP)))
 			idlemode = HWMOD_IDLEMODE_FORCE;
-		else
-			idlemode = (oh->flags & HWMOD_SWSUP_SIDLE) ?
-				HWMOD_IDLEMODE_NO : HWMOD_IDLEMODE_SMART;
+
 		_set_slave_idlemode(oh, idlemode, &v);
 	}
 
@@ -1391,10 +1405,6 @@ static void _enable_sysc(struct omap_hwmod *oh)
1391 (sf & SYSC_HAS_CLOCKACTIVITY)) 1405 (sf & SYSC_HAS_CLOCKACTIVITY))
1392 _set_clockactivity(oh, oh->class->sysc->clockact, &v); 1406 _set_clockactivity(oh, oh->class->sysc->clockact, &v);
1393 1407
1394 /* If slave is in SMARTIDLE, also enable wakeup */
1395 if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
1396 _enable_wakeup(oh, &v);
1397
1398 _write_sysconfig(v, oh); 1408 _write_sysconfig(v, oh);
1399 1409
1400 /* 1410 /*
@@ -1430,13 +1440,16 @@ static void _idle_sysc(struct omap_hwmod *oh)
 	sf = oh->class->sysc->sysc_flags;
 
 	if (sf & SYSC_HAS_SIDLEMODE) {
-		/* XXX What about HWMOD_IDLEMODE_SMART_WKUP? */
-		if (oh->flags & HWMOD_SWSUP_SIDLE ||
-		    !(oh->class->sysc->idlemodes &
-		      (SIDLE_SMART | SIDLE_SMART_WKUP)))
+		if (oh->flags & HWMOD_SWSUP_SIDLE) {
 			idlemode = HWMOD_IDLEMODE_FORCE;
-		else
-			idlemode = HWMOD_IDLEMODE_SMART;
+		} else {
+			if (sf & SYSC_HAS_ENAWAKEUP)
+				_enable_wakeup(oh, &v);
+			if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
+				idlemode = HWMOD_IDLEMODE_SMART_WKUP;
+			else
+				idlemode = HWMOD_IDLEMODE_SMART;
+		}
 		_set_slave_idlemode(oh, idlemode, &v);
 	}
 
@@ -1455,10 +1468,6 @@ static void _idle_sysc(struct omap_hwmod *oh)
1455 _set_master_standbymode(oh, idlemode, &v); 1468 _set_master_standbymode(oh, idlemode, &v);
1456 } 1469 }
1457 1470
1458 /* If slave is in SMARTIDLE, also enable wakeup */
1459 if ((sf & SYSC_HAS_SIDLEMODE) && !(oh->flags & HWMOD_SWSUP_SIDLE))
1460 _enable_wakeup(oh, &v);
1461
1462 _write_sysconfig(v, oh); 1471 _write_sysconfig(v, oh);
1463} 1472}
1464 1473
@@ -2065,7 +2074,7 @@ static int _omap4_get_context_lost(struct omap_hwmod *oh)
 * do so is present in the hwmod data, then call it and pass along the
 * return value; otherwise, return 0.
 */
-static int __init _enable_preprogram(struct omap_hwmod *oh)
+static int _enable_preprogram(struct omap_hwmod *oh)
 {
 	if (!oh->class->enable_preprogram)
 		return 0;
@@ -2246,42 +2255,6 @@ static int _idle(struct omap_hwmod *oh)
2246} 2255}
2247 2256
2248/** 2257/**
2249 * omap_hwmod_set_ocp_autoidle - set the hwmod's OCP autoidle bit
2250 * @oh: struct omap_hwmod *
2251 * @autoidle: desired AUTOIDLE bitfield value (0 or 1)
2252 *
2253 * Sets the IP block's OCP autoidle bit in hardware, and updates our
2254 * local copy. Intended to be used by drivers that require
2255 * direct manipulation of the AUTOIDLE bits.
2256 * Returns -EINVAL if @oh is null or is not in the ENABLED state, or passes
2257 * along the return value from _set_module_autoidle().
2258 *
2259 * Any users of this function should be scrutinized carefully.
2260 */
2261int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle)
2262{
2263 u32 v;
2264 int retval = 0;
2265 unsigned long flags;
2266
2267 if (!oh || oh->_state != _HWMOD_STATE_ENABLED)
2268 return -EINVAL;
2269
2270 spin_lock_irqsave(&oh->_lock, flags);
2271
2272 v = oh->_sysc_cache;
2273
2274 retval = _set_module_autoidle(oh, autoidle, &v);
2275
2276 if (!retval)
2277 _write_sysconfig(v, oh);
2278
2279 spin_unlock_irqrestore(&oh->_lock, flags);
2280
2281 return retval;
2282}
2283
2284/**
2285 * _shutdown - shutdown an omap_hwmod 2258 * _shutdown - shutdown an omap_hwmod
2286 * @oh: struct omap_hwmod * 2259 * @oh: struct omap_hwmod *
2287 * 2260 *
@@ -3180,38 +3153,6 @@ error:
3180} 3153}
3181 3154
3182/** 3155/**
3183 * omap_hwmod_set_slave_idlemode - set the hwmod's OCP slave idlemode
3184 * @oh: struct omap_hwmod *
3185 * @idlemode: SIDLEMODE field bits (shifted to bit 0)
3186 *
3187 * Sets the IP block's OCP slave idlemode in hardware, and updates our
3188 * local copy. Intended to be used by drivers that have some erratum
3189 * that requires direct manipulation of the SIDLEMODE bits. Returns
3190 * -EINVAL if @oh is null, or passes along the return value from
3191 * _set_slave_idlemode().
3192 *
3193 * XXX Does this function have any current users? If not, we should
3194 * remove it; it is better to let the rest of the hwmod code handle this.
3195 * Any users of this function should be scrutinized carefully.
3196 */
3197int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode)
3198{
3199 u32 v;
3200 int retval = 0;
3201
3202 if (!oh)
3203 return -EINVAL;
3204
3205 v = oh->_sysc_cache;
3206
3207 retval = _set_slave_idlemode(oh, idlemode, &v);
3208 if (!retval)
3209 _write_sysconfig(v, oh);
3210
3211 return retval;
3212}
3213
3214/**
3215 * omap_hwmod_lookup - look up a registered omap_hwmod by name 3156 * omap_hwmod_lookup - look up a registered omap_hwmod by name
3216 * @name: name of the omap_hwmod to look up 3157 * @name: name of the omap_hwmod to look up
3217 * 3158 *
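Taken together, the omap_hwmod.c hunks above replace the old "force idle unless the IP advertises smart-idle" rule with an explicit decision tree: software-supervised modules are set to no-idle while enabled (and force-idle when idled), everything else gets smart-idle, preferring smart-idle-with-wakeup when the IP supports it. A compact restatement of that selection, with stand-in flag values and the clockdomain-active override left out for brevity:

#define SWSUP_SIDLE		(1 << 0)	/* stand-ins for the HWMOD_* flags */
#define SWSUP_SIDLE_ACT		(1 << 12)
#define SIDLE_SMART_WKUP	(1 << 3)	/* illustrative bit */

enum idlemode { IDLEMODE_FORCE, IDLEMODE_NO, IDLEMODE_SMART, IDLEMODE_SMART_WKUP };

/* enabling != 0 mirrors _enable_sysc(), enabling == 0 mirrors _idle_sysc() */
static enum idlemode pick_sidlemode(unsigned int flags, unsigned int idlemodes,
				    int enabling)
{
	if (enabling && (flags & (SWSUP_SIDLE | SWSUP_SIDLE_ACT)))
		return IDLEMODE_NO;
	if (!enabling && (flags & SWSUP_SIDLE))
		return IDLEMODE_FORCE;
	return (idlemodes & SIDLE_SMART_WKUP) ? IDLEMODE_SMART_WKUP
					      : IDLEMODE_SMART;
}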
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index fe5962921f07..0c898f58ac9b 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -463,6 +463,9 @@ struct omap_hwmod_omap4_prcm {
 * is kept in force-standby mode. Failing to do so causes PM problems
 * with musb on OMAP3630 at least. Note that musb has a dedicated register
 * to control MSTANDBY signal when MIDLEMODE is set to force-standby.
+ * HWMOD_SWSUP_SIDLE_ACT: omap_hwmod code should manually bring the module
+ * out of idle, but rely on smart-idle to put it back in idle, so the
+ * wakeups are still functional (the only known case for now is UART)
 */
 #define HWMOD_SWSUP_SIDLE			(1 << 0)
 #define HWMOD_SWSUP_MSTANDBY			(1 << 1)
@@ -476,6 +479,7 @@ struct omap_hwmod_omap4_prcm {
 #define HWMOD_EXT_OPT_MAIN_CLK			(1 << 9)
 #define HWMOD_BLOCK_WFI				(1 << 10)
 #define HWMOD_FORCE_MSTANDBY			(1 << 11)
+#define HWMOD_SWSUP_SIDLE_ACT			(1 << 12)
 
 /*
  * omap_hwmod._int_flags definitions
@@ -641,9 +645,6 @@ int omap_hwmod_read_hardreset(struct omap_hwmod *oh, const char *name);
641int omap_hwmod_enable_clocks(struct omap_hwmod *oh); 645int omap_hwmod_enable_clocks(struct omap_hwmod *oh);
642int omap_hwmod_disable_clocks(struct omap_hwmod *oh); 646int omap_hwmod_disable_clocks(struct omap_hwmod *oh);
643 647
644int omap_hwmod_set_slave_idlemode(struct omap_hwmod *oh, u8 idlemode);
645int omap_hwmod_set_ocp_autoidle(struct omap_hwmod *oh, u8 autoidle);
646
647int omap_hwmod_reset(struct omap_hwmod *oh); 648int omap_hwmod_reset(struct omap_hwmod *oh);
648void omap_hwmod_ocp_barrier(struct omap_hwmod *oh); 649void omap_hwmod_ocp_barrier(struct omap_hwmod *oh);
649 650
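The new flag is a single bit that the hwmod core tests alongside the existing HWMOD_SWSUP_SIDLE bit; the long series of per-UART hunks that follows just sets it in each hwmod's .flags field. A trivial sketch of how the bit combines and is tested (values as in the hunks above):

#include <stdio.h>

#define HWMOD_SWSUP_SIDLE	(1 << 0)
#define HWMOD_SWSUP_SIDLE_ACT	(1 << 12)

int main(void)
{
	unsigned int flags = HWMOD_SWSUP_SIDLE_ACT;	/* e.g. a UART hwmod */

	if (flags & (HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_SIDLE_ACT))
		printf("software brings the module out of idle\n");
	if (!(flags & HWMOD_SWSUP_SIDLE))
		printf("smart-idle puts it back in idle, keeping wakeups alive\n");
	return 0;
}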
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index c8c64b3e1acc..d05fc7b54567 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -512,6 +512,7 @@ struct omap_hwmod omap2xxx_uart1_hwmod = {
512 .mpu_irqs = omap2_uart1_mpu_irqs, 512 .mpu_irqs = omap2_uart1_mpu_irqs,
513 .sdma_reqs = omap2_uart1_sdma_reqs, 513 .sdma_reqs = omap2_uart1_sdma_reqs,
514 .main_clk = "uart1_fck", 514 .main_clk = "uart1_fck",
515 .flags = HWMOD_SWSUP_SIDLE_ACT,
515 .prcm = { 516 .prcm = {
516 .omap2 = { 517 .omap2 = {
517 .module_offs = CORE_MOD, 518 .module_offs = CORE_MOD,
@@ -531,6 +532,7 @@ struct omap_hwmod omap2xxx_uart2_hwmod = {
531 .mpu_irqs = omap2_uart2_mpu_irqs, 532 .mpu_irqs = omap2_uart2_mpu_irqs,
532 .sdma_reqs = omap2_uart2_sdma_reqs, 533 .sdma_reqs = omap2_uart2_sdma_reqs,
533 .main_clk = "uart2_fck", 534 .main_clk = "uart2_fck",
535 .flags = HWMOD_SWSUP_SIDLE_ACT,
534 .prcm = { 536 .prcm = {
535 .omap2 = { 537 .omap2 = {
536 .module_offs = CORE_MOD, 538 .module_offs = CORE_MOD,
@@ -550,6 +552,7 @@ struct omap_hwmod omap2xxx_uart3_hwmod = {
550 .mpu_irqs = omap2_uart3_mpu_irqs, 552 .mpu_irqs = omap2_uart3_mpu_irqs,
551 .sdma_reqs = omap2_uart3_sdma_reqs, 553 .sdma_reqs = omap2_uart3_sdma_reqs,
552 .main_clk = "uart3_fck", 554 .main_clk = "uart3_fck",
555 .flags = HWMOD_SWSUP_SIDLE_ACT,
553 .prcm = { 556 .prcm = {
554 .omap2 = { 557 .omap2 = {
555 .module_offs = CORE_MOD, 558 .module_offs = CORE_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 01d8f324450a..075f7cc51026 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -1995,6 +1995,7 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
1995 .name = "uart1", 1995 .name = "uart1",
1996 .class = &uart_class, 1996 .class = &uart_class,
1997 .clkdm_name = "l4_wkup_clkdm", 1997 .clkdm_name = "l4_wkup_clkdm",
1998 .flags = HWMOD_SWSUP_SIDLE_ACT,
1998 .mpu_irqs = am33xx_uart1_irqs, 1999 .mpu_irqs = am33xx_uart1_irqs,
1999 .sdma_reqs = uart1_edma_reqs, 2000 .sdma_reqs = uart1_edma_reqs,
2000 .main_clk = "dpll_per_m2_div4_wkupdm_ck", 2001 .main_clk = "dpll_per_m2_div4_wkupdm_ck",
@@ -2015,6 +2016,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
2015 .name = "uart2", 2016 .name = "uart2",
2016 .class = &uart_class, 2017 .class = &uart_class,
2017 .clkdm_name = "l4ls_clkdm", 2018 .clkdm_name = "l4ls_clkdm",
2019 .flags = HWMOD_SWSUP_SIDLE_ACT,
2018 .mpu_irqs = am33xx_uart2_irqs, 2020 .mpu_irqs = am33xx_uart2_irqs,
2019 .sdma_reqs = uart1_edma_reqs, 2021 .sdma_reqs = uart1_edma_reqs,
2020 .main_clk = "dpll_per_m2_div4_ck", 2022 .main_clk = "dpll_per_m2_div4_ck",
@@ -2042,6 +2044,7 @@ static struct omap_hwmod am33xx_uart3_hwmod = {
2042 .name = "uart3", 2044 .name = "uart3",
2043 .class = &uart_class, 2045 .class = &uart_class,
2044 .clkdm_name = "l4ls_clkdm", 2046 .clkdm_name = "l4ls_clkdm",
2047 .flags = HWMOD_SWSUP_SIDLE_ACT,
2045 .mpu_irqs = am33xx_uart3_irqs, 2048 .mpu_irqs = am33xx_uart3_irqs,
2046 .sdma_reqs = uart3_edma_reqs, 2049 .sdma_reqs = uart3_edma_reqs,
2047 .main_clk = "dpll_per_m2_div4_ck", 2050 .main_clk = "dpll_per_m2_div4_ck",
@@ -2062,6 +2065,7 @@ static struct omap_hwmod am33xx_uart4_hwmod = {
2062 .name = "uart4", 2065 .name = "uart4",
2063 .class = &uart_class, 2066 .class = &uart_class,
2064 .clkdm_name = "l4ls_clkdm", 2067 .clkdm_name = "l4ls_clkdm",
2068 .flags = HWMOD_SWSUP_SIDLE_ACT,
2065 .mpu_irqs = am33xx_uart4_irqs, 2069 .mpu_irqs = am33xx_uart4_irqs,
2066 .sdma_reqs = uart1_edma_reqs, 2070 .sdma_reqs = uart1_edma_reqs,
2067 .main_clk = "dpll_per_m2_div4_ck", 2071 .main_clk = "dpll_per_m2_div4_ck",
@@ -2082,6 +2086,7 @@ static struct omap_hwmod am33xx_uart5_hwmod = {
2082 .name = "uart5", 2086 .name = "uart5",
2083 .class = &uart_class, 2087 .class = &uart_class,
2084 .clkdm_name = "l4ls_clkdm", 2088 .clkdm_name = "l4ls_clkdm",
2089 .flags = HWMOD_SWSUP_SIDLE_ACT,
2085 .mpu_irqs = am33xx_uart5_irqs, 2090 .mpu_irqs = am33xx_uart5_irqs,
2086 .sdma_reqs = uart1_edma_reqs, 2091 .sdma_reqs = uart1_edma_reqs,
2087 .main_clk = "dpll_per_m2_div4_ck", 2092 .main_clk = "dpll_per_m2_div4_ck",
@@ -2102,6 +2107,7 @@ static struct omap_hwmod am33xx_uart6_hwmod = {
2102 .name = "uart6", 2107 .name = "uart6",
2103 .class = &uart_class, 2108 .class = &uart_class,
2104 .clkdm_name = "l4ls_clkdm", 2109 .clkdm_name = "l4ls_clkdm",
2110 .flags = HWMOD_SWSUP_SIDLE_ACT,
2105 .mpu_irqs = am33xx_uart6_irqs, 2111 .mpu_irqs = am33xx_uart6_irqs,
2106 .sdma_reqs = uart1_edma_reqs, 2112 .sdma_reqs = uart1_edma_reqs,
2107 .main_clk = "dpll_per_m2_div4_ck", 2113 .main_clk = "dpll_per_m2_div4_ck",
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 4083606ea1da..31c7126eb3bb 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -490,6 +490,7 @@ static struct omap_hwmod omap3xxx_uart1_hwmod = {
490 .mpu_irqs = omap2_uart1_mpu_irqs, 490 .mpu_irqs = omap2_uart1_mpu_irqs,
491 .sdma_reqs = omap2_uart1_sdma_reqs, 491 .sdma_reqs = omap2_uart1_sdma_reqs,
492 .main_clk = "uart1_fck", 492 .main_clk = "uart1_fck",
493 .flags = HWMOD_SWSUP_SIDLE_ACT,
493 .prcm = { 494 .prcm = {
494 .omap2 = { 495 .omap2 = {
495 .module_offs = CORE_MOD, 496 .module_offs = CORE_MOD,
@@ -508,6 +509,7 @@ static struct omap_hwmod omap3xxx_uart2_hwmod = {
508 .mpu_irqs = omap2_uart2_mpu_irqs, 509 .mpu_irqs = omap2_uart2_mpu_irqs,
509 .sdma_reqs = omap2_uart2_sdma_reqs, 510 .sdma_reqs = omap2_uart2_sdma_reqs,
510 .main_clk = "uart2_fck", 511 .main_clk = "uart2_fck",
512 .flags = HWMOD_SWSUP_SIDLE_ACT,
511 .prcm = { 513 .prcm = {
512 .omap2 = { 514 .omap2 = {
513 .module_offs = CORE_MOD, 515 .module_offs = CORE_MOD,
@@ -526,6 +528,7 @@ static struct omap_hwmod omap3xxx_uart3_hwmod = {
526 .mpu_irqs = omap2_uart3_mpu_irqs, 528 .mpu_irqs = omap2_uart3_mpu_irqs,
527 .sdma_reqs = omap2_uart3_sdma_reqs, 529 .sdma_reqs = omap2_uart3_sdma_reqs,
528 .main_clk = "uart3_fck", 530 .main_clk = "uart3_fck",
531 .flags = HWMOD_SWSUP_SIDLE_ACT,
529 .prcm = { 532 .prcm = {
530 .omap2 = { 533 .omap2 = {
531 .module_offs = OMAP3430_PER_MOD, 534 .module_offs = OMAP3430_PER_MOD,
@@ -555,6 +558,7 @@ static struct omap_hwmod omap36xx_uart4_hwmod = {
555 .mpu_irqs = uart4_mpu_irqs, 558 .mpu_irqs = uart4_mpu_irqs,
556 .sdma_reqs = uart4_sdma_reqs, 559 .sdma_reqs = uart4_sdma_reqs,
557 .main_clk = "uart4_fck", 560 .main_clk = "uart4_fck",
561 .flags = HWMOD_SWSUP_SIDLE_ACT,
558 .prcm = { 562 .prcm = {
559 .omap2 = { 563 .omap2 = {
560 .module_offs = OMAP3430_PER_MOD, 564 .module_offs = OMAP3430_PER_MOD,
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index eaba9dc91a0d..848b6dc67590 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -3434,6 +3434,7 @@ static struct omap_hwmod omap44xx_uart1_hwmod = {
3434 .name = "uart1", 3434 .name = "uart1",
3435 .class = &omap44xx_uart_hwmod_class, 3435 .class = &omap44xx_uart_hwmod_class,
3436 .clkdm_name = "l4_per_clkdm", 3436 .clkdm_name = "l4_per_clkdm",
3437 .flags = HWMOD_SWSUP_SIDLE_ACT,
3437 .mpu_irqs = omap44xx_uart1_irqs, 3438 .mpu_irqs = omap44xx_uart1_irqs,
3438 .sdma_reqs = omap44xx_uart1_sdma_reqs, 3439 .sdma_reqs = omap44xx_uart1_sdma_reqs,
3439 .main_clk = "func_48m_fclk", 3440 .main_clk = "func_48m_fclk",
@@ -3462,6 +3463,7 @@ static struct omap_hwmod omap44xx_uart2_hwmod = {
3462 .name = "uart2", 3463 .name = "uart2",
3463 .class = &omap44xx_uart_hwmod_class, 3464 .class = &omap44xx_uart_hwmod_class,
3464 .clkdm_name = "l4_per_clkdm", 3465 .clkdm_name = "l4_per_clkdm",
3466 .flags = HWMOD_SWSUP_SIDLE_ACT,
3465 .mpu_irqs = omap44xx_uart2_irqs, 3467 .mpu_irqs = omap44xx_uart2_irqs,
3466 .sdma_reqs = omap44xx_uart2_sdma_reqs, 3468 .sdma_reqs = omap44xx_uart2_sdma_reqs,
3467 .main_clk = "func_48m_fclk", 3469 .main_clk = "func_48m_fclk",
@@ -3490,7 +3492,8 @@ static struct omap_hwmod omap44xx_uart3_hwmod = {
 	.name		= "uart3",
 	.class		= &omap44xx_uart_hwmod_class,
 	.clkdm_name	= "l4_per_clkdm",
-	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET,
+	.flags		= HWMOD_INIT_NO_IDLE | HWMOD_INIT_NO_RESET |
+				HWMOD_SWSUP_SIDLE_ACT,
 	.mpu_irqs	= omap44xx_uart3_irqs,
 	.sdma_reqs	= omap44xx_uart3_sdma_reqs,
 	.main_clk	= "func_48m_fclk",
@@ -3519,6 +3522,7 @@ static struct omap_hwmod omap44xx_uart4_hwmod = {
3519 .name = "uart4", 3522 .name = "uart4",
3520 .class = &omap44xx_uart_hwmod_class, 3523 .class = &omap44xx_uart_hwmod_class,
3521 .clkdm_name = "l4_per_clkdm", 3524 .clkdm_name = "l4_per_clkdm",
3525 .flags = HWMOD_SWSUP_SIDLE_ACT,
3522 .mpu_irqs = omap44xx_uart4_irqs, 3526 .mpu_irqs = omap44xx_uart4_irqs,
3523 .sdma_reqs = omap44xx_uart4_sdma_reqs, 3527 .sdma_reqs = omap44xx_uart4_sdma_reqs,
3524 .main_clk = "func_48m_fclk", 3528 .main_clk = "func_48m_fclk",
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 8396b5b7e912..f6601563aa69 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -95,38 +95,9 @@ static void omap_uart_enable_wakeup(struct device *dev, bool enable)
95 omap_hwmod_disable_wakeup(od->hwmods[0]); 95 omap_hwmod_disable_wakeup(od->hwmods[0]);
96} 96}
97 97
98/*
99 * Errata i291: [UART]:Cannot Acknowledge Idle Requests
100 * in Smartidle Mode When Configured for DMA Operations.
101 * WA: configure uart in force idle mode.
102 */
103static void omap_uart_set_noidle(struct device *dev)
104{
105 struct platform_device *pdev = to_platform_device(dev);
106 struct omap_device *od = to_omap_device(pdev);
107
108 omap_hwmod_set_slave_idlemode(od->hwmods[0], HWMOD_IDLEMODE_NO);
109}
110
111static void omap_uart_set_smartidle(struct device *dev)
112{
113 struct platform_device *pdev = to_platform_device(dev);
114 struct omap_device *od = to_omap_device(pdev);
115 u8 idlemode;
116
117 if (od->hwmods[0]->class->sysc->idlemodes & SIDLE_SMART_WKUP)
118 idlemode = HWMOD_IDLEMODE_SMART_WKUP;
119 else
120 idlemode = HWMOD_IDLEMODE_SMART;
121
122 omap_hwmod_set_slave_idlemode(od->hwmods[0], idlemode);
123}
124
125#else 98#else
126static void omap_uart_enable_wakeup(struct device *dev, bool enable) 99static void omap_uart_enable_wakeup(struct device *dev, bool enable)
127{} 100{}
128static void omap_uart_set_noidle(struct device *dev) {}
129static void omap_uart_set_smartidle(struct device *dev) {}
130#endif /* CONFIG_PM */ 101#endif /* CONFIG_PM */
131 102
132#ifdef CONFIG_OMAP_MUX 103#ifdef CONFIG_OMAP_MUX
@@ -299,8 +270,6 @@ void __init omap_serial_init_port(struct omap_board_data *bdata,
299 omap_up.uartclk = OMAP24XX_BASE_BAUD * 16; 270 omap_up.uartclk = OMAP24XX_BASE_BAUD * 16;
300 omap_up.flags = UPF_BOOT_AUTOCONF; 271 omap_up.flags = UPF_BOOT_AUTOCONF;
301 omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count; 272 omap_up.get_context_loss_count = omap_pm_get_dev_context_loss_count;
302 omap_up.set_forceidle = omap_uart_set_smartidle;
303 omap_up.set_noidle = omap_uart_set_noidle;
304 omap_up.enable_wakeup = omap_uart_enable_wakeup; 273 omap_up.enable_wakeup = omap_uart_enable_wakeup;
305 omap_up.dma_rx_buf_size = info->dma_rx_buf_size; 274 omap_up.dma_rx_buf_size = info->dma_rx_buf_size;
306 omap_up.dma_rx_timeout = info->dma_rx_timeout; 275 omap_up.dma_rx_timeout = info->dma_rx_timeout;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index b97fd672e89d..f8a6db9239bf 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -199,13 +199,6 @@ void __init orion5x_init_early(void)
199 199
200 orion_time_set_base(TIMER_VIRT_BASE); 200 orion_time_set_base(TIMER_VIRT_BASE);
201 201
202 /*
203 * Some Orion5x devices allocate their coherent buffers from atomic
204 * context. Increase size of atomic coherent pool to make sure such
205 * the allocations won't fail.
206 */
207 init_dma_coherent_pool_size(SZ_1M);
208
209 /* Initialize the MBUS driver */ 202 /* Initialize the MBUS driver */
210 orion5x_pcie_id(&dev, &rev); 203 orion5x_pcie_id(&dev, &rev);
211 if (dev == MV88F5281_DEV_ID) 204 if (dev == MV88F5281_DEV_ID)
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index 91052855cc12..b9594e911ce7 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -212,8 +212,8 @@ static struct platform_device *marzen_devices[] __initdata = {
 static struct usb_phy *phy;
 static int usb_power_on(struct platform_device *pdev)
 {
-	if (!phy)
-		return -EIO;
+	if (IS_ERR(phy))
+		return PTR_ERR(phy);
 
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
@@ -225,7 +225,7 @@ static int usb_power_on(struct platform_device *pdev)
 
 static void usb_power_off(struct platform_device *pdev)
 {
-	if (!phy)
+	if (IS_ERR(phy))
 		return;
 
 	usb_phy_shutdown(phy);
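The PHY lookup here returns an ERR_PTR-encoded error rather than NULL on failure, so the old NULL check never fired; the fix tests IS_ERR() and propagates PTR_ERR(). A tiny self-contained sketch of the ERR_PTR convention (simplified reimplementation, not the kernel headers):

#include <stdio.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *phy = ERR_PTR(-19);	/* e.g. -ENODEV from a failed lookup */

	if (!phy)
		printf("NULL check: never taken, the error is missed\n");
	if (IS_ERR(phy))
		printf("IS_ERR check: taken, propagate %ld\n", PTR_ERR(phy));
	return 0;
}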
diff --git a/arch/arm/mach-sunxi/Kconfig b/arch/arm/mach-sunxi/Kconfig
index d259c782d742..5b045e302b43 100644
--- a/arch/arm/mach-sunxi/Kconfig
+++ b/arch/arm/mach-sunxi/Kconfig
@@ -1,5 +1,6 @@
1config ARCH_SUNXI 1config ARCH_SUNXI
2 bool "Allwinner A1X SOCs" if ARCH_MULTI_V7 2 bool "Allwinner A1X SOCs" if ARCH_MULTI_V7
3 select ARCH_REQUIRE_GPIOLIB
3 select CLKSRC_MMIO 4 select CLKSRC_MMIO
4 select CLKSRC_OF 5 select CLKSRC_OF
5 select COMMON_CLK 6 select COMMON_CLK
diff --git a/arch/arm/mach-tegra/tegra2_emc.c b/arch/arm/mach-tegra/tegra2_emc.c
index 9e8bdfa2b369..31e69a019bdd 100644
--- a/arch/arm/mach-tegra/tegra2_emc.c
+++ b/arch/arm/mach-tegra/tegra2_emc.c
@@ -307,11 +307,6 @@ static int tegra_emc_probe(struct platform_device *pdev)
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "missing register base\n");
-		return -ENOMEM;
-	}
-
 	emc_regbase = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(emc_regbase))
 		return PTR_ERR(emc_regbase);
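This cleanup (and the identical one in plat-samsung/adc.c further down) relies on devm_ioremap_resource() validating the resource itself: it checks for a NULL resource, prints its own diagnostics, and returns an ERR_PTR on any failure, so the open-coded check and the misleading -ENOMEM become dead weight. A sketch of the resulting probe pattern (kernel-style, abbreviated):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int emc_probe_sketch(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* handles res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);			/* -EINVAL, -EBUSY, ... */

	/* continue using base ... */
	return 0;
}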
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 6a4387e39df8..b19b07204aaf 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -51,6 +51,7 @@ config MACH_MOP500
51 bool "U8500 Development platform, MOP500 versions" 51 bool "U8500 Development platform, MOP500 versions"
52 select I2C 52 select I2C
53 select I2C_NOMADIK 53 select I2C_NOMADIK
54 select REGULATOR
54 select REGULATOR_FIXED_VOLTAGE 55 select REGULATOR_FIXED_VOLTAGE
55 select SOC_BUS 56 select SOC_BUS
56 select UX500_SOC_DB8500 57 select UX500_SOC_DB8500
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index 3cd555ac6d0a..78389de94dde 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -623,7 +623,7 @@ static void __init mop500_init_machine(void)
 	sdi0_reg_info.gpios[0].gpio = GPIO_SDMMC_1V8_3V_SEL;
 
 	mop500_pinmaps_init();
-	parent = u8500_init_devices(&ab8500_platdata);
+	parent = u8500_init_devices();
 
 	for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
 		mop500_platform_devs[i]->dev.parent = parent;
@@ -660,7 +660,7 @@ static void __init snowball_init_machine(void)
 	sdi0_reg_info.gpios[0].gpio = SNOWBALL_SDMMC_1V8_3V_GPIO;
 
 	snowball_pinmaps_init();
-	parent = u8500_init_devices(&ab8500_platdata);
+	parent = u8500_init_devices();
 
 	for (i = 0; i < ARRAY_SIZE(snowball_platform_devs); i++)
 		snowball_platform_devs[i]->dev.parent = parent;
@@ -698,7 +698,7 @@ static void __init hrefv60_init_machine(void)
 	sdi0_reg_info.gpios[0].gpio = HREFV60_SDMMC_1V8_3V_GPIO;
 
 	hrefv60_pinmaps_init();
-	parent = u8500_init_devices(&ab8500_platdata);
+	parent = u8500_init_devices();
 
 	for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
 		mop500_platform_devs[i]->dev.parent = parent;
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index e90b5ab23b6d..46cca52890bc 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -206,7 +206,7 @@ static struct device * __init db8500_soc_device_init(void)
 /*
  * This function is called from the board init
  */
-struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
+struct device * __init u8500_init_devices(void)
 {
 	struct device *parent;
 	int i;
@@ -220,8 +220,6 @@ struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500)
220 for (i = 0; i < ARRAY_SIZE(platform_devs); i++) 220 for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
221 platform_devs[i]->dev.parent = parent; 221 platform_devs[i]->dev.parent = parent;
222 222
223 db8500_prcmu_device.dev.platform_data = ab8500;
224
225 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs)); 223 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
226 224
227 return parent; 225 return parent;
@@ -278,7 +276,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
 	OF_DEV_AUXDATA("st,nomadik-i2c", 0x8012a000, "nmk-i2c.4", NULL),
 	OF_DEV_AUXDATA("stericsson,db8500-prcmu", 0x80157000, "db8500-prcmu",
 			&db8500_prcmu_pdata),
-	OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x", NULL),
+	OF_DEV_AUXDATA("smsc,lan9115", 0x50000000, "smsc911x.0", NULL),
 	/* Requires device name bindings. */
 	OF_DEV_AUXDATA("stericsson,nmk-pinctrl", U8500_PRCMU_BASE,
 		"pinctrl-db8500", NULL),
diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
index bddce2b49372..cad3ca86c540 100644
--- a/arch/arm/mach-ux500/setup.h
+++ b/arch/arm/mach-ux500/setup.h
@@ -18,7 +18,7 @@
 void __init ux500_map_io(void);
 extern void __init u8500_map_io(void);
 
-extern struct device * __init u8500_init_devices(struct ab8500_platform_data *ab8500);
+extern struct device * __init u8500_init_devices(void);
 
 extern void __init ux500_init_irq(void);
 extern void __init ux500_init_late(void);
diff --git a/arch/arm/mach-vt8500/vt8500.c b/arch/arm/mach-vt8500/vt8500.c
index 1dd281efc020..f5c33df7a597 100644
--- a/arch/arm/mach-vt8500/vt8500.c
+++ b/arch/arm/mach-vt8500/vt8500.c
@@ -173,6 +173,7 @@ static const char * const vt8500_dt_compat[] = {
173 "wm,wm8505", 173 "wm,wm8505",
174 "wm,wm8750", 174 "wm,wm8750",
175 "wm,wm8850", 175 "wm,wm8850",
176 NULL
176}; 177};
177 178
178DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)") 179DT_MACHINE_START(WMT_DT, "VIA/Wondermedia SoC (Device Tree Support)")
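The dt_compat list is walked until a NULL entry is reached, so an unterminated array sends the matcher off the end of the table. A small sketch of that walk (plain C, hypothetical helper name):

#include <stdio.h>
#include <string.h>

static const char *const dt_compat[] = {
	"wm,wm8505",
	"wm,wm8750",
	"wm,wm8850",
	NULL,				/* terminator the walker relies on */
};

static int board_is_compatible(const char *compat)
{
	for (const char *const *p = dt_compat; *p; p++)	/* stops at NULL */
		if (!strcmp(*p, compat))
			return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", board_is_compatible("wm,wm8750"));
	return 0;
}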
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 251f827271e9..c019b7aaf776 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -383,7 +383,7 @@ static struct resource orion_ge10_shared_resources[] = {
 
 static struct platform_device orion_ge10_shared = {
 	.name		= MV643XX_ETH_SHARED_NAME,
-	.id		= 1,
+	.id		= 2,
 	.dev		= {
 		.platform_data	= &orion_ge10_shared_data,
 	},
@@ -398,8 +398,8 @@ static struct resource orion_ge10_resources[] = {
 
 static struct platform_device orion_ge10 = {
 	.name		= MV643XX_ETH_NAME,
-	.id		= 1,
-	.num_resources	= 2,
+	.id		= 2,
+	.num_resources	= 1,
 	.resource	= orion_ge10_resources,
 	.dev		= {
 		.coherent_dma_mask	= DMA_BIT_MASK(32),
@@ -432,7 +432,7 @@ static struct resource orion_ge11_shared_resources[] = {
 
 static struct platform_device orion_ge11_shared = {
 	.name		= MV643XX_ETH_SHARED_NAME,
-	.id		= 1,
+	.id		= 3,
 	.dev		= {
 		.platform_data	= &orion_ge11_shared_data,
 	},
@@ -447,8 +447,8 @@ static struct resource orion_ge11_resources[] = {
 
 static struct platform_device orion_ge11 = {
 	.name		= MV643XX_ETH_NAME,
-	.id		= 1,
-	.num_resources	= 2,
+	.id		= 3,
+	.num_resources	= 1,
 	.resource	= orion_ge11_resources,
 	.dev		= {
 		.coherent_dma_mask	= DMA_BIT_MASK(32),
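Platform devices are identified by the (name, id) pair, so registering ge10 and ge11 with the same id as ge00/ge01 collides in the device namespace; giving each controller its own id keeps the generated device names distinct. A minimal illustration of the naming scheme (plain C, not the driver core):

#include <stdio.h>

struct pdev { const char *name; int id; };

int main(void)
{
	struct pdev ge10 = { "mv643xx_eth", 2 };
	struct pdev ge11 = { "mv643xx_eth", 3 };

	/* with unique ids the device names come out distinct */
	printf("%s.%d\n%s.%d\n", ge10.name, ge10.id, ge11.name, ge11.id);
	return 0;
}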
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index e06fc5fefa14..d9a24f605a2b 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -10,6 +10,7 @@
10 10
11#ifndef __PLAT_COMMON_H 11#ifndef __PLAT_COMMON_H
12#include <linux/mv643xx_eth.h> 12#include <linux/mv643xx_eth.h>
13#include <linux/platform_data/usb-ehci-orion.h>
13 14
14struct dsa_platform_data; 15struct dsa_platform_data;
15struct mv_sata_platform_data; 16struct mv_sata_platform_data;
diff --git a/arch/arm/plat-samsung/adc.c b/arch/arm/plat-samsung/adc.c
index ca07cb1b155a..79690f2f6d3f 100644
--- a/arch/arm/plat-samsung/adc.c
+++ b/arch/arm/plat-samsung/adc.c
@@ -381,11 +381,6 @@ static int s3c_adc_probe(struct platform_device *pdev)
381 } 381 }
382 382
383 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 383 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
384 if (!regs) {
385 dev_err(dev, "failed to find registers\n");
386 return -ENXIO;
387 }
388
389 adc->regs = devm_ioremap_resource(dev, regs); 384 adc->regs = devm_ioremap_resource(dev, regs);
390 if (IS_ERR(adc->regs)) 385 if (IS_ERR(adc->regs))
391 return PTR_ERR(adc->regs); 386 return PTR_ERR(adc->regs);
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index 323ce1a62bbf..46e17492fd1f 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -60,7 +60,7 @@ ENTRY(vfp_testing_entry)
 	str	r11, [r10, #TI_PREEMPT]
 #endif
 	ldr	r0, VFP_arch_address
-	str	r5, [r0]		@ known non-zero value
+	str	r0, [r0]		@ set to non-zero value
 	mov	pc, r9			@ we have handled the fault
 ENDPROC(vfp_testing_entry)
 
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index d30042e39974..13609e01f4b7 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -152,11 +152,12 @@ int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
 }
 EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
 
-static int __init xen_secondary_init(unsigned int cpu)
+static void __init xen_percpu_init(void *unused)
 {
 	struct vcpu_register_vcpu_info info;
 	struct vcpu_info *vcpup;
 	int err;
+	int cpu = get_cpu();
 
 	pr_info("Xen: initializing cpu%d\n", cpu);
 	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);
@@ -165,14 +166,10 @@ static int __init xen_secondary_init(unsigned int cpu)
165 info.offset = offset_in_page(vcpup); 166 info.offset = offset_in_page(vcpup);
166 167
167 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info); 168 err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
168 if (err) { 169 BUG_ON(err);
169 pr_debug("register_vcpu_info failed: err=%d\n", err); 170 per_cpu(xen_vcpu, cpu) = vcpup;
170 } else { 171
171 /* This cpu is using the registered vcpu info, even if 172 enable_percpu_irq(xen_events_irq, 0);
172 later ones fail to. */
173 per_cpu(xen_vcpu, cpu) = vcpup;
174 }
175 return 0;
176} 173}
177 174
178static void xen_restart(char str, const char *cmd) 175static void xen_restart(char str, const char *cmd)
@@ -208,7 +205,6 @@ static int __init xen_guest_init(void)
208 const char *version = NULL; 205 const char *version = NULL;
209 const char *xen_prefix = "xen,xen-"; 206 const char *xen_prefix = "xen,xen-";
210 struct resource res; 207 struct resource res;
211 int i;
212 208
213 node = of_find_compatible_node(NULL, NULL, "xen,xen"); 209 node = of_find_compatible_node(NULL, NULL, "xen,xen");
214 if (!node) { 210 if (!node) {
@@ -265,19 +261,23 @@ static int __init xen_guest_init(void)
 					       sizeof(struct vcpu_info));
 	if (xen_vcpu_info == NULL)
 		return -ENOMEM;
-	for_each_online_cpu(i)
-		xen_secondary_init(i);
 
 	gnttab_init();
 	if (!xen_initial_domain())
 		xenbus_probe(NULL);
 
+	return 0;
+}
+core_initcall(xen_guest_init);
+
+static int __init xen_pm_init(void)
+{
 	pm_power_off = xen_power_off;
 	arm_pm_restart = xen_restart;
 
 	return 0;
 }
-core_initcall(xen_guest_init);
+subsys_initcall(xen_pm_init);
 
 static irqreturn_t xen_arm_callback(int irq, void *arg)
 {
@@ -285,11 +285,6 @@ static irqreturn_t xen_arm_callback(int irq, void *arg)
 	return IRQ_HANDLED;
 }
 
-static __init void xen_percpu_enable_events(void *unused)
-{
-	enable_percpu_irq(xen_events_irq, 0);
-}
-
 static int __init xen_init_events(void)
 {
 	if (!xen_domain() || xen_events_irq < 0)
@@ -303,7 +298,7 @@ static int __init xen_init_events(void)
 		return -EINVAL;
 	}
 
-	on_each_cpu(xen_percpu_enable_events, NULL, 0);
+	on_each_cpu(xen_percpu_init, NULL, 0);
 
 	return 0;
 }
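Folding the vcpu_info registration and the per-CPU event IRQ enable into a single xen_percpu_init() lets one on_each_cpu() call perform the whole per-CPU bring-up on every online CPU. A kernel-style sketch of the dispatch pattern (register_vcpu_info() and events_irq are hypothetical stand-ins):

/* Runs once on each CPU; on_each_cpu() callbacks execute with preemption
 * disabled on the target CPU. */
static void percpu_init(void *unused)
{
	int cpu = get_cpu();

	register_vcpu_info(cpu);		/* hypothetical helper */
	enable_percpu_irq(events_irq, 0);	/* unmask this CPU's event channel IRQ */

	put_cpu();
}

/* later, from an initcall:
 *	on_each_cpu(percpu_init, NULL, 0);
 */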
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 48347dcf0566..56b3f6d447ae 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -122,8 +122,6 @@ endmenu
122 122
123menu "Kernel Features" 123menu "Kernel Features"
124 124
125source "kernel/time/Kconfig"
126
127config ARM64_64K_PAGES 125config ARM64_64K_PAGES
128 bool "Enable 64KB pages support" 126 bool "Enable 64KB pages support"
129 help 127 help
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index c8eedc604984..5aceb83b3f5c 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -82,7 +82,7 @@
 
 	.macro	enable_dbg_if_not_stepping, tmp
 	mrs	\tmp, mdscr_el1
-	tbnz	\tmp, #1, 9990f
+	tbnz	\tmp, #0, 9990f
 	enable_dbg
 9990:
 	.endm
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 0c3ba9f51376..f4726dc054b3 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -136,8 +136,6 @@ void disable_debug_monitors(enum debug_el el)
136 */ 136 */
137static void clear_os_lock(void *unused) 137static void clear_os_lock(void *unused)
138{ 138{
139 asm volatile("msr mdscr_el1, %0" : : "r" (0));
140 isb();
141 asm volatile("msr oslar_el1, %0" : : "r" (0)); 139 asm volatile("msr oslar_el1, %0" : : "r" (0));
142 isb(); 140 isb();
143} 141}
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index ac974f48a7a2..fbb6e1843659 100644
--- a/arch/arm64/kernel/early_printk.c
+++ b/arch/arm64/kernel/early_printk.c
@@ -95,7 +95,7 @@ static void early_write(struct console *con, const char *s, unsigned n)
 	}
 }
 
-static struct console early_console = {
+static struct console early_console_dev = {
 	.name =		"earlycon",
 	.write =	early_write,
 	.flags =	CON_PRINTBUFFER | CON_BOOT,
@@ -145,7 +145,8 @@ static int __init setup_early_printk(char *buf)
 	early_base = early_io_map(paddr, EARLYCON_IOBASE);
 
 	printch = match->printch;
-	register_console(&early_console);
+	early_console = &early_console_dev;
+	register_console(&early_console_dev);
 
 	return 0;
 }
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 6a9a53292590..add6ea616843 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -282,12 +282,13 @@ void __init setup_arch(char **cmdline_p)
 #endif
 }
 
-static int __init arm64_of_clk_init(void)
+static int __init arm64_device_init(void)
 {
 	of_clk_init(NULL);
+	of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 	return 0;
 }
-arch_initcall(arm64_of_clk_init);
+arch_initcall(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
@@ -305,13 +306,6 @@ static int __init topology_init(void)
305} 306}
306subsys_initcall(topology_init); 307subsys_initcall(topology_init);
307 308
308static int __init arm64_device_probe(void)
309{
310 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
311 return 0;
312}
313device_initcall(arm64_device_probe);
314
315static const char *hwcap_str[] = { 309static const char *hwcap_str[] = {
316 "fp", 310 "fp",
317 "asimd", 311 "asimd",
diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S
index abe69b80cf7f..48a386094fa3 100644
--- a/arch/arm64/mm/cache.S
+++ b/arch/arm64/mm/cache.S
@@ -52,7 +52,7 @@ loop1:
52 add x2, x2, #4 // add 4 (line length offset) 52 add x2, x2, #4 // add 4 (line length offset)
53 mov x4, #0x3ff 53 mov x4, #0x3ff
54 and x4, x4, x1, lsr #3 // find maximum number on the way size 54 and x4, x4, x1, lsr #3 // find maximum number on the way size
55 clz x5, x4 // find bit position of way size increment 55 clz w5, w4 // find bit position of way size increment
56 mov x7, #0x7fff 56 mov x7, #0x7fff
57 and x7, x7, x1, lsr #13 // extract max number of the index size 57 and x7, x7, x1, lsr #13 // extract max number of the index size
58loop2: 58loop2:
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f1d8b9bbfdad..a82ae8868077 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -119,8 +119,7 @@ ENTRY(__cpu_setup)
119 119
120 mov x0, #3 << 20 120 mov x0, #3 << 20
121 msr cpacr_el1, x0 // Enable FP/ASIMD 121 msr cpacr_el1, x0 // Enable FP/ASIMD
122 mov x0, #1 122 msr mdscr_el1, xzr // Reset mdscr_el1
123 msr oslar_el1, x0 // Set the debug OS lock
124 tlbi vmalle1is // invalidate I + D TLBs 123 tlbi vmalle1is // invalidate I + D TLBs
125 /* 124 /*
126 * Memory region attributes for LPAE: 125 * Memory region attributes for LPAE:
diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig
index bdc35589277f..549903cfc2cb 100644
--- a/arch/avr32/Kconfig
+++ b/arch/avr32/Kconfig
@@ -205,6 +205,11 @@ config ARCH_DISCONTIGMEM_ENABLE
205config ARCH_SPARSEMEM_ENABLE 205config ARCH_SPARSEMEM_ENABLE
206 def_bool n 206 def_bool n
207 207
208config NODES_SHIFT
209 int
210 default "2"
211 depends on NEED_MULTIPLE_NODES
212
208source "mm/Kconfig" 213source "mm/Kconfig"
209 214
210config OWNERSHIP_TRACE 215config OWNERSHIP_TRACE
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index 4dd4f78d3dcc..d22af851f3f6 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -2,3 +2,4 @@
2generic-y += clkdev.h 2generic-y += clkdev.h
3generic-y += exec.h 3generic-y += exec.h
4generic-y += trace_clock.h 4generic-y += trace_clock.h
5generic-y += param.h
diff --git a/arch/avr32/include/asm/numnodes.h b/arch/avr32/include/asm/numnodes.h
deleted file mode 100644
index 0b864d7ce330..000000000000
--- a/arch/avr32/include/asm/numnodes.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __ASM_AVR32_NUMNODES_H
2#define __ASM_AVR32_NUMNODES_H
3
4/* Max 4 nodes */
5#define NODES_SHIFT 2
6
7#endif /* __ASM_AVR32_NUMNODES_H */
diff --git a/arch/avr32/include/asm/param.h b/arch/avr32/include/asm/param.h
deleted file mode 100644
index 009a167aea1f..000000000000
--- a/arch/avr32/include/asm/param.h
+++ /dev/null
@@ -1,9 +0,0 @@
1#ifndef __ASM_AVR32_PARAM_H
2#define __ASM_AVR32_PARAM_H
3
4#include <uapi/asm/param.h>
5
6# define HZ CONFIG_HZ
7# define USER_HZ 100 /* User interfaces are in "ticks" */
8# define CLOCKS_PER_SEC (USER_HZ) /* frequency at which times() counts */
9#endif /* __ASM_AVR32_PARAM_H */
diff --git a/arch/avr32/include/uapi/asm/Kbuild b/arch/avr32/include/uapi/asm/Kbuild
index df53e7a46774..3b85eaddf525 100644
--- a/arch/avr32/include/uapi/asm/Kbuild
+++ b/arch/avr32/include/uapi/asm/Kbuild
@@ -33,3 +33,4 @@ header-y += termbits.h
33header-y += termios.h 33header-y += termios.h
34header-y += types.h 34header-y += types.h
35header-y += unistd.h 35header-y += unistd.h
36generic-y += param.h
diff --git a/arch/avr32/include/uapi/asm/param.h b/arch/avr32/include/uapi/asm/param.h
deleted file mode 100644
index d28aa5ee6d37..000000000000
--- a/arch/avr32/include/uapi/asm/param.h
+++ /dev/null
@@ -1,18 +0,0 @@
1#ifndef _UAPI__ASM_AVR32_PARAM_H
2#define _UAPI__ASM_AVR32_PARAM_H
3
4
5#ifndef HZ
6# define HZ 100
7#endif
8
9/* TODO: Should be configurable */
10#define EXEC_PAGESIZE 4096
11
12#ifndef NOGROUP
13# define NOGROUP (-1)
14#endif
15
16#define MAXHOSTNAMELEN 64
17
18#endif /* _UAPI__ASM_AVR32_PARAM_H */
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 596f7305d93f..2c9412908024 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -264,7 +264,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
264 break; 264 break;
265 case R_AVR32_GOT18SW: 265 case R_AVR32_GOT18SW:
266 if ((relocation & 0xfffe0003) != 0 266 if ((relocation & 0xfffe0003) != 0
267 && (relocation & 0xfffc0003) != 0xffff0000) 267 && (relocation & 0xfffc0000) != 0xfffc0000)
268 return reloc_overflow(module, "R_AVR32_GOT18SW", 268 return reloc_overflow(module, "R_AVR32_GOT18SW",
269 relocation); 269 relocation);
270 relocation >>= 2; 270 relocation >>= 2;
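Note on the R_AVR32_GOT18SW hunk above: the relocation is a signed, word-aligned GOT displacement, so a value is in range either when its upper bits and the two low bits are all zero (small positive, word aligned) or when bits 31..18 are all ones (small negative). The old comparison against 0xffff0000 could never match, because bits 17 and 16 lie outside the 0xfffc0003 mask, so every negative displacement was reported as an overflow. The following is a small standalone sketch of both checks, with a test value made up for illustration.

#include <stdint.h>
#include <stdio.h>

/* Overflow tests as they appear in the hunk above (old vs. new form). */
static int got18sw_overflows_old(uint32_t r)
{
	return (r & 0xfffe0003) != 0 && (r & 0xfffc0003) != 0xffff0000;
}

static int got18sw_overflows_new(uint32_t r)
{
	return (r & 0xfffe0003) != 0 && (r & 0xfffc0000) != 0xfffc0000;
}

int main(void)
{
	/* -0x10000: a small negative, word-aligned displacement. */
	uint32_t r = (uint32_t)-0x10000;

	/* The old check flags it as overflow, the new check accepts it. */
	printf("old: %d  new: %d\n",
	       got18sw_overflows_old(r), got18sw_overflows_new(r));
	return 0;
}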
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index cb0f6afb7389..9edc35ff8cf1 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -31,6 +31,7 @@
31#include <linux/i2c.h> 31#include <linux/i2c.h>
32#include <linux/i2c-gpio.h> 32#include <linux/i2c-gpio.h>
33#include <asm/bootinfo.h> 33#include <asm/bootinfo.h>
34#include <asm/idle.h>
34#include <asm/reboot.h> 35#include <asm/reboot.h>
35#include <asm/mach-au1x00/au1000.h> 36#include <asm/mach-au1x00/au1000.h>
36#include <prom.h> 37#include <prom.h>
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 38afb11ba2c4..93fa586d52e2 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -36,6 +36,7 @@
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/spinlock.h> 37#include <linux/spinlock.h>
38 38
39#include <asm/idle.h>
39#include <asm/processor.h> 40#include <asm/processor.h>
40#include <asm/time.h> 41#include <asm/time.h>
41#include <asm/mach-au1x00/au1000.h> 42#include <asm/mach-au1x00/au1000.h>
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index a0233a2c1988..8be4e856b8b8 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -19,6 +19,7 @@
19#include <linux/clk.h> 19#include <linux/clk.h>
20 20
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#include <asm/idle.h>
22#include <asm/time.h> /* for mips_hpt_frequency */ 23#include <asm/time.h> /* for mips_hpt_frequency */
23#include <asm/reboot.h> /* for _machine_{restart,halt} */ 24#include <asm/reboot.h> /* for _machine_{restart,halt} */
24#include <asm/mips_machine.h> 25#include <asm/mips_machine.h>
diff --git a/arch/mips/cobalt/reset.c b/arch/mips/cobalt/reset.c
index 516b4428df4e..4eedd481dd00 100644
--- a/arch/mips/cobalt/reset.c
+++ b/arch/mips/cobalt/reset.c
@@ -12,6 +12,7 @@
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/leds.h> 13#include <linux/leds.h>
14 14
15#include <asm/idle.h>
15#include <asm/processor.h> 16#include <asm/processor.h>
16 17
17#include <cobalt.h> 18#include <cobalt.h>
diff --git a/arch/mips/configs/db1000_defconfig b/arch/mips/configs/db1000_defconfig
index face9d26e6d5..bac26b971c5e 100644
--- a/arch/mips/configs/db1000_defconfig
+++ b/arch/mips/configs/db1000_defconfig
@@ -228,7 +228,6 @@ CONFIG_HIDRAW=y
228CONFIG_USB_HID=y 228CONFIG_USB_HID=y
229CONFIG_USB_SUPPORT=y 229CONFIG_USB_SUPPORT=y
230CONFIG_USB=y 230CONFIG_USB=y
231CONFIG_USB_SUSPEND=y
232CONFIG_USB_EHCI_HCD=y 231CONFIG_USB_EHCI_HCD=y
233CONFIG_USB_EHCI_ROOT_HUB_TT=y 232CONFIG_USB_EHCI_ROOT_HUB_TT=y
234CONFIG_USB_EHCI_TT_NEWSCHED=y 233CONFIG_USB_EHCI_TT_NEWSCHED=y
diff --git a/arch/mips/configs/db1235_defconfig b/arch/mips/configs/db1235_defconfig
index 14752dde7540..e2b4ad55462f 100644
--- a/arch/mips/configs/db1235_defconfig
+++ b/arch/mips/configs/db1235_defconfig
@@ -344,7 +344,6 @@ CONFIG_UHID=y
344CONFIG_USB_HIDDEV=y 344CONFIG_USB_HIDDEV=y
345CONFIG_USB=y 345CONFIG_USB=y
346CONFIG_USB_DYNAMIC_MINORS=y 346CONFIG_USB_DYNAMIC_MINORS=y
347CONFIG_USB_SUSPEND=y
348CONFIG_USB_EHCI_HCD=y 347CONFIG_USB_EHCI_HCD=y
349CONFIG_USB_EHCI_HCD_PLATFORM=y 348CONFIG_USB_EHCI_HCD_PLATFORM=y
350CONFIG_USB_EHCI_ROOT_HUB_TT=y 349CONFIG_USB_EHCI_ROOT_HUB_TT=y
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index b6acd2f256b6..343bebc4b63b 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -300,7 +300,6 @@ CONFIG_USB=y
300CONFIG_USB_DEVICEFS=y 300CONFIG_USB_DEVICEFS=y
301# CONFIG_USB_DEVICE_CLASS is not set 301# CONFIG_USB_DEVICE_CLASS is not set
302CONFIG_USB_DYNAMIC_MINORS=y 302CONFIG_USB_DYNAMIC_MINORS=y
303CONFIG_USB_SUSPEND=y
304CONFIG_USB_OTG_WHITELIST=y 303CONFIG_USB_OTG_WHITELIST=y
305CONFIG_USB_MON=y 304CONFIG_USB_MON=y
306CONFIG_USB_EHCI_HCD=y 305CONFIG_USB_EHCI_HCD=y
diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h
index c9456e7a7283..778e32d817bc 100644
--- a/arch/mips/include/asm/clock.h
+++ b/arch/mips/include/asm/clock.h
@@ -6,8 +6,6 @@
6#include <linux/seq_file.h> 6#include <linux/seq_file.h>
7#include <linux/clk.h> 7#include <linux/clk.h>
8 8
9extern void (*cpu_wait) (void);
10
11struct clk; 9struct clk;
12 10
13struct clk_ops { 11struct clk_ops {
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
new file mode 100644
index 000000000000..d192158886b1
--- /dev/null
+++ b/arch/mips/include/asm/idle.h
@@ -0,0 +1,23 @@
1#ifndef __ASM_IDLE_H
2#define __ASM_IDLE_H
3
4#include <linux/linkage.h>
5
6extern void (*cpu_wait)(void);
7extern void r4k_wait(void);
8extern asmlinkage void __r4k_wait(void);
9extern void r4k_wait_irqoff(void);
10extern void __pastwait(void);
11
12static inline int using_rollback_handler(void)
13{
14 return cpu_wait == r4k_wait;
15}
16
17static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
18{
19 return addr >= (unsigned long)r4k_wait_irqoff &&
20 addr < (unsigned long)__pastwait;
21}
22
23#endif /* __ASM_IDLE_H */
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 1be13727323f..b7e59853fd33 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -118,7 +118,7 @@ static inline void set_io_port_base(unsigned long base)
118 */ 118 */
119static inline unsigned long virt_to_phys(volatile const void *address) 119static inline unsigned long virt_to_phys(volatile const void *address)
120{ 120{
121 return (unsigned long)address - PAGE_OFFSET + PHYS_OFFSET; 121 return __pa(address);
122} 122}
123 123
124/* 124/*
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index e68781e18387..143875c6c95a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -336,7 +336,7 @@ enum emulation_result {
336#define VPN2_MASK 0xffffe000 336#define VPN2_MASK 0xffffe000
337#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G)) 337#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && ((x).tlb_lo1 & MIPS3_PG_G))
338#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) 338#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
339#define TLB_ASID(x) (ASID_MASK((x).tlb_hi)) 339#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
340#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V)) 340#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) ? ((x).tlb_lo1 & MIPS3_PG_V) : ((x).tlb_lo0 & MIPS3_PG_V))
341 341
342struct kvm_mips_tlb { 342struct kvm_mips_tlb {
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 1554721e4808..820116067c10 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -67,68 +67,45 @@ extern unsigned long pgd_current[];
67 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir) 67 TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
68#endif 68#endif
69#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/ 69#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
70#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
70 71
71#define ASID_INC(asid) \ 72#define ASID_INC 0x40
72({ \ 73#define ASID_MASK 0xfc0
73 unsigned long __asid = asid; \ 74
74 __asm__("1:\taddiu\t%0,1\t\t\t\t# patched\n\t" \ 75#elif defined(CONFIG_CPU_R8000)
75 ".section\t__asid_inc,\"a\"\n\t" \ 76
76 ".word\t1b\n\t" \ 77#define ASID_INC 0x10
77 ".previous" \ 78#define ASID_MASK 0xff0
78 :"=r" (__asid) \ 79
79 :"0" (__asid)); \ 80#elif defined(CONFIG_MIPS_MT_SMTC)
80 __asid; \ 81
81}) 82#define ASID_INC 0x1
82#define ASID_MASK(asid) \ 83extern unsigned long smtc_asid_mask;
83({ \ 84#define ASID_MASK (smtc_asid_mask)
84 unsigned long __asid = asid; \ 85#define HW_ASID_MASK 0xff
85 __asm__("1:\tandi\t%0,%1,0xfc0\t\t\t# patched\n\t" \ 86/* End SMTC/34K debug hack */
86 ".section\t__asid_mask,\"a\"\n\t" \ 87#else /* FIXME: not correct for R6000 */
87 ".word\t1b\n\t" \ 88
88 ".previous" \ 89#define ASID_INC 0x1
89 :"=r" (__asid) \ 90#define ASID_MASK 0xff
90 :"r" (__asid)); \
91 __asid; \
92})
93#define ASID_VERSION_MASK \
94({ \
95 unsigned long __asid; \
96 __asm__("1:\taddiu\t%0,$0,0xff00\t\t\t\t# patched\n\t" \
97 ".section\t__asid_version_mask,\"a\"\n\t" \
98 ".word\t1b\n\t" \
99 ".previous" \
100 :"=r" (__asid)); \
101 __asid; \
102})
103#define ASID_FIRST_VERSION \
104({ \
105 unsigned long __asid = asid; \
106 __asm__("1:\tli\t%0,0x100\t\t\t\t# patched\n\t" \
107 ".section\t__asid_first_version,\"a\"\n\t" \
108 ".word\t1b\n\t" \
109 ".previous" \
110 :"=r" (__asid)); \
111 __asid; \
112})
113
114#define ASID_FIRST_VERSION_R3000 0x1000
115#define ASID_FIRST_VERSION_R4000 0x100
116#define ASID_FIRST_VERSION_R8000 0x1000
117#define ASID_FIRST_VERSION_RM9000 0x1000
118 91
119#ifdef CONFIG_MIPS_MT_SMTC
120#define SMTC_HW_ASID_MASK 0xff
121extern unsigned int smtc_asid_mask;
122#endif 92#endif
123 93
124#define cpu_context(cpu, mm) ((mm)->context.asid[cpu]) 94#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
125#define cpu_asid(cpu, mm) ASID_MASK(cpu_context((cpu), (mm))) 95#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
126#define asid_cache(cpu) (cpu_data[cpu].asid_cache) 96#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
127 97
128static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 98static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
129{ 99{
130} 100}
131 101
102/*
103 * All unused by hardware upper bits will be considered
104 * as a software asid extension.
105 */
106#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
107#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
108
132#ifndef CONFIG_MIPS_MT_SMTC 109#ifndef CONFIG_MIPS_MT_SMTC
133/* Normal, classic MIPS get_new_mmu_context */ 110/* Normal, classic MIPS get_new_mmu_context */
134static inline void 111static inline void
@@ -137,7 +114,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
137 extern void kvm_local_flush_tlb_all(void); 114 extern void kvm_local_flush_tlb_all(void);
138 unsigned long asid = asid_cache(cpu); 115 unsigned long asid = asid_cache(cpu);
139 116
140 if (!ASID_MASK((asid = ASID_INC(asid)))) { 117 if (! ((asid += ASID_INC) & ASID_MASK) ) {
141 if (cpu_has_vtag_icache) 118 if (cpu_has_vtag_icache)
142 flush_icache_all(); 119 flush_icache_all();
143#ifdef CONFIG_VIRTUALIZATION 120#ifdef CONFIG_VIRTUALIZATION
@@ -200,7 +177,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
200 * free up the ASID value for use and flush any old 177 * free up the ASID value for use and flush any old
201 * instances of it from the TLB. 178 * instances of it from the TLB.
202 */ 179 */
203 oldasid = ASID_MASK(read_c0_entryhi()); 180 oldasid = (read_c0_entryhi() & ASID_MASK);
204 if(smtc_live_asid[mytlb][oldasid]) { 181 if(smtc_live_asid[mytlb][oldasid]) {
205 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 182 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
206 if(smtc_live_asid[mytlb][oldasid] == 0) 183 if(smtc_live_asid[mytlb][oldasid] == 0)
@@ -211,7 +188,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
211 * having ASID_MASK smaller than the hardware maximum, 188 * having ASID_MASK smaller than the hardware maximum,
212 * make sure no "soft" bits become "hard"... 189 * make sure no "soft" bits become "hard"...
213 */ 190 */
214 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | 191 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
215 cpu_asid(cpu, next)); 192 cpu_asid(cpu, next));
216 ehb(); /* Make sure it propagates to TCStatus */ 193 ehb(); /* Make sure it propagates to TCStatus */
217 evpe(mtflags); 194 evpe(mtflags);
@@ -264,15 +241,15 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
264#ifdef CONFIG_MIPS_MT_SMTC 241#ifdef CONFIG_MIPS_MT_SMTC
265 /* See comments for similar code above */ 242 /* See comments for similar code above */
266 mtflags = dvpe(); 243 mtflags = dvpe();
267 oldasid = ASID_MASK(read_c0_entryhi()); 244 oldasid = read_c0_entryhi() & ASID_MASK;
268 if(smtc_live_asid[mytlb][oldasid]) { 245 if(smtc_live_asid[mytlb][oldasid]) {
269 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 246 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
270 if(smtc_live_asid[mytlb][oldasid] == 0) 247 if(smtc_live_asid[mytlb][oldasid] == 0)
271 smtc_flush_tlb_asid(oldasid); 248 smtc_flush_tlb_asid(oldasid);
272 } 249 }
273 /* See comments for similar code above */ 250 /* See comments for similar code above */
274 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) | 251 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
275 cpu_asid(cpu, next)); 252 cpu_asid(cpu, next));
276 ehb(); /* Make sure it propagates to TCStatus */ 253 ehb(); /* Make sure it propagates to TCStatus */
277 evpe(mtflags); 254 evpe(mtflags);
278#else 255#else
@@ -309,14 +286,14 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
309#ifdef CONFIG_MIPS_MT_SMTC 286#ifdef CONFIG_MIPS_MT_SMTC
310 /* See comments for similar code above */ 287 /* See comments for similar code above */
311 prevvpe = dvpe(); 288 prevvpe = dvpe();
312 oldasid = ASID_MASK(read_c0_entryhi()); 289 oldasid = (read_c0_entryhi() & ASID_MASK);
313 if (smtc_live_asid[mytlb][oldasid]) { 290 if (smtc_live_asid[mytlb][oldasid]) {
314 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu); 291 smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
315 if(smtc_live_asid[mytlb][oldasid] == 0) 292 if(smtc_live_asid[mytlb][oldasid] == 0)
316 smtc_flush_tlb_asid(oldasid); 293 smtc_flush_tlb_asid(oldasid);
317 } 294 }
318 /* See comments for similar code above */ 295 /* See comments for similar code above */
319 write_c0_entryhi((read_c0_entryhi() & ~SMTC_HW_ASID_MASK) 296 write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
320 | cpu_asid(cpu, mm)); 297 | cpu_asid(cpu, mm));
321 ehb(); /* Make sure it propagates to TCStatus */ 298 ehb(); /* Make sure it propagates to TCStatus */
322 evpe(prevvpe); 299 evpe(prevvpe);
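The mmu_context.h rewrite above replaces the run-time patched ASID_INC/ASID_MASK macros with per-family constants and derives the software "version" bits from the mask: ASID_VERSION_MASK is every bit above the ASID field, and ASID_FIRST_VERSION is the lowest such bit. The sketch below simply evaluates those expressions for the two mask layouts visible in the hunk (0xff and 0xfc0) and shows how the !((asid += ASID_INC) & ASID_MASK) test detects a wrap of the hardware ASID field; it is an illustration, not kernel code.

#include <stdio.h>

static void show(unsigned long asid_mask, unsigned long asid_inc)
{
	/* Same derivations as in the hunk above. */
	unsigned long version_mask = ~(asid_mask | (asid_mask - 1));
	unsigned long first_version = (~version_mask) + 1;

	printf("mask=%#lx inc=%#lx version_mask=%#lx first_version=%#lx\n",
	       asid_mask, asid_inc, version_mask, first_version);

	/* Start a fresh version and step until the hardware field wraps. */
	unsigned long asid = first_version, steps = 0;

	do {
		asid += asid_inc;
		steps++;
	} while (asid & asid_mask);

	printf("  ASID field wraps on increment %lu, asid=%#lx\n", steps, asid);
}

int main(void)
{
	show(0xff, 0x1);	/* generic MIPS32/MIPS64 layout */
	show(0xfc0, 0x40);	/* R3000/TX39 layout from the hunk */
	return 0;
}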
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index eab99e536b5c..f59552fae917 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -46,7 +46,6 @@
46#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 46#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
47 47
48#include <linux/pfn.h> 48#include <linux/pfn.h>
49#include <asm/io.h>
50 49
51extern void build_clear_page(void); 50extern void build_clear_page(void);
52extern void build_copy_page(void); 51extern void build_copy_page(void);
@@ -151,6 +150,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
151 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET) 150 ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
152#endif 151#endif
153#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET)) 152#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
153#include <asm/io.h>
154 154
155/* 155/*
156 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad 156 * RELOC_HIDE was originally added by 6007b903dfe5f1d13e0c711ac2894bdd4a61b1ad
@@ -171,14 +171,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
171 171
172#ifdef CONFIG_FLATMEM 172#ifdef CONFIG_FLATMEM
173 173
174#define pfn_valid(pfn) \ 174static inline int pfn_valid(unsigned long pfn)
175({ \ 175{
176 unsigned long __pfn = (pfn); \ 176 /* avoid <linux/mm.h> include hell */
177 /* avoid <linux/bootmem.h> include hell */ \ 177 extern unsigned long max_mapnr;
178 extern unsigned long min_low_pfn; \ 178
179 \ 179 return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
180 __pfn >= min_low_pfn && __pfn < max_mapnr; \ 180}
181})
182 181
183#elif defined(CONFIG_SPARSEMEM) 182#elif defined(CONFIG_SPARSEMEM)
184 183
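Two things change in page.h above: <asm/io.h> is now included only after __pa()/__va() are defined (which is what lets io.h's virt_to_phys() simply return __pa(address), as in the earlier io.h hunk), and the flat-memory pfn_valid() becomes a typed inline that compares against max_mapnr rather than min_low_pfn. The fragment below mirrors that arithmetic with made-up PAGE_OFFSET/PHYS_OFFSET/max_mapnr values purely to show the address-to-pfn relationship; it is not the kernel's implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative constants only; real values are per-platform. */
#define PAGE_SHIFT	12
#define PAGE_OFFSET	0x80000000UL	/* assumed direct-map base */
#define PHYS_OFFSET	0x10000000UL	/* assumed start of RAM */
#define ARCH_PFN_OFFSET	(PHYS_OFFSET >> PAGE_SHIFT)

static unsigned long max_mapnr = 0x18000;	/* pretend 128 MiB above PHYS_OFFSET */

static unsigned long __pa(const void *x)
{
	return (unsigned long)x - PAGE_OFFSET + PHYS_OFFSET;
}

static int pfn_valid(unsigned long pfn)
{
	return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
}

int main(void)
{
	void *virt = (void *)0x80123000UL;
	unsigned long phys = __pa(virt);

	printf("virt=%p phys=%#lx pfn=%#lx valid=%d\n",
	       virt, phys, phys >> PAGE_SHIFT, pfn_valid(phys >> PAGE_SHIFT));
	return 0;
}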
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 71686c897dea..1470b7b68b0e 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -28,7 +28,6 @@
28/* 28/*
29 * System setup and hardware flags.. 29 * System setup and hardware flags..
30 */ 30 */
31extern void (*cpu_wait)(void);
32 31
33extern unsigned int vced_count, vcei_count; 32extern unsigned int vced_count, vcei_count;
34 33
diff --git a/arch/mips/include/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index 85789eacbf18..85789eacbf18 100644
--- a/arch/mips/include/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 16338b84fa79..1dee279f9665 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -694,16 +694,17 @@
694#define __NR_process_vm_writev (__NR_Linux + 305) 694#define __NR_process_vm_writev (__NR_Linux + 305)
695#define __NR_kcmp (__NR_Linux + 306) 695#define __NR_kcmp (__NR_Linux + 306)
696#define __NR_finit_module (__NR_Linux + 307) 696#define __NR_finit_module (__NR_Linux + 307)
697#define __NR_getdents64 (__NR_Linux + 308)
697 698
698/* 699/*
699 * Offset of the last Linux 64-bit flavoured syscall 700 * Offset of the last Linux 64-bit flavoured syscall
700 */ 701 */
701#define __NR_Linux_syscalls 307 702#define __NR_Linux_syscalls 308
702 703
703#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 704#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
704 705
705#define __NR_64_Linux 5000 706#define __NR_64_Linux 5000
706#define __NR_64_Linux_syscalls 307 707#define __NR_64_Linux_syscalls 308
707 708
708#if _MIPS_SIM == _MIPS_SIM_NABI32 709#if _MIPS_SIM == _MIPS_SIM_NABI32
709 710
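The hunk above wires getdents64 into the 64-bit MIPS syscall table (__NR_Linux + 308, i.e. 5308 with the 64-bit base of 5000 shown above) and bumps the syscall counts to match; the corresponding PTR entry appears in the scall64-64.S hunk further down. From user space the call looks the same on any Linux architecture. The snippet below is a generic illustration using glibc's syscall() wrapper and a hand-rolled linux_dirent64 layout; it is not MIPS-specific.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

struct linux_dirent64 {
	unsigned long long d_ino;
	long long          d_off;
	unsigned short     d_reclen;
	unsigned char      d_type;
	char               d_name[];
};

int main(void)
{
	char buf[4096];
	int fd = open(".", O_RDONLY | O_DIRECTORY);
	if (fd < 0)
		return 1;

	long n = syscall(SYS_getdents64, fd, buf, sizeof(buf));
	if (n < 0)
		return 1;

	/* Walk the packed records returned by the kernel. */
	for (long off = 0; off < n; off += ((struct linux_dirent64 *)(buf + off))->d_reclen)
		printf("%s\n", ((struct linux_dirent64 *)(buf + off))->d_name);

	close(fd);
	return 0;
}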
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6ad9e04bdf62..423d871a946b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,7 +4,7 @@
4 4
5extra-y := head.o vmlinux.lds 5extra-y := head.o vmlinux.lds
6 6
7obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \ 7obj-y += cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
8 prom.o ptrace.o reset.o setup.o signal.o syscall.o \ 8 prom.o ptrace.o reset.o setup.o signal.o syscall.o \
9 time.o topology.o traps.o unaligned.o watch.o vdso.o 9 time.o topology.o traps.o unaligned.o watch.o vdso.o
10 10
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 4bbffdb9024f..c6568bf4b1b0 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -27,105 +27,6 @@
27#include <asm/spram.h> 27#include <asm/spram.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29 29
30/*
31 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
32 * the implementation of the "wait" feature differs between CPU families. This
33 * points to the function that implements CPU specific wait.
34 * The wait instruction stops the pipeline and reduces the power consumption of
35 * the CPU very much.
36 */
37void (*cpu_wait)(void);
38EXPORT_SYMBOL(cpu_wait);
39
40static void r3081_wait(void)
41{
42 unsigned long cfg = read_c0_conf();
43 write_c0_conf(cfg | R30XX_CONF_HALT);
44}
45
46static void r39xx_wait(void)
47{
48 local_irq_disable();
49 if (!need_resched())
50 write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
51 local_irq_enable();
52}
53
54extern void r4k_wait(void);
55
56/*
57 * This variant is preferable as it allows testing need_resched and going to
58 * sleep depending on the outcome atomically. Unfortunately the "It is
59 * implementation-dependent whether the pipeline restarts when a non-enabled
60 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
61 * using this version a gamble.
62 */
63void r4k_wait_irqoff(void)
64{
65 local_irq_disable();
66 if (!need_resched())
67 __asm__(" .set push \n"
68 " .set mips3 \n"
69 " wait \n"
70 " .set pop \n");
71 local_irq_enable();
72 __asm__(" .globl __pastwait \n"
73 "__pastwait: \n");
74}
75
76/*
77 * The RM7000 variant has to handle erratum 38. The workaround is to not
78 * have any pending stores when the WAIT instruction is executed.
79 */
80static void rm7k_wait_irqoff(void)
81{
82 local_irq_disable();
83 if (!need_resched())
84 __asm__(
85 " .set push \n"
86 " .set mips3 \n"
87 " .set noat \n"
88 " mfc0 $1, $12 \n"
89 " sync \n"
90 " mtc0 $1, $12 # stalls until W stage \n"
91 " wait \n"
92 " mtc0 $1, $12 # stalls until W stage \n"
93 " .set pop \n");
94 local_irq_enable();
95}
96
97/*
98 * The Au1xxx wait is available only if using 32khz counter or
99 * external timer source, but specifically not CP0 Counter.
100 * alchemy/common/time.c may override cpu_wait!
101 */
102static void au1k_wait(void)
103{
104 __asm__(" .set mips3 \n"
105 " cache 0x14, 0(%0) \n"
106 " cache 0x14, 32(%0) \n"
107 " sync \n"
108 " nop \n"
109 " wait \n"
110 " nop \n"
111 " nop \n"
112 " nop \n"
113 " nop \n"
114 " .set mips0 \n"
115 : : "r" (au1k_wait));
116}
117
118static int __initdata nowait;
119
120static int __init wait_disable(char *s)
121{
122 nowait = 1;
123
124 return 1;
125}
126
127__setup("nowait", wait_disable);
128
129static int __cpuinitdata mips_fpu_disabled; 30static int __cpuinitdata mips_fpu_disabled;
130 31
131static int __init fpu_disable(char *s) 32static int __init fpu_disable(char *s)
@@ -150,105 +51,6 @@ static int __init dsp_disable(char *s)
150 51
151__setup("nodsp", dsp_disable); 52__setup("nodsp", dsp_disable);
152 53
153void __init check_wait(void)
154{
155 struct cpuinfo_mips *c = &current_cpu_data;
156
157 if (nowait) {
158 printk("Wait instruction disabled.\n");
159 return;
160 }
161
162 switch (c->cputype) {
163 case CPU_R3081:
164 case CPU_R3081E:
165 cpu_wait = r3081_wait;
166 break;
167 case CPU_TX3927:
168 cpu_wait = r39xx_wait;
169 break;
170 case CPU_R4200:
171/* case CPU_R4300: */
172 case CPU_R4600:
173 case CPU_R4640:
174 case CPU_R4650:
175 case CPU_R4700:
176 case CPU_R5000:
177 case CPU_R5500:
178 case CPU_NEVADA:
179 case CPU_4KC:
180 case CPU_4KEC:
181 case CPU_4KSC:
182 case CPU_5KC:
183 case CPU_25KF:
184 case CPU_PR4450:
185 case CPU_BMIPS3300:
186 case CPU_BMIPS4350:
187 case CPU_BMIPS4380:
188 case CPU_BMIPS5000:
189 case CPU_CAVIUM_OCTEON:
190 case CPU_CAVIUM_OCTEON_PLUS:
191 case CPU_CAVIUM_OCTEON2:
192 case CPU_JZRISC:
193 case CPU_LOONGSON1:
194 case CPU_XLR:
195 case CPU_XLP:
196 cpu_wait = r4k_wait;
197 break;
198
199 case CPU_RM7000:
200 cpu_wait = rm7k_wait_irqoff;
201 break;
202
203 case CPU_M14KC:
204 case CPU_M14KEC:
205 case CPU_24K:
206 case CPU_34K:
207 case CPU_1004K:
208 cpu_wait = r4k_wait;
209 if (read_c0_config7() & MIPS_CONF7_WII)
210 cpu_wait = r4k_wait_irqoff;
211 break;
212
213 case CPU_74K:
214 cpu_wait = r4k_wait;
215 if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
216 cpu_wait = r4k_wait_irqoff;
217 break;
218
219 case CPU_TX49XX:
220 cpu_wait = r4k_wait_irqoff;
221 break;
222 case CPU_ALCHEMY:
223 cpu_wait = au1k_wait;
224 break;
225 case CPU_20KC:
226 /*
227 * WAIT on Rev1.0 has E1, E2, E3 and E16.
228 * WAIT on Rev2.0 and Rev3.0 has E16.
229 * Rev3.1 WAIT is nop, why bother
230 */
231 if ((c->processor_id & 0xff) <= 0x64)
232 break;
233
234 /*
235 * Another rev is incrementing c0_count at a reduced clock
236 * rate while in WAIT mode. So we basically have the choice
237 * between using the cp0 timer as clocksource or avoiding
238 * the WAIT instruction. Until more details are known,
239 * disable the use of WAIT for 20Kc entirely.
240 cpu_wait = r4k_wait;
241 */
242 break;
243 case CPU_RM9000:
244 if ((c->processor_id & 0x00ff) >= 0x40)
245 cpu_wait = r4k_wait;
246 break;
247 default:
248 break;
249 }
250}
251
252static inline void check_errata(void) 54static inline void check_errata(void)
253{ 55{
254 struct cpuinfo_mips *c = &current_cpu_data; 56 struct cpuinfo_mips *c = &current_cpu_data;
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 35bed0d2342c..3be9e7bb30ff 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -2,6 +2,7 @@
2#include <linux/bootmem.h> 2#include <linux/bootmem.h>
3#include <linux/crash_dump.h> 3#include <linux/crash_dump.h>
4#include <asm/uaccess.h> 4#include <asm/uaccess.h>
5#include <linux/slab.h>
5 6
6static int __init parse_savemaxmem(char *p) 7static int __init parse_savemaxmem(char *p)
7{ 8{
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5c2ba9f08a80..31fa856829cb 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -122,7 +122,7 @@ handle_vcei:
122 __FINIT 122 __FINIT
123 123
124 .align 5 /* 32 byte rollback region */ 124 .align 5 /* 32 byte rollback region */
125LEAF(r4k_wait) 125LEAF(__r4k_wait)
126 .set push 126 .set push
127 .set noreorder 127 .set noreorder
128 /* start of rollback region */ 128 /* start of rollback region */
@@ -146,14 +146,14 @@ LEAF(r4k_wait)
146 jr ra 146 jr ra
147 nop 147 nop
148 .set pop 148 .set pop
149 END(r4k_wait) 149 END(__r4k_wait)
150 150
151 .macro BUILD_ROLLBACK_PROLOGUE handler 151 .macro BUILD_ROLLBACK_PROLOGUE handler
152 FEXPORT(rollback_\handler) 152 FEXPORT(rollback_\handler)
153 .set push 153 .set push
154 .set noat 154 .set noat
155 MFC0 k0, CP0_EPC 155 MFC0 k0, CP0_EPC
156 PTR_LA k1, r4k_wait 156 PTR_LA k1, __r4k_wait
157 ori k0, 0x1f /* 32 byte rollback region */ 157 ori k0, 0x1f /* 32 byte rollback region */
158 xori k0, 0x1f 158 xori k0, 0x1f
159 bne k0, k1, 9f 159 bne k0, k1, 9f
@@ -493,7 +493,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
493 .set noreorder 493 .set noreorder
494 /* check if TLB contains a entry for EPC */ 494 /* check if TLB contains a entry for EPC */
495 MFC0 k1, CP0_ENTRYHI 495 MFC0 k1, CP0_ENTRYHI
496 andi k1, 0xff /* ASID_MASK patched at run-time!! */ 496 andi k1, 0xff /* ASID_MASK */
497 MFC0 k0, CP0_EPC 497 MFC0 k0, CP0_EPC
498 PTR_SRL k0, _PAGE_SHIFT + 1 498 PTR_SRL k0, _PAGE_SHIFT + 1
499 PTR_SLL k0, _PAGE_SHIFT + 1 499 PTR_SLL k0, _PAGE_SHIFT + 1
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 000000000000..3b09b888afa9
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,244 @@
1/*
2 * MIPS idle loop and WAIT instruction support.
3 *
4 * Copyright (C) xxxx the Anonymous
5 * Copyright (C) 1994 - 2006 Ralf Baechle
6 * Copyright (C) 2003, 2004 Maciej W. Rozycki
7 * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14#include <linux/export.h>
15#include <linux/init.h>
16#include <linux/irqflags.h>
17#include <linux/printk.h>
18#include <linux/sched.h>
19#include <asm/cpu.h>
20#include <asm/cpu-info.h>
21#include <asm/idle.h>
22#include <asm/mipsregs.h>
23
24/*
25 * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
26 * the implementation of the "wait" feature differs between CPU families. This
27 * points to the function that implements CPU specific wait.
28 * The wait instruction stops the pipeline and reduces the power consumption of
29 * the CPU very much.
30 */
31void (*cpu_wait)(void);
32EXPORT_SYMBOL(cpu_wait);
33
34static void r3081_wait(void)
35{
36 unsigned long cfg = read_c0_conf();
37 write_c0_conf(cfg | R30XX_CONF_HALT);
38 local_irq_enable();
39}
40
41static void r39xx_wait(void)
42{
43 if (!need_resched())
44 write_c0_conf(read_c0_conf() | TX39_CONF_HALT);
45 local_irq_enable();
46}
47
48void r4k_wait(void)
49{
50 local_irq_enable();
51 __r4k_wait();
52}
53
54/*
55 * This variant is preferable as it allows testing need_resched and going to
56 * sleep depending on the outcome atomically. Unfortunately the "It is
57 * implementation-dependent whether the pipeline restarts when a non-enabled
58 * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
59 * using this version a gamble.
60 */
61void r4k_wait_irqoff(void)
62{
63 if (!need_resched())
64 __asm__(
65 " .set push \n"
66 " .set mips3 \n"
67 " wait \n"
68 " .set pop \n");
69 local_irq_enable();
70 __asm__(
71 " .globl __pastwait \n"
72 "__pastwait: \n");
73}
74
75/*
76 * The RM7000 variant has to handle erratum 38. The workaround is to not
77 * have any pending stores when the WAIT instruction is executed.
78 */
79static void rm7k_wait_irqoff(void)
80{
81 if (!need_resched())
82 __asm__(
83 " .set push \n"
84 " .set mips3 \n"
85 " .set noat \n"
86 " mfc0 $1, $12 \n"
87 " sync \n"
88 " mtc0 $1, $12 # stalls until W stage \n"
89 " wait \n"
90 " mtc0 $1, $12 # stalls until W stage \n"
91 " .set pop \n");
92 local_irq_enable();
93}
94
95/*
96 * The Au1xxx wait is available only if using 32khz counter or
97 * external timer source, but specifically not CP0 Counter.
98 * alchemy/common/time.c may override cpu_wait!
99 */
100static void au1k_wait(void)
101{
102 __asm__(
103 " .set mips3 \n"
104 " cache 0x14, 0(%0) \n"
105 " cache 0x14, 32(%0) \n"
106 " sync \n"
107 " nop \n"
108 " wait \n"
109 " nop \n"
110 " nop \n"
111 " nop \n"
112 " nop \n"
113 " .set mips0 \n"
114 : : "r" (au1k_wait));
115 local_irq_enable();
116}
117
118static int __initdata nowait;
119
120static int __init wait_disable(char *s)
121{
122 nowait = 1;
123
124 return 1;
125}
126
127__setup("nowait", wait_disable);
128
129void __init check_wait(void)
130{
131 struct cpuinfo_mips *c = &current_cpu_data;
132
133 if (nowait) {
134 printk("Wait instruction disabled.\n");
135 return;
136 }
137
138 switch (c->cputype) {
139 case CPU_R3081:
140 case CPU_R3081E:
141 cpu_wait = r3081_wait;
142 break;
143 case CPU_TX3927:
144 cpu_wait = r39xx_wait;
145 break;
146 case CPU_R4200:
147/* case CPU_R4300: */
148 case CPU_R4600:
149 case CPU_R4640:
150 case CPU_R4650:
151 case CPU_R4700:
152 case CPU_R5000:
153 case CPU_R5500:
154 case CPU_NEVADA:
155 case CPU_4KC:
156 case CPU_4KEC:
157 case CPU_4KSC:
158 case CPU_5KC:
159 case CPU_25KF:
160 case CPU_PR4450:
161 case CPU_BMIPS3300:
162 case CPU_BMIPS4350:
163 case CPU_BMIPS4380:
164 case CPU_BMIPS5000:
165 case CPU_CAVIUM_OCTEON:
166 case CPU_CAVIUM_OCTEON_PLUS:
167 case CPU_CAVIUM_OCTEON2:
168 case CPU_JZRISC:
169 case CPU_LOONGSON1:
170 case CPU_XLR:
171 case CPU_XLP:
172 cpu_wait = r4k_wait;
173 break;
174
175 case CPU_RM7000:
176 cpu_wait = rm7k_wait_irqoff;
177 break;
178
179 case CPU_M14KC:
180 case CPU_M14KEC:
181 case CPU_24K:
182 case CPU_34K:
183 case CPU_1004K:
184 cpu_wait = r4k_wait;
185 if (read_c0_config7() & MIPS_CONF7_WII)
186 cpu_wait = r4k_wait_irqoff;
187 break;
188
189 case CPU_74K:
190 cpu_wait = r4k_wait;
191 if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
192 cpu_wait = r4k_wait_irqoff;
193 break;
194
195 case CPU_TX49XX:
196 cpu_wait = r4k_wait_irqoff;
197 break;
198 case CPU_ALCHEMY:
199 cpu_wait = au1k_wait;
200 break;
201 case CPU_20KC:
202 /*
203 * WAIT on Rev1.0 has E1, E2, E3 and E16.
204 * WAIT on Rev2.0 and Rev3.0 has E16.
205 * Rev3.1 WAIT is nop, why bother
206 */
207 if ((c->processor_id & 0xff) <= 0x64)
208 break;
209
210 /*
211 * Another rev is incrementing c0_count at a reduced clock
212 * rate while in WAIT mode. So we basically have the choice
213 * between using the cp0 timer as clocksource or avoiding
214 * the WAIT instruction. Until more details are known,
215 * disable the use of WAIT for 20Kc entirely.
216 cpu_wait = r4k_wait;
217 */
218 break;
219 case CPU_RM9000:
220 if ((c->processor_id & 0x00ff) >= 0x40)
221 cpu_wait = r4k_wait;
222 break;
223 default:
224 break;
225 }
226}
227
228static void smtc_idle_hook(void)
229{
230#ifdef CONFIG_MIPS_MT_SMTC
231 void smtc_idle_loop_hook(void);
232
233 smtc_idle_loop_hook();
234#endif
235}
236
237void arch_cpu_idle(void)
238{
239 smtc_idle_hook();
240 if (cpu_wait)
241 cpu_wait();
242 else
243 local_irq_enable();
244}
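The new arch/mips/kernel/idle.c above gathers all of the wait-instruction handling in one place: cpu_wait is a function pointer selected per CPU type in check_wait(), and arch_cpu_idle() either calls it or simply re-enables interrupts. Stripped of the MIPS specifics, the dispatch pattern looks like the hedged userspace sketch below; the cputype names and wait bodies are stand-ins, not real ones.

#include <stdio.h>

/* Stand-in CPU identifiers for illustration only. */
enum cputype { CPU_NO_WAIT, CPU_FOO, CPU_BAR };

static void (*cpu_wait)(void);	/* NULL means "no wait support" */

static void foo_wait(void) { puts("foo: low-power wait"); }
static void bar_wait(void) { puts("bar: wait with irqs off"); }

static void check_wait(enum cputype type)
{
	switch (type) {
	case CPU_FOO:
		cpu_wait = foo_wait;
		break;
	case CPU_BAR:
		cpu_wait = bar_wait;
		break;
	default:
		break;		/* leave cpu_wait as it is */
	}
}

static void arch_cpu_idle(void)
{
	if (cpu_wait)
		cpu_wait();	/* CPU-specific idle */
	else
		puts("busy idle: just re-enable interrupts");
}

int main(void)
{
	/* A real check_wait() runs once at boot; three calls here for demo only. */
	check_wait(CPU_NO_WAIT);
	arch_cpu_idle();
	check_wait(CPU_FOO);
	arch_cpu_idle();
	check_wait(CPU_BAR);
	arch_cpu_idle();
	return 0;
}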
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
index 12bc4ebdf55b..1f8187ab0997 100644
--- a/arch/mips/kernel/kprobes.c
+++ b/arch/mips/kernel/kprobes.c
@@ -207,7 +207,10 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
207 207
208void __kprobes arch_remove_kprobe(struct kprobe *p) 208void __kprobes arch_remove_kprobe(struct kprobe *p)
209{ 209{
210 free_insn_slot(p->ainsn.insn, 0); 210 if (p->ainsn.insn) {
211 free_insn_slot(p->ainsn.insn, 0);
212 p->ainsn.insn = NULL;
213 }
211} 214}
212 215
213static void save_previous_kprobe(struct kprobe_ctlblk *kcb) 216static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
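The kprobes hunk above guards free_insn_slot() with a NULL check and clears ainsn.insn afterwards, so arch_remove_kprobe() stays safe when it runs for a probe whose slot was never allocated or is torn down more than once. The same free-and-clear idiom in plain C, with a made-up resource type:

#include <stdlib.h>

struct probe {
	void *insn;	/* NULL when no instruction slot is held */
};

static void remove_probe(struct probe *p)
{
	if (p->insn) {
		free(p->insn);	/* release the slot exactly once */
		p->insn = NULL;	/* make a second call a harmless no-op */
	}
}

int main(void)
{
	struct probe p = { .insn = malloc(32) };

	remove_probe(&p);
	remove_probe(&p);	/* safe: the pointer was cleared above */
	return 0;
}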
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index a3e461408b7e..acb34373679e 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -10,6 +10,7 @@
10#include <asm/bootinfo.h> 10#include <asm/bootinfo.h>
11#include <asm/cpu.h> 11#include <asm/cpu.h>
12#include <asm/cpu-features.h> 12#include <asm/cpu-features.h>
13#include <asm/idle.h>
13#include <asm/mipsregs.h> 14#include <asm/mipsregs.h>
14#include <asm/processor.h> 15#include <asm/processor.h>
15#include <asm/prom.h> 16#include <asm/prom.h>
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eb902c1f0cad..c6a041d9d05d 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -51,19 +51,6 @@ void arch_cpu_idle_dead(void)
51} 51}
52#endif 52#endif
53 53
54void arch_cpu_idle(void)
55{
56#ifdef CONFIG_MIPS_MT_SMTC
57 extern void smtc_idle_loop_hook(void);
58
59 smtc_idle_loop_hook();
60#endif
61 if (cpu_wait)
62 (*cpu_wait)();
63 else
64 local_irq_enable();
65}
66
67asmlinkage void ret_from_fork(void); 54asmlinkage void ret_from_fork(void);
68asmlinkage void ret_from_kernel_thread(void); 55asmlinkage void ret_from_kernel_thread(void);
69 56
@@ -224,6 +211,9 @@ struct mips_frame_info {
224 int pc_offset; 211 int pc_offset;
225}; 212};
226 213
214#define J_TARGET(pc,target) \
215 (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
216
227static inline int is_ra_save_ins(union mips_instruction *ip) 217static inline int is_ra_save_ins(union mips_instruction *ip)
228{ 218{
229#ifdef CONFIG_CPU_MICROMIPS 219#ifdef CONFIG_CPU_MICROMIPS
@@ -264,7 +254,7 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
264#endif 254#endif
265} 255}
266 256
267static inline int is_jal_jalr_jr_ins(union mips_instruction *ip) 257static inline int is_jump_ins(union mips_instruction *ip)
268{ 258{
269#ifdef CONFIG_CPU_MICROMIPS 259#ifdef CONFIG_CPU_MICROMIPS
270 /* 260 /*
@@ -288,6 +278,8 @@ static inline int is_jal_jalr_jr_ins(union mips_instruction *ip)
288 return 0; 278 return 0;
289 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); 279 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op);
290#else 280#else
281 if (ip->j_format.opcode == j_op)
282 return 1;
291 if (ip->j_format.opcode == jal_op) 283 if (ip->j_format.opcode == jal_op)
292 return 1; 284 return 1;
293 if (ip->r_format.opcode != spec_op) 285 if (ip->r_format.opcode != spec_op)
@@ -350,7 +342,7 @@ static int get_frame_info(struct mips_frame_info *info)
350 342
351 for (i = 0; i < max_insns; i++, ip++) { 343 for (i = 0; i < max_insns; i++, ip++) {
352 344
353 if (is_jal_jalr_jr_ins(ip)) 345 if (is_jump_ins(ip))
354 break; 346 break;
355 if (!info->frame_size) { 347 if (!info->frame_size) {
356 if (is_sp_move_ins(ip)) 348 if (is_sp_move_ins(ip))
@@ -393,15 +385,42 @@ err:
393 385
394static struct mips_frame_info schedule_mfi __read_mostly; 386static struct mips_frame_info schedule_mfi __read_mostly;
395 387
388#ifdef CONFIG_KALLSYMS
389static unsigned long get___schedule_addr(void)
390{
391 return kallsyms_lookup_name("__schedule");
392}
393#else
394static unsigned long get___schedule_addr(void)
395{
396 union mips_instruction *ip = (void *)schedule;
397 int max_insns = 8;
398 int i;
399
400 for (i = 0; i < max_insns; i++, ip++) {
401 if (ip->j_format.opcode == j_op)
402 return J_TARGET(ip, ip->j_format.target);
403 }
404 return 0;
405}
406#endif
407
396static int __init frame_info_init(void) 408static int __init frame_info_init(void)
397{ 409{
398 unsigned long size = 0; 410 unsigned long size = 0;
399#ifdef CONFIG_KALLSYMS 411#ifdef CONFIG_KALLSYMS
400 unsigned long ofs; 412 unsigned long ofs;
413#endif
414 unsigned long addr;
415
416 addr = get___schedule_addr();
417 if (!addr)
418 addr = (unsigned long)schedule;
401 419
402 kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs); 420#ifdef CONFIG_KALLSYMS
421 kallsyms_lookup_size_offset(addr, &size, &ofs);
403#endif 422#endif
404 schedule_mfi.func = schedule; 423 schedule_mfi.func = (void *)addr;
405 schedule_mfi.func_size = size; 424 schedule_mfi.func_size = size;
406 425
407 get_frame_info(&schedule_mfi); 426 get_frame_info(&schedule_mfi);
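The process.c changes above teach the stack unwinder about plain j instructions and, when kallsyms is unavailable, recover the address of __schedule by decoding the jump at the start of schedule(): J_TARGET() keeps the top four bits of the PC and appends the 26-bit target field shifted left by two. A standalone decode of that arithmetic follows; the instruction word is constructed for the example, not taken from a real kernel.

#include <stdint.h>
#include <stdio.h>

#define J_TARGET(pc, target) \
	(((unsigned long)(pc) & 0xf0000000) | ((target) << 2))

int main(void)
{
	unsigned long pc = 0x80123450;		/* address of the j instruction */
	unsigned long dest = 0x80045678;	/* where the jump should land */

	/* Encode: opcode 000010 (j) in bits 31..26, word index in bits 25..0. */
	uint32_t insn = (2u << 26) | ((dest & 0x0fffffff) >> 2);

	unsigned long target = insn & 0x03ffffff;
	printf("insn=%#x decoded target=%#lx\n", insn, J_TARGET(pc, target));
	return 0;
}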
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 36cfd4060e1f..97a5909a61cf 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -423,4 +423,5 @@ sys_call_table:
423 PTR sys_process_vm_writev /* 5305 */ 423 PTR sys_process_vm_writev /* 5305 */
424 PTR sys_kcmp 424 PTR sys_kcmp
425 PTR sys_finit_module 425 PTR sys_finit_module
426 PTR sys_getdents64
426 .size sys_call_table,.-sys_call_table 427 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index c17619fe18e3..6e7862ab46cc 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -37,6 +37,7 @@
37#include <linux/atomic.h> 37#include <linux/atomic.h>
38#include <asm/cpu.h> 38#include <asm/cpu.h>
39#include <asm/processor.h> 39#include <asm/processor.h>
40#include <asm/idle.h>
40#include <asm/r4k-timer.h> 41#include <asm/r4k-timer.h>
41#include <asm/mmu_context.h> 42#include <asm/mmu_context.h>
42#include <asm/time.h> 43#include <asm/time.h>
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 31d22f3121c9..75a4fd709841 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -34,6 +34,7 @@
34#include <asm/hardirq.h> 34#include <asm/hardirq.h>
35#include <asm/hazards.h> 35#include <asm/hazards.h>
36#include <asm/irq.h> 36#include <asm/irq.h>
37#include <asm/idle.h>
37#include <asm/mmu_context.h> 38#include <asm/mmu_context.h>
38#include <asm/mipsregs.h> 39#include <asm/mipsregs.h>
39#include <asm/cacheflush.h> 40#include <asm/cacheflush.h>
@@ -111,7 +112,7 @@ static int vpe0limit;
111static int ipibuffers; 112static int ipibuffers;
112static int nostlb; 113static int nostlb;
113static int asidmask; 114static int asidmask;
114unsigned int smtc_asid_mask = 0xff; 115unsigned long smtc_asid_mask = 0xff;
115 116
116static int __init vpe0tcs(char *str) 117static int __init vpe0tcs(char *str)
117{ 118{
@@ -858,7 +859,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
858 unsigned long flags; 859 unsigned long flags;
859 int mtflags; 860 int mtflags;
860 unsigned long tcrestart; 861 unsigned long tcrestart;
861 extern void r4k_wait_irqoff(void), __pastwait(void);
862 int set_resched_flag = (type == LINUX_SMP_IPI && 862 int set_resched_flag = (type == LINUX_SMP_IPI &&
863 action == SMP_RESCHEDULE_YOURSELF); 863 action == SMP_RESCHEDULE_YOURSELF);
864 864
@@ -914,8 +914,7 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
914 */ 914 */
915 if (cpu_wait == r4k_wait_irqoff) { 915 if (cpu_wait == r4k_wait_irqoff) {
916 tcrestart = read_tc_c0_tcrestart(); 916 tcrestart = read_tc_c0_tcrestart();
917 if (tcrestart >= (unsigned long)r4k_wait_irqoff 917 if (address_is_in_r4k_wait_irqoff(tcrestart)) {
918 && tcrestart < (unsigned long)__pastwait) {
919 write_tc_c0_tcrestart(__pastwait); 918 write_tc_c0_tcrestart(__pastwait);
920 tcstatus &= ~TCSTATUS_IXMT; 919 tcstatus &= ~TCSTATUS_IXMT;
921 write_tc_c0_tcstatus(tcstatus); 920 write_tc_c0_tcstatus(tcstatus);
@@ -1395,7 +1394,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1395 asid = asid_cache(cpu); 1394 asid = asid_cache(cpu);
1396 1395
1397 do { 1396 do {
1398 if (!ASID_MASK(ASID_INC(asid))) { 1397 if (!((asid += ASID_INC) & ASID_MASK) ) {
1399 if (cpu_has_vtag_icache) 1398 if (cpu_has_vtag_icache)
1400 flush_icache_all(); 1399 flush_icache_all();
1401 /* Traverse all online CPUs (hack requires contiguous range) */ 1400 /* Traverse all online CPUs (hack requires contiguous range) */
@@ -1414,7 +1413,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1414 mips_ihb(); 1413 mips_ihb();
1415 } 1414 }
1416 tcstat = read_tc_c0_tcstatus(); 1415 tcstat = read_tc_c0_tcstatus();
1417 smtc_live_asid[tlb][ASID_MASK(tcstat)] |= (asiduse)(0x1 << i); 1416 smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
1418 if (!prevhalt) 1417 if (!prevhalt)
1419 write_tc_c0_tchalt(0); 1418 write_tc_c0_tchalt(0);
1420 } 1419 }
@@ -1423,7 +1422,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1423 asid = ASID_FIRST_VERSION; 1422 asid = ASID_FIRST_VERSION;
1424 local_flush_tlb_all(); /* start new asid cycle */ 1423 local_flush_tlb_all(); /* start new asid cycle */
1425 } 1424 }
1426 } while (smtc_live_asid[tlb][ASID_MASK(asid)]); 1425 } while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
1427 1426
1428 /* 1427 /*
1429 * SMTC shares the TLB within VPEs and possibly across all VPEs. 1428 * SMTC shares the TLB within VPEs and possibly across all VPEs.
@@ -1461,7 +1460,7 @@ void smtc_flush_tlb_asid(unsigned long asid)
1461 tlb_read(); 1460 tlb_read();
1462 ehb(); 1461 ehb();
1463 ehi = read_c0_entryhi(); 1462 ehi = read_c0_entryhi();
1464 if (ASID_MASK(ehi) == asid) { 1463 if ((ehi & ASID_MASK) == asid) {
1465 /* 1464 /*
1466 * Invalidate only entries with specified ASID, 1465 * Invalidate only entries with specified ASID,
1467 * making sure all entries differ. 1466
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 77cff1f6d050..e3be67012d78 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -41,6 +41,7 @@
41#include <asm/dsp.h> 41#include <asm/dsp.h>
42#include <asm/fpu.h> 42#include <asm/fpu.h>
43#include <asm/fpu_emulator.h> 43#include <asm/fpu_emulator.h>
44#include <asm/idle.h>
44#include <asm/mipsregs.h> 45#include <asm/mipsregs.h>
45#include <asm/mipsmtregs.h> 46#include <asm/mipsmtregs.h>
46#include <asm/module.h> 47#include <asm/module.h>
@@ -57,7 +58,6 @@
57#include <asm/uasm.h> 58#include <asm/uasm.h>
58 59
59extern void check_wait(void); 60extern void check_wait(void);
60extern asmlinkage void r4k_wait(void);
61extern asmlinkage void rollback_handle_int(void); 61extern asmlinkage void rollback_handle_int(void);
62extern asmlinkage void handle_int(void); 62extern asmlinkage void handle_int(void);
63extern u32 handle_tlbl[]; 63extern u32 handle_tlbl[];
@@ -1542,7 +1542,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1542 extern char except_vec_vi, except_vec_vi_lui; 1542 extern char except_vec_vi, except_vec_vi_lui;
1543 extern char except_vec_vi_ori, except_vec_vi_end; 1543 extern char except_vec_vi_ori, except_vec_vi_end;
1544 extern char rollback_except_vec_vi; 1544 extern char rollback_except_vec_vi;
1545 char *vec_start = (cpu_wait == r4k_wait) ? 1545 char *vec_start = using_rollback_handler() ?
1546 &rollback_except_vec_vi : &except_vec_vi; 1546 &rollback_except_vec_vi : &except_vec_vi;
1547#ifdef CONFIG_MIPS_MT_SMTC 1547#ifdef CONFIG_MIPS_MT_SMTC
1548 /* 1548 /*
@@ -1656,7 +1656,6 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1656 unsigned int cpu = smp_processor_id(); 1656 unsigned int cpu = smp_processor_id();
1657 unsigned int status_set = ST0_CU0; 1657 unsigned int status_set = ST0_CU0;
1658 unsigned int hwrena = cpu_hwrena_impl_bits; 1658 unsigned int hwrena = cpu_hwrena_impl_bits;
1659 unsigned long asid = 0;
1660#ifdef CONFIG_MIPS_MT_SMTC 1659#ifdef CONFIG_MIPS_MT_SMTC
1661 int secondaryTC = 0; 1660 int secondaryTC = 0;
1662 int bootTC = (cpu == 0); 1661 int bootTC = (cpu == 0);
@@ -1740,9 +1739,8 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1740 } 1739 }
1741#endif /* CONFIG_MIPS_MT_SMTC */ 1740#endif /* CONFIG_MIPS_MT_SMTC */
1742 1741
1743 asid = ASID_FIRST_VERSION; 1742 if (!cpu_data[cpu].asid_cache)
1744 cpu_data[cpu].asid_cache = asid; 1743 cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
1745 TLBMISS_HANDLER_SETUP();
1746 1744
1747 atomic_inc(&init_mm.mm_count); 1745 atomic_inc(&init_mm.mm_count);
1748 current->active_mm = &init_mm; 1746 current->active_mm = &init_mm;
@@ -1814,10 +1812,8 @@ void __init trap_init(void)
1814 extern char except_vec4; 1812 extern char except_vec4;
1815 extern char except_vec3_r4000; 1813 extern char except_vec3_r4000;
1816 unsigned long i; 1814 unsigned long i;
1817 int rollback;
1818 1815
1819 check_wait(); 1816 check_wait();
1820 rollback = (cpu_wait == r4k_wait);
1821 1817
1822#if defined(CONFIG_KGDB) 1818#if defined(CONFIG_KGDB)
1823 if (kgdb_early_setup) 1819 if (kgdb_early_setup)
@@ -1894,7 +1890,8 @@ void __init trap_init(void)
1894 if (board_be_init) 1890 if (board_be_init)
1895 board_be_init(); 1891 board_be_init();
1896 1892
1897 set_except_vector(0, rollback ? rollback_handle_int : handle_int); 1893 set_except_vector(0, using_rollback_handler() ? rollback_handle_int
1894 : handle_int);
1898 set_except_vector(1, handle_tlbm); 1895 set_except_vector(1, handle_tlbm);
1899 set_except_vector(2, handle_tlbl); 1896 set_except_vector(2, handle_tlbl);
1900 set_except_vector(3, handle_tlbs); 1897 set_except_vector(3, handle_tlbs);
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 2b2bac9a40aa..4b6274b47f33 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
525 printk("MTCz, cop0->reg[EBASE]: %#lx\n", 525 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
526 kvm_read_c0_guest_ebase(cop0)); 526 kvm_read_c0_guest_ebase(cop0));
527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { 527 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
528 uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]); 528 uint32_t nasid =
529 vcpu->arch.gprs[rt] & ASID_MASK;
529 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) 530 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
530 && 531 &&
531 (ASID_MASK(kvm_read_c0_guest_entryhi(cop0)) 532 ((kvm_read_c0_guest_entryhi(cop0) &
532 != nasid)) { 533 ASID_MASK) != nasid)) {
533 534
534 kvm_debug 535 kvm_debug
535 ("MTCz, change ASID from %#lx to %#lx\n", 536 ("MTCz, change ASID from %#lx to %#lx\n",
536 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)), 537 kvm_read_c0_guest_entryhi(cop0) &
537 ASID_MASK(vcpu->arch.gprs[rt])); 538 ASID_MASK,
539 vcpu->arch.gprs[rt] & ASID_MASK);
538 540
539 /* Blow away the shadow host TLBs */ 541 /* Blow away the shadow host TLBs */
540 kvm_mips_flush_host_tlb(1); 542 kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
986 * resulting handler will do the right thing 988 * resulting handler will do the right thing
987 */ 989 */
988 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | 990 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
989 ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); 991 (kvm_read_c0_guest_entryhi
992 (cop0) & ASID_MASK));
990 993
991 if (index < 0) { 994 if (index < 0) {
992 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK); 995 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1151 struct kvm_vcpu_arch *arch = &vcpu->arch; 1154 struct kvm_vcpu_arch *arch = &vcpu->arch;
1152 enum emulation_result er = EMULATE_DONE; 1155 enum emulation_result er = EMULATE_DONE;
1153 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) | 1156 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
1154 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1157 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1155 1158
1156 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1159 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1157 /* save old pc */ 1160 /* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1198 enum emulation_result er = EMULATE_DONE; 1201 enum emulation_result er = EMULATE_DONE;
1199 unsigned long entryhi = 1202 unsigned long entryhi =
1200 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1203 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1201 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1204 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1202 1205
1203 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1206 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1204 /* save old pc */ 1207 /* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1243 struct kvm_vcpu_arch *arch = &vcpu->arch; 1246 struct kvm_vcpu_arch *arch = &vcpu->arch;
1244 enum emulation_result er = EMULATE_DONE; 1247 enum emulation_result er = EMULATE_DONE;
1245 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1248 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1246 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1249 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1247 1250
1248 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1251 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1249 /* save old pc */ 1252 /* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1287 struct kvm_vcpu_arch *arch = &vcpu->arch; 1290 struct kvm_vcpu_arch *arch = &vcpu->arch;
1288 enum emulation_result er = EMULATE_DONE; 1291 enum emulation_result er = EMULATE_DONE;
1289 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1292 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1290 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1293 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1291 1294
1292 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { 1295 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1293 /* save old pc */ 1296 /* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1356{ 1359{
1357 struct mips_coproc *cop0 = vcpu->arch.cop0; 1360 struct mips_coproc *cop0 = vcpu->arch.cop0;
1358 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | 1361 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1359 ASID_MASK(kvm_read_c0_guest_entryhi(cop0)); 1362 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1360 struct kvm_vcpu_arch *arch = &vcpu->arch; 1363 struct kvm_vcpu_arch *arch = &vcpu->arch;
1361 enum emulation_result er = EMULATE_DONE; 1364 enum emulation_result er = EMULATE_DONE;
1362 1365
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
1783 */ 1786 */
1784 index = kvm_mips_guest_tlb_lookup(vcpu, 1787 index = kvm_mips_guest_tlb_lookup(vcpu,
1785 (va & VPN2_MASK) | 1788 (va & VPN2_MASK) |
1786 ASID_MASK(kvm_read_c0_guest_entryhi 1789 (kvm_read_c0_guest_entryhi
1787 (vcpu->arch.cop0))); 1790 (vcpu->arch.cop0) & ASID_MASK));
1788 if (index < 0) { 1791 if (index < 0) {
1789 if (exccode == T_TLB_LD_MISS) { 1792 if (exccode == T_TLB_LD_MISS) {
1790 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu); 1793 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..c777dd36d4a8 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -17,6 +17,8 @@
17#include <linux/delay.h> 17#include <linux/delay.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/kvm_host.h> 19#include <linux/kvm_host.h>
20#include <linux/srcu.h>
21
20 22
21#include <asm/cpu.h> 23#include <asm/cpu.h>
22#include <asm/bootinfo.h> 24#include <asm/bootinfo.h>
@@ -51,13 +53,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);
51 53
52uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) 54uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
53{ 55{
54 return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]); 56 return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
55} 57}
56 58
57 59
58uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) 60uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
59{ 61{
60 return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]); 62 return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
61} 63}
62 64
63inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) 65inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +86,7 @@ void kvm_mips_dump_host_tlbs(void)
84 old_pagemask = read_c0_pagemask(); 86 old_pagemask = read_c0_pagemask();
85 87
86 printk("HOST TLBs:\n"); 88 printk("HOST TLBs:\n");
87 printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi())); 89 printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
88 90
89 for (i = 0; i < current_cpu_data.tlbsize; i++) { 91 for (i = 0; i < current_cpu_data.tlbsize; i++) {
90 write_c0_index(i); 92 write_c0_index(i);
@@ -169,21 +171,27 @@ void kvm_mips_dump_shadow_tlbs(struct kvm_vcpu *vcpu)
169 } 171 }
170} 172}
171 173
172static void kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) 174static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
173{ 175{
176 int srcu_idx, err = 0;
174 pfn_t pfn; 177 pfn_t pfn;
175 178
176 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) 179 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
177 return; 180 return 0;
178 181
182 srcu_idx = srcu_read_lock(&kvm->srcu);
179 pfn = kvm_mips_gfn_to_pfn(kvm, gfn); 183 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
180 184
181 if (kvm_mips_is_error_pfn(pfn)) { 185 if (kvm_mips_is_error_pfn(pfn)) {
182 panic("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn); 186 kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
187 err = -EFAULT;
188 goto out;
183 } 189 }
184 190
185 kvm->arch.guest_pmap[gfn] = pfn; 191 kvm->arch.guest_pmap[gfn] = pfn;
186 return; 192out:
193 srcu_read_unlock(&kvm->srcu, srcu_idx);
194 return err;
187} 195}
188 196
189/* Translate guest KSEG0 addresses to Host PA */ 197/* Translate guest KSEG0 addresses to Host PA */
@@ -207,7 +215,10 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
207 gva); 215 gva);
208 return KVM_INVALID_PAGE; 216 return KVM_INVALID_PAGE;
209 } 217 }
210 kvm_mips_map_page(vcpu->kvm, gfn); 218
219 if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
220 return KVM_INVALID_ADDR;
221
211 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; 222 return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
212} 223}
213 224
@@ -310,8 +321,11 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
310 even = !(gfn & 0x1); 321 even = !(gfn & 0x1);
311 vaddr = badvaddr & (PAGE_MASK << 1); 322 vaddr = badvaddr & (PAGE_MASK << 1);
312 323
313 kvm_mips_map_page(vcpu->kvm, gfn); 324 if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
314 kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1); 325 return -1;
326
327 if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
328 return -1;
315 329
316 if (even) { 330 if (even) {
317 pfn0 = kvm->arch.guest_pmap[gfn]; 331 pfn0 = kvm->arch.guest_pmap[gfn];
@@ -389,8 +403,11 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
389 pfn0 = 0; 403 pfn0 = 0;
390 pfn1 = 0; 404 pfn1 = 0;
391 } else { 405 } else {
392 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT); 406 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
393 kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT); 407 return -1;
408
409 if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
410 return -1;
394 411
395 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; 412 pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
396 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; 413 pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
@@ -428,7 +445,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
428 445
429 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { 446 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
430 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && 447 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
431 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) { 448 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
432 index = i; 449 index = i;
433 break; 450 break;
434 } 451 }
@@ -626,7 +643,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
626{ 643{
627 unsigned long asid = asid_cache(cpu); 644 unsigned long asid = asid_cache(cpu);
628 645
629 if (!(ASID_MASK(ASID_INC(asid)))) { 646 if (!((asid += ASID_INC) & ASID_MASK)) {
630 if (cpu_has_vtag_icache) { 647 if (cpu_has_vtag_icache) {
631 flush_icache_all(); 648 flush_icache_all();
632 } 649 }
@@ -804,7 +821,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
804 if (!newasid) { 821 if (!newasid) {
805 /* If we preempted while the guest was executing, then reload the pre-empted ASID */ 822 /* If we preempted while the guest was executing, then reload the pre-empted ASID */
806 if (current->flags & PF_VCPU) { 823 if (current->flags & PF_VCPU) {
807 write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi)); 824 write_c0_entryhi(vcpu->arch.
825 preempt_entryhi & ASID_MASK);
808 ehb(); 826 ehb();
809 } 827 }
810 } else { 828 } else {
@@ -816,11 +834,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
816 */ 834 */
817 if (current->flags & PF_VCPU) { 835 if (current->flags & PF_VCPU) {
818 if (KVM_GUEST_KERNEL_MODE(vcpu)) 836 if (KVM_GUEST_KERNEL_MODE(vcpu))
819 write_c0_entryhi(ASID_MASK(vcpu->arch. 837 write_c0_entryhi(vcpu->arch.
820 guest_kernel_asid[cpu])); 838 guest_kernel_asid[cpu] &
839 ASID_MASK);
821 else 840 else
822 write_c0_entryhi(ASID_MASK(vcpu->arch. 841 write_c0_entryhi(vcpu->arch.
823 guest_user_asid[cpu])); 842 guest_user_asid[cpu] &
843 ASID_MASK);
824 ehb(); 844 ehb();
825 } 845 }
826 } 846 }
@@ -879,7 +899,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
879 kvm_mips_guest_tlb_lookup(vcpu, 899 kvm_mips_guest_tlb_lookup(vcpu,
880 ((unsigned long) opc & VPN2_MASK) 900 ((unsigned long) opc & VPN2_MASK)
881 | 901 |
882 ASID_MASK(kvm_read_c0_guest_entryhi(cop0))); 902 (kvm_read_c0_guest_entryhi
903 (cop0) & ASID_MASK));
883 if (index < 0) { 904 if (index < 0) {
884 kvm_err 905 kvm_err
885 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", 906 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
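Beyond the ASID_MASK cleanup, the kvm_tlb.c hunks turn kvm_mips_map_page() from a void function that panics on failure into one that returns an error, and wrap the gfn-to-pfn lookup in an SRCU read-side critical section (the memslot array that gfn_to_pfn() walks is protected by kvm->srcu). A reduced sketch of that pattern, with the surrounding KVM types assumed:

    static int map_page_sketch(struct kvm *kvm, gfn_t gfn)
    {
        int srcu_idx, err = 0;
        pfn_t pfn;

        srcu_idx = srcu_read_lock(&kvm->srcu);   /* memslots are SRCU-protected */
        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_pfn(pfn))
            err = -EFAULT;                       /* report instead of panic() */
        else
            kvm->arch.guest_pmap[gfn] = pfn;
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
    }

Callers then propagate the failure (returning KVM_INVALID_ADDR or -1, as in the hunks above) instead of continuing with a bogus mapping.
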
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index 9861c8669fab..850821df924c 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -144,10 +144,6 @@ static int gptu_probe(struct platform_device *pdev)
144 } 144 }
145 145
146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 146 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
147 if (!res) {
148 dev_err(&pdev->dev, "Failed to get resource\n");
149 return -ENOMEM;
150 }
151 147
152 /* remap gptu register range */ 148 /* remap gptu register range */
153 gptu_membase = devm_ioremap_resource(&pdev->dev, res); 149 gptu_membase = devm_ioremap_resource(&pdev->dev, res);
@@ -169,6 +165,8 @@ static int gptu_probe(struct platform_device *pdev)
169 if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) { 165 if (((gptu_r32(GPTU_ID) >> 8) & 0xff) != GPTU_MAGIC) {
170 dev_err(&pdev->dev, "Failed to find magic\n"); 166 dev_err(&pdev->dev, "Failed to find magic\n");
171 gptu_hwexit(); 167 gptu_hwexit();
168 clk_disable(clk);
169 clk_put(clk);
172 return -ENAVAIL; 170 return -ENAVAIL;
173 } 171 }
174 172
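The gptu.c hunk drops the explicit NULL check on the memory resource because devm_ioremap_resource() performs that validation itself and returns an ERR_PTR, including for a missing resource; it also adds clk_disable()/clk_put() on the error path so the clock is not leaked. A minimal probe sketch of the idiom, with the device-specific names made up:

    static int example_probe(struct platform_device *pdev)
    {
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(base))
            return PTR_ERR(base);   /* covers res == NULL as well */

        /* Non-devm resources (clocks, hardware state) still need manual
         * unwinding on later failures, as the hunk above does. */
        return 0;
    }
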
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 8a12d00908e0..32b9f21bfd85 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -11,7 +11,6 @@
11#include <asm/page.h> 11#include <asm/page.h>
12#include <asm/pgtable.h> 12#include <asm/pgtable.h>
13#include <asm/tlbdebug.h> 13#include <asm/tlbdebug.h>
14#include <asm/mmu_context.h>
15 14
16static inline const char *msk2str(unsigned int mask) 15static inline const char *msk2str(unsigned int mask)
17{ 16{
@@ -56,7 +55,7 @@ static void dump_tlb(int first, int last)
56 s_pagemask = read_c0_pagemask(); 55 s_pagemask = read_c0_pagemask();
57 s_entryhi = read_c0_entryhi(); 56 s_entryhi = read_c0_entryhi();
58 s_index = read_c0_index(); 57 s_index = read_c0_index();
59 asid = ASID_MASK(s_entryhi); 58 asid = s_entryhi & 0xff;
60 59
61 for (i = first; i <= last; i++) { 60 for (i = first; i <= last; i++) {
62 write_c0_index(i); 61 write_c0_index(i);
@@ -86,7 +85,7 @@ static void dump_tlb(int first, int last)
86 85
87 printk("va=%0*lx asid=%02lx\n", 86 printk("va=%0*lx asid=%02lx\n",
88 width, (entryhi & ~0x1fffUL), 87 width, (entryhi & ~0x1fffUL),
89 ASID_MASK(entryhi)); 88 entryhi & 0xff);
90 printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ", 89 printk("\t[pa=%0*llx c=%d d=%d v=%d g=%d] ",
91 width, 90 width,
92 (entrylo0 << 6) & PAGE_MASK, c0, 91 (entrylo0 << 6) & PAGE_MASK, c0,
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 8327698b9937..91615c2ef0cf 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,7 +9,6 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10 10
11#include <asm/mipsregs.h> 11#include <asm/mipsregs.h>
12#include <asm/mmu_context.h>
13#include <asm/page.h> 12#include <asm/page.h>
14#include <asm/pgtable.h> 13#include <asm/pgtable.h>
15#include <asm/tlbdebug.h> 14#include <asm/tlbdebug.h>
@@ -22,7 +21,7 @@ static void dump_tlb(int first, int last)
22 unsigned int asid; 21 unsigned int asid;
23 unsigned long entryhi, entrylo0; 22 unsigned long entryhi, entrylo0;
24 23
25 asid = ASID_MASK(read_c0_entryhi()); 24 asid = read_c0_entryhi() & 0xfc0;
26 25
27 for (i = first; i <= last; i++) { 26 for (i = first; i <= last; i++) {
28 write_c0_index(i<<8); 27 write_c0_index(i<<8);
@@ -36,7 +35,7 @@ static void dump_tlb(int first, int last)
36 35
37 /* Unused entries have a virtual address of KSEG0. */ 36 /* Unused entries have a virtual address of KSEG0. */
38 if ((entryhi & 0xffffe000) != 0x80000000 37 if ((entryhi & 0xffffe000) != 0x80000000
39 && (ASID_MASK(entryhi) == asid)) { 38 && (entryhi & 0xfc0) == asid) {
40 /* 39 /*
41 * Only print entries in use 40 * Only print entries in use
42 */ 41 */
@@ -45,7 +44,7 @@ static void dump_tlb(int first, int last)
45 printk("va=%08lx asid=%08lx" 44 printk("va=%08lx asid=%08lx"
46 " [pa=%06lx n=%d d=%d v=%d g=%d]", 45 " [pa=%06lx n=%d d=%d v=%d g=%d]",
47 (entryhi & 0xffffe000), 46 (entryhi & 0xffffe000),
48 ASID_MASK(entryhi), 47 entryhi & 0xfc0,
49 entrylo0 & PAGE_MASK, 48 entrylo0 & PAGE_MASK,
50 (entrylo0 & (1 << 11)) ? 1 : 0, 49 (entrylo0 & (1 << 11)) ? 1 : 0,
51 (entrylo0 & (1 << 10)) ? 1 : 0, 50 (entrylo0 & (1 << 10)) ? 1 : 0,
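With <asm/mmu_context.h> no longer pulled in, the two dump helpers spell out the ASID field of EntryHi directly; for reference, the literal masks used in the hunks above correspond to the respective CPU families:

    #define R4K_ASID_MASK   0xff    /* dump_tlb.c: EntryHi[7:0] on R4000-class CPUs  */
    #define R3K_ASID_MASK   0xfc0   /* r3k_dump_tlb.c: EntryHi[11:6] on R3000-class CPUs */
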
diff --git a/arch/mips/loongson/common/reset.c b/arch/mips/loongson/common/reset.c
index 35c8c6468494..65bfbb5d06f4 100644
--- a/arch/mips/loongson/common/reset.c
+++ b/arch/mips/loongson/common/reset.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/pm.h> 13#include <linux/pm.h>
14 14
15#include <asm/idle.h>
15#include <asm/reboot.h> 16#include <asm/reboot.h>
16 17
17#include <loongson.h> 18#include <loongson.h>
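This and the following <asm/idle.h> additions are mechanical: the cpu_wait() idle hook these reset/halt/PM handlers poke is now declared in the new <asm/idle.h> instead of being picked up indirectly. A typical halt loop that needs the declaration (sketch only; the handler name is illustrative):

    #include <asm/idle.h>

    static void example_halt(void)
    {
        while (1) {
            if (cpu_wait)
                cpu_wait();    /* declared in <asm/idle.h> */
        }
    }
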
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
index d4f610f9604a..547f34b69e4c 100644
--- a/arch/mips/loongson1/common/reset.c
+++ b/arch/mips/loongson1/common/reset.c
@@ -9,6 +9,7 @@
9 9
10#include <linux/io.h> 10#include <linux/io.h>
11#include <linux/pm.h> 11#include <linux/pm.h>
12#include <asm/idle.h>
12#include <asm/reboot.h> 13#include <asm/reboot.h>
13 14
14#include <loongson1.h> 15#include <loongson1.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 4a13c150f31b..a63d1ed0827f 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -51,7 +51,7 @@ void local_flush_tlb_all(void)
51#endif 51#endif
52 52
53 local_irq_save(flags); 53 local_irq_save(flags);
54 old_ctx = ASID_MASK(read_c0_entryhi()); 54 old_ctx = read_c0_entryhi() & ASID_MASK;
55 write_c0_entrylo0(0); 55 write_c0_entrylo0(0);
56 entry = r3k_have_wired_reg ? read_c0_wired() : 8; 56 entry = r3k_have_wired_reg ? read_c0_wired() : 8;
57 for (; entry < current_cpu_data.tlbsize; entry++) { 57 for (; entry < current_cpu_data.tlbsize; entry++) {
@@ -87,13 +87,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
87 87
88#ifdef DEBUG_TLB 88#ifdef DEBUG_TLB
89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]", 89 printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
90 ASID_MASK(cpu_context(cpu, mm)), start, end); 90 cpu_context(cpu, mm) & ASID_MASK, start, end);
91#endif 91#endif
92 local_irq_save(flags); 92 local_irq_save(flags);
93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 93 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
94 if (size <= current_cpu_data.tlbsize) { 94 if (size <= current_cpu_data.tlbsize) {
95 int oldpid = ASID_MASK(read_c0_entryhi()); 95 int oldpid = read_c0_entryhi() & ASID_MASK;
96 int newpid = ASID_MASK(cpu_context(cpu, mm)); 96 int newpid = cpu_context(cpu, mm) & ASID_MASK;
97 97
98 start &= PAGE_MASK; 98 start &= PAGE_MASK;
99 end += PAGE_SIZE - 1; 99 end += PAGE_SIZE - 1;
@@ -166,10 +166,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
166#ifdef DEBUG_TLB 166#ifdef DEBUG_TLB
167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page); 167 printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
168#endif 168#endif
169 newpid = ASID_MASK(cpu_context(cpu, vma->vm_mm)); 169 newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
170 page &= PAGE_MASK; 170 page &= PAGE_MASK;
171 local_irq_save(flags); 171 local_irq_save(flags);
172 oldpid = ASID_MASK(read_c0_entryhi()); 172 oldpid = read_c0_entryhi() & ASID_MASK;
173 write_c0_entryhi(page | newpid); 173 write_c0_entryhi(page | newpid);
174 BARRIER; 174 BARRIER;
175 tlb_probe(); 175 tlb_probe();
@@ -197,10 +197,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
197 if (current->active_mm != vma->vm_mm) 197 if (current->active_mm != vma->vm_mm)
198 return; 198 return;
199 199
200 pid = ASID_MASK(read_c0_entryhi()); 200 pid = read_c0_entryhi() & ASID_MASK;
201 201
202#ifdef DEBUG_TLB 202#ifdef DEBUG_TLB
203 if ((pid != ASID_MASK(cpu_context(cpu, vma->vm_mm))) || (cpu_context(cpu, vma->vm_mm) == 0)) { 203 if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n", 204 printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
205 (cpu_context(cpu, vma->vm_mm)), pid); 205 (cpu_context(cpu, vma->vm_mm)), pid);
206 } 206 }
@@ -241,7 +241,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
241 241
242 local_irq_save(flags); 242 local_irq_save(flags);
243 /* Save old context and create impossible VPN2 value */ 243 /* Save old context and create impossible VPN2 value */
244 old_ctx = ASID_MASK(read_c0_entryhi()); 244 old_ctx = read_c0_entryhi() & ASID_MASK;
245 old_pagemask = read_c0_pagemask(); 245 old_pagemask = read_c0_pagemask();
246 w = read_c0_wired(); 246 w = read_c0_wired();
247 write_c0_wired(w + 1); 247 write_c0_wired(w + 1);
@@ -264,7 +264,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
264#endif 264#endif
265 265
266 local_irq_save(flags); 266 local_irq_save(flags);
267 old_ctx = ASID_MASK(read_c0_entryhi()); 267 old_ctx = read_c0_entryhi() & ASID_MASK;
268 write_c0_entrylo0(entrylo0); 268 write_c0_entrylo0(entrylo0);
269 write_c0_entryhi(entryhi); 269 write_c0_entryhi(entryhi);
270 write_c0_index(wired); 270 write_c0_index(wired);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 09653b290d53..c643de4c473a 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -287,7 +287,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
287 287
288 ENTER_CRITICAL(flags); 288 ENTER_CRITICAL(flags);
289 289
290 pid = ASID_MASK(read_c0_entryhi()); 290 pid = read_c0_entryhi() & ASID_MASK;
291 address &= (PAGE_MASK << 1); 291 address &= (PAGE_MASK << 1);
292 write_c0_entryhi(address | pid); 292 write_c0_entryhi(address | pid);
293 pgdp = pgd_offset(vma->vm_mm, address); 293 pgdp = pgd_offset(vma->vm_mm, address);
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 122f9207f49e..91c2499f806a 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -195,7 +195,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
195 if (current->active_mm != vma->vm_mm) 195 if (current->active_mm != vma->vm_mm)
196 return; 196 return;
197 197
198 pid = ASID_MASK(read_c0_entryhi()); 198 pid = read_c0_entryhi() & ASID_MASK;
199 199
200 local_irq_save(flags); 200 local_irq_save(flags);
201 address &= PAGE_MASK; 201 address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 4d46d3787576..ce9818eef7d3 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -29,7 +29,6 @@
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/cache.h> 30#include <linux/cache.h>
31 31
32#include <asm/mmu_context.h>
33#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
34#include <asm/pgtable.h> 33#include <asm/pgtable.h>
35#include <asm/war.h> 34#include <asm/war.h>
@@ -306,78 +305,6 @@ static struct uasm_reloc relocs[128] __cpuinitdata;
306static int check_for_high_segbits __cpuinitdata; 305static int check_for_high_segbits __cpuinitdata;
307#endif 306#endif
308 307
309static void __cpuinit insn_fixup(unsigned int **start, unsigned int **stop,
310 unsigned int i_const)
311{
312 unsigned int **p;
313
314 for (p = start; p < stop; p++) {
315#ifndef CONFIG_CPU_MICROMIPS
316 unsigned int *ip;
317
318 ip = *p;
319 *ip = (*ip & 0xffff0000) | i_const;
320#else
321 unsigned short *ip;
322
323 ip = ((unsigned short *)((unsigned int)*p - 1));
324 if ((*ip & 0xf000) == 0x4000) {
325 *ip &= 0xfff1;
326 *ip |= (i_const << 1);
327 } else if ((*ip & 0xf000) == 0x6000) {
328 *ip &= 0xfff1;
329 *ip |= ((i_const >> 2) << 1);
330 } else {
331 ip++;
332 *ip = i_const;
333 }
334#endif
335 local_flush_icache_range((unsigned long)ip,
336 (unsigned long)ip + sizeof(*ip));
337 }
338}
339
340#define asid_insn_fixup(section, const) \
341do { \
342 extern unsigned int *__start_ ## section; \
343 extern unsigned int *__stop_ ## section; \
344 insn_fixup(&__start_ ## section, &__stop_ ## section, const); \
345} while(0)
346
347/*
348 * Caller is assumed to flush the caches before the first context switch.
349 */
350static void __cpuinit setup_asid(unsigned int inc, unsigned int mask,
351 unsigned int version_mask,
352 unsigned int first_version)
353{
354 extern asmlinkage void handle_ri_rdhwr_vivt(void);
355 unsigned long *vivt_exc;
356
357#ifdef CONFIG_CPU_MICROMIPS
358 /*
359 * Worst case optimised microMIPS addiu instructions support
360 * only a 3-bit immediate value.
361 */
362 if(inc > 7)
363 panic("Invalid ASID increment value!");
364#endif
365 asid_insn_fixup(__asid_inc, inc);
366 asid_insn_fixup(__asid_mask, mask);
367 asid_insn_fixup(__asid_version_mask, version_mask);
368 asid_insn_fixup(__asid_first_version, first_version);
369
370 /* Patch up the 'handle_ri_rdhwr_vivt' handler. */
371 vivt_exc = (unsigned long *) &handle_ri_rdhwr_vivt;
372#ifdef CONFIG_CPU_MICROMIPS
373 vivt_exc = (unsigned long *)((unsigned long) vivt_exc - 1);
374#endif
375 vivt_exc++;
376 *vivt_exc = (*vivt_exc & ~mask) | mask;
377
378 current_cpu_data.asid_cache = first_version;
379}
380
381static int check_for_high_segbits __cpuinitdata; 308static int check_for_high_segbits __cpuinitdata;
382 309
383static unsigned int kscratch_used_mask __cpuinitdata; 310static unsigned int kscratch_used_mask __cpuinitdata;
@@ -2256,7 +2183,6 @@ void __cpuinit build_tlb_refill_handler(void)
2256 case CPU_TX3922: 2183 case CPU_TX3922:
2257 case CPU_TX3927: 2184 case CPU_TX3927:
2258#ifndef CONFIG_MIPS_PGD_C0_CONTEXT 2185#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
2259 setup_asid(0x40, 0xfc0, 0xf000, ASID_FIRST_VERSION_R3000);
2260 if (cpu_has_local_ebase) 2186 if (cpu_has_local_ebase)
2261 build_r3000_tlb_refill_handler(); 2187 build_r3000_tlb_refill_handler();
2262 if (!run_once) { 2188 if (!run_once) {
@@ -2282,11 +2208,6 @@ void __cpuinit build_tlb_refill_handler(void)
2282 break; 2208 break;
2283 2209
2284 default: 2210 default:
2285#ifndef CONFIG_MIPS_MT_SMTC
2286 setup_asid(0x1, 0xff, 0xff00, ASID_FIRST_VERSION_R4000);
2287#else
2288 setup_asid(0x1, smtc_asid_mask, 0xff00, ASID_FIRST_VERSION_R4000);
2289#endif
2290 if (!run_once) { 2211 if (!run_once) {
2291 scratch_reg = allocate_kscratch(); 2212 scratch_reg = allocate_kscratch();
2292#ifdef CONFIG_MIPS_PGD_C0_CONTEXT 2213#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index af319143b591..eaa99d28cb8e 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -37,6 +37,7 @@
37#include <linux/pm.h> 37#include <linux/pm.h>
38#include <linux/bootmem.h> 38#include <linux/bootmem.h>
39 39
40#include <asm/idle.h>
40#include <asm/reboot.h> 41#include <asm/reboot.h>
41#include <asm/time.h> 42#include <asm/time.h>
42#include <asm/bootinfo.h> 43#include <asm/bootinfo.h>
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index e3e094100e3e..89c8c1066632 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -36,6 +36,7 @@
36#include <linux/serial_8250.h> 36#include <linux/serial_8250.h>
37#include <linux/pm.h> 37#include <linux/pm.h>
38 38
39#include <asm/idle.h>
39#include <asm/reboot.h> 40#include <asm/reboot.h>
40#include <asm/time.h> 41#include <asm/time.h>
41#include <asm/bootinfo.h> 42#include <asm/bootinfo.h>
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index 0edb89a63516..1c9897531660 100644
--- a/arch/mips/pmcs-msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -83,7 +83,7 @@ static inline unsigned char str2hexnum(unsigned char c)
83 return 0; /* foo */ 83 return 0; /* foo */
84} 84}
85 85
86static inline int str2eaddr(unsigned char *ea, unsigned char *str) 86int str2eaddr(unsigned char *ea, unsigned char *str)
87{ 87{
88 int index = 0; 88 int index = 0;
89 unsigned char num = 0; 89 unsigned char num = 0;
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 1651cfdbfe7b..396b2967ad85 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -12,6 +12,7 @@
12 12
13#include <asm/bootinfo.h> 13#include <asm/bootinfo.h>
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/idle.h>
15#include <asm/r4kcache.h> 16#include <asm/r4kcache.h>
16#include <asm/reboot.h> 17#include <asm/reboot.h>
17#include <asm/smp-ops.h> 18#include <asm/smp-ops.h>
diff --git a/arch/mips/ralink/dts/rt3050.dtsi b/arch/mips/ralink/dts/rt3050.dtsi
index ef7da1e227e6..e3203d414fee 100644
--- a/arch/mips/ralink/dts/rt3050.dtsi
+++ b/arch/mips/ralink/dts/rt3050.dtsi
@@ -55,4 +55,14 @@
55 reg-shift = <2>; 55 reg-shift = <2>;
56 }; 56 };
57 }; 57 };
58
59 usb@101c0000 {
60 compatible = "ralink,rt3050-usb", "snps,dwc2";
61 reg = <0x101c0000 40000>;
62
63 interrupt-parent = <&intc>;
64 interrupts = <18>;
65
66 status = "disabled";
67 };
58}; 68};
diff --git a/arch/mips/ralink/dts/rt3052_eval.dts b/arch/mips/ralink/dts/rt3052_eval.dts
index c18c9a84f4c4..0ac73ea28198 100644
--- a/arch/mips/ralink/dts/rt3052_eval.dts
+++ b/arch/mips/ralink/dts/rt3052_eval.dts
@@ -43,4 +43,8 @@
43 reg = <0x50000 0x7b0000>; 43 reg = <0x50000 0x7b0000>;
44 }; 44 };
45 }; 45 };
46
47 usb@101c0000 {
48 status = "ok";
49 };
46}; 50};
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 5364aabc2102..681e7f86c080 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -26,6 +26,7 @@
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/irq.h> 27#include <linux/irq.h>
28#include <asm/bootinfo.h> 28#include <asm/bootinfo.h>
29#include <asm/idle.h>
29#include <asm/time.h> 30#include <asm/time.h>
30#include <asm/reboot.h> 31#include <asm/reboot.h>
31#include <asm/r4kcache.h> 32#include <asm/r4kcache.h>
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index 70a3f90131d8..d7f755833c3f 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -27,6 +27,7 @@
27 27
28#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
29#include <asm/cpu.h> 29#include <asm/cpu.h>
30#include <asm/idle.h>
30#include <asm/io.h> 31#include <asm/io.h>
31#include <asm/processor.h> 32#include <asm/processor.h>
32#include <asm/reboot.h> 33#include <asm/reboot.h>
diff --git a/arch/mips/wrppmc/reset.c b/arch/mips/wrppmc/reset.c
index cc5474b24f06..80beb188ed47 100644
--- a/arch/mips/wrppmc/reset.c
+++ b/arch/mips/wrppmc/reset.c
@@ -9,6 +9,7 @@
9#include <linux/kernel.h> 9#include <linux/kernel.h>
10 10
11#include <asm/cacheflush.h> 11#include <asm/cacheflush.h>
12#include <asm/idle.h>
12#include <asm/mipsregs.h> 13#include <asm/mipsregs.h>
13#include <asm/processor.h> 14#include <asm/processor.h>
14 15
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index cad060f288cf..6507dabdd5dd 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -245,7 +245,7 @@ config SMP
245 245
246config IRQSTACKS 246config IRQSTACKS
247 bool "Use separate kernel stacks when processing interrupts" 247 bool "Use separate kernel stacks when processing interrupts"
248 default n 248 default y
249 help 249 help
250 If you say Y here the kernel will use separate kernel stacks 250 If you say Y here the kernel will use separate kernel stacks
251 for handling hard and soft interrupts. This can help avoid 251 for handling hard and soft interrupts. This can help avoid
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 2f967cc6649e..197690068f88 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -23,24 +23,21 @@ NM = sh $(srctree)/arch/parisc/nm
23CHECKFLAGS += -D__hppa__=1 23CHECKFLAGS += -D__hppa__=1
24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name) 24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
25 25
26MACHINE := $(shell uname -m)
27NATIVE := $(if $(filter parisc%,$(MACHINE)),1,0)
28
29ifdef CONFIG_64BIT 26ifdef CONFIG_64BIT
30UTS_MACHINE := parisc64 27UTS_MACHINE := parisc64
31CHECKFLAGS += -D__LP64__=1 -m64 28CHECKFLAGS += -D__LP64__=1 -m64
32WIDTH := 64 29CC_ARCHES = hppa64
33else # 32-bit 30else # 32-bit
34WIDTH := 31CC_ARCHES = hppa hppa2.0 hppa1.1
35endif 32endif
36 33
37# attempt to help out folks who are cross-compiling 34ifneq ($(SUBARCH),$(UTS_MACHINE))
38ifeq ($(NATIVE),1) 35 ifeq ($(CROSS_COMPILE),)
39CROSS_COMPILE := hppa$(WIDTH)-linux- 36 CC_SUFFIXES = linux linux-gnu unknown-linux-gnu
40else 37 CROSS_COMPILE := $(call cc-cross-prefix, \
41 ifeq ($(CROSS_COMPILE),) 38 $(foreach a,$(CC_ARCHES), \
42 CROSS_COMPILE := hppa$(WIDTH)-linux-gnu- 39 $(foreach s,$(CC_SUFFIXES),$(a)-$(s)-)))
43 endif 40 endif
44endif 41endif
45 42
46OBJCOPY_FLAGS =-O binary -R .note -R .comment -S 43OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
diff --git a/arch/parisc/include/asm/assembly.h b/arch/parisc/include/asm/assembly.h
index 89fb40005e3f..0da848232344 100644
--- a/arch/parisc/include/asm/assembly.h
+++ b/arch/parisc/include/asm/assembly.h
@@ -438,7 +438,6 @@
438 SAVE_SP (%sr4, PT_SR4 (\regs)) 438 SAVE_SP (%sr4, PT_SR4 (\regs))
439 SAVE_SP (%sr5, PT_SR5 (\regs)) 439 SAVE_SP (%sr5, PT_SR5 (\regs))
440 SAVE_SP (%sr6, PT_SR6 (\regs)) 440 SAVE_SP (%sr6, PT_SR6 (\regs))
441 SAVE_SP (%sr7, PT_SR7 (\regs))
442 441
443 SAVE_CR (%cr17, PT_IASQ0(\regs)) 442 SAVE_CR (%cr17, PT_IASQ0(\regs))
444 mtctl %r0, %cr17 443 mtctl %r0, %cr17
diff --git a/arch/parisc/include/asm/hardirq.h b/arch/parisc/include/asm/hardirq.h
index 12373c4dabab..241c34518465 100644
--- a/arch/parisc/include/asm/hardirq.h
+++ b/arch/parisc/include/asm/hardirq.h
@@ -11,15 +11,20 @@
11#include <linux/threads.h> 11#include <linux/threads.h>
12#include <linux/irq.h> 12#include <linux/irq.h>
13 13
14#ifdef CONFIG_IRQSTACKS
15#define __ARCH_HAS_DO_SOFTIRQ
16#endif
17
14typedef struct { 18typedef struct {
15 unsigned int __softirq_pending; 19 unsigned int __softirq_pending;
16#ifdef CONFIG_DEBUG_STACKOVERFLOW
17 unsigned int kernel_stack_usage; 20 unsigned int kernel_stack_usage;
18#endif 21 unsigned int irq_stack_usage;
19#ifdef CONFIG_SMP 22#ifdef CONFIG_SMP
20 unsigned int irq_resched_count; 23 unsigned int irq_resched_count;
21 unsigned int irq_call_count; 24 unsigned int irq_call_count;
22#endif 25#endif
26 unsigned int irq_unaligned_count;
27 unsigned int irq_fpassist_count;
23 unsigned int irq_tlb_count; 28 unsigned int irq_tlb_count;
24} ____cacheline_aligned irq_cpustat_t; 29} ____cacheline_aligned irq_cpustat_t;
25 30
@@ -28,6 +33,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
28#define __ARCH_IRQ_STAT 33#define __ARCH_IRQ_STAT
29#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member) 34#define __IRQ_STAT(cpu, member) (irq_stat[cpu].member)
30#define inc_irq_stat(member) this_cpu_inc(irq_stat.member) 35#define inc_irq_stat(member) this_cpu_inc(irq_stat.member)
36#define __inc_irq_stat(member) __this_cpu_inc(irq_stat.member)
31#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending) 37#define local_softirq_pending() this_cpu_read(irq_stat.__softirq_pending)
32 38
33#define __ARCH_SET_SOFTIRQ_PENDING 39#define __ARCH_SET_SOFTIRQ_PENDING
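The hardirq.h hunk grows irq_cpustat_t with counters for IRQ-stack usage, unaligned-access traps and FP-assist traps, and adds __inc_irq_stat() as a non-atomic __this_cpu variant of inc_irq_stat() for callers that already run on the local CPU. Usage sketch matching the traps.c and unaligned.c hunks further down (the handler name is illustrative):

    static void example_trap_handler(struct pt_regs *regs)
    {
        __inc_irq_stat(irq_unaligned_count);   /* cheap per-CPU bump */
        /* ... actual trap handling ... */
    }
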
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 064015547d1e..cc2290a3cace 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -17,7 +17,6 @@
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18#include <asm/types.h> 18#include <asm/types.h>
19#include <asm/percpu.h> 19#include <asm/percpu.h>
20
21#endif /* __ASSEMBLY__ */ 20#endif /* __ASSEMBLY__ */
22 21
23/* 22/*
@@ -59,23 +58,6 @@
59#ifndef __ASSEMBLY__ 58#ifndef __ASSEMBLY__
60 59
61/* 60/*
62 * IRQ STACK - used for irq handler
63 */
64#ifdef __KERNEL__
65
66#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
67
68union irq_stack_union {
69 unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
70};
71
72DECLARE_PER_CPU(union irq_stack_union, irq_stack_union);
73
74void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
75
76#endif /* __KERNEL__ */
77
78/*
79 * Data detected about CPUs at boot time which is the same for all CPU's. 61 * Data detected about CPUs at boot time which is the same for all CPU's.
80 * HP boxes are SMP - ie identical processors. 62 * HP boxes are SMP - ie identical processors.
81 * 63 *
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 4bb96ad9b0b1..e8f07dd28401 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -65,15 +65,11 @@
65 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */ 65 rsm PSW_SM_I, %r0 /* barrier for "Relied upon Translation */
66 mtsp %r0, %sr4 66 mtsp %r0, %sr4
67 mtsp %r0, %sr5 67 mtsp %r0, %sr5
68 mfsp %sr7, %r1 68 mtsp %r0, %sr6
69 or,= %r0,%r1,%r0 /* Only save sr7 in sr3 if sr7 != 0 */
70 mtsp %r1, %sr3
71 tovirt_r1 %r29 69 tovirt_r1 %r29
72 load32 KERNEL_PSW, %r1 70 load32 KERNEL_PSW, %r1
73 71
74 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */ 72 rsm PSW_SM_QUIET,%r0 /* second "heavy weight" ctl op */
75 mtsp %r0, %sr6
76 mtsp %r0, %sr7
77 mtctl %r0, %cr17 /* Clear IIASQ tail */ 73 mtctl %r0, %cr17 /* Clear IIASQ tail */
78 mtctl %r0, %cr17 /* Clear IIASQ head */ 74 mtctl %r0, %cr17 /* Clear IIASQ head */
79 mtctl %r1, %ipsw 75 mtctl %r1, %ipsw
@@ -119,17 +115,20 @@
119 115
120 /* we save the registers in the task struct */ 116 /* we save the registers in the task struct */
121 117
118 copy %r30, %r17
122 mfctl %cr30, %r1 119 mfctl %cr30, %r1
120 ldo THREAD_SZ_ALGN(%r1), %r30
121 mtsp %r0,%sr7
122 mtsp %r16,%sr3
123 tophys %r1,%r9 123 tophys %r1,%r9
124 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */ 124 LDREG TI_TASK(%r9), %r1 /* thread_info -> task_struct */
125 tophys %r1,%r9 125 tophys %r1,%r9
126 ldo TASK_REGS(%r9),%r9 126 ldo TASK_REGS(%r9),%r9
127 STREG %r30, PT_GR30(%r9) 127 STREG %r17,PT_GR30(%r9)
128 STREG %r29,PT_GR29(%r9) 128 STREG %r29,PT_GR29(%r9)
129 STREG %r26,PT_GR26(%r9) 129 STREG %r26,PT_GR26(%r9)
130 STREG %r16,PT_SR7(%r9)
130 copy %r9,%r29 131 copy %r9,%r29
131 mfctl %cr30, %r1
132 ldo THREAD_SZ_ALGN(%r1), %r30
133 .endm 132 .endm
134 133
135 .macro get_stack_use_r30 134 .macro get_stack_use_r30
@@ -137,10 +136,12 @@
137 /* we put a struct pt_regs on the stack and save the registers there */ 136 /* we put a struct pt_regs on the stack and save the registers there */
138 137
139 tophys %r30,%r9 138 tophys %r30,%r9
140 STREG %r30,PT_GR30(%r9) 139 copy %r30,%r1
141 ldo PT_SZ_ALGN(%r30),%r30 140 ldo PT_SZ_ALGN(%r30),%r30
141 STREG %r1,PT_GR30(%r9)
142 STREG %r29,PT_GR29(%r9) 142 STREG %r29,PT_GR29(%r9)
143 STREG %r26,PT_GR26(%r9) 143 STREG %r26,PT_GR26(%r9)
144 STREG %r16,PT_SR7(%r9)
144 copy %r9,%r29 145 copy %r9,%r29
145 .endm 146 .endm
146 147
@@ -452,9 +453,41 @@
452 L2_ptep \pgd,\pte,\index,\va,\fault 453 L2_ptep \pgd,\pte,\index,\va,\fault
453 .endm 454 .endm
454 455
456 /* Acquire pa_dbit_lock lock. */
457 .macro dbit_lock spc,tmp,tmp1
458#ifdef CONFIG_SMP
459 cmpib,COND(=),n 0,\spc,2f
460 load32 PA(pa_dbit_lock),\tmp
4611: LDCW 0(\tmp),\tmp1
462 cmpib,COND(=) 0,\tmp1,1b
463 nop
4642:
465#endif
466 .endm
467
468 /* Release pa_dbit_lock lock without reloading lock address. */
469 .macro dbit_unlock0 spc,tmp
470#ifdef CONFIG_SMP
471 or,COND(=) %r0,\spc,%r0
472 stw \spc,0(\tmp)
473#endif
474 .endm
475
476 /* Release pa_dbit_lock lock. */
477 .macro dbit_unlock1 spc,tmp
478#ifdef CONFIG_SMP
479 load32 PA(pa_dbit_lock),\tmp
480 dbit_unlock0 \spc,\tmp
481#endif
482 .endm
483
455 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and 484 /* Set the _PAGE_ACCESSED bit of the PTE. Be clever and
456 * don't needlessly dirty the cache line if it was already set */ 485 * don't needlessly dirty the cache line if it was already set */
457 .macro update_ptep ptep,pte,tmp,tmp1 486 .macro update_ptep spc,ptep,pte,tmp,tmp1
487#ifdef CONFIG_SMP
488 or,COND(=) %r0,\spc,%r0
489 LDREG 0(\ptep),\pte
490#endif
458 ldi _PAGE_ACCESSED,\tmp1 491 ldi _PAGE_ACCESSED,\tmp1
459 or \tmp1,\pte,\tmp 492 or \tmp1,\pte,\tmp
460 and,COND(<>) \tmp1,\pte,%r0 493 and,COND(<>) \tmp1,\pte,%r0
@@ -463,7 +496,11 @@
463 496
464 /* Set the dirty bit (and accessed bit). No need to be 497 /* Set the dirty bit (and accessed bit). No need to be
465 * clever, this is only used from the dirty fault */ 498 * clever, this is only used from the dirty fault */
466 .macro update_dirty ptep,pte,tmp 499 .macro update_dirty spc,ptep,pte,tmp
500#ifdef CONFIG_SMP
501 or,COND(=) %r0,\spc,%r0
502 LDREG 0(\ptep),\pte
503#endif
467 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp 504 ldi _PAGE_ACCESSED|_PAGE_DIRTY,\tmp
468 or \tmp,\pte,\pte 505 or \tmp,\pte,\pte
469 STREG \pte,0(\ptep) 506 STREG \pte,0(\ptep)
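The new dbit_lock/dbit_unlock0/dbit_unlock1 macros take pa_dbit_lock around the PTE update in every TLB-miss path on SMP (the lock is skipped when spc is zero, i.e. for kernel-space faults), and update_ptep/update_dirty now re-read the PTE under that lock. Roughly the same sequence expressed in C, as an illustration only; the authoritative version is the assembler above, and insert_tlb_entry() is a hypothetical stand-in for the idtlbt/iitlbt step:

    static void dbit_fault_sketch(pte_t *ptep)
    {
        pte_t pte;

        spin_lock(&pa_dbit_lock);              /* dbit_lock                    */
        pte = *ptep;                           /* re-read PTE under the lock   */
        pte = __pte(pte_val(pte) | _PAGE_ACCESSED | _PAGE_DIRTY);  /* update_dirty */
        set_pte(ptep, pte);
        insert_tlb_entry(pte);                 /* hypothetical: idtlbt pte,prot */
        spin_unlock(&pa_dbit_lock);            /* dbit_unlock0                 */
    }
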
@@ -1111,11 +1148,13 @@ dtlb_miss_20w:
1111 1148
1112 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w 1149 L3_ptep ptp,pte,t0,va,dtlb_check_alias_20w
1113 1150
1114 update_ptep ptp,pte,t0,t1 1151 dbit_lock spc,t0,t1
1152 update_ptep spc,ptp,pte,t0,t1
1115 1153
1116 make_insert_tlb spc,pte,prot 1154 make_insert_tlb spc,pte,prot
1117 1155
1118 idtlbt pte,prot 1156 idtlbt pte,prot
1157 dbit_unlock1 spc,t0
1119 1158
1120 rfir 1159 rfir
1121 nop 1160 nop
@@ -1135,11 +1174,13 @@ nadtlb_miss_20w:
1135 1174
1136 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w 1175 L3_ptep ptp,pte,t0,va,nadtlb_check_alias_20w
1137 1176
1138 update_ptep ptp,pte,t0,t1 1177 dbit_lock spc,t0,t1
1178 update_ptep spc,ptp,pte,t0,t1
1139 1179
1140 make_insert_tlb spc,pte,prot 1180 make_insert_tlb spc,pte,prot
1141 1181
1142 idtlbt pte,prot 1182 idtlbt pte,prot
1183 dbit_unlock1 spc,t0
1143 1184
1144 rfir 1185 rfir
1145 nop 1186 nop
@@ -1161,7 +1202,8 @@ dtlb_miss_11:
1161 1202
1162 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11 1203 L2_ptep ptp,pte,t0,va,dtlb_check_alias_11
1163 1204
1164 update_ptep ptp,pte,t0,t1 1205 dbit_lock spc,t0,t1
1206 update_ptep spc,ptp,pte,t0,t1
1165 1207
1166 make_insert_tlb_11 spc,pte,prot 1208 make_insert_tlb_11 spc,pte,prot
1167 1209
@@ -1172,6 +1214,7 @@ dtlb_miss_11:
1172 idtlbp prot,(%sr1,va) 1214 idtlbp prot,(%sr1,va)
1173 1215
1174 mtsp t0, %sr1 /* Restore sr1 */ 1216 mtsp t0, %sr1 /* Restore sr1 */
1217 dbit_unlock1 spc,t0
1175 1218
1176 rfir 1219 rfir
1177 nop 1220 nop
@@ -1192,7 +1235,8 @@ nadtlb_miss_11:
1192 1235
1193 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11 1236 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_11
1194 1237
1195 update_ptep ptp,pte,t0,t1 1238 dbit_lock spc,t0,t1
1239 update_ptep spc,ptp,pte,t0,t1
1196 1240
1197 make_insert_tlb_11 spc,pte,prot 1241 make_insert_tlb_11 spc,pte,prot
1198 1242
@@ -1204,6 +1248,7 @@ nadtlb_miss_11:
1204 idtlbp prot,(%sr1,va) 1248 idtlbp prot,(%sr1,va)
1205 1249
1206 mtsp t0, %sr1 /* Restore sr1 */ 1250 mtsp t0, %sr1 /* Restore sr1 */
1251 dbit_unlock1 spc,t0
1207 1252
1208 rfir 1253 rfir
1209 nop 1254 nop
@@ -1224,13 +1269,15 @@ dtlb_miss_20:
1224 1269
1225 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20 1270 L2_ptep ptp,pte,t0,va,dtlb_check_alias_20
1226 1271
1227 update_ptep ptp,pte,t0,t1 1272 dbit_lock spc,t0,t1
1273 update_ptep spc,ptp,pte,t0,t1
1228 1274
1229 make_insert_tlb spc,pte,prot 1275 make_insert_tlb spc,pte,prot
1230 1276
1231 f_extend pte,t0 1277 f_extend pte,t0
1232 1278
1233 idtlbt pte,prot 1279 idtlbt pte,prot
1280 dbit_unlock1 spc,t0
1234 1281
1235 rfir 1282 rfir
1236 nop 1283 nop
@@ -1250,13 +1297,15 @@ nadtlb_miss_20:
1250 1297
1251 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20 1298 L2_ptep ptp,pte,t0,va,nadtlb_check_alias_20
1252 1299
1253 update_ptep ptp,pte,t0,t1 1300 dbit_lock spc,t0,t1
1301 update_ptep spc,ptp,pte,t0,t1
1254 1302
1255 make_insert_tlb spc,pte,prot 1303 make_insert_tlb spc,pte,prot
1256 1304
1257 f_extend pte,t0 1305 f_extend pte,t0
1258 1306
1259 idtlbt pte,prot 1307 idtlbt pte,prot
1308 dbit_unlock1 spc,t0
1260 1309
1261 rfir 1310 rfir
1262 nop 1311 nop
@@ -1357,11 +1406,13 @@ itlb_miss_20w:
1357 1406
1358 L3_ptep ptp,pte,t0,va,itlb_fault 1407 L3_ptep ptp,pte,t0,va,itlb_fault
1359 1408
1360 update_ptep ptp,pte,t0,t1 1409 dbit_lock spc,t0,t1
1410 update_ptep spc,ptp,pte,t0,t1
1361 1411
1362 make_insert_tlb spc,pte,prot 1412 make_insert_tlb spc,pte,prot
1363 1413
1364 iitlbt pte,prot 1414 iitlbt pte,prot
1415 dbit_unlock1 spc,t0
1365 1416
1366 rfir 1417 rfir
1367 nop 1418 nop
@@ -1379,11 +1430,13 @@ naitlb_miss_20w:
1379 1430
1380 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w 1431 L3_ptep ptp,pte,t0,va,naitlb_check_alias_20w
1381 1432
1382 update_ptep ptp,pte,t0,t1 1433 dbit_lock spc,t0,t1
1434 update_ptep spc,ptp,pte,t0,t1
1383 1435
1384 make_insert_tlb spc,pte,prot 1436 make_insert_tlb spc,pte,prot
1385 1437
1386 iitlbt pte,prot 1438 iitlbt pte,prot
1439 dbit_unlock1 spc,t0
1387 1440
1388 rfir 1441 rfir
1389 nop 1442 nop
@@ -1405,7 +1458,8 @@ itlb_miss_11:
1405 1458
1406 L2_ptep ptp,pte,t0,va,itlb_fault 1459 L2_ptep ptp,pte,t0,va,itlb_fault
1407 1460
1408 update_ptep ptp,pte,t0,t1 1461 dbit_lock spc,t0,t1
1462 update_ptep spc,ptp,pte,t0,t1
1409 1463
1410 make_insert_tlb_11 spc,pte,prot 1464 make_insert_tlb_11 spc,pte,prot
1411 1465
@@ -1416,6 +1470,7 @@ itlb_miss_11:
1416 iitlbp prot,(%sr1,va) 1470 iitlbp prot,(%sr1,va)
1417 1471
1418 mtsp t0, %sr1 /* Restore sr1 */ 1472 mtsp t0, %sr1 /* Restore sr1 */
1473 dbit_unlock1 spc,t0
1419 1474
1420 rfir 1475 rfir
1421 nop 1476 nop
@@ -1427,7 +1482,8 @@ naitlb_miss_11:
1427 1482
1428 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11 1483 L2_ptep ptp,pte,t0,va,naitlb_check_alias_11
1429 1484
1430 update_ptep ptp,pte,t0,t1 1485 dbit_lock spc,t0,t1
1486 update_ptep spc,ptp,pte,t0,t1
1431 1487
1432 make_insert_tlb_11 spc,pte,prot 1488 make_insert_tlb_11 spc,pte,prot
1433 1489
@@ -1438,6 +1494,7 @@ naitlb_miss_11:
1438 iitlbp prot,(%sr1,va) 1494 iitlbp prot,(%sr1,va)
1439 1495
1440 mtsp t0, %sr1 /* Restore sr1 */ 1496 mtsp t0, %sr1 /* Restore sr1 */
1497 dbit_unlock1 spc,t0
1441 1498
1442 rfir 1499 rfir
1443 nop 1500 nop
@@ -1459,13 +1516,15 @@ itlb_miss_20:
1459 1516
1460 L2_ptep ptp,pte,t0,va,itlb_fault 1517 L2_ptep ptp,pte,t0,va,itlb_fault
1461 1518
1462 update_ptep ptp,pte,t0,t1 1519 dbit_lock spc,t0,t1
1520 update_ptep spc,ptp,pte,t0,t1
1463 1521
1464 make_insert_tlb spc,pte,prot 1522 make_insert_tlb spc,pte,prot
1465 1523
1466 f_extend pte,t0 1524 f_extend pte,t0
1467 1525
1468 iitlbt pte,prot 1526 iitlbt pte,prot
1527 dbit_unlock1 spc,t0
1469 1528
1470 rfir 1529 rfir
1471 nop 1530 nop
@@ -1477,13 +1536,15 @@ naitlb_miss_20:
1477 1536
1478 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20 1537 L2_ptep ptp,pte,t0,va,naitlb_check_alias_20
1479 1538
1480 update_ptep ptp,pte,t0,t1 1539 dbit_lock spc,t0,t1
1540 update_ptep spc,ptp,pte,t0,t1
1481 1541
1482 make_insert_tlb spc,pte,prot 1542 make_insert_tlb spc,pte,prot
1483 1543
1484 f_extend pte,t0 1544 f_extend pte,t0
1485 1545
1486 iitlbt pte,prot 1546 iitlbt pte,prot
1547 dbit_unlock1 spc,t0
1487 1548
1488 rfir 1549 rfir
1489 nop 1550 nop
@@ -1507,29 +1568,13 @@ dbit_trap_20w:
1507 1568
1508 L3_ptep ptp,pte,t0,va,dbit_fault 1569 L3_ptep ptp,pte,t0,va,dbit_fault
1509 1570
1510#ifdef CONFIG_SMP 1571 dbit_lock spc,t0,t1
1511 cmpib,COND(=),n 0,spc,dbit_nolock_20w 1572 update_dirty spc,ptp,pte,t1
1512 load32 PA(pa_dbit_lock),t0
1513
1514dbit_spin_20w:
1515 LDCW 0(t0),t1
1516 cmpib,COND(=) 0,t1,dbit_spin_20w
1517 nop
1518
1519dbit_nolock_20w:
1520#endif
1521 update_dirty ptp,pte,t1
1522 1573
1523 make_insert_tlb spc,pte,prot 1574 make_insert_tlb spc,pte,prot
1524 1575
1525 idtlbt pte,prot 1576 idtlbt pte,prot
1526#ifdef CONFIG_SMP 1577 dbit_unlock0 spc,t0
1527 cmpib,COND(=),n 0,spc,dbit_nounlock_20w
1528 ldi 1,t1
1529 stw t1,0(t0)
1530
1531dbit_nounlock_20w:
1532#endif
1533 1578
1534 rfir 1579 rfir
1535 nop 1580 nop
@@ -1543,18 +1588,8 @@ dbit_trap_11:
1543 1588
1544 L2_ptep ptp,pte,t0,va,dbit_fault 1589 L2_ptep ptp,pte,t0,va,dbit_fault
1545 1590
1546#ifdef CONFIG_SMP 1591 dbit_lock spc,t0,t1
1547 cmpib,COND(=),n 0,spc,dbit_nolock_11 1592 update_dirty spc,ptp,pte,t1
1548 load32 PA(pa_dbit_lock),t0
1549
1550dbit_spin_11:
1551 LDCW 0(t0),t1
1552 cmpib,= 0,t1,dbit_spin_11
1553 nop
1554
1555dbit_nolock_11:
1556#endif
1557 update_dirty ptp,pte,t1
1558 1593
1559 make_insert_tlb_11 spc,pte,prot 1594 make_insert_tlb_11 spc,pte,prot
1560 1595
@@ -1565,13 +1600,7 @@ dbit_nolock_11:
1565 idtlbp prot,(%sr1,va) 1600 idtlbp prot,(%sr1,va)
1566 1601
1567 mtsp t1, %sr1 /* Restore sr1 */ 1602 mtsp t1, %sr1 /* Restore sr1 */
1568#ifdef CONFIG_SMP 1603 dbit_unlock0 spc,t0
1569 cmpib,COND(=),n 0,spc,dbit_nounlock_11
1570 ldi 1,t1
1571 stw t1,0(t0)
1572
1573dbit_nounlock_11:
1574#endif
1575 1604
1576 rfir 1605 rfir
1577 nop 1606 nop
@@ -1583,32 +1612,15 @@ dbit_trap_20:
1583 1612
1584 L2_ptep ptp,pte,t0,va,dbit_fault 1613 L2_ptep ptp,pte,t0,va,dbit_fault
1585 1614
1586#ifdef CONFIG_SMP 1615 dbit_lock spc,t0,t1
1587 cmpib,COND(=),n 0,spc,dbit_nolock_20 1616 update_dirty spc,ptp,pte,t1
1588 load32 PA(pa_dbit_lock),t0
1589
1590dbit_spin_20:
1591 LDCW 0(t0),t1
1592 cmpib,= 0,t1,dbit_spin_20
1593 nop
1594
1595dbit_nolock_20:
1596#endif
1597 update_dirty ptp,pte,t1
1598 1617
1599 make_insert_tlb spc,pte,prot 1618 make_insert_tlb spc,pte,prot
1600 1619
1601 f_extend pte,t1 1620 f_extend pte,t1
1602 1621
1603 idtlbt pte,prot 1622 idtlbt pte,prot
1604 1623 dbit_unlock0 spc,t0
1605#ifdef CONFIG_SMP
1606 cmpib,COND(=),n 0,spc,dbit_nounlock_20
1607 ldi 1,t1
1608 stw t1,0(t0)
1609
1610dbit_nounlock_20:
1611#endif
1612 1624
1613 rfir 1625 rfir
1614 nop 1626 nop
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index f7752f6af29e..9e2d2e408529 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -222,6 +222,7 @@ static struct hp_hardware hp_hardware_list[] = {
222 {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"}, 222 {HPHW_NPROC,0x5DD,0x4,0x81,"Duet W2"},
223 {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"}, 223 {HPHW_NPROC,0x5DE,0x4,0x81,"Piccolo W+"},
224 {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"}, 224 {HPHW_NPROC,0x5DF,0x4,0x81,"Cantata W2"},
225 {HPHW_NPROC,0x5DF,0x0,0x00,"Marcato W+? (rp5470)"},
225 {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"}, 226 {HPHW_NPROC,0x5E0,0x4,0x91,"Cantata DC- W2"},
226 {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"}, 227 {HPHW_NPROC,0x5E1,0x4,0x91,"Crescendo DC- W2"},
227 {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"}, 228 {HPHW_NPROC,0x5E2,0x4,0x91,"Crescendo 650 W2"},
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index e255db0bb761..2e6443b1e922 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -27,11 +27,11 @@
27#include <linux/interrupt.h> 27#include <linux/interrupt.h>
28#include <linux/kernel_stat.h> 28#include <linux/kernel_stat.h>
29#include <linux/seq_file.h> 29#include <linux/seq_file.h>
30#include <linux/spinlock.h>
31#include <linux/types.h> 30#include <linux/types.h>
32#include <asm/io.h> 31#include <asm/io.h>
33 32
34#include <asm/smp.h> 33#include <asm/smp.h>
34#include <asm/ldcw.h>
35 35
36#undef PARISC_IRQ_CR16_COUNTS 36#undef PARISC_IRQ_CR16_COUNTS
37 37
@@ -166,22 +166,36 @@ int arch_show_interrupts(struct seq_file *p, int prec)
166 seq_printf(p, "%*s: ", prec, "STK"); 166 seq_printf(p, "%*s: ", prec, "STK");
167 for_each_online_cpu(j) 167 for_each_online_cpu(j)
168 seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage); 168 seq_printf(p, "%10u ", irq_stats(j)->kernel_stack_usage);
169 seq_printf(p, " Kernel stack usage\n"); 169 seq_puts(p, " Kernel stack usage\n");
170# ifdef CONFIG_IRQSTACKS
171 seq_printf(p, "%*s: ", prec, "IST");
172 for_each_online_cpu(j)
173 seq_printf(p, "%10u ", irq_stats(j)->irq_stack_usage);
174 seq_puts(p, " Interrupt stack usage\n");
175# endif
170#endif 176#endif
171#ifdef CONFIG_SMP 177#ifdef CONFIG_SMP
172 seq_printf(p, "%*s: ", prec, "RES"); 178 seq_printf(p, "%*s: ", prec, "RES");
173 for_each_online_cpu(j) 179 for_each_online_cpu(j)
174 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count); 180 seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
175 seq_printf(p, " Rescheduling interrupts\n"); 181 seq_puts(p, " Rescheduling interrupts\n");
176 seq_printf(p, "%*s: ", prec, "CAL"); 182 seq_printf(p, "%*s: ", prec, "CAL");
177 for_each_online_cpu(j) 183 for_each_online_cpu(j)
178 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count); 184 seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
179 seq_printf(p, " Function call interrupts\n"); 185 seq_puts(p, " Function call interrupts\n");
180#endif 186#endif
187 seq_printf(p, "%*s: ", prec, "UAH");
188 for_each_online_cpu(j)
189 seq_printf(p, "%10u ", irq_stats(j)->irq_unaligned_count);
190 seq_puts(p, " Unaligned access handler traps\n");
191 seq_printf(p, "%*s: ", prec, "FPA");
192 for_each_online_cpu(j)
193 seq_printf(p, "%10u ", irq_stats(j)->irq_fpassist_count);
194 seq_puts(p, " Floating point assist traps\n");
181 seq_printf(p, "%*s: ", prec, "TLB"); 195 seq_printf(p, "%*s: ", prec, "TLB");
182 for_each_online_cpu(j) 196 for_each_online_cpu(j)
183 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count); 197 seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
184 seq_printf(p, " TLB shootdowns\n"); 198 seq_puts(p, " TLB shootdowns\n");
185 return 0; 199 return 0;
186} 200}
187 201
@@ -366,6 +380,24 @@ static inline int eirr_to_irq(unsigned long eirr)
366 return (BITS_PER_LONG - bit) + TIMER_IRQ; 380 return (BITS_PER_LONG - bit) + TIMER_IRQ;
367} 381}
368 382
383#ifdef CONFIG_IRQSTACKS
384/*
385 * IRQ STACK - used for irq handler
386 */
387#define IRQ_STACK_SIZE (4096 << 2) /* 16k irq stack size */
388
389union irq_stack_union {
390 unsigned long stack[IRQ_STACK_SIZE/sizeof(unsigned long)];
391 volatile unsigned int slock[4];
392 volatile unsigned int lock[1];
393};
394
395DEFINE_PER_CPU(union irq_stack_union, irq_stack_union) = {
396 .slock = { 1,1,1,1 },
397 };
398#endif
399
400
369int sysctl_panic_on_stackoverflow = 1; 401int sysctl_panic_on_stackoverflow = 1;
370 402
371static inline void stack_overflow_check(struct pt_regs *regs) 403static inline void stack_overflow_check(struct pt_regs *regs)
@@ -378,6 +410,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
378 unsigned long sp = regs->gr[30]; 410 unsigned long sp = regs->gr[30];
379 unsigned long stack_usage; 411 unsigned long stack_usage;
380 unsigned int *last_usage; 412 unsigned int *last_usage;
413 int cpu = smp_processor_id();
381 414
382 /* if sr7 != 0, we interrupted a userspace process which we do not want 415 /* if sr7 != 0, we interrupted a userspace process which we do not want
383 * to check for stack overflow. We will only check the kernel stack. */ 416 * to check for stack overflow. We will only check the kernel stack. */
@@ -386,7 +419,31 @@ static inline void stack_overflow_check(struct pt_regs *regs)
386 419
387 /* calculate kernel stack usage */ 420 /* calculate kernel stack usage */
388 stack_usage = sp - stack_start; 421 stack_usage = sp - stack_start;
389 last_usage = &per_cpu(irq_stat.kernel_stack_usage, smp_processor_id()); 422#ifdef CONFIG_IRQSTACKS
423 if (likely(stack_usage <= THREAD_SIZE))
424 goto check_kernel_stack; /* found kernel stack */
425
426 /* check irq stack usage */
427 stack_start = (unsigned long) &per_cpu(irq_stack_union, cpu).stack;
428 stack_usage = sp - stack_start;
429
430 last_usage = &per_cpu(irq_stat.irq_stack_usage, cpu);
431 if (unlikely(stack_usage > *last_usage))
432 *last_usage = stack_usage;
433
434 if (likely(stack_usage < (IRQ_STACK_SIZE - STACK_MARGIN)))
435 return;
436
437 pr_emerg("stackcheck: %s will most likely overflow irq stack "
438 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
439 current->comm, sp, stack_start, stack_start + IRQ_STACK_SIZE);
440 goto panic_check;
441
442check_kernel_stack:
443#endif
444
445 /* check kernel stack usage */
446 last_usage = &per_cpu(irq_stat.kernel_stack_usage, cpu);
390 447
391 if (unlikely(stack_usage > *last_usage)) 448 if (unlikely(stack_usage > *last_usage))
392 *last_usage = stack_usage; 449 *last_usage = stack_usage;
@@ -398,31 +455,66 @@ static inline void stack_overflow_check(struct pt_regs *regs)
398 "(sp:%lx, stk bottom-top:%lx-%lx)\n", 455 "(sp:%lx, stk bottom-top:%lx-%lx)\n",
399 current->comm, sp, stack_start, stack_start + THREAD_SIZE); 456 current->comm, sp, stack_start, stack_start + THREAD_SIZE);
400 457
458#ifdef CONFIG_IRQSTACKS
459panic_check:
460#endif
401 if (sysctl_panic_on_stackoverflow) 461 if (sysctl_panic_on_stackoverflow)
402 panic("low stack detected by irq handler - check messages\n"); 462 panic("low stack detected by irq handler - check messages\n");
403#endif 463#endif
404} 464}
405 465
406#ifdef CONFIG_IRQSTACKS 466#ifdef CONFIG_IRQSTACKS
407DEFINE_PER_CPU(union irq_stack_union, irq_stack_union); 467/* in entry.S: */
468void call_on_stack(unsigned long p1, void *func, unsigned long new_stack);
408 469
409static void execute_on_irq_stack(void *func, unsigned long param1) 470static void execute_on_irq_stack(void *func, unsigned long param1)
410{ 471{
411 unsigned long *irq_stack_start; 472 union irq_stack_union *union_ptr;
412 unsigned long irq_stack; 473 unsigned long irq_stack;
413 int cpu = smp_processor_id(); 474 volatile unsigned int *irq_stack_in_use;
475
476 union_ptr = &per_cpu(irq_stack_union, smp_processor_id());
477 irq_stack = (unsigned long) &union_ptr->stack;
478 irq_stack = ALIGN(irq_stack + sizeof(irq_stack_union.slock),
479 64); /* align for stack frame usage */
414 480
415 irq_stack_start = &per_cpu(irq_stack_union, cpu).stack[0]; 481 /* We may be called recursive. If we are already using the irq stack,
416 irq_stack = (unsigned long) irq_stack_start; 482 * just continue to use it. Use spinlocks to serialize
417 irq_stack = ALIGN(irq_stack, 16); /* align for stack frame usage */ 483 * the irq stack usage.
484 */
485 irq_stack_in_use = (volatile unsigned int *)__ldcw_align(union_ptr);
486 if (!__ldcw(irq_stack_in_use)) {
487 void (*direct_call)(unsigned long p1) = func;
418 488
419 BUG_ON(*irq_stack_start); /* report bug if we were called recursive. */ 489 /* We are using the IRQ stack already.
420 *irq_stack_start = 1; 490 * Do direct call on current stack. */
491 direct_call(param1);
492 return;
493 }
421 494
422 /* This is where we switch to the IRQ stack. */ 495 /* This is where we switch to the IRQ stack. */
423 call_on_stack(param1, func, irq_stack); 496 call_on_stack(param1, func, irq_stack);
424 497
425 *irq_stack_start = 0; 498 /* free up irq stack usage. */
499 *irq_stack_in_use = 1;
500}
501
502asmlinkage void do_softirq(void)
503{
504 __u32 pending;
505 unsigned long flags;
506
507 if (in_interrupt())
508 return;
509
510 local_irq_save(flags);
511
512 pending = local_softirq_pending();
513
514 if (pending)
515 execute_on_irq_stack(__do_softirq, 0);
516
517 local_irq_restore(flags);
426} 518}
427#endif /* CONFIG_IRQSTACKS */ 519#endif /* CONFIG_IRQSTACKS */
428 520
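In irq.c the IRQ stack becomes a per-CPU union that embeds an ldcw semaphore word, execute_on_irq_stack() uses that word as a try-lock so a nested invocation simply keeps running on the stack it is already on, and the new do_softirq() (enabled by __ARCH_HAS_DO_SOFTIRQ) runs softirqs on the IRQ stack as well. The shape of the recursion guard, rewritten with illustrative helper names (try_claim_irq_stack(), this_cpu_irq_stack_top() and release_irq_stack() do not exist; the real code uses __ldcw() on irq_stack_union.slock):

    static void run_on_irq_stack_sketch(void (*func)(unsigned long), unsigned long arg)
    {
        if (!try_claim_irq_stack()) {          /* hypothetical: real code uses __ldcw() */
            /* Already on this CPU's IRQ stack: call the handler directly. */
            func(arg);
            return;
        }

        /* Switch to the per-CPU IRQ stack, then release it again. */
        call_on_stack(arg, (void *)func, this_cpu_irq_stack_top());
        release_irq_stack();                   /* hypothetical: real code stores 1 back */
    }
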
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 5e1de6072be5..36d7f402e48e 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -605,14 +605,14 @@ ENTRY(copy_user_page_asm)
605 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ 605 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
606 convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */ 606 convert_phys_for_tlb_insert20 %r23 /* convert phys addr to tlb insert format */
607 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */ 607 depd %r24,63,22, %r28 /* Form aliased virtual address 'to' */
608 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ 608 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
609 copy %r28, %r29 609 copy %r28, %r29
610 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */ 610 depdi 1, 41,1, %r29 /* Form aliased virtual address 'from' */
611#else 611#else
612 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 612 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
613 extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */ 613 extrw,u %r23, 24,25, %r23 /* convert phys addr to tlb insert format */
614 depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */ 614 depw %r24, 31,22, %r28 /* Form aliased virtual address 'to' */
615 depwi 0, 31,12, %r28 /* Clear any offset bits */ 615 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
616 copy %r28, %r29 616 copy %r28, %r29
617 depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */ 617 depwi 1, 9,1, %r29 /* Form aliased virtual address 'from' */
618#endif 618#endif
@@ -762,7 +762,7 @@ ENTRY(clear_user_page_asm)
762#else 762#else
763 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 763 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
764 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 764 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
765 depwi 0, 31,12, %r28 /* Clear any offset bits */ 765 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
766#endif 766#endif
767 767
768 /* Purge any old translation */ 768 /* Purge any old translation */
@@ -846,7 +846,7 @@ ENTRY(flush_dcache_page_asm)
846#else 846#else
847 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 847 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
848 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 848 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
849 depwi 0, 31,12, %r28 /* Clear any offset bits */ 849 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
850#endif 850#endif
851 851
852 /* Purge any old translation */ 852 /* Purge any old translation */
@@ -918,11 +918,11 @@ ENTRY(flush_icache_page_asm)
918#endif 918#endif
919 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */ 919 convert_phys_for_tlb_insert20 %r26 /* convert phys addr to tlb insert format */
920 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */ 920 depd %r25, 63,22, %r28 /* Form aliased virtual address 'to' */
921 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */ 921 depdi 0, 63,PAGE_SHIFT, %r28 /* Clear any offset bits */
922#else 922#else
923 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */ 923 extrw,u %r26, 24,25, %r26 /* convert phys addr to tlb insert format */
924 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */ 924 depw %r25, 31,22, %r28 /* Form aliased virtual address 'to' */
925 depwi 0, 31,12, %r28 /* Clear any offset bits */ 925 depwi 0, 31,PAGE_SHIFT, %r28 /* Clear any offset bits */
926#endif 926#endif
927 927
928 /* Purge any old translation */ 928 /* Purge any old translation */
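A brief aside on the pacache.S hunks above, not part of the patch: the 32-bit depwi forms hard-coded a 12-bit page offset while the 64-bit depdi forms already used PAGE_SHIFT; the change makes both clear the low PAGE_SHIFT bits, i.e. round the aliased virtual address down to its page boundary. A standalone C check of that masking (PAGE_SHIFT value chosen only for the demo):

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12                    /* demo value */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Equivalent of the "clear any offset bits" depwi/depdi lines. */
static uint64_t page_align_down(uint64_t addr)
{
        return addr & ~(uint64_t)(PAGE_SIZE - 1);
}

int main(void)
{
        assert(page_align_down(0x12345678) == 0x12345000);
        return 0;
}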
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index fe41a98043bb..04e47c6a4562 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -646,6 +646,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
646 case 14: 646 case 14:
647 /* Assist Exception Trap, i.e. floating point exception. */ 647 /* Assist Exception Trap, i.e. floating point exception. */
648 die_if_kernel("Floating point exception", regs, 0); /* quiet */ 648 die_if_kernel("Floating point exception", regs, 0); /* quiet */
649 __inc_irq_stat(irq_fpassist_count);
649 handle_fpe(regs); 650 handle_fpe(regs);
650 return; 651 return;
651 652
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 234e3682cf09..d7c0acb35ec2 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -27,6 +27,7 @@
27#include <linux/signal.h> 27#include <linux/signal.h>
28#include <linux/ratelimit.h> 28#include <linux/ratelimit.h>
29#include <asm/uaccess.h> 29#include <asm/uaccess.h>
30#include <asm/hardirq.h>
30 31
31/* #define DEBUG_UNALIGNED 1 */ 32/* #define DEBUG_UNALIGNED 1 */
32 33
@@ -454,6 +455,8 @@ void handle_unaligned(struct pt_regs *regs)
454 struct siginfo si; 455 struct siginfo si;
455 register int flop=0; /* true if this is a flop */ 456 register int flop=0; /* true if this is a flop */
456 457
458 __inc_irq_stat(irq_unaligned_count);
459
457 /* log a message with pacing */ 460 /* log a message with pacing */
458 if (user_mode(regs)) { 461 if (user_mode(regs)) {
459 if (current->thread.flags & PARISC_UAC_SIGBUS) { 462 if (current->thread.flags & PARISC_UAC_SIGBUS) {
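Aside on the new __inc_irq_stat() calls above (irq_fpassist_count, irq_unaligned_count, and the flush_tlb_all() change just below): these bump per-CPU event counters of the kind reported alongside the interrupt statistics. A rough userspace sketch of the counting idea; the struct and macro names are stand-ins, not the parisc definitions:

#include <stdio.h>

struct irq_stat {                        /* one instance per CPU in the kernel */
        unsigned int irq_tlb_count;
        unsigned int irq_unaligned_count;
        unsigned int irq_fpassist_count;
};

static struct irq_stat this_cpu_stat;

#define inc_stat(field) (this_cpu_stat.field++)   /* stand-in for __inc_irq_stat() */

int main(void)
{
        inc_stat(irq_unaligned_count);
        inc_stat(irq_fpassist_count);
        printf("unaligned=%u fpassist=%u\n",
               this_cpu_stat.irq_unaligned_count,
               this_cpu_stat.irq_fpassist_count);
        return 0;
}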
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index ce939ac8622b..1c965642068b 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -1069,7 +1069,7 @@ void flush_tlb_all(void)
1069{ 1069{
1070 int do_recycle; 1070 int do_recycle;
1071 1071
1072 inc_irq_stat(irq_tlb_count); 1072 __inc_irq_stat(irq_tlb_count);
1073 do_recycle = 0; 1073 do_recycle = 0;
1074 spin_lock(&sid_lock); 1074 spin_lock(&sid_lock);
1075 if (dirty_space_ids > RECYCLE_THRESHOLD) { 1075 if (dirty_space_ids > RECYCLE_THRESHOLD) {
@@ -1090,7 +1090,7 @@ void flush_tlb_all(void)
1090#else 1090#else
1091void flush_tlb_all(void) 1091void flush_tlb_all(void)
1092{ 1092{
1093 inc_irq_stat(irq_tlb_count); 1093 __inc_irq_stat(irq_tlb_count);
1094 spin_lock(&sid_lock); 1094 spin_lock(&sid_lock);
1095 flush_tlb_all_local(NULL); 1095 flush_tlb_all_local(NULL);
1096 recycle_sids(); 1096 recycle_sids();
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 5416e28a7538..863d877e0b5f 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -262,8 +262,31 @@ config PPC_EARLY_DEBUG_OPAL_HVSI
262 Select this to enable early debugging for the PowerNV platform 262 Select this to enable early debugging for the PowerNV platform
263 using an "hvsi" console 263 using an "hvsi" console
264 264
265config PPC_EARLY_DEBUG_MEMCONS
266 bool "In memory console"
267 help
268 Select this to enable early debugging using an in memory console.
269 This console provides input and output buffers stored within the
270 kernel BSS and should be safe to select on any system. A debugger
271 can then be used to read kernel output or send input to the console.
265endchoice 272endchoice
266 273
274config PPC_MEMCONS_OUTPUT_SIZE
275 int "In memory console output buffer size"
276 depends on PPC_EARLY_DEBUG_MEMCONS
277 default 4096
278 help
279 Selects the size of the output buffer (in bytes) of the in memory
280 console.
281
282config PPC_MEMCONS_INPUT_SIZE
283 int "In memory console input buffer size"
284 depends on PPC_EARLY_DEBUG_MEMCONS
285 default 128
286 help
287 Selects the size of the input buffer (in bytes) of the in memory
288 console.
289
267config PPC_EARLY_DEBUG_OPAL 290config PPC_EARLY_DEBUG_OPAL
268 def_bool y 291 def_bool y
269 depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI 292 depends on PPC_EARLY_DEBUG_OPAL_RAW || PPC_EARLY_DEBUG_OPAL_HVSI
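The two int options above surface at build time as CONFIG_PPC_MEMCONS_OUTPUT_SIZE and CONFIG_PPC_MEMCONS_INPUT_SIZE and size the console buffers kept in kernel BSS. A toy sketch of the in-memory console idea, purely illustrative and not the kernel's implementation (every name below is invented for the demo):

#include <stdio.h>
#include <stddef.h>

#define MEMCONS_OUTPUT_SIZE 4096         /* stands in for the Kconfig default */

static char memcons_output[MEMCONS_OUTPUT_SIZE];
static size_t memcons_pos;

/* Append a character, wrapping when the buffer fills; a debugger can later
 * dump the array to recover early boot output. */
static void memcons_putc(char c)
{
        memcons_output[memcons_pos] = c;
        memcons_pos = (memcons_pos + 1) % MEMCONS_OUTPUT_SIZE;
}

int main(void)
{
        const char *msg = "early boot message\n";

        while (*msg)
                memcons_putc(*msg++);
        printf("%zu bytes buffered\n", memcons_pos);
        return 0;
}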
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig
index f79196232917..139a8308070c 100644
--- a/arch/powerpc/configs/ps3_defconfig
+++ b/arch/powerpc/configs/ps3_defconfig
@@ -136,7 +136,6 @@ CONFIG_HID_SMARTJOYPLUS=m
136CONFIG_USB_HIDDEV=y 136CONFIG_USB_HIDDEV=y
137CONFIG_USB=m 137CONFIG_USB=m
138CONFIG_USB_ANNOUNCE_NEW_DEVICES=y 138CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
139CONFIG_USB_SUSPEND=y
140CONFIG_USB_MON=m 139CONFIG_USB_MON=m
141CONFIG_USB_EHCI_HCD=m 140CONFIG_USB_EHCI_HCD=m
142# CONFIG_USB_EHCI_HCD_PPC_OF is not set 141# CONFIG_USB_EHCI_HCD_PPC_OF is not set
diff --git a/arch/powerpc/include/asm/context_tracking.h b/arch/powerpc/include/asm/context_tracking.h
new file mode 100644
index 000000000000..b6f5a33b8ee2
--- /dev/null
+++ b/arch/powerpc/include/asm/context_tracking.h
@@ -0,0 +1,10 @@
1#ifndef _ASM_POWERPC_CONTEXT_TRACKING_H
2#define _ASM_POWERPC_CONTEXT_TRACKING_H
3
4#ifdef CONFIG_CONTEXT_TRACKING
5#define SCHEDULE_USER bl .schedule_user
6#else
7#define SCHEDULE_USER bl .schedule
8#endif
9
10#endif
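The new header only swaps which scheduler entry point the exception-return assembly calls: with CONFIG_CONTEXT_TRACKING it becomes .schedule_user, which conceptually brackets schedule() with the context-tracking hooks so RCU and nohz accounting see the transition out of user mode. A sketch of that shape only, with stubs in place of the real kernel functions:

#include <stdio.h>

static void user_exit(void)  { puts("ctx tracking: user -> kernel"); }
static void user_enter(void) { puts("ctx tracking: kernel -> user"); }
static void schedule(void)   { puts("schedule()"); }

/* Roughly what SCHEDULE_USER resolves to when context tracking is on. */
static void schedule_user(void)
{
        user_exit();
        schedule();
        user_enter();
}

int main(void)
{
        schedule_user();
        return 0;
}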
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 0df54646f968..681bc0314b6b 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -52,6 +52,7 @@
52#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000) 52#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
53#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000) 53#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
54#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000) 54#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
55#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
55 56
56#ifndef __ASSEMBLY__ 57#ifndef __ASSEMBLY__
57 58
@@ -69,7 +70,8 @@ enum {
69 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY | 70 FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
70 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN, 71 FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
71 FW_FEATURE_PSERIES_ALWAYS = 0, 72 FW_FEATURE_PSERIES_ALWAYS = 0,
72 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, 73 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
74 FW_FEATURE_OPALv3,
73 FW_FEATURE_POWERNV_ALWAYS = 0, 75 FW_FEATURE_POWERNV_ALWAYS = 0,
74 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 76 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
75 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 77 FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index d615b28dda82..ba713f166fa5 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -96,11 +96,12 @@ static inline bool arch_irqs_disabled(void)
96#endif 96#endif
97 97
98#define hard_irq_disable() do { \ 98#define hard_irq_disable() do { \
99 u8 _was_enabled = get_paca()->soft_enabled; \
99 __hard_irq_disable(); \ 100 __hard_irq_disable(); \
100 if (local_paca->soft_enabled) \
101 trace_hardirqs_off(); \
102 get_paca()->soft_enabled = 0; \ 101 get_paca()->soft_enabled = 0; \
103 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \ 102 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; \
103 if (_was_enabled) \
104 trace_hardirqs_off(); \
104} while(0) 105} while(0)
105 106
106static inline bool lazy_irq_pending(void) 107static inline bool lazy_irq_pending(void)
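The hard_irq_disable() change above is an ordering fix: the new macro records the prior soft-enabled state up front, updates the PACA bookkeeping, and only then calls trace_hardirqs_off(), so the tracing hook runs with the lazy-disable state already consistent. A minimal model of that reordering (names are illustrative, not the PACA fields):

#include <stdio.h>

static int soft_enabled = 1;

static void trace_hardirqs_off(void)
{
        printf("trace hook sees soft_enabled=%d (already updated)\n", soft_enabled);
}

static void hard_irq_disable(void)
{
        int was_enabled = soft_enabled;   /* like _was_enabled in the macro */

        soft_enabled = 0;                 /* bookkeeping first */
        if (was_enabled)
                trace_hardirqs_off();     /* hook runs last */
}

int main(void)
{
        hard_irq_disable();
        return 0;
}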
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index b6c8b58b1d76..cbb9305ab15a 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -243,7 +243,8 @@ enum OpalMCE_TlbErrorType {
243 243
244enum OpalThreadStatus { 244enum OpalThreadStatus {
245 OPAL_THREAD_INACTIVE = 0x0, 245 OPAL_THREAD_INACTIVE = 0x0,
246 OPAL_THREAD_STARTED = 0x1 246 OPAL_THREAD_STARTED = 0x1,
247 OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
247}; 248};
248 249
249enum OpalPciBusCompare { 250enum OpalPciBusCompare {
@@ -563,6 +564,8 @@ extern void opal_nvram_init(void);
563 564
564extern int opal_machine_check(struct pt_regs *regs); 565extern int opal_machine_check(struct pt_regs *regs);
565 566
567extern void opal_shutdown(void);
568
566#endif /* __ASSEMBLY__ */ 569#endif /* __ASSEMBLY__ */
567 570
568#endif /* __OPAL_H */ 571#endif /* __OPAL_H */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 8b11b5bd9938..2c1d8cb9b265 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -174,6 +174,8 @@ struct pci_dn {
174/* Get the pointer to a device_node's pci_dn */ 174/* Get the pointer to a device_node's pci_dn */
175#define PCI_DN(dn) ((struct pci_dn *) (dn)->data) 175#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
176 176
177extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
178
177extern void * update_dn_pci_info(struct device_node *dn, void *data); 179extern void * update_dn_pci_info(struct device_node *dn, void *data);
178 180
179static inline int pci_device_from_OF_node(struct device_node *np, 181static inline int pci_device_from_OF_node(struct device_node *np,
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 91acb12bac92..b66ae722a8e9 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -186,7 +186,7 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
186 186
187static inline pgtable_t pmd_pgtable(pmd_t pmd) 187static inline pgtable_t pmd_pgtable(pmd_t pmd)
188{ 188{
189 return (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE); 189 return (pgtable_t)(pmd_val(pmd) & ~PMD_MASKED_BITS);
190} 190}
191 191
192static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, 192static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
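On the pmd_pgtable() change: the old expression relied on negating the page-table size to get an alignment mask, which only works when that size is a power of two and hides the intent, while masking with ~PMD_MASKED_BITS strips exactly the low bits that are not part of the page-table pointer. The arithmetic behind the old trick, as a standalone check (the sizes are examples, not the PPC64 values):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Example only: 8-byte PTEs, 512 per page table = 4096 bytes. */
        uint64_t table_bytes = 8 * 512;

        /* Old trick: negating a power-of-two size yields an alignment mask... */
        uint64_t mask_old = -table_bytes;

        /* ...which equals clearing the low bits explicitly. */
        uint64_t mask_new = ~(table_bytes - 1);

        assert(mask_old == mask_new);
        printf("mask = 0x%016llx\n", (unsigned long long)mask_old);
        return 0;
}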
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index d7e67ca8b4a6..594db6bc093c 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -284,6 +284,12 @@ struct thread_struct {
284 unsigned long ebbrr; 284 unsigned long ebbrr;
285 unsigned long ebbhr; 285 unsigned long ebbhr;
286 unsigned long bescr; 286 unsigned long bescr;
287 unsigned long siar;
288 unsigned long sdar;
289 unsigned long sier;
290 unsigned long mmcr0;
291 unsigned long mmcr2;
292 unsigned long mmcra;
287#endif 293#endif
288}; 294};
289 295
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h
index 3e13e23e4fdf..d836d945068d 100644
--- a/arch/powerpc/include/asm/pte-hash64-64k.h
+++ b/arch/powerpc/include/asm/pte-hash64-64k.h
@@ -47,7 +47,7 @@
47 * generic accessors and iterators here 47 * generic accessors and iterators here
48 */ 48 */
49#define __real_pte(e,p) ((real_pte_t) { \ 49#define __real_pte(e,p) ((real_pte_t) { \
50 (e), ((e) & _PAGE_COMBO) ? \ 50 (e), (pte_val(e) & _PAGE_COMBO) ? \
51 (pte_val(*((p) + PTRS_PER_PTE))) : 0 }) 51 (pte_val(*((p) + PTRS_PER_PTE))) : 0 })
52#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \ 52#define __rpte_to_hidx(r,index) ((pte_val((r).pte) & _PAGE_COMBO) ? \
53 (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf)) 53 (((r).hidx >> ((index)<<2)) & 0xf) : ((pte_val((r).pte) >> 12) & 0xf))
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index a8bc2bb4adc9..34fd70488d83 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -264,6 +264,8 @@ extern void rtas_progress(char *s, unsigned short hex);
264extern void rtas_initialize(void); 264extern void rtas_initialize(void);
265extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); 265extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
266extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); 266extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
267extern int rtas_online_cpus_mask(cpumask_var_t cpus);
268extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
267extern int rtas_ibm_suspend_me(struct rtas_args *); 269extern int rtas_ibm_suspend_me(struct rtas_args *);
268 270
269struct rtc_time; 271struct rtc_time;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 8ceea14d6fe4..ba7b1973866e 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -97,7 +97,7 @@ static inline struct thread_info *current_thread_info(void)
97#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */ 97#define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
98#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ 98#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
99#define TIF_SINGLESTEP 8 /* singlestepping active */ 99#define TIF_SINGLESTEP 8 /* singlestepping active */
100#define TIF_MEMDIE 9 /* is terminating due to OOM killer */ 100#define TIF_NOHZ 9 /* in adaptive nohz mode */
101#define TIF_SECCOMP 10 /* secure computing */ 101#define TIF_SECCOMP 10 /* secure computing */
102#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */ 102#define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
103#define TIF_NOERROR 12 /* Force successful syscall return */ 103#define TIF_NOERROR 12 /* Force successful syscall return */
@@ -106,6 +106,7 @@ static inline struct thread_info *current_thread_info(void)
106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ 106#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
107#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation 107#define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
108 for stack store? */ 108 for stack store? */
109#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
109 110
110/* as above, but as bit values */ 111/* as above, but as bit values */
111#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 112#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -124,8 +125,10 @@ static inline struct thread_info *current_thread_info(void)
124#define _TIF_UPROBE (1<<TIF_UPROBE) 125#define _TIF_UPROBE (1<<TIF_UPROBE)
125#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) 126#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
126#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) 127#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
128#define _TIF_NOHZ (1<<TIF_NOHZ)
127#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ 129#define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
128 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) 130 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
131 _TIF_NOHZ)
129 132
130#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ 133#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
131 _TIF_NOTIFY_RESUME | _TIF_UPROBE) 134 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
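For the thread_info.h hunk: TIF_* values are bit numbers and the _TIF_* macros are the matching single-bit masks, so TIF_NOHZ takes over bit 9 (contributing 1 << 9 to _TIF_SYSCALL_T_OR_A) and TIF_MEMDIE moves to the previously unused bit 17. A two-line check of the masks involved:

#include <stdio.h>

int main(void)
{
        printf("_TIF_NOHZ mask   = 0x%x\n", 1 << 9);    /* 0x200   */
        printf("TIF_MEMDIE mask  = 0x%x\n", 1 << 17);   /* 0x20000 */
        return 0;
}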
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 5a7510e9d09d..dc590919f8eb 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -52,6 +52,7 @@ extern void __init udbg_init_40x_realmode(void);
52extern void __init udbg_init_cpm(void); 52extern void __init udbg_init_cpm(void);
53extern void __init udbg_init_usbgecko(void); 53extern void __init udbg_init_usbgecko(void);
54extern void __init udbg_init_wsp(void); 54extern void __init udbg_init_wsp(void);
55extern void __init udbg_init_memcons(void);
55extern void __init udbg_init_ehv_bc(void); 56extern void __init udbg_init_ehv_bc(void);
56extern void __init udbg_init_ps3gelic(void); 57extern void __init udbg_init_ps3gelic(void);
57extern void __init udbg_init_debug_opal_raw(void); 58extern void __init udbg_init_debug_opal_raw(void);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b51a97cfedf8..6f16ffafa6f0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -127,6 +127,12 @@ int main(void)
127 DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr)); 127 DEFINE(THREAD_BESCR, offsetof(struct thread_struct, bescr));
128 DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr)); 128 DEFINE(THREAD_EBBHR, offsetof(struct thread_struct, ebbhr));
129 DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr)); 129 DEFINE(THREAD_EBBRR, offsetof(struct thread_struct, ebbrr));
130 DEFINE(THREAD_SIAR, offsetof(struct thread_struct, siar));
131 DEFINE(THREAD_SDAR, offsetof(struct thread_struct, sdar));
132 DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
133 DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
134 DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
135 DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
130#endif 136#endif
131#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 137#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
132 DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch)); 138 DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
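The asm-offsets.c additions exist because entry_64.S (later in this diff) needs the byte offsets of the new thread_struct fields as assemble-time constants: each DEFINE() records an offsetof() value so the context-switch code can use THREAD_SIAR(r3)-style addressing. A plain C illustration of the offsetof() side of that, with a struct invented for the demo:

#include <stdio.h>
#include <stddef.h>

struct demo_thread {
        unsigned long ebbrr;
        unsigned long ebbhr;
        unsigned long bescr;
        unsigned long siar;     /* new field, like the thread_struct one */
};

int main(void)
{
        /* This is the number a DEFINE(THREAD_SIAR, offsetof(...)) line
         * ultimately turns into a constant for the assembler. */
        printf("offsetof(siar) = %zu\n", offsetof(struct demo_thread, siar));
        return 0;
}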
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index a283b6442b26..18b5b9cf8e37 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -135,8 +135,12 @@ __init_HFSCR:
135 blr 135 blr
136 136
137__init_TLB: 137__init_TLB:
138 /* Clear the TLB */ 138 /*
139 li r6,128 139 * Clear the TLB using the "IS 3" form of tlbiel instruction
140 * (invalidate by congruence class). P7 has 128 CCs, P8 has 512
141 * so we just always do 512
142 */
143 li r6,512
140 mtctr r6 144 mtctr r6
141 li r7,0xc00 /* IS field = 0b11 */ 145 li r7,0xc00 /* IS field = 0b11 */
142 ptesync 146 ptesync
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index e514de57a125..d22e73e4618b 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -439,8 +439,6 @@ ret_from_fork:
439ret_from_kernel_thread: 439ret_from_kernel_thread:
440 REST_NVGPRS(r1) 440 REST_NVGPRS(r1)
441 bl schedule_tail 441 bl schedule_tail
442 li r3,0
443 stw r3,0(r1)
444 mtlr r14 442 mtlr r14
445 mr r3,r15 443 mr r3,r15
446 PPC440EP_ERR42 444 PPC440EP_ERR42
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 915fbb4fc2fe..0e9095e47b5b 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -33,6 +33,7 @@
33#include <asm/irqflags.h> 33#include <asm/irqflags.h>
34#include <asm/ftrace.h> 34#include <asm/ftrace.h>
35#include <asm/hw_irq.h> 35#include <asm/hw_irq.h>
36#include <asm/context_tracking.h>
36 37
37/* 38/*
38 * System calls. 39 * System calls.
@@ -376,8 +377,6 @@ _GLOBAL(ret_from_fork)
376_GLOBAL(ret_from_kernel_thread) 377_GLOBAL(ret_from_kernel_thread)
377 bl .schedule_tail 378 bl .schedule_tail
378 REST_NVGPRS(r1) 379 REST_NVGPRS(r1)
379 li r3,0
380 std r3,0(r1)
381 ld r14, 0(r14) 380 ld r14, 0(r14)
382 mtlr r14 381 mtlr r14
383 mr r3,r15 382 mr r3,r15
@@ -466,6 +465,20 @@ BEGIN_FTR_SECTION
466 std r0, THREAD_EBBHR(r3) 465 std r0, THREAD_EBBHR(r3)
467 mfspr r0, SPRN_EBBRR 466 mfspr r0, SPRN_EBBRR
468 std r0, THREAD_EBBRR(r3) 467 std r0, THREAD_EBBRR(r3)
468
469 /* PMU registers made user read/(write) by EBB */
470 mfspr r0, SPRN_SIAR
471 std r0, THREAD_SIAR(r3)
472 mfspr r0, SPRN_SDAR
473 std r0, THREAD_SDAR(r3)
474 mfspr r0, SPRN_SIER
475 std r0, THREAD_SIER(r3)
476 mfspr r0, SPRN_MMCR0
477 std r0, THREAD_MMCR0(r3)
478 mfspr r0, SPRN_MMCR2
479 std r0, THREAD_MMCR2(r3)
480 mfspr r0, SPRN_MMCRA
481 std r0, THREAD_MMCRA(r3)
469END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 482END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
470#endif 483#endif
471 484
@@ -561,6 +574,20 @@ BEGIN_FTR_SECTION
561 ld r0, THREAD_EBBRR(r4) 574 ld r0, THREAD_EBBRR(r4)
562 mtspr SPRN_EBBRR, r0 575 mtspr SPRN_EBBRR, r0
563 576
577 /* PMU registers made user read/(write) by EBB */
578 ld r0, THREAD_SIAR(r4)
579 mtspr SPRN_SIAR, r0
580 ld r0, THREAD_SDAR(r4)
581 mtspr SPRN_SDAR, r0
582 ld r0, THREAD_SIER(r4)
583 mtspr SPRN_SIER, r0
584 ld r0, THREAD_MMCR0(r4)
585 mtspr SPRN_MMCR0, r0
586 ld r0, THREAD_MMCR2(r4)
587 mtspr SPRN_MMCR2, r0
588 ld r0, THREAD_MMCRA(r4)
589 mtspr SPRN_MMCRA, r0
590
564 ld r0,THREAD_TAR(r4) 591 ld r0,THREAD_TAR(r4)
565 mtspr SPRN_TAR,r0 592 mtspr SPRN_TAR,r0
566END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S) 593END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
@@ -634,7 +661,7 @@ _GLOBAL(ret_from_except_lite)
634 andi. r0,r4,_TIF_NEED_RESCHED 661 andi. r0,r4,_TIF_NEED_RESCHED
635 beq 1f 662 beq 1f
636 bl .restore_interrupts 663 bl .restore_interrupts
637 bl .schedule 664 SCHEDULE_USER
638 b .ret_from_except_lite 665 b .ret_from_except_lite
639 666
6401: bl .save_nvgprs 6671: bl .save_nvgprs
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 42a756eec9ff..645170a07ada 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -489,7 +489,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
489 */ 489 */
490 490
491 mfspr r14,SPRN_DBSR /* check single-step/branch taken */ 491 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
492 andis. r15,r14,DBSR_IC@h 492 andis. r15,r14,(DBSR_IC|DBSR_BT)@h
493 beq+ 1f 493 beq+ 1f
494 494
495 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) 495 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -500,7 +500,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
500 bge+ cr1,1f 500 bge+ cr1,1f
501 501
502 /* here it looks like we got an inappropriate debug exception. */ 502 /* here it looks like we got an inappropriate debug exception. */
503 lis r14,DBSR_IC@h /* clear the IC event */ 503 lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
504 rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */ 504 rlwinm r11,r11,0,~MSR_DE /* clear DE in the CSRR1 value */
505 mtspr SPRN_DBSR,r14 505 mtspr SPRN_DBSR,r14
506 mtspr SPRN_CSRR1,r11 506 mtspr SPRN_CSRR1,r11
@@ -555,7 +555,7 @@ kernel_dbg_exc:
555 */ 555 */
556 556
557 mfspr r14,SPRN_DBSR /* check single-step/branch taken */ 557 mfspr r14,SPRN_DBSR /* check single-step/branch taken */
558 andis. r15,r14,DBSR_IC@h 558 andis. r15,r14,(DBSR_IC|DBSR_BT)@h
559 beq+ 1f 559 beq+ 1f
560 560
561 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) 561 LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e)
@@ -566,7 +566,7 @@ kernel_dbg_exc:
566 bge+ cr1,1f 566 bge+ cr1,1f
567 567
568 /* here it looks like we got an inappropriate debug exception. */ 568 /* here it looks like we got an inappropriate debug exception. */
569 lis r14,DBSR_IC@h /* clear the IC event */ 569 lis r14,(DBSR_IC|DBSR_BT)@h /* clear the event */
570 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ 570 rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */
571 mtspr SPRN_DBSR,r14 571 mtspr SPRN_DBSR,r14
572 mtspr SPRN_DSRR1,r11 572 mtspr SPRN_DSRR1,r11
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 466a2908bb63..611acdf30096 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -17,6 +17,7 @@
17#include <linux/errno.h> 17#include <linux/errno.h>
18#include <linux/kernel.h> 18#include <linux/kernel.h>
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/hardirq.h>
20 21
21#include <asm/page.h> 22#include <asm/page.h>
22#include <asm/current.h> 23#include <asm/current.h>
@@ -335,10 +336,13 @@ void default_machine_kexec(struct kimage *image)
335 pr_debug("kexec: Starting switchover sequence.\n"); 336 pr_debug("kexec: Starting switchover sequence.\n");
336 337
337 /* switch to a staticly allocated stack. Based on irq stack code. 338 /* switch to a staticly allocated stack. Based on irq stack code.
339 * We setup preempt_count to avoid using VMX in memcpy.
338 * XXX: the task struct will likely be invalid once we do the copy! 340 * XXX: the task struct will likely be invalid once we do the copy!
339 */ 341 */
340 kexec_stack.thread_info.task = current_thread_info()->task; 342 kexec_stack.thread_info.task = current_thread_info()->task;
341 kexec_stack.thread_info.flags = 0; 343 kexec_stack.thread_info.flags = 0;
344 kexec_stack.thread_info.preempt_count = HARDIRQ_OFFSET;
345 kexec_stack.thread_info.cpu = current_thread_info()->cpu;
342 346
343 /* We need a static PACA, too; copy this CPU's PACA over and switch to 347 /* We need a static PACA, too; copy this CPU's PACA over and switch to
344 * it. Also poison per_cpu_offset to catch anyone using non-static 348 * it. Also poison per_cpu_offset to catch anyone using non-static
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 19e096bd0e73..e469f30e6eeb 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -657,6 +657,17 @@ _GLOBAL(__ucmpdi2)
657 li r3,2 657 li r3,2
658 blr 658 blr
659 659
660_GLOBAL(__bswapdi2)
661 rotlwi r9,r4,8
662 rotlwi r10,r3,8
663 rlwimi r9,r4,24,0,7
664 rlwimi r10,r3,24,0,7
665 rlwimi r9,r4,24,16,23
666 rlwimi r10,r3,24,16,23
667 mr r3,r9
668 mr r4,r10
669 blr
670
660_GLOBAL(abs) 671_GLOBAL(abs)
661 srawi r4,r3,31 672 srawi r4,r3,31
662 xor r3,r3,r4 673 xor r3,r3,r4
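The two new __bswapdi2 implementations (here and in misc_64.S just below) provide the libgcc helper the compiler may emit for a 64-bit byte swap when it does not inline one; ppc_ksyms.c further down exports it for modules. What the helper computes, written as portable C rather than the kernel's assembly:

#include <assert.h>
#include <stdint.h>

/* Reverse the byte order of a 64-bit value, i.e. the semantics of the
 * __bswapdi2 helper the assembly above implements. */
static uint64_t bswap64(uint64_t x)
{
        return ((x & 0x00000000000000ffULL) << 56) |
               ((x & 0x000000000000ff00ULL) << 40) |
               ((x & 0x0000000000ff0000ULL) << 24) |
               ((x & 0x00000000ff000000ULL) <<  8) |
               ((x & 0x000000ff00000000ULL) >>  8) |
               ((x & 0x0000ff0000000000ULL) >> 24) |
               ((x & 0x00ff000000000000ULL) >> 40) |
               ((x & 0xff00000000000000ULL) >> 56);
}

int main(void)
{
        assert(bswap64(0x0102030405060708ULL) == 0x0807060504030201ULL);
        return 0;
}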
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 5cfa8008693b..6820e45f557b 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -234,6 +234,17 @@ _GLOBAL(__flush_dcache_icache)
234 isync 234 isync
235 blr 235 blr
236 236
237_GLOBAL(__bswapdi2)
238 srdi r8,r3,32
239 rlwinm r7,r3,8,0xffffffff
240 rlwimi r7,r3,24,0,7
241 rlwinm r9,r8,8,0xffffffff
242 rlwimi r7,r3,24,16,23
243 rlwimi r9,r8,24,0,7
244 rlwimi r9,r8,24,16,23
245 sldi r7,r7,32
246 or r3,r7,r9
247 blr
237 248
238#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) 249#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
239/* 250/*
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index f5c5c90799a7..e9acf50dd5b2 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -359,7 +359,6 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
359 enum pci_mmap_state mmap_state, 359 enum pci_mmap_state mmap_state,
360 int write_combine) 360 int write_combine)
361{ 361{
362 unsigned long prot = pgprot_val(protection);
363 362
364 /* Write combine is always 0 on non-memory space mappings. On 363 /* Write combine is always 0 on non-memory space mappings. On
365 * memory space, if the user didn't pass 1, we check for a 364 * memory space, if the user didn't pass 1, we check for a
@@ -376,9 +375,9 @@ static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
376 375
377 /* XXX would be nice to have a way to ask for write-through */ 376 /* XXX would be nice to have a way to ask for write-through */
378 if (write_combine) 377 if (write_combine)
379 return pgprot_noncached_wc(prot); 378 return pgprot_noncached_wc(protection);
380 else 379 else
381 return pgprot_noncached(prot); 380 return pgprot_noncached(protection);
382} 381}
383 382
384/* 383/*
@@ -1521,9 +1520,10 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
1521 for (i = 0; i < 3; ++i) { 1520 for (i = 0; i < 3; ++i) {
1522 res = &hose->mem_resources[i]; 1521 res = &hose->mem_resources[i];
1523 if (!res->flags) { 1522 if (!res->flags) {
1524 printk(KERN_ERR "PCI: Memory resource 0 not set for " 1523 if (i == 0)
1525 "host bridge %s (domain %d)\n", 1524 printk(KERN_ERR "PCI: Memory resource 0 not set for "
1526 hose->dn->full_name, hose->global_number); 1525 "host bridge %s (domain %d)\n",
1526 hose->dn->full_name, hose->global_number);
1527 continue; 1527 continue;
1528 } 1528 }
1529 offset = hose->mem_offset[i]; 1529 offset = hose->mem_offset[i];
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 873050d26840..2e8629654ca8 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -266,3 +266,13 @@ int pcibus_to_node(struct pci_bus *bus)
266} 266}
267EXPORT_SYMBOL(pcibus_to_node); 267EXPORT_SYMBOL(pcibus_to_node);
268#endif 268#endif
269
270static void quirk_radeon_32bit_msi(struct pci_dev *dev)
271{
272 struct pci_dn *pdn = pci_get_pdn(dev);
273
274 if (pdn)
275 pdn->force_32bit_msi = 1;
276}
277DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index e7af165f8b9d..df038442548a 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -32,6 +32,14 @@
32#include <asm/ppc-pci.h> 32#include <asm/ppc-pci.h>
33#include <asm/firmware.h> 33#include <asm/firmware.h>
34 34
35struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
36{
37 struct device_node *dn = pci_device_to_OF_node(pdev);
38 if (!dn)
39 return NULL;
40 return PCI_DN(dn);
41}
42
35/* 43/*
36 * Traverse_func that inits the PCI fields of the device node. 44 * Traverse_func that inits the PCI fields of the device node.
37 * NOTE: this *must* be done before read/write config to the device. 45 * NOTE: this *must* be done before read/write config to the device.
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 78b8766fd79e..c29666586998 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -143,7 +143,8 @@ EXPORT_SYMBOL(__lshrdi3);
143int __ucmpdi2(unsigned long long, unsigned long long); 143int __ucmpdi2(unsigned long long, unsigned long long);
144EXPORT_SYMBOL(__ucmpdi2); 144EXPORT_SYMBOL(__ucmpdi2);
145#endif 145#endif
146 146long long __bswapdi2(long long);
147EXPORT_SYMBOL(__bswapdi2);
147EXPORT_SYMBOL(memcpy); 148EXPORT_SYMBOL(memcpy);
148EXPORT_SYMBOL(memset); 149EXPORT_SYMBOL(memset);
149EXPORT_SYMBOL(memmove); 150EXPORT_SYMBOL(memmove);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ceb4e7b62cf4..a902723fdc69 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,6 +339,13 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
339 339
340static void prime_debug_regs(struct thread_struct *thread) 340static void prime_debug_regs(struct thread_struct *thread)
341{ 341{
342 /*
343 * We could have inherited MSR_DE from userspace, since
344 * it doesn't get cleared on exception entry. Make sure
345 * MSR_DE is clear before we enable any debug events.
346 */
347 mtmsr(mfmsr() & ~MSR_DE);
348
342 mtspr(SPRN_IAC1, thread->iac1); 349 mtspr(SPRN_IAC1, thread->iac1);
343 mtspr(SPRN_IAC2, thread->iac2); 350 mtspr(SPRN_IAC2, thread->iac2);
344#if CONFIG_PPC_ADV_DEBUG_IACS > 2 351#if CONFIG_PPC_ADV_DEBUG_IACS > 2
@@ -971,6 +978,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
971 * do some house keeping and then return from the fork or clone 978 * do some house keeping and then return from the fork or clone
972 * system call, using the stack frame created above. 979 * system call, using the stack frame created above.
973 */ 980 */
981 ((unsigned long *)sp)[0] = 0;
974 sp -= sizeof(struct pt_regs); 982 sp -= sizeof(struct pt_regs);
975 kregs = (struct pt_regs *) sp; 983 kregs = (struct pt_regs *) sp;
976 sp -= STACK_FRAME_OVERHEAD; 984 sp -= STACK_FRAME_OVERHEAD;
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3b14d320e69f..98c2fc198712 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -32,6 +32,7 @@
32#include <trace/syscall.h> 32#include <trace/syscall.h>
33#include <linux/hw_breakpoint.h> 33#include <linux/hw_breakpoint.h>
34#include <linux/perf_event.h> 34#include <linux/perf_event.h>
35#include <linux/context_tracking.h>
35 36
36#include <asm/uaccess.h> 37#include <asm/uaccess.h>
37#include <asm/page.h> 38#include <asm/page.h>
@@ -1788,6 +1789,8 @@ long do_syscall_trace_enter(struct pt_regs *regs)
1788{ 1789{
1789 long ret = 0; 1790 long ret = 0;
1790 1791
1792 user_exit();
1793
1791 secure_computing_strict(regs->gpr[0]); 1794 secure_computing_strict(regs->gpr[0]);
1792 1795
1793 if (test_thread_flag(TIF_SYSCALL_TRACE) && 1796 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
@@ -1832,4 +1835,6 @@ void do_syscall_trace_leave(struct pt_regs *regs)
1832 step = test_thread_flag(TIF_SINGLESTEP); 1835 step = test_thread_flag(TIF_SINGLESTEP);
1833 if (step || test_thread_flag(TIF_SYSCALL_TRACE)) 1836 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1834 tracehook_report_syscall_exit(regs, step); 1837 tracehook_report_syscall_exit(regs, step);
1838
1839 user_enter();
1835} 1840}
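This ptrace.c hunk (and the signal.c one further down) follows a single pattern: on each kernel entry path reachable from user space, call user_exit() before doing any work and user_enter() just before returning, so the context-tracking code always knows which side of the boundary the CPU is on. The shape of that bracketing, with stubs standing in for the real hooks:

#include <stdio.h>

static void user_exit(void)  { puts("context tracking: entered kernel"); }
static void user_enter(void) { puts("context tracking: back to user"); }

/* Same bracketing as do_syscall_trace_enter()/leave() in the hunk above. */
static long traced_syscall_entry(long syscall_nr)
{
        user_exit();                       /* first thing on entry */
        printf("tracing syscall %ld\n", syscall_nr);
        return 0;
}

static void traced_syscall_exit(void)
{
        puts("report syscall exit");
        user_enter();                      /* last thing before returning */
}

int main(void)
{
        traced_syscall_entry(11);
        traced_syscall_exit();
        return 0;
}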
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 1fd6e7b2f390..52add6f3e201 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -19,6 +19,7 @@
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/capability.h> 20#include <linux/capability.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/cpu.h>
22#include <linux/smp.h> 23#include <linux/smp.h>
23#include <linux/completion.h> 24#include <linux/completion.h>
24#include <linux/cpumask.h> 25#include <linux/cpumask.h>
@@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)
807 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); 808 __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);
808} 809}
809 810
811enum rtas_cpu_state {
812 DOWN,
813 UP,
814};
815
816#ifndef CONFIG_SMP
817static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
818 cpumask_var_t cpus)
819{
820 if (!cpumask_empty(cpus)) {
821 cpumask_clear(cpus);
822 return -EINVAL;
823 } else
824 return 0;
825}
826#else
827/* On return cpumask will be altered to indicate CPUs changed.
828 * CPUs with states changed will be set in the mask,
829 * CPUs with status unchanged will be unset in the mask. */
830static int rtas_cpu_state_change_mask(enum rtas_cpu_state state,
831 cpumask_var_t cpus)
832{
833 int cpu;
834 int cpuret = 0;
835 int ret = 0;
836
837 if (cpumask_empty(cpus))
838 return 0;
839
840 for_each_cpu(cpu, cpus) {
841 switch (state) {
842 case DOWN:
843 cpuret = cpu_down(cpu);
844 break;
845 case UP:
846 cpuret = cpu_up(cpu);
847 break;
848 }
849 if (cpuret) {
850 pr_debug("%s: cpu_%s for cpu#%d returned %d.\n",
851 __func__,
852 ((state == UP) ? "up" : "down"),
853 cpu, cpuret);
854 if (!ret)
855 ret = cpuret;
856 if (state == UP) {
857 /* clear bits for unchanged cpus, return */
858 cpumask_shift_right(cpus, cpus, cpu);
859 cpumask_shift_left(cpus, cpus, cpu);
860 break;
861 } else {
862 /* clear bit for unchanged cpu, continue */
863 cpumask_clear_cpu(cpu, cpus);
864 }
865 }
866 }
867
868 return ret;
869}
870#endif
871
872int rtas_online_cpus_mask(cpumask_var_t cpus)
873{
874 int ret;
875
876 ret = rtas_cpu_state_change_mask(UP, cpus);
877
878 if (ret) {
879 cpumask_var_t tmp_mask;
880
881 if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY))
882 return ret;
883
884 /* Use tmp_mask to preserve cpus mask from first failure */
885 cpumask_copy(tmp_mask, cpus);
886 rtas_offline_cpus_mask(tmp_mask);
887 free_cpumask_var(tmp_mask);
888 }
889
890 return ret;
891}
892EXPORT_SYMBOL(rtas_online_cpus_mask);
893
894int rtas_offline_cpus_mask(cpumask_var_t cpus)
895{
896 return rtas_cpu_state_change_mask(DOWN, cpus);
897}
898EXPORT_SYMBOL(rtas_offline_cpus_mask);
899
810int rtas_ibm_suspend_me(struct rtas_args *args) 900int rtas_ibm_suspend_me(struct rtas_args *args)
811{ 901{
812 long state; 902 long state;
@@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
814 unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; 904 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
815 struct rtas_suspend_me_data data; 905 struct rtas_suspend_me_data data;
816 DECLARE_COMPLETION_ONSTACK(done); 906 DECLARE_COMPLETION_ONSTACK(done);
907 cpumask_var_t offline_mask;
908 int cpuret;
817 909
818 if (!rtas_service_present("ibm,suspend-me")) 910 if (!rtas_service_present("ibm,suspend-me"))
819 return -ENOSYS; 911 return -ENOSYS;
@@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
837 return 0; 929 return 0;
838 } 930 }
839 931
932 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
933 return -ENOMEM;
934
840 atomic_set(&data.working, 0); 935 atomic_set(&data.working, 0);
841 atomic_set(&data.done, 0); 936 atomic_set(&data.done, 0);
842 atomic_set(&data.error, 0); 937 atomic_set(&data.error, 0);
843 data.token = rtas_token("ibm,suspend-me"); 938 data.token = rtas_token("ibm,suspend-me");
844 data.complete = &done; 939 data.complete = &done;
940
941 /* All present CPUs must be online */
942 cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask);
943 cpuret = rtas_online_cpus_mask(offline_mask);
944 if (cpuret) {
945 pr_err("%s: Could not bring present CPUs online.\n", __func__);
946 atomic_set(&data.error, cpuret);
947 goto out;
948 }
949
845 stop_topology_update(); 950 stop_topology_update();
846 951
847 /* Call function on all CPUs. One of us will make the 952 /* Call function on all CPUs. One of us will make the
@@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
857 962
858 start_topology_update(); 963 start_topology_update();
859 964
965 /* Take down CPUs not online prior to suspend */
966 cpuret = rtas_offline_cpus_mask(offline_mask);
967 if (cpuret)
968 pr_warn("%s: Could not restore CPUs to offline state.\n",
969 __func__);
970
971out:
972 free_cpumask_var(offline_mask);
860 return atomic_read(&data.error); 973 return atomic_read(&data.error);
861} 974}
862#else /* CONFIG_PPC_PSERIES */ 975#else /* CONFIG_PPC_PSERIES */
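The new rtas_online_cpus_mask()/rtas_offline_cpus_mask() helpers are used right above in rtas_ibm_suspend_me(): build the set of present-but-offline CPUs, online them for the duration of the firmware call, then return that same set to offline. A condensed model of the calling pattern, with a plain bitmask in place of cpumask_var_t and stubbed hotplug calls:

#include <stdio.h>

typedef unsigned long cpumask;

static int online_cpus(cpumask m)  { printf("online  %#lx\n", m); return 0; }
static int offline_cpus(cpumask m) { printf("offline %#lx\n", m); return 0; }

int main(void)
{
        cpumask present = 0x0f;           /* CPUs 0-3 present */
        cpumask online  = 0x03;           /* CPUs 0-1 currently online */
        cpumask offline_mask = present & ~online;

        if (online_cpus(offline_mask))    /* all present CPUs must be online */
                return 1;

        puts("ibm,suspend-me firmware call runs here");

        offline_cpus(offline_mask);       /* restore the previous state */
        return 0;
}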
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 5b3022470126..2f3cdb01506d 100644
--- a/arch/powerpc/kernel/rtas_flash.c
+++ b/arch/powerpc/kernel/rtas_flash.c
@@ -89,6 +89,7 @@
89 89
90/* Array sizes */ 90/* Array sizes */
91#define VALIDATE_BUF_SIZE 4096 91#define VALIDATE_BUF_SIZE 4096
92#define VALIDATE_MSG_LEN 256
92#define RTAS_MSG_MAXLEN 64 93#define RTAS_MSG_MAXLEN 64
93 94
94/* Quirk - RTAS requires 4k list length and block size */ 95/* Quirk - RTAS requires 4k list length and block size */
@@ -466,7 +467,7 @@ static void validate_flash(struct rtas_validate_flash_t *args_buf)
466} 467}
467 468
468static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf, 469static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
469 char *msg) 470 char *msg, int msglen)
470{ 471{
471 int n; 472 int n;
472 473
@@ -474,7 +475,8 @@ static int get_validate_flash_msg(struct rtas_validate_flash_t *args_buf,
474 n = sprintf(msg, "%d\n", args_buf->update_results); 475 n = sprintf(msg, "%d\n", args_buf->update_results);
475 if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) || 476 if ((args_buf->update_results >= VALIDATE_CUR_UNKNOWN) ||
476 (args_buf->update_results == VALIDATE_TMP_UPDATE)) 477 (args_buf->update_results == VALIDATE_TMP_UPDATE))
477 n += sprintf(msg + n, "%s\n", args_buf->buf); 478 n += snprintf(msg + n, msglen - n, "%s\n",
479 args_buf->buf);
478 } else { 480 } else {
479 n = sprintf(msg, "%d\n", args_buf->status); 481 n = sprintf(msg, "%d\n", args_buf->status);
480 } 482 }
@@ -486,11 +488,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf,
486{ 488{
487 struct rtas_validate_flash_t *const args_buf = 489 struct rtas_validate_flash_t *const args_buf =
488 &rtas_validate_flash_data; 490 &rtas_validate_flash_data;
489 char msg[RTAS_MSG_MAXLEN]; 491 char msg[VALIDATE_MSG_LEN];
490 int msglen; 492 int msglen;
491 493
492 mutex_lock(&rtas_validate_flash_mutex); 494 mutex_lock(&rtas_validate_flash_mutex);
493 msglen = get_validate_flash_msg(args_buf, msg); 495 msglen = get_validate_flash_msg(args_buf, msg, VALIDATE_MSG_LEN);
494 mutex_unlock(&rtas_validate_flash_mutex); 496 mutex_unlock(&rtas_validate_flash_mutex);
495 497
496 return simple_read_from_buffer(buf, count, ppos, msg, msglen); 498 return simple_read_from_buffer(buf, count, ppos, msg, msglen);
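The validate_flash_read() fix above is the usual bounded-write pattern: the message buffer grows to VALIDATE_MSG_LEN and the second write switches from sprintf() to snprintf() with the space that remains, so a long firmware-supplied string can only be truncated, never overrun the buffer. The same pattern in isolation:

#include <stdio.h>

#define MSG_LEN 32                        /* deliberately small for the demo */

int main(void)
{
        char msg[MSG_LEN];
        const char *untrusted = "a string longer than the space left in msg";
        int n;

        n = sprintf(msg, "%d\n", 42);                      /* short, fixed format */
        snprintf(msg + n, sizeof(msg) - n, "%s\n", untrusted);

        printf("%s", msg);                /* output is truncated, not overrun */
        return 0;
}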
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index cf12eae02de5..577a8aa69c6e 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -13,6 +13,7 @@
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <linux/uprobes.h> 14#include <linux/uprobes.h>
15#include <linux/key.h> 15#include <linux/key.h>
16#include <linux/context_tracking.h>
16#include <asm/hw_breakpoint.h> 17#include <asm/hw_breakpoint.h>
17#include <asm/uaccess.h> 18#include <asm/uaccess.h>
18#include <asm/unistd.h> 19#include <asm/unistd.h>
@@ -24,7 +25,7 @@
24 * through debug.exception-trace sysctl. 25 * through debug.exception-trace sysctl.
25 */ 26 */
26 27
27int show_unhandled_signals = 0; 28int show_unhandled_signals = 1;
28 29
29/* 30/*
30 * Allocate space for the signal frame 31 * Allocate space for the signal frame
@@ -159,6 +160,8 @@ static int do_signal(struct pt_regs *regs)
159 160
160void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) 161void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
161{ 162{
163 user_exit();
164
162 if (thread_info_flags & _TIF_UPROBE) 165 if (thread_info_flags & _TIF_UPROBE)
163 uprobe_notify_resume(regs); 166 uprobe_notify_resume(regs);
164 167
@@ -169,4 +172,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
169 clear_thread_flag(TIF_NOTIFY_RESUME); 172 clear_thread_flag(TIF_NOTIFY_RESUME);
170 tracehook_notify_resume(regs); 173 tracehook_notify_resume(regs);
171 } 174 }
175
176 user_enter();
172} 177}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 83efa2f7d926..a7a648f6b750 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -35,6 +35,7 @@
35#include <linux/kdebug.h> 35#include <linux/kdebug.h>
36#include <linux/debugfs.h> 36#include <linux/debugfs.h>
37#include <linux/ratelimit.h> 37#include <linux/ratelimit.h>
38#include <linux/context_tracking.h>
38 39
39#include <asm/emulated_ops.h> 40#include <asm/emulated_ops.h>
40#include <asm/pgtable.h> 41#include <asm/pgtable.h>
@@ -667,6 +668,7 @@ int machine_check_generic(struct pt_regs *regs)
667 668
668void machine_check_exception(struct pt_regs *regs) 669void machine_check_exception(struct pt_regs *regs)
669{ 670{
671 enum ctx_state prev_state = exception_enter();
670 int recover = 0; 672 int recover = 0;
671 673
672 __get_cpu_var(irq_stat).mce_exceptions++; 674 __get_cpu_var(irq_stat).mce_exceptions++;
@@ -683,7 +685,7 @@ void machine_check_exception(struct pt_regs *regs)
683 recover = cur_cpu_spec->machine_check(regs); 685 recover = cur_cpu_spec->machine_check(regs);
684 686
685 if (recover > 0) 687 if (recover > 0)
686 return; 688 goto bail;
687 689
688#if defined(CONFIG_8xx) && defined(CONFIG_PCI) 690#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
689 /* the qspan pci read routines can cause machine checks -- Cort 691 /* the qspan pci read routines can cause machine checks -- Cort
@@ -693,20 +695,23 @@ void machine_check_exception(struct pt_regs *regs)
693 * -- BenH 695 * -- BenH
694 */ 696 */
695 bad_page_fault(regs, regs->dar, SIGBUS); 697 bad_page_fault(regs, regs->dar, SIGBUS);
696 return; 698 goto bail;
697#endif 699#endif
698 700
699 if (debugger_fault_handler(regs)) 701 if (debugger_fault_handler(regs))
700 return; 702 goto bail;
701 703
702 if (check_io_access(regs)) 704 if (check_io_access(regs))
703 return; 705 goto bail;
704 706
705 die("Machine check", regs, SIGBUS); 707 die("Machine check", regs, SIGBUS);
706 708
707 /* Must die if the interrupt is not recoverable */ 709 /* Must die if the interrupt is not recoverable */
708 if (!(regs->msr & MSR_RI)) 710 if (!(regs->msr & MSR_RI))
709 panic("Unrecoverable Machine check"); 711 panic("Unrecoverable Machine check");
712
713bail:
714 exception_exit(prev_state);
710} 715}
711 716
712void SMIException(struct pt_regs *regs) 717void SMIException(struct pt_regs *regs)
@@ -716,20 +721,29 @@ void SMIException(struct pt_regs *regs)
716 721
717void unknown_exception(struct pt_regs *regs) 722void unknown_exception(struct pt_regs *regs)
718{ 723{
724 enum ctx_state prev_state = exception_enter();
725
719 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n", 726 printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
720 regs->nip, regs->msr, regs->trap); 727 regs->nip, regs->msr, regs->trap);
721 728
722 _exception(SIGTRAP, regs, 0, 0); 729 _exception(SIGTRAP, regs, 0, 0);
730
731 exception_exit(prev_state);
723} 732}
724 733
725void instruction_breakpoint_exception(struct pt_regs *regs) 734void instruction_breakpoint_exception(struct pt_regs *regs)
726{ 735{
736 enum ctx_state prev_state = exception_enter();
737
727 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5, 738 if (notify_die(DIE_IABR_MATCH, "iabr_match", regs, 5,
728 5, SIGTRAP) == NOTIFY_STOP) 739 5, SIGTRAP) == NOTIFY_STOP)
729 return; 740 goto bail;
730 if (debugger_iabr_match(regs)) 741 if (debugger_iabr_match(regs))
731 return; 742 goto bail;
732 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 743 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
744
745bail:
746 exception_exit(prev_state);
733} 747}
734 748
735void RunModeException(struct pt_regs *regs) 749void RunModeException(struct pt_regs *regs)
@@ -739,15 +753,20 @@ void RunModeException(struct pt_regs *regs)
739 753
740void __kprobes single_step_exception(struct pt_regs *regs) 754void __kprobes single_step_exception(struct pt_regs *regs)
741{ 755{
756 enum ctx_state prev_state = exception_enter();
757
742 clear_single_step(regs); 758 clear_single_step(regs);
743 759
744 if (notify_die(DIE_SSTEP, "single_step", regs, 5, 760 if (notify_die(DIE_SSTEP, "single_step", regs, 5,
745 5, SIGTRAP) == NOTIFY_STOP) 761 5, SIGTRAP) == NOTIFY_STOP)
746 return; 762 goto bail;
747 if (debugger_sstep(regs)) 763 if (debugger_sstep(regs))
748 return; 764 goto bail;
749 765
750 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip); 766 _exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
767
768bail:
769 exception_exit(prev_state);
751} 770}
752 771
753/* 772/*
@@ -1005,6 +1024,7 @@ int is_valid_bugaddr(unsigned long addr)
1005 1024
1006void __kprobes program_check_exception(struct pt_regs *regs) 1025void __kprobes program_check_exception(struct pt_regs *regs)
1007{ 1026{
1027 enum ctx_state prev_state = exception_enter();
1008 unsigned int reason = get_reason(regs); 1028 unsigned int reason = get_reason(regs);
1009 extern int do_mathemu(struct pt_regs *regs); 1029 extern int do_mathemu(struct pt_regs *regs);
1010 1030
@@ -1014,26 +1034,26 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1014 if (reason & REASON_FP) { 1034 if (reason & REASON_FP) {
1015 /* IEEE FP exception */ 1035 /* IEEE FP exception */
1016 parse_fpe(regs); 1036 parse_fpe(regs);
1017 return; 1037 goto bail;
1018 } 1038 }
1019 if (reason & REASON_TRAP) { 1039 if (reason & REASON_TRAP) {
1020 /* Debugger is first in line to stop recursive faults in 1040 /* Debugger is first in line to stop recursive faults in
1021 * rcu_lock, notify_die, or atomic_notifier_call_chain */ 1041 * rcu_lock, notify_die, or atomic_notifier_call_chain */
1022 if (debugger_bpt(regs)) 1042 if (debugger_bpt(regs))
1023 return; 1043 goto bail;
1024 1044
1025 /* trap exception */ 1045 /* trap exception */
1026 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP) 1046 if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5, SIGTRAP)
1027 == NOTIFY_STOP) 1047 == NOTIFY_STOP)
1028 return; 1048 goto bail;
1029 1049
1030 if (!(regs->msr & MSR_PR) && /* not user-mode */ 1050 if (!(regs->msr & MSR_PR) && /* not user-mode */
1031 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1051 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1032 regs->nip += 4; 1052 regs->nip += 4;
1033 return; 1053 goto bail;
1034 } 1054 }
1035 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip); 1055 _exception(SIGTRAP, regs, TRAP_BRKPT, regs->nip);
1036 return; 1056 goto bail;
1037 } 1057 }
1038#ifdef CONFIG_PPC_TRANSACTIONAL_MEM 1058#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1039 if (reason & REASON_TM) { 1059 if (reason & REASON_TM) {
@@ -1049,7 +1069,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1049 if (!user_mode(regs) && 1069 if (!user_mode(regs) &&
1050 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) { 1070 report_bug(regs->nip, regs) == BUG_TRAP_TYPE_WARN) {
1051 regs->nip += 4; 1071 regs->nip += 4;
1052 return; 1072 goto bail;
1053 } 1073 }
1054 /* If usermode caused this, it's done something illegal and 1074 /* If usermode caused this, it's done something illegal and
1055 * gets a SIGILL slap on the wrist. We call it an illegal 1075 * gets a SIGILL slap on the wrist. We call it an illegal
@@ -1059,7 +1079,7 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1059 */ 1079 */
1060 if (user_mode(regs)) { 1080 if (user_mode(regs)) {
1061 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip); 1081 _exception(SIGILL, regs, ILL_ILLOPN, regs->nip);
1062 return; 1082 goto bail;
1063 } else { 1083 } else {
1064 printk(KERN_EMERG "Unexpected TM Bad Thing exception " 1084 printk(KERN_EMERG "Unexpected TM Bad Thing exception "
1065 "at %lx (msr 0x%x)\n", regs->nip, reason); 1085 "at %lx (msr 0x%x)\n", regs->nip, reason);
@@ -1083,16 +1103,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1083 switch (do_mathemu(regs)) { 1103 switch (do_mathemu(regs)) {
1084 case 0: 1104 case 0:
1085 emulate_single_step(regs); 1105 emulate_single_step(regs);
1086 return; 1106 goto bail;
1087 case 1: { 1107 case 1: {
1088 int code = 0; 1108 int code = 0;
1089 code = __parse_fpscr(current->thread.fpscr.val); 1109 code = __parse_fpscr(current->thread.fpscr.val);
1090 _exception(SIGFPE, regs, code, regs->nip); 1110 _exception(SIGFPE, regs, code, regs->nip);
1091 return; 1111 goto bail;
1092 } 1112 }
1093 case -EFAULT: 1113 case -EFAULT:
1094 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1114 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1095 return; 1115 goto bail;
1096 } 1116 }
1097 /* fall through on any other errors */ 1117 /* fall through on any other errors */
1098#endif /* CONFIG_MATH_EMULATION */ 1118#endif /* CONFIG_MATH_EMULATION */
@@ -1103,10 +1123,10 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1103 case 0: 1123 case 0:
1104 regs->nip += 4; 1124 regs->nip += 4;
1105 emulate_single_step(regs); 1125 emulate_single_step(regs);
1106 return; 1126 goto bail;
1107 case -EFAULT: 1127 case -EFAULT:
1108 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip); 1128 _exception(SIGSEGV, regs, SEGV_MAPERR, regs->nip);
1109 return; 1129 goto bail;
1110 } 1130 }
1111 } 1131 }
1112 1132
@@ -1114,10 +1134,14 @@ void __kprobes program_check_exception(struct pt_regs *regs)
1114 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip); 1134 _exception(SIGILL, regs, ILL_PRVOPC, regs->nip);
1115 else 1135 else
1116 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1136 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1137
1138bail:
1139 exception_exit(prev_state);
1117} 1140}
1118 1141
1119void alignment_exception(struct pt_regs *regs) 1142void alignment_exception(struct pt_regs *regs)
1120{ 1143{
1144 enum ctx_state prev_state = exception_enter();
1121 int sig, code, fixed = 0; 1145 int sig, code, fixed = 0;
1122 1146
1123 /* We restore the interrupt state now */ 1147 /* We restore the interrupt state now */
@@ -1131,7 +1155,7 @@ void alignment_exception(struct pt_regs *regs)
1131 if (fixed == 1) { 1155 if (fixed == 1) {
1132 regs->nip += 4; /* skip over emulated instruction */ 1156 regs->nip += 4; /* skip over emulated instruction */
1133 emulate_single_step(regs); 1157 emulate_single_step(regs);
1134 return; 1158 goto bail;
1135 } 1159 }
1136 1160
1137 /* Operand address was bad */ 1161 /* Operand address was bad */
@@ -1146,6 +1170,9 @@ void alignment_exception(struct pt_regs *regs)
1146 _exception(sig, regs, code, regs->dar); 1170 _exception(sig, regs, code, regs->dar);
1147 else 1171 else
1148 bad_page_fault(regs, regs->dar, sig); 1172 bad_page_fault(regs, regs->dar, sig);
1173
1174bail:
1175 exception_exit(prev_state);
1149} 1176}
1150 1177
1151void StackOverflow(struct pt_regs *regs) 1178void StackOverflow(struct pt_regs *regs)
@@ -1174,23 +1201,32 @@ void trace_syscall(struct pt_regs *regs)
1174 1201
1175void kernel_fp_unavailable_exception(struct pt_regs *regs) 1202void kernel_fp_unavailable_exception(struct pt_regs *regs)
1176{ 1203{
1204 enum ctx_state prev_state = exception_enter();
1205
1177 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception " 1206 printk(KERN_EMERG "Unrecoverable FP Unavailable Exception "
1178 "%lx at %lx\n", regs->trap, regs->nip); 1207 "%lx at %lx\n", regs->trap, regs->nip);
1179 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT); 1208 die("Unrecoverable FP Unavailable Exception", regs, SIGABRT);
1209
1210 exception_exit(prev_state);
1180} 1211}
1181 1212
1182void altivec_unavailable_exception(struct pt_regs *regs) 1213void altivec_unavailable_exception(struct pt_regs *regs)
1183{ 1214{
1215 enum ctx_state prev_state = exception_enter();
1216
1184 if (user_mode(regs)) { 1217 if (user_mode(regs)) {
1185 /* A user program has executed an altivec instruction, 1218 /* A user program has executed an altivec instruction,
1186 but this kernel doesn't support altivec. */ 1219 but this kernel doesn't support altivec. */
1187 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip); 1220 _exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
1188 return; 1221 goto bail;
1189 } 1222 }
1190 1223
1191 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception " 1224 printk(KERN_EMERG "Unrecoverable VMX/Altivec Unavailable Exception "
1192 "%lx at %lx\n", regs->trap, regs->nip); 1225 "%lx at %lx\n", regs->trap, regs->nip);
1193 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); 1226 die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT);
1227
1228bail:
1229 exception_exit(prev_state);
1194} 1230}
1195 1231
1196void vsx_unavailable_exception(struct pt_regs *regs) 1232void vsx_unavailable_exception(struct pt_regs *regs)
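The traps.c conversions above are mechanical: each handler saves the context-tracking state with exception_enter(), every early "return" becomes "goto bail", and the single exit point restores the state with exception_exit(prev_state). A skeleton of the converted shape, with stubs in place of the kernel API:

#include <stdio.h>

enum ctx_state { CTX_KERNEL, CTX_USER };

static enum ctx_state exception_enter(void)      { return CTX_USER; }
static void exception_exit(enum ctx_state prev)  { (void)prev; }

static int handled_early(void) { return 1; }

/* One entry, one exit: early outs funnel through the bail label so
 * exception_exit() always runs. */
static void example_exception_handler(void)
{
        enum ctx_state prev_state = exception_enter();

        if (handled_early())
                goto bail;                /* was "return" before the patch */

        puts("slow path");

bail:
        exception_exit(prev_state);
}

int main(void)
{
        example_exception_handler();
        return 0;
}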
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 13b867093499..9d3fdcd66290 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -64,6 +64,9 @@ void __init udbg_early_init(void)
64 udbg_init_usbgecko(); 64 udbg_init_usbgecko();
65#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) 65#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
66 udbg_init_wsp(); 66 udbg_init_wsp();
67#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
68 /* In memory console */
69 udbg_init_memcons();
67#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC) 70#elif defined(CONFIG_PPC_EARLY_DEBUG_EHV_BC)
68 udbg_init_ehv_bc(); 71 udbg_init_ehv_bc();
69#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC) 72#elif defined(CONFIG_PPC_EARLY_DEBUG_PS3GELIC)
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 229951ffc351..8726779e1409 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -32,6 +32,7 @@
32#include <linux/perf_event.h> 32#include <linux/perf_event.h>
33#include <linux/magic.h> 33#include <linux/magic.h>
34#include <linux/ratelimit.h> 34#include <linux/ratelimit.h>
35#include <linux/context_tracking.h>
35 36
36#include <asm/firmware.h> 37#include <asm/firmware.h>
37#include <asm/page.h> 38#include <asm/page.h>
@@ -196,6 +197,7 @@ static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
196int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, 197int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
197 unsigned long error_code) 198 unsigned long error_code)
198{ 199{
200 enum ctx_state prev_state = exception_enter();
199 struct vm_area_struct * vma; 201 struct vm_area_struct * vma;
200 struct mm_struct *mm = current->mm; 202 struct mm_struct *mm = current->mm;
201 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 203 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -204,6 +206,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
204 int trap = TRAP(regs); 206 int trap = TRAP(regs);
205 int is_exec = trap == 0x400; 207 int is_exec = trap == 0x400;
206 int fault; 208 int fault;
209 int rc = 0;
207 210
208#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) 211#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
209 /* 212 /*
@@ -230,28 +233,30 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
230 * look at it 233 * look at it
231 */ 234 */
232 if (error_code & ICSWX_DSI_UCT) { 235 if (error_code & ICSWX_DSI_UCT) {
233 int rc = acop_handle_fault(regs, address, error_code); 236 rc = acop_handle_fault(regs, address, error_code);
234 if (rc) 237 if (rc)
235 return rc; 238 goto bail;
236 } 239 }
237#endif /* CONFIG_PPC_ICSWX */ 240#endif /* CONFIG_PPC_ICSWX */
238 241
239 if (notify_page_fault(regs)) 242 if (notify_page_fault(regs))
240 return 0; 243 goto bail;
241 244
242 if (unlikely(debugger_fault_handler(regs))) 245 if (unlikely(debugger_fault_handler(regs)))
243 return 0; 246 goto bail;
244 247
245 /* On a kernel SLB miss we can only check for a valid exception entry */ 248 /* On a kernel SLB miss we can only check for a valid exception entry */
246 if (!user_mode(regs) && (address >= TASK_SIZE)) 249 if (!user_mode(regs) && (address >= TASK_SIZE)) {
247 return SIGSEGV; 250 rc = SIGSEGV;
251 goto bail;
252 }
248 253
249#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \ 254#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE) || \
250 defined(CONFIG_PPC_BOOK3S_64)) 255 defined(CONFIG_PPC_BOOK3S_64))
251 if (error_code & DSISR_DABRMATCH) { 256 if (error_code & DSISR_DABRMATCH) {
252 /* breakpoint match */ 257 /* breakpoint match */
253 do_break(regs, address, error_code); 258 do_break(regs, address, error_code);
254 return 0; 259 goto bail;
255 } 260 }
256#endif 261#endif
257 262
@@ -260,8 +265,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
260 local_irq_enable(); 265 local_irq_enable();
261 266
262 if (in_atomic() || mm == NULL) { 267 if (in_atomic() || mm == NULL) {
263 if (!user_mode(regs)) 268 if (!user_mode(regs)) {
264 return SIGSEGV; 269 rc = SIGSEGV;
270 goto bail;
271 }
265 /* in_atomic() in user mode is really bad, 272 /* in_atomic() in user mode is really bad,
266 as is current->mm == NULL. */ 273 as is current->mm == NULL. */
267 printk(KERN_EMERG "Page fault in user mode with " 274 printk(KERN_EMERG "Page fault in user mode with "
@@ -417,9 +424,11 @@ good_area:
417 */ 424 */
418 fault = handle_mm_fault(mm, vma, address, flags); 425 fault = handle_mm_fault(mm, vma, address, flags);
419 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { 426 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
420 int rc = mm_fault_error(regs, address, fault); 427 rc = mm_fault_error(regs, address, fault);
421 if (rc >= MM_FAULT_RETURN) 428 if (rc >= MM_FAULT_RETURN)
422 return rc; 429 goto bail;
430 else
431 rc = 0;
423 } 432 }
424 433
425 /* 434 /*
@@ -454,7 +463,7 @@ good_area:
454 } 463 }
455 464
456 up_read(&mm->mmap_sem); 465 up_read(&mm->mmap_sem);
457 return 0; 466 goto bail;
458 467
459bad_area: 468bad_area:
460 up_read(&mm->mmap_sem); 469 up_read(&mm->mmap_sem);
@@ -463,7 +472,7 @@ bad_area_nosemaphore:
463 /* User mode accesses cause a SIGSEGV */ 472 /* User mode accesses cause a SIGSEGV */
464 if (user_mode(regs)) { 473 if (user_mode(regs)) {
465 _exception(SIGSEGV, regs, code, address); 474 _exception(SIGSEGV, regs, code, address);
466 return 0; 475 goto bail;
467 } 476 }
468 477
469 if (is_exec && (error_code & DSISR_PROTFAULT)) 478 if (is_exec && (error_code & DSISR_PROTFAULT))
@@ -471,7 +480,11 @@ bad_area_nosemaphore:
471 " page (%lx) - exploit attempt? (uid: %d)\n", 480 " page (%lx) - exploit attempt? (uid: %d)\n",
472 address, from_kuid(&init_user_ns, current_uid())); 481 address, from_kuid(&init_user_ns, current_uid()));
473 482
474 return SIGSEGV; 483 rc = SIGSEGV;
484
485bail:
486 exception_exit(prev_state);
487 return rc;
475 488
476} 489}
477 490
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 88ac0eeaadde..e303a6d74e3a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -33,6 +33,7 @@
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/signal.h> 34#include <linux/signal.h>
35#include <linux/memblock.h> 35#include <linux/memblock.h>
36#include <linux/context_tracking.h>
36 37
37#include <asm/processor.h> 38#include <asm/processor.h>
38#include <asm/pgtable.h> 39#include <asm/pgtable.h>
@@ -954,6 +955,7 @@ void hash_failure_debug(unsigned long ea, unsigned long access,
954 */ 955 */
955int hash_page(unsigned long ea, unsigned long access, unsigned long trap) 956int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
956{ 957{
958 enum ctx_state prev_state = exception_enter();
957 pgd_t *pgdir; 959 pgd_t *pgdir;
958 unsigned long vsid; 960 unsigned long vsid;
959 struct mm_struct *mm; 961 struct mm_struct *mm;
@@ -973,7 +975,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
973 mm = current->mm; 975 mm = current->mm;
974 if (! mm) { 976 if (! mm) {
975 DBG_LOW(" user region with no mm !\n"); 977 DBG_LOW(" user region with no mm !\n");
976 return 1; 978 rc = 1;
979 goto bail;
977 } 980 }
978 psize = get_slice_psize(mm, ea); 981 psize = get_slice_psize(mm, ea);
979 ssize = user_segment_size(ea); 982 ssize = user_segment_size(ea);
@@ -992,19 +995,23 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
992 /* Not a valid range 995 /* Not a valid range
993 * Send the problem up to do_page_fault 996 * Send the problem up to do_page_fault
994 */ 997 */
995 return 1; 998 rc = 1;
999 goto bail;
996 } 1000 }
997 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); 1001 DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
998 1002
999 /* Bad address. */ 1003 /* Bad address. */
1000 if (!vsid) { 1004 if (!vsid) {
1001 DBG_LOW("Bad address!\n"); 1005 DBG_LOW("Bad address!\n");
1002 return 1; 1006 rc = 1;
1007 goto bail;
1003 } 1008 }
1004 /* Get pgdir */ 1009 /* Get pgdir */
1005 pgdir = mm->pgd; 1010 pgdir = mm->pgd;
1006 if (pgdir == NULL) 1011 if (pgdir == NULL) {
1007 return 1; 1012 rc = 1;
1013 goto bail;
1014 }
1008 1015
1009 /* Check CPU locality */ 1016 /* Check CPU locality */
1010 tmp = cpumask_of(smp_processor_id()); 1017 tmp = cpumask_of(smp_processor_id());
@@ -1027,7 +1034,8 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1027 ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift); 1034 ptep = find_linux_pte_or_hugepte(pgdir, ea, &hugeshift);
1028 if (ptep == NULL || !pte_present(*ptep)) { 1035 if (ptep == NULL || !pte_present(*ptep)) {
1029 DBG_LOW(" no PTE !\n"); 1036 DBG_LOW(" no PTE !\n");
1030 return 1; 1037 rc = 1;
1038 goto bail;
1031 } 1039 }
1032 1040
1033 /* Add _PAGE_PRESENT to the required access perm */ 1041 /* Add _PAGE_PRESENT to the required access perm */
@@ -1038,13 +1046,16 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1038 */ 1046 */
1039 if (access & ~pte_val(*ptep)) { 1047 if (access & ~pte_val(*ptep)) {
1040 DBG_LOW(" no access !\n"); 1048 DBG_LOW(" no access !\n");
1041 return 1; 1049 rc = 1;
1050 goto bail;
1042 } 1051 }
1043 1052
1044#ifdef CONFIG_HUGETLB_PAGE 1053#ifdef CONFIG_HUGETLB_PAGE
1045 if (hugeshift) 1054 if (hugeshift) {
1046 return __hash_page_huge(ea, access, vsid, ptep, trap, local, 1055 rc = __hash_page_huge(ea, access, vsid, ptep, trap, local,
1047 ssize, hugeshift, psize); 1056 ssize, hugeshift, psize);
1057 goto bail;
1058 }
1048#endif /* CONFIG_HUGETLB_PAGE */ 1059#endif /* CONFIG_HUGETLB_PAGE */
1049 1060
1050#ifndef CONFIG_PPC_64K_PAGES 1061#ifndef CONFIG_PPC_64K_PAGES
@@ -1124,6 +1135,9 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
1124 pte_val(*(ptep + PTRS_PER_PTE))); 1135 pte_val(*(ptep + PTRS_PER_PTE)));
1125#endif 1136#endif
1126 DBG_LOW(" -> rc=%d\n", rc); 1137 DBG_LOW(" -> rc=%d\n", rc);
1138
1139bail:
1140 exception_exit(prev_state);
1127 return rc; 1141 return rc;
1128} 1142}
1129EXPORT_SYMBOL_GPL(hash_page); 1143EXPORT_SYMBOL_GPL(hash_page);
@@ -1259,6 +1273,8 @@ void flush_hash_range(unsigned long number, int local)
1259 */ 1273 */
1260void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc) 1274void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
1261{ 1275{
1276 enum ctx_state prev_state = exception_enter();
1277
1262 if (user_mode(regs)) { 1278 if (user_mode(regs)) {
1263#ifdef CONFIG_PPC_SUBPAGE_PROT 1279#ifdef CONFIG_PPC_SUBPAGE_PROT
1264 if (rc == -2) 1280 if (rc == -2)
@@ -1268,6 +1284,8 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
1268 _exception(SIGBUS, regs, BUS_ADRERR, address); 1284 _exception(SIGBUS, regs, BUS_ADRERR, address);
1269 } else 1285 } else
1270 bad_page_fault(regs, address, SIGBUS); 1286 bad_page_fault(regs, address, SIGBUS);
1287
1288 exception_exit(prev_state);
1271} 1289}
1272 1290
1273long hpte_insert_repeating(unsigned long hash, unsigned long vpn, 1291long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index c2787bf779ca..a90b9c458990 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -215,7 +215,8 @@ static void __meminit vmemmap_create_mapping(unsigned long start,
215 unsigned long phys) 215 unsigned long phys)
216{ 216{
217 int mapped = htab_bolt_mapping(start, start + page_size, phys, 217 int mapped = htab_bolt_mapping(start, start + page_size, phys,
218 PAGE_KERNEL, mmu_vmemmap_psize, 218 pgprot_val(PAGE_KERNEL),
219 mmu_vmemmap_psize,
219 mmu_kernel_ssize); 220 mmu_kernel_ssize);
220 BUG_ON(mapped < 0); 221 BUG_ON(mapped < 0);
221} 222}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index c627843c5b2e..426180b84978 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -13,11 +13,13 @@
13#include <linux/perf_event.h> 13#include <linux/perf_event.h>
14#include <linux/percpu.h> 14#include <linux/percpu.h>
15#include <linux/hardirq.h> 15#include <linux/hardirq.h>
16#include <linux/uaccess.h>
16#include <asm/reg.h> 17#include <asm/reg.h>
17#include <asm/pmc.h> 18#include <asm/pmc.h>
18#include <asm/machdep.h> 19#include <asm/machdep.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
20#include <asm/ptrace.h> 21#include <asm/ptrace.h>
22#include <asm/code-patching.h>
21 23
22#define BHRB_MAX_ENTRIES 32 24#define BHRB_MAX_ENTRIES 32
23#define BHRB_TARGET 0x0000000000000002 25#define BHRB_TARGET 0x0000000000000002
@@ -100,6 +102,10 @@ static inline int siar_valid(struct pt_regs *regs)
100 return 1; 102 return 1;
101} 103}
102 104
105static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
106static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
107void power_pmu_flush_branch_stack(void) {}
108static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
103#endif /* CONFIG_PPC32 */ 109#endif /* CONFIG_PPC32 */
104 110
105static bool regs_use_siar(struct pt_regs *regs) 111static bool regs_use_siar(struct pt_regs *regs)
@@ -308,6 +314,159 @@ static inline int siar_valid(struct pt_regs *regs)
308 return 1; 314 return 1;
309} 315}
310 316
317
318/* Reset all possible BHRB entries */
319static void power_pmu_bhrb_reset(void)
320{
321 asm volatile(PPC_CLRBHRB);
322}
323
324static void power_pmu_bhrb_enable(struct perf_event *event)
325{
326 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
327
328 if (!ppmu->bhrb_nr)
329 return;
330
331 /* Clear BHRB if we changed task context to avoid data leaks */
332 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
333 power_pmu_bhrb_reset();
334 cpuhw->bhrb_context = event->ctx;
335 }
336 cpuhw->bhrb_users++;
337}
338
339static void power_pmu_bhrb_disable(struct perf_event *event)
340{
341 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
342
343 if (!ppmu->bhrb_nr)
344 return;
345
346 cpuhw->bhrb_users--;
347 WARN_ON_ONCE(cpuhw->bhrb_users < 0);
348
349 if (!cpuhw->disabled && !cpuhw->bhrb_users) {
350 /* BHRB cannot be turned off when other
351 * events are active on the PMU.
352 */
353
354 /* avoid stale pointer */
355 cpuhw->bhrb_context = NULL;
356 }
357}
358
 359/* Called from ctxsw to prevent one process's branch entries from
 360 * mingling with the other process's entries during a context switch.
361 */
362void power_pmu_flush_branch_stack(void)
363{
364 if (ppmu->bhrb_nr)
365 power_pmu_bhrb_reset();
366}
367/* Calculate the to address for a branch */
368static __u64 power_pmu_bhrb_to(u64 addr)
369{
370 unsigned int instr;
371 int ret;
372 __u64 target;
373
374 if (is_kernel_addr(addr))
375 return branch_target((unsigned int *)addr);
376
377 /* Userspace: need copy instruction here then translate it */
378 pagefault_disable();
379 ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
380 if (ret) {
381 pagefault_enable();
382 return 0;
383 }
384 pagefault_enable();
385
386 target = branch_target(&instr);
387 if ((!target) || (instr & BRANCH_ABSOLUTE))
388 return target;
389
390 /* Translate relative branch target from kernel to user address */
391 return target - (unsigned long)&instr + addr;
392}
393
394/* Processing BHRB entries */
395void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
396{
397 u64 val;
398 u64 addr;
399 int r_index, u_index, pred;
400
401 r_index = 0;
402 u_index = 0;
403 while (r_index < ppmu->bhrb_nr) {
404 /* Assembly read function */
405 val = read_bhrb(r_index++);
406 if (!val)
407 /* Terminal marker: End of valid BHRB entries */
408 break;
409 else {
410 addr = val & BHRB_EA;
411 pred = val & BHRB_PREDICTION;
412
413 if (!addr)
414 /* invalid entry */
415 continue;
416
417 /* Branches are read most recent first (ie. mfbhrb 0 is
418 * the most recent branch).
419 * There are two types of valid entries:
420 * 1) a target entry which is the to address of a
421 * computed goto like a blr,bctr,btar. The next
422 * entry read from the bhrb will be branch
423 * corresponding to this target (ie. the actual
424 * blr/bctr/btar instruction).
425 * 2) a from address which is an actual branch. If a
 426 * target entry precedes this, then this is the
427 * matching branch for that target. If this is not
428 * following a target entry, then this is a branch
429 * where the target is given as an immediate field
430 * in the instruction (ie. an i or b form branch).
431 * In this case we need to read the instruction from
432 * memory to determine the target/to address.
433 */
434
435 if (val & BHRB_TARGET) {
436 /* Target branches use two entries
437 * (ie. computed gotos/XL form)
438 */
439 cpuhw->bhrb_entries[u_index].to = addr;
440 cpuhw->bhrb_entries[u_index].mispred = pred;
441 cpuhw->bhrb_entries[u_index].predicted = ~pred;
442
443 /* Get from address in next entry */
444 val = read_bhrb(r_index++);
445 addr = val & BHRB_EA;
446 if (val & BHRB_TARGET) {
447 /* Shouldn't have two targets in a
448 row.. Reset index and try again */
449 r_index--;
450 addr = 0;
451 }
452 cpuhw->bhrb_entries[u_index].from = addr;
453 } else {
454 /* Branches to immediate field
455 (ie I or B form) */
456 cpuhw->bhrb_entries[u_index].from = addr;
457 cpuhw->bhrb_entries[u_index].to =
458 power_pmu_bhrb_to(addr);
459 cpuhw->bhrb_entries[u_index].mispred = pred;
460 cpuhw->bhrb_entries[u_index].predicted = ~pred;
461 }
462 u_index++;
463
464 }
465 }
466 cpuhw->bhrb_stack.nr = u_index;
467 return;
468}
469
311#endif /* CONFIG_PPC64 */ 470#endif /* CONFIG_PPC64 */
312 471
313static void perf_event_interrupt(struct pt_regs *regs); 472static void perf_event_interrupt(struct pt_regs *regs);
@@ -904,47 +1063,6 @@ static int collect_events(struct perf_event *group, int max_count,
904 return n; 1063 return n;
905} 1064}
906 1065
907/* Reset all possible BHRB entries */
908static void power_pmu_bhrb_reset(void)
909{
910 asm volatile(PPC_CLRBHRB);
911}
912
913void power_pmu_bhrb_enable(struct perf_event *event)
914{
915 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
916
917 if (!ppmu->bhrb_nr)
918 return;
919
920 /* Clear BHRB if we changed task context to avoid data leaks */
921 if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
922 power_pmu_bhrb_reset();
923 cpuhw->bhrb_context = event->ctx;
924 }
925 cpuhw->bhrb_users++;
926}
927
928void power_pmu_bhrb_disable(struct perf_event *event)
929{
930 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
931
932 if (!ppmu->bhrb_nr)
933 return;
934
935 cpuhw->bhrb_users--;
936 WARN_ON_ONCE(cpuhw->bhrb_users < 0);
937
938 if (!cpuhw->disabled && !cpuhw->bhrb_users) {
939 /* BHRB cannot be turned off when other
940 * events are active on the PMU.
941 */
942
943 /* avoid stale pointer */
944 cpuhw->bhrb_context = NULL;
945 }
946}
947
948/* 1066/*
949 * Add a event to the PMU. 1067 * Add a event to the PMU.
950 * If all events are not already frozen, then we disable and 1068 * If all events are not already frozen, then we disable and
@@ -1180,15 +1298,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
1180 return 0; 1298 return 0;
1181} 1299}
1182 1300
1183/* Called from ctxsw to prevent one process's branch entries to
1184 * mingle with the other process's entries during context switch.
1185 */
1186void power_pmu_flush_branch_stack(void)
1187{
1188 if (ppmu->bhrb_nr)
1189 power_pmu_bhrb_reset();
1190}
1191
1192/* 1301/*
1193 * Return 1 if we might be able to put event on a limited PMC, 1302 * Return 1 if we might be able to put event on a limited PMC,
1194 * or 0 if not. 1303 * or 0 if not.
@@ -1458,77 +1567,6 @@ struct pmu power_pmu = {
1458 .flush_branch_stack = power_pmu_flush_branch_stack, 1567 .flush_branch_stack = power_pmu_flush_branch_stack,
1459}; 1568};
1460 1569
1461/* Processing BHRB entries */
1462void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
1463{
1464 u64 val;
1465 u64 addr;
1466 int r_index, u_index, target, pred;
1467
1468 r_index = 0;
1469 u_index = 0;
1470 while (r_index < ppmu->bhrb_nr) {
1471 /* Assembly read function */
1472 val = read_bhrb(r_index);
1473
1474 /* Terminal marker: End of valid BHRB entries */
1475 if (val == 0) {
1476 break;
1477 } else {
1478 /* BHRB field break up */
1479 addr = val & BHRB_EA;
1480 pred = val & BHRB_PREDICTION;
1481 target = val & BHRB_TARGET;
1482
1483 /* Probable Missed entry: Not applicable for POWER8 */
1484 if ((addr == 0) && (target == 0) && (pred == 1)) {
1485 r_index++;
1486 continue;
1487 }
1488
1489 /* Real Missed entry: Power8 based missed entry */
1490 if ((addr == 0) && (target == 1) && (pred == 1)) {
1491 r_index++;
1492 continue;
1493 }
1494
1495 /* Reserved condition: Not a valid entry */
1496 if ((addr == 0) && (target == 1) && (pred == 0)) {
1497 r_index++;
1498 continue;
1499 }
1500
1501 /* Is a target address */
1502 if (val & BHRB_TARGET) {
1503 /* First address cannot be a target address */
1504 if (r_index == 0) {
1505 r_index++;
1506 continue;
1507 }
1508
1509 /* Update target address for the previous entry */
1510 cpuhw->bhrb_entries[u_index - 1].to = addr;
1511 cpuhw->bhrb_entries[u_index - 1].mispred = pred;
1512 cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
1513
1514 /* Dont increment u_index */
1515 r_index++;
1516 } else {
1517 /* Update address, flags for current entry */
1518 cpuhw->bhrb_entries[u_index].from = addr;
1519 cpuhw->bhrb_entries[u_index].mispred = pred;
1520 cpuhw->bhrb_entries[u_index].predicted = ~pred;
1521
1522 /* Successfully popullated one entry */
1523 u_index++;
1524 r_index++;
1525 }
1526 }
1527 }
1528 cpuhw->bhrb_stack.nr = u_index;
1529 return;
1530}
1531
1532/* 1570/*
1533 * A counter has overflowed; update its count and record 1571 * A counter has overflowed; update its count and record
1534 * things if requested. Note that interrupts are hard-disabled 1572 * things if requested. Note that interrupts are hard-disabled
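
For reference, a standalone sketch of the two-entry decode the relocated power_pmu_bhrb_read() performs: a record flagged as a target carries the "to" address of a computed branch and is immediately followed by the "from" record, while an unflagged record is a plain branch whose target has to be derived from the instruction itself. BHRB_EA, the sample records and lookup_to() are invented for the example; only the pairing logic mirrors the hunk.

#include <stdio.h>
#include <stdint.h>

#define BHRB_TARGET  0x2ULL
#define BHRB_EA      ~0xfULL            /* illustrative address mask */

struct branch { uint64_t from, to; };

static uint64_t lookup_to(uint64_t from)
{
        return from + 4;                /* pretend the target follows the branch */
}

static int bhrb_decode(const uint64_t *rec, int nr, struct branch *out)
{
        int r = 0, u = 0;

        while (r < nr) {
                uint64_t val = rec[r++];

                if (!val)
                        break;          /* terminal marker */
                if (val & BHRB_TARGET) {
                        /* computed branch: this record is the "to", next is the "from" */
                        out[u].to = val & BHRB_EA;
                        out[u].from = (r < nr) ? (rec[r++] & BHRB_EA) : 0;
                } else {
                        /* immediate-form branch: derive the "to" from the instruction */
                        out[u].from = val & BHRB_EA;
                        out[u].to = lookup_to(out[u].from);
                }
                u++;
        }
        return u;
}

int main(void)
{
        uint64_t rec[] = { 0x2000 | BHRB_TARGET, 0x1000, 0x3000, 0 };
        struct branch out[4];
        int i, n = bhrb_decode(rec, 4, out);

        for (i = 0; i < n; i++)
                printf("from %#llx -> to %#llx\n",
                       (unsigned long long)out[i].from,
                       (unsigned long long)out[i].to);
        return 0;
}
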
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index a881232a3cce..b62aab3e22ec 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -128,7 +128,7 @@ config PPC_RTAS_DAEMON
128 128
129config RTAS_PROC 129config RTAS_PROC
130 bool "Proc interface to RTAS" 130 bool "Proc interface to RTAS"
131 depends on PPC_RTAS 131 depends on PPC_RTAS && PROC_FS
132 default y 132 default y
133 133
134config RTAS_FLASH 134config RTAS_FLASH
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index d3e840d643af..c24684c818ab 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -6,6 +6,7 @@ config PPC_POWERNV
6 select PPC_ICP_NATIVE 6 select PPC_ICP_NATIVE
7 select PPC_P7_NAP 7 select PPC_P7_NAP
8 select PPC_PCI_CHOICE if EMBEDDED 8 select PPC_PCI_CHOICE if EMBEDDED
9 select EPAPR_BOOT
9 default y 10 default y
10 11
11config POWERNV_MSI 12config POWERNV_MSI
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index ade4463226c6..628c564ceadb 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -15,6 +15,7 @@
15#include <linux/of.h> 15#include <linux/of.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/slab.h>
18#include <asm/opal.h> 19#include <asm/opal.h>
19#include <asm/firmware.h> 20#include <asm/firmware.h>
20 21
@@ -28,6 +29,8 @@ struct opal {
28static struct device_node *opal_node; 29static struct device_node *opal_node;
29static DEFINE_SPINLOCK(opal_write_lock); 30static DEFINE_SPINLOCK(opal_write_lock);
30extern u64 opal_mc_secondary_handler[]; 31extern u64 opal_mc_secondary_handler[];
32static unsigned int *opal_irqs;
33static unsigned int opal_irq_count;
31 34
32int __init early_init_dt_scan_opal(unsigned long node, 35int __init early_init_dt_scan_opal(unsigned long node,
33 const char *uname, int depth, void *data) 36 const char *uname, int depth, void *data)
@@ -53,7 +56,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
53 opal.entry, entryp, entrysz); 56 opal.entry, entryp, entrysz);
54 57
55 powerpc_firmware_features |= FW_FEATURE_OPAL; 58 powerpc_firmware_features |= FW_FEATURE_OPAL;
56 if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) { 59 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
60 powerpc_firmware_features |= FW_FEATURE_OPALv2;
61 powerpc_firmware_features |= FW_FEATURE_OPALv3;
62 printk("OPAL V3 detected !\n");
63 } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
57 powerpc_firmware_features |= FW_FEATURE_OPALv2; 64 powerpc_firmware_features |= FW_FEATURE_OPALv2;
58 printk("OPAL V2 detected !\n"); 65 printk("OPAL V2 detected !\n");
59 } else { 66 } else {
@@ -144,6 +151,13 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
144 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) { 151 rc == OPAL_BUSY_EVENT || rc == OPAL_SUCCESS)) {
145 len = total_len; 152 len = total_len;
146 rc = opal_console_write(vtermno, &len, data); 153 rc = opal_console_write(vtermno, &len, data);
154
155 /* Closed or other error drop */
156 if (rc != OPAL_SUCCESS && rc != OPAL_BUSY &&
157 rc != OPAL_BUSY_EVENT) {
158 written = total_len;
159 break;
160 }
147 if (rc == OPAL_SUCCESS) { 161 if (rc == OPAL_SUCCESS) {
148 total_len -= len; 162 total_len -= len;
149 data += len; 163 data += len;
@@ -316,6 +330,8 @@ static int __init opal_init(void)
316 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen); 330 irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
317 pr_debug("opal: Found %d interrupts reserved for OPAL\n", 331 pr_debug("opal: Found %d interrupts reserved for OPAL\n",
318 irqs ? (irqlen / 4) : 0); 332 irqs ? (irqlen / 4) : 0);
333 opal_irq_count = irqlen / 4;
334 opal_irqs = kzalloc(opal_irq_count * sizeof(unsigned int), GFP_KERNEL);
319 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) { 335 for (i = 0; irqs && i < (irqlen / 4); i++, irqs++) {
320 unsigned int hwirq = be32_to_cpup(irqs); 336 unsigned int hwirq = be32_to_cpup(irqs);
321 unsigned int irq = irq_create_mapping(NULL, hwirq); 337 unsigned int irq = irq_create_mapping(NULL, hwirq);
@@ -327,7 +343,19 @@ static int __init opal_init(void)
327 if (rc) 343 if (rc)
328 pr_warning("opal: Error %d requesting irq %d" 344 pr_warning("opal: Error %d requesting irq %d"
329 " (0x%x)\n", rc, irq, hwirq); 345 " (0x%x)\n", rc, irq, hwirq);
346 opal_irqs[i] = irq;
330 } 347 }
331 return 0; 348 return 0;
332} 349}
333subsys_initcall(opal_init); 350subsys_initcall(opal_init);
351
352void opal_shutdown(void)
353{
354 unsigned int i;
355
356 for (i = 0; i < opal_irq_count; i++) {
357 if (opal_irqs[i])
358 free_irq(opal_irqs[i], 0);
359 opal_irqs[i] = 0;
360 }
361}
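
A small sketch of the bookkeeping opal_init() and opal_shutdown() now share: every interrupt requested at init time is remembered in an array sized from the device tree so the kexec path can release them all. request_one() and free_one() are stand-ins for irq_create_mapping()/request_irq() and free_irq().

#include <stdio.h>
#include <stdlib.h>

static unsigned int *irq_table;
static unsigned int irq_count;

static unsigned int request_one(unsigned int hwirq)
{
        printf("requested hwirq %u\n", hwirq);
        return hwirq + 16;              /* pretend this is the Linux irq number */
}

static void free_one(unsigned int irq)
{
        printf("freed irq %u\n", irq);
}

static int example_init(const unsigned int *hwirqs, unsigned int count)
{
        unsigned int i;

        irq_count = count;
        irq_table = calloc(irq_count, sizeof(*irq_table));
        if (!irq_table)
                return -1;

        for (i = 0; i < irq_count; i++)
                irq_table[i] = request_one(hwirqs[i]);  /* remember for shutdown */
        return 0;
}

static void example_shutdown(void)
{
        unsigned int i;

        for (i = 0; i < irq_count; i++) {
                if (irq_table[i])
                        free_one(irq_table[i]);
                irq_table[i] = 0;
        }
}

int main(void)
{
        unsigned int hw[] = { 3, 4, 5 };

        if (example_init(hw, 3))
                return 1;
        example_shutdown();
        free(irq_table);
        return 0;
}
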
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 1da578b7c1bf..9c9d15e4cdf2 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -68,16 +68,6 @@ define_pe_printk_level(pe_err, KERN_ERR);
68define_pe_printk_level(pe_warn, KERN_WARNING); 68define_pe_printk_level(pe_warn, KERN_WARNING);
69define_pe_printk_level(pe_info, KERN_INFO); 69define_pe_printk_level(pe_info, KERN_INFO);
70 70
71static struct pci_dn *pnv_ioda_get_pdn(struct pci_dev *dev)
72{
73 struct device_node *np;
74
75 np = pci_device_to_OF_node(dev);
76 if (!np)
77 return NULL;
78 return PCI_DN(np);
79}
80
81static int pnv_ioda_alloc_pe(struct pnv_phb *phb) 71static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
82{ 72{
83 unsigned long pe; 73 unsigned long pe;
@@ -110,7 +100,7 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
110{ 100{
111 struct pci_controller *hose = pci_bus_to_host(dev->bus); 101 struct pci_controller *hose = pci_bus_to_host(dev->bus);
112 struct pnv_phb *phb = hose->private_data; 102 struct pnv_phb *phb = hose->private_data;
113 struct pci_dn *pdn = pnv_ioda_get_pdn(dev); 103 struct pci_dn *pdn = pci_get_pdn(dev);
114 104
115 if (!pdn) 105 if (!pdn)
116 return NULL; 106 return NULL;
@@ -173,7 +163,7 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
173 163
174 /* Add to all parents PELT-V */ 164 /* Add to all parents PELT-V */
175 while (parent) { 165 while (parent) {
176 struct pci_dn *pdn = pnv_ioda_get_pdn(parent); 166 struct pci_dn *pdn = pci_get_pdn(parent);
177 if (pdn && pdn->pe_number != IODA_INVALID_PE) { 167 if (pdn && pdn->pe_number != IODA_INVALID_PE) {
178 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number, 168 rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
179 pe->pe_number, OPAL_ADD_PE_TO_DOMAIN); 169 pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
@@ -252,7 +242,7 @@ static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
252{ 242{
253 struct pci_controller *hose = pci_bus_to_host(dev->bus); 243 struct pci_controller *hose = pci_bus_to_host(dev->bus);
254 struct pnv_phb *phb = hose->private_data; 244 struct pnv_phb *phb = hose->private_data;
255 struct pci_dn *pdn = pnv_ioda_get_pdn(dev); 245 struct pci_dn *pdn = pci_get_pdn(dev);
256 struct pnv_ioda_pe *pe; 246 struct pnv_ioda_pe *pe;
257 int pe_num; 247 int pe_num;
258 248
@@ -323,7 +313,7 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
323 struct pci_dev *dev; 313 struct pci_dev *dev;
324 314
325 list_for_each_entry(dev, &bus->devices, bus_list) { 315 list_for_each_entry(dev, &bus->devices, bus_list) {
326 struct pci_dn *pdn = pnv_ioda_get_pdn(dev); 316 struct pci_dn *pdn = pci_get_pdn(dev);
327 317
328 if (pdn == NULL) { 318 if (pdn == NULL) {
329 pr_warn("%s: No device node associated with device !\n", 319 pr_warn("%s: No device node associated with device !\n",
@@ -436,7 +426,7 @@ static void pnv_pci_ioda_setup_PEs(void)
436 426
437static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev) 427static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
438{ 428{
439 struct pci_dn *pdn = pnv_ioda_get_pdn(pdev); 429 struct pci_dn *pdn = pci_get_pdn(pdev);
440 struct pnv_ioda_pe *pe; 430 struct pnv_ioda_pe *pe;
441 431
442 /* 432 /*
@@ -768,6 +758,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
768 unsigned int is_64, struct msi_msg *msg) 758 unsigned int is_64, struct msi_msg *msg)
769{ 759{
770 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); 760 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
761 struct pci_dn *pdn = pci_get_pdn(dev);
771 struct irq_data *idata; 762 struct irq_data *idata;
772 struct irq_chip *ichip; 763 struct irq_chip *ichip;
773 unsigned int xive_num = hwirq - phb->msi_base; 764 unsigned int xive_num = hwirq - phb->msi_base;
@@ -783,6 +774,10 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
783 if (pe->mve_number < 0) 774 if (pe->mve_number < 0)
784 return -ENXIO; 775 return -ENXIO;
785 776
777 /* Force 32-bit MSI on some broken devices */
778 if (pdn && pdn->force_32bit_msi)
779 is_64 = 0;
780
786 /* Assign XIVE to PE */ 781 /* Assign XIVE to PE */
787 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num); 782 rc = opal_pci_set_xive_pe(phb->opal_id, pe->pe_number, xive_num);
788 if (rc) { 783 if (rc) {
@@ -1035,7 +1030,7 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
1035 if (!phb->initialized) 1030 if (!phb->initialized)
1036 return 0; 1031 return 0;
1037 1032
1038 pdn = pnv_ioda_get_pdn(dev); 1033 pdn = pci_get_pdn(dev);
1039 if (!pdn || pdn->pe_number == IODA_INVALID_PE) 1034 if (!pdn || pdn->pe_number == IODA_INVALID_PE)
1040 return -EINVAL; 1035 return -EINVAL;
1041 1036
@@ -1048,6 +1043,12 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
1048 return phb->ioda.pe_rmap[(bus->number << 8) | devfn]; 1043 return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
1049} 1044}
1050 1045
1046static void pnv_pci_ioda_shutdown(struct pnv_phb *phb)
1047{
1048 opal_pci_reset(phb->opal_id, OPAL_PCI_IODA_TABLE_RESET,
1049 OPAL_ASSERT_RESET);
1050}
1051
1051void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type) 1052void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
1052{ 1053{
1053 struct pci_controller *hose; 1054 struct pci_controller *hose;
@@ -1178,6 +1179,9 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
1178 /* Setup TCEs */ 1179 /* Setup TCEs */
1179 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup; 1180 phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
1180 1181
1182 /* Setup shutdown function for kexec */
1183 phb->shutdown = pnv_pci_ioda_shutdown;
1184
1181 /* Setup MSI support */ 1185 /* Setup MSI support */
1182 pnv_pci_init_ioda_msis(phb); 1186 pnv_pci_init_ioda_msis(phb);
1183 1187
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 55dfca844ddf..277343cc6a3d 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -47,6 +47,10 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
47{ 47{
48 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 48 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
49 struct pnv_phb *phb = hose->private_data; 49 struct pnv_phb *phb = hose->private_data;
50 struct pci_dn *pdn = pci_get_pdn(pdev);
51
52 if (pdn && pdn->force_32bit_msi && !phb->msi32_support)
53 return -ENODEV;
50 54
51 return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV; 55 return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
52} 56}
@@ -367,7 +371,7 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
367 while (npages--) 371 while (npages--)
368 *(tcep++) = 0; 372 *(tcep++) = 0;
369 373
370 if (tbl->it_type & TCE_PCI_SWINV_CREATE) 374 if (tbl->it_type & TCE_PCI_SWINV_FREE)
371 pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1); 375 pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
372} 376}
373 377
@@ -450,6 +454,18 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
450 pnv_pci_dma_fallback_setup(hose, pdev); 454 pnv_pci_dma_fallback_setup(hose, pdev);
451} 455}
452 456
457void pnv_pci_shutdown(void)
458{
459 struct pci_controller *hose;
460
461 list_for_each_entry(hose, &hose_list, list_node) {
462 struct pnv_phb *phb = hose->private_data;
463
464 if (phb && phb->shutdown)
465 phb->shutdown(phb);
466 }
467}
468
453/* Fixup wrong class code in p7ioc and p8 root complex */ 469/* Fixup wrong class code in p7ioc and p8 root complex */
454static void pnv_p7ioc_rc_quirk(struct pci_dev *dev) 470static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
455{ 471{
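
The shutdown plumbing added across pci-ioda.c, pci.c and pci.h boils down to an optional per-controller callback plus a walker that invokes it where installed. A compact sketch, with a fixed array standing in for hose_list and the pnv_phb machinery:

#include <stdio.h>
#include <stddef.h>

struct phb {
        const char *name;
        void (*shutdown)(struct phb *phb);
};

static void ioda_shutdown(struct phb *phb)
{
        printf("%s: resetting IODA tables\n", phb->name);
}

static struct phb phbs[] = {
        { .name = "phb0", .shutdown = ioda_shutdown },
        { .name = "phb1", .shutdown = NULL },           /* no hook installed */
};

static void pci_shutdown_all(void)
{
        size_t i;

        for (i = 0; i < sizeof(phbs) / sizeof(phbs[0]); i++)
                if (phbs[i].shutdown)
                        phbs[i].shutdown(&phbs[i]);
}

int main(void)
{
        pci_shutdown_all();
        return 0;
}
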
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 48dc4bb856a1..25d76c4df50b 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -86,6 +86,7 @@ struct pnv_phb {
86 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev); 86 void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
87 void (*fixup_phb)(struct pci_controller *hose); 87 void (*fixup_phb)(struct pci_controller *hose);
88 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn); 88 u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
89 void (*shutdown)(struct pnv_phb *phb);
89 90
90 union { 91 union {
91 struct { 92 struct {
@@ -158,4 +159,5 @@ extern void pnv_pci_init_ioda_hub(struct device_node *np);
158extern void pnv_pci_init_ioda2_phb(struct device_node *np); 159extern void pnv_pci_init_ioda2_phb(struct device_node *np);
159extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl, 160extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
160 u64 *startp, u64 *endp); 161 u64 *startp, u64 *endp);
162
161#endif /* __POWERNV_PCI_H */ 163#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 8a9df7f9667e..a1c6f83fc391 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -9,8 +9,10 @@ static inline void pnv_smp_init(void) { }
9 9
10#ifdef CONFIG_PCI 10#ifdef CONFIG_PCI
11extern void pnv_pci_init(void); 11extern void pnv_pci_init(void);
12extern void pnv_pci_shutdown(void);
12#else 13#else
13static inline void pnv_pci_init(void) { } 14static inline void pnv_pci_init(void) { }
15static inline void pnv_pci_shutdown(void) { }
14#endif 16#endif
15 17
16#endif /* _POWERNV_H */ 18#endif /* _POWERNV_H */
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index db1ad1c8f68f..d4459bfc92f7 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -78,7 +78,9 @@ static void pnv_show_cpuinfo(struct seq_file *m)
78 if (root) 78 if (root)
79 model = of_get_property(root, "model", NULL); 79 model = of_get_property(root, "model", NULL);
80 seq_printf(m, "machine\t\t: PowerNV %s\n", model); 80 seq_printf(m, "machine\t\t: PowerNV %s\n", model);
81 if (firmware_has_feature(FW_FEATURE_OPALv2)) 81 if (firmware_has_feature(FW_FEATURE_OPALv3))
82 seq_printf(m, "firmware\t: OPAL v3\n");
83 else if (firmware_has_feature(FW_FEATURE_OPALv2))
82 seq_printf(m, "firmware\t: OPAL v2\n"); 84 seq_printf(m, "firmware\t: OPAL v2\n");
83 else if (firmware_has_feature(FW_FEATURE_OPAL)) 85 else if (firmware_has_feature(FW_FEATURE_OPAL))
84 seq_printf(m, "firmware\t: OPAL v1\n"); 86 seq_printf(m, "firmware\t: OPAL v1\n");
@@ -126,6 +128,17 @@ static void pnv_progress(char *s, unsigned short hex)
126{ 128{
127} 129}
128 130
131static void pnv_shutdown(void)
132{
133 /* Let the PCI code clear up IODA tables */
134 pnv_pci_shutdown();
135
136 /* And unregister all OPAL interrupts so they don't fire
137 * up while we kexec
138 */
139 opal_shutdown();
140}
141
129#ifdef CONFIG_KEXEC 142#ifdef CONFIG_KEXEC
130static void pnv_kexec_cpu_down(int crash_shutdown, int secondary) 143static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
131{ 144{
@@ -187,6 +200,7 @@ define_machine(powernv) {
187 .init_IRQ = pnv_init_IRQ, 200 .init_IRQ = pnv_init_IRQ,
188 .show_cpuinfo = pnv_show_cpuinfo, 201 .show_cpuinfo = pnv_show_cpuinfo,
189 .progress = pnv_progress, 202 .progress = pnv_progress,
203 .machine_shutdown = pnv_shutdown,
190 .power_save = power7_idle, 204 .power_save = power7_idle,
191 .calibrate_decr = generic_calibrate_decr, 205 .calibrate_decr = generic_calibrate_decr,
192#ifdef CONFIG_KEXEC 206#ifdef CONFIG_KEXEC
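
The OPAL v3 detection keeps the firmware feature flags cumulative, so existing FW_FEATURE_OPALv2 checks continue to pass on v3 firmware while reporting picks the highest version present. A sketch with illustrative flag values (not the kernel's):

#include <stdio.h>
#include <string.h>

#define FW_OPAL    0x1u
#define FW_OPALv2  0x2u
#define FW_OPALv3  0x4u

static unsigned int features;

static void detect(const char *compatible)
{
        features |= FW_OPAL;
        if (!strcmp(compatible, "ibm,opal-v3"))
                features |= FW_OPALv2 | FW_OPALv3;      /* v3 implies v2 checks still pass */
        else if (!strcmp(compatible, "ibm,opal-v2"))
                features |= FW_OPALv2;
}

static const char *report(void)
{
        if (features & FW_OPALv3)
                return "OPAL v3";
        if (features & FW_OPALv2)
                return "OPAL v2";
        if (features & FW_OPAL)
                return "OPAL v1";
        return "none";
}

int main(void)
{
        detect("ibm,opal-v3");
        printf("firmware: %s\n", report());
        return 0;
}
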
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 6a3ecca5b725..88c9459c3e07 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -71,18 +71,68 @@ int pnv_smp_kick_cpu(int nr)
71 71
72 BUG_ON(nr < 0 || nr >= NR_CPUS); 72 BUG_ON(nr < 0 || nr >= NR_CPUS);
73 73
74 /* On OPAL v2 the CPU are still spinning inside OPAL itself, 74 /*
75 * get them back now 75 * If we already started or OPALv2 is not supported, we just
76 * kick the CPU via the PACA
76 */ 77 */
77 if (!paca[nr].cpu_start && firmware_has_feature(FW_FEATURE_OPALv2)) { 78 if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
78 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu); 79 goto kick;
79 rc = opal_start_cpu(pcpu, start_here); 80
81 /*
82 * At this point, the CPU can either be spinning on the way in
83 * from kexec or be inside OPAL waiting to be started for the
84 * first time. OPAL v3 allows us to query OPAL to know if it
85 * has the CPUs, so we do that
86 */
87 if (firmware_has_feature(FW_FEATURE_OPALv3)) {
88 uint8_t status;
89
90 rc = opal_query_cpu_status(pcpu, &status);
80 if (rc != OPAL_SUCCESS) { 91 if (rc != OPAL_SUCCESS) {
81 pr_warn("OPAL Error %ld starting CPU %d\n", 92 pr_warn("OPAL Error %ld querying CPU %d state\n",
82 rc, nr); 93 rc, nr);
83 return -ENODEV; 94 return -ENODEV;
84 } 95 }
96
97 /*
98 * Already started, just kick it, probably coming from
99 * kexec and spinning
100 */
101 if (status == OPAL_THREAD_STARTED)
102 goto kick;
103
104 /*
105 * Available/inactive, let's kick it
106 */
107 if (status == OPAL_THREAD_INACTIVE) {
108 pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
109 nr, pcpu);
110 rc = opal_start_cpu(pcpu, start_here);
111 if (rc != OPAL_SUCCESS) {
112 pr_warn("OPAL Error %ld starting CPU %d\n",
113 rc, nr);
114 return -ENODEV;
115 }
116 } else {
117 /*
118 * An unavailable CPU (or any other unknown status)
119 * shouldn't be started. It should also
120 * not be in the possible map but currently it can
121 * happen
122 */
123 pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
124 " (status %d)...\n", nr, pcpu, status);
125 return -ENODEV;
126 }
127 } else {
128 /*
129 * On OPAL v2, we just kick it and hope for the best,
130 * we must not test the error from opal_start_cpu() or
131 * we would fail to get CPUs from kexec.
132 */
133 opal_start_cpu(pcpu, start_here);
85 } 134 }
135 kick:
86 return smp_generic_kick_cpu(nr); 136 return smp_generic_kick_cpu(nr);
87} 137}
88 138
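
The new pnv_smp_kick_cpu() flow on OPAL v3 reduces to a small decision tree: query the thread state, kick the CPU directly if it is already started, ask firmware to start it if it is inactive, and refuse anything else. A stubbed sketch, with query_status() standing in for opal_query_cpu_status():

#include <stdio.h>

enum thread_status { THREAD_STARTED, THREAD_INACTIVE, THREAD_UNAVAILABLE };

static int query_status(int cpu, enum thread_status *status)
{
        *status = (cpu == 2) ? THREAD_UNAVAILABLE :
                  (cpu == 1) ? THREAD_INACTIVE : THREAD_STARTED;
        return 0;                       /* pretend the firmware call succeeded */
}

static int kick_cpu(int cpu)
{
        enum thread_status status;

        if (query_status(cpu, &status))
                return -1;              /* the query itself failed */

        switch (status) {
        case THREAD_STARTED:
                break;                  /* spinning already (e.g. kexec), just kick it */
        case THREAD_INACTIVE:
                printf("cpu %d: asking firmware to start it\n", cpu);
                break;
        default:
                printf("cpu %d: unavailable, not starting\n", cpu);
                return -1;
        }
        printf("cpu %d: kicked\n", cpu);
        return 0;
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 3; cpu++)
                kick_cpu(cpu);
        return 0;
}
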
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 9a0941bc4d31..023b288f895b 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -18,6 +18,7 @@ config PPC_PSERIES
18 select PPC_PCI_CHOICE if EXPERT 18 select PPC_PCI_CHOICE if EXPERT
19 select ZLIB_DEFLATE 19 select ZLIB_DEFLATE
20 select PPC_DOORBELL 20 select PPC_DOORBELL
21 select HAVE_CONTEXT_TRACKING
21 default y 22 default y
22 23
23config PPC_SPLPAR 24config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 420524e6f8c9..6d2f0abce6fa 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -26,26 +26,6 @@ static int query_token, change_token;
26#define RTAS_CHANGE_MSIX_FN 4 26#define RTAS_CHANGE_MSIX_FN 4
27#define RTAS_CHANGE_32MSI_FN 5 27#define RTAS_CHANGE_32MSI_FN 5
28 28
29static struct pci_dn *get_pdn(struct pci_dev *pdev)
30{
31 struct device_node *dn;
32 struct pci_dn *pdn;
33
34 dn = pci_device_to_OF_node(pdev);
35 if (!dn) {
36 dev_dbg(&pdev->dev, "rtas_msi: No OF device node\n");
37 return NULL;
38 }
39
40 pdn = PCI_DN(dn);
41 if (!pdn) {
42 dev_dbg(&pdev->dev, "rtas_msi: No PCI DN\n");
43 return NULL;
44 }
45
46 return pdn;
47}
48
49/* RTAS Helpers */ 29/* RTAS Helpers */
50 30
51static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs) 31static int rtas_change_msi(struct pci_dn *pdn, u32 func, u32 num_irqs)
@@ -91,7 +71,7 @@ static void rtas_disable_msi(struct pci_dev *pdev)
91{ 71{
92 struct pci_dn *pdn; 72 struct pci_dn *pdn;
93 73
94 pdn = get_pdn(pdev); 74 pdn = pci_get_pdn(pdev);
95 if (!pdn) 75 if (!pdn)
96 return; 76 return;
97 77
@@ -152,7 +132,7 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
152 struct pci_dn *pdn; 132 struct pci_dn *pdn;
153 const u32 *req_msi; 133 const u32 *req_msi;
154 134
155 pdn = get_pdn(pdev); 135 pdn = pci_get_pdn(pdev);
156 if (!pdn) 136 if (!pdn)
157 return -ENODEV; 137 return -ENODEV;
158 138
@@ -394,6 +374,23 @@ static int check_msix_entries(struct pci_dev *pdev)
394 return 0; 374 return 0;
395} 375}
396 376
377static void rtas_hack_32bit_msi_gen2(struct pci_dev *pdev)
378{
379 u32 addr_hi, addr_lo;
380
381 /*
382 * We should only get in here for IODA1 configs. This is based on the
 383 * fact that we are using RTAS for MSIs, we don't have the 32 bit MSI RTAS
384 * support, and we are in a PCIe Gen2 slot.
385 */
386 dev_info(&pdev->dev,
387 "rtas_msi: No 32 bit MSI firmware support, forcing 32 bit MSI\n");
388 pci_read_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, &addr_hi);
389 addr_lo = 0xffff0000 | ((addr_hi >> (48 - 32)) << 4);
390 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_LO, addr_lo);
391 pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_ADDRESS_HI, 0);
392}
393
397static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type) 394static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
398{ 395{
399 struct pci_dn *pdn; 396 struct pci_dn *pdn;
@@ -401,8 +398,9 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
401 struct msi_desc *entry; 398 struct msi_desc *entry;
402 struct msi_msg msg; 399 struct msi_msg msg;
403 int nvec = nvec_in; 400 int nvec = nvec_in;
401 int use_32bit_msi_hack = 0;
404 402
405 pdn = get_pdn(pdev); 403 pdn = pci_get_pdn(pdev);
406 if (!pdn) 404 if (!pdn)
407 return -ENODEV; 405 return -ENODEV;
408 406
@@ -428,15 +426,31 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
428 */ 426 */
429again: 427again:
430 if (type == PCI_CAP_ID_MSI) { 428 if (type == PCI_CAP_ID_MSI) {
431 if (pdn->force_32bit_msi) 429 if (pdn->force_32bit_msi) {
432 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); 430 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
433 else 431 if (rc < 0) {
432 /*
433 * We only want to run the 32 bit MSI hack below if
434 * the max bus speed is Gen2 speed
435 */
436 if (pdev->bus->max_bus_speed != PCIE_SPEED_5_0GT)
437 return rc;
438
439 use_32bit_msi_hack = 1;
440 }
441 } else
442 rc = -1;
443
444 if (rc < 0)
434 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec); 445 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, nvec);
435 446
436 if (rc < 0 && !pdn->force_32bit_msi) { 447 if (rc < 0) {
437 pr_debug("rtas_msi: trying the old firmware call.\n"); 448 pr_debug("rtas_msi: trying the old firmware call.\n");
438 rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec); 449 rc = rtas_change_msi(pdn, RTAS_CHANGE_FN, nvec);
439 } 450 }
451
452 if (use_32bit_msi_hack && rc > 0)
453 rtas_hack_32bit_msi_gen2(pdev);
440 } else 454 } else
441 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec); 455 rc = rtas_change_msi(pdn, RTAS_CHANGE_MSIX_FN, nvec);
442 456
@@ -518,12 +532,3 @@ static int rtas_msi_init(void)
518} 532}
519arch_initcall(rtas_msi_init); 533arch_initcall(rtas_msi_init);
520 534
521static void quirk_radeon(struct pci_dev *dev)
522{
523 struct pci_dn *pdn = get_pdn(dev);
524
525 if (pdn)
526 pdn->force_32bit_msi = 1;
527}
528DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon);
529DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon);
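
rtas_hack_32bit_msi_gen2() is pure register arithmetic: fold the upper word of the firmware-programmed 64-bit MSI address into a 32-bit address of the form 0xffffXXX0, then clear the upper word. The shift and mask below are taken from the hunk; the sample address is made up.

#include <stdio.h>
#include <stdint.h>

static uint32_t fold_msi_addr(uint32_t addr_hi)
{
        /* bits 48-63 of the full address end up in bits 4-19 of the folded one */
        return 0xffff0000u | ((addr_hi >> (48 - 32)) << 4);
}

int main(void)
{
        uint32_t addr_hi = 0x00030000u;  /* upper word of a 64-bit MSI address */
        uint32_t addr_lo = fold_msi_addr(addr_hi);

        printf("addr_hi %#010x -> addr_lo %#010x, addr_hi then cleared\n",
               addr_hi, addr_lo);
        return 0;
}
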
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index 47226e04126d..5f997e79d570 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -16,6 +16,7 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 17 */
18 18
19#include <linux/cpu.h>
19#include <linux/delay.h> 20#include <linux/delay.h>
20#include <linux/suspend.h> 21#include <linux/suspend.h>
21#include <linux/stat.h> 22#include <linux/stat.h>
@@ -126,11 +127,15 @@ static ssize_t store_hibernate(struct device *dev,
126 struct device_attribute *attr, 127 struct device_attribute *attr,
127 const char *buf, size_t count) 128 const char *buf, size_t count)
128{ 129{
130 cpumask_var_t offline_mask;
129 int rc; 131 int rc;
130 132
131 if (!capable(CAP_SYS_ADMIN)) 133 if (!capable(CAP_SYS_ADMIN))
132 return -EPERM; 134 return -EPERM;
133 135
136 if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
137 return -ENOMEM;
138
134 stream_id = simple_strtoul(buf, NULL, 16); 139 stream_id = simple_strtoul(buf, NULL, 16);
135 140
136 do { 141 do {
@@ -140,15 +145,32 @@ static ssize_t store_hibernate(struct device *dev,
140 } while (rc == -EAGAIN); 145 } while (rc == -EAGAIN);
141 146
142 if (!rc) { 147 if (!rc) {
148 /* All present CPUs must be online */
149 cpumask_andnot(offline_mask, cpu_present_mask,
150 cpu_online_mask);
151 rc = rtas_online_cpus_mask(offline_mask);
152 if (rc) {
153 pr_err("%s: Could not bring present CPUs online.\n",
154 __func__);
155 goto out;
156 }
157
143 stop_topology_update(); 158 stop_topology_update();
144 rc = pm_suspend(PM_SUSPEND_MEM); 159 rc = pm_suspend(PM_SUSPEND_MEM);
145 start_topology_update(); 160 start_topology_update();
161
162 /* Take down CPUs not online prior to suspend */
163 if (!rtas_offline_cpus_mask(offline_mask))
164 pr_warn("%s: Could not restore CPUs to offline "
165 "state.\n", __func__);
146 } 166 }
147 167
148 stream_id = 0; 168 stream_id = 0;
149 169
150 if (!rc) 170 if (!rc)
151 rc = count; 171 rc = count;
172out:
173 free_cpumask_var(offline_mask);
152 return rc; 174 return rc;
153} 175}
154 176
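
The suspend.c change is easiest to see with plain bitmasks: remember which present CPUs were offline, bring them all online for the duration of the hibernation call, then put exactly that set back offline. cpumask_var_t and the rtas_*_cpus_mask() helpers are replaced by unsigned ints here.

#include <stdio.h>

int main(void)
{
        unsigned int present = 0x0f;    /* CPUs 0-3 exist */
        unsigned int online  = 0x05;    /* CPUs 0 and 2 are up */
        unsigned int offline_mask;

        /* All present CPUs must be online for the firmware call */
        offline_mask = present & ~online;
        online |= offline_mask;
        printf("suspending with online mask %#x\n", online);

        /* ... pm_suspend(PM_SUSPEND_MEM) would run here ... */

        /* Take back down the CPUs that were offline before the suspend */
        online &= ~offline_mask;
        printf("restored online mask %#x\n", online);
        return 0;
}
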
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 97fe82ee8633..2d3b1dd9571d 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -361,7 +361,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); 361 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
362 wsp_ics_set_xive(ics, hw_irq, xive); 362 wsp_ics_set_xive(ics, hw_irq, xive);
363 363
364 return 0; 364 return IRQ_SET_MASK_OK;
365} 365}
366 366
367static struct irq_chip wsp_irq_chip = { 367static struct irq_chip wsp_irq_chip = {
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index b0a518e97599..99464a7bdb3b 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -64,6 +64,8 @@ endif
64 64
65obj-$(CONFIG_PPC_SCOM) += scom.o 65obj-$(CONFIG_PPC_SCOM) += scom.o
66 66
67obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
68
67subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror 69subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
68 70
69obj-$(CONFIG_PPC_XICS) += xics/ 71obj-$(CONFIG_PPC_XICS) += xics/
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index 6e0e1005227f..9cd0e60716fe 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -81,7 +81,7 @@ int ehv_pic_set_affinity(struct irq_data *d, const struct cpumask *dest,
81 ev_int_set_config(src, config, prio, cpuid); 81 ev_int_set_config(src, config, prio, cpuid);
82 spin_unlock_irqrestore(&ehv_pic_lock, flags); 82 spin_unlock_irqrestore(&ehv_pic_lock, flags);
83 83
84 return 0; 84 return IRQ_SET_MASK_OK;
85} 85}
86 86
87static unsigned int ehv_pic_type_to_vecpri(unsigned int type) 87static unsigned int ehv_pic_type_to_vecpri(unsigned int type)
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index ee21b5e71aec..0a13ecb270c7 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -836,7 +836,7 @@ int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
836 mpic_physmask(mask)); 836 mpic_physmask(mask));
837 } 837 }
838 838
839 return 0; 839 return IRQ_SET_MASK_OK;
840} 840}
841 841
842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) 842static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)
diff --git a/arch/powerpc/sysdev/udbg_memcons.c b/arch/powerpc/sysdev/udbg_memcons.c
new file mode 100644
index 000000000000..ce5a7b489e4b
--- /dev/null
+++ b/arch/powerpc/sysdev/udbg_memcons.c
@@ -0,0 +1,105 @@
1/*
2 * A udbg backend which logs messages and reads input from in memory
3 * buffers.
4 *
5 * The console output can be read from memcons_output which is a
6 * circular buffer whose next write position is stored in memcons.output_pos.
7 *
8 * Input may be passed by writing into the memcons_input buffer when it is
9 * empty. The input buffer is empty when both input_pos == input_start and
10 * *input_start == '\0'.
11 *
12 * Copyright (C) 2003-2005 Anton Blanchard and Milton Miller, IBM Corp
13 * Copyright (C) 2013 Alistair Popple, IBM Corp
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation; either version
18 * 2 of the License, or (at your option) any later version.
19 */
20
21#include <linux/init.h>
22#include <linux/kernel.h>
23#include <asm/barrier.h>
24#include <asm/page.h>
25#include <asm/processor.h>
26#include <asm/udbg.h>
27
28struct memcons {
29 char *output_start;
30 char *output_pos;
31 char *output_end;
32 char *input_start;
33 char *input_pos;
34 char *input_end;
35};
36
37static char memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE];
38static char memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE];
39
40struct memcons memcons = {
41 .output_start = memcons_output,
42 .output_pos = memcons_output,
43 .output_end = &memcons_output[CONFIG_PPC_MEMCONS_OUTPUT_SIZE],
44 .input_start = memcons_input,
45 .input_pos = memcons_input,
46 .input_end = &memcons_input[CONFIG_PPC_MEMCONS_INPUT_SIZE],
47};
48
49void memcons_putc(char c)
50{
51 char *new_output_pos;
52
53 *memcons.output_pos = c;
54 wmb();
55 new_output_pos = memcons.output_pos + 1;
56 if (new_output_pos >= memcons.output_end)
57 new_output_pos = memcons.output_start;
58
59 memcons.output_pos = new_output_pos;
60}
61
62int memcons_getc_poll(void)
63{
64 char c;
65 char *new_input_pos;
66
67 if (*memcons.input_pos) {
68 c = *memcons.input_pos;
69
70 new_input_pos = memcons.input_pos + 1;
71 if (new_input_pos >= memcons.input_end)
72 new_input_pos = memcons.input_start;
73 else if (*new_input_pos == '\0')
74 new_input_pos = memcons.input_start;
75
76 *memcons.input_pos = '\0';
77 wmb();
78 memcons.input_pos = new_input_pos;
79 return c;
80 }
81
82 return -1;
83}
84
85int memcons_getc(void)
86{
87 int c;
88
89 while (1) {
90 c = memcons_getc_poll();
91 if (c == -1)
92 cpu_relax();
93 else
94 break;
95 }
96
97 return c;
98}
99
100void udbg_init_memcons(void)
101{
102 udbg_putc = memcons_putc;
103 udbg_getc = memcons_getc;
104 udbg_getc_poll = memcons_getc_poll;
105}
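
A userspace simulation of the input protocol the new file documents: the reader clears each byte as it consumes it and rewinds to the start of the buffer when it reaches the end or a NUL, and a writer may only refill the buffer once the read position is back at the start and the first byte is NUL. The buffer size and demo string are arbitrary.

#include <stdio.h>
#include <string.h>

#define IN_SIZE 8

static char input[IN_SIZE];
static char *input_pos = input;

static int getc_poll(void)
{
        char c;
        char *next;

        if (!*input_pos)
                return -1;              /* nothing pending */

        c = *input_pos;
        next = input_pos + 1;
        if (next >= input + IN_SIZE || !*next)
                next = input;           /* wrap, or rewind past the consumed tail */
        *input_pos = '\0';              /* mark the slot consumed */
        input_pos = next;
        return c;
}

static int input_empty(void)
{
        return input_pos == input && !input[0];
}

int main(void)
{
        int c;

        if (input_empty())
                memcpy(input, "ping", 4);       /* writer side: only refill when empty */

        while ((c = getc_poll()) != -1)
                putchar(c);
        putchar('\n');
        printf("empty again: %d\n", input_empty());
        return 0;
}
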
diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c
index f7e8609df0d5..39d72212655e 100644
--- a/arch/powerpc/sysdev/xics/ics-opal.c
+++ b/arch/powerpc/sysdev/xics/ics-opal.c
@@ -148,7 +148,7 @@ static int ics_opal_set_affinity(struct irq_data *d,
148 __func__, d->irq, hw_irq, server, rc); 148 __func__, d->irq, hw_irq, server, rc);
149 return -1; 149 return -1;
150 } 150 }
151 return 0; 151 return IRQ_SET_MASK_OK;
152} 152}
153 153
154static struct irq_chip ics_opal_irq_chip = { 154static struct irq_chip ics_opal_irq_chip = {
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2c9789da0e24..da183c5a103c 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -98,7 +98,6 @@ config S390
98 select CLONE_BACKWARDS2 98 select CLONE_BACKWARDS2
99 select GENERIC_CLOCKEVENTS 99 select GENERIC_CLOCKEVENTS
100 select GENERIC_CPU_DEVICES if !SMP 100 select GENERIC_CPU_DEVICES if !SMP
101 select GENERIC_KERNEL_THREAD
102 select GENERIC_SMP_IDLE_THREAD 101 select GENERIC_SMP_IDLE_THREAD
103 select GENERIC_TIME_VSYSCALL_OLD 102 select GENERIC_TIME_VSYSCALL_OLD
104 select HAVE_ALIGNED_STRUCT_PAGE if SLUB 103 select HAVE_ALIGNED_STRUCT_PAGE if SLUB
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index b7931faaef6d..bf246dae1367 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -9,11 +9,6 @@ struct dyn_arch_ftrace { };
9 9
10#define MCOUNT_ADDR ((long)_mcount) 10#define MCOUNT_ADDR ((long)_mcount)
11 11
12#ifdef CONFIG_64BIT
13#define MCOUNT_INSN_SIZE 12
14#else
15#define MCOUNT_INSN_SIZE 20
16#endif
17 12
18static inline unsigned long ftrace_call_adjust(unsigned long addr) 13static inline unsigned long ftrace_call_adjust(unsigned long addr)
19{ 14{
@@ -21,4 +16,11 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
21} 16}
22 17
23#endif /* __ASSEMBLY__ */ 18#endif /* __ASSEMBLY__ */
19
20#ifdef CONFIG_64BIT
21#define MCOUNT_INSN_SIZE 12
22#else
23#define MCOUNT_INSN_SIZE 22
24#endif
25
24#endif /* _ASM_S390_FTRACE_H */ 26#endif /* _ASM_S390_FTRACE_H */
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 75ce9b065f9f..5d64fb7619cc 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -32,7 +32,7 @@
32 32
33void storage_key_init_range(unsigned long start, unsigned long end); 33void storage_key_init_range(unsigned long start, unsigned long end);
34 34
35static unsigned long pfmf(unsigned long function, unsigned long address) 35static inline unsigned long pfmf(unsigned long function, unsigned long address)
36{ 36{
37 asm volatile( 37 asm volatile(
38 " .insn rre,0xb9af0000,%[function],%[address]" 38 " .insn rre,0xb9af0000,%[function],%[address]"
@@ -44,17 +44,13 @@ static unsigned long pfmf(unsigned long function, unsigned long address)
44 44
45static inline void clear_page(void *page) 45static inline void clear_page(void *page)
46{ 46{
47 if (MACHINE_HAS_PFMF) { 47 register unsigned long reg1 asm ("1") = 0;
48 pfmf(0x10000, (unsigned long)page); 48 register void *reg2 asm ("2") = page;
49 } else { 49 register unsigned long reg3 asm ("3") = 4096;
50 register unsigned long reg1 asm ("1") = 0; 50 asm volatile(
51 register void *reg2 asm ("2") = page; 51 " mvcl 2,0"
52 register unsigned long reg3 asm ("3") = 4096; 52 : "+d" (reg2), "+d" (reg3) : "d" (reg1)
53 asm volatile( 53 : "memory", "cc");
54 " mvcl 2,0"
55 : "+d" (reg2), "+d" (reg3) : "d" (reg1)
56 : "memory", "cc");
57 }
58} 54}
59 55
60static inline void copy_page(void *to, void *from) 56static inline void copy_page(void *to, void *from)
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4105b8221fdd..0f0de30e3e3f 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -306,7 +306,7 @@ extern unsigned long MODULES_END;
306#define RCP_HC_BIT 0x00200000UL 306#define RCP_HC_BIT 0x00200000UL
307#define RCP_GR_BIT 0x00040000UL 307#define RCP_GR_BIT 0x00040000UL
308#define RCP_GC_BIT 0x00020000UL 308#define RCP_GC_BIT 0x00020000UL
309#define RCP_IN_BIT 0x00008000UL /* IPTE notify bit */ 309#define RCP_IN_BIT 0x00002000UL /* IPTE notify bit */
310 310
311/* User dirty / referenced bit for KVM's migration feature */ 311/* User dirty / referenced bit for KVM's migration feature */
312#define KVM_UR_BIT 0x00008000UL 312#define KVM_UR_BIT 0x00008000UL
@@ -374,7 +374,7 @@ extern unsigned long MODULES_END;
374#define RCP_HC_BIT 0x0020000000000000UL 374#define RCP_HC_BIT 0x0020000000000000UL
375#define RCP_GR_BIT 0x0004000000000000UL 375#define RCP_GR_BIT 0x0004000000000000UL
376#define RCP_GC_BIT 0x0002000000000000UL 376#define RCP_GC_BIT 0x0002000000000000UL
377#define RCP_IN_BIT 0x0000800000000000UL /* IPTE notify bit */ 377#define RCP_IN_BIT 0x0000200000000000UL /* IPTE notify bit */
378 378
379/* User dirty / referenced bit for KVM's migration feature */ 379/* User dirty / referenced bit for KVM's migration feature */
380#define KVM_UR_BIT 0x0000800000000000UL 380#define KVM_UR_BIT 0x0000800000000000UL
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 7f4a4a8c847c..be87d3e05a5b 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1862,6 +1862,8 @@ void print_fn_code(unsigned char *code, unsigned long len)
 	while (len) {
 		ptr = buffer;
 		opsize = insn_length(*code);
+		if (opsize > len)
+			break;
 		ptr += sprintf(ptr, "%p: ", code);
 		for (i = 0; i < opsize; i++)
 			ptr += sprintf(ptr, "%02x", code[i]);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 78bdf0e5dff7..e3043aef87a9 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -16,12 +16,6 @@
 #include <trace/syscall.h>
 #include <asm/asm-offsets.h>
 
-#ifdef CONFIG_64BIT
-#define MCOUNT_OFFSET_RET 12
-#else
-#define MCOUNT_OFFSET_RET 22
-#endif
-
 #ifdef CONFIG_DYNAMIC_FTRACE
 
 void ftrace_disable_code(void);
@@ -155,9 +149,10 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
 
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		goto out;
+	ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
 	if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
 		goto out;
-	trace.func = (ip & PSW_ADDR_INSN) - MCOUNT_OFFSET_RET;
+	trace.func = ip;
 	/* Only trace if the calling function expects to. */
 	if (!ftrace_graph_entry(&trace)) {
 		current->curr_ret_stack--;
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4567ce20d900..08dcf21cb8df 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
 
 	.section .kprobes.text, "ax"
 
@@ -33,6 +34,7 @@ ENTRY(ftrace_caller)
 	la	%r2,0(%r14)
 	st	%r0,__SF_BACKCHAIN(%r15)
 	la	%r3,0(%r3)
+	ahi	%r2,-MCOUNT_INSN_SIZE
 	l	%r14,0b-0b(%r1)
 	l	%r14,0(%r14)
 	basr	%r14,%r14
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S
index 11332193db30..1c52eae3396a 100644
--- a/arch/s390/kernel/mcount64.S
+++ b/arch/s390/kernel/mcount64.S
@@ -7,6 +7,7 @@
 
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
+#include <asm/ftrace.h>
 
 	.section .kprobes.text, "ax"
 
@@ -29,6 +30,7 @@ ENTRY(ftrace_caller)
 	stg	%r1,__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r14
 	lg	%r3,168(%r15)
+	aghi	%r2,-MCOUNT_INSN_SIZE
 	larl	%r14,ftrace_trace_function
 	lg	%r14,0(%r14)
 	basr	%r14,%r14
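Both mcount trampolines now subtract MCOUNT_INSN_SIZE from the return address before handing it to the tracer, so prepare_ftrace_return() and the graph tracer agree on the call-site address. A small userspace model of that adjustment (addresses are made up for illustration):

#include <stdio.h>

#define MCOUNT_INSN_SIZE 12UL	/* 64-bit value from the hunks above; 22 on 31-bit */

/* The traced ip is the return address of the mcount call, backed up by the
 * size of the mcount call sequence.
 */
static unsigned long traced_ip(unsigned long mcount_return_address)
{
	return mcount_return_address - MCOUNT_INSN_SIZE;
}

int main(void)
{
	unsigned long ra = 0x1000a4cUL;	/* hypothetical return address */

	printf("graph tracer records %#lx\n", traced_ip(ra));
	return 0;
}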
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8074cb4b7cbf..05674b669001 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -645,7 +645,7 @@ static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info,
 			continue;
 		pcpu = pcpu_devices + cpu;
 		pcpu->address = info->cpu[i].address;
-		pcpu->state = (cpu >= info->configured) ?
+		pcpu->state = (i >= info->configured) ?
 			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
 		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
 		set_cpu_present(cpu, true);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7805ddca833d..18dc417aaf79 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -677,8 +677,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 			break;
 		}
 		/* Get the page mapped */
-		if (get_user_pages(current, gmap->mm, addr, 1, 1, 0,
-				   NULL, NULL) != 1) {
+		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
 			rc = -EFAULT;
 			break;
 		}
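The replacement call only has to resolve a write fault for one guest-mapped address; it does not need the page references that get_user_pages() takes. A sketch of the call pattern, assuming the 3.10-era fixup_user_fault() signature:

#include <linux/mm.h>
#include <linux/sched.h>

/* Resolve a write fault at addr in the given mm; returns 0 on success. */
static int gmap_make_writable(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long addr)
{
	return fixup_user_fault(tsk, mm, addr, FAULT_FLAG_WRITE);
}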
diff --git a/arch/score/mm/init.c b/arch/score/mm/init.c
index d8f988a37d16..0940682ab38b 100644
--- a/arch/score/mm/init.c
+++ b/arch/score/mm/init.c
@@ -41,8 +41,6 @@
 unsigned long empty_zero_page;
 EXPORT_SYMBOL_GPL(empty_zero_page);
 
-static struct kcore_list kcore_mem, kcore_vmalloc;
-
 static void setup_zero_page(void)
 {
 	struct page *page;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6a154a91c7e7..685692c94f05 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,7 +108,6 @@ config X86
 	select GENERIC_CLOCKEVENTS_BROADCAST	if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL		if X86_64
 	select KTIME_SCALAR			if X86_32
-	select ALWAYS_USE_PERSISTENT_CLOCK
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING		if X86_64
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index dab95a85f7f8..55b67614ed94 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -34,7 +34,7 @@
 extern pgd_t early_level4_pgt[PTRS_PER_PGD];
 extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
 static unsigned int __initdata next_early_pgt = 2;
-pmdval_t __initdata early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
+pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
 
 /* Wipe all early page tables except for the kernel symbol map */
 static void __init reset_early_page_tables(void)
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index d893e8ed8ac9..2e9e12871c2b 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -487,6 +487,7 @@ static inline void show_saved_mc(void)
 #endif
 
 #if defined(CONFIG_MICROCODE_INTEL_EARLY) && defined(CONFIG_HOTPLUG_CPU)
+static DEFINE_MUTEX(x86_cpu_microcode_mutex);
 /*
  * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
  * hot added or resumes.
@@ -507,7 +508,7 @@ int save_mc_for_early(u8 *mc)
 	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
 	 * hotplug.
 	 */
-	cpu_hotplug_driver_lock();
+	mutex_lock(&x86_cpu_microcode_mutex);
 
 	mc_saved_count_init = mc_saved_data.mc_saved_count;
 	mc_saved_count = mc_saved_data.mc_saved_count;
@@ -544,7 +545,7 @@ int save_mc_for_early(u8 *mc)
 	}
 
 out:
-	cpu_hotplug_driver_unlock();
+	mutex_unlock(&x86_cpu_microcode_mutex);
 
 	return ret;
 }
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 607af0d4d5ef..4e7a37ff03ab 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -312,6 +312,8 @@ void arch_cpu_idle(void)
 {
 	if (cpuidle_idle_call())
 		x86_idle();
+	else
+		local_irq_enable();
 }
 
 /*
@@ -368,9 +370,6 @@ void amd_e400_remove_cpu(int cpu)
  */
 static void amd_e400_idle(void)
 {
-	if (need_resched())
-		return;
-
 	if (!amd_e400_c1e_detected) {
 		u32 lo, hi;
 
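The common thread of both hunks is the generic idle loop's contract: arch_cpu_idle() is entered with interrupts disabled and must return with them enabled, and need_resched() is rechecked by the core rather than by the arch helper. A sketch of the resulting flow (the comments are interpretation, not taken from the patch):

void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())	/* cpuidle unavailable or failed */
		x86_idle();		/* x86 idle routines re-enable IRQs themselves */
	else
		local_irq_enable();	/* satisfy the "return with IRQs on" rule */
}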
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index fdc5dca14fb3..eaac1743def7 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -359,7 +359,17 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 }
 
 /*
- * would have hole in the middle or ends, and only ram parts will be mapped.
+ * We need to iterate through the E820 memory map and create direct mappings
+ * for only E820_RAM and E820_KERN_RESERVED regions. We cannot simply
+ * create direct mappings for all pfns from [0 to max_low_pfn) and
+ * [4GB to max_pfn) because of possible memory holes in high addresses
+ * that cannot be marked as UC by fixed/variable range MTRRs.
+ * Depending on the alignment of E820 ranges, this may possibly result
+ * in using smaller size (i.e. 4K instead of 2M or 1G) page tables.
+ *
+ * init_mem_mapping() calls init_range_memory_mapping() with big range.
+ * That range would have hole in the middle or ends, and only ram parts
+ * will be mapped in init_range_memory_mapping().
  */
 static unsigned long __init init_range_memory_mapping(
 	unsigned long r_start,
@@ -419,6 +429,13 @@ void __init init_mem_mapping(void)
 	max_pfn_mapped = 0; /* will get exact value next */
 	min_pfn_mapped = real_end >> PAGE_SHIFT;
 	last_start = start = real_end;
+
+	/*
+	 * We start from the top (end of memory) and go to the bottom.
+	 * The memblock_find_in_range() gets us a block of RAM from the
+	 * end of RAM in [min_pfn_mapped, max_pfn_mapped) used as new pages
+	 * for page table.
+	 */
 	while (last_start > ISA_END_ADDRESS) {
 		if (last_start > step_size) {
 			start = round_down(last_start - 1, step_size);
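The new comments describe a top-down sweep: each pass maps a chunk just below what is already mapped, so memblock can always find page-table pages inside mapped RAM. A userspace model of the loop (the constants and the step growth factor are made up for illustration):

#include <stdio.h>

#define ISA_END_ADDRESS 0x100000UL

static unsigned long round_down_ul(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

int main(void)
{
	unsigned long real_end = 0x80000000UL;	/* hypothetical top of RAM */
	unsigned long step_size = 0x200000UL;	/* hypothetical initial step */
	unsigned long last_start = real_end;
	unsigned long start;

	while (last_start > ISA_END_ADDRESS) {
		if (last_start > step_size) {
			start = round_down_ul(last_start - 1, step_size);
			if (start < ISA_END_ADDRESS)
				start = ISA_END_ADDRESS;
		} else {
			start = ISA_END_ADDRESS;
		}
		printf("map [%#lx, %#lx)\n", start, last_start);
		last_start = start;
		step_size <<= 5;	/* widen the window each round */
	}
	return 0;
}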
diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
index 0e0fabf17342..6eb18c42a28a 100644
--- a/arch/x86/pci/mrst.c
+++ b/arch/x86/pci/mrst.c
@@ -141,11 +141,6 @@ static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
  */
 static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 {
-	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
-				|| devfn == PCI_DEVFN(0, 0)
-				|| devfn == PCI_DEVFN(3, 0)))
-		return 1;
-
 	/* This is a workaround for A0 LNC bug where PCI status register does
 	 * not have new CAP bit set. can not be written by SW either.
 	 *
@@ -155,7 +150,10 @@ static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
 	 */
 	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
 		return 0;
-
+	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
+				|| devfn == PCI_DEVFN(0, 0)
+				|| devfn == PCI_DEVFN(3, 0)))
+		return 1;
 	return 0; /* langwell on others */
 }
 
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index ecb743bf05a5..536562c626a2 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -24,7 +24,7 @@ acpi-y += nvs.o
 # Power management related files
 acpi-y += wakeup.o
 acpi-y += sleep.o
-acpi-$(CONFIG_PM) += device_pm.o
+acpi-y += device_pm.o
 acpi-$(CONFIG_ACPI_SLEEP) += proc.o
 
 
@@ -38,7 +38,6 @@ acpi-y += processor_core.o
 acpi-y += ec.o
 acpi-$(CONFIG_ACPI_DOCK) += dock.o
 acpi-y += pci_root.o pci_link.o pci_irq.o
-acpi-y += csrt.o
 acpi-$(CONFIG_X86_INTEL_LPSS) += acpi_lpss.o
 acpi-y += acpi_platform.o
 acpi-y += power.o
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 00d2efd674df..4f4e741d34b2 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -28,6 +28,8 @@
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/types.h>
+#include <linux/dmi.h>
+#include <linux/delay.h>
 #ifdef CONFIG_ACPI_PROCFS_POWER
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -74,6 +76,8 @@ static int acpi_ac_resume(struct device *dev);
 #endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
+static int ac_sleep_before_get_state_ms;
+
 static struct acpi_driver acpi_ac_driver = {
 	.name = "ac",
 	.class = ACPI_AC_CLASS,
@@ -252,6 +256,16 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
 	case ACPI_AC_NOTIFY_STATUS:
 	case ACPI_NOTIFY_BUS_CHECK:
 	case ACPI_NOTIFY_DEVICE_CHECK:
+		/*
+		 * A buggy BIOS may notify AC first and then sleep for
+		 * a specific time before doing actual operations in the
+		 * EC event handler (_Qxx). This will cause the AC state
+		 * reported by the ACPI event to be incorrect, so wait for a
+		 * specific time for the EC event handler to make progress.
+		 */
+		if (ac_sleep_before_get_state_ms > 0)
+			msleep(ac_sleep_before_get_state_ms);
+
 		acpi_ac_get_state(ac);
 		acpi_bus_generate_proc_event(device, event, (u32) ac->state);
 		acpi_bus_generate_netlink_event(device->pnp.device_class,
@@ -264,6 +278,24 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
 	return;
 }
 
+static int thinkpad_e530_quirk(const struct dmi_system_id *d)
+{
+	ac_sleep_before_get_state_ms = 1000;
+	return 0;
+}
+
+static struct dmi_system_id ac_dmi_table[] = {
+	{
+	.callback = thinkpad_e530_quirk,
+	.ident = "thinkpad e530",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
+		},
+	},
+	{},
+};
+
 static int acpi_ac_add(struct acpi_device *device)
 {
 	int result = 0;
@@ -312,6 +344,7 @@ static int acpi_ac_add(struct acpi_device *device)
 		kfree(ac);
 	}
 
+	dmi_check_system(ac_dmi_table);
 	return result;
 }
 
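The quirk machinery above follows the usual DMI pattern: a table entry matches the affected machine, its callback records a delay, and the notify path honours that delay before reading the AC state. A condensed sketch of the same pattern (the identifiers here are illustrative, not part of the patch):

#include <linux/dmi.h>
#include <linux/delay.h>

static int quirk_delay_ms;

static int set_notify_delay(const struct dmi_system_id *d)
{
	quirk_delay_ms = 1000;	/* the value used for the ThinkPad E530 entry above */
	return 0;
}

static const struct dmi_system_id example_quirks[] = {
	{
		.callback = set_notify_delay,
		.ident = "Example machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
			DMI_MATCH(DMI_PRODUCT_NAME, "32597CG"),
		},
	},
	{}
};

/* At probe time:   dmi_check_system(example_quirks);
 * In the notifier: if (quirk_delay_ms) msleep(quirk_delay_ms);
 */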
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index b1c95422ce74..652fd5ce303c 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -35,11 +35,16 @@ ACPI_MODULE_NAME("acpi_lpss");
 
 struct lpss_device_desc {
 	bool clk_required;
-	const char *clk_parent;
+	const char *clkdev_name;
 	bool ltr_required;
 	unsigned int prv_offset;
 };
 
+static struct lpss_device_desc lpss_dma_desc = {
+	.clk_required = true,
+	.clkdev_name = "hclk",
+};
+
 struct lpss_private_data {
 	void __iomem *mmio_base;
 	resource_size_t mmio_size;
@@ -49,7 +54,6 @@ struct lpss_private_data {
 
 static struct lpss_device_desc lpt_dev_desc = {
 	.clk_required = true,
-	.clk_parent = "lpss_clk",
 	.prv_offset = 0x800,
 	.ltr_required = true,
 };
@@ -60,6 +64,9 @@ static struct lpss_device_desc lpt_sdio_dev_desc = {
 };
 
 static const struct acpi_device_id acpi_lpss_device_ids[] = {
+	/* Generic LPSS devices */
+	{ "INTL9C60", (unsigned long)&lpss_dma_desc },
+
 	/* Lynxpoint LPSS devices */
 	{ "INT33C0", (unsigned long)&lpt_dev_desc },
 	{ "INT33C1", (unsigned long)&lpt_dev_desc },
@@ -91,16 +98,27 @@ static int register_device_clock(struct acpi_device *adev,
 				 struct lpss_private_data *pdata)
 {
 	const struct lpss_device_desc *dev_desc = pdata->dev_desc;
+	struct lpss_clk_data *clk_data;
 
 	if (!lpss_clk_dev)
 		lpt_register_clock_device();
 
-	if (!dev_desc->clk_parent || !pdata->mmio_base
+	clk_data = platform_get_drvdata(lpss_clk_dev);
+	if (!clk_data)
+		return -ENODEV;
+
+	if (dev_desc->clkdev_name) {
+		clk_register_clkdev(clk_data->clk, dev_desc->clkdev_name,
+				    dev_name(&adev->dev));
+		return 0;
+	}
+
+	if (!pdata->mmio_base
 	    || pdata->mmio_size < dev_desc->prv_offset + LPSS_CLK_SIZE)
 		return -ENODATA;
 
 	pdata->clk = clk_register_gate(NULL, dev_name(&adev->dev),
-				       dev_desc->clk_parent, 0,
+				       clk_data->name, 0,
 				       pdata->mmio_base + dev_desc->prv_offset,
 				       0, 0, NULL);
 	if (IS_ERR(pdata->clk))
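With a clkdev_name set (the DMA controller's "hclk"), the LPSS code now registers a clock lookup against the shared LPSS root clock instead of creating a per-device gate. A minimal sketch of that registration path, assuming a root clock has already been obtained:

#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/device.h>

/* After this, the consumer driver can simply do clk_get(dev, "hclk"). */
static int add_hclk_lookup(struct clk *root_clk, struct device *dev)
{
	return clk_register_clkdev(root_clk, "hclk", dev_name(dev));
}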
diff --git a/drivers/acpi/csrt.c b/drivers/acpi/csrt.c
deleted file mode 100644
index 5c15a91faf0b..000000000000
--- a/drivers/acpi/csrt.c
+++ /dev/null
@@ -1,159 +0,0 @@
1/*
2 * Support for Core System Resources Table (CSRT)
3 *
4 * Copyright (C) 2013, Intel Corporation
5 * Authors: Mika Westerberg <mika.westerberg@linux.intel.com>
6 * Andy Shevchenko <andriy.shevchenko@linux.intel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#define pr_fmt(fmt) "ACPI: CSRT: " fmt
14
15#include <linux/acpi.h>
16#include <linux/device.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/platform_device.h>
20#include <linux/sizes.h>
21
22ACPI_MODULE_NAME("CSRT");
23
24static int __init acpi_csrt_parse_shared_info(struct platform_device *pdev,
25 const struct acpi_csrt_group *grp)
26{
27 const struct acpi_csrt_shared_info *si;
28 struct resource res[3];
29 size_t nres;
30 int ret;
31
32 memset(res, 0, sizeof(res));
33 nres = 0;
34
35 si = (const struct acpi_csrt_shared_info *)&grp[1];
36 /*
37 * The peripherals that are listed on CSRT typically support only
38 * 32-bit addresses so we only use the low part of MMIO base for
39 * now.
40 */
41 if (!si->mmio_base_high && si->mmio_base_low) {
42 /*
43 * There is no size of the memory resource in shared_info
44 * so we assume that it is 4k here.
45 */
46 res[nres].start = si->mmio_base_low;
47 res[nres].end = res[0].start + SZ_4K - 1;
48 res[nres++].flags = IORESOURCE_MEM;
49 }
50
51 if (si->gsi_interrupt) {
52 int irq = acpi_register_gsi(NULL, si->gsi_interrupt,
53 si->interrupt_mode,
54 si->interrupt_polarity);
55 res[nres].start = irq;
56 res[nres].end = irq;
57 res[nres++].flags = IORESOURCE_IRQ;
58 }
59
60 if (si->base_request_line || si->num_handshake_signals) {
61 /*
62 * We pass the driver a DMA resource describing the range
63 * of request lines the device supports.
64 */
65 res[nres].start = si->base_request_line;
66 res[nres].end = res[nres].start + si->num_handshake_signals - 1;
67 res[nres++].flags = IORESOURCE_DMA;
68 }
69
70 ret = platform_device_add_resources(pdev, res, nres);
71 if (ret) {
72 if (si->gsi_interrupt)
73 acpi_unregister_gsi(si->gsi_interrupt);
74 return ret;
75 }
76
77 return 0;
78}
79
80static int __init
81acpi_csrt_parse_resource_group(const struct acpi_csrt_group *grp)
82{
83 struct platform_device *pdev;
84 char vendor[5], name[16];
85 int ret, i;
86
87 vendor[0] = grp->vendor_id;
88 vendor[1] = grp->vendor_id >> 8;
89 vendor[2] = grp->vendor_id >> 16;
90 vendor[3] = grp->vendor_id >> 24;
91 vendor[4] = '\0';
92
93 if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
94 return -ENODEV;
95
96 snprintf(name, sizeof(name), "%s%04X", vendor, grp->device_id);
97 pdev = platform_device_alloc(name, PLATFORM_DEVID_AUTO);
98 if (!pdev)
99 return -ENOMEM;
100
101 /* Add resources based on the shared info */
102 ret = acpi_csrt_parse_shared_info(pdev, grp);
103 if (ret)
104 goto fail;
105
106 ret = platform_device_add(pdev);
107 if (ret)
108 goto fail;
109
110 for (i = 0; i < pdev->num_resources; i++)
111 dev_dbg(&pdev->dev, "%pR\n", &pdev->resource[i]);
112
113 return 0;
114
115fail:
116 platform_device_put(pdev);
117 return ret;
118}
119
120/*
121 * CSRT or Core System Resources Table is a proprietary ACPI table
122 * introduced by Microsoft. This table can contain devices that are not in
123 * the system DSDT table. In particular DMA controllers might be described
124 * here.
125 *
126 * We present these devices as normal platform devices that don't have ACPI
127 * IDs or handle. The platform device name will be something like
128 * <VENDOR><DEVID>.<n>.auto for example: INTL9C06.0.auto.
129 */
130void __init acpi_csrt_init(void)
131{
132 struct acpi_csrt_group *grp, *end;
133 struct acpi_table_csrt *csrt;
134 acpi_status status;
135 int ret;
136
137 status = acpi_get_table(ACPI_SIG_CSRT, 0,
138 (struct acpi_table_header **)&csrt);
139 if (ACPI_FAILURE(status)) {
140 if (status != AE_NOT_FOUND)
141 pr_warn("failed to get the CSRT table\n");
142 return;
143 }
144
145 pr_debug("parsing CSRT table for devices\n");
146
147 grp = (struct acpi_csrt_group *)(csrt + 1);
148 end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
149
150 while (grp < end) {
151 ret = acpi_csrt_parse_resource_group(grp);
152 if (ret) {
153 pr_warn("error in parsing resource group: %d\n", ret);
154 return;
155 }
156
157 grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
158 }
159}
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 96de787e6104..bc493aa3af19 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -37,68 +37,6 @@
37#define _COMPONENT ACPI_POWER_COMPONENT 37#define _COMPONENT ACPI_POWER_COMPONENT
38ACPI_MODULE_NAME("device_pm"); 38ACPI_MODULE_NAME("device_pm");
39 39
40static DEFINE_MUTEX(acpi_pm_notifier_lock);
41
42/**
43 * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
44 * @adev: ACPI device to add the notifier for.
45 * @context: Context information to pass to the notifier routine.
46 *
47 * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
48 * PM wakeup events. For example, wakeup events may be generated for bridges
49 * if one of the devices below the bridge is signaling wakeup, even if the
50 * bridge itself doesn't have a wakeup GPE associated with it.
51 */
52acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
53 acpi_notify_handler handler, void *context)
54{
55 acpi_status status = AE_ALREADY_EXISTS;
56
57 mutex_lock(&acpi_pm_notifier_lock);
58
59 if (adev->wakeup.flags.notifier_present)
60 goto out;
61
62 status = acpi_install_notify_handler(adev->handle,
63 ACPI_SYSTEM_NOTIFY,
64 handler, context);
65 if (ACPI_FAILURE(status))
66 goto out;
67
68 adev->wakeup.flags.notifier_present = true;
69
70 out:
71 mutex_unlock(&acpi_pm_notifier_lock);
72 return status;
73}
74
75/**
76 * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
77 * @adev: ACPI device to remove the notifier from.
78 */
79acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
80 acpi_notify_handler handler)
81{
82 acpi_status status = AE_BAD_PARAMETER;
83
84 mutex_lock(&acpi_pm_notifier_lock);
85
86 if (!adev->wakeup.flags.notifier_present)
87 goto out;
88
89 status = acpi_remove_notify_handler(adev->handle,
90 ACPI_SYSTEM_NOTIFY,
91 handler);
92 if (ACPI_FAILURE(status))
93 goto out;
94
95 adev->wakeup.flags.notifier_present = false;
96
97 out:
98 mutex_unlock(&acpi_pm_notifier_lock);
99 return status;
100}
101
102/** 40/**
103 * acpi_power_state_string - String representation of ACPI device power state. 41 * acpi_power_state_string - String representation of ACPI device power state.
104 * @state: ACPI device power state to return the string representation of. 42 * @state: ACPI device power state to return the string representation of.
@@ -385,6 +323,69 @@ bool acpi_bus_power_manageable(acpi_handle handle)
385} 323}
386EXPORT_SYMBOL(acpi_bus_power_manageable); 324EXPORT_SYMBOL(acpi_bus_power_manageable);
387 325
326#ifdef CONFIG_PM
327static DEFINE_MUTEX(acpi_pm_notifier_lock);
328
329/**
330 * acpi_add_pm_notifier - Register PM notifier for given ACPI device.
331 * @adev: ACPI device to add the notifier for.
332 * @context: Context information to pass to the notifier routine.
333 *
334 * NOTE: @adev need not be a run-wake or wakeup device to be a valid source of
335 * PM wakeup events. For example, wakeup events may be generated for bridges
336 * if one of the devices below the bridge is signaling wakeup, even if the
337 * bridge itself doesn't have a wakeup GPE associated with it.
338 */
339acpi_status acpi_add_pm_notifier(struct acpi_device *adev,
340 acpi_notify_handler handler, void *context)
341{
342 acpi_status status = AE_ALREADY_EXISTS;
343
344 mutex_lock(&acpi_pm_notifier_lock);
345
346 if (adev->wakeup.flags.notifier_present)
347 goto out;
348
349 status = acpi_install_notify_handler(adev->handle,
350 ACPI_SYSTEM_NOTIFY,
351 handler, context);
352 if (ACPI_FAILURE(status))
353 goto out;
354
355 adev->wakeup.flags.notifier_present = true;
356
357 out:
358 mutex_unlock(&acpi_pm_notifier_lock);
359 return status;
360}
361
362/**
363 * acpi_remove_pm_notifier - Unregister PM notifier from given ACPI device.
364 * @adev: ACPI device to remove the notifier from.
365 */
366acpi_status acpi_remove_pm_notifier(struct acpi_device *adev,
367 acpi_notify_handler handler)
368{
369 acpi_status status = AE_BAD_PARAMETER;
370
371 mutex_lock(&acpi_pm_notifier_lock);
372
373 if (!adev->wakeup.flags.notifier_present)
374 goto out;
375
376 status = acpi_remove_notify_handler(adev->handle,
377 ACPI_SYSTEM_NOTIFY,
378 handler);
379 if (ACPI_FAILURE(status))
380 goto out;
381
382 adev->wakeup.flags.notifier_present = false;
383
384 out:
385 mutex_unlock(&acpi_pm_notifier_lock);
386 return status;
387}
388
388bool acpi_bus_can_wakeup(acpi_handle handle) 389bool acpi_bus_can_wakeup(acpi_handle handle)
389{ 390{
390 struct acpi_device *device; 391 struct acpi_device *device;
@@ -1023,3 +1024,4 @@ void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
1023 mutex_unlock(&adev->physical_node_lock); 1024 mutex_unlock(&adev->physical_node_lock);
1024} 1025}
1025EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent); 1026EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
1027#endif /* CONFIG_PM */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index d45b2871d33b..edc00818c803 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -223,7 +223,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 static int ec_poll(struct acpi_ec *ec)
 {
 	unsigned long flags;
-	int repeat = 2; /* number of command restarts */
+	int repeat = 5; /* number of command restarts */
 	while (repeat--) {
 		unsigned long delay = jiffies +
 			msecs_to_jiffies(ec_delay);
@@ -241,8 +241,6 @@ static int ec_poll(struct acpi_ec *ec)
 			}
 			advance_transaction(ec, acpi_ec_read_status(ec));
 		} while (time_before(jiffies, delay));
-		if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
-			break;
 		pr_debug(PREFIX "controller reset, restart transaction\n");
 		spin_lock_irqsave(&ec->lock, flags);
 		start_transaction(ec);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 6f1afd9118c8..297cbf456f86 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -35,7 +35,6 @@ void acpi_pci_link_init(void);
 void acpi_pci_root_hp_init(void);
 void acpi_platform_init(void);
 int acpi_sysfs_init(void);
-void acpi_csrt_init(void);
 #ifdef CONFIG_ACPI_CONTAINER
 void acpi_container_init(void);
 #else
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 1dd6f6c85874..e427dc516c76 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -641,7 +641,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
 		/* bus enumerate */
 		printk(KERN_DEBUG "%s: Bus check notify on %s\n", __func__,
 				 (char *)buffer.pointer);
-		if (!root)
+		if (root)
+			acpiphp_check_host_bridge(handle);
+		else
 			handle_root_bridge_insertion(handle);
 
 		break;
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index bec717ffd25f..c266cdc11784 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -95,9 +95,6 @@ static const struct acpi_device_id processor_device_ids[] = {
 };
 MODULE_DEVICE_TABLE(acpi, processor_device_ids);
 
-static SIMPLE_DEV_PM_OPS(acpi_processor_pm,
-			 acpi_processor_suspend, acpi_processor_resume);
-
 static struct acpi_driver acpi_processor_driver = {
 	.name = "processor",
 	.class = ACPI_PROCESSOR_CLASS,
@@ -107,7 +104,6 @@ static struct acpi_driver acpi_processor_driver = {
 	.remove = acpi_processor_remove,
 	.notify = acpi_processor_notify,
 	},
-	.drv.pm = &acpi_processor_pm,
 };
 
 #define INSTALL_NOTIFY_HANDLER 1
@@ -934,6 +930,8 @@ static int __init acpi_processor_init(void)
 	if (result < 0)
 		return result;
 
+	acpi_processor_syscore_init();
+
 	acpi_processor_install_hotplug_notify();
 
 	acpi_thermal_cpufreq_init();
@@ -956,6 +954,8 @@ static void __exit acpi_processor_exit(void)
 
 	acpi_processor_uninstall_hotplug_notify();
 
+	acpi_processor_syscore_exit();
+
 	acpi_bus_unregister_driver(&acpi_processor_driver);
 
 	return;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index f0df2c9434d2..eb133c77aadb 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -34,6 +34,7 @@
 #include <linux/sched.h>       /* need_resched() */
 #include <linux/clockchips.h>
 #include <linux/cpuidle.h>
+#include <linux/syscore_ops.h>
 
 /*
  * Include the apic definitions for x86 to have the APIC timer related defines
@@ -210,33 +211,41 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr,
 
 #endif
 
+#ifdef CONFIG_PM_SLEEP
 static u32 saved_bm_rld;
 
-static void acpi_idle_bm_rld_save(void)
+int acpi_processor_suspend(void)
 {
 	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
+	return 0;
 }
-static void acpi_idle_bm_rld_restore(void)
+
+void acpi_processor_resume(void)
 {
 	u32 resumed_bm_rld;
 
 	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);
+	if (resumed_bm_rld == saved_bm_rld)
+		return;
 
-	if (resumed_bm_rld != saved_bm_rld)
-		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
+	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
 }
 
-int acpi_processor_suspend(struct device *dev)
+static struct syscore_ops acpi_processor_syscore_ops = {
+	.suspend = acpi_processor_suspend,
+	.resume = acpi_processor_resume,
+};
+
+void acpi_processor_syscore_init(void)
 {
-	acpi_idle_bm_rld_save();
-	return 0;
+	register_syscore_ops(&acpi_processor_syscore_ops);
 }
 
-int acpi_processor_resume(struct device *dev)
+void acpi_processor_syscore_exit(void)
 {
-	acpi_idle_bm_rld_restore();
-	return 0;
+	unregister_syscore_ops(&acpi_processor_syscore_ops);
 }
+#endif /* CONFIG_PM_SLEEP */
 
 #if defined(CONFIG_X86)
 static void tsc_check_state(int state)
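Moving the BM_RLD save/restore from dev_pm_ops to syscore ops changes when it runs: syscore callbacks execute on one CPU with interrupts disabled, after device suspend and before device resume, which suits state that is global to the platform rather than per-device. The general registration pattern looks like this (sketch):

#include <linux/syscore_ops.h>

static int example_suspend(void)
{
	/* save platform-global state here */
	return 0;
}

static void example_resume(void)
{
	/* restore platform-global state here */
}

static struct syscore_ops example_syscore_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};

static void example_init(void)
{
	register_syscore_ops(&example_syscore_ops);
}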
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index fe158fd4f1df..44225cb15f3a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1785,7 +1785,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
 	acpi_set_pnp_ids(handle, &pnp, type);
 
 	if (!pnp.type.hardware_id)
-		return;
+		goto out;
 
 	/*
 	 * This relies on the fact that acpi_install_notify_handler() will not
@@ -1800,6 +1800,7 @@ static void acpi_scan_init_hotplug(acpi_handle handle, int type)
 		}
 	}
 
+out:
 	acpi_free_pnp_ids(&pnp);
 }
 
@@ -2042,7 +2043,6 @@ int __init acpi_scan_init(void)
 	acpi_pci_link_init();
 	acpi_platform_init();
 	acpi_lpss_init();
-	acpi_csrt_init();
 	acpi_container_init();
 	acpi_memory_hotplug_init();
 
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index c3932d0876e0..5b32e15a65ce 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -456,6 +456,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
 		 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dm4 Notebook PC"),
 		},
 	},
+	{
+	 .callback = video_ignore_initial_backlight,
+	 .ident = "HP 1000 Notebook PC",
+	 .matches = {
+		DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
+		DMI_MATCH(DMI_PRODUCT_NAME, "HP 1000 Notebook PC"),
+		},
+	},
 	{}
 };
 
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 66f67626f02e..e6bd910bc6ed 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -161,6 +161,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
 		DMI_MATCH(DMI_PRODUCT_NAME, "UL30VT"),
 		},
 	},
+	{
+	.callback = video_detect_force_vendor,
+	.ident = "Asus UL30A",
+	.matches = {
+		DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+		DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
+		},
+	},
 	{ },
 };
 
diff --git a/drivers/ata/pata_ep93xx.c b/drivers/ata/pata_ep93xx.c
index c1bfaf43d109..980b88e109fc 100644
--- a/drivers/ata/pata_ep93xx.c
+++ b/drivers/ata/pata_ep93xx.c
@@ -933,11 +933,6 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
 	}
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!mem_res) {
-		err = -ENXIO;
-		goto err_rel_gpio;
-	}
-
 	ide_base = devm_ioremap_resource(&pdev->dev, mem_res);
 	if (IS_ERR(ide_base)) {
 		err = PTR_ERR(ide_base);
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 1a68f947ded8..d414331b480e 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -1295,6 +1295,7 @@ int subsys_virtual_register(struct bus_type *subsys,
 
 	return subsys_register(subsys, groups, virtual_dir);
 }
+EXPORT_SYMBOL_GPL(subsys_virtual_register);
 
 int __init buses_init(void)
 {
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 016312437577..2499cefdcdf2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -572,9 +572,11 @@ int device_create_file(struct device *dev,
 
 	if (dev) {
 		WARN(((attr->attr.mode & S_IWUGO) && !attr->store),
-			"Write permission without 'store'\n");
+			"Attribute %s: write permission without 'store'\n",
+			attr->attr.name);
 		WARN(((attr->attr.mode & S_IRUGO) && !attr->show),
-			"Read permission without 'show'\n");
+			"Attribute %s: read permission without 'show'\n",
+			attr->attr.name);
 		error = sysfs_create_file(&dev->kobj, &attr->attr);
 	}
 
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index 39c32529b833..5da914041305 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -61,24 +61,24 @@ EXPORT_SYMBOL_GPL(dev_pm_get_subsys_data);
 int dev_pm_put_subsys_data(struct device *dev)
 {
 	struct pm_subsys_data *psd;
-	int ret = 0;
+	int ret = 1;
 
 	spin_lock_irq(&dev->power.lock);
 
 	psd = dev_to_psd(dev);
-	if (!psd) {
-		ret = -EINVAL;
+	if (!psd)
 		goto out;
-	}
 
 	if (--psd->refcount == 0) {
 		dev->power.subsys_data = NULL;
-		kfree(psd);
-		ret = 1;
+	} else {
+		psd = NULL;
+		ret = 0;
 	}
 
  out:
 	spin_unlock_irq(&dev->power.lock);
+	kfree(psd);
 
 	return ret;
 }
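The rework makes dev_pm_put_subsys_data() return 1 both when it dropped the last reference and when there was nothing to drop, and defers the kfree() until after the spinlock is released. The general shape of that pattern (sketch; names are illustrative):

#include <linux/slab.h>
#include <linux/spinlock.h>

struct counted {
	int refcount;
};

static int put_counted(spinlock_t *lock, struct counted **slot)
{
	struct counted *victim;
	int ret = 1;

	spin_lock_irq(lock);
	victim = *slot;
	if (!victim)
		goto out;
	if (--victim->refcount == 0) {
		*slot = NULL;		/* last reference: free it below */
	} else {
		victim = NULL;		/* still referenced, free nothing */
		ret = 0;
	}
 out:
	spin_unlock_irq(lock);
	kfree(victim);			/* kfree(NULL) is a no-op */
	return ret;
}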
diff --git a/drivers/bcma/scan.c b/drivers/bcma/scan.c
index bca9c80056fe..8bffa5c9818c 100644
--- a/drivers/bcma/scan.c
+++ b/drivers/bcma/scan.c
@@ -84,6 +84,8 @@ static const struct bcma_device_id_name bcma_bcm_device_names[] = {
 	{ BCMA_CORE_I2S, "I2S" },
 	{ BCMA_CORE_SDR_DDR1_MEM_CTL, "SDR/DDR1 Memory Controller" },
 	{ BCMA_CORE_SHIM, "SHIM" },
+	{ BCMA_CORE_PCIE2, "PCIe Gen2" },
+	{ BCMA_CORE_ARM_CR4, "ARM CR4" },
 	{ BCMA_CORE_DEFAULT, "Default" },
 };
 
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index f1a29f8e9d33..9bf4371755f2 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 
 	spin_lock(&brd->brd_lock);
 	idx = sector >> PAGE_SECTORS_SHIFT;
+	page->index = idx;
 	if (radix_tree_insert(&brd->brd_pages, idx, page)) {
 		__free_page(page);
 		page = radix_tree_lookup(&brd->brd_pages, idx);
 		BUG_ON(!page);
 		BUG_ON(page->index != idx);
-	} else
-		page->index = idx;
+	}
 	spin_unlock(&brd->brd_lock);
 
 	radix_tree_preload_end();
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index ca63104136e0..d6d314027b5d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -55,6 +55,39 @@
 #define SECTOR_SHIFT	9
 #define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
 
+/*
+ * Increment the given counter and return its updated value.
+ * If the counter is already 0 it will not be incremented.
+ * If the counter is already at its maximum value returns
+ * -EINVAL without updating it.
+ */
+static int atomic_inc_return_safe(atomic_t *v)
+{
+	unsigned int counter;
+
+	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
+	if (counter <= (unsigned int)INT_MAX)
+		return (int)counter;
+
+	atomic_dec(v);
+
+	return -EINVAL;
+}
+
+/* Decrement the counter.  Return the resulting value, or -EINVAL */
+static int atomic_dec_return_safe(atomic_t *v)
+{
+	int counter;
+
+	counter = atomic_dec_return(v);
+	if (counter >= 0)
+		return counter;
+
+	atomic_inc(v);
+
+	return -EINVAL;
+}
+
 #define RBD_DRV_NAME "rbd"
 #define RBD_DRV_NAME_LONG "rbd (rados block device)"
 
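A hypothetical use of the two saturating helpers above (later in this patch they back the new parent_ref count): a "get" succeeds only while the counter is positive and unsaturated, and a "put" reports underflow instead of wrapping below zero. Sketch only; pr_warn() stands in for rbd_warn().

#include <linux/atomic.h>
#include <linux/printk.h>

static bool example_get(atomic_t *ref)
{
	return atomic_inc_return_safe(ref) > 0;
}

static void example_put(atomic_t *ref)
{
	if (atomic_dec_return_safe(ref) < 0)
		pr_warn("reference underflow\n");
}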
@@ -100,21 +133,20 @@
  * block device image metadata (in-memory version)
  */
 struct rbd_image_header {
-	/* These four fields never change for a given rbd image */
+	/* These six fields never change for a given rbd image */
 	char *object_prefix;
-	u64 features;
 	__u8 obj_order;
 	__u8 crypt_type;
 	__u8 comp_type;
+	u64 stripe_unit;
+	u64 stripe_count;
+	u64 features;		/* Might be changeable someday? */
 
 	/* The remaining fields need to be updated occasionally */
 	u64 image_size;
 	struct ceph_snap_context *snapc;
-	char *snap_names;
-	u64 *snap_sizes;
-
-	u64 stripe_unit;
-	u64 stripe_count;
+	char *snap_names;	/* format 1 only */
+	u64 *snap_sizes;	/* format 1 only */
 };
 
 /*
@@ -225,6 +257,7 @@ struct rbd_obj_request {
 		};
 	};
 	struct page	**copyup_pages;
+	u32		copyup_page_count;
 
 	struct ceph_osd_request	*osd_req;
 
@@ -257,6 +290,7 @@ struct rbd_img_request {
 		struct rbd_obj_request	*obj_request;	/* obj req initiator */
 	};
 	struct page	**copyup_pages;
+	u32		copyup_page_count;
 	spinlock_t	completion_lock;/* protects next_completion */
 	u32		next_completion;
 	rbd_img_callback_t	callback;
@@ -311,6 +345,7 @@ struct rbd_device {
 
 	struct rbd_spec		*parent_spec;
 	u64			parent_overlap;
+	atomic_t		parent_ref;
 	struct rbd_device	*parent;
 
 	/* protects updating the header */
@@ -359,7 +394,8 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf,
 		       size_t count);
 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
 			  size_t count);
-static int rbd_dev_image_probe(struct rbd_device *rbd_dev);
+static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
+static void rbd_spec_put(struct rbd_spec *spec);
 
 static struct bus_attribute rbd_bus_attrs[] = {
 	__ATTR(add, S_IWUSR, NULL, rbd_add),
@@ -426,7 +462,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
 
 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
-static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
+static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
 					u64 snap_id);
 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
@@ -726,88 +763,123 @@ static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
726} 763}
727 764
728/* 765/*
729 * Create a new header structure, translate header format from the on-disk 766 * Fill an rbd image header with information from the given format 1
730 * header. 767 * on-disk header.
731 */ 768 */
732static int rbd_header_from_disk(struct rbd_image_header *header, 769static int rbd_header_from_disk(struct rbd_device *rbd_dev,
733 struct rbd_image_header_ondisk *ondisk) 770 struct rbd_image_header_ondisk *ondisk)
734{ 771{
772 struct rbd_image_header *header = &rbd_dev->header;
773 bool first_time = header->object_prefix == NULL;
774 struct ceph_snap_context *snapc;
775 char *object_prefix = NULL;
776 char *snap_names = NULL;
777 u64 *snap_sizes = NULL;
735 u32 snap_count; 778 u32 snap_count;
736 size_t len;
737 size_t size; 779 size_t size;
780 int ret = -ENOMEM;
738 u32 i; 781 u32 i;
739 782
740 memset(header, 0, sizeof (*header)); 783 /* Allocate this now to avoid having to handle failure below */
741 784
742 snap_count = le32_to_cpu(ondisk->snap_count); 785 if (first_time) {
786 size_t len;
743 787
744 len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix)); 788 len = strnlen(ondisk->object_prefix,
745 header->object_prefix = kmalloc(len + 1, GFP_KERNEL); 789 sizeof (ondisk->object_prefix));
746 if (!header->object_prefix) 790 object_prefix = kmalloc(len + 1, GFP_KERNEL);
747 return -ENOMEM; 791 if (!object_prefix)
748 memcpy(header->object_prefix, ondisk->object_prefix, len); 792 return -ENOMEM;
749 header->object_prefix[len] = '\0'; 793 memcpy(object_prefix, ondisk->object_prefix, len);
794 object_prefix[len] = '\0';
795 }
750 796
797 /* Allocate the snapshot context and fill it in */
798
799 snap_count = le32_to_cpu(ondisk->snap_count);
800 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
801 if (!snapc)
802 goto out_err;
803 snapc->seq = le64_to_cpu(ondisk->snap_seq);
751 if (snap_count) { 804 if (snap_count) {
805 struct rbd_image_snap_ondisk *snaps;
752 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len); 806 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
753 807
754 /* Save a copy of the snapshot names */ 808 /* We'll keep a copy of the snapshot names... */
755 809
756 if (snap_names_len > (u64) SIZE_MAX) 810 if (snap_names_len > (u64)SIZE_MAX)
757 return -EIO; 811 goto out_2big;
758 header->snap_names = kmalloc(snap_names_len, GFP_KERNEL); 812 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
759 if (!header->snap_names) 813 if (!snap_names)
760 goto out_err; 814 goto out_err;
815
816 /* ...as well as the array of their sizes. */
817
818 size = snap_count * sizeof (*header->snap_sizes);
819 snap_sizes = kmalloc(size, GFP_KERNEL);
820 if (!snap_sizes)
821 goto out_err;
822
761 /* 823 /*
762 * Note that rbd_dev_v1_header_read() guarantees 824 * Copy the names, and fill in each snapshot's id
763 * the ondisk buffer we're working with has 825 * and size.
826 *
827 * Note that rbd_dev_v1_header_info() guarantees the
828 * ondisk buffer we're working with has
764 * snap_names_len bytes beyond the end of the 829 * snap_names_len bytes beyond the end of the
765 * snapshot id array, this memcpy() is safe. 830 * snapshot id array, this memcpy() is safe.
766 */ 831 */
767 memcpy(header->snap_names, &ondisk->snaps[snap_count], 832 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
768 snap_names_len); 833 snaps = ondisk->snaps;
834 for (i = 0; i < snap_count; i++) {
835 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
836 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
837 }
838 }
769 839
770 /* Record each snapshot's size */ 840 /* We won't fail any more, fill in the header */
771 841
772 size = snap_count * sizeof (*header->snap_sizes); 842 down_write(&rbd_dev->header_rwsem);
773 header->snap_sizes = kmalloc(size, GFP_KERNEL); 843 if (first_time) {
774 if (!header->snap_sizes) 844 header->object_prefix = object_prefix;
775 goto out_err; 845 header->obj_order = ondisk->options.order;
776 for (i = 0; i < snap_count; i++) 846 header->crypt_type = ondisk->options.crypt_type;
777 header->snap_sizes[i] = 847 header->comp_type = ondisk->options.comp_type;
778 le64_to_cpu(ondisk->snaps[i].image_size); 848 /* The rest aren't used for format 1 images */
849 header->stripe_unit = 0;
850 header->stripe_count = 0;
851 header->features = 0;
779 } else { 852 } else {
780 header->snap_names = NULL; 853 ceph_put_snap_context(header->snapc);
781 header->snap_sizes = NULL; 854 kfree(header->snap_names);
855 kfree(header->snap_sizes);
782 } 856 }
783 857
784 header->features = 0; /* No features support in v1 images */ 858 /* The remaining fields always get updated (when we refresh) */
785 header->obj_order = ondisk->options.order;
786 header->crypt_type = ondisk->options.crypt_type;
787 header->comp_type = ondisk->options.comp_type;
788
789 /* Allocate and fill in the snapshot context */
790 859
791 header->image_size = le64_to_cpu(ondisk->image_size); 860 header->image_size = le64_to_cpu(ondisk->image_size);
861 header->snapc = snapc;
862 header->snap_names = snap_names;
863 header->snap_sizes = snap_sizes;
792 864
793 header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); 865 /* Make sure mapping size is consistent with header info */
794 if (!header->snapc)
795 goto out_err;
796 header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
797 for (i = 0; i < snap_count; i++)
798 header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);
799 866
800 return 0; 867 if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
868 if (rbd_dev->mapping.size != header->image_size)
869 rbd_dev->mapping.size = header->image_size;
870
871 up_write(&rbd_dev->header_rwsem);
801 872
873 return 0;
874out_2big:
875 ret = -EIO;
802out_err: 876out_err:
803 kfree(header->snap_sizes); 877 kfree(snap_sizes);
804 header->snap_sizes = NULL; 878 kfree(snap_names);
805 kfree(header->snap_names); 879 ceph_put_snap_context(snapc);
806 header->snap_names = NULL; 880 kfree(object_prefix);
807 kfree(header->object_prefix);
808 header->object_prefix = NULL;
809 881
810 return -ENOMEM; 882 return ret;
811} 883}
812 884
813static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) 885static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
@@ -934,20 +1006,11 @@ static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
 
 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
 {
-	const char *snap_name = rbd_dev->spec->snap_name;
-	u64 snap_id;
+	u64 snap_id = rbd_dev->spec->snap_id;
 	u64 size = 0;
 	u64 features = 0;
 	int ret;
 
-	if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
-		snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
-		if (snap_id == CEPH_NOSNAP)
-			return -ENOENT;
-	} else {
-		snap_id = CEPH_NOSNAP;
-	}
-
 	ret = rbd_snap_size(rbd_dev, snap_id, &size);
 	if (ret)
 		return ret;
@@ -958,11 +1021,6 @@ static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
 	rbd_dev->mapping.size = size;
 	rbd_dev->mapping.features = features;
 
-	/* If we are mapping a snapshot it must be marked read-only */
-
-	if (snap_id != CEPH_NOSNAP)
-		rbd_dev->mapping.read_only = true;
-
 	return 0;
 }
 
@@ -970,14 +1028,6 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
 {
 	rbd_dev->mapping.size = 0;
 	rbd_dev->mapping.features = 0;
-	rbd_dev->mapping.read_only = true;
-}
-
-static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
-{
-	rbd_dev->mapping.size = 0;
-	rbd_dev->mapping.features = 0;
-	rbd_dev->mapping.read_only = true;
 }
 
 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
@@ -1342,20 +1392,18 @@ static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1342 kref_put(&obj_request->kref, rbd_obj_request_destroy); 1392 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1343} 1393}
1344 1394
1345static void rbd_img_request_get(struct rbd_img_request *img_request) 1395static bool img_request_child_test(struct rbd_img_request *img_request);
1346{ 1396static void rbd_parent_request_destroy(struct kref *kref);
1347 dout("%s: img %p (was %d)\n", __func__, img_request,
1348 atomic_read(&img_request->kref.refcount));
1349 kref_get(&img_request->kref);
1350}
1351
1352static void rbd_img_request_destroy(struct kref *kref); 1397static void rbd_img_request_destroy(struct kref *kref);
1353static void rbd_img_request_put(struct rbd_img_request *img_request) 1398static void rbd_img_request_put(struct rbd_img_request *img_request)
1354{ 1399{
1355 rbd_assert(img_request != NULL); 1400 rbd_assert(img_request != NULL);
1356 dout("%s: img %p (was %d)\n", __func__, img_request, 1401 dout("%s: img %p (was %d)\n", __func__, img_request,
1357 atomic_read(&img_request->kref.refcount)); 1402 atomic_read(&img_request->kref.refcount));
1358 kref_put(&img_request->kref, rbd_img_request_destroy); 1403 if (img_request_child_test(img_request))
1404 kref_put(&img_request->kref, rbd_parent_request_destroy);
1405 else
1406 kref_put(&img_request->kref, rbd_img_request_destroy);
1359} 1407}
1360 1408
1361static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, 1409static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
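
Note on the hunk above: the put path now picks its release function per object, so child (parent-read) image requests go through rbd_parent_request_destroy() while ordinary requests keep using rbd_img_request_destroy(). A minimal userspace C sketch of that pattern, with invented toy types rather than the rbd structures:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
        int refcount;           /* plain int: a single-threaded sketch */
        bool is_child;
        struct request *origin; /* object pinned by a child request */
};

static void request_destroy(struct request *req)
{
        printf("common destroy of %p\n", (void *)req);
        free(req);
}

static void child_request_destroy(struct request *req)
{
        /* child-specific teardown first: unpin the originator ... */
        req->origin = NULL;     /* a real put() on origin is elided here */
        req->is_child = false;
        /* ... then fall through to the common destroy path */
        request_destroy(req);
}

static void request_put(struct request *req)
{
        if (--req->refcount)
                return;
        if (req->is_child)
                child_request_destroy(req);
        else
                request_destroy(req);
}

int main(void)
{
        struct request *req = calloc(1, sizeof(*req));

        if (!req)
                return 1;
        req->refcount = 1;
        req->is_child = true;
        request_put(req);       /* routed to child_request_destroy() */
        return 0;
}

Routing everything through a single put() means callers never need to know which kind of request they are holding.
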
@@ -1472,6 +1520,12 @@ static void img_request_child_set(struct rbd_img_request *img_request)
1472 smp_mb(); 1520 smp_mb();
1473} 1521}
1474 1522
1523static void img_request_child_clear(struct rbd_img_request *img_request)
1524{
1525 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1526 smp_mb();
1527}
1528
1475static bool img_request_child_test(struct rbd_img_request *img_request) 1529static bool img_request_child_test(struct rbd_img_request *img_request)
1476{ 1530{
1477 smp_mb(); 1531 smp_mb();
@@ -1484,6 +1538,12 @@ static void img_request_layered_set(struct rbd_img_request *img_request)
1484 smp_mb(); 1538 smp_mb();
1485} 1539}
1486 1540
1541static void img_request_layered_clear(struct rbd_img_request *img_request)
1542{
1543 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1544 smp_mb();
1545}
1546
1487static bool img_request_layered_test(struct rbd_img_request *img_request) 1547static bool img_request_layered_test(struct rbd_img_request *img_request)
1488{ 1548{
1489 smp_mb(); 1549 smp_mb();
@@ -1827,6 +1887,74 @@ static void rbd_obj_request_destroy(struct kref *kref)
1827 kmem_cache_free(rbd_obj_request_cache, obj_request); 1887 kmem_cache_free(rbd_obj_request_cache, obj_request);
1828} 1888}
1829 1889
1890/* It's OK to call this for a device with no parent */
1891
1892static void rbd_spec_put(struct rbd_spec *spec);
1893static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1894{
1895 rbd_dev_remove_parent(rbd_dev);
1896 rbd_spec_put(rbd_dev->parent_spec);
1897 rbd_dev->parent_spec = NULL;
1898 rbd_dev->parent_overlap = 0;
1899}
1900
1901/*
1902 * Parent image reference counting is used to determine when an
1903 * image's parent fields can be safely torn down--after there are no
1904 * more in-flight requests to the parent image. When the last
1905 * reference is dropped, cleaning them up is safe.
1906 */
1907static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1908{
1909 int counter;
1910
1911 if (!rbd_dev->parent_spec)
1912 return;
1913
1914 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1915 if (counter > 0)
1916 return;
1917
1918 /* Last reference; clean up parent data structures */
1919
1920 if (!counter)
1921 rbd_dev_unparent(rbd_dev);
1922 else
1923 rbd_warn(rbd_dev, "parent reference underflow\n");
1924}
1925
1926/*
1927 * If an image has a non-zero parent overlap, get a reference to its
1928 * parent.
1929 *
1930 * We must get the reference before checking for the overlap to
1931 * coordinate properly with zeroing the parent overlap in
1932 * rbd_dev_v2_parent_info() when an image gets flattened. We
1933 * drop it again if there is no overlap.
1934 *
1935 * Returns true if the rbd device has a parent with a non-zero
1936 * overlap and a reference for it was successfully taken, or
1937 * false otherwise.
1938 */
1939static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1940{
1941 int counter;
1942
1943 if (!rbd_dev->parent_spec)
1944 return false;
1945
1946 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1947 if (counter > 0 && rbd_dev->parent_overlap)
1948 return true;
1949
1950 /* Image was flattened, but parent is not yet torn down */
1951
1952 if (counter < 0)
1953 rbd_warn(rbd_dev, "parent reference overflow\n");
1954
1955 return false;
1956}
1957
1830/* 1958/*
1831 * Caller is responsible for filling in the list of object requests 1959 * Caller is responsible for filling in the list of object requests
1832 * that comprises the image request, and the Linux request pointer 1960 * that comprises the image request, and the Linux request pointer
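
rbd_dev_parent_get()/rbd_dev_parent_put() lean on the rbd-local atomic_inc_return_safe()/atomic_dec_return_safe() helpers defined elsewhere in the file; the code above only relies on them returning the new count on success and a negative value once the counter has been pushed past its limits. A userspace analogue of that get/put scheme under the same assumption (C11 atomics, illustrative only, not the kernel helpers):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static int inc_return_safe(atomic_int *v)
{
        int old = atomic_load(v);

        while (old >= 0 && old < INT_MAX) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return old + 1;
        }
        return -1;              /* refused: at the limit or already negative */
}

static int dec_return_safe(atomic_int *v)
{
        int old = atomic_load(v);

        while (old > 0) {
                if (atomic_compare_exchange_weak(v, &old, old - 1))
                        return old - 1;
        }
        return -1;              /* would underflow; counter left at zero */
}

static atomic_int parent_ref;

static int parent_get(void)
{
        int counter = inc_return_safe(&parent_ref);

        if (counter > 0)
                return 1;       /* reference taken */
        if (counter < 0)
                puts("warning: parent reference overflow");
        return 0;
}

static void parent_put(void)
{
        int counter = dec_return_safe(&parent_ref);

        if (counter > 0)
                return;         /* still referenced elsewhere */
        if (counter == 0)
                puts("last reference dropped: tear down parent state");
        else
                puts("warning: parent reference underflow");
}

int main(void)
{
        atomic_store(&parent_ref, 1);   /* initial reference from probe */
        if (parent_get())
                parent_put();
        parent_put();                   /* drops the probe reference */
        return 0;
}
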
@@ -1835,8 +1963,7 @@ static void rbd_obj_request_destroy(struct kref *kref)
1835static struct rbd_img_request *rbd_img_request_create( 1963static struct rbd_img_request *rbd_img_request_create(
1836 struct rbd_device *rbd_dev, 1964 struct rbd_device *rbd_dev,
1837 u64 offset, u64 length, 1965 u64 offset, u64 length,
1838 bool write_request, 1966 bool write_request)
1839 bool child_request)
1840{ 1967{
1841 struct rbd_img_request *img_request; 1968 struct rbd_img_request *img_request;
1842 1969
@@ -1861,9 +1988,7 @@ static struct rbd_img_request *rbd_img_request_create(
1861 } else { 1988 } else {
1862 img_request->snap_id = rbd_dev->spec->snap_id; 1989 img_request->snap_id = rbd_dev->spec->snap_id;
1863 } 1990 }
1864 if (child_request) 1991 if (rbd_dev_parent_get(rbd_dev))
1865 img_request_child_set(img_request);
1866 if (rbd_dev->parent_spec)
1867 img_request_layered_set(img_request); 1992 img_request_layered_set(img_request);
1868 spin_lock_init(&img_request->completion_lock); 1993 spin_lock_init(&img_request->completion_lock);
1869 img_request->next_completion = 0; 1994 img_request->next_completion = 0;
@@ -1873,9 +1998,6 @@ static struct rbd_img_request *rbd_img_request_create(
1873 INIT_LIST_HEAD(&img_request->obj_requests); 1998 INIT_LIST_HEAD(&img_request->obj_requests);
1874 kref_init(&img_request->kref); 1999 kref_init(&img_request->kref);
1875 2000
1876 rbd_img_request_get(img_request); /* Avoid a warning */
1877 rbd_img_request_put(img_request); /* TEMPORARY */
1878
1879 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev, 2001 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1880 write_request ? "write" : "read", offset, length, 2002 write_request ? "write" : "read", offset, length,
1881 img_request); 2003 img_request);
@@ -1897,15 +2019,54 @@ static void rbd_img_request_destroy(struct kref *kref)
1897 rbd_img_obj_request_del(img_request, obj_request); 2019 rbd_img_obj_request_del(img_request, obj_request);
1898 rbd_assert(img_request->obj_request_count == 0); 2020 rbd_assert(img_request->obj_request_count == 0);
1899 2021
2022 if (img_request_layered_test(img_request)) {
2023 img_request_layered_clear(img_request);
2024 rbd_dev_parent_put(img_request->rbd_dev);
2025 }
2026
1900 if (img_request_write_test(img_request)) 2027 if (img_request_write_test(img_request))
1901 ceph_put_snap_context(img_request->snapc); 2028 ceph_put_snap_context(img_request->snapc);
1902 2029
1903 if (img_request_child_test(img_request))
1904 rbd_obj_request_put(img_request->obj_request);
1905
1906 kmem_cache_free(rbd_img_request_cache, img_request); 2030 kmem_cache_free(rbd_img_request_cache, img_request);
1907} 2031}
1908 2032
2033static struct rbd_img_request *rbd_parent_request_create(
2034 struct rbd_obj_request *obj_request,
2035 u64 img_offset, u64 length)
2036{
2037 struct rbd_img_request *parent_request;
2038 struct rbd_device *rbd_dev;
2039
2040 rbd_assert(obj_request->img_request);
2041 rbd_dev = obj_request->img_request->rbd_dev;
2042
2043 parent_request = rbd_img_request_create(rbd_dev->parent,
2044 img_offset, length, false);
2045 if (!parent_request)
2046 return NULL;
2047
2048 img_request_child_set(parent_request);
2049 rbd_obj_request_get(obj_request);
2050 parent_request->obj_request = obj_request;
2051
2052 return parent_request;
2053}
2054
2055static void rbd_parent_request_destroy(struct kref *kref)
2056{
2057 struct rbd_img_request *parent_request;
2058 struct rbd_obj_request *orig_request;
2059
2060 parent_request = container_of(kref, struct rbd_img_request, kref);
2061 orig_request = parent_request->obj_request;
2062
2063 parent_request->obj_request = NULL;
2064 rbd_obj_request_put(orig_request);
2065 img_request_child_clear(parent_request);
2066
2067 rbd_img_request_destroy(kref);
2068}
2069
1909static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) 2070static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1910{ 2071{
1911 struct rbd_img_request *img_request; 2072 struct rbd_img_request *img_request;
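
Both destroy callbacks above recover the request from its embedded kref with container_of(). A freestanding C version of that idiom (the kernel macro additionally type-checks the member via typeof; this sketch keeps only the pointer arithmetic):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int count; };

struct img_request {
        unsigned long long offset;
        unsigned long long length;
        struct kref kref;       /* embedded, not a pointer */
};

static void img_request_release(struct kref *kref)
{
        struct img_request *img = container_of(kref, struct img_request, kref);

        printf("releasing request %llu~%llu\n", img->offset, img->length);
}

int main(void)
{
        struct img_request req = { .offset = 4096, .length = 512,
                                   .kref = { .count = 1 } };

        img_request_release(&req.kref);
        return 0;
}
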
@@ -2114,7 +2275,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2114{ 2275{
2115 struct rbd_img_request *img_request; 2276 struct rbd_img_request *img_request;
2116 struct rbd_device *rbd_dev; 2277 struct rbd_device *rbd_dev;
2117 u64 length; 2278 struct page **pages;
2118 u32 page_count; 2279 u32 page_count;
2119 2280
2120 rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2281 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
@@ -2124,12 +2285,14 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2124 2285
2125 rbd_dev = img_request->rbd_dev; 2286 rbd_dev = img_request->rbd_dev;
2126 rbd_assert(rbd_dev); 2287 rbd_assert(rbd_dev);
2127 length = (u64)1 << rbd_dev->header.obj_order;
2128 page_count = (u32)calc_pages_for(0, length);
2129 2288
2130 rbd_assert(obj_request->copyup_pages); 2289 pages = obj_request->copyup_pages;
2131 ceph_release_page_vector(obj_request->copyup_pages, page_count); 2290 rbd_assert(pages != NULL);
2132 obj_request->copyup_pages = NULL; 2291 obj_request->copyup_pages = NULL;
2292 page_count = obj_request->copyup_page_count;
2293 rbd_assert(page_count);
2294 obj_request->copyup_page_count = 0;
2295 ceph_release_page_vector(pages, page_count);
2133 2296
2134 /* 2297 /*
2135 * We want the transfer count to reflect the size of the 2298 * We want the transfer count to reflect the size of the
@@ -2153,9 +2316,11 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2153 struct ceph_osd_client *osdc; 2316 struct ceph_osd_client *osdc;
2154 struct rbd_device *rbd_dev; 2317 struct rbd_device *rbd_dev;
2155 struct page **pages; 2318 struct page **pages;
2156 int result; 2319 u32 page_count;
2157 u64 obj_size; 2320 int img_result;
2158 u64 xferred; 2321 u64 parent_length;
2322 u64 offset;
2323 u64 length;
2159 2324
2160 rbd_assert(img_request_child_test(img_request)); 2325 rbd_assert(img_request_child_test(img_request));
2161 2326
@@ -2164,46 +2329,74 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2164 pages = img_request->copyup_pages; 2329 pages = img_request->copyup_pages;
2165 rbd_assert(pages != NULL); 2330 rbd_assert(pages != NULL);
2166 img_request->copyup_pages = NULL; 2331 img_request->copyup_pages = NULL;
2332 page_count = img_request->copyup_page_count;
2333 rbd_assert(page_count);
2334 img_request->copyup_page_count = 0;
2167 2335
2168 orig_request = img_request->obj_request; 2336 orig_request = img_request->obj_request;
2169 rbd_assert(orig_request != NULL); 2337 rbd_assert(orig_request != NULL);
2170 rbd_assert(orig_request->type == OBJ_REQUEST_BIO); 2338 rbd_assert(obj_request_type_valid(orig_request->type));
2171 result = img_request->result; 2339 img_result = img_request->result;
2172 obj_size = img_request->length; 2340 parent_length = img_request->length;
2173 xferred = img_request->xferred; 2341 rbd_assert(parent_length == img_request->xferred);
2342 rbd_img_request_put(img_request);
2174 2343
2175 rbd_dev = img_request->rbd_dev; 2344 rbd_assert(orig_request->img_request);
2345 rbd_dev = orig_request->img_request->rbd_dev;
2176 rbd_assert(rbd_dev); 2346 rbd_assert(rbd_dev);
2177 rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2178 2347
2179 rbd_img_request_put(img_request); 2348 /*
2349 * If the overlap has become 0 (most likely because the
2350 * image has been flattened) we need to free the pages
2351 * and re-submit the original write request.
2352 */
2353 if (!rbd_dev->parent_overlap) {
2354 struct ceph_osd_client *osdc;
2180 2355
2181 if (result) 2356 ceph_release_page_vector(pages, page_count);
2182 goto out_err; 2357 osdc = &rbd_dev->rbd_client->client->osdc;
2358 img_result = rbd_obj_request_submit(osdc, orig_request);
2359 if (!img_result)
2360 return;
2361 }
2183 2362
2184 /* Allocate the new copyup osd request for the original request */ 2363 if (img_result)
2364 goto out_err;
2185 2365
2186 result = -ENOMEM; 2366 /*
2187 rbd_assert(!orig_request->osd_req); 2367 * The original osd request is of no use to us any more.
2368 * We need a new one that can hold the two ops in a copyup
2369 * request. Allocate the new copyup osd request for the
2370 * original request, and release the old one.
2371 */
2372 img_result = -ENOMEM;
2188 osd_req = rbd_osd_req_create_copyup(orig_request); 2373 osd_req = rbd_osd_req_create_copyup(orig_request);
2189 if (!osd_req) 2374 if (!osd_req)
2190 goto out_err; 2375 goto out_err;
2376 rbd_osd_req_destroy(orig_request->osd_req);
2191 orig_request->osd_req = osd_req; 2377 orig_request->osd_req = osd_req;
2192 orig_request->copyup_pages = pages; 2378 orig_request->copyup_pages = pages;
2379 orig_request->copyup_page_count = page_count;
2193 2380
2194 /* Initialize the copyup op */ 2381 /* Initialize the copyup op */
2195 2382
2196 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); 2383 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2197 osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, 2384 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2198 false, false); 2385 false, false);
2199 2386
2200 /* Then the original write request op */ 2387 /* Then the original write request op */
2201 2388
2389 offset = orig_request->offset;
2390 length = orig_request->length;
2202 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, 2391 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2203 orig_request->offset, 2392 offset, length, 0, 0);
2204 orig_request->length, 0, 0); 2393 if (orig_request->type == OBJ_REQUEST_BIO)
2205 osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, 2394 osd_req_op_extent_osd_data_bio(osd_req, 1,
2206 orig_request->length); 2395 orig_request->bio_list, length);
2396 else
2397 osd_req_op_extent_osd_data_pages(osd_req, 1,
2398 orig_request->pages, length,
2399 offset & ~PAGE_MASK, false, false);
2207 2400
2208 rbd_osd_req_format_write(orig_request); 2401 rbd_osd_req_format_write(orig_request);
2209 2402
@@ -2211,13 +2404,13 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2211 2404
2212 orig_request->callback = rbd_img_obj_copyup_callback; 2405 orig_request->callback = rbd_img_obj_copyup_callback;
2213 osdc = &rbd_dev->rbd_client->client->osdc; 2406 osdc = &rbd_dev->rbd_client->client->osdc;
2214 result = rbd_obj_request_submit(osdc, orig_request); 2407 img_result = rbd_obj_request_submit(osdc, orig_request);
2215 if (!result) 2408 if (!img_result)
2216 return; 2409 return;
2217out_err: 2410out_err:
2218 /* Record the error code and complete the request */ 2411 /* Record the error code and complete the request */
2219 2412
2220 orig_request->result = result; 2413 orig_request->result = img_result;
2221 orig_request->xferred = 0; 2414 orig_request->xferred = 0;
2222 obj_request_done_set(orig_request); 2415 obj_request_done_set(orig_request);
2223 rbd_obj_request_complete(orig_request); 2416 rbd_obj_request_complete(orig_request);
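
A detail worth noting in the reworked callback: img_result, parent_length and orig_request are copied out of the image request before rbd_img_request_put() is called, and rbd_dev is re-derived from orig_request afterwards, so nothing dereferences the possibly-freed image request. The general shape of that capture-then-put ordering, with toy types (not the rbd structures):

#include <stdio.h>
#include <stdlib.h>

struct img_req {
        int refcount;
        int result;
        unsigned long long xferred;
        void *orig;             /* originating object, pinned separately */
};

static void img_req_put(struct img_req *img)
{
        if (--img->refcount == 0)
                free(img);
}

static void completion(struct img_req *img)
{
        /* capture everything we still need ... */
        int result = img->result;
        unsigned long long xferred = img->xferred;
        void *orig = img->orig;

        /* ... then drop the reference; img may be gone after this */
        img_req_put(img);

        printf("orig %p finished: result=%d xferred=%llu\n",
               orig, result, xferred);
}

int main(void)
{
        struct img_req *img = calloc(1, sizeof(*img));

        if (!img)
                return 1;
        img->refcount = 1;
        img->xferred = 4096;
        completion(img);
        return 0;
}
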
@@ -2249,7 +2442,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2249 int result; 2442 int result;
2250 2443
2251 rbd_assert(obj_request_img_data_test(obj_request)); 2444 rbd_assert(obj_request_img_data_test(obj_request));
2252 rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2445 rbd_assert(obj_request_type_valid(obj_request->type));
2253 2446
2254 img_request = obj_request->img_request; 2447 img_request = obj_request->img_request;
2255 rbd_assert(img_request != NULL); 2448 rbd_assert(img_request != NULL);
@@ -2257,15 +2450,6 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2257 rbd_assert(rbd_dev->parent != NULL); 2450 rbd_assert(rbd_dev->parent != NULL);
2258 2451
2259 /* 2452 /*
2260 * First things first. The original osd request is of no
2261 * use to use any more, we'll need a new one that can hold
2262 * the two ops in a copyup request. We'll get that later,
2263 * but for now we can release the old one.
2264 */
2265 rbd_osd_req_destroy(obj_request->osd_req);
2266 obj_request->osd_req = NULL;
2267
2268 /*
2269 * Determine the byte range covered by the object in the 2453 * Determine the byte range covered by the object in the
2270 * child image to which the original request was to be sent. 2454 * child image to which the original request was to be sent.
2271 */ 2455 */
@@ -2295,18 +2479,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2295 } 2479 }
2296 2480
2297 result = -ENOMEM; 2481 result = -ENOMEM;
2298 parent_request = rbd_img_request_create(rbd_dev->parent, 2482 parent_request = rbd_parent_request_create(obj_request,
2299 img_offset, length, 2483 img_offset, length);
2300 false, true);
2301 if (!parent_request) 2484 if (!parent_request)
2302 goto out_err; 2485 goto out_err;
2303 rbd_obj_request_get(obj_request);
2304 parent_request->obj_request = obj_request;
2305 2486
2306 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); 2487 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2307 if (result) 2488 if (result)
2308 goto out_err; 2489 goto out_err;
2309 parent_request->copyup_pages = pages; 2490 parent_request->copyup_pages = pages;
2491 parent_request->copyup_page_count = page_count;
2310 2492
2311 parent_request->callback = rbd_img_obj_parent_read_full_callback; 2493 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2312 result = rbd_img_request_submit(parent_request); 2494 result = rbd_img_request_submit(parent_request);
@@ -2314,6 +2496,7 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2314 return 0; 2496 return 0;
2315 2497
2316 parent_request->copyup_pages = NULL; 2498 parent_request->copyup_pages = NULL;
2499 parent_request->copyup_page_count = 0;
2317 parent_request->obj_request = NULL; 2500 parent_request->obj_request = NULL;
2318 rbd_obj_request_put(obj_request); 2501 rbd_obj_request_put(obj_request);
2319out_err: 2502out_err:
@@ -2331,6 +2514,7 @@ out_err:
2331static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) 2514static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2332{ 2515{
2333 struct rbd_obj_request *orig_request; 2516 struct rbd_obj_request *orig_request;
2517 struct rbd_device *rbd_dev;
2334 int result; 2518 int result;
2335 2519
2336 rbd_assert(!obj_request_img_data_test(obj_request)); 2520 rbd_assert(!obj_request_img_data_test(obj_request));
@@ -2353,8 +2537,21 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2353 obj_request->xferred, obj_request->length); 2537 obj_request->xferred, obj_request->length);
2354 rbd_obj_request_put(obj_request); 2538 rbd_obj_request_put(obj_request);
2355 2539
2356 rbd_assert(orig_request); 2540 /*
2357 rbd_assert(orig_request->img_request); 2541 * If the overlap has become 0 (most likely because the
2542 * image has been flattened) we need to free the pages
2543 * and re-submit the original write request.
2544 */
2545 rbd_dev = orig_request->img_request->rbd_dev;
2546 if (!rbd_dev->parent_overlap) {
2547 struct ceph_osd_client *osdc;
2548
2549 rbd_obj_request_put(orig_request);
2550 osdc = &rbd_dev->rbd_client->client->osdc;
2551 result = rbd_obj_request_submit(osdc, orig_request);
2552 if (!result)
2553 return;
2554 }
2358 2555
2359 /* 2556 /*
2360 * Our only purpose here is to determine whether the object 2557 * Our only purpose here is to determine whether the object
@@ -2512,14 +2709,36 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2512 struct rbd_obj_request *obj_request; 2709 struct rbd_obj_request *obj_request;
2513 struct rbd_device *rbd_dev; 2710 struct rbd_device *rbd_dev;
2514 u64 obj_end; 2711 u64 obj_end;
2712 u64 img_xferred;
2713 int img_result;
2515 2714
2516 rbd_assert(img_request_child_test(img_request)); 2715 rbd_assert(img_request_child_test(img_request));
2517 2716
2717 /* First get what we need from the image request and release it */
2718
2518 obj_request = img_request->obj_request; 2719 obj_request = img_request->obj_request;
2720 img_xferred = img_request->xferred;
2721 img_result = img_request->result;
2722 rbd_img_request_put(img_request);
2723
2724 /*
2725 * If the overlap has become 0 (most likely because the
2726 * image has been flattened) we need to re-submit the
2727 * original request.
2728 */
2519 rbd_assert(obj_request); 2729 rbd_assert(obj_request);
2520 rbd_assert(obj_request->img_request); 2730 rbd_assert(obj_request->img_request);
2731 rbd_dev = obj_request->img_request->rbd_dev;
2732 if (!rbd_dev->parent_overlap) {
2733 struct ceph_osd_client *osdc;
2734
2735 osdc = &rbd_dev->rbd_client->client->osdc;
2736 img_result = rbd_obj_request_submit(osdc, obj_request);
2737 if (!img_result)
2738 return;
2739 }
2521 2740
2522 obj_request->result = img_request->result; 2741 obj_request->result = img_result;
2523 if (obj_request->result) 2742 if (obj_request->result)
2524 goto out; 2743 goto out;
2525 2744
@@ -2532,7 +2751,6 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2532 */ 2751 */
2533 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); 2752 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2534 obj_end = obj_request->img_offset + obj_request->length; 2753 obj_end = obj_request->img_offset + obj_request->length;
2535 rbd_dev = obj_request->img_request->rbd_dev;
2536 if (obj_end > rbd_dev->parent_overlap) { 2754 if (obj_end > rbd_dev->parent_overlap) {
2537 u64 xferred = 0; 2755 u64 xferred = 0;
2538 2756
@@ -2540,43 +2758,39 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2540 xferred = rbd_dev->parent_overlap - 2758 xferred = rbd_dev->parent_overlap -
2541 obj_request->img_offset; 2759 obj_request->img_offset;
2542 2760
2543 obj_request->xferred = min(img_request->xferred, xferred); 2761 obj_request->xferred = min(img_xferred, xferred);
2544 } else { 2762 } else {
2545 obj_request->xferred = img_request->xferred; 2763 obj_request->xferred = img_xferred;
2546 } 2764 }
2547out: 2765out:
2548 rbd_img_request_put(img_request);
2549 rbd_img_obj_request_read_callback(obj_request); 2766 rbd_img_obj_request_read_callback(obj_request);
2550 rbd_obj_request_complete(obj_request); 2767 rbd_obj_request_complete(obj_request);
2551} 2768}
2552 2769
2553static void rbd_img_parent_read(struct rbd_obj_request *obj_request) 2770static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2554{ 2771{
2555 struct rbd_device *rbd_dev;
2556 struct rbd_img_request *img_request; 2772 struct rbd_img_request *img_request;
2557 int result; 2773 int result;
2558 2774
2559 rbd_assert(obj_request_img_data_test(obj_request)); 2775 rbd_assert(obj_request_img_data_test(obj_request));
2560 rbd_assert(obj_request->img_request != NULL); 2776 rbd_assert(obj_request->img_request != NULL);
2561 rbd_assert(obj_request->result == (s32) -ENOENT); 2777 rbd_assert(obj_request->result == (s32) -ENOENT);
2562 rbd_assert(obj_request->type == OBJ_REQUEST_BIO); 2778 rbd_assert(obj_request_type_valid(obj_request->type));
2563 2779
2564 rbd_dev = obj_request->img_request->rbd_dev;
2565 rbd_assert(rbd_dev->parent != NULL);
2566 /* rbd_read_finish(obj_request, obj_request->length); */ 2780 /* rbd_read_finish(obj_request, obj_request->length); */
2567 img_request = rbd_img_request_create(rbd_dev->parent, 2781 img_request = rbd_parent_request_create(obj_request,
2568 obj_request->img_offset, 2782 obj_request->img_offset,
2569 obj_request->length, 2783 obj_request->length);
2570 false, true);
2571 result = -ENOMEM; 2784 result = -ENOMEM;
2572 if (!img_request) 2785 if (!img_request)
2573 goto out_err; 2786 goto out_err;
2574 2787
2575 rbd_obj_request_get(obj_request); 2788 if (obj_request->type == OBJ_REQUEST_BIO)
2576 img_request->obj_request = obj_request; 2789 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2577 2790 obj_request->bio_list);
2578 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, 2791 else
2579 obj_request->bio_list); 2792 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2793 obj_request->pages);
2580 if (result) 2794 if (result)
2581 goto out_err; 2795 goto out_err;
2582 2796
@@ -2626,6 +2840,7 @@ out:
2626static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) 2840static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2627{ 2841{
2628 struct rbd_device *rbd_dev = (struct rbd_device *)data; 2842 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2843 int ret;
2629 2844
2630 if (!rbd_dev) 2845 if (!rbd_dev)
2631 return; 2846 return;
@@ -2633,7 +2848,9 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2633 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, 2848 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2634 rbd_dev->header_name, (unsigned long long)notify_id, 2849 rbd_dev->header_name, (unsigned long long)notify_id,
2635 (unsigned int)opcode); 2850 (unsigned int)opcode);
2636 (void)rbd_dev_refresh(rbd_dev); 2851 ret = rbd_dev_refresh(rbd_dev);
2852 if (ret)
2853 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2637 2854
2638 rbd_obj_notify_ack(rbd_dev, notify_id); 2855 rbd_obj_notify_ack(rbd_dev, notify_id);
2639} 2856}
@@ -2642,7 +2859,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2642 * Request sync osd watch/unwatch. The value of "start" determines 2859 * Request sync osd watch/unwatch. The value of "start" determines
2643 * whether a watch request is being initiated or torn down. 2860 * whether a watch request is being initiated or torn down.
2644 */ 2861 */
2645static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) 2862static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2646{ 2863{
2647 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; 2864 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2648 struct rbd_obj_request *obj_request; 2865 struct rbd_obj_request *obj_request;
@@ -2676,7 +2893,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2676 rbd_dev->watch_request->osd_req); 2893 rbd_dev->watch_request->osd_req);
2677 2894
2678 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, 2895 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2679 rbd_dev->watch_event->cookie, 0, start); 2896 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2680 rbd_osd_req_format_write(obj_request); 2897 rbd_osd_req_format_write(obj_request);
2681 2898
2682 ret = rbd_obj_request_submit(osdc, obj_request); 2899 ret = rbd_obj_request_submit(osdc, obj_request);
@@ -2869,9 +3086,16 @@ static void rbd_request_fn(struct request_queue *q)
2869 goto end_request; /* Shouldn't happen */ 3086 goto end_request; /* Shouldn't happen */
2870 } 3087 }
2871 3088
3089 result = -EIO;
3090 if (offset + length > rbd_dev->mapping.size) {
3091 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3092 offset, length, rbd_dev->mapping.size);
3093 goto end_request;
3094 }
3095
2872 result = -ENOMEM; 3096 result = -ENOMEM;
2873 img_request = rbd_img_request_create(rbd_dev, offset, length, 3097 img_request = rbd_img_request_create(rbd_dev, offset, length,
2874 write_request, false); 3098 write_request);
2875 if (!img_request) 3099 if (!img_request)
2876 goto end_request; 3100 goto end_request;
2877 3101
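
The new end-of-device check rejects any request whose range extends past mapping.size. With sector-derived u64 values an actual wrap of offset + length is not a practical concern here, but the wrap-proof way to phrase the same test avoids the addition altogether; a standalone sketch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool range_ok(uint64_t offset, uint64_t length, uint64_t size)
{
        /* equivalent to offset + length <= size, but cannot wrap */
        return offset <= size && length <= size - offset;
}

int main(void)
{
        printf("%d\n", range_ok(4096, 512, 1 << 20));     /* 1: inside */
        printf("%d\n", range_ok(UINT64_MAX, 2, 1 << 20)); /* 0: rejected */
        return 0;
}
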
@@ -3022,17 +3246,11 @@ out:
3022} 3246}
3023 3247
3024/* 3248/*
3025 * Read the complete header for the given rbd device. 3249 * Read the complete header for the given rbd device. On successful
3026 * 3250 * return, the rbd_dev->header field will contain up-to-date
3027 * Returns a pointer to a dynamically-allocated buffer containing 3251 * information about the image.
3028 * the complete and validated header. Caller can pass the address
3029 * of a variable that will be filled in with the version of the
3030 * header object at the time it was read.
3031 *
3032 * Returns a pointer-coded errno if a failure occurs.
3033 */ 3252 */
3034static struct rbd_image_header_ondisk * 3253static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3035rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3036{ 3254{
3037 struct rbd_image_header_ondisk *ondisk = NULL; 3255 struct rbd_image_header_ondisk *ondisk = NULL;
3038 u32 snap_count = 0; 3256 u32 snap_count = 0;
@@ -3057,22 +3275,22 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3057 size += names_size; 3275 size += names_size;
3058 ondisk = kmalloc(size, GFP_KERNEL); 3276 ondisk = kmalloc(size, GFP_KERNEL);
3059 if (!ondisk) 3277 if (!ondisk)
3060 return ERR_PTR(-ENOMEM); 3278 return -ENOMEM;
3061 3279
3062 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, 3280 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3063 0, size, ondisk); 3281 0, size, ondisk);
3064 if (ret < 0) 3282 if (ret < 0)
3065 goto out_err; 3283 goto out;
3066 if ((size_t)ret < size) { 3284 if ((size_t)ret < size) {
3067 ret = -ENXIO; 3285 ret = -ENXIO;
3068 rbd_warn(rbd_dev, "short header read (want %zd got %d)", 3286 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3069 size, ret); 3287 size, ret);
3070 goto out_err; 3288 goto out;
3071 } 3289 }
3072 if (!rbd_dev_ondisk_valid(ondisk)) { 3290 if (!rbd_dev_ondisk_valid(ondisk)) {
3073 ret = -ENXIO; 3291 ret = -ENXIO;
3074 rbd_warn(rbd_dev, "invalid header"); 3292 rbd_warn(rbd_dev, "invalid header");
3075 goto out_err; 3293 goto out;
3076 } 3294 }
3077 3295
3078 names_size = le64_to_cpu(ondisk->snap_names_len); 3296 names_size = le64_to_cpu(ondisk->snap_names_len);
@@ -3080,85 +3298,13 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3080 snap_count = le32_to_cpu(ondisk->snap_count); 3298 snap_count = le32_to_cpu(ondisk->snap_count);
3081 } while (snap_count != want_count); 3299 } while (snap_count != want_count);
3082 3300
3083 return ondisk; 3301 ret = rbd_header_from_disk(rbd_dev, ondisk);
3084 3302out:
3085out_err:
3086 kfree(ondisk);
3087
3088 return ERR_PTR(ret);
3089}
3090
3091/*
3092 * reload the ondisk the header
3093 */
3094static int rbd_read_header(struct rbd_device *rbd_dev,
3095 struct rbd_image_header *header)
3096{
3097 struct rbd_image_header_ondisk *ondisk;
3098 int ret;
3099
3100 ondisk = rbd_dev_v1_header_read(rbd_dev);
3101 if (IS_ERR(ondisk))
3102 return PTR_ERR(ondisk);
3103 ret = rbd_header_from_disk(header, ondisk);
3104 kfree(ondisk); 3303 kfree(ondisk);
3105 3304
3106 return ret; 3305 return ret;
3107} 3306}
3108 3307
3109static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3110{
3111 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3112 return;
3113
3114 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
3115 sector_t size;
3116
3117 rbd_dev->mapping.size = rbd_dev->header.image_size;
3118 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3119 dout("setting size to %llu sectors", (unsigned long long)size);
3120 set_capacity(rbd_dev->disk, size);
3121 }
3122}
3123
3124/*
3125 * only read the first part of the ondisk header, without the snaps info
3126 */
3127static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3128{
3129 int ret;
3130 struct rbd_image_header h;
3131
3132 ret = rbd_read_header(rbd_dev, &h);
3133 if (ret < 0)
3134 return ret;
3135
3136 down_write(&rbd_dev->header_rwsem);
3137
3138 /* Update image size, and check for resize of mapped image */
3139 rbd_dev->header.image_size = h.image_size;
3140 rbd_update_mapping_size(rbd_dev);
3141
3142 /* rbd_dev->header.object_prefix shouldn't change */
3143 kfree(rbd_dev->header.snap_sizes);
3144 kfree(rbd_dev->header.snap_names);
3145 /* osd requests may still refer to snapc */
3146 ceph_put_snap_context(rbd_dev->header.snapc);
3147
3148 rbd_dev->header.image_size = h.image_size;
3149 rbd_dev->header.snapc = h.snapc;
3150 rbd_dev->header.snap_names = h.snap_names;
3151 rbd_dev->header.snap_sizes = h.snap_sizes;
3152 /* Free the extra copy of the object prefix */
3153 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3154 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3155 kfree(h.object_prefix);
3156
3157 up_write(&rbd_dev->header_rwsem);
3158
3159 return ret;
3160}
3161
3162/* 3308/*
3163 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to 3309 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3164 * has disappeared from the (just updated) snapshot context. 3310 * has disappeared from the (just updated) snapshot context.
@@ -3180,26 +3326,29 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
3180 3326
3181static int rbd_dev_refresh(struct rbd_device *rbd_dev) 3327static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3182{ 3328{
3183 u64 image_size; 3329 u64 mapping_size;
3184 int ret; 3330 int ret;
3185 3331
3186 rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); 3332 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3187 image_size = rbd_dev->header.image_size; 3333 mapping_size = rbd_dev->mapping.size;
3188 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); 3334 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3189 if (rbd_dev->image_format == 1) 3335 if (rbd_dev->image_format == 1)
3190 ret = rbd_dev_v1_refresh(rbd_dev); 3336 ret = rbd_dev_v1_header_info(rbd_dev);
3191 else 3337 else
3192 ret = rbd_dev_v2_refresh(rbd_dev); 3338 ret = rbd_dev_v2_header_info(rbd_dev);
3193 3339
3194 /* If it's a mapped snapshot, validate its EXISTS flag */ 3340 /* If it's a mapped snapshot, validate its EXISTS flag */
3195 3341
3196 rbd_exists_validate(rbd_dev); 3342 rbd_exists_validate(rbd_dev);
3197 mutex_unlock(&ctl_mutex); 3343 mutex_unlock(&ctl_mutex);
3198 if (ret) 3344 if (mapping_size != rbd_dev->mapping.size) {
3199 rbd_warn(rbd_dev, "got notification but failed to " 3345 sector_t size;
3200 " update snaps: %d\n", ret); 3346
3201 if (image_size != rbd_dev->header.image_size) 3347 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3348 dout("setting size to %llu sectors", (unsigned long long)size);
3349 set_capacity(rbd_dev->disk, size);
3202 revalidate_disk(rbd_dev->disk); 3350 revalidate_disk(rbd_dev->disk);
3351 }
3203 3352
3204 return ret; 3353 return ret;
3205} 3354}
@@ -3403,6 +3552,8 @@ static ssize_t rbd_image_refresh(struct device *dev,
3403 int ret; 3552 int ret;
3404 3553
3405 ret = rbd_dev_refresh(rbd_dev); 3554 ret = rbd_dev_refresh(rbd_dev);
3555 if (ret)
3556 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3406 3557
3407 return ret < 0 ? ret : size; 3558 return ret < 0 ? ret : size;
3408} 3559}
@@ -3501,6 +3652,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3501 3652
3502 spin_lock_init(&rbd_dev->lock); 3653 spin_lock_init(&rbd_dev->lock);
3503 rbd_dev->flags = 0; 3654 rbd_dev->flags = 0;
3655 atomic_set(&rbd_dev->parent_ref, 0);
3504 INIT_LIST_HEAD(&rbd_dev->node); 3656 INIT_LIST_HEAD(&rbd_dev->node);
3505 init_rwsem(&rbd_dev->header_rwsem); 3657 init_rwsem(&rbd_dev->header_rwsem);
3506 3658
@@ -3650,6 +3802,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3650 __le64 snapid; 3802 __le64 snapid;
3651 void *p; 3803 void *p;
3652 void *end; 3804 void *end;
3805 u64 pool_id;
3653 char *image_id; 3806 char *image_id;
3654 u64 overlap; 3807 u64 overlap;
3655 int ret; 3808 int ret;
@@ -3680,18 +3833,37 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3680 p = reply_buf; 3833 p = reply_buf;
3681 end = reply_buf + ret; 3834 end = reply_buf + ret;
3682 ret = -ERANGE; 3835 ret = -ERANGE;
3683 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); 3836 ceph_decode_64_safe(&p, end, pool_id, out_err);
3684 if (parent_spec->pool_id == CEPH_NOPOOL) 3837 if (pool_id == CEPH_NOPOOL) {
3838 /*
3839 * Either the parent never existed, or we have a
3840 * record of it but the image got flattened so it no
3841 * longer has a parent. When the parent of a
3842 * layered image disappears we immediately set the
3843 * overlap to 0. The effect of this is that all new
3844 * requests will be treated as if the image had no
3845 * parent.
3846 */
3847 if (rbd_dev->parent_overlap) {
3848 rbd_dev->parent_overlap = 0;
3849 smp_mb();
3850 rbd_dev_parent_put(rbd_dev);
3851 pr_info("%s: clone image has been flattened\n",
3852 rbd_dev->disk->disk_name);
3853 }
3854
3685 goto out; /* No parent? No problem. */ 3855 goto out; /* No parent? No problem. */
3856 }
3686 3857
3687 /* The ceph file layout needs to fit pool id in 32 bits */ 3858 /* The ceph file layout needs to fit pool id in 32 bits */
3688 3859
3689 ret = -EIO; 3860 ret = -EIO;
3690 if (parent_spec->pool_id > (u64)U32_MAX) { 3861 if (pool_id > (u64)U32_MAX) {
3691 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", 3862 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3692 (unsigned long long)parent_spec->pool_id, U32_MAX); 3863 (unsigned long long)pool_id, U32_MAX);
3693 goto out_err; 3864 goto out_err;
3694 } 3865 }
3866 parent_spec->pool_id = pool_id;
3695 3867
3696 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); 3868 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3697 if (IS_ERR(image_id)) { 3869 if (IS_ERR(image_id)) {
@@ -3702,9 +3874,14 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3702 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err); 3874 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3703 ceph_decode_64_safe(&p, end, overlap, out_err); 3875 ceph_decode_64_safe(&p, end, overlap, out_err);
3704 3876
3705 rbd_dev->parent_overlap = overlap; 3877 if (overlap) {
3706 rbd_dev->parent_spec = parent_spec; 3878 rbd_spec_put(rbd_dev->parent_spec);
3707 parent_spec = NULL; /* rbd_dev now owns this */ 3879 rbd_dev->parent_spec = parent_spec;
3880 parent_spec = NULL; /* rbd_dev now owns this */
3881 rbd_dev->parent_overlap = overlap;
3882 } else {
3883 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3884 }
3708out: 3885out:
3709 ret = 0; 3886 ret = 0;
3710out_err: 3887out_err:
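
The parent info above is pulled out of the reply buffer with the ceph_decode_*_safe() helpers, which jump to the error label when the buffer is too short for the next field. A plain C sketch of that style of bounds-checked little-endian decode; the two-field layout is simplified for illustration and is not the full ceph encoding:

#include <stdint.h>
#include <stdio.h>

/* Decode a little-endian u64, failing if fewer than 8 bytes remain. */
static int decode_u64(const uint8_t **p, const uint8_t *end, uint64_t *out)
{
        uint64_t v = 0;

        if (end - *p < 8)
                return -1;      /* the kernel code returns -ERANGE here */
        for (int i = 0; i < 8; i++)
                v |= (uint64_t)(*p)[i] << (8 * i);
        *p += 8;
        *out = v;
        return 0;
}

int main(void)
{
        uint8_t buf[16] = { 0 };
        const uint8_t *p = buf, *end = buf + sizeof(buf);
        uint64_t pool_id, snap_id;

        buf[0] = 3;             /* pool_id = 3, snap_id = 0 */
        if (decode_u64(&p, end, &pool_id) || decode_u64(&p, end, &snap_id))
                return 1;
        printf("pool %llu snap %llu\n",
               (unsigned long long)pool_id, (unsigned long long)snap_id);
        return 0;
}
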
@@ -4002,6 +4179,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4002 for (i = 0; i < snap_count; i++) 4179 for (i = 0; i < snap_count; i++)
4003 snapc->snaps[i] = ceph_decode_64(&p); 4180 snapc->snaps[i] = ceph_decode_64(&p);
4004 4181
4182 ceph_put_snap_context(rbd_dev->header.snapc);
4005 rbd_dev->header.snapc = snapc; 4183 rbd_dev->header.snapc = snapc;
4006 4184
4007 dout(" snap context seq = %llu, snap_count = %u\n", 4185 dout(" snap context seq = %llu, snap_count = %u\n",
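
The added ceph_put_snap_context() line is the usual fix when a refcounted pointer is replaced: drop the reference to the old object before installing the new one, otherwise the previous snapshot context is leaked on every refresh. The generic shape of that step, with a toy refcounted type standing in for the real snap context:

#include <stdlib.h>

struct snapc {
        int refcount;
        /* snapshot ids would live here */
};

static void snapc_put(struct snapc *sc)
{
        if (sc && --sc->refcount == 0)
                free(sc);
}

static void install_snapc(struct snapc **slot, struct snapc *new_sc)
{
        snapc_put(*slot);       /* old context; may be NULL on first use */
        *slot = new_sc;         /* ownership of new_sc moves to the slot */
}

int main(void)
{
        struct snapc *slot = NULL;
        struct snapc *a = calloc(1, sizeof(*a));
        struct snapc *b = calloc(1, sizeof(*b));

        if (!a || !b) {
                free(a);
                free(b);
                return 1;
        }
        a->refcount = b->refcount = 1;
        install_snapc(&slot, a);        /* nothing to drop yet */
        install_snapc(&slot, b);        /* a is freed here */
        snapc_put(slot);                /* b is freed here */
        return 0;
}
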
@@ -4053,21 +4231,56 @@ out:
4053 return snap_name; 4231 return snap_name;
4054} 4232}
4055 4233
4056static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) 4234static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4057{ 4235{
4236 bool first_time = rbd_dev->header.object_prefix == NULL;
4058 int ret; 4237 int ret;
4059 4238
4060 down_write(&rbd_dev->header_rwsem); 4239 down_write(&rbd_dev->header_rwsem);
4061 4240
4241 if (first_time) {
4242 ret = rbd_dev_v2_header_onetime(rbd_dev);
4243 if (ret)
4244 goto out;
4245 }
4246
4247 /*
4248 * If the image supports layering, get the parent info. We
4249 * need to probe the first time regardless. Thereafter we
4250 * only need to probe if there's a parent, to see if it has
4251 * disappeared due to the mapped image getting flattened.
4252 */
4253 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4254 (first_time || rbd_dev->parent_spec)) {
4255 bool warn;
4256
4257 ret = rbd_dev_v2_parent_info(rbd_dev);
4258 if (ret)
4259 goto out;
4260
4261 /*
4262 * Print a warning if this is the initial probe and
4263 * the image has a parent. Don't print it if the
4264 * image now being probed is itself a parent. We
4265 * can tell at this point because we won't know its
4266 * pool name yet (just its pool id).
4267 */
4268 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4269 if (first_time && warn)
4270 rbd_warn(rbd_dev, "WARNING: kernel layering "
4271 "is EXPERIMENTAL!");
4272 }
4273
4062 ret = rbd_dev_v2_image_size(rbd_dev); 4274 ret = rbd_dev_v2_image_size(rbd_dev);
4063 if (ret) 4275 if (ret)
4064 goto out; 4276 goto out;
4065 rbd_update_mapping_size(rbd_dev); 4277
4278 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4279 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4280 rbd_dev->mapping.size = rbd_dev->header.image_size;
4066 4281
4067 ret = rbd_dev_v2_snap_context(rbd_dev); 4282 ret = rbd_dev_v2_snap_context(rbd_dev);
4068 dout("rbd_dev_v2_snap_context returned %d\n", ret); 4283 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4069 if (ret)
4070 goto out;
4071out: 4284out:
4072 up_write(&rbd_dev->header_rwsem); 4285 up_write(&rbd_dev->header_rwsem);
4073 4286
@@ -4490,10 +4703,10 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4490{ 4703{
4491 struct rbd_image_header *header; 4704 struct rbd_image_header *header;
4492 4705
4493 rbd_dev_remove_parent(rbd_dev); 4706 /* Drop parent reference unless it's already been done (or none) */
4494 rbd_spec_put(rbd_dev->parent_spec); 4707
4495 rbd_dev->parent_spec = NULL; 4708 if (rbd_dev->parent_overlap)
4496 rbd_dev->parent_overlap = 0; 4709 rbd_dev_parent_put(rbd_dev);
4497 4710
4498 /* Free dynamic fields from the header, then zero it out */ 4711 /* Free dynamic fields from the header, then zero it out */
4499 4712
@@ -4505,72 +4718,22 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4505 memset(header, 0, sizeof (*header)); 4718 memset(header, 0, sizeof (*header));
4506} 4719}
4507 4720
4508static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) 4721static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4509{ 4722{
4510 int ret; 4723 int ret;
4511 4724
4512 /* Populate rbd image metadata */
4513
4514 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4515 if (ret < 0)
4516 goto out_err;
4517
4518 /* Version 1 images have no parent (no layering) */
4519
4520 rbd_dev->parent_spec = NULL;
4521 rbd_dev->parent_overlap = 0;
4522
4523 dout("discovered version 1 image, header name is %s\n",
4524 rbd_dev->header_name);
4525
4526 return 0;
4527
4528out_err:
4529 kfree(rbd_dev->header_name);
4530 rbd_dev->header_name = NULL;
4531 kfree(rbd_dev->spec->image_id);
4532 rbd_dev->spec->image_id = NULL;
4533
4534 return ret;
4535}
4536
4537static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4538{
4539 int ret;
4540
4541 ret = rbd_dev_v2_image_size(rbd_dev);
4542 if (ret)
4543 goto out_err;
4544
4545 /* Get the object prefix (a.k.a. block_name) for the image */
4546
4547 ret = rbd_dev_v2_object_prefix(rbd_dev); 4725 ret = rbd_dev_v2_object_prefix(rbd_dev);
4548 if (ret) 4726 if (ret)
4549 goto out_err; 4727 goto out_err;
4550 4728
4551 /* Get the and check features for the image */ 4729 /*
4552 4730 * Get and check the features for the image. Currently the
4731 * features are assumed to never change.
4732 */
4553 ret = rbd_dev_v2_features(rbd_dev); 4733 ret = rbd_dev_v2_features(rbd_dev);
4554 if (ret) 4734 if (ret)
4555 goto out_err; 4735 goto out_err;
4556 4736
4557 /* If the image supports layering, get the parent info */
4558
4559 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4560 ret = rbd_dev_v2_parent_info(rbd_dev);
4561 if (ret)
4562 goto out_err;
4563
4564 /*
4565 * Don't print a warning for parent images. We can
4566 * tell this point because we won't know its pool
4567 * name yet (just its pool id).
4568 */
4569 if (rbd_dev->spec->pool_name)
4570 rbd_warn(rbd_dev, "WARNING: kernel layering "
4571 "is EXPERIMENTAL!");
4572 }
4573
4574 /* If the image supports fancy striping, get its parameters */ 4737 /* If the image supports fancy striping, get its parameters */
4575 4738
4576 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { 4739 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
@@ -4578,28 +4741,11 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4578 if (ret < 0) 4741 if (ret < 0)
4579 goto out_err; 4742 goto out_err;
4580 } 4743 }
4581 4744 /* No support for crypto and compression type in format 2 images */
4582 /* crypto and compression type aren't (yet) supported for v2 images */
4583
4584 rbd_dev->header.crypt_type = 0;
4585 rbd_dev->header.comp_type = 0;
4586
4587 /* Get the snapshot context, plus the header version */
4588
4589 ret = rbd_dev_v2_snap_context(rbd_dev);
4590 if (ret)
4591 goto out_err;
4592
4593 dout("discovered version 2 image, header name is %s\n",
4594 rbd_dev->header_name);
4595 4745
4596 return 0; 4746 return 0;
4597out_err: 4747out_err:
4598 rbd_dev->parent_overlap = 0; 4748 rbd_dev->header.features = 0;
4599 rbd_spec_put(rbd_dev->parent_spec);
4600 rbd_dev->parent_spec = NULL;
4601 kfree(rbd_dev->header_name);
4602 rbd_dev->header_name = NULL;
4603 kfree(rbd_dev->header.object_prefix); 4749 kfree(rbd_dev->header.object_prefix);
4604 rbd_dev->header.object_prefix = NULL; 4750 rbd_dev->header.object_prefix = NULL;
4605 4751
@@ -4628,15 +4774,16 @@ static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4628 if (!parent) 4774 if (!parent)
4629 goto out_err; 4775 goto out_err;
4630 4776
4631 ret = rbd_dev_image_probe(parent); 4777 ret = rbd_dev_image_probe(parent, false);
4632 if (ret < 0) 4778 if (ret < 0)
4633 goto out_err; 4779 goto out_err;
4634 rbd_dev->parent = parent; 4780 rbd_dev->parent = parent;
4781 atomic_set(&rbd_dev->parent_ref, 1);
4635 4782
4636 return 0; 4783 return 0;
4637out_err: 4784out_err:
4638 if (parent) { 4785 if (parent) {
4639 rbd_spec_put(rbd_dev->parent_spec); 4786 rbd_dev_unparent(rbd_dev);
4640 kfree(rbd_dev->header_name); 4787 kfree(rbd_dev->header_name);
4641 rbd_dev_destroy(parent); 4788 rbd_dev_destroy(parent);
4642 } else { 4789 } else {
@@ -4651,10 +4798,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4651{ 4798{
4652 int ret; 4799 int ret;
4653 4800
4654 ret = rbd_dev_mapping_set(rbd_dev);
4655 if (ret)
4656 return ret;
4657
4658 /* generate unique id: find highest unique id, add one */ 4801 /* generate unique id: find highest unique id, add one */
4659 rbd_dev_id_get(rbd_dev); 4802 rbd_dev_id_get(rbd_dev);
4660 4803
@@ -4676,13 +4819,17 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4676 if (ret) 4819 if (ret)
4677 goto err_out_blkdev; 4820 goto err_out_blkdev;
4678 4821
4679 ret = rbd_bus_add_dev(rbd_dev); 4822 ret = rbd_dev_mapping_set(rbd_dev);
4680 if (ret) 4823 if (ret)
4681 goto err_out_disk; 4824 goto err_out_disk;
4825 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4826
4827 ret = rbd_bus_add_dev(rbd_dev);
4828 if (ret)
4829 goto err_out_mapping;
4682 4830
4683 /* Everything's ready. Announce the disk to the world. */ 4831 /* Everything's ready. Announce the disk to the world. */
4684 4832
4685 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4686 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 4833 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4687 add_disk(rbd_dev->disk); 4834 add_disk(rbd_dev->disk);
4688 4835
@@ -4691,6 +4838,8 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4691 4838
4692 return ret; 4839 return ret;
4693 4840
4841err_out_mapping:
4842 rbd_dev_mapping_clear(rbd_dev);
4694err_out_disk: 4843err_out_disk:
4695 rbd_free_disk(rbd_dev); 4844 rbd_free_disk(rbd_dev);
4696err_out_blkdev: 4845err_out_blkdev:
@@ -4731,12 +4880,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4731 4880
4732static void rbd_dev_image_release(struct rbd_device *rbd_dev) 4881static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4733{ 4882{
4734 int ret;
4735
4736 rbd_dev_unprobe(rbd_dev); 4883 rbd_dev_unprobe(rbd_dev);
4737 ret = rbd_dev_header_watch_sync(rbd_dev, 0);
4738 if (ret)
4739 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
4740 kfree(rbd_dev->header_name); 4884 kfree(rbd_dev->header_name);
4741 rbd_dev->header_name = NULL; 4885 rbd_dev->header_name = NULL;
4742 rbd_dev->image_format = 0; 4886 rbd_dev->image_format = 0;
@@ -4748,10 +4892,11 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4748 4892
4749/* 4893/*
4750 * Probe for the existence of the header object for the given rbd 4894 * Probe for the existence of the header object for the given rbd
4751 * device. For format 2 images this includes determining the image 4895 * device. If this image is the one being mapped (i.e., not a
4752 * id. 4896 * parent), initiate a watch on its header object before using that
4897 * object to get detailed information about the rbd image.
4753 */ 4898 */
4754static int rbd_dev_image_probe(struct rbd_device *rbd_dev) 4899static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4755{ 4900{
4756 int ret; 4901 int ret;
4757 int tmp; 4902 int tmp;
@@ -4771,14 +4916,16 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4771 if (ret) 4916 if (ret)
4772 goto err_out_format; 4917 goto err_out_format;
4773 4918
4774 ret = rbd_dev_header_watch_sync(rbd_dev, 1); 4919 if (mapping) {
4775 if (ret) 4920 ret = rbd_dev_header_watch_sync(rbd_dev, true);
4776 goto out_header_name; 4921 if (ret)
4922 goto out_header_name;
4923 }
4777 4924
4778 if (rbd_dev->image_format == 1) 4925 if (rbd_dev->image_format == 1)
4779 ret = rbd_dev_v1_probe(rbd_dev); 4926 ret = rbd_dev_v1_header_info(rbd_dev);
4780 else 4927 else
4781 ret = rbd_dev_v2_probe(rbd_dev); 4928 ret = rbd_dev_v2_header_info(rbd_dev);
4782 if (ret) 4929 if (ret)
4783 goto err_out_watch; 4930 goto err_out_watch;
4784 4931
@@ -4787,15 +4934,22 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4787 goto err_out_probe; 4934 goto err_out_probe;
4788 4935
4789 ret = rbd_dev_probe_parent(rbd_dev); 4936 ret = rbd_dev_probe_parent(rbd_dev);
4790 if (!ret) 4937 if (ret)
4791 return 0; 4938 goto err_out_probe;
4939
4940 dout("discovered format %u image, header name is %s\n",
4941 rbd_dev->image_format, rbd_dev->header_name);
4792 4942
4943 return 0;
4793err_out_probe: 4944err_out_probe:
4794 rbd_dev_unprobe(rbd_dev); 4945 rbd_dev_unprobe(rbd_dev);
4795err_out_watch: 4946err_out_watch:
4796 tmp = rbd_dev_header_watch_sync(rbd_dev, 0); 4947 if (mapping) {
4797 if (tmp) 4948 tmp = rbd_dev_header_watch_sync(rbd_dev, false);
4798 rbd_warn(rbd_dev, "unable to tear down watch request\n"); 4949 if (tmp)
4950 rbd_warn(rbd_dev, "unable to tear down "
4951 "watch request (%d)\n", tmp);
4952 }
4799out_header_name: 4953out_header_name:
4800 kfree(rbd_dev->header_name); 4954 kfree(rbd_dev->header_name);
4801 rbd_dev->header_name = NULL; 4955 rbd_dev->header_name = NULL;
@@ -4819,6 +4973,7 @@ static ssize_t rbd_add(struct bus_type *bus,
4819 struct rbd_spec *spec = NULL; 4973 struct rbd_spec *spec = NULL;
4820 struct rbd_client *rbdc; 4974 struct rbd_client *rbdc;
4821 struct ceph_osd_client *osdc; 4975 struct ceph_osd_client *osdc;
4976 bool read_only;
4822 int rc = -ENOMEM; 4977 int rc = -ENOMEM;
4823 4978
4824 if (!try_module_get(THIS_MODULE)) 4979 if (!try_module_get(THIS_MODULE))
@@ -4828,6 +4983,9 @@ static ssize_t rbd_add(struct bus_type *bus,
4828 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec); 4983 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4829 if (rc < 0) 4984 if (rc < 0)
4830 goto err_out_module; 4985 goto err_out_module;
4986 read_only = rbd_opts->read_only;
4987 kfree(rbd_opts);
4988 rbd_opts = NULL; /* done with this */
4831 4989
4832 rbdc = rbd_get_client(ceph_opts); 4990 rbdc = rbd_get_client(ceph_opts);
4833 if (IS_ERR(rbdc)) { 4991 if (IS_ERR(rbdc)) {
@@ -4858,14 +5016,16 @@ static ssize_t rbd_add(struct bus_type *bus,
4858 rbdc = NULL; /* rbd_dev now owns this */ 5016 rbdc = NULL; /* rbd_dev now owns this */
4859 spec = NULL; /* rbd_dev now owns this */ 5017 spec = NULL; /* rbd_dev now owns this */
4860 5018
4861 rbd_dev->mapping.read_only = rbd_opts->read_only; 5019 rc = rbd_dev_image_probe(rbd_dev, true);
4862 kfree(rbd_opts);
4863 rbd_opts = NULL; /* done with this */
4864
4865 rc = rbd_dev_image_probe(rbd_dev);
4866 if (rc < 0) 5020 if (rc < 0)
4867 goto err_out_rbd_dev; 5021 goto err_out_rbd_dev;
4868 5022
5023 /* If we are mapping a snapshot it must be marked read-only */
5024
5025 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5026 read_only = true;
5027 rbd_dev->mapping.read_only = read_only;
5028
4869 rc = rbd_dev_device_setup(rbd_dev); 5029 rc = rbd_dev_device_setup(rbd_dev);
4870 if (!rc) 5030 if (!rc)
4871 return count; 5031 return count;
@@ -4911,7 +5071,7 @@ static void rbd_dev_device_release(struct device *dev)
4911 5071
4912 rbd_free_disk(rbd_dev); 5072 rbd_free_disk(rbd_dev);
4913 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5073 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4914 rbd_dev_clear_mapping(rbd_dev); 5074 rbd_dev_mapping_clear(rbd_dev);
4915 unregister_blkdev(rbd_dev->major, rbd_dev->name); 5075 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4916 rbd_dev->major = 0; 5076 rbd_dev->major = 0;
4917 rbd_dev_id_put(rbd_dev); 5077 rbd_dev_id_put(rbd_dev);
@@ -4978,10 +5138,13 @@ static ssize_t rbd_remove(struct bus_type *bus,
4978 spin_unlock_irq(&rbd_dev->lock); 5138 spin_unlock_irq(&rbd_dev->lock);
4979 if (ret < 0) 5139 if (ret < 0)
4980 goto done; 5140 goto done;
4981 ret = count;
4982 rbd_bus_del_dev(rbd_dev); 5141 rbd_bus_del_dev(rbd_dev);
5142 ret = rbd_dev_header_watch_sync(rbd_dev, false);
5143 if (ret)
5144 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
4983 rbd_dev_image_release(rbd_dev); 5145 rbd_dev_image_release(rbd_dev);
4984 module_put(THIS_MODULE); 5146 module_put(THIS_MODULE);
5147 ret = count;
4985done: 5148done:
4986 mutex_unlock(&ctl_mutex); 5149 mutex_unlock(&ctl_mutex);
4987 5150
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c
index f8ef15f37c5e..3fd130fdfbc1 100644
--- a/drivers/block/xsysace.c
+++ b/drivers/block/xsysace.c
@@ -1160,8 +1160,7 @@ static int ace_probe(struct platform_device *dev)
1160 dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); 1160 dev_dbg(&dev->dev, "ace_probe(%p)\n", dev);
1161 1161
1162 /* device id and bus width */ 1162 /* device id and bus width */
1163 of_property_read_u32(dev->dev.of_node, "port-number", &id); 1163 if (of_property_read_u32(dev->dev.of_node, "port-number", &id))
1164 if (id < 0)
1165 id = 0; 1164 id = 0;
1166 if (of_find_property(dev->dev.of_node, "8-bit", NULL)) 1165 if (of_find_property(dev->dev.of_node, "8-bit", NULL))
1167 bus_width = ACE_BUS_WIDTH_8; 1166 bus_width = ACE_BUS_WIDTH_8;
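
The xsysace change works because of_property_read_u32() reports a missing or malformed property through its return value and leaves the output variable untouched, so testing the (unsigned) output with "id < 0" can never catch the failure. A userspace illustration of why the return value is the thing to check, using an invented read_u32()-style helper rather than the OF API:

#include <stdint.h>
#include <stdio.h>

static int read_u32(int present, uint32_t value, uint32_t *out)
{
        if (!present)
                return -1;      /* *out left untouched on failure */
        *out = value;
        return 0;
}

int main(void)
{
        uint32_t id = 7;                /* stale value */

        if (read_u32(0, 42, &id))       /* property missing */
                id = 0;                 /* explicit fallback, as in the fix */
        printf("id = %u\n", id);        /* prints 0, not the stale 7 */
        return 0;
}
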
diff --git a/drivers/char/hw_random/mxc-rnga.c b/drivers/char/hw_random/mxc-rnga.c
index 4ca35e8a5d8c..19a12ac64a9e 100644
--- a/drivers/char/hw_random/mxc-rnga.c
+++ b/drivers/char/hw_random/mxc-rnga.c
@@ -167,11 +167,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
167 clk_prepare_enable(mxc_rng->clk); 167 clk_prepare_enable(mxc_rng->clk);
168 168
169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 169 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
170 if (!res) {
171 err = -ENOENT;
172 goto err_region;
173 }
174
175 mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res); 170 mxc_rng->mem = devm_ioremap_resource(&pdev->dev, res);
176 if (IS_ERR(mxc_rng->mem)) { 171 if (IS_ERR(mxc_rng->mem)) {
177 err = PTR_ERR(mxc_rng->mem); 172 err = PTR_ERR(mxc_rng->mem);
@@ -189,7 +184,6 @@ static int __init mxc_rnga_probe(struct platform_device *pdev)
189 return 0; 184 return 0;
190 185
191err_ioremap: 186err_ioremap:
192err_region:
193 clk_disable_unprepare(mxc_rng->clk); 187 clk_disable_unprepare(mxc_rng->clk);
194 188
195out: 189out:
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 749dc16ca2cc..d2903e772270 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -119,11 +119,6 @@ static int omap_rng_probe(struct platform_device *pdev)
119 dev_set_drvdata(&pdev->dev, priv); 119 dev_set_drvdata(&pdev->dev, priv);
120 120
121 priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 121 priv->mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
122 if (!priv->mem_res) {
123 ret = -ENOENT;
124 goto err_ioremap;
125 }
126
127 priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res); 122 priv->base = devm_ioremap_resource(&pdev->dev, priv->mem_res);
128 if (IS_ERR(priv->base)) { 123 if (IS_ERR(priv->base)) {
129 ret = PTR_ERR(priv->base); 124 ret = PTR_ERR(priv->base);
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c
index cdd4c09fda96..a22a7a502740 100644
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -95,9 +95,9 @@ struct si_sm_data {
95 enum bt_states state; 95 enum bt_states state;
96 unsigned char seq; /* BT sequence number */ 96 unsigned char seq; /* BT sequence number */
97 struct si_sm_io *io; 97 struct si_sm_io *io;
98 unsigned char write_data[IPMI_MAX_MSG_LENGTH]; 98 unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
99 int write_count; 99 int write_count;
100 unsigned char read_data[IPMI_MAX_MSG_LENGTH]; 100 unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */
101 int read_count; 101 int read_count;
102 int truncated; 102 int truncated;
103 long timeout; /* microseconds countdown */ 103 long timeout; /* microseconds countdown */
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 9eb360ff8cab..d5a5f020810a 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -837,13 +837,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
 		return ipmi_ioctl(filep, cmd, arg);
 	}
 }
+
+static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd,
+				       unsigned long arg)
+{
+	int ret;
+
+	mutex_lock(&ipmi_mutex);
+	ret = compat_ipmi_ioctl(filep, cmd, arg);
+	mutex_unlock(&ipmi_mutex);
+
+	return ret;
+}
 #endif
 
 static const struct file_operations ipmi_fops = {
 	.owner		= THIS_MODULE,
 	.unlocked_ioctl	= ipmi_unlocked_ioctl,
 #ifdef CONFIG_COMPAT
-	.compat_ioctl	= compat_ipmi_ioctl,
+	.compat_ioctl	= unlocked_compat_ipmi_ioctl,
 #endif
 	.open		= ipmi_open,
 	.release	= ipmi_release,
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 4d439d2fcfd6..4445fa164a2d 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2037,12 +2037,11 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry)
 		return -ENOMEM;
-	entry->name = kmalloc(strlen(name)+1, GFP_KERNEL);
+	entry->name = kstrdup(name, GFP_KERNEL);
 	if (!entry->name) {
 		kfree(entry);
 		return -ENOMEM;
 	}
-	strcpy(entry->name, name);
 
 	file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
 	if (!file) {
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 313538abe63c..af4b23ffc5a6 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -663,8 +663,10 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
 		if (msg[2] != 0) {
-			dev_warn(smi_info->dev, "Could not enable interrupts"
-				 ", failed get, using polled mode.\n");
+			dev_warn(smi_info->dev,
+				 "Couldn't get irq info: %x.\n", msg[2]);
+			dev_warn(smi_info->dev,
+				 "Maybe ok, but ipmi might run very slowly.\n");
 			smi_info->si_state = SI_NORMAL;
 		} else {
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
@@ -685,10 +687,12 @@ static void handle_transaction_done(struct smi_info *smi_info)
 
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0)
-			dev_warn(smi_info->dev, "Could not enable interrupts"
-				 ", failed set, using polled mode.\n");
-		else
+		if (msg[2] != 0) {
+			dev_warn(smi_info->dev,
+				 "Couldn't set irq info: %x.\n", msg[2]);
+			dev_warn(smi_info->dev,
+				 "Maybe ok, but ipmi might run very slowly.\n");
+		} else
 			smi_info->interrupt_disabled = 0;
 		smi_info->si_state = SI_NORMAL;
 		break;
diff --git a/drivers/char/lp.c b/drivers/char/lp.c
index dafd9ac6428f..0913d79424d3 100644
--- a/drivers/char/lp.c
+++ b/drivers/char/lp.c
@@ -622,9 +622,12 @@ static int lp_do_ioctl(unsigned int minor, unsigned int cmd,
 				return -EFAULT;
 			break;
 		case LPGETSTATUS:
+			if (mutex_lock_interruptible(&lp_table[minor].port_mutex))
+				return -EINTR;
 			lp_claim_parport_or_block (&lp_table[minor]);
 			status = r_str(minor);
 			lp_release_parport (&lp_table[minor]);
+			mutex_unlock(&lp_table[minor].port_mutex);
 
 			if (copy_to_user(argp, &status, sizeof(int)))
 				return -EFAULT;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index cd9a6211dcad..35487e8ded59 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -865,16 +865,24 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
 	if (r->entropy_count / 8 < min + reserved) {
 		nbytes = 0;
 	} else {
+		int entropy_count, orig;
+retry:
+		entropy_count = orig = ACCESS_ONCE(r->entropy_count);
 		/* If limited, never pull more than available */
-		if (r->limit && nbytes + reserved >= r->entropy_count / 8)
-			nbytes = r->entropy_count/8 - reserved;
+		if (r->limit && nbytes + reserved >= entropy_count / 8)
+			nbytes = entropy_count/8 - reserved;
 
-		if (r->entropy_count / 8 >= nbytes + reserved)
-			r->entropy_count -= nbytes*8;
-		else
-			r->entropy_count = reserved;
+		if (entropy_count / 8 >= nbytes + reserved) {
+			entropy_count -= nbytes*8;
+			if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+				goto retry;
+		} else {
+			entropy_count = reserved;
+			if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+				goto retry;
+		}
 
-		if (r->entropy_count < random_write_wakeup_thresh)
+		if (entropy_count < random_write_wakeup_thresh)
 			wakeup_write = 1;
 	}
 
@@ -957,10 +965,23 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 {
 	ssize_t ret = 0, i;
 	__u8 tmp[EXTRACT_SIZE];
+	unsigned long flags;
 
 	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
-	if (fips_enabled && !r->last_data_init)
-		nbytes += EXTRACT_SIZE;
+	if (fips_enabled) {
+		spin_lock_irqsave(&r->lock, flags);
+		if (!r->last_data_init) {
+			r->last_data_init = true;
+			spin_unlock_irqrestore(&r->lock, flags);
+			trace_extract_entropy(r->name, EXTRACT_SIZE,
+					      r->entropy_count, _RET_IP_);
+			xfer_secondary_pool(r, EXTRACT_SIZE);
+			extract_buf(r, tmp);
+			spin_lock_irqsave(&r->lock, flags);
+			memcpy(r->last_data, tmp, EXTRACT_SIZE);
+		}
+		spin_unlock_irqrestore(&r->lock, flags);
+	}
 
 	trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
 	xfer_secondary_pool(r, nbytes);
@@ -970,19 +991,6 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
 		extract_buf(r, tmp);
 
 		if (fips_enabled) {
-			unsigned long flags;
-
-
-			/* prime last_data value if need be, per fips 140-2 */
-			if (!r->last_data_init) {
-				spin_lock_irqsave(&r->lock, flags);
-				memcpy(r->last_data, tmp, EXTRACT_SIZE);
-				r->last_data_init = true;
-				nbytes -= EXTRACT_SIZE;
-				spin_unlock_irqrestore(&r->lock, flags);
-				extract_buf(r, tmp);
-			}
-
 			spin_lock_irqsave(&r->lock, flags);
 			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
 				panic("Hardware RNG duplicated output!\n");
diff --git a/drivers/char/ttyprintk.c b/drivers/char/ttyprintk.c
index 4945bd3d18d0..d5d2e4a985aa 100644
--- a/drivers/char/ttyprintk.c
+++ b/drivers/char/ttyprintk.c
@@ -179,7 +179,6 @@ static int __init ttyprintk_init(void)
 {
 	int ret = -ENOMEM;
 
-	tpk_port.port.ops = &null_ops;
 	mutex_init(&tpk_port.port_write_mutex);
 
 	ttyprintk_driver = tty_alloc_driver(1,
@@ -190,6 +189,7 @@ static int __init ttyprintk_init(void)
 		return PTR_ERR(ttyprintk_driver);
 
 	tty_port_init(&tpk_port.port);
+	tpk_port.port.ops = &null_ops;
 
 	ttyprintk_driver->driver_name = "ttyprintk";
 	ttyprintk_driver->name = "ttyprintk";
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index 8292a00c3de9..075db0c99edb 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -872,6 +872,14 @@ static void __init tegra20_periph_clk_init(void)
 	struct clk *clk;
 	int i;
 
+	/* ac97 */
+	clk = tegra_clk_register_periph_gate("ac97", "pll_a_out0",
+				    TEGRA_PERIPH_ON_APB,
+				    clk_base, 0, 3, &periph_l_regs,
+				    periph_clk_enb_refcnt);
+	clk_register_clkdev(clk, NULL, "tegra20-ac97");
+	clks[ac97] = clk;
+
 	/* apbdma */
 	clk = tegra_clk_register_periph_gate("apbdma", "pclk", 0, clk_base,
 				    0, 34, &periph_h_regs,
@@ -1234,9 +1242,6 @@ static __initdata struct tegra_clk_init_table init_table[] = {
 	{uartc, pll_p, 0, 0},
 	{uartd, pll_p, 0, 0},
 	{uarte, pll_p, 0, 0},
-	{usbd, clk_max, 12000000, 0},
-	{usb2, clk_max, 12000000, 0},
-	{usb3, clk_max, 12000000, 0},
 	{pll_a, clk_max, 56448000, 1},
 	{pll_a_out0, clk_max, 11289600, 1},
 	{cdev1, clk_max, 0, 1},
diff --git a/drivers/clk/x86/clk-lpt.c b/drivers/clk/x86/clk-lpt.c
index 5cf4f4686406..4f45eee9e33b 100644
--- a/drivers/clk/x86/clk-lpt.c
+++ b/drivers/clk/x86/clk-lpt.c
@@ -15,22 +15,29 @@
 #include <linux/clk-provider.h>
 #include <linux/err.h>
 #include <linux/module.h>
+#include <linux/platform_data/clk-lpss.h>
 #include <linux/platform_device.h>
 
 #define PRV_CLOCK_PARAMS 0x800
 
 static int lpt_clk_probe(struct platform_device *pdev)
 {
+	struct lpss_clk_data *drvdata;
 	struct clk *clk;
 
+	drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+	if (!drvdata)
+		return -ENOMEM;
+
 	/* LPSS free running clock */
-	clk = clk_register_fixed_rate(&pdev->dev, "lpss_clk", NULL, CLK_IS_ROOT,
-				      100000000);
+	drvdata->name = "lpss_clk";
+	clk = clk_register_fixed_rate(&pdev->dev, drvdata->name, NULL,
+				      CLK_IS_ROOT, 100000000);
 	if (IS_ERR(clk))
 		return PTR_ERR(clk);
 
-	/* Shared DMA clock */
-	clk_register_clkdev(clk, "hclk", "INTL9C60.0.auto");
+	drvdata->clk = clk;
+	platform_set_drvdata(pdev, drvdata);
 	return 0;
 }
 
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1488f58f6ca..534fcb825153 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
 
 choice
 	prompt "Default CPUFreq governor"
-	default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
+	default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
 	default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
 	help
 	  This option sets which CPUFreq governor shall be loaded at
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f3af18b9acc5..6e57543fe0b9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,16 +3,17 @@
 #
 
 config ARM_BIG_LITTLE_CPUFREQ
-	tristate
-	depends on ARM_CPU_TOPOLOGY
+	tristate "Generic ARM big LITTLE CPUfreq driver"
+	depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+	help
+	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
 config ARM_DT_BL_CPUFREQ
-	tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
-	select ARM_BIG_LITTLE_CPUFREQ
-	depends on OF && HAVE_CLK
+	tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+	depends on ARM_BIG_LITTLE_CPUFREQ && OF
 	help
-	  This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
-	  This gets frequency tables from DT.
+	  This enables probing via DT for Generic CPUfreq driver for ARM
+	  big.LITTLE platform. This gets frequency tables from DT.
 
 config ARM_EXYNOS_CPUFREQ
 	bool "SAMSUNG EXYNOS SoCs"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 2b8a8c374548..6bd63d63d356 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -272,7 +272,7 @@ config X86_LONGHAUL
 config X86_E_POWERSAVER
 	tristate "VIA C7 Enhanced PowerSaver (DANGEROUS)"
 	select CPU_FREQ_TABLE
-	depends on X86_32
+	depends on X86_32 && ACPI_PROCESSOR
 	help
 	  This adds the CPUFreq driver for VIA C7 processors. However, this driver
 	  does not have any safeguards to prevent operating the CPU out of spec
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index dbdf677d2f36..5d7f53fcd6f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
 static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
 static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
 
-static int cpu_to_cluster(int cpu)
-{
-	return topology_physical_package_id(cpu);
-}
-
 static unsigned int bL_cpufreq_get(unsigned int cpu)
 {
 	u32 cur_cluster = cpu_to_cluster(cpu);
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 
 	cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-	dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+	dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
 	return 0;
 }
 
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..79b2ce17884d 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
 	int (*init_opp_table)(struct device *cpu_dev);
 };
 
+static inline int cpu_to_cluster(int cpu)
+{
+	return topology_physical_package_id(cpu);
+}
+
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
 void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
 
39 44
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 44be3115375c..fd9e3ea6a480 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -19,69 +19,75 @@
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/opp.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include "arm_big_little.h"
 
-static int dt_init_opp_table(struct device *cpu_dev)
+/* get cpu node with valid operating-points */
+static struct device_node *get_cpu_node_with_valid_op(int cpu)
 {
-	struct device_node *np, *parent;
-	int count = 0, ret;
+	struct device_node *np = NULL, *parent;
+	int count = 0;
 
 	parent = of_find_node_by_path("/cpus");
 	if (!parent) {
 		pr_err("failed to find OF /cpus\n");
-		return -ENOENT;
+		return NULL;
 	}
 
 	for_each_child_of_node(parent, np) {
-		if (count++ != cpu_dev->id)
+		if (count++ != cpu)
 			continue;
 		if (!of_get_property(np, "operating-points", NULL)) {
-			ret = -ENODATA;
-		} else {
-			cpu_dev->of_node = np;
-			ret = of_init_opp_table(cpu_dev);
+			of_node_put(np);
+			np = NULL;
 		}
-		of_node_put(np);
-		of_node_put(parent);
 
-		return ret;
+		break;
 	}
 
-	return -ENODEV;
+	of_node_put(parent);
+	return np;
 }
 
-static int dt_get_transition_latency(struct device *cpu_dev)
+static int dt_init_opp_table(struct device *cpu_dev)
 {
-	struct device_node *np, *parent;
-	u32 transition_latency = CPUFREQ_ETERNAL;
-	int count = 0;
+	struct device_node *np;
+	int ret;
 
-	parent = of_find_node_by_path("/cpus");
-	if (!parent) {
-		pr_err("failed to find OF /cpus\n");
-		return -ENOENT;
-	}
+	np = get_cpu_node_with_valid_op(cpu_dev->id);
+	if (!np)
+		return -ENODATA;
 
-	for_each_child_of_node(parent, np) {
-		if (count++ != cpu_dev->id)
-			continue;
+	cpu_dev->of_node = np;
+	ret = of_init_opp_table(cpu_dev);
+	of_node_put(np);
 
-		of_property_read_u32(np, "clock-latency", &transition_latency);
-		of_node_put(np);
-		of_node_put(parent);
+	return ret;
+}
 
-		return 0;
-	}
+static int dt_get_transition_latency(struct device *cpu_dev)
+{
+	struct device_node *np;
+	u32 transition_latency = CPUFREQ_ETERNAL;
+
+	np = get_cpu_node_with_valid_op(cpu_dev->id);
+	if (!np)
+		return CPUFREQ_ETERNAL;
 
-	return -ENODEV;
+	of_property_read_u32(np, "clock-latency", &transition_latency);
+	of_node_put(np);
+
+	pr_debug("%s: clock-latency: %d\n", __func__, transition_latency);
+	return transition_latency;
 }
 
 static struct cpufreq_arm_bL_ops dt_bL_ops = {
@@ -90,17 +96,33 @@ static struct cpufreq_arm_bL_ops dt_bL_ops = {
 	.init_opp_table = dt_init_opp_table,
 };
 
-static int generic_bL_init(void)
+static int generic_bL_probe(struct platform_device *pdev)
 {
+	struct device_node *np;
+
+	np = get_cpu_node_with_valid_op(0);
+	if (!np)
+		return -ENODEV;
+
+	of_node_put(np);
 	return bL_cpufreq_register(&dt_bL_ops);
 }
-module_init(generic_bL_init);
 
-static void generic_bL_exit(void)
+static int generic_bL_remove(struct platform_device *pdev)
 {
-	return bL_cpufreq_unregister(&dt_bL_ops);
+	bL_cpufreq_unregister(&dt_bL_ops);
+	return 0;
 }
-module_exit(generic_bL_exit);
+
+static struct platform_driver generic_bL_platdrv = {
+	.driver = {
+		.name	= "arm-bL-cpufreq-dt",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= generic_bL_probe,
+	.remove		= generic_bL_remove,
+};
+module_platform_driver(generic_bL_platdrv);
 
 MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
 MODULE_DESCRIPTION("Generic ARM big LITTLE cpufreq driver via DT");
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 3ab8294eab04..a64eb8b70444 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
 	if (!np) {
 		pr_err("failed to find cpu0 node\n");
-		return -ENOENT;
+		ret = -ENOENT;
+		goto out_put_parent;
 	}
 
 	cpu_dev = &pdev->dev;
 	cpu_dev->of_node = np;
 
+	cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+	if (IS_ERR(cpu_reg)) {
+		/*
+		 * If cpu0 regulator supply node is present, but regulator is
+		 * not yet registered, we should try defering probe.
+		 */
+		if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+			dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+			ret = -EPROBE_DEFER;
+			goto out_put_node;
+		}
+		pr_warn("failed to get cpu0 regulator: %ld\n",
+			PTR_ERR(cpu_reg));
+		cpu_reg = NULL;
+	}
+
 	cpu_clk = devm_clk_get(cpu_dev, NULL);
 	if (IS_ERR(cpu_clk)) {
 		ret = PTR_ERR(cpu_clk);
@@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 		goto out_put_node;
 	}
 
-	cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
-	if (IS_ERR(cpu_reg)) {
-		pr_warn("failed to get cpu0 regulator\n");
-		cpu_reg = NULL;
-	}
-
 	ret = of_init_opp_table(cpu_dev);
 	if (ret) {
 		pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +275,8 @@ out_free_table:
 	opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
 	of_node_put(np);
+out_put_parent:
+	of_node_put(parent);
 	return ret;
 }
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1b8a48eaf90f..2d53f47d1747 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
 			 __func__, cpu_dev->id, cpu);
 	}
 
+	if ((cpus == 1) && (cpufreq_driver->target))
+		__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
 	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
 	cpufreq_cpu_put(data);
 
 	/* If cpu is last user of policy, free policy */
 	if (cpus == 1) {
-		if (cpufreq_driver->target)
-			__cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
 		lock_policy_rwsem_read(cpu);
 		kobj = &data->kobj;
 		cmp = &data->kobj_unregister;
@@ -1729,18 +1729,23 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 			/* end old governor */
 			if (data->governor) {
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+				unlock_policy_rwsem_write(policy->cpu);
 				__cpufreq_governor(data,
 						CPUFREQ_GOV_POLICY_EXIT);
+				lock_policy_rwsem_write(policy->cpu);
 			}
 
 			/* start new governor */
 			data->governor = policy->governor;
 			if (!__cpufreq_governor(data, CPUFREQ_GOV_POLICY_INIT)) {
-				if (!__cpufreq_governor(data, CPUFREQ_GOV_START))
+				if (!__cpufreq_governor(data, CPUFREQ_GOV_START)) {
 					failed = 0;
-				else
+				} else {
+					unlock_policy_rwsem_write(policy->cpu);
 					__cpufreq_governor(data,
 							CPUFREQ_GOV_POLICY_EXIT);
+					lock_policy_rwsem_write(policy->cpu);
+				}
 			}
 
 			if (failed) {
@@ -1832,15 +1837,13 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 	if (dev) {
 		switch (action) {
 		case CPU_ONLINE:
-		case CPU_ONLINE_FROZEN:
 			cpufreq_add_dev(dev, NULL);
 			break;
 		case CPU_DOWN_PREPARE:
-		case CPU_DOWN_PREPARE_FROZEN:
+		case CPU_UP_CANCELED_FROZEN:
 			__cpufreq_remove_dev(dev, NULL);
 			break;
 		case CPU_DOWN_FAILED:
-		case CPU_DOWN_FAILED_FROZEN:
 			cpufreq_add_dev(dev, NULL);
 			break;
 		}
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 443442df113b..5af40ad82d23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (have_governor_per_policy()) {
 			WARN_ON(dbs_data);
 		} else if (dbs_data) {
+			dbs_data->usage_count++;
 			policy->governor_data = dbs_data;
 			return 0;
 		}
@@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 
 		dbs_data->cdata = cdata;
+		dbs_data->usage_count = 1;
 		rc = cdata->init(dbs_data);
 		if (rc) {
 			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
 					latency * LATENCY_MULTIPLIER));
 
-		if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+		if ((cdata->governor == GOV_CONSERVATIVE) &&
+				(!policy->governor->initialized)) {
 			struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
 			cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		return 0;
 	case CPUFREQ_GOV_POLICY_EXIT:
-		if ((policy->governor->initialized == 1) ||
-				have_governor_per_policy()) {
+		if (!--dbs_data->usage_count) {
 			sysfs_remove_group(get_governor_parent_kobj(policy),
 					get_sysfs_attr(dbs_data));
 
-			if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+			if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+				(policy->governor->initialized == 1)) {
 				struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
 				cpufreq_unregister_notifier(cs_ops->notifier_block,
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8ac33538d0bd..e16a96130cb3 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -211,6 +211,7 @@ struct common_dbs_data {
 struct dbs_data {
 	struct common_dbs_data *cdata;
 	unsigned int min_sampling_rate;
+	int usage_count;
 	void *tuners;
 
 	/* dbs_mutex protects dbs_enable in governor start/stop */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b0ffef96bf77..4b9bb5def6f1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data)
 	tuners->io_is_busy = should_io_be_busy();
 
 	dbs_data->tuners = tuners;
-	pr_info("%s: tuners %p\n", __func__, tuners);
 	mutex_init(&dbs_data->mutex);
 	return 0;
 }
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index bfd6273fd873..fb65decffa28 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -349,15 +349,16 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
 		cpufreq_update_policy(cpu);
 		break;
 	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
 		cpufreq_stats_free_sysfs(cpu);
 		break;
 	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
+		cpufreq_stats_free_table(cpu);
+		break;
+	case CPU_UP_CANCELED_FROZEN:
+		cpufreq_stats_free_sysfs(cpu);
 		cpufreq_stats_free_table(cpu);
 		break;
 	}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92be..07f2840ad805 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-	ktime_t start_time;
-	ktime_t end_time;
 	int core_pct_busy;
-	int pstate_pct_busy;
-	u64 duration_us;
-	u64 idletime_us;
 	u64 aperf;
 	u64 mperf;
 	int freq;
@@ -86,13 +81,9 @@ struct cpudata {
 	struct pstate_adjust_policy *pstate_policy;
 	struct pstate_data pstate;
 	struct _pid pid;
-	struct _pid idle_pid;
 
 	int min_pstate_count;
-	int idle_mode;
 
-	ktime_t prev_sample;
-	u64 prev_idle_time_us;
 	u64 prev_aperf;
 	u64 prev_mperf;
 	int sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
 	int min_perf_pct;
 	int32_t max_perf;
 	int32_t min_perf;
+	int max_policy_pct;
+	int max_sysfs_pct;
 };
 
 static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
 	.max_perf = int_tofp(1),
 	.min_perf_pct = 0,
 	.min_perf = 0,
+	.max_policy_pct = 100,
+	.max_sysfs_pct = 100,
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
 		0);
 }
 
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
-	pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
-	pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
-	pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
-	pid_reset(&cpu->idle_pid,
-		75,
-		50,
-		cpu->pstate_policy->deadband,
-		0);
-}
-
 static inline void intel_pstate_reset_all_pid(void)
 {
 	unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 	return count;
 }
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 	if (pstate == cpu->pstate.current_pstate)
 		return;
 
-#ifndef MODULE
 	trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
 	cpu->pstate.current_pstate = pstate;
 	wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
 
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	sample->pstate_pct_busy = 100 - div64_u64(
-					sample->idletime_us * 100,
-					sample->duration_us);
 	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
 	sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
-	sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
-					100);
+	sample->core_pct_busy = core_pct;
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
-	ktime_t now;
-	u64 idle_time_us;
 	u64 aperf, mperf;
 
-	now = ktime_get();
-	idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-	/* for the first sample, don't actually record a sample, just
-	 * set the baseline */
-	if (cpu->prev_idle_time_us > 0) {
-		cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-		cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
-		cpu->samples[cpu->sample_ptr].end_time = now;
-		cpu->samples[cpu->sample_ptr].duration_us =
-			ktime_us_delta(now, cpu->prev_sample);
-		cpu->samples[cpu->sample_ptr].idletime_us =
-			idle_time_us - cpu->prev_idle_time_us;
-
-		cpu->samples[cpu->sample_ptr].aperf = aperf;
-		cpu->samples[cpu->sample_ptr].mperf = mperf;
-		cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-		cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
-		intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
-	}
+	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+	cpu->samples[cpu->sample_ptr].aperf = aperf;
+	cpu->samples[cpu->sample_ptr].mperf = mperf;
+	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
-	cpu->prev_sample = now;
-	cpu->prev_idle_time_us = idle_time_us;
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
 }
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
-	cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
-	cpu->idle_mode = 0;
-}
-
 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
 	int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 		intel_pstate_pstate_decrease(cpu, steps);
 }
 
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
-	int busy_scaled;
-	struct _pid *pid;
-	int ctl = 0;
-	int steps;
-
-	pid = &cpu->idle_pid;
-
-	busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
-	ctl = pid_calc(pid, 100 - busy_scaled);
-
-	steps = abs(ctl);
-	if (ctl < 0)
-		intel_pstate_pstate_decrease(cpu, steps);
-	else
-		intel_pstate_pstate_increase(cpu, steps);
-
-	if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
-		intel_pstate_normal_mode(cpu);
-}
-
 static void intel_pstate_timer_func(unsigned long __data)
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
 
 	intel_pstate_sample(cpu);
+	intel_pstate_adjust_busy_pstate(cpu);
 
-	if (!cpu->idle_mode)
-		intel_pstate_adjust_busy_pstate(cpu);
-	else
-		intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
 	if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
 		cpu->min_pstate_count++;
 		if (!(cpu->min_pstate_count % 5)) {
 			intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
-			intel_pstate_idle_mode(cpu);
 		}
 	} else
 		cpu->min_pstate_count = 0;
-#endif
+
 	intel_pstate_set_sample_time(cpu);
 }
 
@@ -600,6 +521,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, default_policy),
 	ICPU(0x2d, default_policy),
+	ICPU(0x3a, default_policy),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
@@ -631,7 +553,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 		(unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
 	intel_pstate_busy_pid_reset(cpu);
-	intel_pstate_idle_pid_reset(cpu);
 	intel_pstate_sample(cpu);
 	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
@@ -675,8 +596,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
-	limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
-	limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	return 0;
@@ -788,10 +710,9 @@ static int __init intel_pstate_init(void)
 
 	pr_info("Intel P-state driver initializing.\n");
 
-	all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
 	if (!all_cpu_data)
 		return -ENOMEM;
-	memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
 
 	rc = cpufreq_register_driver(&intel_pstate_driver);
 	if (rc)
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index d36ea8dc96eb..b2644af985ec 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
 	priv.dev = &pdev->dev;
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	if (!res) {
-		dev_err(&pdev->dev, "Cannot get memory resource\n");
-		return -ENODEV;
-	}
 	priv.base = devm_ioremap_resource(&pdev->dev, res);
 	if (IS_ERR(priv.base))
 		return PTR_ERR(priv.base);
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index 84889573b566..d53912768946 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -18,6 +18,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/clock.h>
+#include <asm/idle.h>
 
 #include <asm/mach-loongson/loongson.h>
 
@@ -200,6 +201,7 @@ static void loongson2_cpu_wait(void)
 	LOONGSON_CHIPCFG0 &= ~0x7;	/* Put CPU into wait mode */
 	LOONGSON_CHIPCFG0 = cpu_freq;	/* Restore CPU state */
 	spin_unlock_irqrestore(&loongson2_wait_lock, flags);
+	local_irq_enable();
 }
 
 static int __init cpufreq_init(void)
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index a76d4c4f29f5..35d483f8db66 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
 	.cra_blocksize   = AES_BLOCK_SIZE,
 	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 	.cra_type        = &crypto_blkcipher_type,
+	.cra_alignmask   = 0xf,
 	.cra_module      = THIS_MODULE,
 	.cra_init        = nx_crypto_ctx_aes_cbc_init,
 	.cra_exit        = nx_crypto_ctx_exit,
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index ba5f1611336f..7bbc9a81da21 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
 	.cra_priority    = 300,
 	.cra_flags       = CRYPTO_ALG_TYPE_BLKCIPHER,
 	.cra_blocksize   = AES_BLOCK_SIZE,
+	.cra_alignmask   = 0xf,
 	.cra_ctxsize     = sizeof(struct nx_crypto_ctx),
 	.cra_type        = &crypto_blkcipher_type,
 	.cra_module      = THIS_MODULE,
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index c8109edc5cfb..6cca6c392b00 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
 	if (enc)
 		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
 	else
-		nbytes -= AES_BLOCK_SIZE;
+		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
 
 	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
 
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 9767315f8c0b..67024f2f0b78 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	 * 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
	 * 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
	 */
-	if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+	if (len + sctx->count < SHA256_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count, data, len);
 		sctx->count += len;
 		goto out;
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
 	atomic_inc(&(nx_ctx->stats->sha256_ops));
 
 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buf, data + len - leftover, leftover);
+	if (leftover)
+		memcpy(sctx->buf, data + len - leftover, leftover);
 	sctx->count = leftover;
 
 	csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 	struct nx_sg *in_sg, *out_sg;
 	int rc;
 
+
 	if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
 		/* we've hit the nx chip previously, now we're finalizing,
 		 * so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
 
 	atomic_inc(&(nx_ctx->stats->sha256_ops));
 
-	atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+	atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
 		     &(nx_ctx->stats->sha256_bytes));
 	memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
 out:
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 3177b8c3d5f1..08eee1122349 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	 * 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
	 * 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
	 */
-	if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+	if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
 		memcpy(sctx->buf + sctx->count[0], data, len);
 		sctx->count[0] += len;
 		goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
 
 	/* copy the leftover back into the state struct */
-	memcpy(sctx->buf, data + len - leftover, leftover);
+	if (leftover)
+		memcpy(sctx->buf, data + len - leftover, leftover);
 	sctx->count[0] = leftover;
 
 	spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
 		goto out;
 
 	atomic_inc(&(nx_ctx->stats->sha512_ops));
-	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+	atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
 		     &(nx_ctx->stats->sha512_bytes));
 
 	memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index c767f232e693..bbdab6e5ccf0 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
 {
 	struct nx_sg *nx_insg = nx_ctx->in_sg;
 	struct nx_sg *nx_outsg = nx_ctx->out_sg;
-	struct blkcipher_walk walk;
-	int rc;
-
-	blkcipher_walk_init(&walk, dst, src, nbytes);
-	rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
-	if (rc)
-		goto out;
 
 	if (iv)
-		memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+		memcpy(iv, desc->info, AES_BLOCK_SIZE);
 
-	while (walk.nbytes) {
-		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-					    walk.nbytes, nx_ctx->ap->sglen);
-
-		rc = blkcipher_walk_done(desc, &walk, 0);
-		if (rc)
-			break;
-	}
-
-	if (walk.nbytes) {
-		nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
-					   walk.nbytes, nx_ctx->ap->sglen);
-		nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
-					    walk.nbytes, nx_ctx->ap->sglen);
-
-		rc = 0;
-	}
+	nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+	nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
 
 	/* these lengths should be negative, which will indicate to phyp that
 	 * the input and output parameters are scatterlists, not linear
 	 * buffers */
 	nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
 	nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
-	return rc;
+
+	return 0;
 }
 
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out;
 
+	nx_driver.of.status = NX_OKAY;
+
 	rc = crypto_register_alg(&nx_ecb_aes_alg);
 	if (rc)
 		goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
 	if (rc)
 		goto out_unreg_s512;
 
-	nx_driver.of.status = NX_OKAY;
-
 	goto out;
 
 out_unreg_s512:
diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c
index ba6fc62e9651..5a18f82f732a 100644
--- a/drivers/dma/acpi-dma.c
+++ b/drivers/dma/acpi-dma.c
@@ -4,7 +4,8 @@
4 * Based on of-dma.c 4 * Based on of-dma.c
5 * 5 *
6 * Copyright (C) 2013, Intel Corporation 6 * Copyright (C) 2013, Intel Corporation
7 * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> 7 * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
8 * Mika Westerberg <mika.westerberg@linux.intel.com>
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as 11 * it under the terms of the GNU General Public License version 2 as
@@ -16,6 +17,7 @@
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/mutex.h> 18#include <linux/mutex.h>
18#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/ioport.h>
19#include <linux/acpi.h> 21#include <linux/acpi.h>
20#include <linux/acpi_dma.h> 22#include <linux/acpi_dma.h>
21 23
@@ -23,6 +25,117 @@ static LIST_HEAD(acpi_dma_list);
23static DEFINE_MUTEX(acpi_dma_lock); 25static DEFINE_MUTEX(acpi_dma_lock);
24 26
25/** 27/**
28 * acpi_dma_parse_resource_group - match device and parse resource group
29 * @grp: CSRT resource group
30 * @adev: ACPI device to match with
31 * @adma: struct acpi_dma of the given DMA controller
32 *
33 * Returns 1 on success, 0 when no information is available, or appropriate
34 * errno value on error.
35 *
36 * In order to match a device from DSDT table to the corresponding CSRT device
37 * we use MMIO address and IRQ.
38 */
39static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
40 struct acpi_device *adev, struct acpi_dma *adma)
41{
42 const struct acpi_csrt_shared_info *si;
43 struct list_head resource_list;
44 struct resource_list_entry *rentry;
45 resource_size_t mem = 0, irq = 0;
46 u32 vendor_id;
47 int ret;
48
49 if (grp->shared_info_length != sizeof(struct acpi_csrt_shared_info))
50 return -ENODEV;
51
52 INIT_LIST_HEAD(&resource_list);
53 ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
54 if (ret <= 0)
55 return 0;
56
57 list_for_each_entry(rentry, &resource_list, node) {
58 if (resource_type(&rentry->res) == IORESOURCE_MEM)
59 mem = rentry->res.start;
60 else if (resource_type(&rentry->res) == IORESOURCE_IRQ)
61 irq = rentry->res.start;
62 }
63
64 acpi_dev_free_resource_list(&resource_list);
65
66 /* Consider initial zero values as resource not found */
67 if (mem == 0 && irq == 0)
68 return 0;
69
70 si = (const struct acpi_csrt_shared_info *)&grp[1];
71
72 /* Match device by MMIO and IRQ */
73 if (si->mmio_base_low != mem || si->gsi_interrupt != irq)
74 return 0;
75
76 vendor_id = le32_to_cpu(grp->vendor_id);
77 dev_dbg(&adev->dev, "matches with %.4s%04X (rev %u)\n",
78 (char *)&vendor_id, grp->device_id, grp->revision);
79
80 /* Check if the request line range is available */
81 if (si->base_request_line == 0 && si->num_handshake_signals == 0)
82 return 0;
83
84 adma->base_request_line = si->base_request_line;
85 adma->end_request_line = si->base_request_line +
86 si->num_handshake_signals - 1;
87
88 dev_dbg(&adev->dev, "request line base: 0x%04x end: 0x%04x\n",
89 adma->base_request_line, adma->end_request_line);
90
91 return 1;
92}
93
94/**
95 * acpi_dma_parse_csrt - parse CSRT to exctract additional DMA resources
96 * @adev: ACPI device to match with
97 * @adma: struct acpi_dma of the given DMA controller
98 *
99 * CSRT or Core System Resources Table is a proprietary ACPI table
100 * introduced by Microsoft. This table can contain devices that are not in
 101 * the system DSDT table. In particular, DMA controllers might be described
102 * here.
103 *
104 * We are using this table to get the request line range of the specific DMA
105 * controller to be used later.
106 *
107 */
108static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
109{
110 struct acpi_csrt_group *grp, *end;
111 struct acpi_table_csrt *csrt;
112 acpi_status status;
113 int ret;
114
115 status = acpi_get_table(ACPI_SIG_CSRT, 0,
116 (struct acpi_table_header **)&csrt);
117 if (ACPI_FAILURE(status)) {
118 if (status != AE_NOT_FOUND)
119 dev_warn(&adev->dev, "failed to get the CSRT table\n");
120 return;
121 }
122
123 grp = (struct acpi_csrt_group *)(csrt + 1);
124 end = (struct acpi_csrt_group *)((void *)csrt + csrt->header.length);
125
126 while (grp < end) {
127 ret = acpi_dma_parse_resource_group(grp, adev, adma);
128 if (ret < 0) {
129 dev_warn(&adev->dev,
130 "error in parsing resource group\n");
131 return;
132 }
133
134 grp = (struct acpi_csrt_group *)((void *)grp + grp->length);
135 }
136}
137
138/**
26 * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers 139 * acpi_dma_controller_register - Register a DMA controller to ACPI DMA helpers
27 * @dev: struct device of DMA controller 140 * @dev: struct device of DMA controller
28 * @acpi_dma_xlate: translation function which converts a dma specifier 141 * @acpi_dma_xlate: translation function which converts a dma specifier
@@ -61,6 +174,8 @@ int acpi_dma_controller_register(struct device *dev,
61 adma->acpi_dma_xlate = acpi_dma_xlate; 174 adma->acpi_dma_xlate = acpi_dma_xlate;
62 adma->data = data; 175 adma->data = data;
63 176
177 acpi_dma_parse_csrt(adev, adma);
178
64 /* Now queue acpi_dma controller structure in list */ 179 /* Now queue acpi_dma controller structure in list */
65 mutex_lock(&acpi_dma_lock); 180 mutex_lock(&acpi_dma_lock);
66 list_add_tail(&adma->dma_controllers, &acpi_dma_list); 181 list_add_tail(&adma->dma_controllers, &acpi_dma_list);
@@ -149,6 +264,45 @@ void devm_acpi_dma_controller_free(struct device *dev)
149} 264}
150EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free); 265EXPORT_SYMBOL_GPL(devm_acpi_dma_controller_free);
151 266
267/**
268 * acpi_dma_update_dma_spec - prepare dma specifier to pass to translation function
269 * @adma: struct acpi_dma of DMA controller
270 * @dma_spec: dma specifier to update
271 *
 272 * Returns 0 if no information is available, -1 on mismatch, and 1 otherwise.
273 *
 274 * According to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
275 * Descriptor":
276 * DMA Request Line bits is a platform-relative number uniquely
277 * identifying the request line assigned. Request line-to-Controller
278 * mapping is done in a controller-specific OS driver.
279 * That's why we can safely adjust slave_id when the appropriate controller is
280 * found.
281 */
282static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
283 struct acpi_dma_spec *dma_spec)
284{
285 /* Set link to the DMA controller device */
286 dma_spec->dev = adma->dev;
287
288 /* Check if the request line range is available */
289 if (adma->base_request_line == 0 && adma->end_request_line == 0)
290 return 0;
291
 292	/* Check if slave_id falls within the range */
293 if (dma_spec->slave_id < adma->base_request_line ||
294 dma_spec->slave_id > adma->end_request_line)
295 return -1;
296
297 /*
 298	 * Here we adjust slave_id. It should be a number relative to the base
 299	 * request line.
300 */
301 dma_spec->slave_id -= adma->base_request_line;
302
303 return 1;
304}
305
152struct acpi_dma_parser_data { 306struct acpi_dma_parser_data {
153 struct acpi_dma_spec dma_spec; 307 struct acpi_dma_spec dma_spec;
154 size_t index; 308 size_t index;
@@ -193,6 +347,7 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
193 struct acpi_device *adev; 347 struct acpi_device *adev;
194 struct acpi_dma *adma; 348 struct acpi_dma *adma;
195 struct dma_chan *chan = NULL; 349 struct dma_chan *chan = NULL;
350 int found;
196 351
197 /* Check if the device was enumerated by ACPI */ 352 /* Check if the device was enumerated by ACPI */
198 if (!dev || !ACPI_HANDLE(dev)) 353 if (!dev || !ACPI_HANDLE(dev))
@@ -219,9 +374,20 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
219 mutex_lock(&acpi_dma_lock); 374 mutex_lock(&acpi_dma_lock);
220 375
221 list_for_each_entry(adma, &acpi_dma_list, dma_controllers) { 376 list_for_each_entry(adma, &acpi_dma_list, dma_controllers) {
222 dma_spec->dev = adma->dev; 377 /*
 378		 * We are not going to call the translation function if slave_id
 379		 * doesn't fall within the request line range.
380 */
381 found = acpi_dma_update_dma_spec(adma, dma_spec);
382 if (found < 0)
383 continue;
223 chan = adma->acpi_dma_xlate(dma_spec, adma); 384 chan = adma->acpi_dma_xlate(dma_spec, adma);
224 if (chan) 385 /*
386 * Try to get a channel only from the DMA controller that
387 * matches the slave_id. See acpi_dma_update_dma_spec()
388 * description for the details.
389 */
390 if (found > 0 || chan)
225 break; 391 break;
226 } 392 }
227 393
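The net effect of the new CSRT plumbing above is a range check plus a rebase of the Fixed DMA request line before the controller's translation function runs. A minimal user-space sketch of just that arithmetic, with a hypothetical controller descriptor standing in for struct acpi_dma:

#include <stdio.h>

/* Hypothetical stand-in for the request line window kept in struct acpi_dma. */
struct dma_ctrl {
	unsigned short base_request_line;
	unsigned short end_request_line;
};

/* Mirrors the logic of acpi_dma_update_dma_spec(): 0 = no range info,
 * -1 = not this controller, 1 = matched and slave_id rebased. */
static int rebase_slave_id(const struct dma_ctrl *c, unsigned int *slave_id)
{
	if (c->base_request_line == 0 && c->end_request_line == 0)
		return 0;
	if (*slave_id < c->base_request_line || *slave_id > c->end_request_line)
		return -1;
	*slave_id -= c->base_request_line;
	return 1;
}

int main(void)
{
	struct dma_ctrl c = { .base_request_line = 0x20, .end_request_line = 0x2f };
	unsigned int id = 0x25;

	if (rebase_slave_id(&c, &id) > 0)
		printf("controller-relative request line: %u\n", id);	/* prints 5 */
	return 0;
}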
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index ce193409ebd3..33f59ecd256e 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1273,11 +1273,6 @@ static int tegra_dma_probe(struct platform_device *pdev)
1273 platform_set_drvdata(pdev, tdma); 1273 platform_set_drvdata(pdev, tdma);
1274 1274
1275 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1275 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1276 if (!res) {
1277 dev_err(&pdev->dev, "No mem resource for DMA\n");
1278 return -EINVAL;
1279 }
1280
1281 tdma->base_addr = devm_ioremap_resource(&pdev->dev, res); 1276 tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
1282 if (IS_ERR(tdma->base_addr)) 1277 if (IS_ERR(tdma->base_addr))
1283 return PTR_ERR(tdma->base_addr); 1278 return PTR_ERR(tdma->base_addr);
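The deleted NULL check is redundant because devm_ioremap_resource() validates the resource itself, prints an error and returns ERR_PTR(-EINVAL) when it is missing; the gpio-mvebu, gpio-tegra and exynos_hdmi hunks below make the same simplification. A minimal probe-side sketch of the resulting pattern, using a hypothetical foo_probe() rather than any particular driver:

#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* devm_ioremap_resource() copes with res == NULL, so no manual check */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	return 0;
}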
diff --git a/drivers/edac/amd64_edac_inj.c b/drivers/edac/amd64_edac_inj.c
index 8c171fa1cb9b..845f04786c2d 100644
--- a/drivers/edac/amd64_edac_inj.c
+++ b/drivers/edac/amd64_edac_inj.c
@@ -202,9 +202,9 @@ static DEVICE_ATTR(inject_word, S_IRUGO | S_IWUSR,
202 amd64_inject_word_show, amd64_inject_word_store); 202 amd64_inject_word_show, amd64_inject_word_store);
203static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR, 203static DEVICE_ATTR(inject_ecc_vector, S_IRUGO | S_IWUSR,
204 amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store); 204 amd64_inject_ecc_vector_show, amd64_inject_ecc_vector_store);
205static DEVICE_ATTR(inject_write, S_IRUGO | S_IWUSR, 205static DEVICE_ATTR(inject_write, S_IWUSR,
206 NULL, amd64_inject_write_store); 206 NULL, amd64_inject_write_store);
207static DEVICE_ATTR(inject_read, S_IRUGO | S_IWUSR, 207static DEVICE_ATTR(inject_read, S_IWUSR,
208 NULL, amd64_inject_read_store); 208 NULL, amd64_inject_read_store);
209 209
210 210
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 87d567089f13..573c449c49b9 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -636,7 +636,7 @@ config GPIO_MAX7301
636 636
637config GPIO_MCP23S08 637config GPIO_MCP23S08
638 tristate "Microchip MCP23xxx I/O expander" 638 tristate "Microchip MCP23xxx I/O expander"
639 depends on SPI_MASTER || I2C 639 depends on (SPI_MASTER && !I2C) || I2C
640 help 640 help
641 SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017 641 SPI/I2C driver for Microchip MCP23S08/MCP23S17/MCP23008/MCP23017
642 I/O expanders. 642 I/O expanders.
diff --git a/drivers/gpio/gpio-langwell.c b/drivers/gpio/gpio-langwell.c
index 634c3d37f7b5..62ef10a641c4 100644
--- a/drivers/gpio/gpio-langwell.c
+++ b/drivers/gpio/gpio-langwell.c
@@ -324,6 +324,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
324 resource_size_t start, len; 324 resource_size_t start, len;
325 struct lnw_gpio *lnw; 325 struct lnw_gpio *lnw;
326 u32 gpio_base; 326 u32 gpio_base;
327 u32 irq_base;
327 int retval; 328 int retval;
328 int ngpio = id->driver_data; 329 int ngpio = id->driver_data;
329 330
@@ -345,6 +346,7 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
345 retval = -EFAULT; 346 retval = -EFAULT;
346 goto err_ioremap; 347 goto err_ioremap;
347 } 348 }
349 irq_base = *(u32 *)base;
348 gpio_base = *((u32 *)base + 1); 350 gpio_base = *((u32 *)base + 1);
349 /* release the IO mapping, since we already get the info from bar1 */ 351 /* release the IO mapping, since we already get the info from bar1 */
350 iounmap(base); 352 iounmap(base);
@@ -365,13 +367,6 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
365 goto err_ioremap; 367 goto err_ioremap;
366 } 368 }
367 369
368 lnw->domain = irq_domain_add_linear(pdev->dev.of_node, ngpio,
369 &lnw_gpio_irq_ops, lnw);
370 if (!lnw->domain) {
371 retval = -ENOMEM;
372 goto err_ioremap;
373 }
374
375 lnw->reg_base = base; 370 lnw->reg_base = base;
376 lnw->chip.label = dev_name(&pdev->dev); 371 lnw->chip.label = dev_name(&pdev->dev);
377 lnw->chip.request = lnw_gpio_request; 372 lnw->chip.request = lnw_gpio_request;
@@ -384,6 +379,14 @@ static int lnw_gpio_probe(struct pci_dev *pdev,
384 lnw->chip.ngpio = ngpio; 379 lnw->chip.ngpio = ngpio;
385 lnw->chip.can_sleep = 0; 380 lnw->chip.can_sleep = 0;
386 lnw->pdev = pdev; 381 lnw->pdev = pdev;
382
383 lnw->domain = irq_domain_add_simple(pdev->dev.of_node, ngpio, irq_base,
384 &lnw_gpio_irq_ops, lnw);
385 if (!lnw->domain) {
386 retval = -ENOMEM;
387 goto err_ioremap;
388 }
389
387 pci_set_drvdata(pdev, lnw); 390 pci_set_drvdata(pdev, lnw);
388 retval = gpiochip_add(&lnw->chip); 391 retval = gpiochip_add(&lnw->chip);
389 if (retval) { 392 if (retval) {
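The point of reading irq_base from BAR1 and switching to irq_domain_add_simple() is that a non-zero first IRQ maps the hwirqs onto that pre-allocated descriptor range, while zero falls back to a plain linear domain. A rough sketch of just the domain setup, with hypothetical names (foo_irq_ops, foo_setup_domain) in place of the driver's own:

#include <linux/irqdomain.h>

/* Hypothetical chip-private irq_domain_ops; only the call shape matters. */
static const struct irq_domain_ops foo_irq_ops;

static struct irq_domain *foo_setup_domain(struct device_node *np,
					    unsigned int ngpio,
					    unsigned int irq_base,
					    void *priv)
{
	/*
	 * irq_base != 0: associate hwirqs 0..ngpio-1 with the descriptors
	 * starting at irq_base (as read from BAR1 in the probe above).
	 * irq_base == 0: create a linear domain instead.
	 */
	return irq_domain_add_simple(np, ngpio, irq_base, &foo_irq_ops, priv);
}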
diff --git a/drivers/gpio/gpio-ml-ioh.c b/drivers/gpio/gpio-ml-ioh.c
index b73366523fae..0966f2637ad2 100644
--- a/drivers/gpio/gpio-ml-ioh.c
+++ b/drivers/gpio/gpio-ml-ioh.c
@@ -496,8 +496,7 @@ err_irq_alloc_descs:
496err_gpiochip_add: 496err_gpiochip_add:
497 while (--i >= 0) { 497 while (--i >= 0) {
498 chip--; 498 chip--;
499 ret = gpiochip_remove(&chip->gpio); 499 if (gpiochip_remove(&chip->gpio))
500 if (ret)
501 dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i); 500 dev_err(&pdev->dev, "Failed gpiochip_remove(%d)\n", i);
502 } 501 }
503 kfree(chip_save); 502 kfree(chip_save);
diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c
index bf69a7eff370..3a4816adc137 100644
--- a/drivers/gpio/gpio-mvebu.c
+++ b/drivers/gpio/gpio-mvebu.c
@@ -619,11 +619,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev)
619 * per-CPU registers */ 619 * per-CPU registers */
620 if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) { 620 if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
621 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 621 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
622 if (!res) {
623 dev_err(&pdev->dev, "Cannot get memory resource\n");
624 return -ENODEV;
625 }
626
627 mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev, 622 mvchip->percpu_membase = devm_ioremap_resource(&pdev->dev,
628 res); 623 res);
629 if (IS_ERR(mvchip->percpu_membase)) 624 if (IS_ERR(mvchip->percpu_membase))
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 25000b0f8453..f8e6af20dfbf 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -326,7 +326,8 @@ static int mxs_gpio_probe(struct platform_device *pdev)
326 326
327 err = bgpio_init(&port->bgc, &pdev->dev, 4, 327 err = bgpio_init(&port->bgc, &pdev->dev, 4,
328 port->base + PINCTRL_DIN(port), 328 port->base + PINCTRL_DIN(port),
329 port->base + PINCTRL_DOUT(port), NULL, 329 port->base + PINCTRL_DOUT(port) + MXS_SET,
330 port->base + PINCTRL_DOUT(port) + MXS_CLR,
330 port->base + PINCTRL_DOE(port), NULL, 0); 331 port->base + PINCTRL_DOE(port), NULL, 0);
331 if (err) 332 if (err)
332 goto out_irqdesc_free; 333 goto out_irqdesc_free;
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 2050891d9c65..d3f7d2db870f 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -69,6 +69,7 @@ struct gpio_bank {
69 bool is_mpuio; 69 bool is_mpuio;
70 bool dbck_flag; 70 bool dbck_flag;
71 bool loses_context; 71 bool loses_context;
72 bool context_valid;
72 int stride; 73 int stride;
73 u32 width; 74 u32 width;
74 int context_loss_count; 75 int context_loss_count;
@@ -1128,6 +1129,10 @@ static int omap_gpio_probe(struct platform_device *pdev)
1128 bank->loses_context = true; 1129 bank->loses_context = true;
1129 } else { 1130 } else {
1130 bank->loses_context = pdata->loses_context; 1131 bank->loses_context = pdata->loses_context;
1132
1133 if (bank->loses_context)
1134 bank->get_context_loss_count =
1135 pdata->get_context_loss_count;
1131 } 1136 }
1132 1137
1133 1138
@@ -1178,9 +1183,6 @@ static int omap_gpio_probe(struct platform_device *pdev)
1178 omap_gpio_chip_init(bank); 1183 omap_gpio_chip_init(bank);
1179 omap_gpio_show_rev(bank); 1184 omap_gpio_show_rev(bank);
1180 1185
1181 if (bank->loses_context)
1182 bank->get_context_loss_count = pdata->get_context_loss_count;
1183
1184 pm_runtime_put(bank->dev); 1186 pm_runtime_put(bank->dev);
1185 1187
1186 list_add_tail(&bank->node, &omap_gpio_list); 1188 list_add_tail(&bank->node, &omap_gpio_list);
@@ -1259,6 +1261,8 @@ update_gpio_context_count:
1259 return 0; 1261 return 0;
1260} 1262}
1261 1263
1264static void omap_gpio_init_context(struct gpio_bank *p);
1265
1262static int omap_gpio_runtime_resume(struct device *dev) 1266static int omap_gpio_runtime_resume(struct device *dev)
1263{ 1267{
1264 struct platform_device *pdev = to_platform_device(dev); 1268 struct platform_device *pdev = to_platform_device(dev);
@@ -1268,6 +1272,20 @@ static int omap_gpio_runtime_resume(struct device *dev)
1268 int c; 1272 int c;
1269 1273
1270 spin_lock_irqsave(&bank->lock, flags); 1274 spin_lock_irqsave(&bank->lock, flags);
1275
1276 /*
1277 * On the first resume during the probe, the context has not
1278 * been initialised and so initialise it now. Also initialise
1279 * the context loss count.
1280 */
1281 if (bank->loses_context && !bank->context_valid) {
1282 omap_gpio_init_context(bank);
1283
1284 if (bank->get_context_loss_count)
1285 bank->context_loss_count =
1286 bank->get_context_loss_count(bank->dev);
1287 }
1288
1271 _gpio_dbck_enable(bank); 1289 _gpio_dbck_enable(bank);
1272 1290
1273 /* 1291 /*
@@ -1384,6 +1402,29 @@ void omap2_gpio_resume_after_idle(void)
1384} 1402}
1385 1403
1386#if defined(CONFIG_PM_RUNTIME) 1404#if defined(CONFIG_PM_RUNTIME)
1405static void omap_gpio_init_context(struct gpio_bank *p)
1406{
1407 struct omap_gpio_reg_offs *regs = p->regs;
1408 void __iomem *base = p->base;
1409
1410 p->context.ctrl = __raw_readl(base + regs->ctrl);
1411 p->context.oe = __raw_readl(base + regs->direction);
1412 p->context.wake_en = __raw_readl(base + regs->wkup_en);
1413 p->context.leveldetect0 = __raw_readl(base + regs->leveldetect0);
1414 p->context.leveldetect1 = __raw_readl(base + regs->leveldetect1);
1415 p->context.risingdetect = __raw_readl(base + regs->risingdetect);
1416 p->context.fallingdetect = __raw_readl(base + regs->fallingdetect);
1417 p->context.irqenable1 = __raw_readl(base + regs->irqenable);
1418 p->context.irqenable2 = __raw_readl(base + regs->irqenable2);
1419
1420 if (regs->set_dataout && p->regs->clr_dataout)
1421 p->context.dataout = __raw_readl(base + regs->set_dataout);
1422 else
1423 p->context.dataout = __raw_readl(base + regs->dataout);
1424
1425 p->context_valid = true;
1426}
1427
1387static void omap_gpio_restore_context(struct gpio_bank *bank) 1428static void omap_gpio_restore_context(struct gpio_bank *bank)
1388{ 1429{
1389 __raw_writel(bank->context.wake_en, 1430 __raw_writel(bank->context.wake_en,
@@ -1421,6 +1462,7 @@ static void omap_gpio_restore_context(struct gpio_bank *bank)
1421#else 1462#else
1422#define omap_gpio_runtime_suspend NULL 1463#define omap_gpio_runtime_suspend NULL
1423#define omap_gpio_runtime_resume NULL 1464#define omap_gpio_runtime_resume NULL
1465static void omap_gpio_init_context(struct gpio_bank *p) {}
1424#endif 1466#endif
1425 1467
1426static const struct dev_pm_ops gpio_pm_ops = { 1468static const struct dev_pm_ops gpio_pm_ops = {
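The gpio-omap change is the usual lazy-snapshot pattern: the register context cannot be captured at probe time, so a context_valid flag defers it to the first runtime resume, together with the initial context-loss count. A stripped-down stand-alone sketch of that pattern with a hypothetical bank structure and platform hook:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical bank: one saved "register" and a validity flag. */
struct bank {
	unsigned int ctrl;		/* live register value */
	unsigned int saved_ctrl;	/* snapshot */
	bool context_valid;
	int context_loss_count;
};

static int get_context_loss_count(void) { return 42; }	/* platform hook stub */

static void bank_runtime_resume(struct bank *b)
{
	/* First resume after probe: take the snapshot and seed the counter. */
	if (!b->context_valid) {
		b->saved_ctrl = b->ctrl;
		b->context_loss_count = get_context_loss_count();
		b->context_valid = true;
		return;
	}

	/* Later resumes: restore only if context was actually lost. */
	if (get_context_loss_count() != b->context_loss_count)
		b->ctrl = b->saved_ctrl;
}

int main(void)
{
	struct bank b = { .ctrl = 0xA5 };

	bank_runtime_resume(&b);	/* takes the initial snapshot */
	printf("saved 0x%x, valid=%d\n", b.saved_ctrl, b.context_valid);
	return 0;
}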
diff --git a/drivers/gpio/gpio-pch.c b/drivers/gpio/gpio-pch.c
index cdf599687cf7..0fec097e838d 100644
--- a/drivers/gpio/gpio-pch.c
+++ b/drivers/gpio/gpio-pch.c
@@ -424,8 +424,7 @@ end:
424err_request_irq: 424err_request_irq:
425 irq_free_descs(irq_base, gpio_pins[chip->ioh]); 425 irq_free_descs(irq_base, gpio_pins[chip->ioh]);
426 426
427 ret = gpiochip_remove(&chip->gpio); 427 if (gpiochip_remove(&chip->gpio))
428 if (ret)
429 dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__); 428 dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
430 429
431err_gpiochip_add: 430err_gpiochip_add:
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 1e4de16ceb41..5af65719b95d 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -272,10 +272,8 @@ static int sch_gpio_probe(struct platform_device *pdev)
272 return 0; 272 return 0;
273 273
274err_sch_gpio_resume: 274err_sch_gpio_resume:
275 err = gpiochip_remove(&sch_gpio_core); 275 if (gpiochip_remove(&sch_gpio_core))
276 if (err) 276 dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
277 dev_err(&pdev->dev, "%s failed, %d\n",
278 "gpiochip_remove()", err);
279 277
280err_sch_gpio_core: 278err_sch_gpio_core:
281 release_region(res->start, resource_size(res)); 279 release_region(res->start, resource_size(res));
diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c
index da4cb5b0cb87..9a62672f1bed 100644
--- a/drivers/gpio/gpio-tegra.c
+++ b/drivers/gpio/gpio-tegra.c
@@ -463,11 +463,6 @@ static int tegra_gpio_probe(struct platform_device *pdev)
463 } 463 }
464 464
465 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 465 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
466 if (!res) {
467 dev_err(&pdev->dev, "Missing MEM resource\n");
468 return -ENODEV;
469 }
470
471 regs = devm_ioremap_resource(&pdev->dev, res); 466 regs = devm_ioremap_resource(&pdev->dev, res);
472 if (IS_ERR(regs)) 467 if (IS_ERR(regs))
473 return PTR_ERR(regs); 468 return PTR_ERR(regs);
diff --git a/drivers/gpio/gpio-viperboard.c b/drivers/gpio/gpio-viperboard.c
index 095ab14cea4d..5ac2919197fe 100644
--- a/drivers/gpio/gpio-viperboard.c
+++ b/drivers/gpio/gpio-viperboard.c
@@ -446,7 +446,8 @@ static int vprbrd_gpio_probe(struct platform_device *pdev)
446 return ret; 446 return ret;
447 447
448err_gpiob: 448err_gpiob:
449 ret = gpiochip_remove(&vb_gpio->gpioa); 449 if (gpiochip_remove(&vb_gpio->gpioa))
450 dev_err(&pdev->dev, "%s gpiochip_remove failed\n", __func__);
450 451
451err_gpioa: 452err_gpioa:
452 return ret; 453 return ret;
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 3a8f7e6db295..e7e92429d10f 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -78,6 +78,10 @@ void drm_warn_on_modeset_not_all_locked(struct drm_device *dev)
78{ 78{
79 struct drm_crtc *crtc; 79 struct drm_crtc *crtc;
80 80
81 /* Locking is currently fubar in the panic handler. */
82 if (oops_in_progress)
83 return;
84
81 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 85 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
82 WARN_ON(!mutex_is_locked(&crtc->mutex)); 86 WARN_ON(!mutex_is_locked(&crtc->mutex));
83 87
@@ -246,6 +250,7 @@ char *drm_get_connector_status_name(enum drm_connector_status status)
246 else 250 else
247 return "unknown"; 251 return "unknown";
248} 252}
253EXPORT_SYMBOL(drm_get_connector_status_name);
249 254
250/** 255/**
251 * drm_mode_object_get - allocate a new modeset identifier 256 * drm_mode_object_get - allocate a new modeset identifier
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index e974f9309b72..ed1334e27c33 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -121,6 +121,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
121 connector->helper_private; 121 connector->helper_private;
122 int count = 0; 122 int count = 0;
123 int mode_flags = 0; 123 int mode_flags = 0;
124 bool verbose_prune = true;
124 125
125 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, 126 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
126 drm_get_connector_name(connector)); 127 drm_get_connector_name(connector));
@@ -149,6 +150,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
149 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n", 150 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] disconnected\n",
150 connector->base.id, drm_get_connector_name(connector)); 151 connector->base.id, drm_get_connector_name(connector));
151 drm_mode_connector_update_edid_property(connector, NULL); 152 drm_mode_connector_update_edid_property(connector, NULL);
153 verbose_prune = false;
152 goto prune; 154 goto prune;
153 } 155 }
154 156
@@ -182,7 +184,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
182 } 184 }
183 185
184prune: 186prune:
185 drm_mode_prune_invalid(dev, &connector->modes, true); 187 drm_mode_prune_invalid(dev, &connector->modes, verbose_prune);
186 188
187 if (list_empty(&connector->modes)) 189 if (list_empty(&connector->modes))
188 return 0; 190 return 0;
@@ -1005,12 +1007,20 @@ static void output_poll_execute(struct work_struct *work)
1005 continue; 1007 continue;
1006 1008
1007 connector->status = connector->funcs->detect(connector, false); 1009 connector->status = connector->funcs->detect(connector, false);
1008 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 1010 if (old_status != connector->status) {
1009 connector->base.id, 1011 const char *old, *new;
1010 drm_get_connector_name(connector), 1012
1011 old_status, connector->status); 1013 old = drm_get_connector_status_name(old_status);
1012 if (old_status != connector->status) 1014 new = drm_get_connector_status_name(connector->status);
1015
1016 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] "
1017 "status updated from %s to %s\n",
1018 connector->base.id,
1019 drm_get_connector_name(connector),
1020 old, new);
1021
1013 changed = true; 1022 changed = true;
1023 }
1014 } 1024 }
1015 1025
1016 mutex_unlock(&dev->mode_config.mutex); 1026 mutex_unlock(&dev->mode_config.mutex);
@@ -1083,10 +1093,11 @@ void drm_helper_hpd_irq_event(struct drm_device *dev)
1083 old_status = connector->status; 1093 old_status = connector->status;
1084 1094
1085 connector->status = connector->funcs->detect(connector, false); 1095 connector->status = connector->funcs->detect(connector, false);
1086 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", 1096 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1087 connector->base.id, 1097 connector->base.id,
1088 drm_get_connector_name(connector), 1098 drm_get_connector_name(connector),
1089 old_status, connector->status); 1099 drm_get_connector_status_name(old_status),
1100 drm_get_connector_status_name(connector->status));
1090 if (old_status != connector->status) 1101 if (old_status != connector->status)
1091 changed = true; 1102 changed = true;
1092 } 1103 }
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 8d4f29075af5..9cc247f55502 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -57,7 +57,7 @@ static int drm_version(struct drm_device *dev, void *data,
57 struct drm_file *file_priv); 57 struct drm_file *file_priv);
58 58
59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \ 59#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0} 60 [DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
61 61
62/** Ioctl table */ 62/** Ioctl table */
63static const struct drm_ioctl_desc drm_ioctls[] = { 63static const struct drm_ioctl_desc drm_ioctls[] = {
@@ -375,7 +375,7 @@ long drm_ioctl(struct file *filp,
375{ 375{
376 struct drm_file *file_priv = filp->private_data; 376 struct drm_file *file_priv = filp->private_data;
377 struct drm_device *dev; 377 struct drm_device *dev;
378 const struct drm_ioctl_desc *ioctl; 378 const struct drm_ioctl_desc *ioctl = NULL;
379 drm_ioctl_t *func; 379 drm_ioctl_t *func;
380 unsigned int nr = DRM_IOCTL_NR(cmd); 380 unsigned int nr = DRM_IOCTL_NR(cmd);
381 int retcode = -EINVAL; 381 int retcode = -EINVAL;
@@ -392,11 +392,6 @@ long drm_ioctl(struct file *filp,
392 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); 392 atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
393 ++file_priv->ioctl_count; 393 ++file_priv->ioctl_count;
394 394
395 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
396 task_pid_nr(current), cmd, nr,
397 (long)old_encode_dev(file_priv->minor->device),
398 file_priv->authenticated);
399
400 if ((nr >= DRM_CORE_IOCTL_COUNT) && 395 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
401 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) 396 ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
402 goto err_i1; 397 goto err_i1;
@@ -417,6 +412,11 @@ long drm_ioctl(struct file *filp,
417 } else 412 } else
418 goto err_i1; 413 goto err_i1;
419 414
415 DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n",
416 task_pid_nr(current),
417 (long)old_encode_dev(file_priv->minor->device),
418 file_priv->authenticated, ioctl->name);
419
420 /* Do not trust userspace, use our own definition */ 420 /* Do not trust userspace, use our own definition */
421 func = ioctl->func; 421 func = ioctl->func;
422 /* is there a local override? */ 422 /* is there a local override? */
@@ -471,6 +471,12 @@ long drm_ioctl(struct file *filp,
471 } 471 }
472 472
473 err_i1: 473 err_i1:
474 if (!ioctl)
 475 DRM_DEBUG("invalid ioctl: pid=%d, dev=0x%lx, auth=%d, cmd=0x%02x, nr=0x%02x\n",
476 task_pid_nr(current),
477 (long)old_encode_dev(file_priv->minor->device),
478 file_priv->authenticated, cmd, nr);
479
474 if (kdata != stack_kdata) 480 if (kdata != stack_kdata)
475 kfree(kdata); 481 kfree(kdata);
476 atomic_dec(&dev->ioctl_count); 482 atomic_dec(&dev->ioctl_count);
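The drm_drv change relies on nothing more exotic than the preprocessor's stringizing operator: the macro records #ioctl as a .name field so the debug line can print the symbolic ioctl name instead of a raw command number. A tiny stand-alone illustration of that descriptor-table trick with hypothetical commands:

#include <stdio.h>

struct ioctl_desc {
	unsigned int cmd;
	const char *name;
};

/* Same idea as DRM_IOCTL_DEF(): #cmd turns the macro argument into a string. */
#define IOCTL_DEF(cmd)	{ cmd, #cmd }

#define FOO_IOCTL_RESET	0x01
#define FOO_IOCTL_QUERY	0x02

static const struct ioctl_desc ioctls[] = {
	IOCTL_DEF(FOO_IOCTL_RESET),
	IOCTL_DEF(FOO_IOCTL_QUERY),
};

int main(void)
{
	for (unsigned int i = 0; i < sizeof(ioctls) / sizeof(ioctls[0]); i++)
		printf("cmd=0x%02x name=%s\n", ioctls[i].cmd, ioctls[i].name);
	return 0;
}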
diff --git a/drivers/gpu/drm/drm_encoder_slave.c b/drivers/gpu/drm/drm_encoder_slave.c
index 48c52f7df4e6..0cfb60f54766 100644
--- a/drivers/gpu/drm/drm_encoder_slave.c
+++ b/drivers/gpu/drm/drm_encoder_slave.c
@@ -54,16 +54,12 @@ int drm_i2c_encoder_init(struct drm_device *dev,
54 struct i2c_adapter *adap, 54 struct i2c_adapter *adap,
55 const struct i2c_board_info *info) 55 const struct i2c_board_info *info)
56{ 56{
57 char modalias[sizeof(I2C_MODULE_PREFIX)
58 + I2C_NAME_SIZE];
59 struct module *module = NULL; 57 struct module *module = NULL;
60 struct i2c_client *client; 58 struct i2c_client *client;
61 struct drm_i2c_encoder_driver *encoder_drv; 59 struct drm_i2c_encoder_driver *encoder_drv;
62 int err = 0; 60 int err = 0;
63 61
64 snprintf(modalias, sizeof(modalias), 62 request_module("%s%s", I2C_MODULE_PREFIX, info->type);
65 "%s%s", I2C_MODULE_PREFIX, info->type);
66 request_module(modalias);
67 63
68 client = i2c_new_device(adap, info); 64 client = i2c_new_device(adap, info);
69 if (!client) { 65 if (!client) {
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index db1e2d6f90d7..07cf99cc8862 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -755,33 +755,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
755EXPORT_SYMBOL(drm_mm_debug_table); 755EXPORT_SYMBOL(drm_mm_debug_table);
756 756
757#if defined(CONFIG_DEBUG_FS) 757#if defined(CONFIG_DEBUG_FS)
758int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) 758static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry)
759{ 759{
760 struct drm_mm_node *entry;
761 unsigned long total_used = 0, total_free = 0, total = 0;
762 unsigned long hole_start, hole_end, hole_size; 760 unsigned long hole_start, hole_end, hole_size;
763 761
764 hole_start = drm_mm_hole_node_start(&mm->head_node); 762 if (entry->hole_follows) {
765 hole_end = drm_mm_hole_node_end(&mm->head_node); 763 hole_start = drm_mm_hole_node_start(entry);
766 hole_size = hole_end - hole_start; 764 hole_end = drm_mm_hole_node_end(entry);
767 if (hole_size) 765 hole_size = hole_end - hole_start;
768 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", 766 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
769 hole_start, hole_end, hole_size); 767 hole_start, hole_end, hole_size);
770 total_free += hole_size; 768 return hole_size;
769 }
770
771 return 0;
772}
773
774int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
775{
776 struct drm_mm_node *entry;
777 unsigned long total_used = 0, total_free = 0, total = 0;
778
779 total_free += drm_mm_dump_hole(m, &mm->head_node);
771 780
772 drm_mm_for_each_node(entry, mm) { 781 drm_mm_for_each_node(entry, mm) {
773 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", 782 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n",
774 entry->start, entry->start + entry->size, 783 entry->start, entry->start + entry->size,
775 entry->size); 784 entry->size);
776 total_used += entry->size; 785 total_used += entry->size;
777 if (entry->hole_follows) { 786 total_free += drm_mm_dump_hole(m, entry);
778 hole_start = drm_mm_hole_node_start(entry);
779 hole_end = drm_mm_hole_node_end(entry);
780 hole_size = hole_end - hole_start;
781 seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
782 hole_start, hole_end, hole_size);
783 total_free += hole_size;
784 }
785 } 787 }
786 total = total_free + total_used; 788 total = total_free + total_used;
787 789
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index faa79df02648..a371ff865a88 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1143,6 +1143,7 @@ bool drm_mode_parse_command_line_for_connector(const char *mode_option,
1143 was_digit = false; 1143 was_digit = false;
1144 } else 1144 } else
1145 goto done; 1145 goto done;
1146 break;
1146 case '0' ... '9': 1147 case '0' ... '9':
1147 was_digit = true; 1148 was_digit = true;
1148 break; 1149 break;
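The drm_modes fix is a plain missing break: without it, the end of the previous case falls straight through into case '0' ... '9' and sets was_digit again. A small stand-alone reproduction of that fall-through hazard (case ranges are the GCC extension the kernel already uses):

#include <stdbool.h>
#include <stdio.h>

/* Returns whether c should be treated as a digit; the break before the
 * digit case is the kind of statement the patch above adds. */
static bool classify(char c, bool was_digit)
{
	switch (c) {
	case 'x':
		was_digit = false;
		break;		/* without this, 'x' falls into the digit case */
	case '0' ... '9':
		was_digit = true;
		break;
	default:
		break;
	}
	return was_digit;
}

int main(void)
{
	printf("'x' -> %d, '7' -> %d\n", classify('x', true), classify('7', false));
	return 0;
}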
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index bbfc3840080c..6652597586a1 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2005,11 +2005,6 @@ static int hdmi_probe(struct platform_device *pdev)
2005 } 2005 }
2006 2006
2007 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2007 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2008 if (!res) {
2009 DRM_ERROR("failed to find registers\n");
2010 return -ENOENT;
2011 }
2012
2013 hdata->regs = devm_ioremap_resource(&pdev->dev, res); 2008 hdata->regs = devm_ioremap_resource(&pdev->dev, res);
2014 if (IS_ERR(hdata->regs)) 2009 if (IS_ERR(hdata->regs))
2015 return PTR_ERR(hdata->regs); 2010 return PTR_ERR(hdata->regs);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6be940effefd..6165535d15f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1045,6 +1045,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1045 if (timeout) { 1045 if (timeout) {
1046 struct timespec sleep_time = timespec_sub(now, before); 1046 struct timespec sleep_time = timespec_sub(now, before);
1047 *timeout = timespec_sub(*timeout, sleep_time); 1047 *timeout = timespec_sub(*timeout, sleep_time);
1048 if (!timespec_valid(timeout)) /* i.e. negative time remains */
1049 set_normalized_timespec(timeout, 0, 0);
1048 } 1050 }
1049 1051
1050 switch (end) { 1052 switch (end) {
@@ -1053,8 +1055,6 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
1053 case -ERESTARTSYS: /* Signal */ 1055 case -ERESTARTSYS: /* Signal */
1054 return (int)end; 1056 return (int)end;
1055 case 0: /* Timeout */ 1057 case 0: /* Timeout */
1056 if (timeout)
1057 set_normalized_timespec(timeout, 0, 0);
1058 return -ETIME; 1058 return -ETIME;
1059 default: /* Completed */ 1059 default: /* Completed */
1060 WARN_ON(end < 0); /* We're not aware of other errors */ 1060 WARN_ON(end < 0); /* We're not aware of other errors */
@@ -2377,10 +2377,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
2377 mutex_unlock(&dev->struct_mutex); 2377 mutex_unlock(&dev->struct_mutex);
2378 2378
2379 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout); 2379 ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
2380 if (timeout) { 2380 if (timeout)
2381 WARN_ON(!timespec_valid(timeout));
2382 args->timeout_ns = timespec_to_ns(timeout); 2381 args->timeout_ns = timespec_to_ns(timeout);
2383 }
2384 return ret; 2382 return ret;
2385 2383
2386out: 2384out:
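Moving the clamp means every exit path of the wait now reports a sane remaining timeout: the elapsed wait is subtracted first and a negative result is normalised to zero before anything is handed back to userspace. A short sketch of that bookkeeping, assuming only the usual timespec helpers from <linux/time.h>:

#include <linux/time.h>

/* Subtract the time already spent waiting from *timeout, clamping at zero
 * instead of returning a negative timespec to the caller. */
static void consume_timeout(struct timespec *timeout,
			    struct timespec before, struct timespec now)
{
	struct timespec sleep_time = timespec_sub(now, before);

	*timeout = timespec_sub(*timeout, sleep_time);
	if (!timespec_valid(timeout))	/* i.e. negative time remains */
		set_normalized_timespec(timeout, 0, 0);
}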
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index dca614de71b6..bdb0d7717bc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -709,15 +709,6 @@ static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
709 return snb_gmch_ctl << 25; /* 32 MB units */ 709 return snb_gmch_ctl << 25; /* 32 MB units */
710} 710}
711 711
712static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
713{
714 static const int stolen_decoder[] = {
715 0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
716 snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
717 snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
718 return stolen_decoder[snb_gmch_ctl] << 20;
719}
720
721static int gen6_gmch_probe(struct drm_device *dev, 712static int gen6_gmch_probe(struct drm_device *dev,
722 size_t *gtt_total, 713 size_t *gtt_total,
723 size_t *stolen, 714 size_t *stolen,
@@ -747,11 +738,7 @@ static int gen6_gmch_probe(struct drm_device *dev,
747 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl); 738 pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
748 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl); 739 gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
749 740
750 if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) 741 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
751 *stolen = gen7_get_stolen_size(snb_gmch_ctl);
752 else
753 *stolen = gen6_get_stolen_size(snb_gmch_ctl);
754
755 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT; 742 *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
756 743
757 /* For Modern GENs the PTEs and register space are split in the BAR */ 744 /* For Modern GENs the PTEs and register space are split in the BAR */
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 83f9c26e1adb..2d6b62e42daf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -46,8 +46,6 @@
46#define SNB_GMCH_GGMS_MASK 0x3 46#define SNB_GMCH_GGMS_MASK 0x3
47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 47#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
48#define SNB_GMCH_GMS_MASK 0x1f 48#define SNB_GMCH_GMS_MASK 0x1f
49#define IVB_GMCH_GMS_SHIFT 4
50#define IVB_GMCH_GMS_MASK 0xf
51 49
52 50
53/* PCI config space */ 51/* PCI config space */
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 26a0a570f92e..fb961bb81903 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1265,6 +1265,8 @@ static void intel_ddi_pre_enable(struct intel_encoder *intel_encoder)
1265 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1265 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
1266 intel_dp_start_link_train(intel_dp); 1266 intel_dp_start_link_train(intel_dp);
1267 intel_dp_complete_link_train(intel_dp); 1267 intel_dp_complete_link_train(intel_dp);
1268 if (port != PORT_A)
1269 intel_dp_stop_link_train(intel_dp);
1268 } 1270 }
1269} 1271}
1270 1272
@@ -1326,6 +1328,9 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
1326 } else if (type == INTEL_OUTPUT_EDP) { 1328 } else if (type == INTEL_OUTPUT_EDP) {
1327 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1329 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
1328 1330
1331 if (port == PORT_A)
1332 intel_dp_stop_link_train(intel_dp);
1333
1329 ironlake_edp_backlight_on(intel_dp); 1334 ironlake_edp_backlight_on(intel_dp);
1330 } 1335 }
1331 1336
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index fb2fbc1e08b9..3d704b706a8d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -702,6 +702,9 @@ intel_dp_compute_config(struct intel_encoder *encoder,
702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2 702 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
703 * bpc in between. */ 703 * bpc in between. */
704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp); 704 bpp = min_t(int, 8*3, pipe_config->pipe_bpp);
705 if (is_edp(intel_dp) && dev_priv->edp.bpp)
706 bpp = min_t(int, bpp, dev_priv->edp.bpp);
707
705 for (; bpp >= 6*3; bpp -= 2*3) { 708 for (; bpp >= 6*3; bpp -= 2*3) {
706 mode_rate = intel_dp_link_required(target_clock, bpp); 709 mode_rate = intel_dp_link_required(target_clock, bpp);
707 710
@@ -739,6 +742,7 @@ found:
739 intel_dp->link_bw = bws[clock]; 742 intel_dp->link_bw = bws[clock];
740 intel_dp->lane_count = lane_count; 743 intel_dp->lane_count = lane_count;
741 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); 744 adjusted_mode->clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
745 pipe_config->pipe_bpp = bpp;
742 pipe_config->pixel_target_clock = target_clock; 746 pipe_config->pixel_target_clock = target_clock;
743 747
744 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", 748 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
@@ -751,20 +755,6 @@ found:
751 target_clock, adjusted_mode->clock, 755 target_clock, adjusted_mode->clock,
752 &pipe_config->dp_m_n); 756 &pipe_config->dp_m_n);
753 757
754 /*
755 * XXX: We have a strange regression where using the vbt edp bpp value
756 * for the link bw computation results in black screens, the panel only
757 * works when we do the computation at the usual 24bpp (but still
758 * requires us to use 18bpp). Until that's fully debugged, stay
759 * bug-for-bug compatible with the old code.
760 */
761 if (is_edp(intel_dp) && dev_priv->edp.bpp) {
762 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n",
763 bpp, dev_priv->edp.bpp);
764 bpp = min_t(int, bpp, dev_priv->edp.bpp);
765 }
766 pipe_config->pipe_bpp = bpp;
767
768 return true; 758 return true;
769} 759}
770 760
@@ -1389,6 +1379,7 @@ static void intel_enable_dp(struct intel_encoder *encoder)
1389 ironlake_edp_panel_on(intel_dp); 1379 ironlake_edp_panel_on(intel_dp);
1390 ironlake_edp_panel_vdd_off(intel_dp, true); 1380 ironlake_edp_panel_vdd_off(intel_dp, true);
1391 intel_dp_complete_link_train(intel_dp); 1381 intel_dp_complete_link_train(intel_dp);
1382 intel_dp_stop_link_train(intel_dp);
1392 ironlake_edp_backlight_on(intel_dp); 1383 ironlake_edp_backlight_on(intel_dp);
1393} 1384}
1394 1385
@@ -1711,10 +1702,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1711 struct drm_i915_private *dev_priv = dev->dev_private; 1702 struct drm_i915_private *dev_priv = dev->dev_private;
1712 enum port port = intel_dig_port->port; 1703 enum port port = intel_dig_port->port;
1713 int ret; 1704 int ret;
1714 uint32_t temp;
1715 1705
1716 if (HAS_DDI(dev)) { 1706 if (HAS_DDI(dev)) {
1717 temp = I915_READ(DP_TP_CTL(port)); 1707 uint32_t temp = I915_READ(DP_TP_CTL(port));
1718 1708
1719 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) 1709 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
1720 temp |= DP_TP_CTL_SCRAMBLE_DISABLE; 1710 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -1724,18 +1714,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1724 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK; 1714 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1725 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) { 1715 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
1726 case DP_TRAINING_PATTERN_DISABLE: 1716 case DP_TRAINING_PATTERN_DISABLE:
1727
1728 if (port != PORT_A) {
1729 temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
1730 I915_WRITE(DP_TP_CTL(port), temp);
1731
1732 if (wait_for((I915_READ(DP_TP_STATUS(port)) &
1733 DP_TP_STATUS_IDLE_DONE), 1))
1734 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1735
1736 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1737 }
1738
1739 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL; 1717 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
1740 1718
1741 break; 1719 break;
@@ -1811,6 +1789,37 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1811 return true; 1789 return true;
1812} 1790}
1813 1791
1792static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
1793{
1794 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1795 struct drm_device *dev = intel_dig_port->base.base.dev;
1796 struct drm_i915_private *dev_priv = dev->dev_private;
1797 enum port port = intel_dig_port->port;
1798 uint32_t val;
1799
1800 if (!HAS_DDI(dev))
1801 return;
1802
1803 val = I915_READ(DP_TP_CTL(port));
1804 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
1805 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
1806 I915_WRITE(DP_TP_CTL(port), val);
1807
1808 /*
 1809	 * On PORT_A we can have only eDP in SST mode. There, the only reason
1810 * we need to set idle transmission mode is to work around a HW issue
1811 * where we enable the pipe while not in idle link-training mode.
 1812	 * In this case there is a requirement to wait for a minimum number of
1813 * idle patterns to be sent.
1814 */
1815 if (port == PORT_A)
1816 return;
1817
1818 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
1819 1))
1820 DRM_ERROR("Timed out waiting for DP idle patterns\n");
1821}
1822
1814/* Enable corresponding port and start training pattern 1 */ 1823/* Enable corresponding port and start training pattern 1 */
1815void 1824void
1816intel_dp_start_link_train(struct intel_dp *intel_dp) 1825intel_dp_start_link_train(struct intel_dp *intel_dp)
@@ -1953,10 +1962,19 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1953 ++tries; 1962 ++tries;
1954 } 1963 }
1955 1964
1965 intel_dp_set_idle_link_train(intel_dp);
1966
1967 intel_dp->DP = DP;
1968
1956 if (channel_eq) 1969 if (channel_eq)
1957 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n"); 1970 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
1958 1971
1959 intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE); 1972}
1973
1974void intel_dp_stop_link_train(struct intel_dp *intel_dp)
1975{
1976 intel_dp_set_link_train(intel_dp, intel_dp->DP,
1977 DP_TRAINING_PATTERN_DISABLE);
1960} 1978}
1961 1979
1962static void 1980static void
@@ -2164,6 +2182,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
2164 drm_get_encoder_name(&intel_encoder->base)); 2182 drm_get_encoder_name(&intel_encoder->base));
2165 intel_dp_start_link_train(intel_dp); 2183 intel_dp_start_link_train(intel_dp);
2166 intel_dp_complete_link_train(intel_dp); 2184 intel_dp_complete_link_train(intel_dp);
2185 intel_dp_stop_link_train(intel_dp);
2167 } 2186 }
2168} 2187}
2169 2188
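Across intel_ddi.c, intel_dp.c and intel_drv.h the training sequence is split so that leaving the training/idle pattern becomes an explicit third step, which the DDI code defers on PORT_A until the pipe is enabled. The resulting call order, sketched as a hypothetical helper rather than a quote of any single call site:

/* Hypothetical consolidation of the call sites touched above. */
static void foo_dp_retrain(struct intel_dp *intel_dp, bool is_ddi_port_a)
{
	intel_dp_start_link_train(intel_dp);	/* clock recovery */
	intel_dp_complete_link_train(intel_dp);	/* channel EQ, enters idle pattern on DDI */

	/*
	 * On DDI port A (eDP) the switch to the normal pixel pattern is
	 * deferred to intel_enable_ddi(), after the pipe is up; everywhere
	 * else it follows training immediately.
	 */
	if (!is_ddi_port_a)
		intel_dp_stop_link_train(intel_dp);
}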
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index b5b6d19e6dd3..624a9e6b8d71 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -499,6 +499,7 @@ extern void intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
499extern void intel_dp_init_link_config(struct intel_dp *intel_dp); 499extern void intel_dp_init_link_config(struct intel_dp *intel_dp);
500extern void intel_dp_start_link_train(struct intel_dp *intel_dp); 500extern void intel_dp_start_link_train(struct intel_dp *intel_dp);
501extern void intel_dp_complete_link_train(struct intel_dp *intel_dp); 501extern void intel_dp_complete_link_train(struct intel_dp *intel_dp);
502extern void intel_dp_stop_link_train(struct intel_dp *intel_dp);
502extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 503extern void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode);
503extern void intel_dp_encoder_destroy(struct drm_encoder *encoder); 504extern void intel_dp_encoder_destroy(struct drm_encoder *encoder);
504extern void intel_dp_check_link_status(struct intel_dp *intel_dp); 505extern void intel_dp_check_link_status(struct intel_dp *intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 0e19e575a1b4..6b7c3ca2c035 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -262,10 +262,22 @@ void intel_fbdev_fini(struct drm_device *dev)
262void intel_fbdev_set_suspend(struct drm_device *dev, int state) 262void intel_fbdev_set_suspend(struct drm_device *dev, int state)
263{ 263{
264 drm_i915_private_t *dev_priv = dev->dev_private; 264 drm_i915_private_t *dev_priv = dev->dev_private;
265 if (!dev_priv->fbdev) 265 struct intel_fbdev *ifbdev = dev_priv->fbdev;
266 struct fb_info *info;
267
268 if (!ifbdev)
266 return; 269 return;
267 270
268 fb_set_suspend(dev_priv->fbdev->helper.fbdev, state); 271 info = ifbdev->helper.fbdev;
272
273 /* On resume from hibernation: If the object is shmemfs backed, it has
274 * been restored from swap. If the object is stolen however, it will be
275 * full of whatever garbage was left in there.
276 */
277 if (!state && ifbdev->ifb.obj->stolen)
278 memset_io(info->screen_base, 0, info->screen_size);
279
280 fb_set_suspend(info, state);
269} 281}
270 282
271MODULE_LICENSE("GPL and additional rights"); 283MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index de3b0dc5658b..aa01128ff192 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1301,17 +1301,17 @@ static void valleyview_update_wm(struct drm_device *dev)
1301 1301
1302 vlv_update_drain_latency(dev); 1302 vlv_update_drain_latency(dev);
1303 1303
1304 if (g4x_compute_wm0(dev, 0, 1304 if (g4x_compute_wm0(dev, PIPE_A,
1305 &valleyview_wm_info, latency_ns, 1305 &valleyview_wm_info, latency_ns,
1306 &valleyview_cursor_wm_info, latency_ns, 1306 &valleyview_cursor_wm_info, latency_ns,
1307 &planea_wm, &cursora_wm)) 1307 &planea_wm, &cursora_wm))
1308 enabled |= 1; 1308 enabled |= 1 << PIPE_A;
1309 1309
1310 if (g4x_compute_wm0(dev, 1, 1310 if (g4x_compute_wm0(dev, PIPE_B,
1311 &valleyview_wm_info, latency_ns, 1311 &valleyview_wm_info, latency_ns,
1312 &valleyview_cursor_wm_info, latency_ns, 1312 &valleyview_cursor_wm_info, latency_ns,
1313 &planeb_wm, &cursorb_wm)) 1313 &planeb_wm, &cursorb_wm))
1314 enabled |= 2; 1314 enabled |= 1 << PIPE_B;
1315 1315
1316 if (single_plane_enabled(enabled) && 1316 if (single_plane_enabled(enabled) &&
1317 g4x_compute_srwm(dev, ffs(enabled) - 1, 1317 g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1357,17 +1357,17 @@ static void g4x_update_wm(struct drm_device *dev)
1357 int plane_sr, cursor_sr; 1357 int plane_sr, cursor_sr;
1358 unsigned int enabled = 0; 1358 unsigned int enabled = 0;
1359 1359
1360 if (g4x_compute_wm0(dev, 0, 1360 if (g4x_compute_wm0(dev, PIPE_A,
1361 &g4x_wm_info, latency_ns, 1361 &g4x_wm_info, latency_ns,
1362 &g4x_cursor_wm_info, latency_ns, 1362 &g4x_cursor_wm_info, latency_ns,
1363 &planea_wm, &cursora_wm)) 1363 &planea_wm, &cursora_wm))
1364 enabled |= 1; 1364 enabled |= 1 << PIPE_A;
1365 1365
1366 if (g4x_compute_wm0(dev, 1, 1366 if (g4x_compute_wm0(dev, PIPE_B,
1367 &g4x_wm_info, latency_ns, 1367 &g4x_wm_info, latency_ns,
1368 &g4x_cursor_wm_info, latency_ns, 1368 &g4x_cursor_wm_info, latency_ns,
1369 &planeb_wm, &cursorb_wm)) 1369 &planeb_wm, &cursorb_wm))
1370 enabled |= 2; 1370 enabled |= 1 << PIPE_B;
1371 1371
1372 if (single_plane_enabled(enabled) && 1372 if (single_plane_enabled(enabled) &&
1373 g4x_compute_srwm(dev, ffs(enabled) - 1, 1373 g4x_compute_srwm(dev, ffs(enabled) - 1,
@@ -1716,7 +1716,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1716 unsigned int enabled; 1716 unsigned int enabled;
1717 1717
1718 enabled = 0; 1718 enabled = 0;
1719 if (g4x_compute_wm0(dev, 0, 1719 if (g4x_compute_wm0(dev, PIPE_A,
1720 &ironlake_display_wm_info, 1720 &ironlake_display_wm_info,
1721 ILK_LP0_PLANE_LATENCY, 1721 ILK_LP0_PLANE_LATENCY,
1722 &ironlake_cursor_wm_info, 1722 &ironlake_cursor_wm_info,
@@ -1727,10 +1727,10 @@ static void ironlake_update_wm(struct drm_device *dev)
1727 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1727 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1728 " plane %d, " "cursor: %d\n", 1728 " plane %d, " "cursor: %d\n",
1729 plane_wm, cursor_wm); 1729 plane_wm, cursor_wm);
1730 enabled |= 1; 1730 enabled |= 1 << PIPE_A;
1731 } 1731 }
1732 1732
1733 if (g4x_compute_wm0(dev, 1, 1733 if (g4x_compute_wm0(dev, PIPE_B,
1734 &ironlake_display_wm_info, 1734 &ironlake_display_wm_info,
1735 ILK_LP0_PLANE_LATENCY, 1735 ILK_LP0_PLANE_LATENCY,
1736 &ironlake_cursor_wm_info, 1736 &ironlake_cursor_wm_info,
@@ -1741,7 +1741,7 @@ static void ironlake_update_wm(struct drm_device *dev)
1741 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1741 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1742 " plane %d, cursor: %d\n", 1742 " plane %d, cursor: %d\n",
1743 plane_wm, cursor_wm); 1743 plane_wm, cursor_wm);
1744 enabled |= 2; 1744 enabled |= 1 << PIPE_B;
1745 } 1745 }
1746 1746
1747 /* 1747 /*
@@ -1801,7 +1801,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1801 unsigned int enabled; 1801 unsigned int enabled;
1802 1802
1803 enabled = 0; 1803 enabled = 0;
1804 if (g4x_compute_wm0(dev, 0, 1804 if (g4x_compute_wm0(dev, PIPE_A,
1805 &sandybridge_display_wm_info, latency, 1805 &sandybridge_display_wm_info, latency,
1806 &sandybridge_cursor_wm_info, latency, 1806 &sandybridge_cursor_wm_info, latency,
1807 &plane_wm, &cursor_wm)) { 1807 &plane_wm, &cursor_wm)) {
@@ -1812,10 +1812,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
1812 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1812 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1813 " plane %d, " "cursor: %d\n", 1813 " plane %d, " "cursor: %d\n",
1814 plane_wm, cursor_wm); 1814 plane_wm, cursor_wm);
1815 enabled |= 1; 1815 enabled |= 1 << PIPE_A;
1816 } 1816 }
1817 1817
1818 if (g4x_compute_wm0(dev, 1, 1818 if (g4x_compute_wm0(dev, PIPE_B,
1819 &sandybridge_display_wm_info, latency, 1819 &sandybridge_display_wm_info, latency,
1820 &sandybridge_cursor_wm_info, latency, 1820 &sandybridge_cursor_wm_info, latency,
1821 &plane_wm, &cursor_wm)) { 1821 &plane_wm, &cursor_wm)) {
@@ -1826,7 +1826,7 @@ static void sandybridge_update_wm(struct drm_device *dev)
1826 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1826 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1827 " plane %d, cursor: %d\n", 1827 " plane %d, cursor: %d\n",
1828 plane_wm, cursor_wm); 1828 plane_wm, cursor_wm);
1829 enabled |= 2; 1829 enabled |= 1 << PIPE_B;
1830 } 1830 }
1831 1831
1832 /* 1832 /*
@@ -1904,7 +1904,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
1904 unsigned int enabled; 1904 unsigned int enabled;
1905 1905
1906 enabled = 0; 1906 enabled = 0;
1907 if (g4x_compute_wm0(dev, 0, 1907 if (g4x_compute_wm0(dev, PIPE_A,
1908 &sandybridge_display_wm_info, latency, 1908 &sandybridge_display_wm_info, latency,
1909 &sandybridge_cursor_wm_info, latency, 1909 &sandybridge_cursor_wm_info, latency,
1910 &plane_wm, &cursor_wm)) { 1910 &plane_wm, &cursor_wm)) {
@@ -1915,10 +1915,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
1915 DRM_DEBUG_KMS("FIFO watermarks For pipe A -" 1915 DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
1916 " plane %d, " "cursor: %d\n", 1916 " plane %d, " "cursor: %d\n",
1917 plane_wm, cursor_wm); 1917 plane_wm, cursor_wm);
1918 enabled |= 1; 1918 enabled |= 1 << PIPE_A;
1919 } 1919 }
1920 1920
1921 if (g4x_compute_wm0(dev, 1, 1921 if (g4x_compute_wm0(dev, PIPE_B,
1922 &sandybridge_display_wm_info, latency, 1922 &sandybridge_display_wm_info, latency,
1923 &sandybridge_cursor_wm_info, latency, 1923 &sandybridge_cursor_wm_info, latency,
1924 &plane_wm, &cursor_wm)) { 1924 &plane_wm, &cursor_wm)) {
@@ -1929,10 +1929,10 @@ static void ivybridge_update_wm(struct drm_device *dev)
1929 DRM_DEBUG_KMS("FIFO watermarks For pipe B -" 1929 DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
1930 " plane %d, cursor: %d\n", 1930 " plane %d, cursor: %d\n",
1931 plane_wm, cursor_wm); 1931 plane_wm, cursor_wm);
1932 enabled |= 2; 1932 enabled |= 1 << PIPE_B;
1933 } 1933 }
1934 1934
1935 if (g4x_compute_wm0(dev, 2, 1935 if (g4x_compute_wm0(dev, PIPE_C,
1936 &sandybridge_display_wm_info, latency, 1936 &sandybridge_display_wm_info, latency,
1937 &sandybridge_cursor_wm_info, latency, 1937 &sandybridge_cursor_wm_info, latency,
1938 &plane_wm, &cursor_wm)) { 1938 &plane_wm, &cursor_wm)) {
@@ -1943,7 +1943,7 @@ static void ivybridge_update_wm(struct drm_device *dev)
1943 DRM_DEBUG_KMS("FIFO watermarks For pipe C -" 1943 DRM_DEBUG_KMS("FIFO watermarks For pipe C -"
1944 " plane %d, cursor: %d\n", 1944 " plane %d, cursor: %d\n",
1945 plane_wm, cursor_wm); 1945 plane_wm, cursor_wm);
1946 enabled |= 3; 1946 enabled |= 1 << PIPE_C;
1947 } 1947 }
1948 1948
1949 /* 1949 /*
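The watermark hunks replace magic plane masks with 1 << PIPE_x, which also fixes pipe C: the old code or'ed in 3 (pipes A and B) where bit 2 was meant. A stand-alone sketch of the bitmask bookkeeping and the single-pipe test, with a hypothetical single_pipe_enabled() helper:

#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };

/* One bit per enabled pipe; a power of two means exactly one pipe is on. */
static int single_pipe_enabled(unsigned int mask)
{
	return mask && !(mask & (mask - 1));
}

int main(void)
{
	unsigned int enabled = 0;

	enabled |= 1 << PIPE_C;		/* the old "enabled |= 3" set A and B instead */

	printf("mask=0x%x single=%d which=%d\n",
	       enabled, single_pipe_enabled(enabled),
	       __builtin_ffs(enabled) - 1);	/* 2 == PIPE_C */
	return 0;
}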
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index f9889658329b..77b8a45fb10a 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -46,29 +46,26 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
46 46
47static inline void mga_wait_vsync(struct mga_device *mdev) 47static inline void mga_wait_vsync(struct mga_device *mdev)
48{ 48{
49 unsigned int count = 0; 49 unsigned long timeout = jiffies + HZ/10;
50 unsigned int status = 0; 50 unsigned int status = 0;
51 51
52 do { 52 do {
53 status = RREG32(MGAREG_Status); 53 status = RREG32(MGAREG_Status);
54 count++; 54 } while ((status & 0x08) && time_before(jiffies, timeout));
55 } while ((status & 0x08) && (count < 250000)); 55 timeout = jiffies + HZ/10;
56 count = 0;
57 status = 0; 56 status = 0;
58 do { 57 do {
59 status = RREG32(MGAREG_Status); 58 status = RREG32(MGAREG_Status);
60 count++; 59 } while (!(status & 0x08) && time_before(jiffies, timeout));
61 } while (!(status & 0x08) && (count < 250000));
62} 60}
63 61
64static inline void mga_wait_busy(struct mga_device *mdev) 62static inline void mga_wait_busy(struct mga_device *mdev)
65{ 63{
66 unsigned int count = 0; 64 unsigned long timeout = jiffies + HZ;
67 unsigned int status = 0; 65 unsigned int status = 0;
68 do { 66 do {
69 status = RREG8(MGAREG_Status + 2); 67 status = RREG8(MGAREG_Status + 2);
70 count++; 68 } while ((status & 0x01) && time_before(jiffies, timeout));
71 } while ((status & 0x01) && (count < 500000));
72} 69}
73 70
74/* 71/*
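The mgag200 wait loops trade an arbitrary iteration count for a wall-clock bound: jiffies plus HZ/10 (or HZ) as the deadline, with time_before() doing the wrap-safe comparison. The generic shape of that loop, sketched with an assumed register accessor:

#include <linux/jiffies.h>

/* foo_read_status() is an assumed register accessor, not a real API. */
static unsigned int foo_read_status(void);

static void foo_wait_idle(void)
{
	unsigned long timeout = jiffies + HZ / 10;	/* ~100 ms deadline */
	unsigned int status;

	do {
		status = foo_read_status();
	} while ((status & 0x08) && time_before(jiffies, timeout));
}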
@@ -189,12 +186,12 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
189 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 186 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
190 tmp = RREG8(DAC_DATA); 187 tmp = RREG8(DAC_DATA);
191 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 188 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
192 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 189 WREG8(DAC_DATA, tmp);
193 190
194 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 191 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
195 tmp = RREG8(DAC_DATA); 192 tmp = RREG8(DAC_DATA);
196 tmp |= MGA1064_REMHEADCTL_CLKDIS; 193 tmp |= MGA1064_REMHEADCTL_CLKDIS;
197 WREG_DAC(MGA1064_REMHEADCTL, tmp); 194 WREG8(DAC_DATA, tmp);
198 195
199 /* select PLL Set C */ 196 /* select PLL Set C */
200 tmp = RREG8(MGAREG_MEM_MISC_READ); 197 tmp = RREG8(MGAREG_MEM_MISC_READ);
@@ -204,7 +201,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
204 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 201 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
205 tmp = RREG8(DAC_DATA); 202 tmp = RREG8(DAC_DATA);
206 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80; 203 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN | 0x80;
207 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 204 WREG8(DAC_DATA, tmp);
208 205
209 udelay(500); 206 udelay(500);
210 207
@@ -212,7 +209,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
212 WREG8(DAC_INDEX, MGA1064_VREF_CTL); 209 WREG8(DAC_INDEX, MGA1064_VREF_CTL);
213 tmp = RREG8(DAC_DATA); 210 tmp = RREG8(DAC_DATA);
214 tmp &= ~0x04; 211 tmp &= ~0x04;
215 WREG_DAC(MGA1064_VREF_CTL, tmp); 212 WREG8(DAC_DATA, tmp);
216 213
217 udelay(50); 214 udelay(50);
218 215
@@ -236,13 +233,13 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
236 tmp = RREG8(DAC_DATA); 233 tmp = RREG8(DAC_DATA);
237 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 234 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
238 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 235 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
239 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 236 WREG8(DAC_DATA, tmp);
240 237
241 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 238 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
242 tmp = RREG8(DAC_DATA); 239 tmp = RREG8(DAC_DATA);
243 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK; 240 tmp &= ~MGA1064_REMHEADCTL_CLKSL_MSK;
244 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL; 241 tmp |= MGA1064_REMHEADCTL_CLKSL_PLL;
245 WREG_DAC(MGA1064_REMHEADCTL, tmp); 242 WREG8(DAC_DATA, tmp);
246 243
247 /* reset dotclock rate bit */ 244 /* reset dotclock rate bit */
248 WREG8(MGAREG_SEQ_INDEX, 1); 245 WREG8(MGAREG_SEQ_INDEX, 1);
@@ -253,7 +250,7 @@ static int mga_g200wb_set_plls(struct mga_device *mdev, long clock)
253 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 250 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
254 tmp = RREG8(DAC_DATA); 251 tmp = RREG8(DAC_DATA);
255 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 252 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
256 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 253 WREG8(DAC_DATA, tmp);
257 254
258 vcount = RREG8(MGAREG_VCOUNT); 255 vcount = RREG8(MGAREG_VCOUNT);
259 256
@@ -318,7 +315,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
318 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 315 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
319 tmp = RREG8(DAC_DATA); 316 tmp = RREG8(DAC_DATA);
320 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 317 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
321 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 318 WREG8(DAC_DATA, tmp);
322 319
323 tmp = RREG8(MGAREG_MEM_MISC_READ); 320 tmp = RREG8(MGAREG_MEM_MISC_READ);
324 tmp |= 0x3 << 2; 321 tmp |= 0x3 << 2;
@@ -326,12 +323,12 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
326 323
327 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 324 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
328 tmp = RREG8(DAC_DATA); 325 tmp = RREG8(DAC_DATA);
329 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp & ~0x40); 326 WREG8(DAC_DATA, tmp & ~0x40);
330 327
331 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 328 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
332 tmp = RREG8(DAC_DATA); 329 tmp = RREG8(DAC_DATA);
333 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 330 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
334 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 331 WREG8(DAC_DATA, tmp);
335 332
336 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m); 333 WREG_DAC(MGA1064_EV_PIX_PLLC_M, m);
337 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n); 334 WREG_DAC(MGA1064_EV_PIX_PLLC_N, n);
@@ -342,7 +339,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
342 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 339 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
343 tmp = RREG8(DAC_DATA); 340 tmp = RREG8(DAC_DATA);
344 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 341 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
345 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 342 WREG8(DAC_DATA, tmp);
346 343
347 udelay(500); 344 udelay(500);
348 345
@@ -350,11 +347,11 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
350 tmp = RREG8(DAC_DATA); 347 tmp = RREG8(DAC_DATA);
351 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 348 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
352 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 349 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
353 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 350 WREG8(DAC_DATA, tmp);
354 351
355 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT); 352 WREG8(DAC_INDEX, MGA1064_PIX_PLL_STAT);
356 tmp = RREG8(DAC_DATA); 353 tmp = RREG8(DAC_DATA);
357 WREG_DAC(MGA1064_PIX_PLL_STAT, tmp | 0x40); 354 WREG8(DAC_DATA, tmp | 0x40);
358 355
359 tmp = RREG8(MGAREG_MEM_MISC_READ); 356 tmp = RREG8(MGAREG_MEM_MISC_READ);
360 tmp |= (0x3 << 2); 357 tmp |= (0x3 << 2);
@@ -363,7 +360,7 @@ static int mga_g200ev_set_plls(struct mga_device *mdev, long clock)
363 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 360 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
364 tmp = RREG8(DAC_DATA); 361 tmp = RREG8(DAC_DATA);
365 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 362 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
366 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 363 WREG8(DAC_DATA, tmp);
367 364
368 return 0; 365 return 0;
369} 366}
@@ -416,7 +413,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
416 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 413 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
417 tmp = RREG8(DAC_DATA); 414 tmp = RREG8(DAC_DATA);
418 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 415 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
419 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 416 WREG8(DAC_DATA, tmp);
420 417
421 tmp = RREG8(MGAREG_MEM_MISC_READ); 418 tmp = RREG8(MGAREG_MEM_MISC_READ);
422 tmp |= 0x3 << 2; 419 tmp |= 0x3 << 2;
@@ -425,7 +422,7 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
425 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 422 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
426 tmp = RREG8(DAC_DATA); 423 tmp = RREG8(DAC_DATA);
427 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 424 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
428 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 425 WREG8(DAC_DATA, tmp);
429 426
430 udelay(500); 427 udelay(500);
431 428
@@ -439,13 +436,13 @@ static int mga_g200eh_set_plls(struct mga_device *mdev, long clock)
439 tmp = RREG8(DAC_DATA); 436 tmp = RREG8(DAC_DATA);
440 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK; 437 tmp &= ~MGA1064_PIX_CLK_CTL_SEL_MSK;
441 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL; 438 tmp |= MGA1064_PIX_CLK_CTL_SEL_PLL;
442 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 439 WREG8(DAC_DATA, tmp);
443 440
444 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 441 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
445 tmp = RREG8(DAC_DATA); 442 tmp = RREG8(DAC_DATA);
446 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 443 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
447 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 444 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
448 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 445 WREG8(DAC_DATA, tmp);
449 446
450 vcount = RREG8(MGAREG_VCOUNT); 447 vcount = RREG8(MGAREG_VCOUNT);
451 448
@@ -515,12 +512,12 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
515 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL); 512 WREG8(DAC_INDEX, MGA1064_PIX_CLK_CTL);
516 tmp = RREG8(DAC_DATA); 513 tmp = RREG8(DAC_DATA);
517 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS; 514 tmp |= MGA1064_PIX_CLK_CTL_CLK_DIS;
518 WREG_DAC(MGA1064_PIX_CLK_CTL_CLK_DIS, tmp); 515 WREG8(DAC_DATA, tmp);
519 516
520 WREG8(DAC_INDEX, MGA1064_REMHEADCTL); 517 WREG8(DAC_INDEX, MGA1064_REMHEADCTL);
521 tmp = RREG8(DAC_DATA); 518 tmp = RREG8(DAC_DATA);
522 tmp |= MGA1064_REMHEADCTL_CLKDIS; 519 tmp |= MGA1064_REMHEADCTL_CLKDIS;
523 WREG_DAC(MGA1064_REMHEADCTL, tmp); 520 WREG8(DAC_DATA, tmp);
524 521
525 tmp = RREG8(MGAREG_MEM_MISC_READ); 522 tmp = RREG8(MGAREG_MEM_MISC_READ);
526 tmp |= (0x3<<2) | 0xc0; 523 tmp |= (0x3<<2) | 0xc0;
@@ -530,7 +527,7 @@ static int mga_g200er_set_plls(struct mga_device *mdev, long clock)
530 tmp = RREG8(DAC_DATA); 527 tmp = RREG8(DAC_DATA);
531 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS; 528 tmp &= ~MGA1064_PIX_CLK_CTL_CLK_DIS;
532 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN; 529 tmp |= MGA1064_PIX_CLK_CTL_CLK_POW_DOWN;
533 WREG_DAC(MGA1064_PIX_CLK_CTL, tmp); 530 WREG8(DAC_DATA, tmp);
534 531
535 udelay(500); 532 udelay(500);
536 533
@@ -657,12 +654,26 @@ static void mga_g200wb_commit(struct drm_crtc *crtc)
657 WREG_DAC(MGA1064_GEN_IO_DATA, tmp); 654 WREG_DAC(MGA1064_GEN_IO_DATA, tmp);
658} 655}
659 656
660 657/*
658 This is how the framebuffer base address is stored on G200 cards:
659 * Assume @offset is the gpu_addr field of the framebuffer object.
660 * Then addr is the number of _pixels_ (not bytes) from the start of
661 VRAM to the first pixel we want to display, divided by 2 for
662 32-bit framebuffers.
663 * addr is stored in the CRTCEXT0, CRTCC and CRTCD registers:
664 addr<20> -> CRTCEXT0<6>
665 addr<19-16> -> CRTCEXT0<3-0>
666 addr<15-8> -> CRTCC<7-0>
667 addr<7-0> -> CRTCD<7-0>
668 CRTCEXT0 has to be programmed last to trigger an update and make the
669 new address take effect.
670 */
661void mga_set_start_address(struct drm_crtc *crtc, unsigned offset) 671void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
662{ 672{
663 struct mga_device *mdev = crtc->dev->dev_private; 673 struct mga_device *mdev = crtc->dev->dev_private;
664 u32 addr; 674 u32 addr;
665 int count; 675 int count;
676 u8 crtcext0;
666 677
667 while (RREG8(0x1fda) & 0x08); 678 while (RREG8(0x1fda) & 0x08);
668 while (!(RREG8(0x1fda) & 0x08)); 679 while (!(RREG8(0x1fda) & 0x08));
@@ -670,10 +681,17 @@ void mga_set_start_address(struct drm_crtc *crtc, unsigned offset)
670 count = RREG8(MGAREG_VCOUNT) + 2; 681 count = RREG8(MGAREG_VCOUNT) + 2;
671 while (RREG8(MGAREG_VCOUNT) < count); 682 while (RREG8(MGAREG_VCOUNT) < count);
672 683
673 addr = offset >> 2; 684 WREG8(MGAREG_CRTCEXT_INDEX, 0);
685 crtcext0 = RREG8(MGAREG_CRTCEXT_DATA);
686 crtcext0 &= 0xB0;
687 addr = offset / 8;
688 /* Can't store addresses any higher than that...
689 but we also don't have more than 16MB of memory, so it should be fine. */
690 WARN_ON(addr > 0x1fffff);
691 crtcext0 |= (!!(addr & (1<<20)))<<6;
674 WREG_CRT(0x0d, (u8)(addr & 0xff)); 692 WREG_CRT(0x0d, (u8)(addr & 0xff));
675 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff); 693 WREG_CRT(0x0c, (u8)(addr >> 8) & 0xff);
676 WREG_CRT(0xaf, (u8)(addr >> 16) & 0xf); 694 WREG_ECRT(0x0, ((u8)(addr >> 16) & 0xf) | crtcext0);
677} 695}
678 696
679 697
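The comment above describes how the 21-bit start address is split across CRTCD, CRTCC and CRTCEXT0. A standalone illustration of that bit layout; the offset value is arbitrary, and the CRTCEXT0 sketch keeps only the address bits, not the 0xB0 bits the driver preserves:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t offset = 0x00154321;	/* arbitrary byte offset into VRAM */
	uint32_t addr = offset / 8;	/* same scaling as mga_set_start_address */
	uint8_t crtcd = addr & 0xff;			/* addr<7-0>  */
	uint8_t crtcc = (addr >> 8) & 0xff;		/* addr<15-8> */
	uint8_t crtcext0 = ((addr >> 16) & 0x0f) |	/* addr<19-16> -> bits 3-0 */
			   (((addr >> 20) & 1) << 6);	/* addr<20>    -> bit 6   */

	printf("addr=0x%05x CRTCD=0x%02x CRTCC=0x%02x CRTCEXT0=0x%02x\n",
	       (unsigned)addr, crtcd, crtcc, crtcext0);
	return 0;
}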
@@ -829,11 +847,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
829 847
830 848
831 for (i = 0; i < sizeof(dacvalue); i++) { 849 for (i = 0; i < sizeof(dacvalue); i++) {
832 if ((i <= 0x03) || 850 if ((i <= 0x17) ||
833 (i == 0x07) ||
834 (i == 0x0b) ||
835 (i == 0x0f) ||
836 ((i >= 0x13) && (i <= 0x17)) ||
837 (i == 0x1b) || 851 (i == 0x1b) ||
838 (i == 0x1c) || 852 (i == 0x1c) ||
839 ((i >= 0x1f) && (i <= 0x29)) || 853 ((i >= 0x1f) && (i <= 0x29)) ||
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 955af122c3a6..a36e64e98ef3 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -138,7 +138,6 @@ nvc0_identify(struct nouveau_device *device)
138 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 138 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
139 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 139 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
140 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 140 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
141 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
142 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 141 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
143 break; 142 break;
144 case 0xce: 143 case 0xce:
@@ -225,7 +224,6 @@ nvc0_identify(struct nouveau_device *device)
225 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 224 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
226 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 225 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
227 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 226 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
228 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
229 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass; 227 device->oclass[NVDEV_ENGINE_DISP ] = &nva3_disp_oclass;
230 break; 228 break;
231 case 0xc8: 229 case 0xc8:
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
index ddaeb5572903..89bf459d584b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv50.c
@@ -47,6 +47,7 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
47 struct nouveau_gpuobj *cur; 47 struct nouveau_gpuobj *cur;
48 int i, p; 48 int i, p;
49 49
50 mutex_lock(&nv_subdev(priv)->mutex);
50 cur = priv->playlist[priv->cur_playlist]; 51 cur = priv->playlist[priv->cur_playlist];
51 priv->cur_playlist = !priv->cur_playlist; 52 priv->cur_playlist = !priv->cur_playlist;
52 53
@@ -60,6 +61,7 @@ nv50_fifo_playlist_update(struct nv50_fifo_priv *priv)
60 nv_wr32(priv, 0x0032f4, cur->addr >> 12); 61 nv_wr32(priv, 0x0032f4, cur->addr >> 12);
61 nv_wr32(priv, 0x0032ec, p); 62 nv_wr32(priv, 0x0032ec, p);
62 nv_wr32(priv, 0x002500, 0x00000101); 63 nv_wr32(priv, 0x002500, 0x00000101);
64 mutex_unlock(&nv_subdev(priv)->mutex);
63} 65}
64 66
65static int 67static int
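The nv50 playlist update above (and the nvc0/nve0 ones further down) now take the subdev mutex around the update: the playlist is double-buffered, and without serialization two concurrent callers could flip cur_playlist twice and fill the same buffer. A minimal sketch of the pattern with a hypothetical context structure, not the driver's types:

#include <linux/mutex.h>

struct playlist_ctx {
	struct mutex lock;
	void *playlist[2];
	int cur;
};

static void playlist_flip(struct playlist_ctx *ctx)
{
	void *next;

	mutex_lock(&ctx->lock);
	next = ctx->playlist[ctx->cur];
	ctx->cur = !ctx->cur;
	(void)next;	/* placeholder for rebuilding and submitting 'next' */
	mutex_unlock(&ctx->lock);
}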
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 4d4a6b905370..46dfa68c47bb 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -71,6 +71,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
71 struct nouveau_gpuobj *cur; 71 struct nouveau_gpuobj *cur;
72 int i, p; 72 int i, p;
73 73
74 mutex_lock(&nv_subdev(priv)->mutex);
74 cur = priv->playlist[priv->cur_playlist]; 75 cur = priv->playlist[priv->cur_playlist];
75 priv->cur_playlist = !priv->cur_playlist; 76 priv->cur_playlist = !priv->cur_playlist;
76 77
@@ -87,6 +88,7 @@ nvc0_fifo_playlist_update(struct nvc0_fifo_priv *priv)
87 nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3)); 88 nv_wr32(priv, 0x002274, 0x01f00000 | (p >> 3));
88 if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000)) 89 if (!nv_wait(priv, 0x00227c, 0x00100000, 0x00000000))
89 nv_error(priv, "playlist update failed\n"); 90 nv_error(priv, "playlist update failed\n");
91 mutex_unlock(&nv_subdev(priv)->mutex);
90} 92}
91 93
92static int 94static int
@@ -248,9 +250,17 @@ nvc0_fifo_chan_fini(struct nouveau_object *object, bool suspend)
248 struct nvc0_fifo_priv *priv = (void *)object->engine; 250 struct nvc0_fifo_priv *priv = (void *)object->engine;
249 struct nvc0_fifo_chan *chan = (void *)object; 251 struct nvc0_fifo_chan *chan = (void *)object;
250 u32 chid = chan->base.chid; 252 u32 chid = chan->base.chid;
253 u32 mask, engine;
251 254
252 nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000); 255 nv_mask(priv, 0x003004 + (chid * 8), 0x00000001, 0x00000000);
253 nvc0_fifo_playlist_update(priv); 256 nvc0_fifo_playlist_update(priv);
257 mask = nv_rd32(priv, 0x0025a4);
258 for (engine = 0; mask && engine < 16; engine++) {
259 if (!(mask & (1 << engine)))
260 continue;
261 nv_mask(priv, 0x0025a8 + (engine * 4), 0x00000000, 0x00000000);
262 mask &= ~(1 << engine);
263 }
254 nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000); 264 nv_wr32(priv, 0x003000 + (chid * 8), 0x00000000);
255 265
256 return nouveau_fifo_channel_fini(&chan->base, suspend); 266 return nouveau_fifo_channel_fini(&chan->base, suspend);
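The new teardown code above reads an engine mask at 0x0025a4 and issues one register access per set bit, clearing each bit as it goes so the loop ends as soon as the mask is empty. The same walk-the-set-bits idiom in isolation, with a made-up mask value:

#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x25;	/* hypothetical "engines in use" bits */
	unsigned int engine;

	for (engine = 0; mask && engine < 16; engine++) {
		if (!(mask & (1u << engine)))
			continue;
		printf("kick engine %u\n", engine);
		mask &= ~(1u << engine);
	}
	return 0;
}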
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 9151919fb831..56192a7242ae 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -94,11 +94,13 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
94 u32 match = (engine << 16) | 0x00000001; 94 u32 match = (engine << 16) | 0x00000001;
95 int i, p; 95 int i, p;
96 96
97 mutex_lock(&nv_subdev(priv)->mutex);
97 cur = engn->playlist[engn->cur_playlist]; 98 cur = engn->playlist[engn->cur_playlist];
98 if (unlikely(cur == NULL)) { 99 if (unlikely(cur == NULL)) {
99 int ret = nouveau_gpuobj_new(nv_object(priv), NULL, 100 int ret = nouveau_gpuobj_new(nv_object(priv), NULL,
100 0x8000, 0x1000, 0, &cur); 101 0x8000, 0x1000, 0, &cur);
101 if (ret) { 102 if (ret) {
103 mutex_unlock(&nv_subdev(priv)->mutex);
102 nv_error(priv, "playlist alloc failed\n"); 104 nv_error(priv, "playlist alloc failed\n");
103 return; 105 return;
104 } 106 }
@@ -122,6 +124,7 @@ nve0_fifo_playlist_update(struct nve0_fifo_priv *priv, u32 engine)
122 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3)); 124 nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
123 if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000)) 125 if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
124 nv_error(priv, "playlist %d update timeout\n", engine); 126 nv_error(priv, "playlist %d update timeout\n", engine);
127 mutex_unlock(&nv_subdev(priv)->mutex);
125} 128}
126 129
127static int 130static int
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index c300b5e7b670..c434d398d16f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -1940,8 +1940,8 @@ init_zm_mask_add(struct nvbios_init *init)
1940 trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add); 1940 trace("ZM_MASK_ADD\tR[0x%06x] &= 0x%08x += 0x%08x\n", addr, mask, add);
1941 init->offset += 13; 1941 init->offset += 13;
1942 1942
1943 data = init_rd32(init, addr) & mask; 1943 data = init_rd32(init, addr);
1944 data |= ((data + add) & ~mask); 1944 data = (data & mask) | ((data + add) & ~mask);
1945 init_wr32(init, addr, data); 1945 init_wr32(init, addr, data);
1946} 1946}
1947 1947
diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
index e4940fb166e8..fb794e997fbc 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c
@@ -29,7 +29,6 @@
29struct nvc0_ltcg_priv { 29struct nvc0_ltcg_priv {
30 struct nouveau_ltcg base; 30 struct nouveau_ltcg base;
31 u32 part_nr; 31 u32 part_nr;
32 u32 part_mask;
33 u32 subp_nr; 32 u32 subp_nr;
34 struct nouveau_mm tags; 33 struct nouveau_mm tags;
35 u32 num_tags; 34 u32 num_tags;
@@ -105,8 +104,6 @@ nvc0_ltcg_tags_clear(struct nouveau_ltcg *ltcg, u32 first, u32 count)
105 104
106 /* wait until it's finished with clearing */ 105 /* wait until it's finished with clearing */
107 for (p = 0; p < priv->part_nr; ++p) { 106 for (p = 0; p < priv->part_nr; ++p) {
108 if (!(priv->part_mask & (1 << p)))
109 continue;
110 for (i = 0; i < priv->subp_nr; ++i) 107 for (i = 0; i < priv->subp_nr; ++i)
111 nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0); 108 nv_wait(priv, 0x1410c8 + p * 0x2000 + i * 0x400, ~0, 0);
112 } 109 }
@@ -121,6 +118,8 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv)
121 int ret; 118 int ret;
122 119
123 nv_wr32(priv, 0x17e8d8, priv->part_nr); 120 nv_wr32(priv, 0x17e8d8, priv->part_nr);
121 if (nv_device(pfb)->card_type >= NV_E0)
122 nv_wr32(priv, 0x17e000, priv->part_nr);
124 123
125 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ 124 /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */
126 priv->num_tags = (pfb->ram.size >> 17) / 4; 125 priv->num_tags = (pfb->ram.size >> 17) / 4;
@@ -167,16 +166,20 @@ nvc0_ltcg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
167{ 166{
168 struct nvc0_ltcg_priv *priv; 167 struct nvc0_ltcg_priv *priv;
169 struct nouveau_fb *pfb = nouveau_fb(parent); 168 struct nouveau_fb *pfb = nouveau_fb(parent);
170 int ret; 169 u32 parts, mask;
170 int ret, i;
171 171
172 ret = nouveau_ltcg_create(parent, engine, oclass, &priv); 172 ret = nouveau_ltcg_create(parent, engine, oclass, &priv);
173 *pobject = nv_object(priv); 173 *pobject = nv_object(priv);
174 if (ret) 174 if (ret)
175 return ret; 175 return ret;
176 176
177 priv->part_nr = nv_rd32(priv, 0x022438); 177 parts = nv_rd32(priv, 0x022438);
178 priv->part_mask = nv_rd32(priv, 0x022554); 178 mask = nv_rd32(priv, 0x022554);
179 179 for (i = 0; i < parts; i++) {
180 if (!(mask & (1 << i)))
181 priv->part_nr++;
182 }
180 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; 183 priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28;
181 184
182 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ 185 nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 46c152ff0a80..383f4e6ea9d1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -453,18 +453,32 @@ nouveau_do_suspend(struct drm_device *dev)
453 NV_INFO(drm, "evicting buffers...\n"); 453 NV_INFO(drm, "evicting buffers...\n");
454 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); 454 ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM);
455 455
456 NV_INFO(drm, "waiting for kernel channels to go idle...\n");
457 if (drm->cechan) {
458 ret = nouveau_channel_idle(drm->cechan);
459 if (ret)
460 return ret;
461 }
462
463 if (drm->channel) {
464 ret = nouveau_channel_idle(drm->channel);
465 if (ret)
466 return ret;
467 }
468
469 NV_INFO(drm, "suspending client object trees...\n");
456 if (drm->fence && nouveau_fence(drm)->suspend) { 470 if (drm->fence && nouveau_fence(drm)->suspend) {
457 if (!nouveau_fence(drm)->suspend(drm)) 471 if (!nouveau_fence(drm)->suspend(drm))
458 return -ENOMEM; 472 return -ENOMEM;
459 } 473 }
460 474
461 NV_INFO(drm, "suspending client object trees...\n");
462 list_for_each_entry(cli, &drm->clients, head) { 475 list_for_each_entry(cli, &drm->clients, head) {
463 ret = nouveau_client_fini(&cli->base, true); 476 ret = nouveau_client_fini(&cli->base, true);
464 if (ret) 477 if (ret)
465 goto fail_client; 478 goto fail_client;
466 } 479 }
467 480
481 NV_INFO(drm, "suspending kernel object tree...\n");
468 ret = nouveau_client_fini(&drm->client.base, true); 482 ret = nouveau_client_fini(&drm->client.base, true);
469 if (ret) 483 if (ret)
470 goto fail_client; 484 goto fail_client;
@@ -514,17 +528,18 @@ nouveau_do_resume(struct drm_device *dev)
514 528
515 nouveau_agp_reset(drm); 529 nouveau_agp_reset(drm);
516 530
517 NV_INFO(drm, "resuming client object trees...\n"); 531 NV_INFO(drm, "resuming kernel object tree...\n");
518 nouveau_client_init(&drm->client.base); 532 nouveau_client_init(&drm->client.base);
519 nouveau_agp_init(drm); 533 nouveau_agp_init(drm);
520 534
535 NV_INFO(drm, "resuming client object trees...\n");
536 if (drm->fence && nouveau_fence(drm)->resume)
537 nouveau_fence(drm)->resume(drm);
538
521 list_for_each_entry(cli, &drm->clients, head) { 539 list_for_each_entry(cli, &drm->clients, head) {
522 nouveau_client_init(&cli->base); 540 nouveau_client_init(&cli->base);
523 } 541 }
524 542
525 if (drm->fence && nouveau_fence(drm)->resume)
526 nouveau_fence(drm)->resume(drm);
527
528 nouveau_run_vbios_init(dev); 543 nouveau_run_vbios_init(dev);
529 nouveau_pm_resume(dev); 544 nouveau_pm_resume(dev);
530 545
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index 08b0823c93d5..f86771481317 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -277,7 +277,7 @@ out_unref:
277 return 0; 277 return 0;
278} 278}
279 279
280static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port) 280static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr)
281{ 281{
282 int irq_num; 282 int irq_num;
283 long addr = qdev->io_base + port; 283 long addr = qdev->io_base + port;
@@ -285,20 +285,29 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port)
285 285
286 mutex_lock(&qdev->async_io_mutex); 286 mutex_lock(&qdev->async_io_mutex);
287 irq_num = atomic_read(&qdev->irq_received_io_cmd); 287 irq_num = atomic_read(&qdev->irq_received_io_cmd);
288
289
290 if (qdev->last_sent_io_cmd > irq_num) { 288 if (qdev->last_sent_io_cmd > irq_num) {
291 ret = wait_event_interruptible(qdev->io_cmd_event, 289 if (intr)
292 atomic_read(&qdev->irq_received_io_cmd) > irq_num); 290 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
293 if (ret) 291 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
292 else
293 ret = wait_event_timeout(qdev->io_cmd_event,
294 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
295 /* 0 is timeout, just bail the "hw" has gone away */
296 if (ret <= 0)
294 goto out; 297 goto out;
295 irq_num = atomic_read(&qdev->irq_received_io_cmd); 298 irq_num = atomic_read(&qdev->irq_received_io_cmd);
296 } 299 }
297 outb(val, addr); 300 outb(val, addr);
298 qdev->last_sent_io_cmd = irq_num + 1; 301 qdev->last_sent_io_cmd = irq_num + 1;
299 ret = wait_event_interruptible(qdev->io_cmd_event, 302 if (intr)
300 atomic_read(&qdev->irq_received_io_cmd) > irq_num); 303 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
304 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
305 else
306 ret = wait_event_timeout(qdev->io_cmd_event,
307 atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
301out: 308out:
309 if (ret > 0)
310 ret = 0;
302 mutex_unlock(&qdev->async_io_mutex); 311 mutex_unlock(&qdev->async_io_mutex);
303 return ret; 312 return ret;
304} 313}
@@ -308,7 +317,7 @@ static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port)
308 int ret; 317 int ret;
309 318
310restart: 319restart:
311 ret = wait_for_io_cmd_user(qdev, val, port); 320 ret = wait_for_io_cmd_user(qdev, val, port, false);
312 if (ret == -ERESTARTSYS) 321 if (ret == -ERESTARTSYS)
313 goto restart; 322 goto restart;
314} 323}
@@ -340,7 +349,7 @@ int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf,
340 mutex_lock(&qdev->update_area_mutex); 349 mutex_lock(&qdev->update_area_mutex);
341 qdev->ram_header->update_area = *area; 350 qdev->ram_header->update_area = *area;
342 qdev->ram_header->update_surface = surface_id; 351 qdev->ram_header->update_surface = surface_id;
343 ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC); 352 ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true);
344 mutex_unlock(&qdev->update_area_mutex); 353 mutex_unlock(&qdev->update_area_mutex);
345 return ret; 354 return ret;
346} 355}
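wait_for_io_cmd_user now uses the *_timeout wait variants with a 5 second budget and treats a return of 0 (timeout) as the virtual hardware having gone away, instead of blocking forever on the wait queue. A minimal sketch of that pattern, assuming kernel context; the wait queue, flag and error mapping are illustrative, not qxl's:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static int bounded_wait(wait_queue_head_t *wq, bool *done, bool intr)
{
	long ret;

	if (intr)
		ret = wait_event_interruptible_timeout(*wq, *done, 5 * HZ);
	else
		ret = wait_event_timeout(*wq, *done, 5 * HZ);

	if (ret == 0)		/* timed out: assume the "hw" went away */
		return -EBUSY;	/* hypothetical mapping; qxl simply bails */
	if (ret < 0)		/* -ERESTARTSYS from a signal */
		return ret;
	return 0;		/* condition became true within the budget */
}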
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index fcfd4436ceed..823d29e926ec 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -428,10 +428,10 @@ static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb,
428 int inc = 1; 428 int inc = 1;
429 429
430 qobj = gem_to_qxl_bo(qxl_fb->obj); 430 qobj = gem_to_qxl_bo(qxl_fb->obj);
431 if (qxl_fb != qdev->active_user_framebuffer) { 431 /* if we aren't primary surface ignore this */
432 DRM_INFO("%s: qxl_fb 0x%p != qdev->active_user_framebuffer 0x%p\n", 432 if (!qobj->is_primary)
433 __func__, qxl_fb, qdev->active_user_framebuffer); 433 return 0;
434 } 434
435 if (!num_clips) { 435 if (!num_clips) {
436 num_clips = 1; 436 num_clips = 1;
437 clips = &norect; 437 clips = &norect;
@@ -604,7 +604,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
604 mode->hdisplay, 604 mode->hdisplay,
605 mode->vdisplay); 605 mode->vdisplay);
606 } 606 }
607 qdev->mode_set = true;
608 return 0; 607 return 0;
609} 608}
610 609
@@ -893,7 +892,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
893{ 892{
894 struct drm_gem_object *obj; 893 struct drm_gem_object *obj;
895 struct qxl_framebuffer *qxl_fb; 894 struct qxl_framebuffer *qxl_fb;
896 struct qxl_device *qdev = dev->dev_private;
897 int ret; 895 int ret;
898 896
899 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); 897 obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
@@ -909,13 +907,6 @@ qxl_user_framebuffer_create(struct drm_device *dev,
909 return NULL; 907 return NULL;
910 } 908 }
911 909
912 if (qdev->active_user_framebuffer) {
913 DRM_INFO("%s: active_user_framebuffer %p -> %p\n",
914 __func__,
915 qdev->active_user_framebuffer, qxl_fb);
916 }
917 qdev->active_user_framebuffer = qxl_fb;
918
919 return &qxl_fb->base; 910 return &qxl_fb->base;
920} 911}
921 912
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 52b582c211da..43d06ab28a21 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -255,12 +255,6 @@ struct qxl_device {
255 struct qxl_gem gem; 255 struct qxl_gem gem;
256 struct qxl_mode_info mode_info; 256 struct qxl_mode_info mode_info;
257 257
258 /*
259 * last created framebuffer with fb_create
260 * only used by debugfs dumbppm
261 */
262 struct qxl_framebuffer *active_user_framebuffer;
263
264 struct fb_info *fbdev_info; 258 struct fb_info *fbdev_info;
265 struct qxl_framebuffer *fbdev_qfb; 259 struct qxl_framebuffer *fbdev_qfb;
266 void *ram_physical; 260 void *ram_physical;
@@ -270,7 +264,6 @@ struct qxl_device {
270 struct qxl_ring *cursor_ring; 264 struct qxl_ring *cursor_ring;
271 265
272 struct qxl_ram_header *ram_header; 266 struct qxl_ram_header *ram_header;
273 bool mode_set;
274 267
275 bool primary_created; 268 bool primary_created;
276 269
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 04b64f9cbfdb..6db7370373ea 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -294,6 +294,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data,
294 goto out; 294 goto out;
295 295
296 if (!qobj->pin_count) { 296 if (!qobj->pin_count) {
297 qxl_ttm_placement_from_domain(qobj, qobj->type);
297 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, 298 ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
298 true, false); 299 true, false);
299 if (unlikely(ret)) 300 if (unlikely(ret))
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 6d6fdb3ba0d0..d5df8fd10217 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1811,12 +1811,9 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1811 1811
1812static void atombios_crtc_prepare(struct drm_crtc *crtc) 1812static void atombios_crtc_prepare(struct drm_crtc *crtc)
1813{ 1813{
1814 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1815 struct drm_device *dev = crtc->dev; 1814 struct drm_device *dev = crtc->dev;
1816 struct radeon_device *rdev = dev->dev_private; 1815 struct radeon_device *rdev = dev->dev_private;
1817 1816
1818 radeon_crtc->in_mode_set = true;
1819
1820 /* disable crtc pair power gating before programming */ 1817 /* disable crtc pair power gating before programming */
1821 if (ASIC_IS_DCE6(rdev)) 1818 if (ASIC_IS_DCE6(rdev))
1822 atombios_powergate_crtc(crtc, ATOM_DISABLE); 1819 atombios_powergate_crtc(crtc, ATOM_DISABLE);
@@ -1827,11 +1824,8 @@ static void atombios_crtc_prepare(struct drm_crtc *crtc)
1827 1824
1828static void atombios_crtc_commit(struct drm_crtc *crtc) 1825static void atombios_crtc_commit(struct drm_crtc *crtc)
1829{ 1826{
1830 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1831
1832 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON); 1827 atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
1833 atombios_lock_crtc(crtc, ATOM_DISABLE); 1828 atombios_lock_crtc(crtc, ATOM_DISABLE);
1834 radeon_crtc->in_mode_set = false;
1835} 1829}
1836 1830
1837static void atombios_crtc_disable(struct drm_crtc *crtc) 1831static void atombios_crtc_disable(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 105bafb6c29d..8f9e2d31b255 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2343,11 +2343,13 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2343 u32 crtc_enabled, tmp, frame_count, blackout; 2343 u32 crtc_enabled, tmp, frame_count, blackout;
2344 int i, j; 2344 int i, j;
2345 2345
2346 save->vga_render_control = RREG32(VGA_RENDER_CONTROL); 2346 if (!ASIC_IS_NODCE(rdev)) {
2347 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL); 2347 save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
2348 save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
2348 2349
2349 /* disable VGA render */ 2350 /* disable VGA render */
2350 WREG32(VGA_RENDER_CONTROL, 0); 2351 WREG32(VGA_RENDER_CONTROL, 0);
2352 }
2351 /* blank the display controllers */ 2353 /* blank the display controllers */
2352 for (i = 0; i < rdev->num_crtc; i++) { 2354 for (i = 0; i < rdev->num_crtc; i++) {
2353 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN; 2355 crtc_enabled = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]) & EVERGREEN_CRTC_MASTER_EN;
@@ -2438,8 +2440,11 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2438 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], 2440 WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i],
2439 (u32)rdev->mc.vram_start); 2441 (u32)rdev->mc.vram_start);
2440 } 2442 }
2441 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start)); 2443
2442 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start); 2444 if (!ASIC_IS_NODCE(rdev)) {
2445 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
2446 WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
2447 }
2443 2448
2444 /* unlock regs and wait for update */ 2449 /* unlock regs and wait for update */
2445 for (i = 0; i < rdev->num_crtc; i++) { 2450 for (i = 0; i < rdev->num_crtc; i++) {
@@ -2499,10 +2504,12 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2499 } 2504 }
2500 } 2505 }
2501 } 2506 }
2502 /* Unlock vga access */ 2507 if (!ASIC_IS_NODCE(rdev)) {
2503 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control); 2508 /* Unlock vga access */
2504 mdelay(1); 2509 WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
2505 WREG32(VGA_RENDER_CONTROL, save->vga_render_control); 2510 mdelay(1);
2511 WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
2512 }
2506} 2513}
2507 2514
2508void evergreen_mc_program(struct radeon_device *rdev) 2515void evergreen_mc_program(struct radeon_device *rdev)
@@ -3405,8 +3412,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
3405 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 3412 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
3406 } else { 3413 } else {
3407 /* size in MB on evergreen/cayman/tn */ 3414 /* size in MB on evergreen/cayman/tn */
3408 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 3415 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
3409 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 3416 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
3410 } 3417 }
3411 rdev->mc.visible_vram_size = rdev->mc.aper_size; 3418 rdev->mc.visible_vram_size = rdev->mc.aper_size;
3412 r700_vram_gtt_location(rdev, &rdev->mc); 3419 r700_vram_gtt_location(rdev, &rdev->mc);
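The 1024ULL change above matters once CONFIG_MEMSIZE reports 4096 MB or more: multiplying in 32-bit arithmetic wraps before the result reaches the 64-bit mc_vram_size. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t size_mb = 4096;			/* a 4 GiB board */
	uint64_t wrong = size_mb * 1024 * 1024;		/* 32-bit math wraps to 0 */
	uint64_t right = size_mb * 1024ULL * 1024ULL;	/* promoted to 64-bit first */

	printf("wrong=%llu right=%llu\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}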
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index b4ab8ceb1654..ed7c8a768092 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -154,19 +154,18 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock)
154 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 154 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
155 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 155 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
156 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); 156 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
157 u32 base_rate = 48000; 157 u32 base_rate = 24000;
158 158
159 if (!dig || !dig->afmt) 159 if (!dig || !dig->afmt)
160 return; 160 return;
161 161
162 /* XXX: properly calculate this */
163 /* XXX two dtos; generally use dto0 for hdmi */ 162 /* XXX two dtos; generally use dto0 for hdmi */
164 /* Express [24MHz / target pixel clock] as an exact rational 163 /* Express [24MHz / target pixel clock] as an exact rational
165 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 164 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
166 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator 165 * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator
167 */ 166 */
168 WREG32(DCCG_AUDIO_DTO0_PHASE, (base_rate*50) & 0xffffff); 167 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
169 WREG32(DCCG_AUDIO_DTO0_MODULE, (clock*100) & 0xffffff); 168 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
170 WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); 169 WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id));
171} 170}
172 171
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 865e2c9980db..60170ea5e3a2 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -75,7 +75,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
75 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); 75 OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1));
76 76
77 for (i = 0; i < nr; ++i) { 77 for (i = 0; i < nr; ++i) {
78 if (DRM_COPY_FROM_USER_UNCHECKED 78 if (DRM_COPY_FROM_USER
79 (&box, &cmdbuf->boxes[n + i], sizeof(box))) { 79 (&box, &cmdbuf->boxes[n + i], sizeof(box))) {
80 DRM_ERROR("copy cliprect faulted\n"); 80 DRM_ERROR("copy cliprect faulted\n");
81 return -EFAULT; 81 return -EFAULT;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 47f180a79352..456750a0daa5 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -232,7 +232,7 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
232 struct radeon_device *rdev = dev->dev_private; 232 struct radeon_device *rdev = dev->dev_private;
233 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 233 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
234 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 234 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
235 u32 base_rate = 48000; 235 u32 base_rate = 24000;
236 236
237 if (!dig || !dig->afmt) 237 if (!dig || !dig->afmt)
238 return; 238 return;
@@ -240,7 +240,6 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
240 /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT. 240 /* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
241 * doesn't matter which one you use. Just use the first one. 241 * doesn't matter which one you use. Just use the first one.
242 */ 242 */
243 /* XXX: properly calculate this */
244 /* XXX two dtos; generally use dto0 for hdmi */ 243 /* XXX two dtos; generally use dto0 for hdmi */
245 /* Express [24MHz / target pixel clock] as an exact rational 244 /* Express [24MHz / target pixel clock] as an exact rational
246 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE 245 * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE
@@ -250,13 +249,13 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
250 /* according to the reg specs, this should DCE3.2 only, but in 249 /* according to the reg specs, this should DCE3.2 only, but in
251 * practice it seems to cover DCE3.0 as well. 250 * practice it seems to cover DCE3.0 as well.
252 */ 251 */
253 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 50); 252 WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
254 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); 253 WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
255 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */ 254 WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
256 } else { 255 } else {
257 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */ 256 /* according to the reg specs, this should be DCE2.0 and DCE3.0 */
258 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate * 50) | 257 WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
259 AUDIO_DTO_MODULE(clock * 100)); 258 AUDIO_DTO_MODULE(clock / 10));
260 } 259 }
261} 260}
262 261
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 1442ce765d48..142ce6cc69f5 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1694,6 +1694,7 @@ struct radeon_device {
1694 int num_crtc; /* number of crtcs */ 1694 int num_crtc; /* number of crtcs */
1695 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */ 1695 struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
1696 bool audio_enabled; 1696 bool audio_enabled;
1697 bool has_uvd;
1697 struct r600_audio audio_status; /* audio stuff */ 1698 struct r600_audio audio_status; /* audio stuff */
1698 struct notifier_block acpi_nb; 1699 struct notifier_block acpi_nb;
1699 /* only one userspace can use Hyperz features or CMASK at a time */ 1700 /* only one userspace can use Hyperz features or CMASK at a time */
@@ -1838,6 +1839,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
1838#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \ 1839#define ASIC_IS_DCE61(rdev) ((rdev->family >= CHIP_ARUBA) && \
1839 (rdev->flags & RADEON_IS_IGP)) 1840 (rdev->flags & RADEON_IS_IGP))
1840#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND)) 1841#define ASIC_IS_DCE64(rdev) ((rdev->family == CHIP_OLAND))
1842#define ASIC_IS_NODCE(rdev) ((rdev->family == CHIP_HAINAN))
1841 1843
1842/* 1844/*
1843 * BIOS helpers. 1845 * BIOS helpers.
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 6417132c50cf..06b8c19ab19e 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1935,6 +1935,8 @@ int radeon_asic_init(struct radeon_device *rdev)
1935 else 1935 else
1936 rdev->num_crtc = 2; 1936 rdev->num_crtc = 2;
1937 1937
1938 rdev->has_uvd = false;
1939
1938 switch (rdev->family) { 1940 switch (rdev->family) {
1939 case CHIP_R100: 1941 case CHIP_R100:
1940 case CHIP_RV100: 1942 case CHIP_RV100:
@@ -1999,16 +2001,22 @@ int radeon_asic_init(struct radeon_device *rdev)
1999 case CHIP_RV635: 2001 case CHIP_RV635:
2000 case CHIP_RV670: 2002 case CHIP_RV670:
2001 rdev->asic = &r600_asic; 2003 rdev->asic = &r600_asic;
2004 if (rdev->family == CHIP_R600)
2005 rdev->has_uvd = false;
2006 else
2007 rdev->has_uvd = true;
2002 break; 2008 break;
2003 case CHIP_RS780: 2009 case CHIP_RS780:
2004 case CHIP_RS880: 2010 case CHIP_RS880:
2005 rdev->asic = &rs780_asic; 2011 rdev->asic = &rs780_asic;
2012 rdev->has_uvd = true;
2006 break; 2013 break;
2007 case CHIP_RV770: 2014 case CHIP_RV770:
2008 case CHIP_RV730: 2015 case CHIP_RV730:
2009 case CHIP_RV710: 2016 case CHIP_RV710:
2010 case CHIP_RV740: 2017 case CHIP_RV740:
2011 rdev->asic = &rv770_asic; 2018 rdev->asic = &rv770_asic;
2019 rdev->has_uvd = true;
2012 break; 2020 break;
2013 case CHIP_CEDAR: 2021 case CHIP_CEDAR:
2014 case CHIP_REDWOOD: 2022 case CHIP_REDWOOD:
@@ -2021,11 +2029,13 @@ int radeon_asic_init(struct radeon_device *rdev)
2021 else 2029 else
2022 rdev->num_crtc = 6; 2030 rdev->num_crtc = 6;
2023 rdev->asic = &evergreen_asic; 2031 rdev->asic = &evergreen_asic;
2032 rdev->has_uvd = true;
2024 break; 2033 break;
2025 case CHIP_PALM: 2034 case CHIP_PALM:
2026 case CHIP_SUMO: 2035 case CHIP_SUMO:
2027 case CHIP_SUMO2: 2036 case CHIP_SUMO2:
2028 rdev->asic = &sumo_asic; 2037 rdev->asic = &sumo_asic;
2038 rdev->has_uvd = true;
2029 break; 2039 break;
2030 case CHIP_BARTS: 2040 case CHIP_BARTS:
2031 case CHIP_TURKS: 2041 case CHIP_TURKS:
@@ -2036,27 +2046,37 @@ int radeon_asic_init(struct radeon_device *rdev)
2036 else 2046 else
2037 rdev->num_crtc = 6; 2047 rdev->num_crtc = 6;
2038 rdev->asic = &btc_asic; 2048 rdev->asic = &btc_asic;
2049 rdev->has_uvd = true;
2039 break; 2050 break;
2040 case CHIP_CAYMAN: 2051 case CHIP_CAYMAN:
2041 rdev->asic = &cayman_asic; 2052 rdev->asic = &cayman_asic;
2042 /* set num crtcs */ 2053 /* set num crtcs */
2043 rdev->num_crtc = 6; 2054 rdev->num_crtc = 6;
2055 rdev->has_uvd = true;
2044 break; 2056 break;
2045 case CHIP_ARUBA: 2057 case CHIP_ARUBA:
2046 rdev->asic = &trinity_asic; 2058 rdev->asic = &trinity_asic;
2047 /* set num crtcs */ 2059 /* set num crtcs */
2048 rdev->num_crtc = 4; 2060 rdev->num_crtc = 4;
2061 rdev->has_uvd = true;
2049 break; 2062 break;
2050 case CHIP_TAHITI: 2063 case CHIP_TAHITI:
2051 case CHIP_PITCAIRN: 2064 case CHIP_PITCAIRN:
2052 case CHIP_VERDE: 2065 case CHIP_VERDE:
2053 case CHIP_OLAND: 2066 case CHIP_OLAND:
2067 case CHIP_HAINAN:
2054 rdev->asic = &si_asic; 2068 rdev->asic = &si_asic;
2055 /* set num crtcs */ 2069 /* set num crtcs */
2056 if (rdev->family == CHIP_OLAND) 2070 if (rdev->family == CHIP_HAINAN)
2071 rdev->num_crtc = 0;
2072 else if (rdev->family == CHIP_OLAND)
2057 rdev->num_crtc = 2; 2073 rdev->num_crtc = 2;
2058 else 2074 else
2059 rdev->num_crtc = 6; 2075 rdev->num_crtc = 6;
2076 if (rdev->family == CHIP_HAINAN)
2077 rdev->has_uvd = false;
2078 else
2079 rdev->has_uvd = true;
2060 break; 2080 break;
2061 default: 2081 default:
2062 /* FIXME: not supported yet */ 2082 /* FIXME: not supported yet */
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index fa3c56fba294..061b227dae0c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -244,24 +244,28 @@ static bool ni_read_disabled_bios(struct radeon_device *rdev)
244 244
245 /* enable the rom */ 245 /* enable the rom */
246 WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS)); 246 WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
247 /* Disable VGA mode */ 247 if (!ASIC_IS_NODCE(rdev)) {
248 WREG32(AVIVO_D1VGA_CONTROL, 248 /* Disable VGA mode */
249 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | 249 WREG32(AVIVO_D1VGA_CONTROL,
250 AVIVO_DVGA_CONTROL_TIMING_SELECT))); 250 (d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
251 WREG32(AVIVO_D2VGA_CONTROL, 251 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
252 (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE | 252 WREG32(AVIVO_D2VGA_CONTROL,
253 AVIVO_DVGA_CONTROL_TIMING_SELECT))); 253 (d2vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
254 WREG32(AVIVO_VGA_RENDER_CONTROL, 254 AVIVO_DVGA_CONTROL_TIMING_SELECT)));
255 (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK)); 255 WREG32(AVIVO_VGA_RENDER_CONTROL,
256 (vga_render_control & ~AVIVO_VGA_VSTATUS_CNTL_MASK));
257 }
256 WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE); 258 WREG32(R600_ROM_CNTL, rom_cntl | R600_SCK_OVERWRITE);
257 259
258 r = radeon_read_bios(rdev); 260 r = radeon_read_bios(rdev);
259 261
260 /* restore regs */ 262 /* restore regs */
261 WREG32(R600_BUS_CNTL, bus_cntl); 263 WREG32(R600_BUS_CNTL, bus_cntl);
262 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control); 264 if (!ASIC_IS_NODCE(rdev)) {
263 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control); 265 WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
264 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control); 266 WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
267 WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
268 }
265 WREG32(R600_ROM_CNTL, rom_cntl); 269 WREG32(R600_ROM_CNTL, rom_cntl);
266 return r; 270 return r;
267} 271}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a8f608903989..c2c59fb1ea01 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -94,6 +94,7 @@ static const char radeon_family_name[][16] = {
94 "PITCAIRN", 94 "PITCAIRN",
95 "VERDE", 95 "VERDE",
96 "OLAND", 96 "OLAND",
97 "HAINAN",
97 "LAST", 98 "LAST",
98}; 99};
99 100
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index d33f484ace48..094e7e5ea39e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -147,7 +147,7 @@ static inline void radeon_unregister_atpx_handler(void) {}
147#endif 147#endif
148 148
149int radeon_no_wb; 149int radeon_no_wb;
150int radeon_modeset = 1; 150int radeon_modeset = -1;
151int radeon_dynclks = -1; 151int radeon_dynclks = -1;
152int radeon_r4xx_atom = 0; 152int radeon_r4xx_atom = 0;
153int radeon_agpmode = 0; 153int radeon_agpmode = 0;
@@ -456,6 +456,16 @@ static struct pci_driver radeon_kms_pci_driver = {
456 456
457static int __init radeon_init(void) 457static int __init radeon_init(void)
458{ 458{
459#ifdef CONFIG_VGA_CONSOLE
460 if (vgacon_text_force() && radeon_modeset == -1) {
461 DRM_INFO("VGACON disable radeon kernel modesetting.\n");
462 radeon_modeset = 0;
463 }
464#endif
465 /* set to modesetting by default if not nomodeset */
466 if (radeon_modeset == -1)
467 radeon_modeset = 1;
468
459 if (radeon_modeset == 1) { 469 if (radeon_modeset == 1) {
460 DRM_INFO("radeon kernel modesetting enabled.\n"); 470 DRM_INFO("radeon kernel modesetting enabled.\n");
461 driver = &kms_driver; 471 driver = &kms_driver;
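radeon_modeset now defaults to -1 ("auto") and is resolved in radeon_init: it is forced off when vgacon_text_force() reports the kernel was booted with nomodeset, and otherwise falls back to 1. A sketch of the same tri-state module-parameter pattern in a hypothetical module, not the radeon driver itself:

#include <linux/module.h>
#include <linux/console.h>

static int modeset = -1;	/* -1 = auto, 0 = off, 1 = on */
module_param(modeset, int, 0400);

static int __init example_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && modeset == -1)
		modeset = 0;		/* booted with nomodeset */
#endif
	if (modeset == -1)
		modeset = 1;		/* default to KMS otherwise */
	return modeset ? 0 : -ENODEV;	/* hypothetical: refuse without KMS */
}
module_init(example_init);

MODULE_LICENSE("GPL");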
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 2d91123f2759..36e9803b077d 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -92,6 +92,7 @@ enum radeon_family {
92 CHIP_PITCAIRN, 92 CHIP_PITCAIRN,
93 CHIP_VERDE, 93 CHIP_VERDE,
94 CHIP_OLAND, 94 CHIP_OLAND,
95 CHIP_HAINAN,
95 CHIP_LAST, 96 CHIP_LAST,
96}; 97};
97 98
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 6857cb4efb76..7cb178a34a0f 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1031,11 +1031,9 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
1031 1031
1032static void radeon_crtc_prepare(struct drm_crtc *crtc) 1032static void radeon_crtc_prepare(struct drm_crtc *crtc)
1033{ 1033{
1034 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1035 struct drm_device *dev = crtc->dev; 1034 struct drm_device *dev = crtc->dev;
1036 struct drm_crtc *crtci; 1035 struct drm_crtc *crtci;
1037 1036
1038 radeon_crtc->in_mode_set = true;
1039 /* 1037 /*
1040 * The hardware wedges sometimes if you reconfigure one CRTC 1038 * The hardware wedges sometimes if you reconfigure one CRTC
1041 * whilst another is running (see fdo bug #24611). 1039 * whilst another is running (see fdo bug #24611).
@@ -1046,7 +1044,6 @@ static void radeon_crtc_prepare(struct drm_crtc *crtc)
1046 1044
1047static void radeon_crtc_commit(struct drm_crtc *crtc) 1045static void radeon_crtc_commit(struct drm_crtc *crtc)
1048{ 1046{
1049 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1050 struct drm_device *dev = crtc->dev; 1047 struct drm_device *dev = crtc->dev;
1051 struct drm_crtc *crtci; 1048 struct drm_crtc *crtci;
1052 1049
@@ -1057,7 +1054,6 @@ static void radeon_crtc_commit(struct drm_crtc *crtc)
1057 if (crtci->enabled) 1054 if (crtci->enabled)
1058 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON); 1055 radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
1059 } 1056 }
1060 radeon_crtc->in_mode_set = false;
1061} 1057}
1062 1058
1063static const struct drm_crtc_helper_funcs legacy_helper_funcs = { 1059static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 44e579e75fd0..69ad4fe224c1 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -302,7 +302,6 @@ struct radeon_crtc {
302 u16 lut_r[256], lut_g[256], lut_b[256]; 302 u16 lut_r[256], lut_g[256], lut_b[256];
303 bool enabled; 303 bool enabled;
304 bool can_tile; 304 bool can_tile;
305 bool in_mode_set;
306 uint32_t crtc_offset; 305 uint32_t crtc_offset;
307 struct drm_gem_object *cursor_bo; 306 struct drm_gem_object *cursor_bo;
308 uint64_t cursor_addr; 307 uint64_t cursor_addr;
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 93f760e27a92..6c0ce8915fac 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -726,7 +726,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
726 return r; 726 return r;
727 } 727 }
728 DRM_INFO("radeon: %uM of VRAM memory ready\n", 728 DRM_INFO("radeon: %uM of VRAM memory ready\n",
729 (unsigned)rdev->mc.real_vram_size / (1024 * 1024)); 729 (unsigned) (rdev->mc.real_vram_size / (1024 * 1024)));
730 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 730 r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
731 rdev->mc.gtt_size >> PAGE_SHIFT); 731 rdev->mc.gtt_size >> PAGE_SHIFT);
732 if (r) { 732 if (r) {
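The DRM_INFO fix above is about cast precedence: (unsigned)rdev->mc.real_vram_size / (1024 * 1024) truncates the 64-bit size to 32 bits before dividing, so the printed value wraps once the size no longer fits in 32 bits; the added parentheses divide first. A standalone demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t vram = 8ULL << 30;	/* 8 GiB */

	/* the cast binds tighter than '/': truncate first, then divide */
	unsigned wrong = (unsigned)vram / (1024 * 1024);
	/* divide in 64 bits, then truncate the (now small) result */
	unsigned right = (unsigned)(vram / (1024 * 1024));

	printf("wrong=%uM right=%uM\n", wrong, right);
	return 0;
}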
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index f0b6c2f87c4d..5ffade69af25 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -60,6 +60,11 @@ MODULE_FIRMWARE("radeon/OLAND_me.bin");
60MODULE_FIRMWARE("radeon/OLAND_ce.bin"); 60MODULE_FIRMWARE("radeon/OLAND_ce.bin");
61MODULE_FIRMWARE("radeon/OLAND_mc.bin"); 61MODULE_FIRMWARE("radeon/OLAND_mc.bin");
62MODULE_FIRMWARE("radeon/OLAND_rlc.bin"); 62MODULE_FIRMWARE("radeon/OLAND_rlc.bin");
63MODULE_FIRMWARE("radeon/HAINAN_pfp.bin");
64MODULE_FIRMWARE("radeon/HAINAN_me.bin");
65MODULE_FIRMWARE("radeon/HAINAN_ce.bin");
66MODULE_FIRMWARE("radeon/HAINAN_mc.bin");
67MODULE_FIRMWARE("radeon/HAINAN_rlc.bin");
63 68
64extern int r600_ih_ring_alloc(struct radeon_device *rdev); 69extern int r600_ih_ring_alloc(struct radeon_device *rdev);
65extern void r600_ih_ring_fini(struct radeon_device *rdev); 70extern void r600_ih_ring_fini(struct radeon_device *rdev);
@@ -265,6 +270,40 @@ static const u32 oland_golden_registers[] =
265 0x15c0, 0x000c0fc0, 0x000c0400 270 0x15c0, 0x000c0fc0, 0x000c0400
266}; 271};
267 272
273static const u32 hainan_golden_registers[] =
274{
275 0x9a10, 0x00010000, 0x00018208,
276 0x9830, 0xffffffff, 0x00000000,
277 0x9834, 0xf00fffff, 0x00000400,
278 0x9838, 0x0002021c, 0x00020200,
279 0xd0c0, 0xff000fff, 0x00000100,
280 0xd030, 0x000300c0, 0x00800040,
281 0xd8c0, 0xff000fff, 0x00000100,
282 0xd830, 0x000300c0, 0x00800040,
283 0x2ae4, 0x00073ffe, 0x000022a2,
284 0x240c, 0x000007ff, 0x00000000,
285 0x8a14, 0xf000001f, 0x00000007,
286 0x8b24, 0xffffffff, 0x00ffffff,
287 0x8b10, 0x0000ff0f, 0x00000000,
288 0x28a4c, 0x07ffffff, 0x4e000000,
289 0x28350, 0x3f3f3fff, 0x00000000,
290 0x30, 0x000000ff, 0x0040,
291 0x34, 0x00000040, 0x00004040,
292 0x9100, 0x03e00000, 0x03600000,
293 0x9060, 0x0000007f, 0x00000020,
294 0x9508, 0x00010000, 0x00010000,
295 0xac14, 0x000003ff, 0x000000f1,
296 0xac10, 0xffffffff, 0x00000000,
297 0xac0c, 0xffffffff, 0x00003210,
298 0x88d4, 0x0000001f, 0x00000010,
299 0x15c0, 0x000c0fc0, 0x000c0400
300};
301
302static const u32 hainan_golden_registers2[] =
303{
304 0x98f8, 0xffffffff, 0x02010001
305};
306
268static const u32 tahiti_mgcg_cgcg_init[] = 307static const u32 tahiti_mgcg_cgcg_init[] =
269{ 308{
270 0xc400, 0xffffffff, 0xfffffffc, 309 0xc400, 0xffffffff, 0xfffffffc,
@@ -673,6 +712,83 @@ static const u32 oland_mgcg_cgcg_init[] =
673 0xd8c0, 0xfffffff0, 0x00000100 712 0xd8c0, 0xfffffff0, 0x00000100
674}; 713};
675 714
715static const u32 hainan_mgcg_cgcg_init[] =
716{
717 0xc400, 0xffffffff, 0xfffffffc,
718 0x802c, 0xffffffff, 0xe0000000,
719 0x9a60, 0xffffffff, 0x00000100,
720 0x92a4, 0xffffffff, 0x00000100,
721 0xc164, 0xffffffff, 0x00000100,
722 0x9774, 0xffffffff, 0x00000100,
723 0x8984, 0xffffffff, 0x06000100,
724 0x8a18, 0xffffffff, 0x00000100,
725 0x92a0, 0xffffffff, 0x00000100,
726 0xc380, 0xffffffff, 0x00000100,
727 0x8b28, 0xffffffff, 0x00000100,
728 0x9144, 0xffffffff, 0x00000100,
729 0x8d88, 0xffffffff, 0x00000100,
730 0x8d8c, 0xffffffff, 0x00000100,
731 0x9030, 0xffffffff, 0x00000100,
732 0x9034, 0xffffffff, 0x00000100,
733 0x9038, 0xffffffff, 0x00000100,
734 0x903c, 0xffffffff, 0x00000100,
735 0xad80, 0xffffffff, 0x00000100,
736 0xac54, 0xffffffff, 0x00000100,
737 0x897c, 0xffffffff, 0x06000100,
738 0x9868, 0xffffffff, 0x00000100,
739 0x9510, 0xffffffff, 0x00000100,
740 0xaf04, 0xffffffff, 0x00000100,
741 0xae04, 0xffffffff, 0x00000100,
742 0x949c, 0xffffffff, 0x00000100,
743 0x802c, 0xffffffff, 0xe0000000,
744 0x9160, 0xffffffff, 0x00010000,
745 0x9164, 0xffffffff, 0x00030002,
746 0x9168, 0xffffffff, 0x00040007,
747 0x916c, 0xffffffff, 0x00060005,
748 0x9170, 0xffffffff, 0x00090008,
749 0x9174, 0xffffffff, 0x00020001,
750 0x9178, 0xffffffff, 0x00040003,
751 0x917c, 0xffffffff, 0x00000007,
752 0x9180, 0xffffffff, 0x00060005,
753 0x9184, 0xffffffff, 0x00090008,
754 0x9188, 0xffffffff, 0x00030002,
755 0x918c, 0xffffffff, 0x00050004,
756 0x9190, 0xffffffff, 0x00000008,
757 0x9194, 0xffffffff, 0x00070006,
758 0x9198, 0xffffffff, 0x000a0009,
759 0x919c, 0xffffffff, 0x00040003,
760 0x91a0, 0xffffffff, 0x00060005,
761 0x91a4, 0xffffffff, 0x00000009,
762 0x91a8, 0xffffffff, 0x00080007,
763 0x91ac, 0xffffffff, 0x000b000a,
764 0x91b0, 0xffffffff, 0x00050004,
765 0x91b4, 0xffffffff, 0x00070006,
766 0x91b8, 0xffffffff, 0x0008000b,
767 0x91bc, 0xffffffff, 0x000a0009,
768 0x91c0, 0xffffffff, 0x000d000c,
769 0x91c4, 0xffffffff, 0x00060005,
770 0x91c8, 0xffffffff, 0x00080007,
771 0x91cc, 0xffffffff, 0x0000000b,
772 0x91d0, 0xffffffff, 0x000a0009,
773 0x91d4, 0xffffffff, 0x000d000c,
774 0x9150, 0xffffffff, 0x96940200,
775 0x8708, 0xffffffff, 0x00900100,
776 0xc478, 0xffffffff, 0x00000080,
777 0xc404, 0xffffffff, 0x0020003f,
778 0x30, 0xffffffff, 0x0000001c,
779 0x34, 0x000f0000, 0x000f0000,
780 0x160c, 0xffffffff, 0x00000100,
781 0x1024, 0xffffffff, 0x00000100,
782 0x20a8, 0xffffffff, 0x00000104,
783 0x264c, 0x000c0000, 0x000c0000,
784 0x2648, 0x000c0000, 0x000c0000,
785 0x2f50, 0x00000001, 0x00000001,
786 0x30cc, 0xc0000fff, 0x00000104,
787 0xc1e4, 0x00000001, 0x00000001,
788 0xd0c0, 0xfffffff0, 0x00000100,
789 0xd8c0, 0xfffffff0, 0x00000100
790};
791
676static u32 verde_pg_init[] = 792static u32 verde_pg_init[] =
677{ 793{
678 0x353c, 0xffffffff, 0x40000, 794 0x353c, 0xffffffff, 0x40000,
@@ -853,6 +969,17 @@ static void si_init_golden_registers(struct radeon_device *rdev)
853 oland_mgcg_cgcg_init, 969 oland_mgcg_cgcg_init,
854 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init)); 970 (const u32)ARRAY_SIZE(oland_mgcg_cgcg_init));
855 break; 971 break;
972 case CHIP_HAINAN:
973 radeon_program_register_sequence(rdev,
974 hainan_golden_registers,
975 (const u32)ARRAY_SIZE(hainan_golden_registers));
976 radeon_program_register_sequence(rdev,
977 hainan_golden_registers2,
978 (const u32)ARRAY_SIZE(hainan_golden_registers2));
979 radeon_program_register_sequence(rdev,
980 hainan_mgcg_cgcg_init,
981 (const u32)ARRAY_SIZE(hainan_mgcg_cgcg_init));
982 break;
856 default: 983 default:
857 break; 984 break;
858 } 985 }
@@ -1062,6 +1189,45 @@ static const u32 oland_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1062 {0x0000009f, 0x00a17730} 1189 {0x0000009f, 0x00a17730}
1063}; 1190};
1064 1191
1192static const u32 hainan_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
1193 {0x0000006f, 0x03044000},
1194 {0x00000070, 0x0480c018},
1195 {0x00000071, 0x00000040},
1196 {0x00000072, 0x01000000},
1197 {0x00000074, 0x000000ff},
1198 {0x00000075, 0x00143400},
1199 {0x00000076, 0x08ec0800},
1200 {0x00000077, 0x040000cc},
1201 {0x00000079, 0x00000000},
1202 {0x0000007a, 0x21000409},
1203 {0x0000007c, 0x00000000},
1204 {0x0000007d, 0xe8000000},
1205 {0x0000007e, 0x044408a8},
1206 {0x0000007f, 0x00000003},
1207 {0x00000080, 0x00000000},
1208 {0x00000081, 0x01000000},
1209 {0x00000082, 0x02000000},
1210 {0x00000083, 0x00000000},
1211 {0x00000084, 0xe3f3e4f4},
1212 {0x00000085, 0x00052024},
1213 {0x00000087, 0x00000000},
1214 {0x00000088, 0x66036603},
1215 {0x00000089, 0x01000000},
1216 {0x0000008b, 0x1c0a0000},
1217 {0x0000008c, 0xff010000},
1218 {0x0000008e, 0xffffefff},
1219 {0x0000008f, 0xfff3efff},
1220 {0x00000090, 0xfff3efbf},
1221 {0x00000094, 0x00101101},
1222 {0x00000095, 0x00000fff},
1223 {0x00000096, 0x00116fff},
1224 {0x00000097, 0x60010000},
1225 {0x00000098, 0x10010000},
1226 {0x00000099, 0x00006000},
1227 {0x0000009a, 0x00001000},
1228 {0x0000009f, 0x00a07730}
1229};
1230
1065/* ucode loading */ 1231/* ucode loading */
1066static int si_mc_load_microcode(struct radeon_device *rdev) 1232static int si_mc_load_microcode(struct radeon_device *rdev)
1067{ 1233{
@@ -1095,6 +1261,11 @@ static int si_mc_load_microcode(struct radeon_device *rdev)
1095 ucode_size = OLAND_MC_UCODE_SIZE; 1261 ucode_size = OLAND_MC_UCODE_SIZE;
1096 regs_size = TAHITI_IO_MC_REGS_SIZE; 1262 regs_size = TAHITI_IO_MC_REGS_SIZE;
1097 break; 1263 break;
1264 case CHIP_HAINAN:
1265 io_mc_regs = (u32 *)&hainan_io_mc_regs;
1266 ucode_size = OLAND_MC_UCODE_SIZE;
1267 regs_size = TAHITI_IO_MC_REGS_SIZE;
1268 break;
1098 } 1269 }
1099 1270
1100 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK; 1271 running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;
@@ -1198,6 +1369,15 @@ static int si_init_microcode(struct radeon_device *rdev)
1198 rlc_req_size = SI_RLC_UCODE_SIZE * 4; 1369 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1199 mc_req_size = OLAND_MC_UCODE_SIZE * 4; 1370 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
1200 break; 1371 break;
1372 case CHIP_HAINAN:
1373 chip_name = "HAINAN";
1374 rlc_chip_name = "HAINAN";
1375 pfp_req_size = SI_PFP_UCODE_SIZE * 4;
1376 me_req_size = SI_PM4_UCODE_SIZE * 4;
1377 ce_req_size = SI_CE_UCODE_SIZE * 4;
1378 rlc_req_size = SI_RLC_UCODE_SIZE * 4;
1379 mc_req_size = OLAND_MC_UCODE_SIZE * 4;
1380 break;
1201 default: BUG(); 1381 default: BUG();
1202 } 1382 }
1203 1383
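
The new CHIP_HAINAN case above reuses Oland's MC microcode size and supplies "HAINAN" as both the chip and RLC name. Those strings conventionally become the firmware file names the driver requests; the exact format string used by si_init_microcode() is not visible in this hunk, so the radeon/%s_%s.bin pattern below is an assumption, shown only as a hedged sketch.

#include <stdio.h>

/* Hypothetical sketch: how a chip_name string such as "HAINAN" is typically
 * expanded into firmware file names. The format string is an assumption,
 * not taken from the hunk above. */
static void show_fw_names(const char *chip_name)
{
	static const char *const suffix[] = { "pfp", "me", "ce", "rlc", "mc" };
	char fw_name[64];
	unsigned int i;

	for (i = 0; i < sizeof(suffix) / sizeof(suffix[0]); i++) {
		snprintf(fw_name, sizeof(fw_name), "radeon/%s_%s.bin",
			 chip_name, suffix[i]);
		printf("%s\n", fw_name);
	}
}

int main(void)
{
	show_fw_names("HAINAN");
	return 0;
}
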
@@ -2003,7 +2183,8 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
2003 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden); 2183 WREG32(GB_TILE_MODE0 + (reg_offset * 4), gb_tile_moden);
2004 } 2184 }
2005 } else if ((rdev->family == CHIP_VERDE) || 2185 } else if ((rdev->family == CHIP_VERDE) ||
2006 (rdev->family == CHIP_OLAND)) { 2186 (rdev->family == CHIP_OLAND) ||
2187 (rdev->family == CHIP_HAINAN)) {
2007 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) { 2188 for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
2008 switch (reg_offset) { 2189 switch (reg_offset) {
2009 case 0: /* non-AA compressed depth or any compressed stencil */ 2190 case 0: /* non-AA compressed depth or any compressed stencil */
@@ -2466,6 +2647,23 @@ static void si_gpu_init(struct radeon_device *rdev)
2466 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 2647 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
2467 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN; 2648 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
2468 break; 2649 break;
2650 case CHIP_HAINAN:
2651 rdev->config.si.max_shader_engines = 1;
2652 rdev->config.si.max_tile_pipes = 4;
2653 rdev->config.si.max_cu_per_sh = 5;
2654 rdev->config.si.max_sh_per_se = 1;
2655 rdev->config.si.max_backends_per_se = 1;
2656 rdev->config.si.max_texture_channel_caches = 2;
2657 rdev->config.si.max_gprs = 256;
2658 rdev->config.si.max_gs_threads = 16;
2659 rdev->config.si.max_hw_contexts = 8;
2660
2661 rdev->config.si.sc_prim_fifo_size_frontend = 0x20;
2662 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
2663 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
2664 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
2665 gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
2666 break;
2469 } 2667 }
2470 2668
2471 /* Initialize HDP */ 2669 /* Initialize HDP */
@@ -2559,9 +2757,11 @@ static void si_gpu_init(struct radeon_device *rdev)
2559 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 2757 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2560 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config); 2758 WREG32(DMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
2561 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config); 2759 WREG32(DMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);
2562 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config); 2760 if (rdev->has_uvd) {
2563 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config); 2761 WREG32(UVD_UDEC_ADDR_CONFIG, gb_addr_config);
2564 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config); 2762 WREG32(UVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
2763 WREG32(UVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
2764 }
2565 2765
2566 si_tiling_mode_table_init(rdev); 2766 si_tiling_mode_table_init(rdev);
2567 2767
@@ -3304,8 +3504,9 @@ static void si_mc_program(struct radeon_device *rdev)
3304 if (radeon_mc_wait_for_idle(rdev)) { 3504 if (radeon_mc_wait_for_idle(rdev)) {
3305 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 3505 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
3306 } 3506 }
3307 /* Lockout access through VGA aperture*/ 3507 if (!ASIC_IS_NODCE(rdev))
3308 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE); 3508 /* Lockout access through VGA aperture*/
3509 WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
3309 /* Update configuration */ 3510 /* Update configuration */
3310 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, 3511 WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
3311 rdev->mc.vram_start >> 12); 3512 rdev->mc.vram_start >> 12);
@@ -3327,9 +3528,11 @@ static void si_mc_program(struct radeon_device *rdev)
3327 dev_warn(rdev->dev, "Wait for MC idle timedout !\n"); 3528 dev_warn(rdev->dev, "Wait for MC idle timedout !\n");
3328 } 3529 }
3329 evergreen_mc_resume(rdev, &save); 3530 evergreen_mc_resume(rdev, &save);
3330 /* we need to own VRAM, so turn off the VGA renderer here 3531 if (!ASIC_IS_NODCE(rdev)) {
3331 * to stop it overwriting our objects */ 3532 /* we need to own VRAM, so turn off the VGA renderer here
3332 rv515_vga_render_disable(rdev); 3533 * to stop it overwriting our objects */
3534 rv515_vga_render_disable(rdev);
3535 }
3333} 3536}
3334 3537
3335static void si_vram_gtt_location(struct radeon_device *rdev, 3538static void si_vram_gtt_location(struct radeon_device *rdev,
@@ -3397,8 +3600,8 @@ static int si_mc_init(struct radeon_device *rdev)
3397 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); 3600 rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
3398 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); 3601 rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
3399 /* size in MB on si */ 3602 /* size in MB on si */
3400 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 3603 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
3401 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; 3604 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024ULL * 1024ULL;
3402 rdev->mc.visible_vram_size = rdev->mc.aper_size; 3605 rdev->mc.visible_vram_size = rdev->mc.aper_size;
3403 si_vram_gtt_location(rdev, &rdev->mc); 3606 si_vram_gtt_location(rdev, &rdev->mc);
3404 radeon_update_bandwidth_info(rdev); 3607 radeon_update_bandwidth_info(rdev);
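
The si_mc_init() hunk above widens the megabyte-to-byte conversion to 64-bit: RREG32() returns a u32, so the old all-32-bit multiply wraps for boards reporting 4096 MB or more. A stand-alone sketch of the difference:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: CONFIG_MEMSIZE reports VRAM in MB as a 32-bit value. */
int main(void)
{
	uint32_t memsize_mb = 4096;              /* a 4 GB board */

	/* 32-bit arithmetic wraps: 4096 * 1024 * 1024 == 2^32 == 0 (mod 2^32) */
	uint64_t wrong = memsize_mb * 1024 * 1024;

	/* Promoting to 64-bit first, as the patch does with 1024ULL, is correct */
	uint64_t right = (uint64_t)memsize_mb * 1024ULL * 1024ULL;

	printf("32-bit product: %llu bytes\n", (unsigned long long)wrong);
	printf("64-bit product: %llu bytes\n", (unsigned long long)right);
	return 0;
}
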
@@ -4251,8 +4454,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
4251 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 4454 tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
4252 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); 4455 WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp);
4253 WREG32(GRBM_INT_CNTL, 0); 4456 WREG32(GRBM_INT_CNTL, 0);
4254 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 4457 if (rdev->num_crtc >= 2) {
4255 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 4458 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
4459 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
4460 }
4256 if (rdev->num_crtc >= 4) { 4461 if (rdev->num_crtc >= 4) {
4257 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 4462 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
4258 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 4463 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -4262,8 +4467,10 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
4262 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 4467 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
4263 } 4468 }
4264 4469
4265 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); 4470 if (rdev->num_crtc >= 2) {
4266 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); 4471 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
4472 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
4473 }
4267 if (rdev->num_crtc >= 4) { 4474 if (rdev->num_crtc >= 4) {
4268 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0); 4475 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
4269 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0); 4476 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
@@ -4273,21 +4480,22 @@ static void si_disable_interrupt_state(struct radeon_device *rdev)
4273 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0); 4480 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
4274 } 4481 }
4275 4482
4276 WREG32(DACA_AUTODETECT_INT_CONTROL, 0); 4483 if (!ASIC_IS_NODCE(rdev)) {
4277 4484 WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
4278 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4279 WREG32(DC_HPD1_INT_CONTROL, tmp);
4280 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4281 WREG32(DC_HPD2_INT_CONTROL, tmp);
4282 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4283 WREG32(DC_HPD3_INT_CONTROL, tmp);
4284 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4285 WREG32(DC_HPD4_INT_CONTROL, tmp);
4286 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4287 WREG32(DC_HPD5_INT_CONTROL, tmp);
4288 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4289 WREG32(DC_HPD6_INT_CONTROL, tmp);
4290 4485
4486 tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4487 WREG32(DC_HPD1_INT_CONTROL, tmp);
4488 tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4489 WREG32(DC_HPD2_INT_CONTROL, tmp);
4490 tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4491 WREG32(DC_HPD3_INT_CONTROL, tmp);
4492 tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4493 WREG32(DC_HPD4_INT_CONTROL, tmp);
4494 tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4495 WREG32(DC_HPD5_INT_CONTROL, tmp);
4496 tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
4497 WREG32(DC_HPD6_INT_CONTROL, tmp);
4498 }
4291} 4499}
4292 4500
4293static int si_irq_init(struct radeon_device *rdev) 4501static int si_irq_init(struct radeon_device *rdev)
@@ -4366,7 +4574,7 @@ int si_irq_set(struct radeon_device *rdev)
4366 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE; 4574 u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
4367 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0; 4575 u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
4368 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0; 4576 u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
4369 u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6; 4577 u32 hpd1 = 0, hpd2 = 0, hpd3 = 0, hpd4 = 0, hpd5 = 0, hpd6 = 0;
4370 u32 grbm_int_cntl = 0; 4578 u32 grbm_int_cntl = 0;
4371 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0; 4579 u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;
4372 u32 dma_cntl, dma_cntl1; 4580 u32 dma_cntl, dma_cntl1;
@@ -4383,12 +4591,14 @@ int si_irq_set(struct radeon_device *rdev)
4383 return 0; 4591 return 0;
4384 } 4592 }
4385 4593
4386 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN; 4594 if (!ASIC_IS_NODCE(rdev)) {
4387 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN; 4595 hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
4388 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN; 4596 hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
4389 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN; 4597 hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
4390 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN; 4598 hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
4391 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN; 4599 hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
4600 hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
4601 }
4392 4602
4393 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE; 4603 dma_cntl = RREG32(DMA_CNTL + DMA0_REGISTER_OFFSET) & ~TRAP_ENABLE;
4394 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; 4604 dma_cntl1 = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE;
@@ -4479,8 +4689,10 @@ int si_irq_set(struct radeon_device *rdev)
4479 4689
4480 WREG32(GRBM_INT_CNTL, grbm_int_cntl); 4690 WREG32(GRBM_INT_CNTL, grbm_int_cntl);
4481 4691
4482 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1); 4692 if (rdev->num_crtc >= 2) {
4483 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2); 4693 WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
4694 WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
4695 }
4484 if (rdev->num_crtc >= 4) { 4696 if (rdev->num_crtc >= 4) {
4485 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3); 4697 WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
4486 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4); 4698 WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
@@ -4490,8 +4702,10 @@ int si_irq_set(struct radeon_device *rdev)
4490 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6); 4702 WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
4491 } 4703 }
4492 4704
4493 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1); 4705 if (rdev->num_crtc >= 2) {
4494 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2); 4706 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
4707 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
4708 }
4495 if (rdev->num_crtc >= 4) { 4709 if (rdev->num_crtc >= 4) {
4496 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3); 4710 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
4497 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4); 4711 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
@@ -4501,12 +4715,14 @@ int si_irq_set(struct radeon_device *rdev)
4501 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6); 4715 WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
4502 } 4716 }
4503 4717
4504 WREG32(DC_HPD1_INT_CONTROL, hpd1); 4718 if (!ASIC_IS_NODCE(rdev)) {
4505 WREG32(DC_HPD2_INT_CONTROL, hpd2); 4719 WREG32(DC_HPD1_INT_CONTROL, hpd1);
4506 WREG32(DC_HPD3_INT_CONTROL, hpd3); 4720 WREG32(DC_HPD2_INT_CONTROL, hpd2);
4507 WREG32(DC_HPD4_INT_CONTROL, hpd4); 4721 WREG32(DC_HPD3_INT_CONTROL, hpd3);
4508 WREG32(DC_HPD5_INT_CONTROL, hpd5); 4722 WREG32(DC_HPD4_INT_CONTROL, hpd4);
4509 WREG32(DC_HPD6_INT_CONTROL, hpd6); 4723 WREG32(DC_HPD5_INT_CONTROL, hpd5);
4724 WREG32(DC_HPD6_INT_CONTROL, hpd6);
4725 }
4510 4726
4511 return 0; 4727 return 0;
4512} 4728}
@@ -4515,6 +4731,9 @@ static inline void si_irq_ack(struct radeon_device *rdev)
4515{ 4731{
4516 u32 tmp; 4732 u32 tmp;
4517 4733
4734 if (ASIC_IS_NODCE(rdev))
4735 return;
4736
4518 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS); 4737 rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
4519 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE); 4738 rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
4520 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2); 4739 rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
@@ -5118,15 +5337,17 @@ static int si_startup(struct radeon_device *rdev)
5118 return r; 5337 return r;
5119 } 5338 }
5120 5339
5121 r = rv770_uvd_resume(rdev); 5340 if (rdev->has_uvd) {
5122 if (!r) { 5341 r = rv770_uvd_resume(rdev);
5123 r = radeon_fence_driver_start_ring(rdev, 5342 if (!r) {
5124 R600_RING_TYPE_UVD_INDEX); 5343 r = radeon_fence_driver_start_ring(rdev,
5344 R600_RING_TYPE_UVD_INDEX);
5345 if (r)
5346 dev_err(rdev->dev, "UVD fences init error (%d).\n", r);
5347 }
5125 if (r) 5348 if (r)
5126 dev_err(rdev->dev, "UVD fences init error (%d).\n", r); 5349 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
5127 } 5350 }
5128 if (r)
5129 rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
5130 5351
5131 /* Enable IRQ */ 5352 /* Enable IRQ */
5132 r = si_irq_init(rdev); 5353 r = si_irq_init(rdev);
@@ -5185,16 +5406,18 @@ static int si_startup(struct radeon_device *rdev)
5185 if (r) 5406 if (r)
5186 return r; 5407 return r;
5187 5408
5188 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 5409 if (rdev->has_uvd) {
5189 if (ring->ring_size) { 5410 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5190 r = radeon_ring_init(rdev, ring, ring->ring_size, 5411 if (ring->ring_size) {
5191 R600_WB_UVD_RPTR_OFFSET, 5412 r = radeon_ring_init(rdev, ring, ring->ring_size,
5192 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR, 5413 R600_WB_UVD_RPTR_OFFSET,
5193 0, 0xfffff, RADEON_CP_PACKET2); 5414 UVD_RBC_RB_RPTR, UVD_RBC_RB_WPTR,
5194 if (!r) 5415 0, 0xfffff, RADEON_CP_PACKET2);
5195 r = r600_uvd_init(rdev); 5416 if (!r)
5196 if (r) 5417 r = r600_uvd_init(rdev);
5197 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r); 5418 if (r)
5419 DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
5420 }
5198 } 5421 }
5199 5422
5200 r = radeon_ib_pool_init(rdev); 5423 r = radeon_ib_pool_init(rdev);
@@ -5243,8 +5466,10 @@ int si_suspend(struct radeon_device *rdev)
5243 radeon_vm_manager_fini(rdev); 5466 radeon_vm_manager_fini(rdev);
5244 si_cp_enable(rdev, false); 5467 si_cp_enable(rdev, false);
5245 cayman_dma_stop(rdev); 5468 cayman_dma_stop(rdev);
5246 r600_uvd_rbc_stop(rdev); 5469 if (rdev->has_uvd) {
5247 radeon_uvd_suspend(rdev); 5470 r600_uvd_rbc_stop(rdev);
5471 radeon_uvd_suspend(rdev);
5472 }
5248 si_irq_suspend(rdev); 5473 si_irq_suspend(rdev);
5249 radeon_wb_disable(rdev); 5474 radeon_wb_disable(rdev);
5250 si_pcie_gart_disable(rdev); 5475 si_pcie_gart_disable(rdev);
@@ -5332,11 +5557,13 @@ int si_init(struct radeon_device *rdev)
5332 ring->ring_obj = NULL; 5557 ring->ring_obj = NULL;
5333 r600_ring_init(rdev, ring, 64 * 1024); 5558 r600_ring_init(rdev, ring, 64 * 1024);
5334 5559
5335 r = radeon_uvd_init(rdev); 5560 if (rdev->has_uvd) {
5336 if (!r) { 5561 r = radeon_uvd_init(rdev);
5337 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; 5562 if (!r) {
5338 ring->ring_obj = NULL; 5563 ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
5339 r600_ring_init(rdev, ring, 4096); 5564 ring->ring_obj = NULL;
5565 r600_ring_init(rdev, ring, 4096);
5566 }
5340 } 5567 }
5341 5568
5342 rdev->ih.ring_obj = NULL; 5569 rdev->ih.ring_obj = NULL;
@@ -5384,7 +5611,8 @@ void si_fini(struct radeon_device *rdev)
5384 radeon_vm_manager_fini(rdev); 5611 radeon_vm_manager_fini(rdev);
5385 radeon_ib_pool_fini(rdev); 5612 radeon_ib_pool_fini(rdev);
5386 radeon_irq_kms_fini(rdev); 5613 radeon_irq_kms_fini(rdev);
5387 radeon_uvd_fini(rdev); 5614 if (rdev->has_uvd)
5615 radeon_uvd_fini(rdev);
5388 si_pcie_gart_fini(rdev); 5616 si_pcie_gart_fini(rdev);
5389 r600_vram_scratch_fini(rdev); 5617 r600_vram_scratch_fini(rdev);
5390 radeon_gem_fini(rdev); 5618 radeon_gem_fini(rdev);
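
Taken together, the si.c hunks follow one pattern: Hainan has no display engine and no UVD block, so UVD setup and teardown are gated on rdev->has_uvd and display-side register accesses on !ASIC_IS_NODCE(rdev), alongside the existing num_crtc checks. A minimal stand-alone sketch of that guard shape, using hypothetical stand-in types rather than the real radeon structures:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types and helpers, hypothetical, only to illustrate the guard
 * shape used throughout the si.c hunks above; not the radeon API. */
struct dev_caps {
	bool has_uvd;   /* UVD video block present */
	bool has_dce;   /* display controller present */
	int  num_crtc;
};

static void program_display(void) { puts("programming display regs"); }
static void start_uvd(void)       { puts("starting UVD ring"); }

static void bring_up(const struct dev_caps *caps)
{
	if (caps->has_dce && caps->num_crtc >= 2)
		program_display();   /* skipped on DCE-less parts */

	if (caps->has_uvd)
		start_uvd();         /* skipped when no UVD block */
}

int main(void)
{
	struct dev_caps hainan_like = { .has_uvd = false, .has_dce = false, .num_crtc = 0 };
	struct dev_caps tahiti_like = { .has_uvd = true,  .has_dce = true,  .num_crtc = 6 };

	bring_up(&hainan_like);
	bring_up(&tahiti_like);
	return 0;
}
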
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 222877ba6cf5..8f2d7d4f9b28 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -28,6 +28,7 @@
28 28
29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003 29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002 30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31#define HAINAN_GB_ADDR_CONFIG_GOLDEN 0x02010001
31 32
32/* discrete uvd clocks */ 33/* discrete uvd clocks */
33#define CG_UPLL_FUNC_CNTL 0x634 34#define CG_UPLL_FUNC_CNTL 0x634
diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
index 1e2060324f02..8c04943f82e3 100644
--- a/drivers/gpu/host1x/drm/dc.c
+++ b/drivers/gpu/host1x/drm/dc.c
@@ -1128,11 +1128,6 @@ static int tegra_dc_probe(struct platform_device *pdev)
1128 return err; 1128 return err;
1129 1129
1130 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1130 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1131 if (!regs) {
1132 dev_err(&pdev->dev, "failed to get registers\n");
1133 return -ENXIO;
1134 }
1135
1136 dc->regs = devm_ioremap_resource(&pdev->dev, regs); 1131 dc->regs = devm_ioremap_resource(&pdev->dev, regs);
1137 if (IS_ERR(dc->regs)) 1132 if (IS_ERR(dc->regs))
1138 return PTR_ERR(dc->regs); 1133 return PTR_ERR(dc->regs);
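
The removed check is redundant: devm_ioremap_resource() validates the resource itself, including a NULL resource, and reports failure through an ERR_PTR, so probe code only needs the IS_ERR() test. The same cleanup repeats below for i2c-s3c2410, i2c-sirf and i2c-tegra. A user-space sketch of the ERR_PTR convention that makes this possible, with the helpers reimplemented here purely for illustration:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* User-space analogue of the kernel's ERR_PTR/IS_ERR convention: the mapping
 * helper validates its input and reports failure through the pointer, so the
 * caller needs no separate NULL-resource check. */
#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct resource { unsigned long start, size; };

static void *map_resource(const struct resource *res)
{
	if (!res || !res->size)
		return ERR_PTR(-EINVAL);   /* invalid resource handled here */
	/* real code would ioremap(); a fake cookie is enough for the sketch */
	return (void *)res->start;
}

int main(void)
{
	void *regs = map_resource(NULL);

	if (IS_ERR(regs))
		printf("mapping failed: %ld\n", PTR_ERR(regs));
	return 0;
}
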
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index bad8128b283a..21ef68934a20 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -329,7 +329,7 @@ static u32 get_vp_index(uuid_le *type_guid)
329 return 0; 329 return 0;
330 } 330 }
331 cur_cpu = (++next_vp % max_cpus); 331 cur_cpu = (++next_vp % max_cpus);
332 return cur_cpu; 332 return hv_context.vp_index[cur_cpu];
333} 333}
334 334
335/* 335/*
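
The one-line change above returns the hypervisor's VP index for the chosen CPU instead of the raw Linux CPU number. A toy sketch of that indirection, with the table contents invented for illustration:

#include <stdio.h>

/* Sketch of the indirection the channel_mgmt hunk adds: the round-robin
 * picks a Linux CPU number, but the host expects the hypervisor's VP index
 * for that CPU, so the result is mapped through a per-CPU table. */
#define NR_CPUS 4

static const unsigned int vp_index[NR_CPUS] = { 0, 2, 4, 6 };

static unsigned int next_vp;

static unsigned int pick_vp(void)
{
	unsigned int cur_cpu = ++next_vp % NR_CPUS;   /* Linux CPU number */
	return vp_index[cur_cpu];                     /* hypervisor VP index */
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		printf("target VP: %u\n", pick_vp());
	return 0;
}
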
diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c
index df0b69987914..2ebd6ce46108 100644
--- a/drivers/hwmon/abituguru.c
+++ b/drivers/hwmon/abituguru.c
@@ -1414,14 +1414,18 @@ static int abituguru_probe(struct platform_device *pdev)
1414 pr_info("found Abit uGuru\n"); 1414 pr_info("found Abit uGuru\n");
1415 1415
1416 /* Register sysfs hooks */ 1416 /* Register sysfs hooks */
1417 for (i = 0; i < sysfs_attr_i; i++) 1417 for (i = 0; i < sysfs_attr_i; i++) {
1418 if (device_create_file(&pdev->dev, 1418 res = device_create_file(&pdev->dev,
1419 &data->sysfs_attr[i].dev_attr)) 1419 &data->sysfs_attr[i].dev_attr);
1420 if (res)
1420 goto abituguru_probe_error; 1421 goto abituguru_probe_error;
1421 for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) 1422 }
1422 if (device_create_file(&pdev->dev, 1423 for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) {
1423 &abituguru_sysfs_attr[i].dev_attr)) 1424 res = device_create_file(&pdev->dev,
1425 &abituguru_sysfs_attr[i].dev_attr);
1426 if (res)
1424 goto abituguru_probe_error; 1427 goto abituguru_probe_error;
1428 }
1425 1429
1426 data->hwmon_dev = hwmon_device_register(&pdev->dev); 1430 data->hwmon_dev = hwmon_device_register(&pdev->dev);
1427 if (!IS_ERR(data->hwmon_dev)) 1431 if (!IS_ERR(data->hwmon_dev))
diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
index aafa4531b961..52b77afebde1 100644
--- a/drivers/hwmon/iio_hwmon.c
+++ b/drivers/hwmon/iio_hwmon.c
@@ -84,8 +84,10 @@ static int iio_hwmon_probe(struct platform_device *pdev)
84 return PTR_ERR(channels); 84 return PTR_ERR(channels);
85 85
86 st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); 86 st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL);
87 if (st == NULL) 87 if (st == NULL) {
88 return -ENOMEM; 88 ret = -ENOMEM;
89 goto error_release_channels;
90 }
89 91
90 st->channels = channels; 92 st->channels = channels;
91 93
@@ -159,7 +161,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
159error_remove_group: 161error_remove_group:
160 sysfs_remove_group(&dev->kobj, &st->attr_group); 162 sysfs_remove_group(&dev->kobj, &st->attr_group);
161error_release_channels: 163error_release_channels:
162 iio_channel_release_all(st->channels); 164 iio_channel_release_all(channels);
163 return ret; 165 return ret;
164} 166}
165 167
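
Two things change in iio_hwmon_probe(): the -ENOMEM path now unwinds through the error label instead of returning directly, so the channels obtained earlier are released, and the label releases the local channels pointer rather than st->channels, since st may not have been allocated when the label is reached. A stand-alone sketch of the goto-unwind shape, with malloc/free standing in for the IIO and devm calls:

#include <stdio.h>
#include <stdlib.h>

/* Minimal sketch of the goto-based unwind restored above: once a resource
 * is acquired, later failures must jump past "return" to a label that
 * releases it. Names here are illustrative. */
static int probe_like(void)
{
	int ret = 0;
	char *channels = malloc(64);      /* stands in for iio_channel_get_all() */
	if (!channels)
		return -1;

	char *state = malloc(32);         /* stands in for devm_kzalloc() */
	if (!state) {
		ret = -1;
		goto error_release_channels;  /* not a bare return: channels is live */
	}

	/* ... hwmon registration would go here ... */
	free(state);                      /* devm would handle this in the kernel */
	free(channels);
	return 0;

error_release_channels:
	free(channels);
	return ret;
}

int main(void)
{
	printf("probe_like() -> %d\n", probe_like());
	return 0;
}
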
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index f43f5e571db9..04638aee9039 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -3705,8 +3705,10 @@ static int nct6775_probe(struct platform_device *pdev)
3705 data->have_temp |= 1 << i; 3705 data->have_temp |= 1 << i;
3706 data->have_temp_fixed |= 1 << i; 3706 data->have_temp_fixed |= 1 << i;
3707 data->reg_temp[0][i] = reg_temp_alternate[i]; 3707 data->reg_temp[0][i] = reg_temp_alternate[i];
3708 data->reg_temp[1][i] = reg_temp_over[i]; 3708 if (i < num_reg_temp) {
3709 data->reg_temp[2][i] = reg_temp_hyst[i]; 3709 data->reg_temp[1][i] = reg_temp_over[i];
3710 data->reg_temp[2][i] = reg_temp_hyst[i];
3711 }
3710 data->temp_src[i] = i + 1; 3712 data->temp_src[i] = i + 1;
3711 continue; 3713 continue;
3712 } 3714 }
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index a478454f690f..dfe6d9527efb 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -240,7 +240,7 @@ static struct tmp401_data *tmp401_update_device(struct device *dev)
240 mutex_lock(&data->update_lock); 240 mutex_lock(&data->update_lock);
241 241
242 next_update = data->last_updated + 242 next_update = data->last_updated +
243 msecs_to_jiffies(data->update_interval) + 1; 243 msecs_to_jiffies(data->update_interval);
244 if (time_after(jiffies, next_update) || !data->valid) { 244 if (time_after(jiffies, next_update) || !data->valid) {
245 if (data->kind != tmp432) { 245 if (data->kind != tmp432) {
246 /* 246 /*
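
Dropping the "+ 1" in tmp401_update_device() means cached readings are refreshed as soon as the configured interval has elapsed rather than one jiffy later. A small sketch of the comparison, with illustrative jiffy counts:

#include <stdio.h>

/* Sketch of the cache-refresh test; time_after() is modelled with a signed
 * difference the way the kernel macro behaves across wrap-around. */
typedef unsigned long jiffies_t;

static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t last_updated = 1000;
	jiffies_t interval = 500;                      /* msecs_to_jiffies(update_interval) */
	jiffies_t now = last_updated + interval + 1;   /* one tick past the interval */

	printf("old formula (+ 1) refreshes: %s\n",
	       time_after(now, last_updated + interval + 1) ? "yes" : "no");
	printf("new formula refreshes:       %s\n",
	       time_after(now, last_updated + interval) ? "yes" : "no");
	return 0;
}
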
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 21fbb340ad66..c41ca6354fc5 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -383,7 +383,8 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
383 /* Enable the adapter */ 383 /* Enable the adapter */
384 __i2c_dw_enable(dev, true); 384 __i2c_dw_enable(dev, true);
385 385
386 /* Enable interrupts */ 386 /* Clear and enable interrupts */
387 i2c_dw_clear_int(dev);
387 dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK); 388 dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK);
388} 389}
389 390
@@ -448,8 +449,14 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
448 cmd |= BIT(9); 449 cmd |= BIT(9);
449 450
450 if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { 451 if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
452
453 /* avoid rx buffer overrun */
454 if (rx_limit - dev->rx_outstanding <= 0)
455 break;
456
451 dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); 457 dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD);
452 rx_limit--; 458 rx_limit--;
459 dev->rx_outstanding++;
453 } else 460 } else
454 dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD); 461 dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD);
455 tx_limit--; buf_len--; 462 tx_limit--; buf_len--;
@@ -502,8 +509,10 @@ i2c_dw_read(struct dw_i2c_dev *dev)
502 509
503 rx_valid = dw_readl(dev, DW_IC_RXFLR); 510 rx_valid = dw_readl(dev, DW_IC_RXFLR);
504 511
505 for (; len > 0 && rx_valid > 0; len--, rx_valid--) 512 for (; len > 0 && rx_valid > 0; len--, rx_valid--) {
506 *buf++ = dw_readl(dev, DW_IC_DATA_CMD); 513 *buf++ = dw_readl(dev, DW_IC_DATA_CMD);
514 dev->rx_outstanding--;
515 }
507 516
508 if (len > 0) { 517 if (len > 0) {
509 dev->status |= STATUS_READ_IN_PROGRESS; 518 dev->status |= STATUS_READ_IN_PROGRESS;
@@ -561,6 +570,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
561 dev->msg_err = 0; 570 dev->msg_err = 0;
562 dev->status = STATUS_IDLE; 571 dev->status = STATUS_IDLE;
563 dev->abort_source = 0; 572 dev->abort_source = 0;
573 dev->rx_outstanding = 0;
564 574
565 ret = i2c_dw_wait_bus_not_busy(dev); 575 ret = i2c_dw_wait_bus_not_busy(dev);
566 if (ret < 0) 576 if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 9c1840ee09c7..e761ad18dd61 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -60,6 +60,7 @@
60 * @adapter: i2c subsystem adapter node 60 * @adapter: i2c subsystem adapter node
61 * @tx_fifo_depth: depth of the hardware tx fifo 61 * @tx_fifo_depth: depth of the hardware tx fifo
62 * @rx_fifo_depth: depth of the hardware rx fifo 62 * @rx_fifo_depth: depth of the hardware rx fifo
63 * @rx_outstanding: current master-rx elements in tx fifo
63 */ 64 */
64struct dw_i2c_dev { 65struct dw_i2c_dev {
65 struct device *dev; 66 struct device *dev;
@@ -88,6 +89,7 @@ struct dw_i2c_dev {
88 u32 master_cfg; 89 u32 master_cfg;
89 unsigned int tx_fifo_depth; 90 unsigned int tx_fifo_depth;
90 unsigned int rx_fifo_depth; 91 unsigned int rx_fifo_depth;
92 int rx_outstanding;
91}; 93};
92 94
93#define ACCESS_SWAP 0x00000001 95#define ACCESS_SWAP 0x00000001
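
The designware changes above add simple flow control for master reads: every read command placed in the TX FIFO will eventually produce one byte in the RX FIFO, so rx_outstanding counts commands issued but not yet drained and the transmit loop breaks before the RX FIFO could overrun. A toy stand-alone model of that bookkeeping, with the depth and counts invented; in the real driver the queueing and draining are interleaved across interrupts:

#include <stdio.h>

/* Toy model of the rx_outstanding bookkeeping: a read command may only be
 * queued while the RX FIFO still has room for the byte it will produce. */
#define RX_FIFO_DEPTH 8

int main(void)
{
	int rx_outstanding = 0;   /* read commands issued but not yet drained */
	int queued = 0, wanted = 20;

	/* queue phase: stop before the RX FIFO could overrun */
	while (queued < wanted && RX_FIFO_DEPTH - rx_outstanding > 0) {
		rx_outstanding++;   /* one future RX byte per read command */
		queued++;
	}
	printf("queued %d read commands, %d outstanding\n", queued, rx_outstanding);

	/* drain phase: each byte read out of the RX FIFO retires one command */
	while (rx_outstanding > 0)
		rx_outstanding--;
	printf("outstanding after drain: %d\n", rx_outstanding);
	return 0;
}
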
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 8ec91335d95a..35b70a1edf57 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -69,6 +69,7 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
69static const struct acpi_device_id dw_i2c_acpi_match[] = { 69static const struct acpi_device_id dw_i2c_acpi_match[] = {
70 { "INT33C2", 0 }, 70 { "INT33C2", 0 },
71 { "INT33C3", 0 }, 71 { "INT33C3", 0 },
72 { "80860F41", 0 },
72 { } 73 { }
73}; 74};
74MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match); 75MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index e1cf2e0e1f23..3a6903f63913 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -231,7 +231,11 @@ static const char *i801_feature_names[] = {
231 231
232static unsigned int disable_features; 232static unsigned int disable_features;
233module_param(disable_features, uint, S_IRUGO | S_IWUSR); 233module_param(disable_features, uint, S_IRUGO | S_IWUSR);
234MODULE_PARM_DESC(disable_features, "Disable selected driver features"); 234MODULE_PARM_DESC(disable_features, "Disable selected driver features:\n"
235 "\t\t 0x01 disable SMBus PEC\n"
236 "\t\t 0x02 disable the block buffer\n"
237 "\t\t 0x08 disable the I2C block read functionality\n"
238 "\t\t 0x10 don't use interrupts ");
235 239
236/* Make sure the SMBus host is ready to start transmitting. 240/* Make sure the SMBus host is ready to start transmitting.
237 Return 0 if it is, -EBUSY if it is not. */ 241 Return 0 if it is, -EBUSY if it is not. */
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index 3bbd65d35a5e..1a3abd6a0bfc 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -252,7 +252,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
252 writel(drv_data->cntl_bits, 252 writel(drv_data->cntl_bits,
253 drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); 253 drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
254 drv_data->block = 0; 254 drv_data->block = 0;
255 wake_up_interruptible(&drv_data->waitq); 255 wake_up(&drv_data->waitq);
256 break; 256 break;
257 257
258 case MV64XXX_I2C_ACTION_CONTINUE: 258 case MV64XXX_I2C_ACTION_CONTINUE:
@@ -300,7 +300,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
300 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, 300 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
301 drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); 301 drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
302 drv_data->block = 0; 302 drv_data->block = 0;
303 wake_up_interruptible(&drv_data->waitq); 303 wake_up(&drv_data->waitq);
304 break; 304 break;
305 305
306 case MV64XXX_I2C_ACTION_INVALID: 306 case MV64XXX_I2C_ACTION_INVALID:
@@ -315,7 +315,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
315 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP, 315 writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_STOP,
316 drv_data->reg_base + MV64XXX_I2C_REG_CONTROL); 316 drv_data->reg_base + MV64XXX_I2C_REG_CONTROL);
317 drv_data->block = 0; 317 drv_data->block = 0;
318 wake_up_interruptible(&drv_data->waitq); 318 wake_up(&drv_data->waitq);
319 break; 319 break;
320 } 320 }
321} 321}
@@ -381,7 +381,7 @@ mv64xxx_i2c_wait_for_completion(struct mv64xxx_i2c_data *drv_data)
381 unsigned long flags; 381 unsigned long flags;
382 char abort = 0; 382 char abort = 0;
383 383
384 time_left = wait_event_interruptible_timeout(drv_data->waitq, 384 time_left = wait_event_timeout(drv_data->waitq,
385 !drv_data->block, drv_data->adapter.timeout); 385 !drv_data->block, drv_data->adapter.timeout);
386 386
387 spin_lock_irqsave(&drv_data->lock, flags); 387 spin_lock_irqsave(&drv_data->lock, flags);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 6e8ee92ab553..cab1c91b75a3 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -1082,11 +1082,6 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
1082 /* map the registers */ 1082 /* map the registers */
1083 1083
1084 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1084 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1085 if (res == NULL) {
1086 dev_err(&pdev->dev, "cannot find IO resource\n");
1087 return -ENOENT;
1088 }
1089
1090 i2c->regs = devm_ioremap_resource(&pdev->dev, res); 1085 i2c->regs = devm_ioremap_resource(&pdev->dev, res);
1091 1086
1092 if (IS_ERR(i2c->regs)) 1087 if (IS_ERR(i2c->regs))
diff --git a/drivers/i2c/busses/i2c-sirf.c b/drivers/i2c/busses/i2c-sirf.c
index 5a7ad240bd26..a63c7d506836 100644
--- a/drivers/i2c/busses/i2c-sirf.c
+++ b/drivers/i2c/busses/i2c-sirf.c
@@ -303,12 +303,6 @@ static int i2c_sirfsoc_probe(struct platform_device *pdev)
303 adap->class = I2C_CLASS_HWMON; 303 adap->class = I2C_CLASS_HWMON;
304 304
305 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 305 mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
306 if (mem_res == NULL) {
307 dev_err(&pdev->dev, "Unable to get MEM resource\n");
308 err = -EINVAL;
309 goto out;
310 }
311
312 siic->base = devm_ioremap_resource(&pdev->dev, mem_res); 306 siic->base = devm_ioremap_resource(&pdev->dev, mem_res);
313 if (IS_ERR(siic->base)) { 307 if (IS_ERR(siic->base)) {
314 err = PTR_ERR(siic->base); 308 err = PTR_ERR(siic->base);
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index b60ff90adc39..9aa1b60f7fdd 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -714,11 +714,6 @@ static int tegra_i2c_probe(struct platform_device *pdev)
714 int ret = 0; 714 int ret = 0;
715 715
716 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 716 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
717 if (!res) {
718 dev_err(&pdev->dev, "no mem resource\n");
719 return -EINVAL;
720 }
721
722 base = devm_ioremap_resource(&pdev->dev, res); 717 base = devm_ioremap_resource(&pdev->dev, res);
723 if (IS_ERR(base)) 718 if (IS_ERR(base))
724 return PTR_ERR(base); 719 return PTR_ERR(base);
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 6b63cc7eb71e..48e31ed69dbf 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -892,7 +892,8 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
892} 892}
893 893
894static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); 894static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
895static DEVICE_ATTR(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device); 895static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
896 i2c_sysfs_delete_device);
896 897
897static struct attribute *i2c_adapter_attrs[] = { 898static struct attribute *i2c_adapter_attrs[] = {
898 &dev_attr_name.attr, 899 &dev_attr_name.attr,
diff --git a/drivers/iio/adc/exynos_adc.c b/drivers/iio/adc/exynos_adc.c
index 9f3a8ef1fb3e..b3d03d335948 100644
--- a/drivers/iio/adc/exynos_adc.c
+++ b/drivers/iio/adc/exynos_adc.c
@@ -390,8 +390,8 @@ static int exynos_adc_remove(struct platform_device *pdev)
390#ifdef CONFIG_PM_SLEEP 390#ifdef CONFIG_PM_SLEEP
391static int exynos_adc_suspend(struct device *dev) 391static int exynos_adc_suspend(struct device *dev)
392{ 392{
393 struct platform_device *pdev = to_platform_device(dev); 393 struct iio_dev *indio_dev = dev_get_drvdata(dev);
394 struct exynos_adc *info = platform_get_drvdata(pdev); 394 struct exynos_adc *info = iio_priv(indio_dev);
395 u32 con; 395 u32 con;
396 396
397 if (info->version == ADC_V2) { 397 if (info->version == ADC_V2) {
@@ -413,8 +413,8 @@ static int exynos_adc_suspend(struct device *dev)
413 413
414static int exynos_adc_resume(struct device *dev) 414static int exynos_adc_resume(struct device *dev)
415{ 415{
416 struct platform_device *pdev = to_platform_device(dev); 416 struct iio_dev *indio_dev = dev_get_drvdata(dev);
417 struct exynos_adc *info = platform_get_drvdata(pdev); 417 struct exynos_adc *info = iio_priv(indio_dev);
418 int ret; 418 int ret;
419 419
420 ret = regulator_enable(info->vdd); 420 ret = regulator_enable(info->vdd);
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index bd33473f8e38..ed9bc8ae9330 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -312,6 +312,8 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev,
312 goto read_error; 312 goto read_error;
313 313
314 *val = *val >> ch->scan_type.shift; 314 *val = *val >> ch->scan_type.shift;
315
316 err = st_sensors_set_enable(indio_dev, false);
315 } 317 }
316 mutex_unlock(&indio_dev->mlock); 318 mutex_unlock(&indio_dev->mlock);
317 319
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index f4a6f0838327..b61160bd935e 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -5,7 +5,7 @@ menu "Digital to analog converters"
5 5
6config AD5064 6config AD5064
7 tristate "Analog Devices AD5064 and similar multi-channel DAC driver" 7 tristate "Analog Devices AD5064 and similar multi-channel DAC driver"
8 depends on (SPI_MASTER || I2C) 8 depends on (SPI_MASTER && I2C!=m) || I2C
9 help 9 help
10 Say yes here to build support for Analog Devices AD5024, AD5025, AD5044, 10 Say yes here to build support for Analog Devices AD5024, AD5025, AD5044,
11 AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668, 11 AD5045, AD5064, AD5064-1, AD5065, AD5628, AD5629R, AD5648, AD5666, AD5668,
@@ -27,7 +27,7 @@ config AD5360
27 27
28config AD5380 28config AD5380
29 tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver" 29 tristate "Analog Devices AD5380/81/82/83/84/90/91/92 DAC driver"
30 depends on (SPI_MASTER || I2C) 30 depends on (SPI_MASTER && I2C!=m) || I2C
31 select REGMAP_I2C if I2C 31 select REGMAP_I2C if I2C
32 select REGMAP_SPI if SPI_MASTER 32 select REGMAP_SPI if SPI_MASTER
33 help 33 help
@@ -57,7 +57,7 @@ config AD5624R_SPI
57 57
58config AD5446 58config AD5446
59 tristate "Analog Devices AD5446 and similar single channel DACs driver" 59 tristate "Analog Devices AD5446 and similar single channel DACs driver"
60 depends on (SPI_MASTER || I2C) 60 depends on (SPI_MASTER && I2C!=m) || I2C
61 help 61 help
62 Say yes here to build support for Analog Devices AD5300, AD5301, AD5310, 62 Say yes here to build support for Analog Devices AD5300, AD5301, AD5310,
63 AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453, 63 AD5311, AD5320, AD5321, AD5444, AD5446, AD5450, AD5451, AD5452, AD5453,
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 0bfd8cf25200..5c68e4486845 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -342,10 +342,10 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
342 wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) | 342 wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
343 ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12); 343 ((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);
344 344
345 switch (wacom->id[idx] & 0xfffff) { 345 switch (wacom->id[idx]) {
346 case 0x812: /* Inking pen */ 346 case 0x812: /* Inking pen */
347 case 0x801: /* Intuos3 Inking pen */ 347 case 0x801: /* Intuos3 Inking pen */
348 case 0x20802: /* Intuos4 Inking Pen */ 348 case 0x120802: /* Intuos4/5 Inking Pen */
349 case 0x012: 349 case 0x012:
350 wacom->tool[idx] = BTN_TOOL_PENCIL; 350 wacom->tool[idx] = BTN_TOOL_PENCIL;
351 break; 351 break;
@@ -356,11 +356,13 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
356 case 0x823: /* Intuos3 Grip Pen */ 356 case 0x823: /* Intuos3 Grip Pen */
357 case 0x813: /* Intuos3 Classic Pen */ 357 case 0x813: /* Intuos3 Classic Pen */
358 case 0x885: /* Intuos3 Marker Pen */ 358 case 0x885: /* Intuos3 Marker Pen */
359 case 0x802: /* Intuos4 General Pen */ 359 case 0x802: /* Intuos4/5 13HD/24HD General Pen */
360 case 0x804: /* Intuos4 Marker Pen */ 360 case 0x804: /* Intuos4/5 13HD/24HD Marker Pen */
361 case 0x40802: /* Intuos4 Classic Pen */
362 case 0x18802: /* DTH2242 Grip Pen */
363 case 0x022: 361 case 0x022:
362 case 0x100804: /* Intuos4/5 13HD/24HD Art Pen */
363 case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
364 case 0x160802: /* Cintiq 13HD Pro Pen */
365 case 0x180802: /* DTH2242 Pen */
364 wacom->tool[idx] = BTN_TOOL_PEN; 366 wacom->tool[idx] = BTN_TOOL_PEN;
365 break; 367 break;
366 368
@@ -391,10 +393,14 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
391 case 0x82b: /* Intuos3 Grip Pen Eraser */ 393 case 0x82b: /* Intuos3 Grip Pen Eraser */
392 case 0x81b: /* Intuos3 Classic Pen Eraser */ 394 case 0x81b: /* Intuos3 Classic Pen Eraser */
393 case 0x91b: /* Intuos3 Airbrush Eraser */ 395 case 0x91b: /* Intuos3 Airbrush Eraser */
394 case 0x80c: /* Intuos4 Marker Pen Eraser */ 396 case 0x80c: /* Intuos4/5 13HD/24HD Marker Pen Eraser */
395 case 0x80a: /* Intuos4 General Pen Eraser */ 397 case 0x80a: /* Intuos4/5 13HD/24HD General Pen Eraser */
396 case 0x4080a: /* Intuos4 Classic Pen Eraser */ 398 case 0x90a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
397 case 0x90a: /* Intuos4 Airbrush Eraser */ 399 case 0x14080a: /* Intuos4/5 13HD/24HD Classic Pen Eraser */
400 case 0x10090a: /* Intuos4/5 13HD/24HD Airbrush Eraser */
401 case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
402 case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
403 case 0x18080a: /* DTH2242 Eraser */
398 wacom->tool[idx] = BTN_TOOL_RUBBER; 404 wacom->tool[idx] = BTN_TOOL_RUBBER;
399 break; 405 break;
400 406
@@ -402,7 +408,8 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
402 case 0x912: 408 case 0x912:
403 case 0x112: 409 case 0x112:
404 case 0x913: /* Intuos3 Airbrush */ 410 case 0x913: /* Intuos3 Airbrush */
405 case 0x902: /* Intuos4 Airbrush */ 411 case 0x902: /* Intuos4/5 13HD/24HD Airbrush */
412 case 0x100902: /* Intuos4/5 13HD/24HD Airbrush */
406 wacom->tool[idx] = BTN_TOOL_AIRBRUSH; 413 wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
407 break; 414 break;
408 415
@@ -533,10 +540,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
533 input_report_key(input, BTN_8, (data[3] & 0x80)); 540 input_report_key(input, BTN_8, (data[3] & 0x80));
534 } 541 }
535 if (data[1] | (data[2] & 0x01) | data[3]) { 542 if (data[1] | (data[2] & 0x01) | data[3]) {
536 input_report_key(input, wacom->tool[1], 1);
537 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); 543 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
538 } else { 544 } else {
539 input_report_key(input, wacom->tool[1], 0);
540 input_report_abs(input, ABS_MISC, 0); 545 input_report_abs(input, ABS_MISC, 0);
541 } 546 }
542 } else if (features->type == DTK) { 547 } else if (features->type == DTK) {
@@ -546,6 +551,26 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
546 input_report_key(input, BTN_3, (data[6] & 0x08)); 551 input_report_key(input, BTN_3, (data[6] & 0x08));
547 input_report_key(input, BTN_4, (data[6] & 0x10)); 552 input_report_key(input, BTN_4, (data[6] & 0x10));
548 input_report_key(input, BTN_5, (data[6] & 0x20)); 553 input_report_key(input, BTN_5, (data[6] & 0x20));
554 if (data[6] & 0x3f) {
555 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
556 } else {
557 input_report_abs(input, ABS_MISC, 0);
558 }
559 } else if (features->type == WACOM_13HD) {
560 input_report_key(input, BTN_0, (data[3] & 0x01));
561 input_report_key(input, BTN_1, (data[4] & 0x01));
562 input_report_key(input, BTN_2, (data[4] & 0x02));
563 input_report_key(input, BTN_3, (data[4] & 0x04));
564 input_report_key(input, BTN_4, (data[4] & 0x08));
565 input_report_key(input, BTN_5, (data[4] & 0x10));
566 input_report_key(input, BTN_6, (data[4] & 0x20));
567 input_report_key(input, BTN_7, (data[4] & 0x40));
568 input_report_key(input, BTN_8, (data[4] & 0x80));
569 if ((data[3] & 0x01) | data[4]) {
570 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
571 } else {
572 input_report_abs(input, ABS_MISC, 0);
573 }
549 } else if (features->type == WACOM_24HD) { 574 } else if (features->type == WACOM_24HD) {
550 input_report_key(input, BTN_0, (data[6] & 0x01)); 575 input_report_key(input, BTN_0, (data[6] & 0x01));
551 input_report_key(input, BTN_1, (data[6] & 0x02)); 576 input_report_key(input, BTN_1, (data[6] & 0x02));
@@ -590,10 +615,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
590 } 615 }
591 616
592 if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) { 617 if (data[1] | data[2] | (data[3] & 0x1f) | data[4] | data[6] | data[8]) {
593 input_report_key(input, wacom->tool[1], 1);
594 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); 618 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
595 } else { 619 } else {
596 input_report_key(input, wacom->tool[1], 0);
597 input_report_abs(input, ABS_MISC, 0); 620 input_report_abs(input, ABS_MISC, 0);
598 } 621 }
599 } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) { 622 } else if (features->type >= INTUOS5S && features->type <= INTUOS5L) {
@@ -618,10 +641,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
618 } 641 }
619 642
620 if (data[2] | (data[3] & 0x01) | data[4] | data[5]) { 643 if (data[2] | (data[3] & 0x01) | data[4] | data[5]) {
621 input_report_key(input, wacom->tool[1], 1);
622 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); 644 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
623 } else { 645 } else {
624 input_report_key(input, wacom->tool[1], 0);
625 input_report_abs(input, ABS_MISC, 0); 646 input_report_abs(input, ABS_MISC, 0);
626 } 647 }
627 } else { 648 } else {
@@ -668,10 +689,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
668 if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) | 689 if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
669 data[2] | (data[3] & 0x1f) | data[4] | data[8] | 690 data[2] | (data[3] & 0x1f) | data[4] | data[8] |
670 (data[7] & 0x01)) { 691 (data[7] & 0x01)) {
671 input_report_key(input, wacom->tool[1], 1);
672 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); 692 input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
673 } else { 693 } else {
674 input_report_key(input, wacom->tool[1], 0);
675 input_report_abs(input, ABS_MISC, 0); 694 input_report_abs(input, ABS_MISC, 0);
676 } 695 }
677 } 696 }
@@ -1301,6 +1320,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
1301 case INTUOS4L: 1320 case INTUOS4L:
1302 case CINTIQ: 1321 case CINTIQ:
1303 case WACOM_BEE: 1322 case WACOM_BEE:
1323 case WACOM_13HD:
1304 case WACOM_21UX2: 1324 case WACOM_21UX2:
1305 case WACOM_22HD: 1325 case WACOM_22HD:
1306 case WACOM_24HD: 1326 case WACOM_24HD:
@@ -1530,15 +1550,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1530 __set_bit(KEY_PROG1, input_dev->keybit); 1550 __set_bit(KEY_PROG1, input_dev->keybit);
1531 __set_bit(KEY_PROG2, input_dev->keybit); 1551 __set_bit(KEY_PROG2, input_dev->keybit);
1532 __set_bit(KEY_PROG3, input_dev->keybit); 1552 __set_bit(KEY_PROG3, input_dev->keybit);
1553
1554 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1555 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
1533 /* fall through */ 1556 /* fall through */
1534 1557
1535 case DTK: 1558 case DTK:
1536 for (i = 0; i < 6; i++) 1559 for (i = 0; i < 6; i++)
1537 __set_bit(BTN_0 + i, input_dev->keybit); 1560 __set_bit(BTN_0 + i, input_dev->keybit);
1538 1561
1539 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1540 input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
1541
1542 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); 1562 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1543 1563
1544 wacom_setup_cintiq(wacom_wac); 1564 wacom_setup_cintiq(wacom_wac);
@@ -1579,6 +1599,15 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
1579 wacom_setup_cintiq(wacom_wac); 1599 wacom_setup_cintiq(wacom_wac);
1580 break; 1600 break;
1581 1601
1602 case WACOM_13HD:
1603 for (i = 0; i < 9; i++)
1604 __set_bit(BTN_0 + i, input_dev->keybit);
1605
1606 input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
1607 __set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
1608 wacom_setup_cintiq(wacom_wac);
1609 break;
1610
1582 case INTUOS3: 1611 case INTUOS3:
1583 case INTUOS3L: 1612 case INTUOS3L:
1584 __set_bit(BTN_4, input_dev->keybit); 1613 __set_bit(BTN_4, input_dev->keybit);
@@ -1950,6 +1979,9 @@ static const struct wacom_features wacom_features_0xC5 =
1950static const struct wacom_features wacom_features_0xC6 = 1979static const struct wacom_features wacom_features_0xC6 =
1951 { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 1980 { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023,
1952 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 1981 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1982static const struct wacom_features wacom_features_0x304 =
1983 { "Wacom Cintiq 13HD", WACOM_PKGLEN_INTUOS, 59552, 33848, 1023,
1984 63, WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
1953static const struct wacom_features wacom_features_0xC7 = 1985static const struct wacom_features wacom_features_0xC7 =
1954 { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 1986 { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511,
1955 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1987 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -1959,6 +1991,9 @@ static const struct wacom_features wacom_features_0xCE =
1959static const struct wacom_features wacom_features_0xF0 = 1991static const struct wacom_features wacom_features_0xF0 =
1960 { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 1992 { "Wacom DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511,
1961 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 1993 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
1994static const struct wacom_features wacom_features_0x57 =
1995 { "Wacom DTK2241", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
1996 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES};
1962static const struct wacom_features wacom_features_0x59 = /* Pen */ 1997static const struct wacom_features wacom_features_0x59 = /* Pen */
1963 { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 1998 { "Wacom DTH2242", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
1964 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 1999 63, DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
@@ -1972,6 +2007,12 @@ static const struct wacom_features wacom_features_0xCC =
1972static const struct wacom_features wacom_features_0xFA = 2007static const struct wacom_features wacom_features_0xFA =
1973 { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047, 2008 { "Wacom Cintiq 22HD", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
1974 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; 2009 63, WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
2010static const struct wacom_features wacom_features_0x5B =
2011 { "Wacom Cintiq 22HDT", WACOM_PKGLEN_INTUOS, 95840, 54260, 2047,
2012 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
2013static const struct wacom_features wacom_features_0x5E =
2014 { "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
2015 .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5b, .touch_max = 10 };
1975static const struct wacom_features wacom_features_0x90 = 2016static const struct wacom_features wacom_features_0x90 =
1976 { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 2017 { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255,
1977 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; 2018 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2143,8 +2184,11 @@ const struct usb_device_id wacom_ids[] = {
2143 { USB_DEVICE_WACOM(0x43) }, 2184 { USB_DEVICE_WACOM(0x43) },
2144 { USB_DEVICE_WACOM(0x44) }, 2185 { USB_DEVICE_WACOM(0x44) },
2145 { USB_DEVICE_WACOM(0x45) }, 2186 { USB_DEVICE_WACOM(0x45) },
2187 { USB_DEVICE_WACOM(0x57) },
2146 { USB_DEVICE_WACOM(0x59) }, 2188 { USB_DEVICE_WACOM(0x59) },
2147 { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) }, 2189 { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
2190 { USB_DEVICE_WACOM(0x5B) },
2191 { USB_DEVICE_DETAILED(0x5E, USB_CLASS_HID, 0, 0) },
2148 { USB_DEVICE_WACOM(0xB0) }, 2192 { USB_DEVICE_WACOM(0xB0) },
2149 { USB_DEVICE_WACOM(0xB1) }, 2193 { USB_DEVICE_WACOM(0xB1) },
2150 { USB_DEVICE_WACOM(0xB2) }, 2194 { USB_DEVICE_WACOM(0xB2) },
@@ -2205,6 +2249,7 @@ const struct usb_device_id wacom_ids[] = {
2205 { USB_DEVICE_WACOM(0x100) }, 2249 { USB_DEVICE_WACOM(0x100) },
2206 { USB_DEVICE_WACOM(0x101) }, 2250 { USB_DEVICE_WACOM(0x101) },
2207 { USB_DEVICE_WACOM(0x10D) }, 2251 { USB_DEVICE_WACOM(0x10D) },
2252 { USB_DEVICE_WACOM(0x304) },
2208 { USB_DEVICE_WACOM(0x4001) }, 2253 { USB_DEVICE_WACOM(0x4001) },
2209 { USB_DEVICE_WACOM(0x47) }, 2254 { USB_DEVICE_WACOM(0x47) },
2210 { USB_DEVICE_WACOM(0xF4) }, 2255 { USB_DEVICE_WACOM(0xF4) },
diff --git a/drivers/input/tablet/wacom_wac.h b/drivers/input/tablet/wacom_wac.h
index 5f9a7721e16c..dfc9e08e7f70 100644
--- a/drivers/input/tablet/wacom_wac.h
+++ b/drivers/input/tablet/wacom_wac.h
@@ -82,6 +82,7 @@ enum {
82 WACOM_24HD, 82 WACOM_24HD,
83 CINTIQ, 83 CINTIQ,
84 WACOM_BEE, 84 WACOM_BEE,
85 WACOM_13HD,
85 WACOM_MO, 86 WACOM_MO,
86 WIRELESS, 87 WIRELESS,
87 BAMBOO_PT, 88 BAMBOO_PT,
diff --git a/drivers/input/touchscreen/egalax_ts.c b/drivers/input/touchscreen/egalax_ts.c
index 17c9097f3b5d..39f3df8670c3 100644
--- a/drivers/input/touchscreen/egalax_ts.c
+++ b/drivers/input/touchscreen/egalax_ts.c
@@ -216,7 +216,7 @@ static int egalax_ts_probe(struct i2c_client *client,
216 input_set_abs_params(input_dev, 216 input_set_abs_params(input_dev,
217 ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0); 217 ABS_MT_POSITION_X, 0, EGALAX_MAX_X, 0, 0);
218 input_set_abs_params(input_dev, 218 input_set_abs_params(input_dev,
219 ABS_MT_POSITION_X, 0, EGALAX_MAX_Y, 0, 0); 219 ABS_MT_POSITION_Y, 0, EGALAX_MAX_Y, 0, 0);
220 input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0); 220 input_mt_init_slots(input_dev, MAX_SUPPORT_POINTS, 0);
221 221
222 input_set_drvdata(input_dev, ts); 222 input_set_drvdata(input_dev, ts);
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index 9b1b274c7d25..c123709acf82 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -93,7 +93,7 @@ capi_ctr_put(struct capi_ctr *ctr)
93 93
94static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr) 94static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
95{ 95{
96 if (contr - 1 >= CAPI_MAXCONTR) 96 if (contr < 1 || contr - 1 >= CAPI_MAXCONTR)
97 return NULL; 97 return NULL;
98 98
99 return capi_controller[contr - 1]; 99 return capi_controller[contr - 1];
@@ -103,7 +103,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
103{ 103{
104 lockdep_assert_held(&capi_controller_lock); 104 lockdep_assert_held(&capi_controller_lock);
105 105
106 if (applid - 1 >= CAPI_MAXAPPL) 106 if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
107 return NULL; 107 return NULL;
108 108
109 return capi_applications[applid - 1]; 109 return capi_applications[applid - 1];
@@ -111,7 +111,7 @@ static inline struct capi20_appl *__get_capi_appl_by_nr(u16 applid)
111 111
112static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid) 112static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
113{ 113{
114 if (applid - 1 >= CAPI_MAXAPPL) 114 if (applid < 1 || applid - 1 >= CAPI_MAXAPPL)
115 return NULL; 115 return NULL;
116 116
117 return rcu_dereference(capi_applications[applid - 1]); 117 return rcu_dereference(capi_applications[applid - 1]);
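The kcapi.c hunks above tighten the controller/application lookups: with a u16 index and a signed bound, `contr - 1 >= CAPI_MAXCONTR` does not reject `contr == 0`, because the u16 is promoted to int before the subtraction, so `0 - 1` is -1 and the comparison never trips, letting a zero id reach `capi_controller[-1]`. A minimal standalone sketch of the pitfall (the CAPI_MAXCONTR value below is illustrative, not the kernel's definition):

#include <stdio.h>

#define CAPI_MAXCONTR 32 /* illustrative bound, not the kernel's value */

static int old_valid(unsigned short contr)
{
	/* contr is promoted to int, so contr == 0 gives -1 >= 32 -> false */
	return !(contr - 1 >= CAPI_MAXCONTR);
}

static int new_valid(unsigned short contr)
{
	return !(contr < 1 || contr - 1 >= CAPI_MAXCONTR);
}

int main(void)
{
	printf("contr=0: old check says %s, new check says %s\n",
	       old_valid(0) ? "valid" : "invalid",
	       new_valid(0) ? "valid" : "invalid");
	return 0;
}

Run as-is, the old check accepts 0 and the new one rejects it, which is exactly the out-of-bounds access the patch closes.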
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index a0d931bcb37c..b02b679abf31 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -107,6 +107,10 @@ static int create_gpio_led(const struct gpio_led *template,
107 return 0; 107 return 0;
108 } 108 }
109 109
110 ret = devm_gpio_request(parent, template->gpio, template->name);
111 if (ret < 0)
112 return ret;
113
110 led_dat->cdev.name = template->name; 114 led_dat->cdev.name = template->name;
111 led_dat->cdev.default_trigger = template->default_trigger; 115 led_dat->cdev.default_trigger = template->default_trigger;
112 led_dat->gpio = template->gpio; 116 led_dat->gpio = template->gpio;
@@ -126,10 +130,7 @@ static int create_gpio_led(const struct gpio_led *template,
126 if (!template->retain_state_suspended) 130 if (!template->retain_state_suspended)
127 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; 131 led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
128 132
129 ret = devm_gpio_request_one(parent, template->gpio, 133 ret = gpio_direction_output(led_dat->gpio, led_dat->active_low ^ state);
130 (led_dat->active_low ^ state) ?
131 GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
132 template->name);
133 if (ret < 0) 134 if (ret < 0)
134 return ret; 135 return ret;
135 136
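The leds-gpio hunk moves the GPIO request ahead of the classdev setup and programs the output direction only afterwards, instead of doing both in a single devm_gpio_request_one() call. A rough sketch of that ordering with the legacy integer GPIO API (function and parameter names are hypothetical):

#include <linux/gpio.h>
#include <linux/device.h>

static int hypothetical_led_setup(struct device *parent, unsigned gpio,
				  const char *name, int active_low, int state)
{
	int ret;

	/* claim the line first, so later failures don't leave it half-configured */
	ret = devm_gpio_request(parent, gpio, name);
	if (ret < 0)
		return ret;

	/* ... fill in the led_classdev fields here ... */

	/* drive the initial level once everything else is set up */
	return gpio_direction_output(gpio, active_low ^ state);
}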
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
index ee14662ed5ce..98cae529373f 100644
--- a/drivers/leds/leds-ot200.c
+++ b/drivers/leds/leds-ot200.c
@@ -47,37 +47,37 @@ static struct ot200_led leds[] = {
47 { 47 {
48 .name = "led_1", 48 .name = "led_1",
49 .port = 0x49, 49 .port = 0x49,
50 .mask = BIT(7), 50 .mask = BIT(6),
51 }, 51 },
52 { 52 {
53 .name = "led_2", 53 .name = "led_2",
54 .port = 0x49, 54 .port = 0x49,
55 .mask = BIT(6), 55 .mask = BIT(5),
56 }, 56 },
57 { 57 {
58 .name = "led_3", 58 .name = "led_3",
59 .port = 0x49, 59 .port = 0x49,
60 .mask = BIT(5), 60 .mask = BIT(4),
61 }, 61 },
62 { 62 {
63 .name = "led_4", 63 .name = "led_4",
64 .port = 0x49, 64 .port = 0x49,
65 .mask = BIT(4), 65 .mask = BIT(3),
66 }, 66 },
67 { 67 {
68 .name = "led_5", 68 .name = "led_5",
69 .port = 0x49, 69 .port = 0x49,
70 .mask = BIT(3), 70 .mask = BIT(2),
71 }, 71 },
72 { 72 {
73 .name = "led_6", 73 .name = "led_6",
74 .port = 0x49, 74 .port = 0x49,
75 .mask = BIT(2), 75 .mask = BIT(1),
76 }, 76 },
77 { 77 {
78 .name = "led_7", 78 .name = "led_7",
79 .port = 0x49, 79 .port = 0x49,
80 .mask = BIT(1), 80 .mask = BIT(0),
81 } 81 }
82}; 82};
83 83
diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
index 699187ab3800..5b9ac32801c7 100644
--- a/drivers/lguest/page_tables.c
+++ b/drivers/lguest/page_tables.c
@@ -1002,6 +1002,7 @@ void guest_set_pgd(struct lguest *lg, unsigned long gpgdir, u32 idx)
1002 kill_guest(&lg->cpus[0], 1002 kill_guest(&lg->cpus[0],
1003 "Cannot populate switcher mapping"); 1003 "Cannot populate switcher mapping");
1004 } 1004 }
1005 lg->pgdirs[pgdir].last_host_cpu = -1;
1005 } 1006 }
1006} 1007}
1007 1008
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 759cffc45cab..88f2f802d528 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -2188,7 +2188,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2188 2188
2189 *need_commit = false; 2189 *need_commit = false;
2190 2190
2191 metadata_dev_size = get_metadata_dev_size(pool->md_dev); 2191 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
2192 2192
2193 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); 2193 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2194 if (r) { 2194 if (r) {
@@ -2197,7 +2197,7 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2197 } 2197 }
2198 2198
2199 if (metadata_dev_size < sb_metadata_dev_size) { 2199 if (metadata_dev_size < sb_metadata_dev_size) {
2200 DMERR("metadata device (%llu sectors) too small: expected %llu", 2200 DMERR("metadata device (%llu blocks) too small: expected %llu",
2201 metadata_dev_size, sb_metadata_dev_size); 2201 metadata_dev_size, sb_metadata_dev_size);
2202 return -EINVAL; 2202 return -EINVAL;
2203 2203
diff --git a/drivers/memory/emif.c b/drivers/memory/emif.c
index cadf1cc19aaf..04644e7b42b1 100644
--- a/drivers/memory/emif.c
+++ b/drivers/memory/emif.c
@@ -1560,12 +1560,6 @@ static int __init_or_module emif_probe(struct platform_device *pdev)
1560 platform_set_drvdata(pdev, emif); 1560 platform_set_drvdata(pdev, emif);
1561 1561
1562 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1562 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1563 if (!res) {
1564 dev_err(emif->dev, "%s: error getting memory resource\n",
1565 __func__);
1566 goto error;
1567 }
1568
1569 emif->base = devm_ioremap_resource(emif->dev, res); 1563 emif->base = devm_ioremap_resource(emif->dev, res);
1570 if (IS_ERR(emif->base)) 1564 if (IS_ERR(emif->base))
1571 goto error; 1565 goto error;
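Several hunks in this series (emif.c here, plus intel_msic.c and lpc32xx_mlc.c further down) drop the explicit NULL check after platform_get_resource(): devm_ioremap_resource() already validates the resource, prints its own error, and returns an ERR_PTR, so the probe path collapses to the pattern sketched below (struct and variable names are placeholders):

#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/err.h>

static int hypothetical_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	/* devm_ioremap_resource() handles res == NULL and reports the error */
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base ... */
	return 0;
}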
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index d9aed1593e5d..d54e985748b7 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -579,7 +579,7 @@ config AB8500_CORE
579 579
580config AB8500_DEBUG 580config AB8500_DEBUG
581 bool "Enable debug info via debugfs" 581 bool "Enable debug info via debugfs"
582 depends on AB8500_CORE && DEBUG_FS 582 depends on AB8500_GPADC && DEBUG_FS
583 default y if DEBUG_FS 583 default y if DEBUG_FS
584 help 584 help
585 Select this option if you want debug information using the debug 585 Select this option if you want debug information using the debug
@@ -818,6 +818,7 @@ config MFD_TPS65910
818config MFD_TPS65912 818config MFD_TPS65912
819 bool "TI TPS65912 Power Management chip" 819 bool "TI TPS65912 Power Management chip"
820 depends on GPIOLIB 820 depends on GPIOLIB
821 select MFD_CORE
821 help 822 help
822 If you say yes here you get support for the TPS65912 series of 823 If you say yes here you get support for the TPS65912 series of
823 PM chips. 824 PM chips.
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 8e8a016effe9..258b367e3989 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -868,6 +868,15 @@ static struct resource ab8500_chargalg_resources[] = {};
868#ifdef CONFIG_DEBUG_FS 868#ifdef CONFIG_DEBUG_FS
869static struct resource ab8500_debug_resources[] = { 869static struct resource ab8500_debug_resources[] = {
870 { 870 {
871 .name = "IRQ_AB8500",
872 /*
873 * Number will be filled in. NOTE: this is deliberately
 874 * not flagged as an IRQ in order to avoid remapping using
 875 * the irqdomain in the MFD core, so that this IRQ passes

876 * unremapped to the debug code.
877 */
878 },
879 {
871 .name = "IRQ_FIRST", 880 .name = "IRQ_FIRST",
872 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK, 881 .start = AB8500_INT_MAIN_EXT_CH_NOT_OK,
873 .end = AB8500_INT_MAIN_EXT_CH_NOT_OK, 882 .end = AB8500_INT_MAIN_EXT_CH_NOT_OK,
@@ -1051,6 +1060,7 @@ static struct mfd_cell ab8500_devs[] = {
1051 }, 1060 },
1052 { 1061 {
1053 .name = "ab8500-gpadc", 1062 .name = "ab8500-gpadc",
1063 .of_compatible = "stericsson,ab8500-gpadc",
1054 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources), 1064 .num_resources = ARRAY_SIZE(ab8500_gpadc_resources),
1055 .resources = ab8500_gpadc_resources, 1065 .resources = ab8500_gpadc_resources,
1056 }, 1066 },
@@ -1097,7 +1107,7 @@ static struct mfd_cell ab8500_devs[] = {
1097 .of_compatible = "stericsson,ab8500-denc", 1107 .of_compatible = "stericsson,ab8500-denc",
1098 }, 1108 },
1099 { 1109 {
1100 .name = "ab8500-gpio", 1110 .name = "pinctrl-ab8500",
1101 .of_compatible = "stericsson,ab8500-gpio", 1111 .of_compatible = "stericsson,ab8500-gpio",
1102 }, 1112 },
1103 { 1113 {
@@ -1208,6 +1218,7 @@ static struct mfd_cell ab8505_devs[] = {
1208 }, 1218 },
1209 { 1219 {
1210 .name = "ab8500-gpadc", 1220 .name = "ab8500-gpadc",
1221 .of_compatible = "stericsson,ab8500-gpadc",
1211 .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), 1222 .num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
1212 .resources = ab8505_gpadc_resources, 1223 .resources = ab8505_gpadc_resources,
1213 }, 1224 },
@@ -1234,7 +1245,7 @@ static struct mfd_cell ab8505_devs[] = {
1234 .name = "ab8500-leds", 1245 .name = "ab8500-leds",
1235 }, 1246 },
1236 { 1247 {
1237 .name = "ab8500-gpio", 1248 .name = "pinctrl-ab8505",
1238 }, 1249 },
1239 { 1250 {
1240 .name = "ab8500-usb", 1251 .name = "ab8500-usb",
@@ -1271,6 +1282,7 @@ static struct mfd_cell ab8540_devs[] = {
1271 }, 1282 },
1272 { 1283 {
1273 .name = "ab8500-gpadc", 1284 .name = "ab8500-gpadc",
1285 .of_compatible = "stericsson,ab8500-gpadc",
1274 .num_resources = ARRAY_SIZE(ab8505_gpadc_resources), 1286 .num_resources = ARRAY_SIZE(ab8505_gpadc_resources),
1275 .resources = ab8505_gpadc_resources, 1287 .resources = ab8505_gpadc_resources,
1276 }, 1288 },
@@ -1302,7 +1314,7 @@ static struct mfd_cell ab8540_devs[] = {
1302 .resources = ab8500_temp_resources, 1314 .resources = ab8500_temp_resources,
1303 }, 1315 },
1304 { 1316 {
1305 .name = "ab8500-gpio", 1317 .name = "pinctrl-ab8540",
1306 }, 1318 },
1307 { 1319 {
1308 .name = "ab8540-usb", 1320 .name = "ab8540-usb",
@@ -1712,6 +1724,12 @@ static int ab8500_probe(struct platform_device *pdev)
1712 if (ret) 1724 if (ret)
1713 return ret; 1725 return ret;
1714 1726
 1727#ifdef CONFIG_DEBUG_FS
1728 /* Pass to debugfs */
1729 ab8500_debug_resources[0].start = ab8500->irq;
1730 ab8500_debug_resources[0].end = ab8500->irq;
1731#endif
1732
1715 if (is_ab9540(ab8500)) 1733 if (is_ab9540(ab8500))
1716 ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs, 1734 ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
1717 ARRAY_SIZE(ab9540_devs), NULL, 1735 ARRAY_SIZE(ab9540_devs), NULL,
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index b88bbbc15f1e..37b7ce4c7c3b 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -91,12 +91,10 @@
91#include <linux/ctype.h> 91#include <linux/ctype.h>
92#endif 92#endif
93 93
94/* TODO: this file should not reference IRQ_DB8500_AB8500! */
95#include <mach/irqs.h>
96
97static u32 debug_bank; 94static u32 debug_bank;
98static u32 debug_address; 95static u32 debug_address;
99 96
97static int irq_ab8500;
100static int irq_first; 98static int irq_first;
101static int irq_last; 99static int irq_last;
102static u32 *irq_count; 100static u32 *irq_count;
@@ -1589,7 +1587,7 @@ void ab8500_debug_register_interrupt(int line)
1589{ 1587{
1590 if (line < num_interrupt_lines) { 1588 if (line < num_interrupt_lines) {
1591 num_interrupts[line]++; 1589 num_interrupts[line]++;
1592 if (suspend_test_wake_cause_interrupt_is_mine(IRQ_DB8500_AB8500)) 1590 if (suspend_test_wake_cause_interrupt_is_mine(irq_ab8500))
1593 num_wake_interrupts[line]++; 1591 num_wake_interrupts[line]++;
1594 } 1592 }
1595} 1593}
@@ -2941,6 +2939,7 @@ static int ab8500_debug_probe(struct platform_device *plf)
2941 struct dentry *file; 2939 struct dentry *file;
2942 int ret = -ENOMEM; 2940 int ret = -ENOMEM;
2943 struct ab8500 *ab8500; 2941 struct ab8500 *ab8500;
2942 struct resource *res;
2944 debug_bank = AB8500_MISC; 2943 debug_bank = AB8500_MISC;
2945 debug_address = AB8500_REV_REG & 0x00FF; 2944 debug_address = AB8500_REV_REG & 0x00FF;
2946 2945
@@ -2959,6 +2958,15 @@ static int ab8500_debug_probe(struct platform_device *plf)
2959 if (!event_name) 2958 if (!event_name)
2960 goto out_freedev_attr; 2959 goto out_freedev_attr;
2961 2960
2961 res = platform_get_resource_byname(plf, 0, "IRQ_AB8500");
2962 if (!res) {
2963 dev_err(&plf->dev, "AB8500 irq not found, err %d\n",
2964 irq_first);
2965 ret = -ENXIO;
2966 goto out_freeevent_name;
2967 }
2968 irq_ab8500 = res->start;
2969
2962 irq_first = platform_get_irq_byname(plf, "IRQ_FIRST"); 2970 irq_first = platform_get_irq_byname(plf, "IRQ_FIRST");
2963 if (irq_first < 0) { 2971 if (irq_first < 0) {
2964 dev_err(&plf->dev, "First irq not found, err %d\n", 2972 dev_err(&plf->dev, "First irq not found, err %d\n",
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index 5e65b28a5d09..13f7866de46e 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -907,14 +907,17 @@ static int ab8500_gpadc_suspend(struct device *dev)
907static int ab8500_gpadc_resume(struct device *dev) 907static int ab8500_gpadc_resume(struct device *dev)
908{ 908{
909 struct ab8500_gpadc *gpadc = dev_get_drvdata(dev); 909 struct ab8500_gpadc *gpadc = dev_get_drvdata(dev);
910 int ret;
910 911
911 regulator_enable(gpadc->regu); 912 ret = regulator_enable(gpadc->regu);
913 if (ret)
914 dev_err(dev, "Failed to enable vtvout LDO: %d\n", ret);
912 915
913 pm_runtime_mark_last_busy(gpadc->dev); 916 pm_runtime_mark_last_busy(gpadc->dev);
914 pm_runtime_put_autosuspend(gpadc->dev); 917 pm_runtime_put_autosuspend(gpadc->dev);
915 918
916 mutex_unlock(&gpadc->ab8500_gpadc_lock); 919 mutex_unlock(&gpadc->ab8500_gpadc_lock);
917 return 0; 920 return ret;
918} 921}
919 922
920static int ab8500_gpadc_probe(struct platform_device *pdev) 923static int ab8500_gpadc_probe(struct platform_device *pdev)
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index fbca1ced49fa..8e0dae59844d 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -23,7 +23,7 @@
23 23
24static struct device *sysctrl_dev; 24static struct device *sysctrl_dev;
25 25
26void ab8500_power_off(void) 26static void ab8500_power_off(void)
27{ 27{
28 sigset_t old; 28 sigset_t old;
29 sigset_t all; 29 sigset_t all;
@@ -104,7 +104,7 @@ void ab8500_restart(char mode, const char *cmd)
104 104
105 plat = dev_get_platdata(sysctrl_dev->parent); 105 plat = dev_get_platdata(sysctrl_dev->parent);
106 pdata = plat->sysctrl; 106 pdata = plat->sysctrl;
107 if (pdata->reboot_reason_code) 107 if (pdata && pdata->reboot_reason_code)
108 reason = pdata->reboot_reason_code(cmd); 108 reason = pdata->reboot_reason_code(cmd);
109 else 109 else
110 pr_warn("[%s] No reboot reason set. Default reason %d\n", 110 pr_warn("[%s] No reboot reason set. Default reason %d\n",
@@ -188,14 +188,15 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
188 188
189 plat = dev_get_platdata(pdev->dev.parent); 189 plat = dev_get_platdata(pdev->dev.parent);
190 190
191 if (!(plat && plat->sysctrl)) 191 if (!plat)
192 return -EINVAL; 192 return -EINVAL;
193 193
194 if (plat->pm_power_off) 194 sysctrl_dev = &pdev->dev;
195
196 if (!pm_power_off)
195 pm_power_off = ab8500_power_off; 197 pm_power_off = ab8500_power_off;
196 198
197 pdata = plat->sysctrl; 199 pdata = plat->sysctrl;
198
199 if (pdata) { 200 if (pdata) {
200 int last, ret, i, j; 201 int last, ret, i, j;
201 202
@@ -226,6 +227,10 @@ static int ab8500_sysctrl_probe(struct platform_device *pdev)
226static int ab8500_sysctrl_remove(struct platform_device *pdev) 227static int ab8500_sysctrl_remove(struct platform_device *pdev)
227{ 228{
228 sysctrl_dev = NULL; 229 sysctrl_dev = NULL;
230
231 if (pm_power_off == ab8500_power_off)
232 pm_power_off = NULL;
233
229 return 0; 234 return 0;
230} 235}
231 236
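The sysctrl change adopts the usual convention for the global pm_power_off hook: install the handler only if nobody else has claimed it, and on remove clear it only if it is still ours. Roughly, with a hypothetical driver name:

#include <linux/pm.h>
#include <linux/platform_device.h>

static void hypothetical_power_off(void)
{
	/* ... tell the PMIC to cut power ... */
}

static int hypothetical_probe(struct platform_device *pdev)
{
	if (!pm_power_off)
		pm_power_off = hypothetical_power_off;
	return 0;
}

static int hypothetical_remove(struct platform_device *pdev)
{
	if (pm_power_off == hypothetical_power_off)
		pm_power_off = NULL;
	return 0;
}

The equality test on remove is what keeps the driver from clearing a handler that some other code installed after it.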
diff --git a/drivers/mfd/abx500-core.c b/drivers/mfd/abx500-core.c
index 9818afba2515..3714acb61458 100644
--- a/drivers/mfd/abx500-core.c
+++ b/drivers/mfd/abx500-core.c
@@ -156,7 +156,7 @@ EXPORT_SYMBOL(abx500_startup_irq_enabled);
156void abx500_dump_all_banks(void) 156void abx500_dump_all_banks(void)
157{ 157{
158 struct abx500_ops *ops; 158 struct abx500_ops *ops;
159 struct device dummy_child = {0}; 159 struct device dummy_child = {NULL};
160 struct abx500_device_entry *dev_entry; 160 struct abx500_device_entry *dev_entry;
161 161
162 list_for_each_entry(dev_entry, &abx500_list, list) { 162 list_for_each_entry(dev_entry, &abx500_list, list) {
diff --git a/drivers/mfd/cros_ec_spi.c b/drivers/mfd/cros_ec_spi.c
index 19193cf1e7a1..367ccb58ecb1 100644
--- a/drivers/mfd/cros_ec_spi.c
+++ b/drivers/mfd/cros_ec_spi.c
@@ -120,7 +120,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
120 120
121 for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) { 121 for (end = ptr + EC_MSG_PREAMBLE_COUNT; ptr != end; ptr++) {
122 if (*ptr == EC_MSG_HEADER) { 122 if (*ptr == EC_MSG_HEADER) {
123 dev_dbg(ec_dev->dev, "msg found at %ld\n", 123 dev_dbg(ec_dev->dev, "msg found at %zd\n",
124 ptr - ec_dev->din); 124 ptr - ec_dev->din);
125 break; 125 break;
126 } 126 }
@@ -154,7 +154,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
154 * maximum-supported transfer size. 154 * maximum-supported transfer size.
155 */ 155 */
156 todo = min(need_len, 256); 156 todo = min(need_len, 256);
157 dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%ld\n", 157 dev_dbg(ec_dev->dev, "loop, todo=%d, need_len=%d, ptr=%zd\n",
158 todo, need_len, ptr - ec_dev->din); 158 todo, need_len, ptr - ec_dev->din);
159 159
160 memset(&trans, '\0', sizeof(trans)); 160 memset(&trans, '\0', sizeof(trans));
@@ -178,7 +178,7 @@ static int cros_ec_spi_receive_response(struct cros_ec_device *ec_dev,
178 need_len -= todo; 178 need_len -= todo;
179 } 179 }
180 180
181 dev_dbg(ec_dev->dev, "loop done, ptr=%ld\n", ptr - ec_dev->din); 181 dev_dbg(ec_dev->dev, "loop done, ptr=%zd\n", ptr - ec_dev->din);
182 182
183 return 0; 183 return 0;
184} 184}
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 319b8abe742b..66f80973596b 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -1613,6 +1613,8 @@ static unsigned long dsiclk_rate(u8 n)
1613 1613
1614 if (divsel == PRCM_DSI_PLLOUT_SEL_OFF) 1614 if (divsel == PRCM_DSI_PLLOUT_SEL_OFF)
1615 divsel = dsiclk[n].divsel; 1615 divsel = dsiclk[n].divsel;
1616 else
1617 dsiclk[n].divsel = divsel;
1616 1618
1617 switch (divsel) { 1619 switch (divsel) {
1618 case PRCM_DSI_PLLOUT_SEL_PHI_4: 1620 case PRCM_DSI_PLLOUT_SEL_PHI_4:
@@ -3095,6 +3097,7 @@ static struct mfd_cell db8500_prcmu_devs[] = {
3095 .num_resources = ARRAY_SIZE(db8500_thsens_resources), 3097 .num_resources = ARRAY_SIZE(db8500_thsens_resources),
3096 .resources = db8500_thsens_resources, 3098 .resources = db8500_thsens_resources,
3097 .platform_data = &db8500_thsens_data, 3099 .platform_data = &db8500_thsens_data,
3100 .pdata_size = sizeof(db8500_thsens_data),
3098 }, 3101 },
3099}; 3102};
3100 3103
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index 5be3b5e13855..d8d5137f9717 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -414,11 +414,6 @@ static int intel_msic_probe(struct platform_device *pdev)
414 * the clients via intel_msic_irq_read(). 414 * the clients via intel_msic_irq_read().
415 */ 415 */
416 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 416 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
417 if (!res) {
418 dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
419 return -ENODEV;
420 }
421
422 msic->irq_base = devm_ioremap_resource(&pdev->dev, res); 417 msic->irq_base = devm_ioremap_resource(&pdev->dev, res);
423 if (IS_ERR(msic->irq_base)) 418 if (IS_ERR(msic->irq_base))
424 return PTR_ERR(msic->irq_base); 419 return PTR_ERR(msic->irq_base);
diff --git a/drivers/mfd/si476x-cmd.c b/drivers/mfd/si476x-cmd.c
index de48b4e88450..6f1ef63086c9 100644
--- a/drivers/mfd/si476x-cmd.c
+++ b/drivers/mfd/si476x-cmd.c
@@ -29,6 +29,8 @@
29 29
30#include <linux/mfd/si476x-core.h> 30#include <linux/mfd/si476x-core.h>
31 31
32#include <asm/unaligned.h>
33
32#define msb(x) ((u8)((u16) x >> 8)) 34#define msb(x) ((u8)((u16) x >> 8))
33#define lsb(x) ((u8)((u16) x & 0x00FF)) 35#define lsb(x) ((u8)((u16) x & 0x00FF))
34 36
@@ -150,7 +152,7 @@ enum si476x_acf_status_report_bits {
150 SI476X_ACF_SOFTMUTE_INT = (1 << 0), 152 SI476X_ACF_SOFTMUTE_INT = (1 << 0),
151 153
152 SI476X_ACF_SMUTE = (1 << 0), 154 SI476X_ACF_SMUTE = (1 << 0),
153 SI476X_ACF_SMATTN = 0b11111, 155 SI476X_ACF_SMATTN = 0x1f,
154 SI476X_ACF_PILOT = (1 << 7), 156 SI476X_ACF_PILOT = (1 << 7),
155 SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT, 157 SI476X_ACF_STBLEND = ~SI476X_ACF_PILOT,
156}; 158};
@@ -483,7 +485,7 @@ int si476x_core_cmd_get_property(struct si476x_core *core, u16 property)
483 if (err < 0) 485 if (err < 0)
484 return err; 486 return err;
485 else 487 else
486 return be16_to_cpup((__be16 *)(resp + 2)); 488 return get_unaligned_be16(resp + 2);
487} 489}
488EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property); 490EXPORT_SYMBOL_GPL(si476x_core_cmd_get_property);
489 491
@@ -772,18 +774,18 @@ int si476x_core_cmd_am_rsq_status(struct si476x_core *core,
772 if (!report) 774 if (!report)
773 return err; 775 return err;
774 776
775 report->snrhint = 0b00001000 & resp[1]; 777 report->snrhint = 0x08 & resp[1];
776 report->snrlint = 0b00000100 & resp[1]; 778 report->snrlint = 0x04 & resp[1];
777 report->rssihint = 0b00000010 & resp[1]; 779 report->rssihint = 0x02 & resp[1];
778 report->rssilint = 0b00000001 & resp[1]; 780 report->rssilint = 0x01 & resp[1];
779 781
780 report->bltf = 0b10000000 & resp[2]; 782 report->bltf = 0x80 & resp[2];
781 report->snr_ready = 0b00100000 & resp[2]; 783 report->snr_ready = 0x20 & resp[2];
782 report->rssiready = 0b00001000 & resp[2]; 784 report->rssiready = 0x08 & resp[2];
783 report->afcrl = 0b00000010 & resp[2]; 785 report->afcrl = 0x02 & resp[2];
784 report->valid = 0b00000001 & resp[2]; 786 report->valid = 0x01 & resp[2];
785 787
786 report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); 788 report->readfreq = get_unaligned_be16(resp + 3);
787 report->freqoff = resp[5]; 789 report->freqoff = resp[5];
788 report->rssi = resp[6]; 790 report->rssi = resp[6];
789 report->snr = resp[7]; 791 report->snr = resp[7];
@@ -931,26 +933,26 @@ int si476x_core_cmd_fm_rds_status(struct si476x_core *core,
931 if (err < 0 || report == NULL) 933 if (err < 0 || report == NULL)
932 return err; 934 return err;
933 935
934 report->rdstpptyint = 0b00010000 & resp[1]; 936 report->rdstpptyint = 0x10 & resp[1];
935 report->rdspiint = 0b00001000 & resp[1]; 937 report->rdspiint = 0x08 & resp[1];
936 report->rdssyncint = 0b00000010 & resp[1]; 938 report->rdssyncint = 0x02 & resp[1];
937 report->rdsfifoint = 0b00000001 & resp[1]; 939 report->rdsfifoint = 0x01 & resp[1];
938 940
939 report->tpptyvalid = 0b00010000 & resp[2]; 941 report->tpptyvalid = 0x10 & resp[2];
940 report->pivalid = 0b00001000 & resp[2]; 942 report->pivalid = 0x08 & resp[2];
941 report->rdssync = 0b00000010 & resp[2]; 943 report->rdssync = 0x02 & resp[2];
942 report->rdsfifolost = 0b00000001 & resp[2]; 944 report->rdsfifolost = 0x01 & resp[2];
943 945
944 report->tp = 0b00100000 & resp[3]; 946 report->tp = 0x20 & resp[3];
945 report->pty = 0b00011111 & resp[3]; 947 report->pty = 0x1f & resp[3];
946 948
947 report->pi = be16_to_cpup((__be16 *)(resp + 4)); 949 report->pi = get_unaligned_be16(resp + 4);
948 report->rdsfifoused = resp[6]; 950 report->rdsfifoused = resp[6];
949 951
950 report->ble[V4L2_RDS_BLOCK_A] = 0b11000000 & resp[7]; 952 report->ble[V4L2_RDS_BLOCK_A] = 0xc0 & resp[7];
951 report->ble[V4L2_RDS_BLOCK_B] = 0b00110000 & resp[7]; 953 report->ble[V4L2_RDS_BLOCK_B] = 0x30 & resp[7];
952 report->ble[V4L2_RDS_BLOCK_C] = 0b00001100 & resp[7]; 954 report->ble[V4L2_RDS_BLOCK_C] = 0x0c & resp[7];
953 report->ble[V4L2_RDS_BLOCK_D] = 0b00000011 & resp[7]; 955 report->ble[V4L2_RDS_BLOCK_D] = 0x03 & resp[7];
954 956
955 report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A; 957 report->rds[V4L2_RDS_BLOCK_A].block = V4L2_RDS_BLOCK_A;
956 report->rds[V4L2_RDS_BLOCK_A].msb = resp[8]; 958 report->rds[V4L2_RDS_BLOCK_A].msb = resp[8];
@@ -991,9 +993,9 @@ int si476x_core_cmd_fm_rds_blockcount(struct si476x_core *core,
991 SI476X_DEFAULT_TIMEOUT); 993 SI476X_DEFAULT_TIMEOUT);
992 994
993 if (!err) { 995 if (!err) {
994 report->expected = be16_to_cpup((__be16 *)(resp + 2)); 996 report->expected = get_unaligned_be16(resp + 2);
995 report->received = be16_to_cpup((__be16 *)(resp + 4)); 997 report->received = get_unaligned_be16(resp + 4);
996 report->uncorrectable = be16_to_cpup((__be16 *)(resp + 6)); 998 report->uncorrectable = get_unaligned_be16(resp + 6);
997 } 999 }
998 1000
999 return err; 1001 return err;
@@ -1005,7 +1007,7 @@ int si476x_core_cmd_fm_phase_diversity(struct si476x_core *core,
1005{ 1007{
1006 u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP]; 1008 u8 resp[CMD_FM_PHASE_DIVERSITY_NRESP];
1007 const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = { 1009 const u8 args[CMD_FM_PHASE_DIVERSITY_NARGS] = {
1008 mode & 0b111, 1010 mode & 0x07,
1009 }; 1011 };
1010 1012
1011 return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY, 1013 return si476x_core_send_command(core, CMD_FM_PHASE_DIVERSITY,
@@ -1162,7 +1164,7 @@ static int si476x_core_cmd_am_tune_freq_a20(struct si476x_core *core,
1162 const int am_freq = tuneargs->freq; 1164 const int am_freq = tuneargs->freq;
1163 u8 resp[CMD_AM_TUNE_FREQ_NRESP]; 1165 u8 resp[CMD_AM_TUNE_FREQ_NRESP];
1164 const u8 args[CMD_AM_TUNE_FREQ_NARGS] = { 1166 const u8 args[CMD_AM_TUNE_FREQ_NARGS] = {
1165 (tuneargs->zifsr << 6) | (tuneargs->injside & 0b11), 1167 (tuneargs->zifsr << 6) | (tuneargs->injside & 0x03),
1166 msb(am_freq), 1168 msb(am_freq),
1167 lsb(am_freq), 1169 lsb(am_freq),
1168 }; 1170 };
@@ -1197,20 +1199,20 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
1197 if (err < 0 || report == NULL) 1199 if (err < 0 || report == NULL)
1198 return err; 1200 return err;
1199 1201
1200 report->multhint = 0b10000000 & resp[1]; 1202 report->multhint = 0x80 & resp[1];
1201 report->multlint = 0b01000000 & resp[1]; 1203 report->multlint = 0x40 & resp[1];
1202 report->snrhint = 0b00001000 & resp[1]; 1204 report->snrhint = 0x08 & resp[1];
1203 report->snrlint = 0b00000100 & resp[1]; 1205 report->snrlint = 0x04 & resp[1];
1204 report->rssihint = 0b00000010 & resp[1]; 1206 report->rssihint = 0x02 & resp[1];
1205 report->rssilint = 0b00000001 & resp[1]; 1207 report->rssilint = 0x01 & resp[1];
1206 1208
1207 report->bltf = 0b10000000 & resp[2]; 1209 report->bltf = 0x80 & resp[2];
1208 report->snr_ready = 0b00100000 & resp[2]; 1210 report->snr_ready = 0x20 & resp[2];
1209 report->rssiready = 0b00001000 & resp[2]; 1211 report->rssiready = 0x08 & resp[2];
1210 report->afcrl = 0b00000010 & resp[2]; 1212 report->afcrl = 0x02 & resp[2];
1211 report->valid = 0b00000001 & resp[2]; 1213 report->valid = 0x01 & resp[2];
1212 1214
1213 report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); 1215 report->readfreq = get_unaligned_be16(resp + 3);
1214 report->freqoff = resp[5]; 1216 report->freqoff = resp[5];
1215 report->rssi = resp[6]; 1217 report->rssi = resp[6];
1216 report->snr = resp[7]; 1218 report->snr = resp[7];
@@ -1218,7 +1220,7 @@ static int si476x_core_cmd_fm_rsq_status_a10(struct si476x_core *core,
1218 report->hassi = resp[10]; 1220 report->hassi = resp[10];
1219 report->mult = resp[11]; 1221 report->mult = resp[11];
1220 report->dev = resp[12]; 1222 report->dev = resp[12];
1221 report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); 1223 report->readantcap = get_unaligned_be16(resp + 13);
1222 report->assi = resp[15]; 1224 report->assi = resp[15];
1223 report->usn = resp[16]; 1225 report->usn = resp[16];
1224 1226
@@ -1251,20 +1253,20 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
1251 if (err < 0 || report == NULL) 1253 if (err < 0 || report == NULL)
1252 return err; 1254 return err;
1253 1255
1254 report->multhint = 0b10000000 & resp[1]; 1256 report->multhint = 0x80 & resp[1];
1255 report->multlint = 0b01000000 & resp[1]; 1257 report->multlint = 0x40 & resp[1];
1256 report->snrhint = 0b00001000 & resp[1]; 1258 report->snrhint = 0x08 & resp[1];
1257 report->snrlint = 0b00000100 & resp[1]; 1259 report->snrlint = 0x04 & resp[1];
1258 report->rssihint = 0b00000010 & resp[1]; 1260 report->rssihint = 0x02 & resp[1];
1259 report->rssilint = 0b00000001 & resp[1]; 1261 report->rssilint = 0x01 & resp[1];
1260 1262
1261 report->bltf = 0b10000000 & resp[2]; 1263 report->bltf = 0x80 & resp[2];
1262 report->snr_ready = 0b00100000 & resp[2]; 1264 report->snr_ready = 0x20 & resp[2];
1263 report->rssiready = 0b00001000 & resp[2]; 1265 report->rssiready = 0x08 & resp[2];
1264 report->afcrl = 0b00000010 & resp[2]; 1266 report->afcrl = 0x02 & resp[2];
1265 report->valid = 0b00000001 & resp[2]; 1267 report->valid = 0x01 & resp[2];
1266 1268
1267 report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); 1269 report->readfreq = get_unaligned_be16(resp + 3);
1268 report->freqoff = resp[5]; 1270 report->freqoff = resp[5];
1269 report->rssi = resp[6]; 1271 report->rssi = resp[6];
1270 report->snr = resp[7]; 1272 report->snr = resp[7];
@@ -1272,7 +1274,7 @@ static int si476x_core_cmd_fm_rsq_status_a20(struct si476x_core *core,
1272 report->hassi = resp[10]; 1274 report->hassi = resp[10];
1273 report->mult = resp[11]; 1275 report->mult = resp[11];
1274 report->dev = resp[12]; 1276 report->dev = resp[12];
1275 report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); 1277 report->readantcap = get_unaligned_be16(resp + 13);
1276 report->assi = resp[15]; 1278 report->assi = resp[15];
1277 report->usn = resp[16]; 1279 report->usn = resp[16];
1278 1280
@@ -1306,21 +1308,21 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
1306 if (err < 0 || report == NULL) 1308 if (err < 0 || report == NULL)
1307 return err; 1309 return err;
1308 1310
1309 report->multhint = 0b10000000 & resp[1]; 1311 report->multhint = 0x80 & resp[1];
1310 report->multlint = 0b01000000 & resp[1]; 1312 report->multlint = 0x40 & resp[1];
1311 report->snrhint = 0b00001000 & resp[1]; 1313 report->snrhint = 0x08 & resp[1];
1312 report->snrlint = 0b00000100 & resp[1]; 1314 report->snrlint = 0x04 & resp[1];
1313 report->rssihint = 0b00000010 & resp[1]; 1315 report->rssihint = 0x02 & resp[1];
1314 report->rssilint = 0b00000001 & resp[1]; 1316 report->rssilint = 0x01 & resp[1];
1315 1317
1316 report->bltf = 0b10000000 & resp[2]; 1318 report->bltf = 0x80 & resp[2];
1317 report->snr_ready = 0b00100000 & resp[2]; 1319 report->snr_ready = 0x20 & resp[2];
1318 report->rssiready = 0b00001000 & resp[2]; 1320 report->rssiready = 0x08 & resp[2];
1319 report->injside = 0b00000100 & resp[2]; 1321 report->injside = 0x04 & resp[2];
1320 report->afcrl = 0b00000010 & resp[2]; 1322 report->afcrl = 0x02 & resp[2];
1321 report->valid = 0b00000001 & resp[2]; 1323 report->valid = 0x01 & resp[2];
1322 1324
1323 report->readfreq = be16_to_cpup((__be16 *)(resp + 3)); 1325 report->readfreq = get_unaligned_be16(resp + 3);
1324 report->freqoff = resp[5]; 1326 report->freqoff = resp[5];
1325 report->rssi = resp[6]; 1327 report->rssi = resp[6];
1326 report->snr = resp[7]; 1328 report->snr = resp[7];
@@ -1329,7 +1331,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
1329 report->hassi = resp[10]; 1331 report->hassi = resp[10];
1330 report->mult = resp[11]; 1332 report->mult = resp[11];
1331 report->dev = resp[12]; 1333 report->dev = resp[12];
1332 report->readantcap = be16_to_cpup((__be16 *)(resp + 13)); 1334 report->readantcap = get_unaligned_be16(resp + 13);
1333 report->assi = resp[15]; 1335 report->assi = resp[15];
1334 report->usn = resp[16]; 1336 report->usn = resp[16];
1335 1337
@@ -1337,7 +1339,7 @@ static int si476x_core_cmd_fm_rsq_status_a30(struct si476x_core *core,
1337 report->rdsdev = resp[18]; 1339 report->rdsdev = resp[18];
1338 report->assidev = resp[19]; 1340 report->assidev = resp[19];
1339 report->strongdev = resp[20]; 1341 report->strongdev = resp[20];
1340 report->rdspi = be16_to_cpup((__be16 *)(resp + 21)); 1342 report->rdspi = get_unaligned_be16(resp + 21);
1341 1343
1342 return err; 1344 return err;
1343} 1345}
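The si476x conversion replaces two idioms at once: the GCC-only 0b binary literals become plain hex masks, and be16_to_cpup() on a cast byte pointer becomes get_unaligned_be16(), which reads a big-endian 16-bit value without assuming the buffer offset is 2-byte aligned. A small hedged sketch of parsing such a response buffer (the field layout here is invented for illustration):

#include <linux/types.h>
#include <asm/unaligned.h>

struct hypothetical_report {
	u8 flags;
	u16 freq;
};

static void parse_response(const u8 *resp, struct hypothetical_report *r)
{
	r->flags = resp[1] & 0x1f;               /* hex mask instead of 0b11111 */
	r->freq  = get_unaligned_be16(resp + 2); /* no alignment assumption */
}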
diff --git a/drivers/misc/dummy-irq.c b/drivers/misc/dummy-irq.c
index 7014167e2c61..c37eeedfe215 100644
--- a/drivers/misc/dummy-irq.c
+++ b/drivers/misc/dummy-irq.c
@@ -19,7 +19,7 @@
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21 21
22static int irq; 22static int irq = -1;
23 23
24static irqreturn_t dummy_interrupt(int irq, void *dev_id) 24static irqreturn_t dummy_interrupt(int irq, void *dev_id)
25{ 25{
@@ -36,6 +36,10 @@ static irqreturn_t dummy_interrupt(int irq, void *dev_id)
36 36
37static int __init dummy_irq_init(void) 37static int __init dummy_irq_init(void)
38{ 38{
39 if (irq < 0) {
40 printk(KERN_ERR "dummy-irq: no IRQ given. Use irq=N\n");
41 return -EIO;
42 }
39 if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) { 43 if (request_irq(irq, &dummy_interrupt, IRQF_SHARED, "dummy_irq", &irq)) {
40 printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq); 44 printk(KERN_ERR "dummy-irq: cannot register IRQ %d\n", irq);
41 return -EIO; 45 return -EIO;
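The dummy-irq fix is the standard "unset" sentinel for an integer module parameter: default it to -1 and refuse to load when the user never passed irq=N, rather than silently requesting IRQ 0. Sketched as a generic module (names hypothetical; the driver above returns -EIO, while -EINVAL is used here as the more conventional choice):

#include <linux/module.h>

static int irq = -1;
module_param(irq, int, 0444);
MODULE_PARM_DESC(irq, "IRQ number to attach to");

static int __init hypothetical_init(void)
{
	if (irq < 0) {
		pr_err("hypothetical: no IRQ given, use irq=N\n");
		return -EINVAL;
	}
	/* ... request_irq(irq, ...) ... */
	return 0;
}
module_init(hypothetical_init);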
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 1e935eacaa7f..9ecd49a7be1b 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -496,6 +496,8 @@ int mei_cl_disable_device(struct mei_cl_device *device)
496 } 496 }
497 } 497 }
498 498
499 device->event_cb = NULL;
500
499 mutex_unlock(&dev->device_lock); 501 mutex_unlock(&dev->device_lock);
500 502
501 if (!device->ops || !device->ops->disable) 503 if (!device->ops || !device->ops->disable)
diff --git a/drivers/misc/vmw_vmci/Kconfig b/drivers/misc/vmw_vmci/Kconfig
index ea98f7e9ccd1..39c2ecadb273 100644
--- a/drivers/misc/vmw_vmci/Kconfig
+++ b/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
4 4
5config VMWARE_VMCI 5config VMWARE_VMCI
6 tristate "VMware VMCI Driver" 6 tristate "VMware VMCI Driver"
7 depends on X86 && PCI && NET 7 depends on X86 && PCI
8 help 8 help
9 This is VMware's Virtual Machine Communication Interface. It enables 9 This is VMware's Virtual Machine Communication Interface. It enables
10 high-speed communication between host and guest in a virtual 10 high-speed communication between host and guest in a virtual
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index d94245dbd765..8ff2e5ee8fb8 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -23,7 +23,7 @@
23#include <linux/pagemap.h> 23#include <linux/pagemap.h>
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/socket.h> 26#include <linux/uio.h>
27#include <linux/wait.h> 27#include <linux/wait.h>
28#include <linux/vmalloc.h> 28#include <linux/vmalloc.h>
29 29
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 375c109607ff..f4f3038c1df0 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1130,6 +1130,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1130 struct variant_data *variant = host->variant; 1130 struct variant_data *variant = host->variant;
1131 u32 pwr = 0; 1131 u32 pwr = 0;
1132 unsigned long flags; 1132 unsigned long flags;
1133 int ret;
1133 1134
1134 pm_runtime_get_sync(mmc_dev(mmc)); 1135 pm_runtime_get_sync(mmc_dev(mmc));
1135 1136
@@ -1161,8 +1162,12 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1161 break; 1162 break;
1162 case MMC_POWER_ON: 1163 case MMC_POWER_ON:
1163 if (!IS_ERR(mmc->supply.vqmmc) && 1164 if (!IS_ERR(mmc->supply.vqmmc) &&
1164 !regulator_is_enabled(mmc->supply.vqmmc)) 1165 !regulator_is_enabled(mmc->supply.vqmmc)) {
1165 regulator_enable(mmc->supply.vqmmc); 1166 ret = regulator_enable(mmc->supply.vqmmc);
1167 if (ret < 0)
1168 dev_err(mmc_dev(mmc),
1169 "failed to enable vqmmc regulator\n");
1170 }
1166 1171
1167 pwr |= MCI_PWR_ON; 1172 pwr |= MCI_PWR_ON;
1168 break; 1173 break;
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index a94facb46e5c..fd1df5e13ae4 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -672,11 +672,6 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
672 } 672 }
673 673
674 rc = platform_get_resource(pdev, IORESOURCE_MEM, 0); 674 rc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
675 if (rc == NULL) {
676 dev_err(&pdev->dev, "No memory resource found for device!\r\n");
677 return -ENXIO;
678 }
679
680 host->io_base = devm_ioremap_resource(&pdev->dev, rc); 675 host->io_base = devm_ioremap_resource(&pdev->dev, rc);
681 if (IS_ERR(host->io_base)) 676 if (IS_ERR(host->io_base))
682 return PTR_ERR(host->io_base); 677 return PTR_ERR(host->io_base);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index fc58d118d844..390061d09693 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2360,14 +2360,15 @@ int bond_3ad_set_carrier(struct bonding *bond)
2360} 2360}
2361 2361
2362/** 2362/**
2363 * bond_3ad_get_active_agg_info - get information of the active aggregator 2363 * __bond_3ad_get_active_agg_info - get information of the active aggregator
2364 * @bond: bonding struct to work on 2364 * @bond: bonding struct to work on
2365 * @ad_info: ad_info struct to fill with the bond's info 2365 * @ad_info: ad_info struct to fill with the bond's info
2366 * 2366 *
2367 * Returns: 0 on success 2367 * Returns: 0 on success
2368 * < 0 on error 2368 * < 0 on error
2369 */ 2369 */
2370int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info) 2370int __bond_3ad_get_active_agg_info(struct bonding *bond,
2371 struct ad_info *ad_info)
2371{ 2372{
2372 struct aggregator *aggregator = NULL; 2373 struct aggregator *aggregator = NULL;
2373 struct port *port; 2374 struct port *port;
@@ -2391,6 +2392,18 @@ int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
2391 return -1; 2392 return -1;
2392} 2393}
2393 2394
2395/* Wrapper used to hold bond->lock so no slave manipulation can occur */
2396int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info)
2397{
2398 int ret;
2399
2400 read_lock(&bond->lock);
2401 ret = __bond_3ad_get_active_agg_info(bond, ad_info);
2402 read_unlock(&bond->lock);
2403
2404 return ret;
2405}
2406
2394int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev) 2407int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2395{ 2408{
2396 struct slave *slave, *start_at; 2409 struct slave *slave, *start_at;
@@ -2402,8 +2415,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2402 struct ad_info ad_info; 2415 struct ad_info ad_info;
2403 int res = 1; 2416 int res = 1;
2404 2417
2405 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 2418 if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
2406 pr_debug("%s: Error: bond_3ad_get_active_agg_info failed\n", 2419 pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
2407 dev->name); 2420 dev->name);
2408 goto out; 2421 goto out;
2409 } 2422 }
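bond_3ad.c now splits the aggregator query into a lock-free __bond_3ad_get_active_agg_info(), for callers that already hold bond->lock (the xmit path and the procfs dump), and a thin locked wrapper that external callers keep using. The double-underscore-means-caller-holds-the-lock convention looks roughly like this (the structure below is invented):

#include <linux/spinlock.h>

struct hypothetical_bond {
	rwlock_t lock;
	int active_aggregator;
};

/* __ variant: caller must already hold bond->lock for reading */
static int __hypothetical_get_agg(struct hypothetical_bond *bond, int *out)
{
	if (bond->active_aggregator < 0)
		return -1;
	*out = bond->active_aggregator;
	return 0;
}

int hypothetical_get_agg(struct hypothetical_bond *bond, int *out)
{
	int ret;

	read_lock(&bond->lock);
	ret = __hypothetical_get_agg(bond, out);
	read_unlock(&bond->lock);

	return ret;
}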
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 0cfaa4afdece..5d91ad0cc041 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -273,6 +273,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave);
273void bond_3ad_adapter_duplex_changed(struct slave *slave); 273void bond_3ad_adapter_duplex_changed(struct slave *slave);
274void bond_3ad_handle_link_change(struct slave *slave, char link); 274void bond_3ad_handle_link_change(struct slave *slave, char link);
275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info); 275int bond_3ad_get_active_agg_info(struct bonding *bond, struct ad_info *ad_info);
276int __bond_3ad_get_active_agg_info(struct bonding *bond,
277 struct ad_info *ad_info);
276int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev); 278int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev);
277int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond, 279int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
278 struct slave *slave); 280 struct slave *slave);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d0aade04e49a..29b846cbfb48 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1362,6 +1362,7 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
1362 slave->dev->features, 1362 slave->dev->features,
1363 mask); 1363 mask);
1364 } 1364 }
1365 features = netdev_add_tso_features(features, mask);
1365 1366
1366out: 1367out:
1367 read_unlock(&bond->lock); 1368 read_unlock(&bond->lock);
@@ -2555,8 +2556,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
2555{ 2556{
2556 struct sk_buff *skb; 2557 struct sk_buff *skb;
2557 2558
2558 pr_debug("arp %d on slave %s: dst %x src %x vid %d\n", arp_op, 2559 pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
2559 slave_dev->name, dest_ip, src_ip, vlan_id); 2560 slave_dev->name, &dest_ip, &src_ip, vlan_id);
2560 2561
2561 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip, 2562 skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
2562 NULL, slave_dev->dev_addr, NULL); 2563 NULL, slave_dev->dev_addr, NULL);
@@ -2588,7 +2589,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2588 __be32 addr; 2589 __be32 addr;
2589 if (!targets[i]) 2590 if (!targets[i])
2590 break; 2591 break;
2591 pr_debug("basa: target %x\n", targets[i]); 2592 pr_debug("basa: target %pI4\n", &targets[i]);
2592 if (!bond_vlan_used(bond)) { 2593 if (!bond_vlan_used(bond)) {
2593 pr_debug("basa: empty vlan: arp_send\n"); 2594 pr_debug("basa: empty vlan: arp_send\n");
2594 addr = bond_confirm_addr(bond->dev, targets[i], 0); 2595 addr = bond_confirm_addr(bond->dev, targets[i], 0);
@@ -4470,7 +4471,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4470 4471
4471static int bond_check_params(struct bond_params *params) 4472static int bond_check_params(struct bond_params *params)
4472{ 4473{
4473 int arp_validate_value, fail_over_mac_value, primary_reselect_value; 4474 int arp_validate_value, fail_over_mac_value, primary_reselect_value, i;
4474 4475
4475 /* 4476 /*
4476 * Convert string parameters. 4477 * Convert string parameters.
@@ -4650,19 +4651,18 @@ static int bond_check_params(struct bond_params *params)
4650 arp_interval = BOND_LINK_ARP_INTERV; 4651 arp_interval = BOND_LINK_ARP_INTERV;
4651 } 4652 }
4652 4653
4653 for (arp_ip_count = 0; 4654 for (arp_ip_count = 0, i = 0;
4654 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[arp_ip_count]; 4655 (arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
4655 arp_ip_count++) {
4656 /* not complete check, but should be good enough to 4656 /* not complete check, but should be good enough to
4657 catch mistakes */ 4657 catch mistakes */
4658 __be32 ip = in_aton(arp_ip_target[arp_ip_count]); 4658 __be32 ip = in_aton(arp_ip_target[i]);
4659 if (!isdigit(arp_ip_target[arp_ip_count][0]) || 4659 if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
4660 ip == 0 || ip == htonl(INADDR_BROADCAST)) { 4660 ip == htonl(INADDR_BROADCAST)) {
4661 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n", 4661 pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
4662 arp_ip_target[arp_ip_count]); 4662 arp_ip_target[i]);
4663 arp_interval = 0; 4663 arp_interval = 0;
4664 } else { 4664 } else {
4665 arp_target[arp_ip_count] = ip; 4665 arp_target[arp_ip_count++] = ip;
4666 } 4666 }
4667 } 4667 }
4668 4668
@@ -4696,8 +4696,6 @@ static int bond_check_params(struct bond_params *params)
4696 if (miimon) { 4696 if (miimon) {
4697 pr_info("MII link monitoring set to %d ms\n", miimon); 4697 pr_info("MII link monitoring set to %d ms\n", miimon);
4698 } else if (arp_interval) { 4698 } else if (arp_interval) {
4699 int i;
4700
4701 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):", 4699 pr_info("ARP monitoring set to %d ms, validate %s, with %d target(s):",
4702 arp_interval, 4700 arp_interval,
4703 arp_validate_tbl[arp_validate_value].modename, 4701 arp_validate_tbl[arp_validate_value].modename,
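The bond_main.c debug changes stop printing raw __be32 values with %x and use the kernel's %pI4 extension instead, which takes a pointer to the address and formats the four octets as a dotted quad. In sketch form:

#include <linux/printk.h>
#include <linux/types.h>

static void hypothetical_log_target(__be32 target)
{
	/* %pI4 dereferences its argument, so pass the address of the __be32 */
	pr_debug("arp target %pI4\n", &target);
}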
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 94d06f1307b8..4060d41f0ee7 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -130,7 +130,7 @@ static void bond_info_show_master(struct seq_file *seq)
130 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n", 130 seq_printf(seq, "Aggregator selection policy (ad_select): %s\n",
131 ad_select_tbl[bond->params.ad_select].modename); 131 ad_select_tbl[bond->params.ad_select].modename);
132 132
133 if (bond_3ad_get_active_agg_info(bond, &ad_info)) { 133 if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
134 seq_printf(seq, "bond %s has no active aggregator\n", 134 seq_printf(seq, "bond %s has no active aggregator\n",
135 bond->dev->name); 135 bond->dev->name);
136 } else { 136 } else {
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ea7a388f4843..d7434e0a610e 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -316,6 +316,9 @@ static ssize_t bonding_store_mode(struct device *d,
316 int new_value, ret = count; 316 int new_value, ret = count;
317 struct bonding *bond = to_bond(d); 317 struct bonding *bond = to_bond(d);
318 318
319 if (!rtnl_trylock())
320 return restart_syscall();
321
319 if (bond->dev->flags & IFF_UP) { 322 if (bond->dev->flags & IFF_UP) {
320 pr_err("unable to update mode of %s because interface is up.\n", 323 pr_err("unable to update mode of %s because interface is up.\n",
321 bond->dev->name); 324 bond->dev->name);
@@ -352,6 +355,7 @@ static ssize_t bonding_store_mode(struct device *d,
352 bond->dev->name, bond_mode_tbl[new_value].modename, 355 bond->dev->name, bond_mode_tbl[new_value].modename,
353 new_value); 356 new_value);
354out: 357out:
358 rtnl_unlock();
355 return ret; 359 return ret;
356} 360}
357static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, 361static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
@@ -1315,7 +1319,6 @@ static ssize_t bonding_show_mii_status(struct device *d,
1315} 1319}
1316static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); 1320static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
1317 1321
1318
1319/* 1322/*
1320 * Show current 802.3ad aggregator ID. 1323 * Show current 802.3ad aggregator ID.
1321 */ 1324 */
@@ -1329,7 +1332,7 @@ static ssize_t bonding_show_ad_aggregator(struct device *d,
1329 if (bond->params.mode == BOND_MODE_8023AD) { 1332 if (bond->params.mode == BOND_MODE_8023AD) {
1330 struct ad_info ad_info; 1333 struct ad_info ad_info;
1331 count = sprintf(buf, "%d\n", 1334 count = sprintf(buf, "%d\n",
1332 (bond_3ad_get_active_agg_info(bond, &ad_info)) 1335 bond_3ad_get_active_agg_info(bond, &ad_info)
1333 ? 0 : ad_info.aggregator_id); 1336 ? 0 : ad_info.aggregator_id);
1334 } 1337 }
1335 1338
@@ -1351,7 +1354,7 @@ static ssize_t bonding_show_ad_num_ports(struct device *d,
1351 if (bond->params.mode == BOND_MODE_8023AD) { 1354 if (bond->params.mode == BOND_MODE_8023AD) {
1352 struct ad_info ad_info; 1355 struct ad_info ad_info;
1353 count = sprintf(buf, "%d\n", 1356 count = sprintf(buf, "%d\n",
1354 (bond_3ad_get_active_agg_info(bond, &ad_info)) 1357 bond_3ad_get_active_agg_info(bond, &ad_info)
1355 ? 0 : ad_info.ports); 1358 ? 0 : ad_info.ports);
1356 } 1359 }
1357 1360
@@ -1373,7 +1376,7 @@ static ssize_t bonding_show_ad_actor_key(struct device *d,
1373 if (bond->params.mode == BOND_MODE_8023AD) { 1376 if (bond->params.mode == BOND_MODE_8023AD) {
1374 struct ad_info ad_info; 1377 struct ad_info ad_info;
1375 count = sprintf(buf, "%d\n", 1378 count = sprintf(buf, "%d\n",
1376 (bond_3ad_get_active_agg_info(bond, &ad_info)) 1379 bond_3ad_get_active_agg_info(bond, &ad_info)
1377 ? 0 : ad_info.actor_key); 1380 ? 0 : ad_info.actor_key);
1378 } 1381 }
1379 1382
@@ -1395,7 +1398,7 @@ static ssize_t bonding_show_ad_partner_key(struct device *d,
1395 if (bond->params.mode == BOND_MODE_8023AD) { 1398 if (bond->params.mode == BOND_MODE_8023AD) {
1396 struct ad_info ad_info; 1399 struct ad_info ad_info;
1397 count = sprintf(buf, "%d\n", 1400 count = sprintf(buf, "%d\n",
1398 (bond_3ad_get_active_agg_info(bond, &ad_info)) 1401 bond_3ad_get_active_agg_info(bond, &ad_info)
1399 ? 0 : ad_info.partner_key); 1402 ? 0 : ad_info.partner_key);
1400 } 1403 }
1401 1404
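bonding_store_mode() now takes the RTNL lock with the try-and-restart idiom common in networking sysfs code; the usual reason is to avoid deadlocking against paths that hold RTNL while removing the sysfs entries, with restart_syscall() making the write() transparently retry instead of failing. A trimmed sketch of a store handler using it (attribute and state names invented):

#include <linux/device.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>

static ssize_t hypothetical_store(struct device *d,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	ssize_t ret = count;

	if (!rtnl_trylock())
		return restart_syscall();

	/* ... parse buf and update state that requires RTNL here ... */

	rtnl_unlock();
	return ret;
}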
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index 7ffc756131a2..547098086773 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -43,7 +43,7 @@ config CAIF_HSI
43 43
44config CAIF_VIRTIO 44config CAIF_VIRTIO
45 tristate "CAIF virtio transport driver" 45 tristate "CAIF virtio transport driver"
46 depends on CAIF 46 depends on CAIF && HAS_DMA
47 select VHOST_RING 47 select VHOST_RING
48 select VIRTIO 48 select VIRTIO
49 select GENERIC_ALLOCATOR 49 select GENERIC_ALLOCATOR
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index de570a8f8967..072c6f14e8fc 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -632,7 +632,6 @@ struct vortex_private {
632 pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ 632 pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */
633 open:1, 633 open:1,
634 medialock:1, 634 medialock:1,
635 must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */
636 large_frames:1, /* accept large frames */ 635 large_frames:1, /* accept large frames */
637 handling_irq:1; /* private in_irq indicator */ 636 handling_irq:1; /* private in_irq indicator */
638 /* {get|set}_wol operations are already serialized by rtnl. 637 /* {get|set}_wol operations are already serialized by rtnl.
@@ -1012,6 +1011,12 @@ static int vortex_init_one(struct pci_dev *pdev,
1012 if (rc < 0) 1011 if (rc < 0)
1013 goto out; 1012 goto out;
1014 1013
1014 rc = pci_request_regions(pdev, DRV_NAME);
1015 if (rc < 0) {
1016 pci_disable_device(pdev);
1017 goto out;
1018 }
1019
1015 unit = vortex_cards_found; 1020 unit = vortex_cards_found;
1016 1021
1017 if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { 1022 if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
@@ -1027,6 +1032,7 @@ static int vortex_init_one(struct pci_dev *pdev,
1027 if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */ 1032 if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
1028 ioaddr = pci_iomap(pdev, 0, 0); 1033 ioaddr = pci_iomap(pdev, 0, 0);
1029 if (!ioaddr) { 1034 if (!ioaddr) {
1035 pci_release_regions(pdev);
1030 pci_disable_device(pdev); 1036 pci_disable_device(pdev);
1031 rc = -ENOMEM; 1037 rc = -ENOMEM;
1032 goto out; 1038 goto out;
@@ -1036,6 +1042,7 @@ static int vortex_init_one(struct pci_dev *pdev,
1036 ent->driver_data, unit); 1042 ent->driver_data, unit);
1037 if (rc < 0) { 1043 if (rc < 0) {
1038 pci_iounmap(pdev, ioaddr); 1044 pci_iounmap(pdev, ioaddr);
1045 pci_release_regions(pdev);
1039 pci_disable_device(pdev); 1046 pci_disable_device(pdev);
1040 goto out; 1047 goto out;
1041 } 1048 }
@@ -1178,11 +1185,6 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1178 1185
1179 /* PCI-only startup logic */ 1186 /* PCI-only startup logic */
1180 if (pdev) { 1187 if (pdev) {
1181 /* EISA resources already marked, so only PCI needs to do this here */
1182 /* Ignore return value, because Cardbus drivers already allocate for us */
1183 if (request_region(dev->base_addr, vci->io_size, print_name) != NULL)
1184 vp->must_free_region = 1;
1185
1186 /* enable bus-mastering if necessary */ 1188 /* enable bus-mastering if necessary */
1187 if (vci->flags & PCI_USES_MASTER) 1189 if (vci->flags & PCI_USES_MASTER)
1188 pci_set_master(pdev); 1190 pci_set_master(pdev);
@@ -1220,7 +1222,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
1220 &vp->rx_ring_dma); 1222 &vp->rx_ring_dma);
1221 retval = -ENOMEM; 1223 retval = -ENOMEM;
1222 if (!vp->rx_ring) 1224 if (!vp->rx_ring)
1223 goto free_region; 1225 goto free_device;
1224 1226
1225 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); 1227 vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
1226 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; 1228 vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
@@ -1484,9 +1486,7 @@ free_ring:
1484 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 1486 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
1485 vp->rx_ring, 1487 vp->rx_ring,
1486 vp->rx_ring_dma); 1488 vp->rx_ring_dma);
1487free_region: 1489free_device:
1488 if (vp->must_free_region)
1489 release_region(dev->base_addr, vci->io_size);
1490 free_netdev(dev); 1490 free_netdev(dev);
1491 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); 1491 pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval);
1492out: 1492out:
@@ -3254,8 +3254,9 @@ static void vortex_remove_one(struct pci_dev *pdev)
3254 + sizeof(struct boom_tx_desc) * TX_RING_SIZE, 3254 + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
3255 vp->rx_ring, 3255 vp->rx_ring,
3256 vp->rx_ring_dma); 3256 vp->rx_ring_dma);
3257 if (vp->must_free_region) 3257
3258 release_region(dev->base_addr, vp->io_size); 3258 pci_release_regions(pdev);
3259
3259 free_netdev(dev); 3260 free_netdev(dev);
3260} 3261}
3261 3262
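The 3c59x rework claims the device's I/O and memory ranges with pci_request_regions() right after enabling the device, and releases them on every failure path and in remove, instead of tracking a per-device must_free_region flag. The resulting probe/unwind shape is roughly the following (driver name and error labels are placeholders):

#include <linux/pci.h>

static int hypothetical_pci_probe(struct pci_dev *pdev,
				  const struct pci_device_id *ent)
{
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		return rc;

	rc = pci_request_regions(pdev, "hypothetical");
	if (rc < 0)
		goto err_disable;

	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr) {
		rc = -ENOMEM;
		goto err_release;
	}

	/* ... register the netdev ... */
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}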
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index b8fbe266ab68..be59ec4b2c30 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3313,6 +3313,7 @@ static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3313 */ 3313 */
3314static void bnx2x_set_pbd_gso(struct sk_buff *skb, 3314static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3315 struct eth_tx_parse_bd_e1x *pbd, 3315 struct eth_tx_parse_bd_e1x *pbd,
3316 struct eth_tx_start_bd *tx_start_bd,
3316 u32 xmit_type) 3317 u32 xmit_type)
3317{ 3318{
3318 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size); 3319 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
@@ -3326,11 +3327,14 @@ static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3326 ip_hdr(skb)->daddr, 3327 ip_hdr(skb)->daddr,
3327 0, IPPROTO_TCP, 0)); 3328 0, IPPROTO_TCP, 0));
3328 3329
3329 } else 3330 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3331 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3332 } else {
3330 pbd->tcp_pseudo_csum = 3333 pbd->tcp_pseudo_csum =
3331 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3334 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3332 &ipv6_hdr(skb)->daddr, 3335 &ipv6_hdr(skb)->daddr,
3333 0, IPPROTO_TCP, 0)); 3336 0, IPPROTO_TCP, 0));
3337 }
3334 3338
3335 pbd->global_data |= 3339 pbd->global_data |=
3336 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN); 3340 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
@@ -3814,7 +3818,8 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3814 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data, 3818 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3815 xmit_type); 3819 xmit_type);
3816 else 3820 else
3817 bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type); 3821 bnx2x_set_pbd_gso(skb, pbd_e1x, tx_start_bd,
3822 xmit_type);
3818 } 3823 }
3819 3824
3820 /* Set the PBD's parsing_data field if not zero 3825 /* Set the PBD's parsing_data field if not zero
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 728d42ab2a76..1f2dd928888a 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
94 94
95#define DRV_MODULE_NAME "tg3" 95#define DRV_MODULE_NAME "tg3"
96#define TG3_MAJ_NUM 3 96#define TG3_MAJ_NUM 3
97#define TG3_MIN_NUM 131 97#define TG3_MIN_NUM 132
98#define DRV_MODULE_VERSION \ 98#define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100#define DRV_MODULE_RELDATE "April 09, 2013" 100#define DRV_MODULE_RELDATE "May 21, 2013"
101 101
102#define RESET_KIND_SHUTDOWN 0 102#define RESET_KIND_SHUTDOWN 0
103#define RESET_KIND_INIT 1 103#define RESET_KIND_INIT 1
@@ -2957,6 +2957,31 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2957 return 0; 2957 return 0;
2958} 2958}
2959 2959
2960static bool tg3_phy_power_bug(struct tg3 *tp)
2961{
2962 switch (tg3_asic_rev(tp)) {
2963 case ASIC_REV_5700:
2964 case ASIC_REV_5704:
2965 return true;
2966 case ASIC_REV_5780:
2967 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2968 return true;
2969 return false;
2970 case ASIC_REV_5717:
2971 if (!tp->pci_fn)
2972 return true;
2973 return false;
2974 case ASIC_REV_5719:
2975 case ASIC_REV_5720:
2976 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2977 !tp->pci_fn)
2978 return true;
2979 return false;
2980 }
2981
2982 return false;
2983}
2984
2960static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power) 2985static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2961{ 2986{
2962 u32 val; 2987 u32 val;
@@ -3016,12 +3041,7 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3016 /* The PHY should not be powered down on some chips because 3041 /* The PHY should not be powered down on some chips because
3017 * of bugs. 3042 * of bugs.
3018 */ 3043 */
3019 if (tg3_asic_rev(tp) == ASIC_REV_5700 || 3044 if (tg3_phy_power_bug(tp))
3020 tg3_asic_rev(tp) == ASIC_REV_5704 ||
3021 (tg3_asic_rev(tp) == ASIC_REV_5780 &&
3022 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)) ||
3023 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
3024 !tp->pci_fn))
3025 return; 3045 return;
3026 3046
3027 if (tg3_chip_rev(tp) == CHIPREV_5784_AX || 3047 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
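The change above folds the growing list of per-ASIC conditions into a single tg3_phy_power_bug() predicate. A compilable sketch of that refactor shape, using invented revision names rather than the real ASIC_REV_* values:

#include <stdbool.h>
#include <stdio.h>

/* Invented revision names; the driver switches on tg3_asic_rev(tp). */
enum { REV_A = 1, REV_B, REV_C };

struct chip {
    int asic_rev;
    bool serdes;
    int pci_fn;
};

/* One predicate per quirk keeps the caller to a single readable test,
 * and adding a new revision only touches this switch. */
static bool phy_power_bug(const struct chip *c)
{
    switch (c->asic_rev) {
    case REV_A:
        return true;
    case REV_B:
        return c->serdes;
    case REV_C:
        return c->serdes && c->pci_fn == 0;
    }
    return false;
}

int main(void)
{
    struct chip c = { .asic_rev = REV_C, .serdes = true, .pci_fn = 0 };
    printf("skip PHY power-down: %s\n", phy_power_bug(&c) ? "yes" : "no");
    return 0;
}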
@@ -7428,6 +7448,20 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7428 return (base > 0xffffdcc0) && (base + len + 8 < base); 7448 return (base > 0xffffdcc0) && (base + len + 8 < base);
7429} 7449}
7430 7450
7451/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7452 * of any 4GB boundaries: 4G, 8G, etc
7453 */
7454static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7455 u32 len, u32 mss)
7456{
7457 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7458 u32 base = (u32) mapping & 0xffffffff;
7459
7460 return ((base + len + (mss & 0x3fff)) < base);
7461 }
7462 return 0;
7463}
7464
7431/* Test for DMA addresses > 40-bit */ 7465/* Test for DMA addresses > 40-bit */
7432static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping, 7466static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7433 int len) 7467 int len)
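tg3_4g_tso_overflow_test() flags TSO buffers whose end, padded by up to one MSS, wraps the low 32 address bits and therefore crosses a 4GB boundary. A standalone version of the arithmetic (the 0x3fff MSS mask mirrors the hunk; the sample address below is made up):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Wraparound of the low 32 address bits signals that the buffer,
 * padded by up to one MSS, crosses a 4GB boundary. */
static bool crosses_4g_boundary(uint64_t dma_addr, uint32_t len, uint32_t mss)
{
    uint32_t base = (uint32_t)dma_addr;   /* low 32 bits only */
    return (uint32_t)(base + len + (mss & 0x3fff)) < base;
}

int main(void)
{
    /* 100 bytes below a 4GB boundary with a 1448-byte MSS: the padded end wraps. */
    printf("%d\n", crosses_4g_boundary(0xFFFFFF9CULL, 60, 1448));
    return 0;
}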
@@ -7464,6 +7498,9 @@ static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7464 if (tg3_4g_overflow_test(map, len)) 7498 if (tg3_4g_overflow_test(map, len))
7465 hwbug = true; 7499 hwbug = true;
7466 7500
7501 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7502 hwbug = true;
7503
7467 if (tg3_40bit_overflow_test(tp, map, len)) 7504 if (tg3_40bit_overflow_test(tp, map, len))
7468 hwbug = true; 7505 hwbug = true;
7469 7506
@@ -8874,6 +8911,10 @@ static int tg3_chip_reset(struct tg3 *tp)
8874 tg3_halt_cpu(tp, RX_CPU_BASE); 8911 tg3_halt_cpu(tp, RX_CPU_BASE);
8875 } 8912 }
8876 8913
8914 err = tg3_poll_fw(tp);
8915 if (err)
8916 return err;
8917
8877 tw32(GRC_MODE, tp->grc_mode); 8918 tw32(GRC_MODE, tp->grc_mode);
8878 8919
8879 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) { 8920 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
@@ -8904,10 +8945,6 @@ static int tg3_chip_reset(struct tg3 *tp)
8904 8945
8905 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 8946 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
8906 8947
8907 err = tg3_poll_fw(tp);
8908 if (err)
8909 return err;
8910
8911 tg3_mdio_start(tp); 8948 tg3_mdio_start(tp);
8912 8949
8913 if (tg3_flag(tp, PCI_EXPRESS) && 8950 if (tg3_flag(tp, PCI_EXPRESS) &&
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index ce4a030d3d0c..07f7ef05c3f2 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3236,9 +3236,10 @@ bnad_init(struct bnad *bnad,
3236 3236
3237 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id); 3237 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3238 bnad->work_q = create_singlethread_workqueue(bnad->wq_name); 3238 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3239 3239 if (!bnad->work_q) {
3240 if (!bnad->work_q) 3240 iounmap(bnad->bar0);
3241 return -ENOMEM; 3241 return -ENOMEM;
3242 }
3242 3243
3243 return 0; 3244 return 0;
3244} 3245}
diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig
index 1194446f859a..768285ec10f4 100644
--- a/drivers/net/ethernet/cadence/Kconfig
+++ b/drivers/net/ethernet/cadence/Kconfig
@@ -22,7 +22,7 @@ if NET_CADENCE
22 22
23config ARM_AT91_ETHER 23config ARM_AT91_ETHER
24 tristate "AT91RM9200 Ethernet support" 24 tristate "AT91RM9200 Ethernet support"
25 depends on GENERIC_HARDIRQS 25 depends on GENERIC_HARDIRQS && HAS_DMA
26 select NET_CORE 26 select NET_CORE
27 select MACB 27 select MACB
28 ---help--- 28 ---help---
@@ -31,6 +31,7 @@ config ARM_AT91_ETHER
31 31
32config MACB 32config MACB
33 tristate "Cadence MACB/GEM support" 33 tristate "Cadence MACB/GEM support"
34 depends on HAS_DMA
34 select PHYLIB 35 select PHYLIB
35 ---help--- 36 ---help---
36 The Cadence MACB ethernet interface is found on many Atmel AT32 and 37 The Cadence MACB ethernet interface is found on many Atmel AT32 and
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 6be513deb17f..c89aa41dd448 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -485,7 +485,8 @@ static void macb_tx_interrupt(struct macb *bp)
485 status = macb_readl(bp, TSR); 485 status = macb_readl(bp, TSR);
486 macb_writel(bp, TSR, status); 486 macb_writel(bp, TSR, status);
487 487
488 macb_writel(bp, ISR, MACB_BIT(TCOMP)); 488 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
489 macb_writel(bp, ISR, MACB_BIT(TCOMP));
489 490
490 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n", 491 netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
491 (unsigned long)status); 492 (unsigned long)status);
@@ -738,7 +739,8 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
738 * now. 739 * now.
739 */ 740 */
740 macb_writel(bp, IDR, MACB_RX_INT_FLAGS); 741 macb_writel(bp, IDR, MACB_RX_INT_FLAGS);
741 macb_writel(bp, ISR, MACB_BIT(RCOMP)); 742 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
743 macb_writel(bp, ISR, MACB_BIT(RCOMP));
742 744
743 if (napi_schedule_prep(&bp->napi)) { 745 if (napi_schedule_prep(&bp->napi)) {
744 netdev_vdbg(bp->dev, "scheduling RX softirq\n"); 746 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
@@ -1062,6 +1064,17 @@ static void macb_configure_dma(struct macb *bp)
1062 } 1064 }
1063} 1065}
1064 1066
1067/*
1068 * Configure peripheral capacities according to integration options used
1069 */
1070static void macb_configure_caps(struct macb *bp)
1071{
1072 if (macb_is_gem(bp)) {
1073 if (GEM_BF(IRQCOR, gem_readl(bp, DCFG1)) == 0)
1074 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
1075 }
1076}
1077
1065static void macb_init_hw(struct macb *bp) 1078static void macb_init_hw(struct macb *bp)
1066{ 1079{
1067 u32 config; 1080 u32 config;
@@ -1084,6 +1097,7 @@ static void macb_init_hw(struct macb *bp)
1084 bp->duplex = DUPLEX_HALF; 1097 bp->duplex = DUPLEX_HALF;
1085 1098
1086 macb_configure_dma(bp); 1099 macb_configure_dma(bp);
1100 macb_configure_caps(bp);
1087 1101
1088 /* Initialize TX and RX buffers */ 1102 /* Initialize TX and RX buffers */
1089 macb_writel(bp, RBQP, bp->rx_ring_dma); 1103 macb_writel(bp, RBQP, bp->rx_ring_dma);
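macb_configure_caps() reads the GEM design-configuration register once and records MACB_CAPS_ISR_CLEAR_ON_WRITE, so the interrupt handlers only write ISR when the hardware actually requires it. A user-space sketch of the same capability-probe pattern, with a hypothetical bit position standing in for GEM_BF(IRQCOR, ...):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical layout: bit 23 of the design-config snapshot reports whether
 * interrupts clear on read (1) or must be cleared by a write (0). */
#define CFG_IRQ_CLEAR_ON_READ_BIT  23
#define CAP_ISR_CLEAR_ON_WRITE     0x1u

struct nic {
    uint32_t dcfg1;   /* snapshot of the design-config register */
    uint32_t caps;
};

static void configure_caps(struct nic *n)
{
    if (((n->dcfg1 >> CFG_IRQ_CLEAR_ON_READ_BIT) & 0x1) == 0)
        n->caps |= CAP_ISR_CLEAR_ON_WRITE;
}

int main(void)
{
    struct nic n = { .dcfg1 = 0x0, .caps = 0 };
    configure_caps(&n);
    /* Interrupt handlers then write the ISR only when the capability is set. */
    printf("clear-on-write: %s\n", (n.caps & CAP_ISR_CLEAR_ON_WRITE) ? "yes" : "no");
    return 0;
}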
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 993d70380688..548c0ecae869 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -300,6 +300,8 @@
300#define MACB_REV_SIZE 16 300#define MACB_REV_SIZE 16
301 301
302/* Bitfields in DCFG1. */ 302/* Bitfields in DCFG1. */
303#define GEM_IRQCOR_OFFSET 23
304#define GEM_IRQCOR_SIZE 1
303#define GEM_DBWDEF_OFFSET 25 305#define GEM_DBWDEF_OFFSET 25
304#define GEM_DBWDEF_SIZE 3 306#define GEM_DBWDEF_SIZE 3
305 307
@@ -323,6 +325,9 @@
323#define MACB_MAN_READ 2 325#define MACB_MAN_READ 2
324#define MACB_MAN_CODE 2 326#define MACB_MAN_CODE 2
325 327
328/* Capability mask bits */
329#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x1
330
326/* Bit manipulation macros */ 331/* Bit manipulation macros */
327#define MACB_BIT(name) \ 332#define MACB_BIT(name) \
328 (1 << MACB_##name##_OFFSET) 333 (1 << MACB_##name##_OFFSET)
@@ -574,6 +579,8 @@ struct macb {
574 unsigned int speed; 579 unsigned int speed;
575 unsigned int duplex; 580 unsigned int duplex;
576 581
582 u32 caps;
583
577 phy_interface_t phy_interface; 584 phy_interface_t phy_interface;
578 585
579 /* AT91RM9200 transmit */ 586 /* AT91RM9200 transmit */
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index aba435c3d4ae..184a063bed5f 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,6 @@
1config NET_CALXEDA_XGMAC 1config NET_CALXEDA_XGMAC
2 tristate "Calxeda 1G/10G XGMAC Ethernet driver" 2 tristate "Calxeda 1G/10G XGMAC Ethernet driver"
3 depends on HAS_IOMEM 3 depends on HAS_IOMEM && HAS_DMA
4 select CRC32 4 select CRC32
5 help 5 help
6 This is the driver for the XGMAC Ethernet IP block found on Calxeda 6 This is the driver for the XGMAC Ethernet IP block found on Calxeda
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index fd7b547698ab..a236ecd27cf3 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -2976,22 +2976,17 @@ static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2976 for (i = 0; i < desc_count; i++) { 2976 for (i = 0; i < desc_count; i++) {
2977 desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE; 2977 desc->desc_len = desc->desc_len ? : RESOURCE_DESC_SIZE;
2978 if (((void *)desc + desc->desc_len) > 2978 if (((void *)desc + desc->desc_len) >
2979 (void *)(buf + max_buf_size)) { 2979 (void *)(buf + max_buf_size))
2980 desc = NULL; 2980 return NULL;
2981 break;
2982 }
2983 2981
2984 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 2982 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
2985 desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1) 2983 desc->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
2986 break; 2984 return desc;
2987 2985
2988 desc = (void *)desc + desc->desc_len; 2986 desc = (void *)desc + desc->desc_len;
2989 } 2987 }
2990 2988
2991 if (!desc || i == MAX_RESOURCE_DESC) 2989 return NULL;
2992 return NULL;
2993
2994 return desc;
2995} 2990}
2996 2991
2997/* Uses Mbox */ 2992/* Uses Mbox */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index a444110b060f..ca2967b0f18b 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -780,26 +780,18 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
780 if (unlikely(!skb)) 780 if (unlikely(!skb))
781 return skb; 781 return skb;
782 782
783 if (vlan_tx_tag_present(skb)) { 783 if (vlan_tx_tag_present(skb))
784 vlan_tag = be_get_tx_vlan_tag(adapter, skb); 784 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 785 else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
786 if (skb) 786 vlan_tag = adapter->pvid;
787 skb->vlan_tci = 0;
788 }
789
790 if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
791 if (!vlan_tag)
792 vlan_tag = adapter->pvid;
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
795 }
796 787
797 if (vlan_tag) { 788 if (vlan_tag) {
798 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 789 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
799 if (unlikely(!skb)) 790 if (unlikely(!skb))
800 return skb; 791 return skb;
801
802 skb->vlan_tci = 0; 792 skb->vlan_tci = 0;
793 if (skip_hw_vlan)
794 *skip_hw_vlan = true;
803 } 795 }
804 796
805 /* Insert the outer VLAN, if any */ 797 /* Insert the outer VLAN, if any */
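The be_insert_vlan_in_pkt() rework selects the VLAN tag once (frame tag first, otherwise the port default pvid) and performs a single __vlan_put_tag() call instead of two insertion paths. A small sketch of just the tag-selection step, detached from the skb API:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Pick the tag in one place; the caller inserts it once if non-zero. */
static uint16_t choose_vlan_tag(bool frame_tagged, uint16_t frame_tag,
                                bool pvid_event, uint16_t pvid)
{
    if (frame_tagged)
        return frame_tag;
    if (pvid_event && pvid)
        return pvid;
    return 0;   /* no tag to insert */
}

int main(void)
{
    printf("%u\n", choose_vlan_tag(false, 0, true, 42));
    return 0;
}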
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index aff0310a778b..85a06037b242 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -87,6 +87,8 @@
87#define FEC_QUIRK_HAS_GBIT (1 << 3) 87#define FEC_QUIRK_HAS_GBIT (1 << 3)
88/* Controller has extend desc buffer */ 88/* Controller has extend desc buffer */
89#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4) 89#define FEC_QUIRK_HAS_BUFDESC_EX (1 << 4)
90/* Controller has hardware checksum support */
91#define FEC_QUIRK_HAS_CSUM (1 << 5)
90 92
91static struct platform_device_id fec_devtype[] = { 93static struct platform_device_id fec_devtype[] = {
92 { 94 {
@@ -105,9 +107,9 @@ static struct platform_device_id fec_devtype[] = {
105 }, { 107 }, {
106 .name = "imx6q-fec", 108 .name = "imx6q-fec",
107 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 109 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
108 FEC_QUIRK_HAS_BUFDESC_EX, 110 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM,
109 }, { 111 }, {
110 .name = "mvf-fec", 112 .name = "mvf600-fec",
111 .driver_data = FEC_QUIRK_ENET_MAC, 113 .driver_data = FEC_QUIRK_ENET_MAC,
112 }, { 114 }, {
113 /* sentinel */ 115 /* sentinel */
@@ -120,7 +122,7 @@ enum imx_fec_type {
120 IMX27_FEC, /* runs on i.mx27/35/51 */ 122 IMX27_FEC, /* runs on i.mx27/35/51 */
121 IMX28_FEC, 123 IMX28_FEC,
122 IMX6Q_FEC, 124 IMX6Q_FEC,
123 MVF_FEC, 125 MVF600_FEC,
124}; 126};
125 127
126static const struct of_device_id fec_dt_ids[] = { 128static const struct of_device_id fec_dt_ids[] = {
@@ -128,7 +130,7 @@ static const struct of_device_id fec_dt_ids[] = {
128 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], }, 130 { .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
129 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], }, 131 { .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
130 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], }, 132 { .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
131 { .compatible = "fsl,mvf-fec", .data = &fec_devtype[MVF_FEC], }, 133 { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
132 { /* sentinel */ } 134 { /* sentinel */ }
133}; 135};
134MODULE_DEVICE_TABLE(of, fec_dt_ids); 136MODULE_DEVICE_TABLE(of, fec_dt_ids);
@@ -449,7 +451,7 @@ fec_restart(struct net_device *ndev, int duplex)
449 netif_device_detach(ndev); 451 netif_device_detach(ndev);
450 napi_disable(&fep->napi); 452 napi_disable(&fep->napi);
451 netif_stop_queue(ndev); 453 netif_stop_queue(ndev);
452 netif_tx_lock(ndev); 454 netif_tx_lock_bh(ndev);
453 } 455 }
454 456
455 /* Whack a reset. We should wait for this. */ 457 /* Whack a reset. We should wait for this. */
@@ -614,10 +616,10 @@ fec_restart(struct net_device *ndev, int duplex)
614 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); 616 writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
615 617
616 if (netif_running(ndev)) { 618 if (netif_running(ndev)) {
617 netif_device_attach(ndev); 619 netif_tx_unlock_bh(ndev);
618 napi_enable(&fep->napi);
619 netif_wake_queue(ndev); 620 netif_wake_queue(ndev);
620 netif_tx_unlock(ndev); 621 napi_enable(&fep->napi);
622 netif_device_attach(ndev);
621 } 623 }
622} 624}
623 625
@@ -1744,6 +1746,8 @@ static const struct net_device_ops fec_netdev_ops = {
1744static int fec_enet_init(struct net_device *ndev) 1746static int fec_enet_init(struct net_device *ndev)
1745{ 1747{
1746 struct fec_enet_private *fep = netdev_priv(ndev); 1748 struct fec_enet_private *fep = netdev_priv(ndev);
1749 const struct platform_device_id *id_entry =
1750 platform_get_device_id(fep->pdev);
1747 struct bufdesc *cbd_base; 1751 struct bufdesc *cbd_base;
1748 1752
1749 /* Allocate memory for buffer descriptors. */ 1753 /* Allocate memory for buffer descriptors. */
@@ -1775,12 +1779,14 @@ static int fec_enet_init(struct net_device *ndev)
1775 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); 1779 writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
1776 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT); 1780 netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
1777 1781
1778 /* enable hw accelerator */ 1782 if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
1779 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 1783 /* enable hw accelerator */
1780 | NETIF_F_RXCSUM); 1784 ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
1781 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM 1785 | NETIF_F_RXCSUM);
1782 | NETIF_F_RXCSUM); 1786 ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
1783 fep->csum_flags |= FLAG_RX_CSUM_ENABLED; 1787 | NETIF_F_RXCSUM);
1788 fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
1789 }
1784 1790
1785 fec_restart(ndev, 0); 1791 fec_restart(ndev, 0);
1786 1792
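The FEC change gates the checksum-offload features on a new FEC_QUIRK_HAS_CSUM bit carried in the platform device-id table, so only i.MX6Q-class controllers advertise them. A standalone illustration of the quirk-table pattern (the entries below are reduced stand-ins, not the driver's full platform_device_id array):

#include <stdint.h>
#include <stdio.h>

#define QUIRK_HAS_CSUM  (1u << 5)   /* mirrors the new FEC quirk bit */

struct variant {
    const char *name;
    uint32_t    quirks;
};

static const struct variant variants[] = {
    { "imx6q-fec",  QUIRK_HAS_CSUM },
    { "mvf600-fec", 0 },
};

int main(void)
{
    for (unsigned i = 0; i < sizeof(variants) / sizeof(variants[0]); i++)
        printf("%s: checksum offload %s\n", variants[i].name,
               (variants[i].quirks & QUIRK_HAS_CSUM) ? "advertised" : "not advertised");
    return 0;
}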
diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c
index 576e4b858fce..083ea2b4d20a 100644
--- a/drivers/net/ethernet/freescale/gianfar_ptp.c
+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c
@@ -524,6 +524,7 @@ static int gianfar_ptp_probe(struct platform_device *dev)
524 return 0; 524 return 0;
525 525
526no_clock: 526no_clock:
527 iounmap(etsects->regs);
527no_ioremap: 528no_ioremap:
528 release_resource(etsects->rsrc); 529 release_resource(etsects->rsrc);
529no_resource: 530no_resource:
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4989481c19f0..d300a0c0eafc 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -359,10 +359,26 @@ static int emac_reset(struct emac_instance *dev)
359 } 359 }
360 360
361#ifdef CONFIG_PPC_DCR_NATIVE 361#ifdef CONFIG_PPC_DCR_NATIVE
362 /* Enable internal clock source */ 362 /*
363 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) 363 * PPC460EX/GT Embedded Processor Advanced User's Manual
364 dcri_clrset(SDR0, SDR0_ETH_CFG, 364 * section 28.10.1 Mode Register 0 (EMACx_MR0) states:
365 0, SDR0_ETH_CFG_ECS << dev->cell_index); 365 * Note: The PHY must provide a TX Clk in order to perform a soft reset
366 * of the EMAC. If none is present, select the internal clock
367 * (SDR0_ETH_CFG[EMACx_PHY_CLK] = 1).
368 * After a soft reset, select the external clock.
369 */
370 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
371 if (dev->phy_address == 0xffffffff &&
372 dev->phy_map == 0xffffffff) {
373 /* No PHY: select internal loop clock before reset */
374 dcri_clrset(SDR0, SDR0_ETH_CFG,
375 0, SDR0_ETH_CFG_ECS << dev->cell_index);
376 } else {
377 /* PHY present: select external clock before reset */
378 dcri_clrset(SDR0, SDR0_ETH_CFG,
379 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
380 }
381 }
366#endif 382#endif
367 383
368 out_be32(&p->mr0, EMAC_MR0_SRST); 384 out_be32(&p->mr0, EMAC_MR0_SRST);
@@ -370,10 +386,14 @@ static int emac_reset(struct emac_instance *dev)
370 --n; 386 --n;
371 387
372#ifdef CONFIG_PPC_DCR_NATIVE 388#ifdef CONFIG_PPC_DCR_NATIVE
373 /* Enable external clock source */ 389 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) {
374 if (emac_has_feature(dev, EMAC_FTR_460EX_PHY_CLK_FIX)) 390 if (dev->phy_address == 0xffffffff &&
375 dcri_clrset(SDR0, SDR0_ETH_CFG, 391 dev->phy_map == 0xffffffff) {
376 SDR0_ETH_CFG_ECS << dev->cell_index, 0); 392 /* No PHY: restore external clock source after reset */
393 dcri_clrset(SDR0, SDR0_ETH_CFG,
394 SDR0_ETH_CFG_ECS << dev->cell_index, 0);
395 }
396 }
377#endif 397#endif
378 398
379 if (n) { 399 if (n) {
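The EMAC reset path now picks the TX clock source from the PHY configuration: with no PHY attached (phy_address and phy_map both 0xffffffff) the internal clock is selected for the soft reset and the external clock restored afterwards, while with a PHY present the external clock is used throughout. A sketch of just the selection predicate:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* 0xffffffff in both fields means "no PHY wired to this EMAC". */
static bool use_internal_clock_for_reset(uint32_t phy_address, uint32_t phy_map)
{
    return phy_address == 0xffffffff && phy_map == 0xffffffff;
}

int main(void)
{
    printf("%s clock for soft reset\n",
           use_internal_clock_for_reset(0xffffffff, 0xffffffff)
           ? "internal" : "external");
    return 0;
}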
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
index 6ce027355fcf..abb300a31912 100644
--- a/drivers/net/ethernet/icplus/ipg.h
+++ b/drivers/net/ethernet/icplus/ipg.h
@@ -195,57 +195,57 @@ enum ipg_regs {
195/* TFD data structure masks. */ 195/* TFD data structure masks. */
196 196
197/* TFDList, TFC */ 197/* TFDList, TFC */
198#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF 198#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFFULL
199#define IPG_TFC_FRAMEID 0x000000000000FFFF 199#define IPG_TFC_FRAMEID 0x000000000000FFFFULL
200#define IPG_TFC_WORDALIGN 0x0000000000030000 200#define IPG_TFC_WORDALIGN 0x0000000000030000ULL
201#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 201#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000ULL
202#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 202#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000ULL
203#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 203#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000ULL
204#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 204#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000ULL
205#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 205#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000ULL
206#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 206#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000ULL
207#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 207#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000ULL
208#define IPG_TFC_TXINDICATE 0x0000000000400000 208#define IPG_TFC_TXINDICATE 0x0000000000400000ULL
209#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 209#define IPG_TFC_TXDMAINDICATE 0x0000000000800000ULL
210#define IPG_TFC_FRAGCOUNT 0x000000000F000000 210#define IPG_TFC_FRAGCOUNT 0x000000000F000000ULL
211#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 211#define IPG_TFC_VLANTAGINSERT 0x0000000010000000ULL
212#define IPG_TFC_TFDDONE 0x0000000080000000 212#define IPG_TFC_TFDDONE 0x0000000080000000ULL
213#define IPG_TFC_VID 0x00000FFF00000000 213#define IPG_TFC_VID 0x00000FFF00000000ULL
214#define IPG_TFC_CFI 0x0000100000000000 214#define IPG_TFC_CFI 0x0000100000000000ULL
215#define IPG_TFC_USERPRIORITY 0x0000E00000000000 215#define IPG_TFC_USERPRIORITY 0x0000E00000000000ULL
216 216
217/* TFDList, FragInfo */ 217/* TFDList, FragInfo */
218#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF 218#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
219#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF 219#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFFULL
220#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL 220#define IPG_TFI_FRAGLEN 0xFFFF000000000000ULL
221 221
222/* RFD data structure masks. */ 222/* RFD data structure masks. */
223 223
224/* RFDList, RFS */ 224/* RFDList, RFS */
225#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF 225#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFFULL
226#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF 226#define IPG_RFS_RXFRAMELEN 0x000000000000FFFFULL
227#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 227#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000ULL
228#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 228#define IPG_RFS_RXRUNTFRAME 0x0000000000020000ULL
229#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 229#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000ULL
230#define IPG_RFS_RXFCSERROR 0x0000000000080000 230#define IPG_RFS_RXFCSERROR 0x0000000000080000ULL
231#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 231#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000ULL
232#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 232#define IPG_RFS_RXLENGTHERROR 0x0000000000200000ULL
233#define IPG_RFS_VLANDETECTED 0x0000000000400000 233#define IPG_RFS_VLANDETECTED 0x0000000000400000ULL
234#define IPG_RFS_TCPDETECTED 0x0000000000800000 234#define IPG_RFS_TCPDETECTED 0x0000000000800000ULL
235#define IPG_RFS_TCPERROR 0x0000000001000000 235#define IPG_RFS_TCPERROR 0x0000000001000000ULL
236#define IPG_RFS_UDPDETECTED 0x0000000002000000 236#define IPG_RFS_UDPDETECTED 0x0000000002000000ULL
237#define IPG_RFS_UDPERROR 0x0000000004000000 237#define IPG_RFS_UDPERROR 0x0000000004000000ULL
238#define IPG_RFS_IPDETECTED 0x0000000008000000 238#define IPG_RFS_IPDETECTED 0x0000000008000000ULL
239#define IPG_RFS_IPERROR 0x0000000010000000 239#define IPG_RFS_IPERROR 0x0000000010000000ULL
240#define IPG_RFS_FRAMESTART 0x0000000020000000 240#define IPG_RFS_FRAMESTART 0x0000000020000000ULL
241#define IPG_RFS_FRAMEEND 0x0000000040000000 241#define IPG_RFS_FRAMEEND 0x0000000040000000ULL
242#define IPG_RFS_RFDDONE 0x0000000080000000 242#define IPG_RFS_RFDDONE 0x0000000080000000ULL
243#define IPG_RFS_TCI 0x0000FFFF00000000 243#define IPG_RFS_TCI 0x0000FFFF00000000ULL
244 244
245/* RFDList, FragInfo */ 245/* RFDList, FragInfo */
246#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF 246#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFFULL
247#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF 247#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFFULL
248#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL 248#define IPG_RFI_FRAGLEN 0xFFFF000000000000ULL
249 249
250/* I/O Register masks. */ 250/* I/O Register masks. */
251 251
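Appending ULL makes the 64-bit, unsigned type of these descriptor masks explicit. One pitfall the suffix guards against, assuming a typical platform with 32-bit int: a constant such as 0x0000000080000000 otherwise has type unsigned int, so its complement silently clears the upper 32 bits of a 64-bit field.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define DONE_BIT_PLAIN 0x0000000080000000      /* unsigned int when int is 32-bit */
#define DONE_BIT_ULL   0x0000000080000000ULL   /* always unsigned long long */

int main(void)
{
    uint64_t rfs = 0xDEADBEEF80000001ULL;

    /* ~DONE_BIT_PLAIN is computed in 32 bits: prints 0000000000000001 */
    printf("%016" PRIx64 "\n", rfs & ~DONE_BIT_PLAIN);
    /* ~DONE_BIT_ULL keeps all 64 bits: prints deadbeef00000001 */
    printf("%016" PRIx64 "\n", rfs & ~DONE_BIT_ULL);
    return 0;
}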
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index d0afeea181fb..2ad1494efbb3 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -867,7 +867,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
867 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index); 867 struct netdev_queue *nq = netdev_get_tx_queue(mp->dev, txq->index);
868 int reclaimed; 868 int reclaimed;
869 869
870 __netif_tx_lock(nq, smp_processor_id()); 870 __netif_tx_lock_bh(nq);
871 871
872 reclaimed = 0; 872 reclaimed = 0;
873 while (reclaimed < budget && txq->tx_desc_count > 0) { 873 while (reclaimed < budget && txq->tx_desc_count > 0) {
@@ -913,7 +913,7 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
913 dev_kfree_skb(skb); 913 dev_kfree_skb(skb);
914 } 914 }
915 915
916 __netif_tx_unlock(nq); 916 __netif_tx_unlock_bh(nq);
917 917
918 if (reclaimed < budget) 918 if (reclaimed < budget)
919 mp->work_tx &= ~(1 << txq->index); 919 mp->work_tx &= ~(1 << txq->index);
@@ -2745,7 +2745,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2745 2745
2746 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task); 2746 INIT_WORK(&mp->tx_timeout_task, tx_timeout_task);
2747 2747
2748 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, 128); 2748 netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
2749 2749
2750 init_timer(&mp->rx_oom); 2750 init_timer(&mp->rx_oom);
2751 mp->rx_oom.data = (unsigned long)mp; 2751 mp->rx_oom.data = (unsigned long)mp;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 91f2b2c43c12..d3f508697a3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -60,7 +60,7 @@ void mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride,
60 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6; 60 context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
61 if (user_prio >= 0) { 61 if (user_prio >= 0) {
62 context->pri_path.sched_queue |= user_prio << 3; 62 context->pri_path.sched_queue |= user_prio << 3;
63 context->pri_path.feup = 1 << 6; 63 context->pri_path.feup = MLX4_FEUP_FORCE_ETH_UP;
64 } 64 }
65 context->pri_path.counter_index = 0xff; 65 context->pri_path.counter_index = 0xff;
66 context->cqn_send = cpu_to_be32(cqn); 66 context->cqn_send = cpu_to_be32(cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index b147bdd40768..58a8e535d698 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -131,7 +131,9 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
131 [2] = "RSS XOR Hash Function support", 131 [2] = "RSS XOR Hash Function support",
132 [3] = "Device manage flow steering support", 132 [3] = "Device manage flow steering support",
133 [4] = "Automatic MAC reassignment support", 133 [4] = "Automatic MAC reassignment support",
134 [5] = "Time stamping support" 134 [5] = "Time stamping support",
135 [6] = "VST (control vlan insertion/stripping) support",
136 [7] = "FSM (MAC anti-spoofing) support"
135 }; 137 };
136 int i; 138 int i;
137 139
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index e12e0d2e0ee0..1157f028a90f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -372,24 +372,29 @@ static int update_vport_qp_param(struct mlx4_dev *dev,
372 if (MLX4_QP_ST_RC == qp_type) 372 if (MLX4_QP_ST_RC == qp_type)
373 return -EINVAL; 373 return -EINVAL;
374 374
375 /* force strip vlan by clear vsd */
376 qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
377 if (0 != vp_oper->state.default_vlan) {
378 qpc->pri_path.vlan_control =
379 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
380 MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
381 MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
382 } else { /* priority tagged */
383 qpc->pri_path.vlan_control =
384 MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
385 MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
386 }
387
388 qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
375 qpc->pri_path.vlan_index = vp_oper->vlan_idx; 389 qpc->pri_path.vlan_index = vp_oper->vlan_idx;
376 qpc->pri_path.fl = (1 << 6) | (1 << 2); /* set cv bit and hide_cqe_vlan bit*/ 390 qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
377 qpc->pri_path.feup |= 1 << 3; /* set fvl bit */ 391 qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
378 qpc->pri_path.sched_queue &= 0xC7; 392 qpc->pri_path.sched_queue &= 0xC7;
379 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3; 393 qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
380 mlx4_dbg(dev, "qp %d port %d Q 0x%x set vlan to %d vidx %d feup %x fl %x\n",
381 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
382 (int)(qpc->pri_path.sched_queue), vp_oper->state.default_vlan,
383 vp_oper->vlan_idx, (int)(qpc->pri_path.feup),
384 (int)(qpc->pri_path.fl));
385 } 394 }
386 if (vp_oper->state.spoofchk) { 395 if (vp_oper->state.spoofchk) {
387 qpc->pri_path.feup |= 1 << 5; /* set fsm bit */; 396 qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
388 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; 397 qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
389 mlx4_dbg(dev, "spoof qp %d port %d feup 0x%x, myLmc 0x%x mindx %d\n",
390 be32_to_cpu(qpc->local_qpn) & 0xffffff, port,
391 (int)qpc->pri_path.feup, (int)qpc->pri_path.grh_mylmc,
392 vp_oper->mac_idx);
393 } 398 }
394 return 0; 399 return 0;
395} 400}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 90c253b145ef..c1b693cb3df3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -429,6 +429,7 @@ struct qlcnic_hardware_context {
429 429
430 u16 port_type; 430 u16 port_type;
431 u16 board_type; 431 u16 board_type;
432 u16 supported_type;
432 433
433 u16 link_speed; 434 u16 link_speed;
434 u16 link_duplex; 435 u16 link_duplex;
@@ -906,8 +907,11 @@ struct qlcnic_ipaddr {
906#define QLCNIC_FW_HANG 0x4000 907#define QLCNIC_FW_HANG 0x4000
907#define QLCNIC_FW_LRO_MSS_CAP 0x8000 908#define QLCNIC_FW_LRO_MSS_CAP 0x8000
908#define QLCNIC_TX_INTR_SHARED 0x10000 909#define QLCNIC_TX_INTR_SHARED 0x10000
910#define QLCNIC_APP_CHANGED_FLAGS 0x20000
909#define QLCNIC_IS_MSI_FAMILY(adapter) \ 911#define QLCNIC_IS_MSI_FAMILY(adapter) \
910 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 912 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
913#define QLCNIC_IS_TSO_CAPABLE(adapter) \
914 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
911 915
912#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4 916#define QLCNIC_DEF_NUM_STS_DESC_RINGS 4
913#define QLCNIC_MSIX_TBL_SPACE 8192 917#define QLCNIC_MSIX_TBL_SPACE 8192
@@ -1033,6 +1037,7 @@ struct qlcnic_adapter {
1033 spinlock_t rx_mac_learn_lock; 1037 spinlock_t rx_mac_learn_lock;
1034 u32 file_prd_off; /*File fw product offset*/ 1038 u32 file_prd_off; /*File fw product offset*/
1035 u32 fw_version; 1039 u32 fw_version;
1040 u32 offload_flags;
1036 const struct firmware *fw; 1041 const struct firmware *fw;
1037}; 1042};
1038 1043
@@ -1514,6 +1519,7 @@ void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
1514void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter); 1519void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
1515void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter); 1520void qlcnic_82xx_add_sysfs(struct qlcnic_adapter *adapter);
1516void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter); 1521void qlcnic_82xx_remove_sysfs(struct qlcnic_adapter *adapter);
1522int qlcnic_82xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
1517 1523
1518int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32); 1524int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *, u32);
1519int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32); 1525int qlcnicvf_config_led(struct qlcnic_adapter *, u32, u32);
@@ -1540,6 +1546,8 @@ void qlcnic_add_lb_filter(struct qlcnic_adapter *, struct sk_buff *, int, u16);
1540int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter); 1546int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter);
1541int qlcnic_read_mac_addr(struct qlcnic_adapter *); 1547int qlcnic_read_mac_addr(struct qlcnic_adapter *);
1542int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int); 1548int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
1549void qlcnic_set_netdev_features(struct qlcnic_adapter *,
1550 struct qlcnic_esw_func_cfg *);
1543void qlcnic_sriov_vf_schedule_multi(struct net_device *); 1551void qlcnic_sriov_vf_schedule_multi(struct net_device *);
1544void qlcnic_vf_add_mc_list(struct net_device *, u16); 1552void qlcnic_vf_add_mc_list(struct net_device *, u16);
1545 1553
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index ea790a93ee7c..b4ff1e35a11d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -696,15 +696,14 @@ u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *adapter)
696 return 1; 696 return 1;
697} 697}
698 698
699u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter) 699u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *adapter, u32 *wait_time)
700{ 700{
701 u32 data; 701 u32 data;
702 unsigned long wait_time = 0;
703 struct qlcnic_hardware_context *ahw = adapter->ahw; 702 struct qlcnic_hardware_context *ahw = adapter->ahw;
704 /* wait for mailbox completion */ 703 /* wait for mailbox completion */
705 do { 704 do {
706 data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL); 705 data = QLCRDX(ahw, QLCNIC_FW_MBX_CTRL);
707 if (++wait_time > QLCNIC_MBX_TIMEOUT) { 706 if (++(*wait_time) > QLCNIC_MBX_TIMEOUT) {
708 data = QLCNIC_RCODE_TIMEOUT; 707 data = QLCNIC_RCODE_TIMEOUT;
709 break; 708 break;
710 } 709 }
@@ -720,8 +719,8 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
720 u16 opcode; 719 u16 opcode;
721 u8 mbx_err_code; 720 u8 mbx_err_code;
722 unsigned long flags; 721 unsigned long flags;
723 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd;
724 struct qlcnic_hardware_context *ahw = adapter->ahw; 722 struct qlcnic_hardware_context *ahw = adapter->ahw;
723 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, wait_time = 0;
725 724
726 opcode = LSW(cmd->req.arg[0]); 725 opcode = LSW(cmd->req.arg[0]);
727 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) { 726 if (!test_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status)) {
@@ -754,15 +753,13 @@ int qlcnic_83xx_mbx_op(struct qlcnic_adapter *adapter,
754 /* Signal FW about the impending command */ 753 /* Signal FW about the impending command */
755 QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER); 754 QLCWRX(ahw, QLCNIC_HOST_MBX_CTRL, QLCNIC_SET_OWNER);
756poll: 755poll:
757 rsp = qlcnic_83xx_mbx_poll(adapter); 756 rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
758 if (rsp != QLCNIC_RCODE_TIMEOUT) { 757 if (rsp != QLCNIC_RCODE_TIMEOUT) {
759 /* Get the FW response data */ 758 /* Get the FW response data */
760 fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); 759 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
761 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { 760 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
762 __qlcnic_83xx_process_aen(adapter); 761 __qlcnic_83xx_process_aen(adapter);
763 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); 762 goto poll;
764 if (mbx_val)
765 goto poll;
766 } 763 }
767 mbx_err_code = QLCNIC_MBX_STATUS(fw_data); 764 mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
768 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); 765 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
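Passing wait_time in from the caller means that when the mailbox poll is re-entered after servicing an async event, the elapsed count carries over, so the command still times out within one overall QLCNIC_MBX_TIMEOUT budget instead of restarting the clock. A standalone sketch of the caller-owned-counter pattern:

#include <stdbool.h>
#include <stdio.h>

#define POLL_LIMIT 5000   /* mirrors the reduced QLCNIC_MBX_TIMEOUT */

/* Returns true when a response arrives; here it never does, so the
 * caller-owned counter is what ends the loop. */
static bool poll_once(unsigned int *elapsed, bool *timed_out)
{
    if (++(*elapsed) > POLL_LIMIT) {
        *timed_out = true;
        return false;
    }
    /* ... a real driver would read the mailbox control register here ... */
    return false;
}

int main(void)
{
    unsigned int elapsed = 0;
    bool timed_out = false;

    /* Re-entering this loop (e.g. after an async event) keeps consuming the
     * same elapsed budget instead of starting again from zero. */
    while (!poll_once(&elapsed, &timed_out) && !timed_out)
        ;
    printf("gave up after %u polls\n", elapsed);
    return 0;
}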
@@ -1276,11 +1273,13 @@ out:
1276 return err; 1273 return err;
1277} 1274}
1278 1275
1279static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test) 1276static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
1277 int num_sds_ring)
1280{ 1278{
1281 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1279 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1282 struct qlcnic_host_sds_ring *sds_ring; 1280 struct qlcnic_host_sds_ring *sds_ring;
1283 struct qlcnic_host_rds_ring *rds_ring; 1281 struct qlcnic_host_rds_ring *rds_ring;
1282 u16 adapter_state = adapter->is_up;
1284 u8 ring; 1283 u8 ring;
1285 int ret; 1284 int ret;
1286 1285
@@ -1304,6 +1303,10 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test)
1304 ret = qlcnic_fw_create_ctx(adapter); 1303 ret = qlcnic_fw_create_ctx(adapter);
1305 if (ret) { 1304 if (ret) {
1306 qlcnic_detach(adapter); 1305 qlcnic_detach(adapter);
1306 if (adapter_state == QLCNIC_ADAPTER_UP_MAGIC) {
1307 adapter->max_sds_rings = num_sds_ring;
1308 qlcnic_attach(adapter);
1309 }
1307 netif_device_attach(netdev); 1310 netif_device_attach(netdev);
1308 return ret; 1311 return ret;
1309 } 1312 }
@@ -1596,7 +1599,8 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
1596 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 1599 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1597 return -EBUSY; 1600 return -EBUSY;
1598 1601
1599 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST); 1602 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST,
1603 max_sds_rings);
1600 if (ret) 1604 if (ret)
1601 goto fail_diag_alloc; 1605 goto fail_diag_alloc;
1602 1606
@@ -2830,6 +2834,23 @@ int qlcnic_83xx_test_link(struct qlcnic_adapter *adapter)
2830 break; 2834 break;
2831 } 2835 }
2832 config = cmd.rsp.arg[3]; 2836 config = cmd.rsp.arg[3];
2837 if (QLC_83XX_SFP_PRESENT(config)) {
2838 switch (ahw->module_type) {
2839 case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
2840 case LINKEVENT_MODULE_OPTICAL_SRLR:
2841 case LINKEVENT_MODULE_OPTICAL_LRM:
2842 case LINKEVENT_MODULE_OPTICAL_SFP_1G:
2843 ahw->supported_type = PORT_FIBRE;
2844 break;
2845 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
2846 case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
2847 case LINKEVENT_MODULE_TWINAX:
2848 ahw->supported_type = PORT_TP;
2849 break;
2850 default:
2851 ahw->supported_type = PORT_OTHER;
2852 }
2853 }
2833 if (config & 1) 2854 if (config & 1)
2834 err = 1; 2855 err = 1;
2835 } 2856 }
@@ -2838,7 +2859,8 @@ out:
2838 return config; 2859 return config;
2839} 2860}
2840 2861
2841int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter) 2862int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter,
2863 struct ethtool_cmd *ecmd)
2842{ 2864{
2843 u32 config = 0; 2865 u32 config = 0;
2844 int status = 0; 2866 int status = 0;
@@ -2851,6 +2873,54 @@ int qlcnic_83xx_get_settings(struct qlcnic_adapter *adapter)
2851 ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config); 2873 ahw->module_type = QLC_83XX_SFP_MODULE_TYPE(config);
2852 /* hard code until there is a way to get it from flash */ 2874 /* hard code until there is a way to get it from flash */
2853 ahw->board_type = QLCNIC_BRDTYPE_83XX_10G; 2875 ahw->board_type = QLCNIC_BRDTYPE_83XX_10G;
2876
2877 if (netif_running(adapter->netdev) && ahw->has_link_events) {
2878 ethtool_cmd_speed_set(ecmd, ahw->link_speed);
2879 ecmd->duplex = ahw->link_duplex;
2880 ecmd->autoneg = ahw->link_autoneg;
2881 } else {
2882 ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
2883 ecmd->duplex = DUPLEX_UNKNOWN;
2884 ecmd->autoneg = AUTONEG_DISABLE;
2885 }
2886
2887 if (ahw->port_type == QLCNIC_XGBE) {
2888 ecmd->supported = SUPPORTED_1000baseT_Full;
2889 ecmd->advertising = ADVERTISED_1000baseT_Full;
2890 } else {
2891 ecmd->supported = (SUPPORTED_10baseT_Half |
2892 SUPPORTED_10baseT_Full |
2893 SUPPORTED_100baseT_Half |
2894 SUPPORTED_100baseT_Full |
2895 SUPPORTED_1000baseT_Half |
2896 SUPPORTED_1000baseT_Full);
2897 ecmd->advertising = (ADVERTISED_100baseT_Half |
2898 ADVERTISED_100baseT_Full |
2899 ADVERTISED_1000baseT_Half |
2900 ADVERTISED_1000baseT_Full);
2901 }
2902
2903 switch (ahw->supported_type) {
2904 case PORT_FIBRE:
2905 ecmd->supported |= SUPPORTED_FIBRE;
2906 ecmd->advertising |= ADVERTISED_FIBRE;
2907 ecmd->port = PORT_FIBRE;
2908 ecmd->transceiver = XCVR_EXTERNAL;
2909 break;
2910 case PORT_TP:
2911 ecmd->supported |= SUPPORTED_TP;
2912 ecmd->advertising |= ADVERTISED_TP;
2913 ecmd->port = PORT_TP;
2914 ecmd->transceiver = XCVR_INTERNAL;
2915 break;
2916 default:
2917 ecmd->supported |= SUPPORTED_FIBRE;
2918 ecmd->advertising |= ADVERTISED_FIBRE;
2919 ecmd->port = PORT_OTHER;
2920 ecmd->transceiver = XCVR_EXTERNAL;
2921 break;
2922 }
2923 ecmd->phy_address = ahw->physical_port;
2854 return status; 2924 return status;
2855} 2925}
2856 2926
@@ -3046,7 +3116,8 @@ int qlcnic_83xx_interrupt_test(struct net_device *netdev)
3046 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 3116 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
3047 return -EIO; 3117 return -EIO;
3048 3118
3049 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST); 3119 ret = qlcnic_83xx_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST,
3120 max_sds_rings);
3050 if (ret) 3121 if (ret)
3051 goto fail_diag_irq; 3122 goto fail_diag_irq;
3052 3123
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 1f1d85e6f2af..f5db67fc9f55 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -603,7 +603,7 @@ int qlcnic_83xx_get_vnic_pf_info(struct qlcnic_adapter *, struct qlcnic_info *);
603 603
604void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *); 604void qlcnic_83xx_get_minidump_template(struct qlcnic_adapter *);
605void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data); 605void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data);
606int qlcnic_83xx_get_settings(struct qlcnic_adapter *); 606int qlcnic_83xx_get_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
607int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *); 607int qlcnic_83xx_set_settings(struct qlcnic_adapter *, struct ethtool_cmd *);
608void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *, 608void qlcnic_83xx_get_pauseparam(struct qlcnic_adapter *,
609 struct ethtool_pauseparam *); 609 struct ethtool_pauseparam *);
@@ -620,7 +620,7 @@ int qlcnic_83xx_flash_test(struct qlcnic_adapter *);
620int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *); 620int qlcnic_83xx_enable_flash_write(struct qlcnic_adapter *);
621int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *); 621int qlcnic_83xx_disable_flash_write(struct qlcnic_adapter *);
622u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *); 622u32 qlcnic_83xx_mac_rcode(struct qlcnic_adapter *);
623u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *); 623u32 qlcnic_83xx_mbx_poll(struct qlcnic_adapter *, u32 *);
624void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *); 624void qlcnic_83xx_enable_mbx_poll(struct qlcnic_adapter *);
625void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *); 625void qlcnic_83xx_disable_mbx_poll(struct qlcnic_adapter *);
626#endif 626#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ab1d8d99cbd5..5e7fb1dfb97b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -382,8 +382,6 @@ static int qlcnic_83xx_idc_tx_soft_reset(struct qlcnic_adapter *adapter)
382 clear_bit(__QLCNIC_RESETTING, &adapter->state); 382 clear_bit(__QLCNIC_RESETTING, &adapter->state);
383 dev_err(&adapter->pdev->dev, "%s:\n", __func__); 383 dev_err(&adapter->pdev->dev, "%s:\n", __func__);
384 384
385 adapter->netdev->trans_start = jiffies;
386
387 return 0; 385 return 0;
388} 386}
389 387
@@ -435,10 +433,6 @@ static void qlcnic_83xx_idc_attach_driver(struct qlcnic_adapter *adapter)
435 } 433 }
436done: 434done:
437 netif_device_attach(netdev); 435 netif_device_attach(netdev);
438 if (netif_running(netdev)) {
439 netif_carrier_on(netdev);
440 netif_wake_queue(netdev);
441 }
442} 436}
443 437
444static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter, 438static int qlcnic_83xx_idc_enter_failed_state(struct qlcnic_adapter *adapter,
@@ -642,15 +636,21 @@ static int qlcnic_83xx_idc_reattach_driver(struct qlcnic_adapter *adapter)
642 636
643static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter) 637static void qlcnic_83xx_idc_update_idc_params(struct qlcnic_adapter *adapter)
644{ 638{
639 struct qlcnic_hardware_context *ahw = adapter->ahw;
640
645 qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1); 641 qlcnic_83xx_idc_update_drv_presence_reg(adapter, 1, 1);
646 clear_bit(__QLCNIC_RESETTING, &adapter->state);
647 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status); 642 set_bit(QLC_83XX_MBX_READY, &adapter->ahw->idc.status);
648 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1); 643 qlcnic_83xx_idc_update_audit_reg(adapter, 0, 1);
649 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status); 644 set_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
650 adapter->ahw->idc.quiesce_req = 0; 645
651 adapter->ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY; 646 ahw->idc.quiesce_req = 0;
652 adapter->ahw->idc.err_code = 0; 647 ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
653 adapter->ahw->idc.collect_dump = 0; 648 ahw->idc.err_code = 0;
649 ahw->idc.collect_dump = 0;
650 ahw->reset_context = 0;
651 adapter->tx_timeo_cnt = 0;
652
653 clear_bit(__QLCNIC_RESETTING, &adapter->state);
654} 654}
655 655
656/** 656/**
@@ -851,6 +851,7 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
851 /* Check for soft reset request */ 851 /* Check for soft reset request */
852 if (ahw->reset_context && 852 if (ahw->reset_context &&
853 !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) { 853 !(val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY)) {
854 adapter->ahw->reset_context = 0;
854 qlcnic_83xx_idc_tx_soft_reset(adapter); 855 qlcnic_83xx_idc_tx_soft_reset(adapter);
855 return ret; 856 return ret;
856 } 857 }
@@ -914,6 +915,7 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
914static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter) 915static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
915{ 916{
916 dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__); 917 dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
918 clear_bit(__QLCNIC_RESETTING, &adapter->state);
917 adapter->ahw->idc.err_code = -EIO; 919 adapter->ahw->idc.err_code = -EIO;
918 920
919 return 0; 921 return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 08efb4635007..f67652de5a63 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -131,12 +131,13 @@ static const char qlcnic_83xx_rx_stats_strings[][ETH_GSTRING_LEN] = {
131 "ctx_lro_pkt_cnt", 131 "ctx_lro_pkt_cnt",
132 "ctx_ip_csum_error", 132 "ctx_ip_csum_error",
133 "ctx_rx_pkts_wo_ctx", 133 "ctx_rx_pkts_wo_ctx",
134 "ctx_rx_pkts_dropped_wo_sts", 134 "ctx_rx_pkts_drop_wo_sds_on_card",
135 "ctx_rx_pkts_drop_wo_sds_on_host",
135 "ctx_rx_osized_pkts", 136 "ctx_rx_osized_pkts",
136 "ctx_rx_pkts_dropped_wo_rds", 137 "ctx_rx_pkts_dropped_wo_rds",
137 "ctx_rx_unexpected_mcast_pkts", 138 "ctx_rx_unexpected_mcast_pkts",
138 "ctx_invalid_mac_address", 139 "ctx_invalid_mac_address",
139 "ctx_rx_rds_ring_prim_attemoted", 140 "ctx_rx_rds_ring_prim_attempted",
140 "ctx_rx_rds_ring_prim_success", 141 "ctx_rx_rds_ring_prim_success",
141 "ctx_num_lro_flows_added", 142 "ctx_num_lro_flows_added",
142 "ctx_num_lro_flows_removed", 143 "ctx_num_lro_flows_removed",
@@ -251,6 +252,18 @@ static int
251qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 252qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
252{ 253{
253 struct qlcnic_adapter *adapter = netdev_priv(dev); 254 struct qlcnic_adapter *adapter = netdev_priv(dev);
255
256 if (qlcnic_82xx_check(adapter))
257 return qlcnic_82xx_get_settings(adapter, ecmd);
258 else if (qlcnic_83xx_check(adapter))
259 return qlcnic_83xx_get_settings(adapter, ecmd);
260
261 return -EIO;
262}
263
264int qlcnic_82xx_get_settings(struct qlcnic_adapter *adapter,
265 struct ethtool_cmd *ecmd)
266{
254 struct qlcnic_hardware_context *ahw = adapter->ahw; 267 struct qlcnic_hardware_context *ahw = adapter->ahw;
255 u32 speed, reg; 268 u32 speed, reg;
256 int check_sfp_module = 0; 269 int check_sfp_module = 0;
@@ -276,10 +289,7 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
276 289
277 } else if (adapter->ahw->port_type == QLCNIC_XGBE) { 290 } else if (adapter->ahw->port_type == QLCNIC_XGBE) {
278 u32 val = 0; 291 u32 val = 0;
279 if (qlcnic_83xx_check(adapter)) 292 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
280 qlcnic_83xx_get_settings(adapter);
281 else
282 val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
283 293
284 if (val == QLCNIC_PORT_MODE_802_3_AP) { 294 if (val == QLCNIC_PORT_MODE_802_3_AP) {
285 ecmd->supported = SUPPORTED_1000baseT_Full; 295 ecmd->supported = SUPPORTED_1000baseT_Full;
@@ -289,16 +299,13 @@ qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
289 ecmd->advertising = ADVERTISED_10000baseT_Full; 299 ecmd->advertising = ADVERTISED_10000baseT_Full;
290 } 300 }
291 301
292 if (netif_running(dev) && adapter->ahw->has_link_events) { 302 if (netif_running(adapter->netdev) && ahw->has_link_events) {
293 if (qlcnic_82xx_check(adapter)) { 303 reg = QLCRD32(adapter, P3P_LINK_SPEED_REG(pcifn));
294 reg = QLCRD32(adapter, 304 speed = P3P_LINK_SPEED_VAL(pcifn, reg);
295 P3P_LINK_SPEED_REG(pcifn)); 305 ahw->link_speed = speed * P3P_LINK_SPEED_MHZ;
296 speed = P3P_LINK_SPEED_VAL(pcifn, reg); 306 ethtool_cmd_speed_set(ecmd, ahw->link_speed);
297 ahw->link_speed = speed * P3P_LINK_SPEED_MHZ; 307 ecmd->autoneg = ahw->link_autoneg;
298 } 308 ecmd->duplex = ahw->link_duplex;
299 ethtool_cmd_speed_set(ecmd, adapter->ahw->link_speed);
300 ecmd->autoneg = adapter->ahw->link_autoneg;
301 ecmd->duplex = adapter->ahw->link_duplex;
302 goto skip; 309 goto skip;
303 } 310 }
304 311
@@ -340,8 +347,8 @@ skip:
340 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT: 347 case QLCNIC_BRDTYPE_P3P_10G_SFP_QT:
341 ecmd->advertising |= ADVERTISED_TP; 348 ecmd->advertising |= ADVERTISED_TP;
342 ecmd->supported |= SUPPORTED_TP; 349 ecmd->supported |= SUPPORTED_TP;
343 check_sfp_module = netif_running(dev) && 350 check_sfp_module = netif_running(adapter->netdev) &&
344 adapter->ahw->has_link_events; 351 ahw->has_link_events;
345 case QLCNIC_BRDTYPE_P3P_10G_XFP: 352 case QLCNIC_BRDTYPE_P3P_10G_XFP:
346 ecmd->supported |= SUPPORTED_FIBRE; 353 ecmd->supported |= SUPPORTED_FIBRE;
347 ecmd->advertising |= ADVERTISED_FIBRE; 354 ecmd->advertising |= ADVERTISED_FIBRE;
@@ -355,8 +362,8 @@ skip:
355 ecmd->advertising |= 362 ecmd->advertising |=
356 (ADVERTISED_FIBRE | ADVERTISED_TP); 363 (ADVERTISED_FIBRE | ADVERTISED_TP);
357 ecmd->port = PORT_FIBRE; 364 ecmd->port = PORT_FIBRE;
358 check_sfp_module = netif_running(dev) && 365 check_sfp_module = netif_running(adapter->netdev) &&
359 adapter->ahw->has_link_events; 366 ahw->has_link_events;
360 } else { 367 } else {
361 ecmd->autoneg = AUTONEG_ENABLE; 368 ecmd->autoneg = AUTONEG_ENABLE;
362 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg); 369 ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
@@ -365,13 +372,6 @@ skip:
365 ecmd->port = PORT_TP; 372 ecmd->port = PORT_TP;
366 } 373 }
367 break; 374 break;
368 case QLCNIC_BRDTYPE_83XX_10G:
369 ecmd->autoneg = AUTONEG_DISABLE;
370 ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
371 ecmd->advertising |= (ADVERTISED_FIBRE | ADVERTISED_TP);
372 ecmd->port = PORT_FIBRE;
373 check_sfp_module = netif_running(dev) && ahw->has_link_events;
374 break;
375 default: 375 default:
376 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n", 376 dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
377 adapter->ahw->board_type); 377 adapter->ahw->board_type);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 6a6512ba9f38..106a12f2a02f 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -973,16 +973,57 @@ int qlcnic_change_mtu(struct net_device *netdev, int mtu)
973 return rc; 973 return rc;
974} 974}
975 975
976static netdev_features_t qlcnic_process_flags(struct qlcnic_adapter *adapter,
977 netdev_features_t features)
978{
979 u32 offload_flags = adapter->offload_flags;
980
981 if (offload_flags & BIT_0) {
982 features |= NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
983 NETIF_F_IPV6_CSUM;
984 adapter->rx_csum = 1;
985 if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
986 if (!(offload_flags & BIT_1))
987 features &= ~NETIF_F_TSO;
988 else
989 features |= NETIF_F_TSO;
990
991 if (!(offload_flags & BIT_2))
992 features &= ~NETIF_F_TSO6;
993 else
994 features |= NETIF_F_TSO6;
995 }
996 } else {
997 features &= ~(NETIF_F_RXCSUM |
998 NETIF_F_IP_CSUM |
999 NETIF_F_IPV6_CSUM);
1000
1001 if (QLCNIC_IS_TSO_CAPABLE(adapter))
1002 features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
1003 adapter->rx_csum = 0;
1004 }
1005
1006 return features;
1007}
976 1008
977netdev_features_t qlcnic_fix_features(struct net_device *netdev, 1009netdev_features_t qlcnic_fix_features(struct net_device *netdev,
978 netdev_features_t features) 1010 netdev_features_t features)
979{ 1011{
980 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1012 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1013 netdev_features_t changed;
981 1014
982 if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) && 1015 if (qlcnic_82xx_check(adapter) &&
983 qlcnic_82xx_check(adapter)) { 1016 (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
984 netdev_features_t changed = features ^ netdev->features; 1017 if (adapter->flags & QLCNIC_APP_CHANGED_FLAGS) {
985 features ^= changed & (NETIF_F_ALL_CSUM | NETIF_F_RXCSUM); 1018 features = qlcnic_process_flags(adapter, features);
1019 } else {
1020 changed = features ^ netdev->features;
1021 features ^= changed & (NETIF_F_RXCSUM |
1022 NETIF_F_IP_CSUM |
1023 NETIF_F_IPV6_CSUM |
1024 NETIF_F_TSO |
1025 NETIF_F_TSO6);
1026 }
986 } 1027 }
987 1028
988 if (!(features & NETIF_F_RXCSUM)) 1029 if (!(features & NETIF_F_RXCSUM))
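qlcnic_process_flags() rebuilds the feature mask from the application-supplied offload bits (BIT_0 checksum, BIT_1 TSO, BIT_2 TSO6) whenever the app-changed flag is set. A reduced, compilable model of that derivation, with invented F_* flags standing in for netdev_features_t bits:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical feature bits, standing in for NETIF_F_* flags. */
#define F_RXCSUM (1u << 0)
#define F_TXCSUM (1u << 1)
#define F_TSO    (1u << 2)
#define F_TSO6   (1u << 3)

/* Offload request bits as used by the patch: bit 0 = checksum,
 * bit 1 = TSO, bit 2 = TSO6. */
static uint32_t apply_offload_flags(uint32_t features, uint32_t offload, int tso_capable)
{
    if (offload & 0x1) {
        features |= F_RXCSUM | F_TXCSUM;
        if (tso_capable) {
            features = (offload & 0x2) ? (features | F_TSO) : (features & ~F_TSO);
            features = (offload & 0x4) ? (features | F_TSO6) : (features & ~F_TSO6);
        }
    } else {
        features &= ~(F_RXCSUM | F_TXCSUM);
        if (tso_capable)
            features &= ~(F_TSO | F_TSO6);
    }
    return features;
}

int main(void)
{
    /* Checksum + TSO requested, TSO6 not: TSO6 is stripped from the mask. */
    printf("0x%x\n", apply_offload_flags(F_TSO | F_TSO6, 0x1 | 0x2, 1));
    return 0;
}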
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 95b1b5732838..b6818f4356b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -134,7 +134,7 @@ struct qlcnic_mailbox_metadata {
134 134
135#define QLCNIC_SET_OWNER 1 135#define QLCNIC_SET_OWNER 1
136#define QLCNIC_CLR_OWNER 0 136#define QLCNIC_CLR_OWNER 0
137#define QLCNIC_MBX_TIMEOUT 10000 137#define QLCNIC_MBX_TIMEOUT 5000
138 138
139#define QLCNIC_MBX_RSP_OK 1 139#define QLCNIC_MBX_RSP_OK 1
140#define QLCNIC_MBX_PORT_RSP_OK 0x1a 140#define QLCNIC_MBX_PORT_RSP_OK 0x1a
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 264d5a4f8153..aeb26a850679 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -37,24 +37,24 @@ MODULE_PARM_DESC(qlcnic_mac_learn,
37 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)"); 37 "Mac Filter (0=learning is disabled, 1=Driver learning is enabled, 2=FDB learning is enabled)");
38 38
39int qlcnic_use_msi = 1; 39int qlcnic_use_msi = 1;
40MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 40MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
41module_param_named(use_msi, qlcnic_use_msi, int, 0444); 41module_param_named(use_msi, qlcnic_use_msi, int, 0444);
42 42
43int qlcnic_use_msi_x = 1; 43int qlcnic_use_msi_x = 1;
44MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 44MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
45module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444); 45module_param_named(use_msi_x, qlcnic_use_msi_x, int, 0444);
46 46
47int qlcnic_auto_fw_reset = 1; 47int qlcnic_auto_fw_reset = 1;
48MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 48MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
49module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644); 49module_param_named(auto_fw_reset, qlcnic_auto_fw_reset, int, 0644);
50 50
51int qlcnic_load_fw_file; 51int qlcnic_load_fw_file;
52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 52MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file)");
53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444); 53module_param_named(load_fw_file, qlcnic_load_fw_file, int, 0444);
54 54
55int qlcnic_config_npars; 55int qlcnic_config_npars;
56module_param(qlcnic_config_npars, int, 0444); 56module_param(qlcnic_config_npars, int, 0444);
57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 57MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled)");
58 58
59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent); 59static int qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
60static void qlcnic_remove(struct pci_dev *pdev); 60static void qlcnic_remove(struct pci_dev *pdev);
@@ -84,14 +84,9 @@ static int qlcnic_start_firmware(struct qlcnic_adapter *);
84static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter); 84static void qlcnic_free_lb_filters_mem(struct qlcnic_adapter *adapter);
85static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *); 85static void qlcnic_dev_set_npar_ready(struct qlcnic_adapter *);
86static int qlcnicvf_start_firmware(struct qlcnic_adapter *); 86static int qlcnicvf_start_firmware(struct qlcnic_adapter *);
87static void qlcnic_set_netdev_features(struct qlcnic_adapter *,
88 struct qlcnic_esw_func_cfg *);
89static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16); 87static int qlcnic_vlan_rx_add(struct net_device *, __be16, u16);
90static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16); 88static int qlcnic_vlan_rx_del(struct net_device *, __be16, u16);
91 89
92#define QLCNIC_IS_TSO_CAPABLE(adapter) \
93 ((adapter)->ahw->capabilities & QLCNIC_FW_CAPABILITY_TSO)
94
95static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter) 90static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
96{ 91{
97 struct qlcnic_hardware_context *ahw = adapter->ahw; 92 struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -308,6 +303,23 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
308 return 0; 303 return 0;
309} 304}
310 305
306static void qlcnic_delete_adapter_mac(struct qlcnic_adapter *adapter)
307{
308 struct qlcnic_mac_list_s *cur;
309 struct list_head *head;
310
311 list_for_each(head, &adapter->mac_list) {
312 cur = list_entry(head, struct qlcnic_mac_list_s, list);
313 if (!memcmp(adapter->mac_addr, cur->mac_addr, ETH_ALEN)) {
314 qlcnic_sre_macaddr_change(adapter, cur->mac_addr,
315 0, QLCNIC_MAC_DEL);
316 list_del(&cur->list);
317 kfree(cur);
318 return;
319 }
320 }
321}
322
311static int qlcnic_set_mac(struct net_device *netdev, void *p) 323static int qlcnic_set_mac(struct net_device *netdev, void *p)
312{ 324{
313 struct qlcnic_adapter *adapter = netdev_priv(netdev); 325 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -322,11 +334,15 @@ static int qlcnic_set_mac(struct net_device *netdev, void *p)
322 if (!is_valid_ether_addr(addr->sa_data)) 334 if (!is_valid_ether_addr(addr->sa_data))
323 return -EINVAL; 335 return -EINVAL;
324 336
337 if (!memcmp(adapter->mac_addr, addr->sa_data, ETH_ALEN))
338 return 0;
339
325 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) { 340 if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
326 netif_device_detach(netdev); 341 netif_device_detach(netdev);
327 qlcnic_napi_disable(adapter); 342 qlcnic_napi_disable(adapter);
328 } 343 }
329 344
345 qlcnic_delete_adapter_mac(adapter);
330 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len); 346 memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
331 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 347 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
332 qlcnic_set_multi(adapter->netdev); 348 qlcnic_set_multi(adapter->netdev);
@@ -1053,8 +1069,6 @@ void qlcnic_set_eswitch_port_features(struct qlcnic_adapter *adapter,
1053 1069
1054 if (!esw_cfg->promisc_mode) 1070 if (!esw_cfg->promisc_mode)
1055 adapter->flags |= QLCNIC_PROMISC_DISABLED; 1071 adapter->flags |= QLCNIC_PROMISC_DISABLED;
1056
1057 qlcnic_set_netdev_features(adapter, esw_cfg);
1058} 1072}
1059 1073
1060int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter) 1074int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
@@ -1069,51 +1083,23 @@ int qlcnic_set_eswitch_port_config(struct qlcnic_adapter *adapter)
1069 return -EIO; 1083 return -EIO;
1070 qlcnic_set_vlan_config(adapter, &esw_cfg); 1084 qlcnic_set_vlan_config(adapter, &esw_cfg);
1071 qlcnic_set_eswitch_port_features(adapter, &esw_cfg); 1085 qlcnic_set_eswitch_port_features(adapter, &esw_cfg);
1086 qlcnic_set_netdev_features(adapter, &esw_cfg);
1072 1087
1073 return 0; 1088 return 0;
1074} 1089}
1075 1090
1076static void 1091void qlcnic_set_netdev_features(struct qlcnic_adapter *adapter,
1077qlcnic_set_netdev_features(struct qlcnic_adapter *adapter, 1092 struct qlcnic_esw_func_cfg *esw_cfg)
1078 struct qlcnic_esw_func_cfg *esw_cfg)
1079{ 1093{
1080 struct net_device *netdev = adapter->netdev; 1094 struct net_device *netdev = adapter->netdev;
1081 unsigned long features, vlan_features;
1082 1095
1083 if (qlcnic_83xx_check(adapter)) 1096 if (qlcnic_83xx_check(adapter))
1084 return; 1097 return;
1085 1098
1086 features = (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | 1099 adapter->offload_flags = esw_cfg->offload_flags;
1087 NETIF_F_IPV6_CSUM | NETIF_F_GRO); 1100 adapter->flags |= QLCNIC_APP_CHANGED_FLAGS;
1088 vlan_features = (NETIF_F_SG | NETIF_F_IP_CSUM | 1101 netdev_update_features(netdev);
1089 NETIF_F_IPV6_CSUM); 1102 adapter->flags &= ~QLCNIC_APP_CHANGED_FLAGS;
1090
1091 if (QLCNIC_IS_TSO_CAPABLE(adapter)) {
1092 features |= (NETIF_F_TSO | NETIF_F_TSO6);
1093 vlan_features |= (NETIF_F_TSO | NETIF_F_TSO6);
1094 }
1095
1096 if (netdev->features & NETIF_F_LRO)
1097 features |= NETIF_F_LRO;
1098
1099 if (esw_cfg->offload_flags & BIT_0) {
1100 netdev->features |= features;
1101 adapter->rx_csum = 1;
1102 if (!(esw_cfg->offload_flags & BIT_1)) {
1103 netdev->features &= ~NETIF_F_TSO;
1104 features &= ~NETIF_F_TSO;
1105 }
1106 if (!(esw_cfg->offload_flags & BIT_2)) {
1107 netdev->features &= ~NETIF_F_TSO6;
1108 features &= ~NETIF_F_TSO6;
1109 }
1110 } else {
1111 netdev->features &= ~features;
1112 features &= ~features;
1113 adapter->rx_csum = 0;
1114 }
1115
1116 netdev->vlan_features = (features & vlan_features);
1117} 1103}
1118 1104
1119static int 1105static int
@@ -1995,8 +1981,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1995 pci_enable_pcie_error_reporting(pdev); 1981 pci_enable_pcie_error_reporting(pdev);
1996 1982
1997 ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL); 1983 ahw = kzalloc(sizeof(struct qlcnic_hardware_context), GFP_KERNEL);
1998 if (!ahw) 1984 if (!ahw) {
1985 err = -ENOMEM;
1999 goto err_out_free_res; 1986 goto err_out_free_res;
1987 }
2000 1988
2001 switch (ent->device) { 1989 switch (ent->device) {
2002 case PCI_DEVICE_ID_QLOGIC_QLE824X: 1990 case PCI_DEVICE_ID_QLOGIC_QLE824X:
@@ -2032,6 +2020,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2032 2020
2033 adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic"); 2021 adapter->qlcnic_wq = create_singlethread_workqueue("qlcnic");
2034 if (adapter->qlcnic_wq == NULL) { 2022 if (adapter->qlcnic_wq == NULL) {
2023 err = -ENOMEM;
2035 dev_err(&pdev->dev, "Failed to create workqueue\n"); 2024 dev_err(&pdev->dev, "Failed to create workqueue\n");
2036 goto err_out_free_netdev; 2025 goto err_out_free_netdev;
2037 } 2026 }
@@ -2112,6 +2101,10 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2112 goto err_out_disable_msi; 2101 goto err_out_disable_msi;
2113 } 2102 }
2114 2103
2104 err = qlcnic_get_act_pci_func(adapter);
2105 if (err)
2106 goto err_out_disable_mbx_intr;
2107
2115 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac); 2108 err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
2116 if (err) 2109 if (err)
2117 goto err_out_disable_mbx_intr; 2110 goto err_out_disable_mbx_intr;
@@ -2141,9 +2134,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2141 break; 2134 break;
2142 } 2135 }
2143 2136
2144 if (qlcnic_get_act_pci_func(adapter))
2145 goto err_out_disable_mbx_intr;
2146
2147 if (adapter->drv_mac_learn) 2137 if (adapter->drv_mac_learn)
2148 qlcnic_alloc_lb_filters_mem(adapter); 2138 qlcnic_alloc_lb_filters_mem(adapter);
2149 2139
@@ -2481,12 +2471,17 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
2481 if (test_bit(__QLCNIC_RESETTING, &adapter->state)) 2471 if (test_bit(__QLCNIC_RESETTING, &adapter->state))
2482 return; 2472 return;
2483 2473
2484 dev_err(&netdev->dev, "transmit timeout, resetting.\n"); 2474 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
2485 2475 netdev_info(netdev, "Tx timeout, reset the adapter.\n");
2486 if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) 2476 if (qlcnic_82xx_check(adapter))
2487 adapter->need_fw_reset = 1; 2477 adapter->need_fw_reset = 1;
2488 else 2478 else if (qlcnic_83xx_check(adapter))
2479 qlcnic_83xx_idc_request_reset(adapter,
2480 QLCNIC_FORCE_FW_DUMP_KEY);
2481 } else {
2482 netdev_info(netdev, "Tx timeout, reset adapter context.\n");
2489 adapter->ahw->reset_context = 1; 2483 adapter->ahw->reset_context = 1;
2484 }
2490} 2485}
2491 2486
2492static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) 2487static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -3123,10 +3118,8 @@ qlcnic_check_health(struct qlcnic_adapter *adapter)
3123 if (adapter->need_fw_reset) 3118 if (adapter->need_fw_reset)
3124 goto detach; 3119 goto detach;
3125 3120
3126 if (adapter->ahw->reset_context && qlcnic_auto_fw_reset) { 3121 if (adapter->ahw->reset_context && qlcnic_auto_fw_reset)
3127 qlcnic_reset_hw_context(adapter); 3122 qlcnic_reset_hw_context(adapter);
3128 adapter->netdev->trans_start = jiffies;
3129 }
3130 3123
3131 return 0; 3124 return 0;
3132 } 3125 }
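One small pattern in the qlcnic_main.c hunks is worth calling out: qlcnic_delete_adapter_mac() walks adapter->mac_list, compares each entry against the current MAC with memcmp() over ETH_ALEN bytes, and unlinks and frees the first match before the new address is programmed. A self-contained userspace sketch of that walk follows; the firmware call qlcnic_sre_macaddr_change() is deliberately left out, and the list and type names are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define MAC_LEN 6   /* stand-in for ETH_ALEN */

    struct mac_node {
        unsigned char mac[MAC_LEN];
        struct mac_node *next;
    };

    /* Remove the first node whose address matches 'mac', mirroring the
     * list_for_each()/memcmp()/list_del() walk in qlcnic_delete_adapter_mac(). */
    static void delete_mac(struct mac_node **head, const unsigned char *mac)
    {
        struct mac_node **pp;

        for (pp = head; *pp; pp = &(*pp)->next) {
            if (!memcmp((*pp)->mac, mac, MAC_LEN)) {
                struct mac_node *victim = *pp;

                *pp = victim->next;   /* unlink, like list_del() */
                free(victim);         /* like kfree() */
                return;
            }
        }
    }

    static struct mac_node *push_mac(struct mac_node *head, const unsigned char *mac)
    {
        struct mac_node *n = malloc(sizeof(*n));

        memcpy(n->mac, mac, MAC_LEN);
        n->next = head;
        return n;
    }

    int main(void)
    {
        const unsigned char a[MAC_LEN] = { 0x00, 0x0e, 0x1e, 1, 2, 3 };
        const unsigned char b[MAC_LEN] = { 0x00, 0x0e, 0x1e, 4, 5, 6 };
        struct mac_node *head = NULL;

        head = push_mac(head, a);
        head = push_mac(head, b);
        delete_mac(&head, a);
        printf("remaining entries: %d\n", head && !head->next ? 1 : -1);
        while (head) {
            struct mac_node *n = head->next;
            free(head);
            head = n;
        }
        return 0;
    }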
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 44d547d78b84..196b2d100407 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -280,9 +280,9 @@ void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
280static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr, 280static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
281 u32 *pay, u8 pci_func, u8 size) 281 u32 *pay, u8 pci_func, u8 size)
282{ 282{
283 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val, wait_time = 0;
283 struct qlcnic_hardware_context *ahw = adapter->ahw; 284 struct qlcnic_hardware_context *ahw = adapter->ahw;
284 unsigned long flags; 285 unsigned long flags;
285 u32 rsp, mbx_val, fw_data, rsp_num, mbx_cmd, val;
286 u16 opcode; 286 u16 opcode;
287 u8 mbx_err_code; 287 u8 mbx_err_code;
288 int i, j; 288 int i, j;
@@ -330,15 +330,13 @@ static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
330 * assume something is wrong. 330 * assume something is wrong.
331 */ 331 */
332poll: 332poll:
333 rsp = qlcnic_83xx_mbx_poll(adapter); 333 rsp = qlcnic_83xx_mbx_poll(adapter, &wait_time);
334 if (rsp != QLCNIC_RCODE_TIMEOUT) { 334 if (rsp != QLCNIC_RCODE_TIMEOUT) {
335 /* Get the FW response data */ 335 /* Get the FW response data */
336 fw_data = readl(QLCNIC_MBX_FW(ahw, 0)); 336 fw_data = readl(QLCNIC_MBX_FW(ahw, 0));
337 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) { 337 if (fw_data & QLCNIC_MBX_ASYNC_EVENT) {
338 __qlcnic_83xx_process_aen(adapter); 338 __qlcnic_83xx_process_aen(adapter);
339 mbx_val = QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL); 339 goto poll;
340 if (mbx_val)
341 goto poll;
342 } 340 }
343 mbx_err_code = QLCNIC_MBX_STATUS(fw_data); 341 mbx_err_code = QLCNIC_MBX_STATUS(fw_data);
344 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data); 342 rsp_num = QLCNIC_MBX_NUM_REGS(fw_data);
@@ -1736,7 +1734,6 @@ static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1736 1734
1737 if (!qlcnic_sriov_vf_reinit_driver(adapter)) { 1735 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1738 qlcnic_sriov_vf_attach(adapter); 1736 qlcnic_sriov_vf_attach(adapter);
1739 adapter->netdev->trans_start = jiffies;
1740 adapter->tx_timeo_cnt = 0; 1737 adapter->tx_timeo_cnt = 0;
1741 adapter->reset_ctx_cnt = 0; 1738 adapter->reset_ctx_cnt = 0;
1742 adapter->fw_fail_cnt = 0; 1739 adapter->fw_fail_cnt = 0;
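The qlcnic_83xx_mbx_poll() change above appears to thread a &wait_time accumulator through the poll loop, so that re-polling after an async event (the 'goto poll') stays within one overall mailbox budget instead of restarting the clock each time. A rough sketch of that bounded-retry idea in plain C, with made-up names and a simulated device, is shown below; treat it as an illustration of the pattern, not the driver's implementation.

    #include <stdio.h>
    #include <stdbool.h>

    #define TOTAL_BUDGET_MS 5000   /* like QLCNIC_MBX_TIMEOUT */
    #define POLL_STEP_MS      10

    /* Hypothetical device read; returns true when a response is ready. */
    static bool response_ready(int *fake_state)
    {
        return --(*fake_state) <= 0;
    }

    /* One poll pass.  '*waited_ms' accumulates across calls, so re-polling
     * after an async event cannot extend the total wait beyond the budget --
     * the point of passing &wait_time in the hunk above. */
    static int mbx_poll(int *fake_state, unsigned int *waited_ms)
    {
        while (*waited_ms < TOTAL_BUDGET_MS) {
            if (response_ready(fake_state))
                return 0;
            *waited_ms += POLL_STEP_MS;   /* pretend we slept 10 ms */
        }
        return -1;   /* timed out */
    }

    int main(void)
    {
        unsigned int waited = 0;
        int state = 3;          /* response after three passes */

        if (mbx_poll(&state, &waited) == 0)
            printf("response after ~%u ms of polling\n", waited);

        state = 1 << 30;        /* never ready: exhausts the shared budget */
        printf("second poll result: %d (waited %u ms total)\n",
               mbx_poll(&state, &waited), waited);
        return 0;
    }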
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index c81be2da119b..1a66ccded235 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -1133,9 +1133,6 @@ static int qlcnic_sriov_validate_linkevent(struct qlcnic_vf_info *vf,
1133 if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id) 1133 if ((cmd->req.arg[1] >> 16) != vf->rx_ctx_id)
1134 return -EINVAL; 1134 return -EINVAL;
1135 1135
1136 if (!(cmd->req.arg[1] & BIT_8))
1137 return -EINVAL;
1138
1139 return 0; 1136 return 0;
1140} 1137}
1141 1138
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 4e22e794a186..e7a2fe21b649 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -544,6 +544,9 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
544 switch (esw_cfg[i].op_mode) { 544 switch (esw_cfg[i].op_mode) {
545 case QLCNIC_PORT_DEFAULTS: 545 case QLCNIC_PORT_DEFAULTS:
546 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]); 546 qlcnic_set_eswitch_port_features(adapter, &esw_cfg[i]);
547 rtnl_lock();
548 qlcnic_set_netdev_features(adapter, &esw_cfg[i]);
549 rtnl_unlock();
547 break; 550 break;
548 case QLCNIC_ADD_VLAN: 551 case QLCNIC_ADD_VLAN:
549 qlcnic_set_vlan_config(adapter, &esw_cfg[i]); 552 qlcnic_set_vlan_config(adapter, &esw_cfg[i]);
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 87463bc701a6..50235d201592 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1106,6 +1106,7 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1106 if (pci_dma_mapping_error(qdev->pdev, map)) { 1106 if (pci_dma_mapping_error(qdev->pdev, map)) {
1107 __free_pages(rx_ring->pg_chunk.page, 1107 __free_pages(rx_ring->pg_chunk.page,
1108 qdev->lbq_buf_order); 1108 qdev->lbq_buf_order);
1109 rx_ring->pg_chunk.page = NULL;
1109 netif_err(qdev, drv, qdev->ndev, 1110 netif_err(qdev, drv, qdev->ndev,
1110 "PCI mapping failed.\n"); 1111 "PCI mapping failed.\n");
1111 return -ENOMEM; 1112 return -ENOMEM;
@@ -2777,6 +2778,12 @@ static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
2777 curr_idx = 0; 2778 curr_idx = 0;
2778 2779
2779 } 2780 }
2781 if (rx_ring->pg_chunk.page) {
2782 pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
2783 ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
2784 put_page(rx_ring->pg_chunk.page);
2785 rx_ring->pg_chunk.page = NULL;
2786 }
2780} 2787}
2781 2788
2782static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2789static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
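The qlge fix above pairs two habits: on a DMA-mapping failure the cached page pointer is cleared right after the page is freed, and the ring-teardown path releases any chunk still cached. Together they prevent both a leak and a double free. Here is a tiny sketch of the pattern with ordinary malloc()/free(); the names are invented for the example.

    #include <stdio.h>
    #include <stdlib.h>

    struct rx_chunk {
        void *page;   /* stand-in for rx_ring->pg_chunk.page */
    };

    /* Failure path: release the buffer AND clear the cached pointer, so a
     * later teardown pass does not free (or unmap) it a second time. */
    static int map_chunk(struct rx_chunk *c, int simulate_dma_error)
    {
        c->page = malloc(4096);
        if (!c->page)
            return -1;

        if (simulate_dma_error) {
            free(c->page);
            c->page = NULL;   /* the fix: forget the stale pointer */
            return -1;
        }
        return 0;
    }

    /* Teardown: release any chunk still cached, mirroring the new block in
     * ql_free_lbq_buffers(); safe because failed maps left 'page' NULL. */
    static void free_chunk(struct rx_chunk *c)
    {
        if (c->page) {
            free(c->page);
            c->page = NULL;
        }
    }

    int main(void)
    {
        struct rx_chunk c = { NULL };

        map_chunk(&c, 1);   /* simulated pci_dma_mapping_error() */
        free_chunk(&c);     /* no double free: pointer was cleared */

        map_chunk(&c, 0);
        free_chunk(&c);     /* leftover chunk released here */
        printf("done\n");
        return 0;
    }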
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 7d1fb9ad1296..03523459c406 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -1136,6 +1136,7 @@ static void cp_clean_rings (struct cp_private *cp)
1136 cp->dev->stats.tx_dropped++; 1136 cp->dev->stats.tx_dropped++;
1137 } 1137 }
1138 } 1138 }
1139 netdev_reset_queue(cp->dev);
1139 1140
1140 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE); 1141 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1141 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE); 1142 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 79c520b64fdd..393f961a013c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5856,7 +5856,20 @@ err_out:
5856 return -EIO; 5856 return -EIO;
5857} 5857}
5858 5858
5859static inline void rtl8169_tso_csum(struct rtl8169_private *tp, 5859static bool rtl_skb_pad(struct sk_buff *skb)
5860{
5861 if (skb_padto(skb, ETH_ZLEN))
5862 return false;
5863 skb_put(skb, ETH_ZLEN - skb->len);
5864 return true;
5865}
5866
5867static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
5868{
5869 return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
5870}
5871
5872static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
5860 struct sk_buff *skb, u32 *opts) 5873 struct sk_buff *skb, u32 *opts)
5861{ 5874{
5862 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version; 5875 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
@@ -5869,13 +5882,20 @@ static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5869 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 5882 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5870 const struct iphdr *ip = ip_hdr(skb); 5883 const struct iphdr *ip = ip_hdr(skb);
5871 5884
5885 if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
5886 return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
5887
5872 if (ip->protocol == IPPROTO_TCP) 5888 if (ip->protocol == IPPROTO_TCP)
5873 opts[offset] |= info->checksum.tcp; 5889 opts[offset] |= info->checksum.tcp;
5874 else if (ip->protocol == IPPROTO_UDP) 5890 else if (ip->protocol == IPPROTO_UDP)
5875 opts[offset] |= info->checksum.udp; 5891 opts[offset] |= info->checksum.udp;
5876 else 5892 else
5877 WARN_ON_ONCE(1); 5893 WARN_ON_ONCE(1);
5894 } else {
5895 if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
5896 return rtl_skb_pad(skb);
5878 } 5897 }
5898 return true;
5879} 5899}
5880 5900
5881static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 5901static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -5896,17 +5916,15 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5896 goto err_stop_0; 5916 goto err_stop_0;
5897 } 5917 }
5898 5918
5899 /* 8168evl does not automatically pad to minimum length. */
5900 if (unlikely(tp->mac_version == RTL_GIGA_MAC_VER_34 &&
5901 skb->len < ETH_ZLEN)) {
5902 if (skb_padto(skb, ETH_ZLEN))
5903 goto err_update_stats;
5904 skb_put(skb, ETH_ZLEN - skb->len);
5905 }
5906
5907 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn)) 5919 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5908 goto err_stop_0; 5920 goto err_stop_0;
5909 5921
5922 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5923 opts[0] = DescOwn;
5924
5925 if (!rtl8169_tso_csum(tp, skb, opts))
5926 goto err_update_stats;
5927
5910 len = skb_headlen(skb); 5928 len = skb_headlen(skb);
5911 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE); 5929 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5912 if (unlikely(dma_mapping_error(d, mapping))) { 5930 if (unlikely(dma_mapping_error(d, mapping))) {
@@ -5918,11 +5936,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5918 tp->tx_skb[entry].len = len; 5936 tp->tx_skb[entry].len = len;
5919 txd->addr = cpu_to_le64(mapping); 5937 txd->addr = cpu_to_le64(mapping);
5920 5938
5921 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5922 opts[0] = DescOwn;
5923
5924 rtl8169_tso_csum(tp, skb, opts);
5925
5926 frags = rtl8169_xmit_frags(tp, skb, opts); 5939 frags = rtl8169_xmit_frags(tp, skb, opts);
5927 if (frags < 0) 5940 if (frags < 0)
5928 goto err_dma_1; 5941 goto err_dma_1;
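The r8169 change moves the RTL_GIGA_MAC_VER_34 workaround into rtl8169_tso_csum(): because that chip does not pad short frames in hardware, the driver pads them in software to ETH_ZLEN (and, for checksum-offloaded frames, falls back to skb_checksum_help() first). The core of rtl_skb_pad() is just zero-padding a buffer up to the 60-byte minimum; a hedged, self-contained sketch:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ZLEN 60   /* minimum Ethernet frame length, without FCS */
    #define BUF_MAX 128

    /* Zero-pad 'len' bytes in 'buf' up to ETH_ZLEN, as rtl_skb_pad() does with
     * skb_padto()+skb_put() for chips that do not pad short frames themselves.
     * Returns the new length, or 0 if the buffer is too small to pad. */
    static size_t pad_to_min_frame(unsigned char *buf, size_t len, size_t cap)
    {
        if (len >= ETH_ZLEN)
            return len;
        if (cap < ETH_ZLEN)
            return 0;   /* would overflow; caller must drop the frame */

        memset(buf + len, 0, ETH_ZLEN - len);
        return ETH_ZLEN;
    }

    int main(void)
    {
        unsigned char frame[BUF_MAX] = { 0xff, 0xff };   /* pretend 42-byte frame */
        size_t len = 42;

        len = pad_to_min_frame(frame, len, sizeof(frame));
        printf("padded length: %zu\n", len);   /* 60 */
        return 0;
    }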
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 33dc6f2418f2..42e9dd05c936 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2745,11 +2745,6 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
2745 if (mdp->cd->tsu) { 2745 if (mdp->cd->tsu) {
2746 struct resource *rtsu; 2746 struct resource *rtsu;
2747 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2747 rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2748 if (!rtsu) {
2749 dev_err(&pdev->dev, "Not found TSU resource\n");
2750 ret = -ENODEV;
2751 goto out_release;
2752 }
2753 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu); 2748 mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
2754 if (IS_ERR(mdp->tsu_addr)) { 2749 if (IS_ERR(mdp->tsu_addr)) {
2755 ret = PTR_ERR(mdp->tsu_addr); 2750 ret = PTR_ERR(mdp->tsu_addr);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 01b99206139a..39e4cb39de29 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -638,14 +638,16 @@ static void efx_start_datapath(struct efx_nic *efx)
638 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 638 EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
639 efx->type->rx_buffer_padding); 639 efx->type->rx_buffer_padding);
640 rx_buf_len = (sizeof(struct efx_rx_page_state) + 640 rx_buf_len = (sizeof(struct efx_rx_page_state) +
641 EFX_PAGE_IP_ALIGN + efx->rx_dma_len); 641 NET_IP_ALIGN + efx->rx_dma_len);
642 if (rx_buf_len <= PAGE_SIZE) { 642 if (rx_buf_len <= PAGE_SIZE) {
643 efx->rx_scatter = false; 643 efx->rx_scatter = false;
644 efx->rx_buffer_order = 0; 644 efx->rx_buffer_order = 0;
645 } else if (efx->type->can_rx_scatter) { 645 } else if (efx->type->can_rx_scatter) {
646 BUILD_BUG_ON(EFX_RX_USR_BUF_SIZE % L1_CACHE_BYTES);
646 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) + 647 BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
647 EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE > 648 2 * ALIGN(NET_IP_ALIGN + EFX_RX_USR_BUF_SIZE,
648 PAGE_SIZE / 2); 649 EFX_RX_BUF_ALIGNMENT) >
650 PAGE_SIZE);
649 efx->rx_scatter = true; 651 efx->rx_scatter = true;
650 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE; 652 efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
651 efx->rx_buffer_order = 0; 653 efx->rx_buffer_order = 0;
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 9bd433a095c5..39d6bd77f015 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -72,8 +72,20 @@
72/* Maximum possible MTU the driver supports */ 72/* Maximum possible MTU the driver supports */
73#define EFX_MAX_MTU (9 * 1024) 73#define EFX_MAX_MTU (9 * 1024)
74 74
75/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */ 75/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
76#define EFX_RX_USR_BUF_SIZE 1824 76 * and should be a multiple of the cache line size.
77 */
78#define EFX_RX_USR_BUF_SIZE (2048 - 256)
79
80/* If possible, we should ensure cache line alignment at start and end
81 * of every buffer. Otherwise, we just need to ensure 4-byte
82 * alignment of the network header.
83 */
84#if NET_IP_ALIGN == 0
85#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
86#else
87#define EFX_RX_BUF_ALIGNMENT 4
88#endif
77 89
78/* Forward declare Precision Time Protocol (PTP) support structure. */ 90/* Forward declare Precision Time Protocol (PTP) support structure. */
79struct efx_ptp_data; 91struct efx_ptp_data;
@@ -468,24 +480,11 @@ enum nic_state {
468}; 480};
469 481
470/* 482/*
471 * Alignment of page-allocated RX buffers
472 *
473 * Controls the number of bytes inserted at the start of an RX buffer.
474 * This is the equivalent of NET_IP_ALIGN [which controls the alignment
475 * of the skb->head for hardware DMA].
476 */
477#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
478#define EFX_PAGE_IP_ALIGN 0
479#else
480#define EFX_PAGE_IP_ALIGN NET_IP_ALIGN
481#endif
482
483/*
484 * Alignment of the skb->head which wraps a page-allocated RX buffer 483 * Alignment of the skb->head which wraps a page-allocated RX buffer
485 * 484 *
486 * The skb allocated to wrap an rx_buffer can have this alignment. Since 485 * The skb allocated to wrap an rx_buffer can have this alignment. Since
487 * the data is memcpy'd from the rx_buf, it does not need to be equal to 486 * the data is memcpy'd from the rx_buf, it does not need to be equal to
488 * EFX_PAGE_IP_ALIGN. 487 * NET_IP_ALIGN.
489 */ 488 */
490#define EFX_PAGE_SKB_ALIGN 2 489#define EFX_PAGE_SKB_ALIGN 2
491 490
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index e73e30bac10e..a7dfe36cabf4 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -93,8 +93,8 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
93 93
94void efx_rx_config_page_split(struct efx_nic *efx) 94void efx_rx_config_page_split(struct efx_nic *efx)
95{ 95{
96 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + EFX_PAGE_IP_ALIGN, 96 efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
97 L1_CACHE_BYTES); 97 EFX_RX_BUF_ALIGNMENT);
98 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 : 98 efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
99 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) / 99 ((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
100 efx->rx_page_buf_step); 100 efx->rx_page_buf_step);
@@ -188,9 +188,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
188 do { 188 do {
189 index = rx_queue->added_count & rx_queue->ptr_mask; 189 index = rx_queue->added_count & rx_queue->ptr_mask;
190 rx_buf = efx_rx_buffer(rx_queue, index); 190 rx_buf = efx_rx_buffer(rx_queue, index);
191 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN; 191 rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
192 rx_buf->page = page; 192 rx_buf->page = page;
193 rx_buf->page_offset = page_offset + EFX_PAGE_IP_ALIGN; 193 rx_buf->page_offset = page_offset + NET_IP_ALIGN;
194 rx_buf->len = efx->rx_dma_len; 194 rx_buf->len = efx->rx_dma_len;
195 rx_buf->flags = 0; 195 rx_buf->flags = 0;
196 ++rx_queue->added_count; 196 ++rx_queue->added_count;
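The sfc changes replace EFX_PAGE_IP_ALIGN with NET_IP_ALIGN and align each RX buffer to EFX_RX_BUF_ALIGNMENT (the cache line when NET_IP_ALIGN is 0, otherwise 4 bytes), so efx_rx_config_page_split() computes a per-buffer step and then how many buffers fit in a page after the page-state header. The arithmetic is easy to check by hand; the sketch below does exactly that, using an assumed 16-byte efx_rx_page_state since its real size is not shown in these hunks.

    #include <stdio.h>

    #define PAGE_SIZE            4096
    #define L1_CACHE_BYTES         64
    #define NET_IP_ALIGN            2   /* 0 on arches with efficient unaligned access */
    #define EFX_RX_USR_BUF_SIZE (2048 - 256)

    /* When NET_IP_ALIGN == 0 the driver aligns buffers to the cache line;
     * otherwise only 4-byte alignment of the network header is required. */
    #define EFX_RX_BUF_ALIGNMENT (NET_IP_ALIGN == 0 ? L1_CACHE_BYTES : 4)

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
        /* sizeof(struct efx_rx_page_state) is not shown in the hunk; 16 bytes
         * is an assumed placeholder just to make the arithmetic concrete. */
        unsigned int state_size = 16;
        unsigned int step = ALIGN_UP(EFX_RX_USR_BUF_SIZE + NET_IP_ALIGN,
                                     EFX_RX_BUF_ALIGNMENT);
        unsigned int bufs_per_page = (PAGE_SIZE - state_size) / step;

        printf("buffer step:   %u bytes\n", step);
        printf("bufs per page: %u\n", bufs_per_page);

        /* The BUILD_BUG_ON above enforces exactly this: two aligned buffers
         * plus the page state must still fit in one page. */
        printf("2 * step + state = %u (<= %u)\n",
               2 * step + state_size, PAGE_SIZE);
        return 0;
    }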
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index f695a50bac47..43c1f3223322 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,6 +1,6 @@
1config STMMAC_ETH 1config STMMAC_ETH
2 tristate "STMicroelectronics 10/100/1000 Ethernet driver" 2 tristate "STMicroelectronics 10/100/1000 Ethernet driver"
3 depends on HAS_IOMEM 3 depends on HAS_IOMEM && HAS_DMA
4 select NET_CORE 4 select NET_CORE
5 select MII 5 select MII
6 select PHYLIB 6 select PHYLIB
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index d5a141c7c4e7..1c502bb0c916 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -229,7 +229,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
229 } 229 }
230 230
231 if (port->passthru) 231 if (port->passthru)
232 vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); 232 vlan = list_first_or_null_rcu(&port->vlans,
233 struct macvlan_dev, list);
233 else 234 else
234 vlan = macvlan_hash_lookup(port, eth->h_dest); 235 vlan = macvlan_hash_lookup(port, eth->h_dest);
235 if (vlan == NULL) 236 if (vlan == NULL)
@@ -814,7 +815,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
814 if (err < 0) 815 if (err < 0)
815 goto upper_dev_unlink; 816 goto upper_dev_unlink;
816 817
817 list_add_tail(&vlan->list, &port->vlans); 818 list_add_tail_rcu(&vlan->list, &port->vlans);
818 netif_stacked_transfer_operstate(lowerdev, dev); 819 netif_stacked_transfer_operstate(lowerdev, dev);
819 820
820 return 0; 821 return 0;
@@ -842,7 +843,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head)
842{ 843{
843 struct macvlan_dev *vlan = netdev_priv(dev); 844 struct macvlan_dev *vlan = netdev_priv(dev);
844 845
845 list_del(&vlan->list); 846 list_del_rcu(&vlan->list);
846 unregister_netdevice_queue(dev, head); 847 unregister_netdevice_queue(dev, head);
847 netdev_upper_dev_unlink(vlan->lowerdev, dev); 848 netdev_upper_dev_unlink(vlan->lowerdev, dev);
848} 849}
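The macvlan fix swaps list_first_entry() for list_first_or_null_rcu() in passthru mode: on an empty ->vlans list the former returns a bogus pointer into the list head, so the later NULL check never fires. Ignoring the RCU details, the difference is simply "blind cast of head->next" versus "return NULL when the list is empty", as this small sketch with invented names shows.

    #include <stdio.h>
    #include <stddef.h>

    struct list_head {
        struct list_head *next, *prev;
    };

    struct vlan {   /* stand-in for struct macvlan_dev */
        int id;
        struct list_head list;
    };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add_tail(struct list_head *n, struct list_head *h)
    {
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
    }

    /* Like list_first_entry(): blindly casts head->next, so on an empty list
     * it returns a bogus pointer into the head itself -- the bug being fixed. */
    #define first_entry(h, type, member) container_of((h)->next, type, member)

    /* Like list_first_or_null_rcu() (minus the RCU dereference): an empty
     * list yields NULL so the caller's NULL check actually protects it. */
    #define first_or_null(h, type, member) \
        ((h)->next != (h) ? first_entry(h, type, member) : NULL)

    int main(void)
    {
        struct list_head vlans;
        struct vlan v = { .id = 42 };

        list_init(&vlans);
        printf("empty list: %s\n",
               first_or_null(&vlans, struct vlan, list) ? "entry?!" : "NULL");

        list_add_tail(&v.list, &vlans);
        printf("after add:  id=%d\n",
               first_or_null(&vlans, struct vlan, list)->id);
        return 0;
    }

The companion list_add_tail_rcu()/list_del_rcu() conversions in the same hunks presumably keep the list safe to traverse from the RCU-protected receive handler while entries come and go.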
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index ed947dd76fbd..f3cdf64997d6 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -375,6 +375,8 @@ static void ntb_netdev_remove(struct pci_dev *pdev)
375 if (dev == NULL) 375 if (dev == NULL)
376 return; 376 return;
377 377
378 list_del(&dev->list);
379
378 ndev = dev->ndev; 380 ndev = dev->ndev;
379 381
380 unregister_netdev(ndev); 382 unregister_netdev(ndev);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf887c2384e9..86adfa0a912e 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -582,6 +582,7 @@ static const struct usb_device_id products[] = {
582 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */ 582 {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
583 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 583 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
584 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 584 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */
585 {QMI_FIXED_INTF(0x1e2d, 0x12d1, 4)}, /* Cinterion PLxx */
585 586
586 /* 4. Gobi 1000 devices */ 587 /* 4. Gobi 1000 devices */
587 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 588 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index a491d3a95393..6cbdac67f3a0 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -130,19 +130,23 @@ struct rtl8150 {
130 struct usb_device *udev; 130 struct usb_device *udev;
131 struct tasklet_struct tl; 131 struct tasklet_struct tl;
132 struct net_device *netdev; 132 struct net_device *netdev;
133 struct urb *rx_urb, *tx_urb, *intr_urb, *ctrl_urb; 133 struct urb *rx_urb, *tx_urb, *intr_urb;
134 struct sk_buff *tx_skb, *rx_skb; 134 struct sk_buff *tx_skb, *rx_skb;
135 struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE]; 135 struct sk_buff *rx_skb_pool[RX_SKB_POOL_SIZE];
136 spinlock_t rx_pool_lock; 136 spinlock_t rx_pool_lock;
137 struct usb_ctrlrequest dr; 137 struct usb_ctrlrequest dr;
138 int intr_interval; 138 int intr_interval;
139 __le16 rx_creg;
140 u8 *intr_buff; 139 u8 *intr_buff;
141 u8 phy; 140 u8 phy;
142}; 141};
143 142
144typedef struct rtl8150 rtl8150_t; 143typedef struct rtl8150 rtl8150_t;
145 144
145struct async_req {
146 struct usb_ctrlrequest dr;
147 u16 rx_creg;
148};
149
146static const char driver_name [] = "rtl8150"; 150static const char driver_name [] = "rtl8150";
147 151
148/* 152/*
@@ -164,51 +168,47 @@ static int set_registers(rtl8150_t * dev, u16 indx, u16 size, void *data)
164 indx, 0, data, size, 500); 168 indx, 0, data, size, 500);
165} 169}
166 170
167static void ctrl_callback(struct urb *urb) 171static void async_set_reg_cb(struct urb *urb)
168{ 172{
169 rtl8150_t *dev; 173 struct async_req *req = (struct async_req *)urb->context;
170 int status = urb->status; 174 int status = urb->status;
171 175
172 switch (status) { 176 if (status < 0)
173 case 0: 177 dev_dbg(&urb->dev->dev, "%s failed with %d", __func__, status);
174 break; 178 kfree(req);
175 case -EINPROGRESS: 179 usb_free_urb(urb);
176 break;
177 case -ENOENT:
178 break;
179 default:
180 if (printk_ratelimit())
181 dev_warn(&urb->dev->dev, "ctrl urb status %d\n", status);
182 }
183 dev = urb->context;
184 clear_bit(RX_REG_SET, &dev->flags);
185} 180}
186 181
187static int async_set_registers(rtl8150_t * dev, u16 indx, u16 size) 182static int async_set_registers(rtl8150_t *dev, u16 indx, u16 size, u16 reg)
188{ 183{
189 int ret; 184 int res = -ENOMEM;
190 185 struct urb *async_urb;
191 if (test_bit(RX_REG_SET, &dev->flags)) 186 struct async_req *req;
192 return -EAGAIN;
193 187
194 dev->dr.bRequestType = RTL8150_REQT_WRITE; 188 req = kmalloc(sizeof(struct async_req), GFP_ATOMIC);
195 dev->dr.bRequest = RTL8150_REQ_SET_REGS; 189 if (req == NULL)
196 dev->dr.wValue = cpu_to_le16(indx); 190 return res;
197 dev->dr.wIndex = 0; 191 async_urb = usb_alloc_urb(0, GFP_ATOMIC);
198 dev->dr.wLength = cpu_to_le16(size); 192 if (async_urb == NULL) {
199 dev->ctrl_urb->transfer_buffer_length = size; 193 kfree(req);
200 usb_fill_control_urb(dev->ctrl_urb, dev->udev, 194 return res;
201 usb_sndctrlpipe(dev->udev, 0), (char *) &dev->dr, 195 }
202 &dev->rx_creg, size, ctrl_callback, dev); 196 req->rx_creg = cpu_to_le16(reg);
203 if ((ret = usb_submit_urb(dev->ctrl_urb, GFP_ATOMIC))) { 197 req->dr.bRequestType = RTL8150_REQT_WRITE;
204 if (ret == -ENODEV) 198 req->dr.bRequest = RTL8150_REQ_SET_REGS;
199 req->dr.wIndex = 0;
200 req->dr.wValue = cpu_to_le16(indx);
201 req->dr.wLength = cpu_to_le16(size);
202 usb_fill_control_urb(async_urb, dev->udev,
203 usb_sndctrlpipe(dev->udev, 0), (void *)&req->dr,
204 &req->rx_creg, size, async_set_reg_cb, req);
205 res = usb_submit_urb(async_urb, GFP_ATOMIC);
206 if (res) {
207 if (res == -ENODEV)
205 netif_device_detach(dev->netdev); 208 netif_device_detach(dev->netdev);
206 dev_err(&dev->udev->dev, 209 dev_err(&dev->udev->dev, "%s failed with %d\n", __func__, res);
207 "control request submission failed: %d\n", ret); 210 }
208 } else 211 return res;
209 set_bit(RX_REG_SET, &dev->flags);
210
211 return ret;
212} 212}
213 213
214static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg) 214static int read_mii_word(rtl8150_t * dev, u8 phy, __u8 indx, u16 * reg)
@@ -330,13 +330,6 @@ static int alloc_all_urbs(rtl8150_t * dev)
330 usb_free_urb(dev->tx_urb); 330 usb_free_urb(dev->tx_urb);
331 return 0; 331 return 0;
332 } 332 }
333 dev->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL);
334 if (!dev->ctrl_urb) {
335 usb_free_urb(dev->rx_urb);
336 usb_free_urb(dev->tx_urb);
337 usb_free_urb(dev->intr_urb);
338 return 0;
339 }
340 333
341 return 1; 334 return 1;
342} 335}
@@ -346,7 +339,6 @@ static void free_all_urbs(rtl8150_t * dev)
346 usb_free_urb(dev->rx_urb); 339 usb_free_urb(dev->rx_urb);
347 usb_free_urb(dev->tx_urb); 340 usb_free_urb(dev->tx_urb);
348 usb_free_urb(dev->intr_urb); 341 usb_free_urb(dev->intr_urb);
349 usb_free_urb(dev->ctrl_urb);
350} 342}
351 343
352static void unlink_all_urbs(rtl8150_t * dev) 344static void unlink_all_urbs(rtl8150_t * dev)
@@ -354,7 +346,6 @@ static void unlink_all_urbs(rtl8150_t * dev)
354 usb_kill_urb(dev->rx_urb); 346 usb_kill_urb(dev->rx_urb);
355 usb_kill_urb(dev->tx_urb); 347 usb_kill_urb(dev->tx_urb);
356 usb_kill_urb(dev->intr_urb); 348 usb_kill_urb(dev->intr_urb);
357 usb_kill_urb(dev->ctrl_urb);
358} 349}
359 350
360static inline struct sk_buff *pull_skb(rtl8150_t *dev) 351static inline struct sk_buff *pull_skb(rtl8150_t *dev)
@@ -629,7 +620,6 @@ static int enable_net_traffic(rtl8150_t * dev)
629 } 620 }
630 /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */ 621 /* RCR bit7=1 attach Rx info at the end; =0 HW CRC (which is broken) */
631 rcr = 0x9e; 622 rcr = 0x9e;
632 dev->rx_creg = cpu_to_le16(rcr);
633 tcr = 0xd8; 623 tcr = 0xd8;
634 cr = 0x0c; 624 cr = 0x0c;
635 if (!(rcr & 0x80)) 625 if (!(rcr & 0x80))
@@ -662,20 +652,22 @@ static void rtl8150_tx_timeout(struct net_device *netdev)
662static void rtl8150_set_multicast(struct net_device *netdev) 652static void rtl8150_set_multicast(struct net_device *netdev)
663{ 653{
664 rtl8150_t *dev = netdev_priv(netdev); 654 rtl8150_t *dev = netdev_priv(netdev);
655 u16 rx_creg = 0x9e;
656
665 netif_stop_queue(netdev); 657 netif_stop_queue(netdev);
666 if (netdev->flags & IFF_PROMISC) { 658 if (netdev->flags & IFF_PROMISC) {
667 dev->rx_creg |= cpu_to_le16(0x0001); 659 rx_creg |= 0x0001;
668 dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name); 660 dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
669 } else if (!netdev_mc_empty(netdev) || 661 } else if (!netdev_mc_empty(netdev) ||
670 (netdev->flags & IFF_ALLMULTI)) { 662 (netdev->flags & IFF_ALLMULTI)) {
671 dev->rx_creg &= cpu_to_le16(0xfffe); 663 rx_creg &= 0xfffe;
672 dev->rx_creg |= cpu_to_le16(0x0002); 664 rx_creg |= 0x0002;
673 dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name); 665 dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
674 } else { 666 } else {
675 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */ 667 /* ~RX_MULTICAST, ~RX_PROMISCUOUS */
676 dev->rx_creg &= cpu_to_le16(0x00fc); 668 rx_creg &= 0x00fc;
677 } 669 }
678 async_set_registers(dev, RCR, 2); 670 async_set_registers(dev, RCR, sizeof(rx_creg), rx_creg);
679 netif_wake_queue(netdev); 671 netif_wake_queue(netdev);
680} 672}
681 673
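The rtl8150 rework drops the single shared ctrl_urb (and the flag that serialized it) in favor of allocating a fresh URB plus a small struct async_req per write, with both freed in the completion callback; that removes the window in which a second rx_creg update was silently rejected with -EAGAIN. The per-request-context pattern looks roughly like this in plain C; the "completion" here is invoked synchronously only to keep the sketch self-contained, and the names are illustrative.

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* Per-request context, like 'struct async_req' above: the setup data and
     * the value to write travel with the request, not in the device struct. */
    struct async_req {
        uint16_t index;
        uint16_t value;
        void (*complete)(struct async_req *req, int status);
    };

    /* Completion handler: log failures, then free the request -- the pattern
     * async_set_reg_cb() follows with kfree()/usb_free_urb(). */
    static void write_done(struct async_req *req, int status)
    {
        if (status < 0)
            fprintf(stderr, "register write %#x failed: %d\n",
                    req->index, status);
        free(req);
    }

    /* "Submit" an asynchronous register write.  A real driver would hand the
     * request to the USB core here; this sketch just invokes the completion
     * immediately so the example runs on its own. */
    static int async_set_register(uint16_t index, uint16_t value)
    {
        struct async_req *req = malloc(sizeof(*req));

        if (!req)
            return -1;
        req->index = index;
        req->value = value;
        req->complete = write_done;

        req->complete(req, 0);   /* simulated completion */
        return 0;
    }

    int main(void)
    {
        /* Two back-to-back writes no longer race over one shared buffer,
         * because each carries its own context. */
        async_set_register(0x44 /* RCR */, 0x9e | 0x01);
        async_set_register(0x44 /* RCR */, 0x9e);
        printf("submitted two independent writes\n");
        return 0;
    }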
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f95cb032394b..06ee82f557d4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1477,7 +1477,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1477 1477
1478 /* usbnet already took usb runtime pm, so have to enable the feature 1478 /* usbnet already took usb runtime pm, so have to enable the feature
1479 * for usb interface, otherwise usb_autopm_get_interface may return 1479 * for usb interface, otherwise usb_autopm_get_interface may return
1480 * failure if USB_SUSPEND(RUNTIME_PM) is enabled. 1480 * failure if RUNTIME_PM is enabled.
1481 */ 1481 */
1482 if (!driver->supports_autosuspend) { 1482 if (!driver->supports_autosuspend) {
1483 driver->supports_autosuspend = 1; 1483 driver->supports_autosuspend = 1;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 3c23fdc27bf0..c9e00387d999 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -28,7 +28,7 @@
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/cpu.h> 29#include <linux/cpu.h>
30 30
31static int napi_weight = 128; 31static int napi_weight = NAPI_POLL_WEIGHT;
32module_param(napi_weight, int, 0444); 32module_param(napi_weight, int, 0444);
33 33
34static bool csum = true, gso = true; 34static bool csum = true, gso = true;
@@ -636,10 +636,11 @@ static int virtnet_open(struct net_device *dev)
636 struct virtnet_info *vi = netdev_priv(dev); 636 struct virtnet_info *vi = netdev_priv(dev);
637 int i; 637 int i;
638 638
639 for (i = 0; i < vi->curr_queue_pairs; i++) { 639 for (i = 0; i < vi->max_queue_pairs; i++) {
640 /* Make sure we have some buffers: if oom use wq. */ 640 if (i < vi->curr_queue_pairs)
641 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL)) 641 /* Make sure we have some buffers: if oom use wq. */
642 schedule_delayed_work(&vi->refill, 0); 642 if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
643 schedule_delayed_work(&vi->refill, 0);
643 virtnet_napi_enable(&vi->rq[i]); 644 virtnet_napi_enable(&vi->rq[i]);
644 } 645 }
645 646
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ba81f3c39a83..3b1d2ee7156b 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -301,7 +301,7 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
301} 301}
302 302
303/* Look up Ethernet address in forwarding table */ 303/* Look up Ethernet address in forwarding table */
304static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan, 304static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
305 const u8 *mac) 305 const u8 *mac)
306 306
307{ 307{
@@ -316,6 +316,18 @@ static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
316 return NULL; 316 return NULL;
317} 317}
318 318
319static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
320 const u8 *mac)
321{
322 struct vxlan_fdb *f;
323
324 f = __vxlan_find_mac(vxlan, mac);
325 if (f)
326 f->used = jiffies;
327
328 return f;
329}
330
319/* Add/update destinations for multicast */ 331/* Add/update destinations for multicast */
320static int vxlan_fdb_append(struct vxlan_fdb *f, 332static int vxlan_fdb_append(struct vxlan_fdb *f,
321 __be32 ip, __be16 port, __u32 vni, __u32 ifindex) 333 __be32 ip, __be16 port, __u32 vni, __u32 ifindex)
@@ -353,7 +365,7 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
353 struct vxlan_fdb *f; 365 struct vxlan_fdb *f;
354 int notify = 0; 366 int notify = 0;
355 367
356 f = vxlan_find_mac(vxlan, mac); 368 f = __vxlan_find_mac(vxlan, mac);
357 if (f) { 369 if (f) {
358 if (flags & NLM_F_EXCL) { 370 if (flags & NLM_F_EXCL) {
359 netdev_dbg(vxlan->dev, 371 netdev_dbg(vxlan->dev,
@@ -563,7 +575,6 @@ static void vxlan_snoop(struct net_device *dev,
563 575
564 f = vxlan_find_mac(vxlan, src_mac); 576 f = vxlan_find_mac(vxlan, src_mac);
565 if (likely(f)) { 577 if (likely(f)) {
566 f->used = jiffies;
567 if (likely(f->remote.remote_ip == src_ip)) 578 if (likely(f->remote.remote_ip == src_ip))
568 return; 579 return;
569 580
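The vxlan change splits the lookup in two: __vxlan_find_mac() is the raw hash walk with no side effects, while vxlan_find_mac() wraps it and refreshes f->used, so only genuine data-path lookups reset the aging timestamp and control-path callers such as vxlan_fdb_create() do not. A minimal sketch of that split, with an array standing in for the hash table and time() for jiffies:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define MAC_LEN 6

    struct fdb_entry {
        unsigned char mac[MAC_LEN];
        time_t used;   /* last time the data path saw this MAC */
    };

    static struct fdb_entry table[4];
    static int table_len;

    /* Raw lookup: no side effects, suitable for control-path callers such as
     * the fdb_create() path above. */
    static struct fdb_entry *__find_mac(const unsigned char *mac)
    {
        for (int i = 0; i < table_len; i++)
            if (!memcmp(table[i].mac, mac, MAC_LEN))
                return &table[i];
        return NULL;
    }

    /* Data-path lookup: same search, but refreshes the aging timestamp, the
     * way vxlan_find_mac() now sets f->used = jiffies. */
    static struct fdb_entry *find_mac(const unsigned char *mac)
    {
        struct fdb_entry *f = __find_mac(mac);

        if (f)
            f->used = time(NULL);
        return f;
    }

    int main(void)
    {
        const unsigned char mac[MAC_LEN] = { 0x52, 0x54, 0, 1, 2, 3 };

        memcpy(table[0].mac, mac, MAC_LEN);
        table_len = 1;

        __find_mac(mac);   /* control path: 'used' untouched */
        printf("used after raw lookup: %ld\n", (long)table[0].used);

        find_mac(mac);     /* data path: 'used' refreshed */
        printf("used after data-path lookup: %ld\n", (long)table[0].used);
        return 0;
    }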
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 9b20d9ee2719..7f702fe3ecc2 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2369,6 +2369,9 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2369 int i; 2369 int i;
2370 bool needreset = false; 2370 bool needreset = false;
2371 2371
2372 if (!test_bit(ATH_STAT_STARTED, ah->status))
2373 return;
2374
2372 mutex_lock(&ah->lock); 2375 mutex_lock(&ah->lock);
2373 2376
2374 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) { 2377 for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
@@ -2676,6 +2679,7 @@ done:
2676 mmiowb(); 2679 mmiowb();
2677 mutex_unlock(&ah->lock); 2680 mutex_unlock(&ah->lock);
2678 2681
2682 set_bit(ATH_STAT_STARTED, ah->status);
2679 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work, 2683 ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
2680 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT)); 2684 msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
2681 2685
@@ -2737,6 +2741,7 @@ void ath5k_stop(struct ieee80211_hw *hw)
2737 2741
2738 ath5k_stop_tasklets(ah); 2742 ath5k_stop_tasklets(ah);
2739 2743
2744 clear_bit(ATH_STAT_STARTED, ah->status);
2740 cancel_delayed_work_sync(&ah->tx_complete_work); 2745 cancel_delayed_work_sync(&ah->tx_complete_work);
2741 2746
2742 if (!ath5k_modparam_no_hw_rfkill_switch) 2747 if (!ath5k_modparam_no_hw_rfkill_switch)
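The ath5k fix brackets the tx-completion poll worker with an ATH_STAT_STARTED bit: set once the interface is brought up, cleared in ath5k_stop() before the delayed work is cancelled, and checked at the top of the worker so a late run cannot touch a stopped device or re-arm itself. The guard reduces to a started flag plus an early return, as in this sketch (simulated in userspace, names invented):

    #include <stdio.h>
    #include <stdatomic.h>

    static atomic_bool started;   /* stand-in for the ATH_STAT_STARTED bit */

    /* Periodic poll worker: exit immediately when the device has not been
     * (or is no longer) started, so it cannot re-arm itself across a stop. */
    static void tx_complete_poll(void)
    {
        if (!atomic_load(&started)) {
            printf("poll: not started, bailing out\n");
            return;
        }
        printf("poll: checking tx queues, re-arming timer\n");
        /* a real driver would requeue the delayed work here */
    }

    static void dev_start(void) { atomic_store(&started, true); }

    static void dev_stop(void)
    {
        atomic_store(&started, false);   /* clear BEFORE cancelling the work */
        /* cancel_delayed_work_sync() equivalent would run here */
    }

    int main(void)
    {
        tx_complete_poll();   /* before start: does nothing */
        dev_start();
        tx_complete_poll();   /* normal operation */
        dev_stop();
        tx_complete_poll();   /* after stop: does nothing, no re-arm */
        return 0;
    }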
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 17507dc8a1e7..f3dc124c60c7 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -17,7 +17,7 @@ config ATH9K_BTCOEX_SUPPORT
17 17
18config ATH9K 18config ATH9K
19 tristate "Atheros 802.11n wireless cards support" 19 tristate "Atheros 802.11n wireless cards support"
20 depends on MAC80211 20 depends on MAC80211 && HAS_DMA
21 select ATH9K_HW 21 select ATH9K_HW
22 select MAC80211_LEDS 22 select MAC80211_LEDS
23 select LEDS_CLASS 23 select LEDS_CLASS
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 639ba7d18ea4..6988e1d081f2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -965,7 +965,7 @@ static void ar9003_hw_do_manual_peak_cal(struct ath_hw *ah,
965{ 965{
966 int i; 966 int i;
967 967
968 if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah)) 968 if (!AR_SREV_9462(ah) && !AR_SREV_9565(ah) && !AR_SREV_9485(ah))
969 return; 969 return;
970 970
971 for (i = 0; i < AR9300_MAX_CHAINS; i++) { 971 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
index 712f415b8c08..88ff1d7b53ab 100644
--- a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -1020,7 +1020,7 @@ static const u32 ar9485_1_1_baseband_postamble[][5] = {
1020 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0}, 1020 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
1021 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1021 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1022 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1022 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1023 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 1023 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
1024 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982}, 1024 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071982, 0x00071982},
1025 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 1025 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
1026 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1026 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
index 0c2ac0c6dc89..e85a8b076c22 100644
--- a/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9565_1p0_initvals.h
@@ -233,9 +233,9 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
233 {0x00009d10, 0x01834061}, 233 {0x00009d10, 0x01834061},
234 {0x00009d14, 0x00c00400}, 234 {0x00009d14, 0x00c00400},
235 {0x00009d18, 0x00000000}, 235 {0x00009d18, 0x00000000},
236 {0x00009e08, 0x0078230c}, 236 {0x00009e08, 0x0038230c},
237 {0x00009e24, 0x990bb515}, 237 {0x00009e24, 0x9907b515},
238 {0x00009e28, 0x126f0000}, 238 {0x00009e28, 0x126f0600},
239 {0x00009e30, 0x06336f77}, 239 {0x00009e30, 0x06336f77},
240 {0x00009e34, 0x6af6532f}, 240 {0x00009e34, 0x6af6532f},
241 {0x00009e38, 0x0cc80c00}, 241 {0x00009e38, 0x0cc80c00},
@@ -337,7 +337,7 @@ static const u32 ar9565_1p0_baseband_core[][2] = {
337 337
338static const u32 ar9565_1p0_baseband_postamble[][5] = { 338static const u32 ar9565_1p0_baseband_postamble[][5] = {
339 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 339 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
340 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a800d}, 340 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8009},
341 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae}, 341 {0x00009820, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a01ae},
342 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da}, 342 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x63c640da},
343 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81}, 343 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x09143c81},
@@ -345,9 +345,9 @@ static const u32 ar9565_1p0_baseband_postamble[][5] = {
345 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, 345 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
346 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4}, 346 {0x00009c00, 0x000000c4, 0x000000c4, 0x000000c4, 0x000000c4},
347 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0}, 347 {0x00009e00, 0x0372111a, 0x0372111a, 0x037216a0, 0x037216a0},
348 {0x00009e04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, 348 {0x00009e04, 0x00802020, 0x00802020, 0x00142020, 0x00142020},
349 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000d8}, 349 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
350 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec86d2e}, 350 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e},
351 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e}, 351 {0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3379605e, 0x33795d5e},
352 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 352 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
353 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 353 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
@@ -450,6 +450,8 @@ static const u32 ar9565_1p0_soc_postamble[][5] = {
450 450
451static const u32 ar9565_1p0_Common_rx_gain_table[][2] = { 451static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
452 /* Addr allmodes */ 452 /* Addr allmodes */
453 {0x00004050, 0x00300300},
454 {0x0000406c, 0x00100000},
453 {0x0000a000, 0x00010000}, 455 {0x0000a000, 0x00010000},
454 {0x0000a004, 0x00030002}, 456 {0x0000a004, 0x00030002},
455 {0x0000a008, 0x00050004}, 457 {0x0000a008, 0x00050004},
@@ -498,27 +500,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
498 {0x0000a0b4, 0x00000000}, 500 {0x0000a0b4, 0x00000000},
499 {0x0000a0b8, 0x00000000}, 501 {0x0000a0b8, 0x00000000},
500 {0x0000a0bc, 0x00000000}, 502 {0x0000a0bc, 0x00000000},
501 {0x0000a0c0, 0x001f0000}, 503 {0x0000a0c0, 0x00bf00a0},
502 {0x0000a0c4, 0x01000101}, 504 {0x0000a0c4, 0x11a011a1},
503 {0x0000a0c8, 0x011e011f}, 505 {0x0000a0c8, 0x11be11bf},
504 {0x0000a0cc, 0x011c011d}, 506 {0x0000a0cc, 0x11bc11bd},
505 {0x0000a0d0, 0x02030204}, 507 {0x0000a0d0, 0x22632264},
506 {0x0000a0d4, 0x02010202}, 508 {0x0000a0d4, 0x22612262},
507 {0x0000a0d8, 0x021f0200}, 509 {0x0000a0d8, 0x227f2260},
508 {0x0000a0dc, 0x0302021e}, 510 {0x0000a0dc, 0x4322227e},
509 {0x0000a0e0, 0x03000301}, 511 {0x0000a0e0, 0x43204321},
510 {0x0000a0e4, 0x031e031f}, 512 {0x0000a0e4, 0x433e433f},
511 {0x0000a0e8, 0x0402031d}, 513 {0x0000a0e8, 0x4462433d},
512 {0x0000a0ec, 0x04000401}, 514 {0x0000a0ec, 0x44604461},
513 {0x0000a0f0, 0x041e041f}, 515 {0x0000a0f0, 0x447e447f},
514 {0x0000a0f4, 0x0502041d}, 516 {0x0000a0f4, 0x5582447d},
515 {0x0000a0f8, 0x05000501}, 517 {0x0000a0f8, 0x55805581},
516 {0x0000a0fc, 0x051e051f}, 518 {0x0000a0fc, 0x559e559f},
517 {0x0000a100, 0x06010602}, 519 {0x0000a100, 0x66816682},
518 {0x0000a104, 0x061f0600}, 520 {0x0000a104, 0x669f6680},
519 {0x0000a108, 0x061d061e}, 521 {0x0000a108, 0x669d669e},
520 {0x0000a10c, 0x07020703}, 522 {0x0000a10c, 0x77627763},
521 {0x0000a110, 0x07000701}, 523 {0x0000a110, 0x77607761},
522 {0x0000a114, 0x00000000}, 524 {0x0000a114, 0x00000000},
523 {0x0000a118, 0x00000000}, 525 {0x0000a118, 0x00000000},
524 {0x0000a11c, 0x00000000}, 526 {0x0000a11c, 0x00000000},
@@ -530,27 +532,27 @@ static const u32 ar9565_1p0_Common_rx_gain_table[][2] = {
530 {0x0000a134, 0x00000000}, 532 {0x0000a134, 0x00000000},
531 {0x0000a138, 0x00000000}, 533 {0x0000a138, 0x00000000},
532 {0x0000a13c, 0x00000000}, 534 {0x0000a13c, 0x00000000},
533 {0x0000a140, 0x001f0000}, 535 {0x0000a140, 0x00bf00a0},
534 {0x0000a144, 0x01000101}, 536 {0x0000a144, 0x11a011a1},
535 {0x0000a148, 0x011e011f}, 537 {0x0000a148, 0x11be11bf},
536 {0x0000a14c, 0x011c011d}, 538 {0x0000a14c, 0x11bc11bd},
537 {0x0000a150, 0x02030204}, 539 {0x0000a150, 0x22632264},
538 {0x0000a154, 0x02010202}, 540 {0x0000a154, 0x22612262},
539 {0x0000a158, 0x021f0200}, 541 {0x0000a158, 0x227f2260},
540 {0x0000a15c, 0x0302021e}, 542 {0x0000a15c, 0x4322227e},
541 {0x0000a160, 0x03000301}, 543 {0x0000a160, 0x43204321},
542 {0x0000a164, 0x031e031f}, 544 {0x0000a164, 0x433e433f},
543 {0x0000a168, 0x0402031d}, 545 {0x0000a168, 0x4462433d},
544 {0x0000a16c, 0x04000401}, 546 {0x0000a16c, 0x44604461},
545 {0x0000a170, 0x041e041f}, 547 {0x0000a170, 0x447e447f},
546 {0x0000a174, 0x0502041d}, 548 {0x0000a174, 0x5582447d},
547 {0x0000a178, 0x05000501}, 549 {0x0000a178, 0x55805581},
548 {0x0000a17c, 0x051e051f}, 550 {0x0000a17c, 0x559e559f},
549 {0x0000a180, 0x06010602}, 551 {0x0000a180, 0x66816682},
550 {0x0000a184, 0x061f0600}, 552 {0x0000a184, 0x669f6680},
551 {0x0000a188, 0x061d061e}, 553 {0x0000a188, 0x669d669e},
552 {0x0000a18c, 0x07020703}, 554 {0x0000a18c, 0x77e677e7},
553 {0x0000a190, 0x07000701}, 555 {0x0000a190, 0x77e477e5},
554 {0x0000a194, 0x00000000}, 556 {0x0000a194, 0x00000000},
555 {0x0000a198, 0x00000000}, 557 {0x0000a198, 0x00000000},
556 {0x0000a19c, 0x00000000}, 558 {0x0000a19c, 0x00000000},
@@ -770,7 +772,7 @@ static const u32 ar9565_1p0_Modes_lowest_ob_db_tx_gain_table[][5] = {
770 772
771static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = { 773static const u32 ar9565_1p0_pciephy_clkreq_disable_L1[][2] = {
772 /* Addr allmodes */ 774 /* Addr allmodes */
773 {0x00018c00, 0x18213ede}, 775 {0x00018c00, 0x18212ede},
774 {0x00018c04, 0x000801d8}, 776 {0x00018c04, 0x000801d8},
775 {0x00018c08, 0x0003780c}, 777 {0x00018c08, 0x0003780c},
776}; 778};
@@ -889,8 +891,8 @@ static const u32 ar9565_1p0_common_wo_xlna_rx_gain_table[][2] = {
889 {0x0000a180, 0x66816682}, 891 {0x0000a180, 0x66816682},
890 {0x0000a184, 0x669f6680}, 892 {0x0000a184, 0x669f6680},
891 {0x0000a188, 0x669d669e}, 893 {0x0000a188, 0x669d669e},
892 {0x0000a18c, 0x77627763}, 894 {0x0000a18c, 0x77e677e7},
893 {0x0000a190, 0x77607761}, 895 {0x0000a190, 0x77e477e5},
894 {0x0000a194, 0x00000000}, 896 {0x0000a194, 0x00000000},
895 {0x0000a198, 0x00000000}, 897 {0x0000a198, 0x00000000},
896 {0x0000a19c, 0x00000000}, 898 {0x0000a19c, 0x00000000},
@@ -1114,7 +1116,7 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
1114 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, 1116 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1115 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, 1117 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1116 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, 1118 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1117 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1119 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
1118 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 1120 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1119 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 1121 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1120 {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004}, 1122 {0x0000a508, 0x0b022220, 0x0b022220, 0x08000004, 0x08000004},
@@ -1140,13 +1142,13 @@ static const u32 ar9565_1p0_modes_high_ob_db_tx_gain_table[][5] = {
1140 {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5}, 1142 {0x0000a558, 0x69027f56, 0x69027f56, 0x53001ce5, 0x53001ce5},
1141 {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9}, 1143 {0x0000a55c, 0x6d029f56, 0x6d029f56, 0x57001ce9, 0x57001ce9},
1142 {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb}, 1144 {0x0000a560, 0x73049f56, 0x73049f56, 0x5b001ceb, 0x5b001ceb},
1143 {0x0000a564, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1145 {0x0000a564, 0x7804ff56, 0x7804ff56, 0x60001cf0, 0x60001cf0},
1144 {0x0000a568, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1146 {0x0000a568, 0x7804ff56, 0x7804ff56, 0x61001cf1, 0x61001cf1},
1145 {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1147 {0x0000a56c, 0x7804ff56, 0x7804ff56, 0x62001cf2, 0x62001cf2},
1146 {0x0000a570, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1148 {0x0000a570, 0x7804ff56, 0x7804ff56, 0x63001cf3, 0x63001cf3},
1147 {0x0000a574, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1149 {0x0000a574, 0x7804ff56, 0x7804ff56, 0x64001cf4, 0x64001cf4},
1148 {0x0000a578, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1150 {0x0000a578, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
1149 {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x5d001eec, 0x5d001eec}, 1151 {0x0000a57c, 0x7804ff56, 0x7804ff56, 0x66001ff6, 0x66001ff6},
1150 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1152 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1151 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1153 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1152 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1154 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -1174,7 +1176,7 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1174 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84}, 1176 {0x0000a2e0, 0xffecec00, 0xffecec00, 0xfd339c84, 0xfd339c84},
1175 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000}, 1177 {0x0000a2e4, 0xfc0f0000, 0xfc0f0000, 0xfec3e000, 0xfec3e000},
1176 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000}, 1178 {0x0000a2e8, 0xfc100000, 0xfc100000, 0xfffc0000, 0xfffc0000},
1177 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 1179 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050df, 0x000050df},
1178 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000}, 1180 {0x0000a500, 0x00002220, 0x00002220, 0x00000000, 0x00000000},
1179 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002}, 1181 {0x0000a504, 0x06002223, 0x06002223, 0x04000002, 0x04000002},
1180 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004}, 1182 {0x0000a508, 0x0a022220, 0x0a022220, 0x08000004, 0x08000004},
@@ -1200,13 +1202,13 @@ static const u32 ar9565_1p0_modes_high_power_tx_gain_table[][5] = {
1200 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5}, 1202 {0x0000a558, 0x66027f56, 0x66027f56, 0x4c001ce5, 0x4c001ce5},
1201 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9}, 1203 {0x0000a55c, 0x6a029f56, 0x6a029f56, 0x50001ce9, 0x50001ce9},
1202 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb}, 1204 {0x0000a560, 0x70049f56, 0x70049f56, 0x54001ceb, 0x54001ceb},
1203 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1205 {0x0000a564, 0x7504ff56, 0x7504ff56, 0x59001cf0, 0x59001cf0},
1204 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1206 {0x0000a568, 0x7504ff56, 0x7504ff56, 0x5a001cf1, 0x5a001cf1},
1205 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1207 {0x0000a56c, 0x7504ff56, 0x7504ff56, 0x5b001cf2, 0x5b001cf2},
1206 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1208 {0x0000a570, 0x7504ff56, 0x7504ff56, 0x5c001cf3, 0x5c001cf3},
1207 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1209 {0x0000a574, 0x7504ff56, 0x7504ff56, 0x5d001cf4, 0x5d001cf4},
1208 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1210 {0x0000a578, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
1209 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x56001eec, 0x56001eec}, 1211 {0x0000a57c, 0x7504ff56, 0x7504ff56, 0x5f001ff6, 0x5f001ff6},
1210 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1212 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1211 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1213 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
1212 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 1214 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 8a1888d02070..366002f266f8 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -254,6 +254,7 @@ struct ath_atx_tid {
254 int sched; 254 int sched;
255 int paused; 255 int paused;
256 u8 state; 256 u8 state;
257 bool stop_cb;
257}; 258};
258 259
259struct ath_node { 260struct ath_node {
@@ -351,7 +352,8 @@ void ath_tx_tasklet(struct ath_softc *sc);
351void ath_tx_edma_tasklet(struct ath_softc *sc); 352void ath_tx_edma_tasklet(struct ath_softc *sc);
352int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 353int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
353 u16 tid, u16 *ssn); 354 u16 tid, u16 *ssn);
354void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 355bool ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid,
356 bool flush);
355void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 357void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
356 358
357void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an); 359void ath_tx_aggr_wakeup(struct ath_softc *sc, struct ath_node *an);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index e6307b86363a..b37eb8d38811 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -2008,6 +2008,14 @@ void ath9k_get_et_stats(struct ieee80211_hw *hw,
2008 WARN_ON(i != ATH9K_SSTATS_LEN); 2008 WARN_ON(i != ATH9K_SSTATS_LEN);
2009} 2009}
2010 2010
2011void ath9k_deinit_debug(struct ath_softc *sc)
2012{
2013 if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
2014 relay_close(sc->rfs_chan_spec_scan);
2015 sc->rfs_chan_spec_scan = NULL;
2016 }
2017}
2018
2011int ath9k_init_debug(struct ath_hw *ah) 2019int ath9k_init_debug(struct ath_hw *ah)
2012{ 2020{
2013 struct ath_common *common = ath9k_hw_common(ah); 2021 struct ath_common *common = ath9k_hw_common(ah);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 794a7ec83a24..9d49aab8b989 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -304,6 +304,7 @@ struct ath9k_debug {
304}; 304};
305 305
306int ath9k_init_debug(struct ath_hw *ah); 306int ath9k_init_debug(struct ath_hw *ah);
307void ath9k_deinit_debug(struct ath_softc *sc);
307 308
308void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 309void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
309void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf, 310void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
@@ -339,6 +340,10 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
339 return 0; 340 return 0;
340} 341}
341 342
343static inline void ath9k_deinit_debug(struct ath_softc *sc)
344{
345}
346
342static inline void ath_debug_stat_interrupt(struct ath_softc *sc, 347static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
343 enum ath9k_int status) 348 enum ath9k_int status)
344{ 349{
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 0237b2868961..aba415103f94 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -906,7 +906,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
906 if (!ath_is_world_regd(reg)) { 906 if (!ath_is_world_regd(reg)) {
907 error = regulatory_hint(hw->wiphy, reg->alpha2); 907 error = regulatory_hint(hw->wiphy, reg->alpha2);
908 if (error) 908 if (error)
909 goto unregister; 909 goto debug_cleanup;
910 } 910 }
911 911
912 ath_init_leds(sc); 912 ath_init_leds(sc);
@@ -914,6 +914,8 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc,
914 914
915 return 0; 915 return 0;
916 916
917debug_cleanup:
918 ath9k_deinit_debug(sc);
917unregister: 919unregister:
918 ieee80211_unregister_hw(hw); 920 ieee80211_unregister_hw(hw);
919rx_cleanup: 921rx_cleanup:
@@ -942,11 +944,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
942 sc->dfs_detector->exit(sc->dfs_detector); 944 sc->dfs_detector->exit(sc->dfs_detector);
943 945
944 ath9k_eeprom_release(sc); 946 ath9k_eeprom_release(sc);
945
946 if (config_enabled(CONFIG_ATH9K_DEBUGFS) && sc->rfs_chan_spec_scan) {
947 relay_close(sc->rfs_chan_spec_scan);
948 sc->rfs_chan_spec_scan = NULL;
949 }
950} 947}
951 948
952void ath9k_deinit_device(struct ath_softc *sc) 949void ath9k_deinit_device(struct ath_softc *sc)
@@ -960,6 +957,7 @@ void ath9k_deinit_device(struct ath_softc *sc)
960 957
961 ath9k_ps_restore(sc); 958 ath9k_ps_restore(sc);
962 959
960 ath9k_deinit_debug(sc);
963 ieee80211_unregister_hw(hw); 961 ieee80211_unregister_hw(hw);
964 ath_rx_cleanup(sc); 962 ath_rx_cleanup(sc);
965 ath9k_deinit_softc(sc); 963 ath9k_deinit_softc(sc);
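The init.c hunk above adds a debug_cleanup: label so that a late failure (the regulatory_hint() error path) also unwinds ath9k_init_debug() before the hw is unregistered. Below is a minimal, hypothetical sketch of that goto-unwind ordering in plain userspace C; every name in it (setup_hw, setup_debug, setup_reg, teardown_*) is invented for illustration and only the label ordering mirrors the change.

/* Hedged sketch of the goto-based unwind pattern: each new cleanup
 * label is added above the labels that undo earlier setup steps. */
#include <stdio.h>

static int setup_hw(void)    { puts("hw up");    return 0; }
static int setup_debug(void) { puts("debug up"); return 0; }
static int setup_reg(void)   { puts("reg hint"); return -1; /* simulate failure */ }

static void teardown_debug(void) { puts("debug down"); }
static void teardown_hw(void)    { puts("hw down"); }

static int init_device(void)
{
	int err;

	err = setup_hw();
	if (err)
		goto out;

	err = setup_debug();
	if (err)
		goto err_hw;

	err = setup_reg();
	if (err)
		goto err_debug;	/* new label: undo debug before the hw */

	return 0;

err_debug:
	teardown_debug();
err_hw:
	teardown_hw();
out:
	return err;
}

int main(void)
{
	return init_device() ? 1 : 0;
}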
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6963862a1872..2382d1262e7f 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -227,13 +227,13 @@ static bool ath_complete_reset(struct ath_softc *sc, bool start)
227 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags)) 227 if (!test_bit(SC_OP_BEACONS, &sc->sc_flags))
228 goto work; 228 goto work;
229 229
230 ath9k_set_beacon(sc);
231
232 if (ah->opmode == NL80211_IFTYPE_STATION && 230 if (ah->opmode == NL80211_IFTYPE_STATION &&
233 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) { 231 test_bit(SC_OP_PRIM_STA_VIF, &sc->sc_flags)) {
234 spin_lock_irqsave(&sc->sc_pm_lock, flags); 232 spin_lock_irqsave(&sc->sc_pm_lock, flags);
235 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON; 233 sc->ps_flags |= PS_BEACON_SYNC | PS_WAIT_FOR_BEACON;
236 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 234 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
235 } else {
236 ath9k_set_beacon(sc);
237 } 237 }
238 work: 238 work:
239 ath_restart_work(sc); 239 ath_restart_work(sc);
@@ -1332,6 +1332,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1332 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1332 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1333 struct ath_node *an = (struct ath_node *) sta->drv_priv; 1333 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1334 struct ieee80211_key_conf ps_key = { }; 1334 struct ieee80211_key_conf ps_key = { };
1335 int key;
1335 1336
1336 ath_node_attach(sc, sta, vif); 1337 ath_node_attach(sc, sta, vif);
1337 1338
@@ -1339,7 +1340,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw,
1339 vif->type != NL80211_IFTYPE_AP_VLAN) 1340 vif->type != NL80211_IFTYPE_AP_VLAN)
1340 return 0; 1341 return 0;
1341 1342
1342 an->ps_key = ath_key_config(common, vif, sta, &ps_key); 1343 key = ath_key_config(common, vif, sta, &ps_key);
1344 if (key > 0)
1345 an->ps_key = key;
1343 1346
1344 return 0; 1347 return 0;
1345} 1348}
@@ -1356,6 +1359,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
1356 return; 1359 return;
1357 1360
1358 ath_key_delete(common, &ps_key); 1361 ath_key_delete(common, &ps_key);
1362 an->ps_key = 0;
1359} 1363}
1360 1364
1361static int ath9k_sta_remove(struct ieee80211_hw *hw, 1365static int ath9k_sta_remove(struct ieee80211_hw *hw,
@@ -1683,6 +1687,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1683 u16 tid, u16 *ssn, u8 buf_size) 1687 u16 tid, u16 *ssn, u8 buf_size)
1684{ 1688{
1685 struct ath_softc *sc = hw->priv; 1689 struct ath_softc *sc = hw->priv;
1690 bool flush = false;
1686 int ret = 0; 1691 int ret = 0;
1687 1692
1688 local_bh_disable(); 1693 local_bh_disable();
@@ -1699,12 +1704,13 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
1699 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1704 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1700 ath9k_ps_restore(sc); 1705 ath9k_ps_restore(sc);
1701 break; 1706 break;
1702 case IEEE80211_AMPDU_TX_STOP_CONT:
1703 case IEEE80211_AMPDU_TX_STOP_FLUSH: 1707 case IEEE80211_AMPDU_TX_STOP_FLUSH:
1704 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 1708 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
1709 flush = true;
1710 case IEEE80211_AMPDU_TX_STOP_CONT:
1705 ath9k_ps_wakeup(sc); 1711 ath9k_ps_wakeup(sc);
1706 ath_tx_aggr_stop(sc, sta, tid); 1712 if (ath_tx_aggr_stop(sc, sta, tid, flush))
1707 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 1713 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1708 ath9k_ps_restore(sc); 1714 ath9k_ps_restore(sc);
1709 break; 1715 break;
1710 case IEEE80211_AMPDU_TX_OPERATIONAL: 1716 case IEEE80211_AMPDU_TX_OPERATIONAL:
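The ampdu_action hunk reorders the TX_STOP cases so that the two FLUSH variants set a local flush flag and then fall through into the shared stop path, where ath_tx_aggr_stop() decides whether ieee80211_stop_tx_ba_cb_irqsafe() may be called right away. A small hedged sketch of that deliberate fall-through, with invented enum and handler names:

/* Sketch only: the enum values and handler are illustrative, not the
 * mac80211 definitions; the point is the flag-then-fall-through shape. */
#include <stdbool.h>
#include <stdio.h>

enum stop_action { STOP_CONT, STOP_FLUSH, STOP_FLUSH_CONT };

static void handle_stop(enum stop_action action)
{
	bool flush = false;

	switch (action) {
	case STOP_FLUSH:
	case STOP_FLUSH_CONT:
		flush = true;
		/* fall through */
	case STOP_CONT:
		printf("stopping aggregation, flush=%d\n", flush);
		break;
	}
}

int main(void)
{
	handle_stop(STOP_CONT);
	handle_stop(STOP_FLUSH);
	return 0;
}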
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index eab0fcb7ded6..14bb3354ea64 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -164,7 +164,20 @@ static void ath_set_rates(struct ieee80211_vif *vif, struct ieee80211_sta *sta,
164 ARRAY_SIZE(bf->rates)); 164 ARRAY_SIZE(bf->rates));
165} 165}
166 166
167static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 167static void ath_tx_clear_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
168{
169 tid->state &= ~AGGR_ADDBA_COMPLETE;
170 tid->state &= ~AGGR_CLEANUP;
171 if (!tid->stop_cb)
172 return;
173
174 ieee80211_start_tx_ba_cb_irqsafe(tid->an->vif, tid->an->sta->addr,
175 tid->tidno);
176 tid->stop_cb = false;
177}
178
179static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid,
180 bool flush_packets)
168{ 181{
169 struct ath_txq *txq = tid->ac->txq; 182 struct ath_txq *txq = tid->ac->txq;
170 struct sk_buff *skb; 183 struct sk_buff *skb;
@@ -181,16 +194,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
181 while ((skb = __skb_dequeue(&tid->buf_q))) { 194 while ((skb = __skb_dequeue(&tid->buf_q))) {
182 fi = get_frame_info(skb); 195 fi = get_frame_info(skb);
183 bf = fi->bf; 196 bf = fi->bf;
197 if (!bf && !flush_packets)
198 bf = ath_tx_setup_buffer(sc, txq, tid, skb);
184 199
185 if (!bf) { 200 if (!bf) {
186 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 201 ieee80211_free_txskb(sc->hw, skb);
187 if (!bf) { 202 continue;
188 ieee80211_free_txskb(sc->hw, skb);
189 continue;
190 }
191 } 203 }
192 204
193 if (fi->retries) { 205 if (fi->retries || flush_packets) {
194 list_add_tail(&bf->list, &bf_head); 206 list_add_tail(&bf->list, &bf_head);
195 ath_tx_update_baw(sc, tid, bf->bf_state.seqno); 207 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
196 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 208 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
@@ -201,12 +213,10 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
201 } 213 }
202 } 214 }
203 215
204 if (tid->baw_head == tid->baw_tail) { 216 if (tid->baw_head == tid->baw_tail)
205 tid->state &= ~AGGR_ADDBA_COMPLETE; 217 ath_tx_clear_tid(sc, tid);
206 tid->state &= ~AGGR_CLEANUP;
207 }
208 218
209 if (sendbar) { 219 if (sendbar && !flush_packets) {
210 ath_txq_unlock(sc, txq); 220 ath_txq_unlock(sc, txq);
211 ath_send_bar(tid, tid->seq_start); 221 ath_send_bar(tid, tid->seq_start);
212 ath_txq_lock(sc, txq); 222 ath_txq_lock(sc, txq);
@@ -277,9 +287,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
277 287
278 list_add_tail(&bf->list, &bf_head); 288 list_add_tail(&bf->list, &bf_head);
279 289
280 if (fi->retries) 290 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
281 ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
282
283 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0); 291 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
284 } 292 }
285 293
@@ -602,7 +610,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
602 } 610 }
603 611
604 if (tid->state & AGGR_CLEANUP) 612 if (tid->state & AGGR_CLEANUP)
605 ath_tx_flush_tid(sc, tid); 613 ath_tx_flush_tid(sc, tid, false);
606 614
607 rcu_read_unlock(); 615 rcu_read_unlock();
608 616
@@ -620,6 +628,7 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
620 struct ath_tx_status *ts, struct ath_buf *bf, 628 struct ath_tx_status *ts, struct ath_buf *bf,
621 struct list_head *bf_head) 629 struct list_head *bf_head)
622{ 630{
631 struct ieee80211_tx_info *info;
623 bool txok, flush; 632 bool txok, flush;
624 633
625 txok = !(ts->ts_status & ATH9K_TXERR_MASK); 634 txok = !(ts->ts_status & ATH9K_TXERR_MASK);
@@ -631,8 +640,12 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
631 txq->axq_ampdu_depth--; 640 txq->axq_ampdu_depth--;
632 641
633 if (!bf_isampdu(bf)) { 642 if (!bf_isampdu(bf)) {
634 if (!flush) 643 if (!flush) {
644 info = IEEE80211_SKB_CB(bf->bf_mpdu);
645 memcpy(info->control.rates, bf->rates,
646 sizeof(info->control.rates));
635 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok); 647 ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
648 }
636 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok); 649 ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
637 } else 650 } else
638 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok); 651 ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok);
@@ -676,7 +689,7 @@ static u32 ath_lookup_rate(struct ath_softc *sc, struct ath_buf *bf,
676 689
677 skb = bf->bf_mpdu; 690 skb = bf->bf_mpdu;
678 tx_info = IEEE80211_SKB_CB(skb); 691 tx_info = IEEE80211_SKB_CB(skb);
679 rates = tx_info->control.rates; 692 rates = bf->rates;
680 693
681 /* 694 /*
682 * Find the lowest frame length among the rate series that will have a 695 * Find the lowest frame length among the rate series that will have a
@@ -1256,18 +1269,23 @@ int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
1256 return 0; 1269 return 0;
1257} 1270}
1258 1271
1259void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid) 1272bool ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid,
1273 bool flush)
1260{ 1274{
1261 struct ath_node *an = (struct ath_node *)sta->drv_priv; 1275 struct ath_node *an = (struct ath_node *)sta->drv_priv;
1262 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 1276 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
1263 struct ath_txq *txq = txtid->ac->txq; 1277 struct ath_txq *txq = txtid->ac->txq;
1278 bool ret = !flush;
1279
1280 if (flush)
1281 txtid->stop_cb = false;
1264 1282
1265 if (txtid->state & AGGR_CLEANUP) 1283 if (txtid->state & AGGR_CLEANUP)
1266 return; 1284 return false;
1267 1285
1268 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) { 1286 if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
1269 txtid->state &= ~AGGR_ADDBA_PROGRESS; 1287 txtid->state &= ~AGGR_ADDBA_PROGRESS;
1270 return; 1288 return ret;
1271 } 1289 }
1272 1290
1273 ath_txq_lock(sc, txq); 1291 ath_txq_lock(sc, txq);
@@ -1279,13 +1297,17 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
1279 * TID can only be reused after all in-progress subframes have been 1297 * TID can only be reused after all in-progress subframes have been
1280 * completed. 1298 * completed.
1281 */ 1299 */
1282 if (txtid->baw_head != txtid->baw_tail) 1300 if (txtid->baw_head != txtid->baw_tail) {
1283 txtid->state |= AGGR_CLEANUP; 1301 txtid->state |= AGGR_CLEANUP;
1284 else 1302 ret = false;
1303 txtid->stop_cb = !flush;
1304 } else {
1285 txtid->state &= ~AGGR_ADDBA_COMPLETE; 1305 txtid->state &= ~AGGR_ADDBA_COMPLETE;
1306 }
1286 1307
1287 ath_tx_flush_tid(sc, txtid); 1308 ath_tx_flush_tid(sc, txtid, flush);
1288 ath_txq_unlock_complete(sc, txq); 1309 ath_txq_unlock_complete(sc, txq);
1310 return ret;
1289} 1311}
1290 1312
1291void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc, 1313void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -2415,6 +2437,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2415 tid->ac = &an->ac[acno]; 2437 tid->ac = &an->ac[acno];
2416 tid->state &= ~AGGR_ADDBA_COMPLETE; 2438 tid->state &= ~AGGR_ADDBA_COMPLETE;
2417 tid->state &= ~AGGR_ADDBA_PROGRESS; 2439 tid->state &= ~AGGR_ADDBA_PROGRESS;
2440 tid->stop_cb = false;
2418 } 2441 }
2419 2442
2420 for (acno = 0, ac = &an->ac[acno]; 2443 for (acno = 0, ac = &an->ac[acno];
@@ -2451,8 +2474,7 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2451 } 2474 }
2452 2475
2453 ath_tid_drain(sc, txq, tid); 2476 ath_tid_drain(sc, txq, tid);
2454 tid->state &= ~AGGR_ADDBA_COMPLETE; 2477 ath_tx_clear_tid(sc, tid);
2455 tid->state &= ~AGGR_CLEANUP;
2456 2478
2457 ath_txq_unlock(sc, txq); 2479 ath_txq_unlock(sc, txq);
2458 } 2480 }
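The xmit.c changes make ath_tx_aggr_stop() report whether the caller may acknowledge the stop immediately; when the block-ack window still holds in-flight frames, the new tid->stop_cb flag defers that acknowledgement until ath_tx_clear_tid() runs. The sketch below models the same "complete now or defer the callback" idea in standalone C; the struct and function names are invented and only the control flow is intended to mirror the patch.

/* Hedged model: session, session_stop, session_drained and
 * notify_stopped are invented names for illustration. */
#include <stdbool.h>
#include <stdio.h>

struct session {
	int frames_in_flight;
	bool stop_cb_pending;
};

static void notify_stopped(struct session *s)
{
	(void)s;
	puts("stop acknowledged to the upper layer");
}

/* Returns true if the caller may acknowledge the stop immediately. */
static bool session_stop(struct session *s, bool flush)
{
	if (flush)
		s->stop_cb_pending = false;

	if (s->frames_in_flight && !flush) {
		/* Frames still pending: remember to acknowledge later. */
		s->stop_cb_pending = true;
		return false;
	}
	return !flush;
}

/* Called once the last pending frame has completed. */
static void session_drained(struct session *s)
{
	if (!s->stop_cb_pending)
		return;
	s->stop_cb_pending = false;
	notify_stopped(s);
}

int main(void)
{
	struct session s = { .frames_in_flight = 3 };

	if (session_stop(&s, false))
		notify_stopped(&s);

	s.frames_in_flight = 0;
	session_drained(&s);	/* the deferred acknowledgement fires here */
	return 0;
}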
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 523355b87659..f7c70b3a6ea9 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1728,6 +1728,25 @@ drop_recycle_buffer:
1728 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize); 1728 sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
1729} 1729}
1730 1730
1731void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
1732{
1733 int current_slot, previous_slot;
1734
1735 B43_WARN_ON(ring->tx);
1736
1737 /* Device has filled all buffers, drop all packets and let TCP
1738 * decrease speed.
1739	 * Decrementing the RX index by one lets the device see all slots
1740	 * as free again.
1741	 */
1742	/*
1743	 * TODO: How to increase rx_drop in mac80211?

1744 */
1745 current_slot = ring->ops->get_current_rxslot(ring);
1746 previous_slot = prev_slot(ring, current_slot);
1747 ring->ops->set_current_rxslot(ring, previous_slot);
1748}
1749
1731void b43_dma_rx(struct b43_dmaring *ring) 1750void b43_dma_rx(struct b43_dmaring *ring)
1732{ 1751{
1733 const struct b43_dma_ops *ops = ring->ops; 1752 const struct b43_dma_ops *ops = ring->ops;
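b43_dma_handle_rx_overflow() recovers from a full RX ring by moving the current RX slot back by one, which makes every descriptor look free to the device again. A tiny userspace model of that ring-index arithmetic (the ring size and helper names here are invented, not the b43 ones):

/* Sketch of the wrap-around "previous slot" trick used above. */
#include <stdio.h>

#define RING_SLOTS 64

static int prev_slot(int slot)
{
	return (slot + RING_SLOTS - 1) % RING_SLOTS;	/* wraps at slot 0 */
}

static void handle_rx_overflow(int *current_rx_slot)
{
	/* Drop everything that is queued and let the sender back off. */
	*current_rx_slot = prev_slot(*current_rx_slot);
}

int main(void)
{
	int slot = 0;

	handle_rx_overflow(&slot);
	printf("rx slot moved to %d of %d\n", slot, RING_SLOTS);
	return 0;
}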
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index 9fdd1983079c..df8c8cdcbdb5 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -9,7 +9,7 @@
9/* DMA-Interrupt reasons. */ 9/* DMA-Interrupt reasons. */
10#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \ 10#define B43_DMAIRQ_FATALMASK ((1 << 10) | (1 << 11) | (1 << 12) \
11 | (1 << 14) | (1 << 15)) 11 | (1 << 14) | (1 << 15))
12#define B43_DMAIRQ_NONFATALMASK (1 << 13) 12#define B43_DMAIRQ_RDESC_UFLOW (1 << 13)
13#define B43_DMAIRQ_RX_DONE (1 << 16) 13#define B43_DMAIRQ_RX_DONE (1 << 16)
14 14
15/*** 32-bit DMA Engine. ***/ 15/*** 32-bit DMA Engine. ***/
@@ -295,6 +295,8 @@ int b43_dma_tx(struct b43_wldev *dev,
295void b43_dma_handle_txstatus(struct b43_wldev *dev, 295void b43_dma_handle_txstatus(struct b43_wldev *dev,
296 const struct b43_txstatus *status); 296 const struct b43_txstatus *status);
297 297
298void b43_dma_handle_rx_overflow(struct b43_dmaring *ring);
299
298void b43_dma_rx(struct b43_dmaring *ring); 300void b43_dma_rx(struct b43_dmaring *ring);
299 301
300void b43_dma_direct_fifo_rx(struct b43_wldev *dev, 302void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index d377f77d30b5..6dd07e2ec595 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -1902,30 +1902,18 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1902 } 1902 }
1903 } 1903 }
1904 1904
1905 if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK | 1905 if (unlikely(merged_dma_reason & (B43_DMAIRQ_FATALMASK))) {
1906 B43_DMAIRQ_NONFATALMASK))) { 1906 b43err(dev->wl,
1907 if (merged_dma_reason & B43_DMAIRQ_FATALMASK) { 1907 "Fatal DMA error: 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X, 0x%08X\n",
1908 b43err(dev->wl, "Fatal DMA error: " 1908 dma_reason[0], dma_reason[1],
1909 "0x%08X, 0x%08X, 0x%08X, " 1909 dma_reason[2], dma_reason[3],
1910 "0x%08X, 0x%08X, 0x%08X\n", 1910 dma_reason[4], dma_reason[5]);
1911 dma_reason[0], dma_reason[1], 1911 b43err(dev->wl, "This device does not support DMA "
1912 dma_reason[2], dma_reason[3],
1913 dma_reason[4], dma_reason[5]);
1914 b43err(dev->wl, "This device does not support DMA "
1915 "on your system. It will now be switched to PIO.\n"); 1912 "on your system. It will now be switched to PIO.\n");
1916 /* Fall back to PIO transfers if we get fatal DMA errors! */ 1913 /* Fall back to PIO transfers if we get fatal DMA errors! */
1917 dev->use_pio = true; 1914 dev->use_pio = true;
1918 b43_controller_restart(dev, "DMA error"); 1915 b43_controller_restart(dev, "DMA error");
1919 return; 1916 return;
1920 }
1921 if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
1922 b43err(dev->wl, "DMA error: "
1923 "0x%08X, 0x%08X, 0x%08X, "
1924 "0x%08X, 0x%08X, 0x%08X\n",
1925 dma_reason[0], dma_reason[1],
1926 dma_reason[2], dma_reason[3],
1927 dma_reason[4], dma_reason[5]);
1928 }
1929 } 1917 }
1930 1918
1931 if (unlikely(reason & B43_IRQ_UCODE_DEBUG)) 1919 if (unlikely(reason & B43_IRQ_UCODE_DEBUG))
@@ -1944,6 +1932,11 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
1944 handle_irq_noise(dev); 1932 handle_irq_noise(dev);
1945 1933
1946 /* Check the DMA reason registers for received data. */ 1934 /* Check the DMA reason registers for received data. */
1935 if (dma_reason[0] & B43_DMAIRQ_RDESC_UFLOW) {
1936 if (B43_DEBUG)
1937 b43warn(dev->wl, "RX descriptor underrun\n");
1938 b43_dma_handle_rx_overflow(dev->dma.rx_ring);
1939 }
1947 if (dma_reason[0] & B43_DMAIRQ_RX_DONE) { 1940 if (dma_reason[0] & B43_DMAIRQ_RX_DONE) {
1948 if (b43_using_pio_transfers(dev)) 1941 if (b43_using_pio_transfers(dev))
1949 b43_pio_rx(dev->pio.rx_queue); 1942 b43_pio_rx(dev->pio.rx_queue);
@@ -2001,7 +1994,7 @@ static irqreturn_t b43_do_interrupt(struct b43_wldev *dev)
2001 return IRQ_NONE; 1994 return IRQ_NONE;
2002 1995
2003 dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON) 1996 dev->dma_reason[0] = b43_read32(dev, B43_MMIO_DMA0_REASON)
2004 & 0x0001DC00; 1997 & 0x0001FC00;
2005 dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON) 1998 dev->dma_reason[1] = b43_read32(dev, B43_MMIO_DMA1_REASON)
2006 & 0x0000DC00; 1999 & 0x0000DC00;
2007 dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON) 2000 dev->dma_reason[2] = b43_read32(dev, B43_MMIO_DMA2_REASON)
@@ -3130,7 +3123,7 @@ static int b43_chip_init(struct b43_wldev *dev)
3130 b43_write32(dev, 0x018C, 0x02000000); 3123 b43_write32(dev, 0x018C, 0x02000000);
3131 } 3124 }
3132 b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000); 3125 b43_write32(dev, B43_MMIO_GEN_IRQ_REASON, 0x00004000);
3133 b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001DC00); 3126 b43_write32(dev, B43_MMIO_DMA0_IRQ_MASK, 0x0001FC00);
3134 b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00); 3127 b43_write32(dev, B43_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
3135 b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00); 3128 b43_write32(dev, B43_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
3136 b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00); 3129 b43_write32(dev, B43_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 6d758f285352..761f501959a9 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -4140,6 +4140,10 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
4140 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) | 4140 .types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
4141 BIT(NL80211_IFTYPE_P2P_GO) 4141 BIT(NL80211_IFTYPE_P2P_GO)
4142 }, 4142 },
4143 {
4144 .max = 1,
4145 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
4146 }
4143}; 4147};
4144static const struct ieee80211_iface_combination brcmf_iface_combos[] = { 4148static const struct ieee80211_iface_combination brcmf_iface_combos[] = {
4145 { 4149 {
@@ -4197,7 +4201,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
4197 BIT(NL80211_IFTYPE_ADHOC) | 4201 BIT(NL80211_IFTYPE_ADHOC) |
4198 BIT(NL80211_IFTYPE_AP) | 4202 BIT(NL80211_IFTYPE_AP) |
4199 BIT(NL80211_IFTYPE_P2P_CLIENT) | 4203 BIT(NL80211_IFTYPE_P2P_CLIENT) |
4200 BIT(NL80211_IFTYPE_P2P_GO); 4204 BIT(NL80211_IFTYPE_P2P_GO) |
4205 BIT(NL80211_IFTYPE_P2P_DEVICE);
4201 wiphy->iface_combinations = brcmf_iface_combos; 4206 wiphy->iface_combinations = brcmf_iface_combos;
4202 wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos); 4207 wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
4203 wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz; 4208 wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
diff --git a/drivers/net/wireless/iwlegacy/4965-mac.c b/drivers/net/wireless/iwlegacy/4965-mac.c
index b8f82e688c72..9a95045c97b6 100644
--- a/drivers/net/wireless/iwlegacy/4965-mac.c
+++ b/drivers/net/wireless/iwlegacy/4965-mac.c
@@ -5741,8 +5741,7 @@ il4965_mac_setup_register(struct il_priv *il, u32 max_probe_length)
5741 hw->flags = 5741 hw->flags =
5742 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION | 5742 IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_AMPDU_AGGREGATION |
5743 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT | 5743 IEEE80211_HW_NEED_DTIM_BEFORE_ASSOC | IEEE80211_HW_SPECTRUM_MGMT |
5744 IEEE80211_HW_REPORTS_TX_ACK_STATUS | IEEE80211_HW_SUPPORTS_PS | 5744 IEEE80211_HW_SUPPORTS_PS | IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
5745 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
5746 if (il->cfg->sku & IL_SKU_N) 5745 if (il->cfg->sku & IL_SKU_N)
5747 hw->flags |= 5746 hw->flags |=
5748 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS | 5747 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
diff --git a/drivers/net/wireless/iwlegacy/common.c b/drivers/net/wireless/iwlegacy/common.c
index 592d0aa634a8..e9a3cbc409ae 100644
--- a/drivers/net/wireless/iwlegacy/common.c
+++ b/drivers/net/wireless/iwlegacy/common.c
@@ -1423,7 +1423,7 @@ il_setup_rx_scan_handlers(struct il_priv *il)
1423} 1423}
1424EXPORT_SYMBOL(il_setup_rx_scan_handlers); 1424EXPORT_SYMBOL(il_setup_rx_scan_handlers);
1425 1425
1426inline u16 1426u16
1427il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band, 1427il_get_active_dwell_time(struct il_priv *il, enum ieee80211_band band,
1428 u8 n_probes) 1428 u8 n_probes)
1429{ 1429{
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 191dcae8ba47..c6384555aab4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -173,6 +173,8 @@ enum {
173 REPLY_DEBUG_CMD = 0xf0, 173 REPLY_DEBUG_CMD = 0xf0,
174 DEBUG_LOG_MSG = 0xf7, 174 DEBUG_LOG_MSG = 0xf7,
175 175
176 MCAST_FILTER_CMD = 0xd0,
177
176 /* D3 commands/notifications */ 178 /* D3 commands/notifications */
177 D3_CONFIG_CMD = 0xd3, 179 D3_CONFIG_CMD = 0xd3,
178 PROT_OFFLOAD_CONFIG_CMD = 0xd4, 180 PROT_OFFLOAD_CONFIG_CMD = 0xd4,
@@ -948,4 +950,29 @@ struct iwl_set_calib_default_cmd {
948 u8 data[0]; 950 u8 data[0];
949} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */ 951} __packed; /* PHY_CALIB_OVERRIDE_VALUES_S */
950 952
953#define MAX_PORT_ID_NUM 2
954
955/**
956 * struct iwl_mcast_filter_cmd - configure multicast filter.
957 * @filter_own: Set 1 to filter out multicast packets sent by station itself
 958	 * @port_id: Multicast MAC addresses array specifier. This is an unusual way
 959	 *	to identify the network interface, adopted in the host-device interface.
960 * It is used by FW as index in array of addresses. This array has
961 * MAX_PORT_ID_NUM members.
962 * @count: Number of MAC addresses in the array
963 * @pass_all: Set 1 to pass all multicast packets.
964 * @bssid: current association BSSID.
965 * @addr_list: Place holder for array of MAC addresses.
966 * IMPORTANT: add padding if necessary to ensure DWORD alignment.
967 */
968struct iwl_mcast_filter_cmd {
969 u8 filter_own;
970 u8 port_id;
971 u8 count;
972 u8 pass_all;
973 u8 bssid[6];
974 u8 reserved[2];
975 u8 addr_list[0];
976} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
977
951#endif /* __fw_api_h__ */ 978#endif /* __fw_api_h__ */
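iwl_mcast_filter_cmd is a packed command header followed by a variable-length addr_list[] of MAC addresses, with @count saying how many entries follow. The snippet below is a hedged userspace illustration of building such a trailing-array command; the struct is a look-alike for demonstration only and is not the firmware ABI.

/* Illustrative only: a packed header plus a flexible MAC-address array. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

struct mcast_filter_cmd {
	uint8_t filter_own;
	uint8_t port_id;
	uint8_t count;
	uint8_t pass_all;
	uint8_t bssid[ETH_ALEN];
	uint8_t reserved[2];
	uint8_t addr_list[];	/* count * ETH_ALEN bytes follow */
} __attribute__((packed));

int main(void)
{
	const uint8_t bssid[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const uint8_t mc[2][ETH_ALEN] = {
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
		{ 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb },
	};
	size_t len = sizeof(struct mcast_filter_cmd) + sizeof(mc);
	struct mcast_filter_cmd *cmd = calloc(1, len);

	if (!cmd)
		return 1;
	cmd->count = 2;
	memcpy(cmd->bssid, bssid, ETH_ALEN);
	memcpy(cmd->addr_list, mc, sizeof(mc));
	printf("command payload is %zu bytes\n", len);
	free(cmd);
	return 0;
}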
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index e6eca4d66f6c..b2cc3d98e0f7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -586,10 +586,12 @@ static int iwl_mvm_mac_ctxt_send_cmd(struct iwl_mvm *mvm,
586 */ 586 */
587static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm, 587static void iwl_mvm_mac_ctxt_cmd_fill_sta(struct iwl_mvm *mvm,
588 struct ieee80211_vif *vif, 588 struct ieee80211_vif *vif,
589 struct iwl_mac_data_sta *ctxt_sta) 589 struct iwl_mac_data_sta *ctxt_sta,
590 bool force_assoc_off)
590{ 591{
591 /* We need the dtim_period to set the MAC as associated */ 592 /* We need the dtim_period to set the MAC as associated */
592 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period) { 593 if (vif->bss_conf.assoc && vif->bss_conf.dtim_period &&
594 !force_assoc_off) {
593 u32 dtim_offs; 595 u32 dtim_offs;
594 596
595 /* 597 /*
@@ -659,7 +661,8 @@ static int iwl_mvm_mac_ctxt_cmd_station(struct iwl_mvm *mvm,
659 cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON); 661 cmd.filter_flags &= ~cpu_to_le32(MAC_FILTER_IN_BEACON);
660 662
661 /* Fill the data specific for station mode */ 663 /* Fill the data specific for station mode */
662 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta); 664 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.sta,
665 action == FW_CTXT_ACTION_ADD);
663 666
664 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); 667 return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
665} 668}
@@ -677,7 +680,8 @@ static int iwl_mvm_mac_ctxt_cmd_p2p_client(struct iwl_mvm *mvm,
677 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action); 680 iwl_mvm_mac_ctxt_cmd_common(mvm, vif, &cmd, action);
678 681
679 /* Fill the data specific for station mode */ 682 /* Fill the data specific for station mode */
680 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta); 683 iwl_mvm_mac_ctxt_cmd_fill_sta(mvm, vif, &cmd.p2p_sta.sta,
684 action == FW_CTXT_ACTION_ADD);
681 685
682 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow & 686 cmd.p2p_sta.ctwin = cpu_to_le32(noa->oppps_ctwindow &
683 IEEE80211_P2P_OPPPS_CTWINDOW_MASK); 687 IEEE80211_P2P_OPPPS_CTWINDOW_MASK);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index dd158ec571fb..a5eb8c82f16a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -701,6 +701,20 @@ static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
701 *total_flags = 0; 701 *total_flags = 0;
702} 702}
703 703
704static int iwl_mvm_configure_mcast_filter(struct iwl_mvm *mvm,
705 struct ieee80211_vif *vif)
706{
707 struct iwl_mcast_filter_cmd mcast_filter_cmd = {
708 .pass_all = 1,
709 };
710
711 memcpy(mcast_filter_cmd.bssid, vif->bss_conf.bssid, ETH_ALEN);
712
713 return iwl_mvm_send_cmd_pdu(mvm, MCAST_FILTER_CMD, CMD_SYNC,
714 sizeof(mcast_filter_cmd),
715 &mcast_filter_cmd);
716}
717
704static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 718static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
705 struct ieee80211_vif *vif, 719 struct ieee80211_vif *vif,
706 struct ieee80211_bss_conf *bss_conf, 720 struct ieee80211_bss_conf *bss_conf,
@@ -722,6 +736,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
722 return; 736 return;
723 } 737 }
724 iwl_mvm_bt_coex_vif_assoc(mvm, vif); 738 iwl_mvm_bt_coex_vif_assoc(mvm, vif);
739 iwl_mvm_configure_mcast_filter(mvm, vif);
725 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 740 } else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
726 /* remove AP station now that the MAC is unassoc */ 741 /* remove AP station now that the MAC is unassoc */
727 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id); 742 ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
@@ -931,7 +946,7 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
931 946
932 switch (cmd) { 947 switch (cmd) {
933 case STA_NOTIFY_SLEEP: 948 case STA_NOTIFY_SLEEP:
934 if (atomic_read(&mvmsta->pending_frames) > 0) 949 if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
935 ieee80211_sta_block_awake(hw, sta, true); 950 ieee80211_sta_block_awake(hw, sta, true);
936 /* 951 /*
937 * The fw updates the STA to be asleep. Tx packets on the Tx 952 * The fw updates the STA to be asleep. Tx packets on the Tx
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 8269bc562951..9f46b23801bc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -292,6 +292,7 @@ struct iwl_mvm {
292 struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT]; 292 struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT];
293 struct work_struct sta_drained_wk; 293 struct work_struct sta_drained_wk;
294 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)]; 294 unsigned long sta_drained[BITS_TO_LONGS(IWL_MVM_STATION_COUNT)];
295 atomic_t pending_frames[IWL_MVM_STATION_COUNT];
295 296
296 /* configured by mac80211 */ 297 /* configured by mac80211 */
297 u32 rts_threshold; 298 u32 rts_threshold;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index fe031d304d1e..b29c31a41594 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -292,6 +292,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
292 CMD(BT_COEX_PROT_ENV), 292 CMD(BT_COEX_PROT_ENV),
293 CMD(BT_PROFILE_NOTIFICATION), 293 CMD(BT_PROFILE_NOTIFICATION),
294 CMD(BT_CONFIG), 294 CMD(BT_CONFIG),
295 CMD(MCAST_FILTER_CMD),
295}; 296};
296#undef CMD 297#undef CMD
297 298
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 2157b0f8ced5..2476e43799d5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -298,6 +298,12 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
298 else 298 else
299 cmd->type = cpu_to_le32(SCAN_TYPE_FORCED); 299 cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
300 300
301 /*
302 * TODO: This is a WA due to a bug in the FW AUX framework that does not
303 * properly handle time events that fail to be scheduled
304 */
305 cmd->type = cpu_to_le32(SCAN_TYPE_FORCED);
306
301 cmd->repeats = cpu_to_le32(1); 307 cmd->repeats = cpu_to_le32(1);
302 308
303 /* 309 /*
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 0fd96e4da461..5c664ed54400 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -219,7 +219,7 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
219 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 219 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
220 220
221 /* HW restart, don't assume the memory has been zeroed */ 221 /* HW restart, don't assume the memory has been zeroed */
222 atomic_set(&mvm_sta->pending_frames, 0); 222 atomic_set(&mvm->pending_frames[sta_id], 0);
223 mvm_sta->tid_disable_agg = 0; 223 mvm_sta->tid_disable_agg = 0;
224 mvm_sta->tfd_queue_msk = 0; 224 mvm_sta->tfd_queue_msk = 0;
225 for (i = 0; i < IEEE80211_NUM_ACS; i++) 225 for (i = 0; i < IEEE80211_NUM_ACS; i++)
@@ -407,14 +407,21 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
407 } 407 }
408 408
409 /* 409 /*
410 * Make sure that the tx response code sees the station as -EBUSY and
411 * calls the drain worker.
412 */
413 spin_lock_bh(&mvm_sta->lock);
414 /*
410 * There are frames pending on the AC queues for this station. 415 * There are frames pending on the AC queues for this station.
411 * We need to wait until all the frames are drained... 416 * We need to wait until all the frames are drained...
412 */ 417 */
413 if (atomic_read(&mvm_sta->pending_frames)) { 418 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) {
414 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
415 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 419 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id],
416 ERR_PTR(-EBUSY)); 420 ERR_PTR(-EBUSY));
421 spin_unlock_bh(&mvm_sta->lock);
422 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
417 } else { 423 } else {
424 spin_unlock_bh(&mvm_sta->lock);
418 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); 425 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
419 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); 426 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
420 } 427 }
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 12abd2d71835..a4ddce77aaae 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -274,7 +274,6 @@ struct iwl_mvm_tid_data {
274 * @bt_reduced_txpower: is reduced tx power enabled for this station 274 * @bt_reduced_txpower: is reduced tx power enabled for this station
275 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx 275 * @lock: lock to protect the whole struct. Since %tid_data is access from Tx
276 * and from Tx response flow, it needs a spinlock. 276 * and from Tx response flow, it needs a spinlock.
277 * @pending_frames: number of frames for this STA on the shared Tx queues.
278 * @tid_data: per tid data. Look at %iwl_mvm_tid_data. 277 * @tid_data: per tid data. Look at %iwl_mvm_tid_data.
279 * 278 *
280 * When mac80211 creates a station it reserves some space (hw->sta_data_size) 279 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
@@ -290,7 +289,6 @@ struct iwl_mvm_sta {
290 u8 max_agg_bufsize; 289 u8 max_agg_bufsize;
291 bool bt_reduced_txpower; 290 bool bt_reduced_txpower;
292 spinlock_t lock; 291 spinlock_t lock;
293 atomic_t pending_frames;
294 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT]; 292 struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
295 struct iwl_lq_sta lq_sta; 293 struct iwl_lq_sta lq_sta;
296 struct ieee80211_vif *vif; 294 struct ieee80211_vif *vif;
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 479074303bd7..f212f16502ff 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -416,9 +416,8 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
416 416
417 spin_unlock(&mvmsta->lock); 417 spin_unlock(&mvmsta->lock);
418 418
419 if (mvmsta->vif->type == NL80211_IFTYPE_AP && 419 if (txq_id < IWL_MVM_FIRST_AGG_QUEUE)
420 txq_id < IWL_MVM_FIRST_AGG_QUEUE) 420 atomic_inc(&mvm->pending_frames[mvmsta->sta_id]);
421 atomic_inc(&mvmsta->pending_frames);
422 421
423 return 0; 422 return 0;
424 423
@@ -680,16 +679,41 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
680 /* 679 /*
681 * If the txq is not an AMPDU queue, there is no chance we freed 680 * If the txq is not an AMPDU queue, there is no chance we freed
682 * several skbs. Check that out... 681 * several skbs. Check that out...
683 * If there are no pending frames for this STA, notify mac80211 that
684 * this station can go to sleep in its STA table.
685 */ 682 */
686 if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && mvmsta && 683 if (txq_id < IWL_MVM_FIRST_AGG_QUEUE && !WARN_ON(skb_freed > 1) &&
687 !WARN_ON(skb_freed > 1) && 684 atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
688 mvmsta->vif->type == NL80211_IFTYPE_AP && 685 if (mvmsta) {
689 atomic_sub_and_test(skb_freed, &mvmsta->pending_frames)) { 686 /*
690 ieee80211_sta_block_awake(mvm->hw, sta, false); 687 * If there are no pending frames for this STA, notify
691 set_bit(sta_id, mvm->sta_drained); 688 * mac80211 that this station can go to sleep in its
692 schedule_work(&mvm->sta_drained_wk); 689 * STA table.
690 */
691 if (mvmsta->vif->type == NL80211_IFTYPE_AP)
692 ieee80211_sta_block_awake(mvm->hw, sta, false);
693 /*
694 * We might very well have taken mvmsta pointer while
695 * the station was being removed. The remove flow might
696 * have seen a pending_frame (because we didn't take
697 * the lock) even if now the queues are drained. So make
 698	 * really sure now that the station is not being
699 * removed. If it is, run the drain worker to remove it.
700 */
701 spin_lock_bh(&mvmsta->lock);
702 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
703 if (IS_ERR_OR_NULL(sta)) {
704 /*
705 * Station disappeared in the meantime:
706 * so we are draining.
707 */
708 set_bit(sta_id, mvm->sta_drained);
709 schedule_work(&mvm->sta_drained_wk);
710 }
711 spin_unlock_bh(&mvmsta->lock);
712 } else if (!mvmsta) {
713 /* Tx response without STA, so we are draining */
714 set_bit(sta_id, mvm->sta_drained);
715 schedule_work(&mvm->sta_drained_wk);
716 }
693 } 717 }
694 718
695 rcu_read_unlock(); 719 rcu_read_unlock();
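The tx.c and sta.c hunks move the pending-frame bookkeeping into a per-station-id atomic_t array on the mvm and detect the drained state with atomic_sub_and_test() when a tx status frees the last frame. A standalone C11 model of that counting scheme (station ids and the drain hook are invented):

/* Hedged model of per-station pending-frame accounting with stdatomic. */
#include <stdatomic.h>
#include <stdio.h>

#define NUM_STATIONS 8

static atomic_int pending_frames[NUM_STATIONS];

static void tx_frame(int sta_id)
{
	atomic_fetch_add(&pending_frames[sta_id], 1);
}

static void tx_status(int sta_id, int freed)
{
	/* fetch_sub returns the previous value, so "previous == freed"
	 * means the counter just hit zero: the station is drained. */
	if (atomic_fetch_sub(&pending_frames[sta_id], freed) == freed)
		printf("station %d drained, schedule cleanup work\n", sta_id);
}

int main(void)
{
	tx_frame(3);
	tx_frame(3);
	tx_status(3, 1);
	tx_status(3, 1);	/* the second completion reports the drain */
	return 0;
}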
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index b878a32e7a98..cb34c7895f2a 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1723,11 +1723,11 @@ static void mac80211_hwsim_free(void)
1723 class_destroy(hwsim_class); 1723 class_destroy(hwsim_class);
1724} 1724}
1725 1725
1726 1726static struct platform_driver mac80211_hwsim_driver = {
1727static struct device_driver mac80211_hwsim_driver = { 1727 .driver = {
1728 .name = "mac80211_hwsim", 1728 .name = "mac80211_hwsim",
1729 .bus = &platform_bus_type, 1729 .owner = THIS_MODULE,
1730 .owner = THIS_MODULE, 1730 },
1731}; 1731};
1732 1732
1733static const struct net_device_ops hwsim_netdev_ops = { 1733static const struct net_device_ops hwsim_netdev_ops = {
@@ -2219,7 +2219,7 @@ static int __init init_mac80211_hwsim(void)
2219 spin_lock_init(&hwsim_radio_lock); 2219 spin_lock_init(&hwsim_radio_lock);
2220 INIT_LIST_HEAD(&hwsim_radios); 2220 INIT_LIST_HEAD(&hwsim_radios);
2221 2221
2222 err = driver_register(&mac80211_hwsim_driver); 2222 err = platform_driver_register(&mac80211_hwsim_driver);
2223 if (err) 2223 if (err)
2224 return err; 2224 return err;
2225 2225
@@ -2254,7 +2254,7 @@ static int __init init_mac80211_hwsim(void)
2254 err = -ENOMEM; 2254 err = -ENOMEM;
2255 goto failed_drvdata; 2255 goto failed_drvdata;
2256 } 2256 }
2257 data->dev->driver = &mac80211_hwsim_driver; 2257 data->dev->driver = &mac80211_hwsim_driver.driver;
2258 err = device_bind_driver(data->dev); 2258 err = device_bind_driver(data->dev);
2259 if (err != 0) { 2259 if (err != 0) {
2260 printk(KERN_DEBUG 2260 printk(KERN_DEBUG
@@ -2564,7 +2564,7 @@ failed_drvdata:
2564failed: 2564failed:
2565 mac80211_hwsim_free(); 2565 mac80211_hwsim_free();
2566failed_unregister_driver: 2566failed_unregister_driver:
2567 driver_unregister(&mac80211_hwsim_driver); 2567 platform_driver_unregister(&mac80211_hwsim_driver);
2568 return err; 2568 return err;
2569} 2569}
2570module_init(init_mac80211_hwsim); 2570module_init(init_mac80211_hwsim);
@@ -2577,6 +2577,6 @@ static void __exit exit_mac80211_hwsim(void)
2577 2577
2578 mac80211_hwsim_free(); 2578 mac80211_hwsim_free();
2579 unregister_netdev(hwsim_mon); 2579 unregister_netdev(hwsim_mon);
2580 driver_unregister(&mac80211_hwsim_driver); 2580 platform_driver_unregister(&mac80211_hwsim_driver);
2581} 2581}
2582module_exit(exit_mac80211_hwsim); 2582module_exit(exit_mac80211_hwsim);
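mac80211_hwsim switches from a bare struct device_driver on the platform bus to a proper struct platform_driver registered with platform_driver_register(). For reference, a minimal, hypothetical platform driver skeleton of roughly that era looks like the sketch below; the "demo" names are invented and this is not the hwsim code.

/* Hedged skeleton of a platform driver; probe/remove bodies are stubs. */
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "probed\n");
	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "removed\n");
	return 0;
}

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name	= "demo_platform",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(demo_driver);

MODULE_LICENSE("GPL");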
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index d3c8ece980d8..e42b266a023a 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -2234,9 +2234,6 @@ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev)
2234 if (wdev->netdev->reg_state == NETREG_REGISTERED) 2234 if (wdev->netdev->reg_state == NETREG_REGISTERED)
2235 unregister_netdevice(wdev->netdev); 2235 unregister_netdevice(wdev->netdev);
2236 2236
2237 if (wdev->netdev->reg_state == NETREG_UNREGISTERED)
2238 free_netdev(wdev->netdev);
2239
2240 /* Clear the priv in adapter */ 2237 /* Clear the priv in adapter */
2241 priv->netdev = NULL; 2238 priv->netdev = NULL;
2242 2239
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 74db0d24a579..26755d9acb55 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -1191,6 +1191,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter)
1191 adapter->if_ops.wakeup(adapter); 1191 adapter->if_ops.wakeup(adapter);
1192 adapter->hs_activated = false; 1192 adapter->hs_activated = false;
1193 adapter->is_hs_configured = false; 1193 adapter->is_hs_configured = false;
1194 adapter->is_suspended = false;
1194 mwifiex_hs_activated_event(mwifiex_get_priv(adapter, 1195 mwifiex_hs_activated_event(mwifiex_get_priv(adapter,
1195 MWIFIEX_BSS_ROLE_ANY), 1196 MWIFIEX_BSS_ROLE_ANY),
1196 false); 1197 false);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 121443a0f2a1..2eb88ea9acf7 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -655,6 +655,7 @@ void mwifiex_init_priv_params(struct mwifiex_private *priv,
655 struct net_device *dev) 655 struct net_device *dev)
656{ 656{
657 dev->netdev_ops = &mwifiex_netdev_ops; 657 dev->netdev_ops = &mwifiex_netdev_ops;
658 dev->destructor = free_netdev;
658 /* Initialize private structure */ 659 /* Initialize private structure */
659 priv->current_key_index = 0; 660 priv->current_key_index = 0;
660 priv->media_connected = false; 661 priv->media_connected = false;
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index 311d0b26b81c..1a8a19dbd635 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -96,7 +96,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
96 } else { 96 } else {
97 /* Multicast */ 97 /* Multicast */
98 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; 98 priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE;
99 if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { 99 if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) {
100 dev_dbg(priv->adapter->dev, 100 dev_dbg(priv->adapter->dev,
101 "info: Enabling All Multicast!\n"); 101 "info: Enabling All Multicast!\n");
102 priv->curr_pkt_filter |= 102 priv->curr_pkt_filter |=
@@ -108,20 +108,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv,
108 dev_dbg(priv->adapter->dev, 108 dev_dbg(priv->adapter->dev,
109 "info: Set multicast list=%d\n", 109 "info: Set multicast list=%d\n",
110 mcast_list->num_multicast_addr); 110 mcast_list->num_multicast_addr);
111 /* Set multicast addresses to firmware */ 111 /* Send multicast addresses to firmware */
112 if (old_pkt_filter == priv->curr_pkt_filter) { 112 ret = mwifiex_send_cmd_async(priv,
113 /* Send request to firmware */ 113 HostCmd_CMD_MAC_MULTICAST_ADR,
114 ret = mwifiex_send_cmd_async(priv, 114 HostCmd_ACT_GEN_SET, 0,
115 HostCmd_CMD_MAC_MULTICAST_ADR, 115 mcast_list);
116 HostCmd_ACT_GEN_SET, 0,
117 mcast_list);
118 } else {
119 /* Send request to firmware */
120 ret = mwifiex_send_cmd_async(priv,
121 HostCmd_CMD_MAC_MULTICAST_ADR,
122 HostCmd_ACT_GEN_SET, 0,
123 mcast_list);
124 }
125 } 116 }
126 } 117 }
127 } 118 }
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
index d3a02e73f53a..21ca33a7c770 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.h
@@ -550,7 +550,7 @@ do { \
550 rxmcs == DESC92C_RATE11M) 550 rxmcs == DESC92C_RATE11M)
551 551
552struct phy_rx_agc_info_t { 552struct phy_rx_agc_info_t {
553 #if __LITTLE_ENDIAN 553 #ifdef __LITTLE_ENDIAN
554 u8 gain:7, trsw:1; 554 u8 gain:7, trsw:1;
555 #else 555 #else
556 u8 trsw:1, gain:7; 556 u8 trsw:1, gain:7;
@@ -574,7 +574,7 @@ struct phy_status_rpt {
574 u8 stream_target_csi[2]; 574 u8 stream_target_csi[2];
575 u8 sig_evm; 575 u8 sig_evm;
576 u8 rsvd_3; 576 u8 rsvd_3;
577#if __LITTLE_ENDIAN 577#ifdef __LITTLE_ENDIAN
578 u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/ 578 u8 antsel_rx_keep_2:1; /*ex_intf_flg:1;*/
579 u8 sgi_en:1; 579 u8 sgi_en:1;
580 u8 rxsc:2; 580 u8 rxsc:2;
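The trx.h fix replaces #if __LITTLE_ENDIAN with #ifdef __LITTLE_ENDIAN: the bitfield layout should be selected by whether the macro is defined at all, not by evaluating its value. The standalone example below illustrates the #if/#ifdef distinction with an invented DEMO_SEL macro; it does not reproduce the kernel's byteorder headers.

/* DEMO_SEL is defined to no value: "#if DEMO_SEL" would not even
 * preprocess (empty expression), and an undefined macro used in #if
 * silently evaluates to 0, so value tests are easy to get wrong for
 * configuration switches.  #ifdef only asks whether the name exists. */
#include <stdio.h>

#define DEMO_SEL		/* defined, but carries no value */

struct demo_bits {
#ifdef DEMO_SEL
	unsigned char gain:7, trsw:1;	/* "little endian" layout */
#else
	unsigned char trsw:1, gain:7;	/* "big endian" layout */
#endif
};

int main(void)
{
	struct demo_bits b = { .gain = 5, .trsw = 1 };

	printf("gain=%u trsw=%u sizeof=%zu\n", b.gain, b.trsw, sizeof(b));
	return 0;
}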
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 23d640a4debd..938b1e670b93 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -349,6 +349,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
349 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/ 349 {RTL_USB_DEVICE(0x07aa, 0x0056, rtl92cu_hal_cfg)}, /*ATKK-Gemtek*/
350 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/ 350 {RTL_USB_DEVICE(0x07b8, 0x8178, rtl92cu_hal_cfg)}, /*Funai -Abocom*/
351 {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/ 351 {RTL_USB_DEVICE(0x0846, 0x9021, rtl92cu_hal_cfg)}, /*Netgear-Sercomm*/
352 {RTL_USB_DEVICE(0x0846, 0xf001, rtl92cu_hal_cfg)}, /*On Netwrks N300MA*/
352 {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/ 353 {RTL_USB_DEVICE(0x0b05, 0x17ab, rtl92cu_hal_cfg)}, /*ASUS-Edimax*/
353 {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/ 354 {RTL_USB_DEVICE(0x0bda, 0x8186, rtl92cu_hal_cfg)}, /*Realtek 92CE-VAU*/
354 {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/ 355 {RTL_USB_DEVICE(0x0df6, 0x0061, rtl92cu_hal_cfg)}, /*Sitecom-Edimax*/
diff --git a/drivers/ntb/ntb_hw.c b/drivers/ntb/ntb_hw.c
index f802e7c92356..2dacd19e1b8a 100644
--- a/drivers/ntb/ntb_hw.c
+++ b/drivers/ntb/ntb_hw.c
@@ -345,7 +345,7 @@ int ntb_read_remote_spad(struct ntb_device *ndev, unsigned int idx, u32 *val)
345 */ 345 */
346void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw) 346void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
347{ 347{
348 if (mw > NTB_NUM_MW) 348 if (mw >= NTB_NUM_MW)
349 return NULL; 349 return NULL;
350 350
351 return ndev->mw[mw].vbase; 351 return ndev->mw[mw].vbase;
@@ -362,7 +362,7 @@ void __iomem *ntb_get_mw_vbase(struct ntb_device *ndev, unsigned int mw)
362 */ 362 */
363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw) 363resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
364{ 364{
365 if (mw > NTB_NUM_MW) 365 if (mw >= NTB_NUM_MW)
366 return 0; 366 return 0;
367 367
368 return ndev->mw[mw].bar_sz; 368 return ndev->mw[mw].bar_sz;
@@ -380,7 +380,7 @@ resource_size_t ntb_get_mw_size(struct ntb_device *ndev, unsigned int mw)
380 */ 380 */
381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr) 381void ntb_set_mw_addr(struct ntb_device *ndev, unsigned int mw, u64 addr)
382{ 382{
383 if (mw > NTB_NUM_MW) 383 if (mw >= NTB_NUM_MW)
384 return; 384 return;
385 385
386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr, 386 dev_dbg(&ndev->pdev->dev, "Writing addr %Lx to BAR %d\n", addr,
@@ -1027,8 +1027,8 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1027 ndev->mw[i].vbase = 1027 ndev->mw[i].vbase =
1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)), 1028 ioremap_wc(pci_resource_start(pdev, MW_TO_BAR(i)),
1029 ndev->mw[i].bar_sz); 1029 ndev->mw[i].bar_sz);
1030 dev_info(&pdev->dev, "MW %d size %d\n", i, 1030 dev_info(&pdev->dev, "MW %d size %llu\n", i,
1031 (u32) pci_resource_len(pdev, MW_TO_BAR(i))); 1031 pci_resource_len(pdev, MW_TO_BAR(i)));
1032 if (!ndev->mw[i].vbase) { 1032 if (!ndev->mw[i].vbase) {
1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n", 1033 dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
1034 MW_TO_BAR(i)); 1034 MW_TO_BAR(i));
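The ntb_hw.c fixes tighten the memory-window bounds checks from mw > NTB_NUM_MW to mw >= NTB_NUM_MW, since an array of NTB_NUM_MW entries only has valid indices 0..NTB_NUM_MW-1. A minimal sketch of the off-by-one (array name and sizes invented):

/* Sketch: "idx > N" still lets idx == N through and reads past the end. */
#include <stdio.h>

#define NUM_WINDOWS 2

static const unsigned long window_size[NUM_WINDOWS] = { 4096, 8192 };

static unsigned long get_window_size(unsigned int mw)
{
	if (mw >= NUM_WINDOWS)	/* was "mw > NUM_WINDOWS": off by one */
		return 0;
	return window_size[mw];
}

int main(void)
{
	printf("%lu %lu %lu\n",
	       get_window_size(0), get_window_size(1), get_window_size(2));
	return 0;
}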
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index e0bdfd7f9930..f8d7081ee301 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -58,7 +58,7 @@
58#include <linux/ntb.h> 58#include <linux/ntb.h>
59#include "ntb_hw.h" 59#include "ntb_hw.h"
60 60
61#define NTB_TRANSPORT_VERSION 2 61#define NTB_TRANSPORT_VERSION 3
62 62
63static unsigned int transport_mtu = 0x401E; 63static unsigned int transport_mtu = 0x401E;
64module_param(transport_mtu, uint, 0644); 64module_param(transport_mtu, uint, 0644);
@@ -173,10 +173,13 @@ struct ntb_payload_header {
173 173
174enum { 174enum {
175 VERSION = 0, 175 VERSION = 0,
176 MW0_SZ,
177 MW1_SZ,
178 NUM_QPS,
179 QP_LINKS, 176 QP_LINKS,
177 NUM_QPS,
178 NUM_MWS,
179 MW0_SZ_HIGH,
180 MW0_SZ_LOW,
181 MW1_SZ_HIGH,
182 MW1_SZ_LOW,
180 MAX_SPAD, 183 MAX_SPAD,
181}; 184};
182 185
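The scratchpad enum gains MW0_SZ_HIGH/MW0_SZ_LOW (and the MW1 pair) because each scratchpad register carries only 32 bits, so a 64-bit memory-window size from ntb_get_mw_size() has to be written as two halves and reassembled by the peer. A hedged sketch of the split and re-join, with a plain array standing in for the scratchpad accessors:

/* Illustration only: spad[] stands in for the remote scratchpad registers. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

enum { MW0_SZ_HIGH, MW0_SZ_LOW, NUM_SPADS };

static uint32_t spad[NUM_SPADS];

static void write_mw_size(uint64_t size)
{
	spad[MW0_SZ_HIGH] = (uint32_t)(size >> 32);	/* upper 32 bits */
	spad[MW0_SZ_LOW]  = (uint32_t)size;		/* lower 32 bits */
}

static uint64_t read_mw_size(void)
{
	return ((uint64_t)spad[MW0_SZ_HIGH] << 32) | spad[MW0_SZ_LOW];
}

int main(void)
{
	write_mw_size(((uint64_t)1 << 32) + 4096);
	printf("0x%" PRIx64 "\n", read_mw_size());
	return 0;
}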
@@ -297,7 +300,7 @@ int ntb_register_client_dev(char *device_name)
297{ 300{
298 struct ntb_transport_client_dev *client_dev; 301 struct ntb_transport_client_dev *client_dev;
299 struct ntb_transport *nt; 302 struct ntb_transport *nt;
300 int rc; 303 int rc, i = 0;
301 304
302 if (list_empty(&ntb_transport_list)) 305 if (list_empty(&ntb_transport_list))
303 return -ENODEV; 306 return -ENODEV;
@@ -315,7 +318,7 @@ int ntb_register_client_dev(char *device_name)
315 dev = &client_dev->dev; 318 dev = &client_dev->dev;
316 319
317 /* setup and register client devices */ 320 /* setup and register client devices */
318 dev_set_name(dev, "%s", device_name); 321 dev_set_name(dev, "%s%d", device_name, i);
319 dev->bus = &ntb_bus_type; 322 dev->bus = &ntb_bus_type;
320 dev->release = ntb_client_release; 323 dev->release = ntb_client_release;
321 dev->parent = &ntb_query_pdev(nt->ndev)->dev; 324 dev->parent = &ntb_query_pdev(nt->ndev)->dev;
@@ -327,6 +330,7 @@ int ntb_register_client_dev(char *device_name)
327 } 330 }
328 331
329 list_add_tail(&client_dev->entry, &nt->client_devs); 332 list_add_tail(&client_dev->entry, &nt->client_devs);
333 i++;
330 } 334 }
331 335
332 return 0; 336 return 0;
@@ -486,12 +490,13 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
486 (qp_num / NTB_NUM_MW * rx_size); 490 (qp_num / NTB_NUM_MW * rx_size);
487 rx_size -= sizeof(struct ntb_rx_info); 491 rx_size -= sizeof(struct ntb_rx_info);
488 492
489 qp->rx_buff = qp->remote_rx_info + sizeof(struct ntb_rx_info); 493 qp->rx_buff = qp->remote_rx_info + 1;
490	 qp->rx_max_frame = min(transport_mtu, rx_size); 494	 /* Due to housekeeping, there must be at least 2 buffs */
495 qp->rx_max_frame = min(transport_mtu, rx_size / 2);
491 qp->rx_max_entry = rx_size / qp->rx_max_frame; 496 qp->rx_max_entry = rx_size / qp->rx_max_frame;
492 qp->rx_index = 0; 497 qp->rx_index = 0;
493 498
494 qp->remote_rx_info->entry = qp->rx_max_entry; 499 qp->remote_rx_info->entry = qp->rx_max_entry - 1;
495 500
496 /* setup the hdr offsets with 0's */ 501 /* setup the hdr offsets with 0's */
497 for (i = 0; i < qp->rx_max_entry; i++) { 502 for (i = 0; i < qp->rx_max_entry; i++) {
@@ -502,6 +507,19 @@ static void ntb_transport_setup_qp_mw(struct ntb_transport *nt,
502 507
503 qp->rx_pkts = 0; 508 qp->rx_pkts = 0;
504 qp->tx_pkts = 0; 509 qp->tx_pkts = 0;
510 qp->tx_index = 0;
511}
512
513static void ntb_free_mw(struct ntb_transport *nt, int num_mw)
514{
515 struct ntb_transport_mw *mw = &nt->mw[num_mw];
516 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
517
518 if (!mw->virt_addr)
519 return;
520
521 dma_free_coherent(&pdev->dev, mw->size, mw->virt_addr, mw->dma_addr);
522 mw->virt_addr = NULL;
505} 523}
506 524
507static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size) 525static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
@@ -509,12 +527,20 @@ static int ntb_set_mw(struct ntb_transport *nt, int num_mw, unsigned int size)
509 struct ntb_transport_mw *mw = &nt->mw[num_mw]; 527 struct ntb_transport_mw *mw = &nt->mw[num_mw];
510 struct pci_dev *pdev = ntb_query_pdev(nt->ndev); 528 struct pci_dev *pdev = ntb_query_pdev(nt->ndev);
511 529
530 /* No need to re-setup */
531 if (mw->size == ALIGN(size, 4096))
532 return 0;
533
534 if (mw->size != 0)
535 ntb_free_mw(nt, num_mw);
536
512 /* Alloc memory for receiving data. Must be 4k aligned */ 537 /* Alloc memory for receiving data. Must be 4k aligned */
513 mw->size = ALIGN(size, 4096); 538 mw->size = ALIGN(size, 4096);
514 539
515 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr, 540 mw->virt_addr = dma_alloc_coherent(&pdev->dev, mw->size, &mw->dma_addr,
516 GFP_KERNEL); 541 GFP_KERNEL);
517 if (!mw->virt_addr) { 542 if (!mw->virt_addr) {
543 mw->size = 0;
518 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n", 544 dev_err(&pdev->dev, "Unable to allocate MW buffer of size %d\n",
519 (int) mw->size); 545 (int) mw->size);
520 return -ENOMEM; 546 return -ENOMEM;
@@ -604,25 +630,31 @@ static void ntb_transport_link_work(struct work_struct *work)
604 u32 val; 630 u32 val;
605 int rc, i; 631 int rc, i;
606 632
607 /* send the local info */ 633 /* send the local info, in the opposite order of the way we read it */
608 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION); 634 for (i = 0; i < NTB_NUM_MW; i++) {
609 if (rc) { 635 rc = ntb_write_remote_spad(ndev, MW0_SZ_HIGH + (i * 2),
610 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 636 ntb_get_mw_size(ndev, i) >> 32);
611 0, VERSION); 637 if (rc) {
612 goto out; 638 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
613 } 639 (u32)(ntb_get_mw_size(ndev, i) >> 32),
640 MW0_SZ_HIGH + (i * 2));
641 goto out;
642 }
614 643
615 rc = ntb_write_remote_spad(ndev, MW0_SZ, ntb_get_mw_size(ndev, 0)); 644 rc = ntb_write_remote_spad(ndev, MW0_SZ_LOW + (i * 2),
616 if (rc) { 645 (u32) ntb_get_mw_size(ndev, i));
617 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 646 if (rc) {
618 (u32) ntb_get_mw_size(ndev, 0), MW0_SZ); 647 dev_err(&pdev->dev, "Error writing %u to remote spad %d\n",
619 goto out; 648 (u32) ntb_get_mw_size(ndev, i),
649 MW0_SZ_LOW + (i * 2));
650 goto out;
651 }
620 } 652 }
621 653
622 rc = ntb_write_remote_spad(ndev, MW1_SZ, ntb_get_mw_size(ndev, 1)); 654 rc = ntb_write_remote_spad(ndev, NUM_MWS, NTB_NUM_MW);
623 if (rc) { 655 if (rc) {
624 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 656 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
625 (u32) ntb_get_mw_size(ndev, 1), MW1_SZ); 657 NTB_NUM_MW, NUM_MWS);
626 goto out; 658 goto out;
627 } 659 }
628 660
@@ -633,16 +665,10 @@ static void ntb_transport_link_work(struct work_struct *work)
633 goto out; 665 goto out;
634 } 666 }
635 667
636 rc = ntb_read_local_spad(nt->ndev, QP_LINKS, &val); 668 rc = ntb_write_remote_spad(ndev, VERSION, NTB_TRANSPORT_VERSION);
637 if (rc) {
638 dev_err(&pdev->dev, "Error reading spad %d\n", QP_LINKS);
639 goto out;
640 }
641
642 rc = ntb_write_remote_spad(ndev, QP_LINKS, val);
643 if (rc) { 669 if (rc) {
644 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n", 670 dev_err(&pdev->dev, "Error writing %x to remote spad %d\n",
645 val, QP_LINKS); 671 NTB_TRANSPORT_VERSION, VERSION);
646 goto out; 672 goto out;
647 } 673 }
648 674
@@ -667,33 +693,43 @@ static void ntb_transport_link_work(struct work_struct *work)
667 goto out; 693 goto out;
668 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val); 694 dev_dbg(&pdev->dev, "Remote max number of qps = %d\n", val);
669 695
670 rc = ntb_read_remote_spad(ndev, MW0_SZ, &val); 696 rc = ntb_read_remote_spad(ndev, NUM_MWS, &val);
671 if (rc) { 697 if (rc) {
672 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW0_SZ); 698 dev_err(&pdev->dev, "Error reading remote spad %d\n", NUM_MWS);
673 goto out; 699 goto out;
674 } 700 }
675 701
676 if (!val) 702 if (val != NTB_NUM_MW)
677 goto out; 703 goto out;
678 dev_dbg(&pdev->dev, "Remote MW0 size = %d\n", val); 704 dev_dbg(&pdev->dev, "Remote number of mws = %d\n", val);
679 705
680 rc = ntb_set_mw(nt, 0, val); 706 for (i = 0; i < NTB_NUM_MW; i++) {
681 if (rc) 707 u64 val64;
682 goto out;
683 708
684 rc = ntb_read_remote_spad(ndev, MW1_SZ, &val); 709 rc = ntb_read_remote_spad(ndev, MW0_SZ_HIGH + (i * 2), &val);
685 if (rc) { 710 if (rc) {
686 dev_err(&pdev->dev, "Error reading remote spad %d\n", MW1_SZ); 711 dev_err(&pdev->dev, "Error reading remote spad %d\n",
687 goto out; 712 MW0_SZ_HIGH + (i * 2));
688 } 713 goto out1;
714 }
689 715
690 if (!val) 716 val64 = (u64) val << 32;
691 goto out;
692 dev_dbg(&pdev->dev, "Remote MW1 size = %d\n", val);
693 717
694 rc = ntb_set_mw(nt, 1, val); 718 rc = ntb_read_remote_spad(ndev, MW0_SZ_LOW + (i * 2), &val);
695 if (rc) 719 if (rc) {
696 goto out; 720 dev_err(&pdev->dev, "Error reading remote spad %d\n",
721 MW0_SZ_LOW + (i * 2));
722 goto out1;
723 }
724
725 val64 |= val;
726
727 dev_dbg(&pdev->dev, "Remote MW%d size = %llu\n", i, val64);
728
729 rc = ntb_set_mw(nt, i, val64);
730 if (rc)
731 goto out1;
732 }
697 733
698 nt->transport_link = NTB_LINK_UP; 734 nt->transport_link = NTB_LINK_UP;
699 735
@@ -708,6 +744,9 @@ static void ntb_transport_link_work(struct work_struct *work)
708 744
709 return; 745 return;
710 746
747out1:
748 for (i = 0; i < NTB_NUM_MW; i++)
749 ntb_free_mw(nt, i);
711out: 750out:
712 if (ntb_hw_link_status(ndev)) 751 if (ntb_hw_link_status(ndev))
713 schedule_delayed_work(&nt->link_work, 752 schedule_delayed_work(&nt->link_work,
@@ -780,10 +819,10 @@ static void ntb_transport_init_queue(struct ntb_transport *nt,
780 (qp_num / NTB_NUM_MW * tx_size); 819 (qp_num / NTB_NUM_MW * tx_size);
781 tx_size -= sizeof(struct ntb_rx_info); 820 tx_size -= sizeof(struct ntb_rx_info);
782 821
783 qp->tx_mw = qp->rx_info + sizeof(struct ntb_rx_info); 822 qp->tx_mw = qp->rx_info + 1;
784 qp->tx_max_frame = min(transport_mtu, tx_size); 823 /* Due to housekeeping, there must be at least 2 buffs */
824 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
785 qp->tx_max_entry = tx_size / qp->tx_max_frame; 825 qp->tx_max_entry = tx_size / qp->tx_max_frame;
786 qp->tx_index = 0;
787 826
788 if (nt->debugfs_dir) { 827 if (nt->debugfs_dir) {
789 char debugfs_name[4]; 828 char debugfs_name[4];
@@ -897,10 +936,7 @@ void ntb_transport_free(void *transport)
897 pdev = ntb_query_pdev(nt->ndev); 936 pdev = ntb_query_pdev(nt->ndev);
898 937
899 for (i = 0; i < NTB_NUM_MW; i++) 938 for (i = 0; i < NTB_NUM_MW; i++)
900 if (nt->mw[i].virt_addr) 939 ntb_free_mw(nt, i);
901 dma_free_coherent(&pdev->dev, nt->mw[i].size,
902 nt->mw[i].virt_addr,
903 nt->mw[i].dma_addr);
904 940
905 kfree(nt->qps); 941 kfree(nt->qps);
906 ntb_unregister_transport(nt->ndev); 942 ntb_unregister_transport(nt->ndev);
@@ -999,11 +1035,16 @@ out:
999static void ntb_transport_rx(unsigned long data) 1035static void ntb_transport_rx(unsigned long data)
1000{ 1036{
1001 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data; 1037 struct ntb_transport_qp *qp = (struct ntb_transport_qp *)data;
1002 int rc; 1038 int rc, i;
1003 1039
1004 do { 1040 /* Limit the number of packets processed in a single interrupt to
1041 * provide fairness to others
1042 */
1043 for (i = 0; i < qp->rx_max_entry; i++) {
1005 rc = ntb_process_rxc(qp); 1044 rc = ntb_process_rxc(qp);
1006 } while (!rc); 1045 if (rc)
1046 break;
1047 }
1007} 1048}
1008 1049
1009static void ntb_transport_rxc_db(void *data, int db_num) 1050static void ntb_transport_rxc_db(void *data, int db_num)
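The rx tasklet hunk above swaps an unbounded "drain until empty" loop for one capped at rx_max_entry iterations, so a busy queue cannot starve other bottom-half work. A minimal sketch of that budget pattern, assuming a hypothetical foo_qp and a process_one() stub standing in for ntb_process_rxc():

struct foo_qp {
	unsigned int rx_max_entry;	/* ring depth doubles as the per-run budget */
};

/* Stand-in for ntb_process_rxc(): non-zero means nothing more to do. */
static int process_one(struct foo_qp *qp)
{
	/* ... consume one completed entry from qp's ring ... */
	return 1;
}

static void foo_rx_tasklet(unsigned long data)
{
	struct foo_qp *qp = (struct foo_qp *)data;
	unsigned int i;

	/* Process at most one ring's worth of entries per invocation. */
	for (i = 0; i < qp->rx_max_entry; i++)
		if (process_one(qp))
			break;
}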
@@ -1210,12 +1251,14 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1210 */ 1251 */
1211void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1252void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1212{ 1253{
1213 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1254 struct pci_dev *pdev;
1214 struct ntb_queue_entry *entry; 1255 struct ntb_queue_entry *entry;
1215 1256
1216 if (!qp) 1257 if (!qp)
1217 return; 1258 return;
1218 1259
1260 pdev = ntb_query_pdev(qp->ndev);
1261
1219 cancel_delayed_work_sync(&qp->link_work); 1262 cancel_delayed_work_sync(&qp->link_work);
1220 1263
1221 ntb_unregister_db_callback(qp->ndev, qp->qp_num); 1264 ntb_unregister_db_callback(qp->ndev, qp->qp_num);
@@ -1371,12 +1414,13 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_up);
1371 */ 1414 */
1372void ntb_transport_link_down(struct ntb_transport_qp *qp) 1415void ntb_transport_link_down(struct ntb_transport_qp *qp)
1373{ 1416{
1374 struct pci_dev *pdev = ntb_query_pdev(qp->ndev); 1417 struct pci_dev *pdev;
1375 int rc, val; 1418 int rc, val;
1376 1419
1377 if (!qp) 1420 if (!qp)
1378 return; 1421 return;
1379 1422
1423 pdev = ntb_query_pdev(qp->ndev);
1380 qp->client_ready = NTB_LINK_DOWN; 1424 qp->client_ready = NTB_LINK_DOWN;
1381 1425
1382 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val); 1426 rc = ntb_read_local_spad(qp->ndev, QP_LINKS, &val);
@@ -1408,6 +1452,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_down);
1408 */ 1452 */
1409bool ntb_transport_link_query(struct ntb_transport_qp *qp) 1453bool ntb_transport_link_query(struct ntb_transport_qp *qp)
1410{ 1454{
1455 if (!qp)
1456 return false;
1457
1411 return qp->qp_link == NTB_LINK_UP; 1458 return qp->qp_link == NTB_LINK_UP;
1412} 1459}
1413EXPORT_SYMBOL_GPL(ntb_transport_link_query); 1460EXPORT_SYMBOL_GPL(ntb_transport_link_query);
@@ -1422,6 +1469,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_link_query);
1422 */ 1469 */
1423unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp) 1470unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
1424{ 1471{
1472 if (!qp)
1473 return 0;
1474
1425 return qp->qp_num; 1475 return qp->qp_num;
1426} 1476}
1427EXPORT_SYMBOL_GPL(ntb_transport_qp_num); 1477EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
@@ -1436,6 +1486,9 @@ EXPORT_SYMBOL_GPL(ntb_transport_qp_num);
1436 */ 1486 */
1437unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp) 1487unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp)
1438{ 1488{
1489 if (!qp)
1490 return 0;
1491
1439 return qp->tx_max_frame - sizeof(struct ntb_payload_header); 1492 return qp->tx_max_frame - sizeof(struct ntb_payload_header);
1440} 1493}
1441EXPORT_SYMBOL_GPL(ntb_transport_max_size); 1494EXPORT_SYMBOL_GPL(ntb_transport_max_size);
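The scratchpad enum and link_work changes above replace the single 32-bit size word per memory window with a HIGH/LOW pair, so window sizes above 4 GiB survive the exchange. A standalone sketch of that split-and-reassemble pattern; the spad[] array is only a stand-in for the NTB scratchpad registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t spad[8];	/* stand-in for the 32-bit scratchpad registers */

/* Publish a 64-bit memory-window size as two 32-bit halves, high word first. */
static void publish_mw_size(int mw, uint64_t size)
{
	spad[2 * mw]     = (uint32_t)(size >> 32);	/* MW<i>_SZ_HIGH */
	spad[2 * mw + 1] = (uint32_t)size;		/* MW<i>_SZ_LOW  */
}

/* Reassemble the peer's value from the two halves. */
static uint64_t read_mw_size(int mw)
{
	return ((uint64_t)spad[2 * mw] << 32) | spad[2 * mw + 1];
}

int main(void)
{
	publish_mw_size(0, 0x200000000ULL);	/* an 8 GiB window */
	printf("MW0 = %llu bytes\n", (unsigned long long)read_mw_size(0));
	return 0;
}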
diff --git a/drivers/of/base.c b/drivers/of/base.c
index c76d16c972cc..f53b992f060a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1208,11 +1208,11 @@ static int __of_parse_phandle_with_args(const struct device_node *np,
1208 out_args->args_count = count; 1208 out_args->args_count = count;
1209 for (i = 0; i < count; i++) 1209 for (i = 0; i < count; i++)
1210 out_args->args[i] = be32_to_cpup(list++); 1210 out_args->args[i] = be32_to_cpup(list++);
1211 } else {
1212 of_node_put(node);
1211 } 1213 }
1212 1214
1213 /* Found it! return success */ 1215 /* Found it! return success */
1214 if (node)
1215 of_node_put(node);
1216 return 0; 1216 return 0;
1217 } 1217 }
1218 1218
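The of/base.c fix above moves the of_node_put() into the branch where no arguments are handed back, so the caller keeps exactly one reference when it receives out_args. A toy version of that hand-off, with node_put() standing in for of_node_put():

struct node {
	int refcount;
};

static void node_put(struct node *n)
{
	if (n)
		n->refcount--;
}

/* When the caller asked for results, the reference travels back with them;
 * otherwise it is dropped before returning. */
static int parse_one(struct node *node, struct node **out)
{
	if (out)
		*out = node;		/* caller now owns the reference */
	else
		node_put(node);		/* nobody keeps it, release it here */
	return 0;
}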
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index ac6e8e7a02df..a042d065a0c7 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -494,15 +494,4 @@ static struct pci_driver superio_driver = {
494 .probe = superio_probe, 494 .probe = superio_probe,
495}; 495};
496 496
497static int __init superio_modinit(void) 497module_pci_driver(superio_driver);
498{
499 return pci_register_driver(&superio_driver);
500}
501
502static void __exit superio_exit(void)
503{
504 pci_unregister_driver(&superio_driver);
505}
506
507module_init(superio_modinit);
508module_exit(superio_exit);
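The superio change replaces the hand-rolled init/exit pair with module_pci_driver(). Roughly what that macro generates (it builds on module_driver() and names the helpers after the driver variable):

static int __init superio_driver_init(void)
{
	return pci_register_driver(&superio_driver);
}
module_init(superio_driver_init);

static void __exit superio_driver_exit(void)
{
	pci_unregister_driver(&superio_driver);
}
module_exit(superio_driver_exit);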
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 96fed19c6d90..716aa93fff76 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -950,6 +950,20 @@ check_sub_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
950 return AE_OK ; 950 return AE_OK ;
951} 951}
952 952
953void acpiphp_check_host_bridge(acpi_handle handle)
954{
955 struct acpiphp_bridge *bridge;
956
957 bridge = acpiphp_handle_to_bridge(handle);
958 if (bridge) {
959 acpiphp_check_bridge(bridge);
960 put_bridge(bridge);
961 }
962
963 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
964 ACPI_UINT32_MAX, check_sub_bridges, NULL, NULL, NULL);
965}
966
953static void _handle_hotplug_event_bridge(struct work_struct *work) 967static void _handle_hotplug_event_bridge(struct work_struct *work)
954{ 968{
955 struct acpiphp_bridge *bridge; 969 struct acpiphp_bridge *bridge;
diff --git a/drivers/pinctrl/pinctrl-abx500.c b/drivers/pinctrl/pinctrl-abx500.c
index aa17f7580f61..6d4532702f80 100644
--- a/drivers/pinctrl/pinctrl-abx500.c
+++ b/drivers/pinctrl/pinctrl-abx500.c
@@ -851,23 +851,12 @@ static int abx500_gpio_probe(struct platform_device *pdev)
851 851
852 if (abx500_pdata) 852 if (abx500_pdata)
853 pdata = abx500_pdata->gpio; 853 pdata = abx500_pdata->gpio;
854 if (!pdata) {
855 if (np) {
856 const struct of_device_id *match;
857 854
858 match = of_match_device(abx500_gpio_match, &pdev->dev); 855 if (!(pdata || np)) {
859 if (!match) 856 dev_err(&pdev->dev, "gpio dt and platform data missing\n");
860 return -ENODEV; 857 return -ENODEV;
861 id = (unsigned long)match->data;
862 } else {
863 dev_err(&pdev->dev, "gpio dt and platform data missing\n");
864 return -ENODEV;
865 }
866 } 858 }
867 859
868 if (platid)
869 id = platid->driver_data;
870
871 pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl), 860 pct = devm_kzalloc(&pdev->dev, sizeof(struct abx500_pinctrl),
872 GFP_KERNEL); 861 GFP_KERNEL);
873 if (pct == NULL) { 862 if (pct == NULL) {
@@ -882,6 +871,16 @@ static int abx500_gpio_probe(struct platform_device *pdev)
882 pct->chip.dev = &pdev->dev; 871 pct->chip.dev = &pdev->dev;
883 pct->chip.base = (np) ? -1 : pdata->gpio_base; 872 pct->chip.base = (np) ? -1 : pdata->gpio_base;
884 873
874 if (platid)
875 id = platid->driver_data;
876 else if (np) {
877 const struct of_device_id *match;
878
879 match = of_match_device(abx500_gpio_match, &pdev->dev);
880 if (match)
881 id = (unsigned long)match->data;
882 }
883
885 /* initialize the lock */ 884 /* initialize the lock */
886 mutex_init(&pct->lock); 885 mutex_init(&pct->lock);
887 886
@@ -900,8 +899,7 @@ static int abx500_gpio_probe(struct platform_device *pdev)
900 abx500_pinctrl_ab8505_init(&pct->soc); 899 abx500_pinctrl_ab8505_init(&pct->soc);
901 break; 900 break;
902 default: 901 default:
903 dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", 902 dev_err(&pdev->dev, "Unsupported pinctrl sub driver (%d)\n", id);
904 (int) platid->driver_data);
905 mutex_destroy(&pct->lock); 903 mutex_destroy(&pct->lock);
906 return -EINVAL; 904 return -EINVAL;
907 } 905 }
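The abx500 rework defers the variant-id lookup until after the pdata/DT presence check, taking the id from platform data when present and from the OF match table otherwise. A hedged sketch of that selection order; the names are illustrative, not the driver's exact symbols:

#include <linux/of_device.h>
#include <linux/platform_device.h>

static unsigned long pick_variant_id(struct platform_device *pdev,
				     const struct platform_device_id *platid,
				     const struct of_device_id *matches)
{
	unsigned long id = 0;

	if (platid) {
		id = platid->driver_data;
	} else if (pdev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_device(matches, &pdev->dev);
		if (match)
			id = (unsigned long)match->data;
	}

	return id;
}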
diff --git a/drivers/pinctrl/pinctrl-coh901.c b/drivers/pinctrl/pinctrl-coh901.c
index edde3acc4186..a67af419f531 100644
--- a/drivers/pinctrl/pinctrl-coh901.c
+++ b/drivers/pinctrl/pinctrl-coh901.c
@@ -713,11 +713,6 @@ static int __init u300_gpio_probe(struct platform_device *pdev)
713 gpio->dev = &pdev->dev; 713 gpio->dev = &pdev->dev;
714 714
715 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 715 memres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
716 if (!memres) {
717 dev_err(gpio->dev, "could not get GPIO memory resource\n");
718 return -ENODEV;
719 }
720
721 gpio->base = devm_ioremap_resource(&pdev->dev, memres); 716 gpio->base = devm_ioremap_resource(&pdev->dev, memres);
722 if (IS_ERR(gpio->base)) 717 if (IS_ERR(gpio->base))
723 return PTR_ERR(gpio->base); 718 return PTR_ERR(gpio->base);
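This conversion, like the identical ones in the exynos5440, samsung, xway and pwm-* hunks further down, leans on devm_ioremap_resource() validating the resource pointer itself (a NULL resource yields an ERR_PTR and a logged error), so the explicit platform_get_resource() error branch is redundant. A minimal probe sketch of the resulting pattern, using placeholder names:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* devm_ioremap_resource() checks 'res', including NULL, and reports errors. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... driver-specific setup using 'base' ... */
	return 0;
}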
diff --git a/drivers/pinctrl/pinctrl-exynos5440.c b/drivers/pinctrl/pinctrl-exynos5440.c
index 6038503ed929..32a48f44f574 100644
--- a/drivers/pinctrl/pinctrl-exynos5440.c
+++ b/drivers/pinctrl/pinctrl-exynos5440.c
@@ -1000,11 +1000,6 @@ static int exynos5440_pinctrl_probe(struct platform_device *pdev)
1000 } 1000 }
1001 1001
1002 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1002 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1003 if (!res) {
1004 dev_err(dev, "cannot find IO resource\n");
1005 return -ENOENT;
1006 }
1007
1008 priv->reg_base = devm_ioremap_resource(&pdev->dev, res); 1003 priv->reg_base = devm_ioremap_resource(&pdev->dev, res);
1009 if (IS_ERR(priv->reg_base)) 1004 if (IS_ERR(priv->reg_base))
1010 return PTR_ERR(priv->reg_base); 1005 return PTR_ERR(priv->reg_base);
diff --git a/drivers/pinctrl/pinctrl-lantiq.c b/drivers/pinctrl/pinctrl-lantiq.c
index 615c5002b757..d22ca252b80d 100644
--- a/drivers/pinctrl/pinctrl-lantiq.c
+++ b/drivers/pinctrl/pinctrl-lantiq.c
@@ -52,7 +52,8 @@ static void ltq_pinctrl_dt_free_map(struct pinctrl_dev *pctldev,
52 int i; 52 int i;
53 53
54 for (i = 0; i < num_maps; i++) 54 for (i = 0; i < num_maps; i++)
55 if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN) 55 if (map[i].type == PIN_MAP_TYPE_CONFIGS_PIN ||
56 map[i].type == PIN_MAP_TYPE_CONFIGS_GROUP)
56 kfree(map[i].data.configs.configs); 57 kfree(map[i].data.configs.configs);
57 kfree(map); 58 kfree(map);
58} 59}
diff --git a/drivers/pinctrl/pinctrl-samsung.c b/drivers/pinctrl/pinctrl-samsung.c
index 976366899f68..055d0162098b 100644
--- a/drivers/pinctrl/pinctrl-samsung.c
+++ b/drivers/pinctrl/pinctrl-samsung.c
@@ -932,11 +932,6 @@ static int samsung_pinctrl_probe(struct platform_device *pdev)
932 drvdata->dev = dev; 932 drvdata->dev = dev;
933 933
934 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 934 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
935 if (!res) {
936 dev_err(dev, "cannot find IO resource\n");
937 return -ENOENT;
938 }
939
940 drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res); 935 drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
941 if (IS_ERR(drvdata->virt_base)) 936 if (IS_ERR(drvdata->virt_base))
942 return PTR_ERR(drvdata->virt_base); 937 return PTR_ERR(drvdata->virt_base);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index 5f2d2bfd356e..b9fa04618601 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1166,7 +1166,8 @@ static int pcs_parse_one_pinctrl_entry(struct pcs_device *pcs,
1166 (*map)->data.mux.function = np->name; 1166 (*map)->data.mux.function = np->name;
1167 1167
1168 if (pcs->is_pinconf) { 1168 if (pcs->is_pinconf) {
1169 if (pcs_parse_pinconf(pcs, np, function, map)) 1169 res = pcs_parse_pinconf(pcs, np, function, map);
1170 if (res)
1170 goto free_pingroups; 1171 goto free_pingroups;
1171 *num_maps = 2; 1172 *num_maps = 2;
1172 } else { 1173 } else {
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index f2977cff8366..e92132c76a6b 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -716,10 +716,6 @@ static int pinmux_xway_probe(struct platform_device *pdev)
716 716
717 /* get and remap our register range */ 717 /* get and remap our register range */
718 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 718 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
719 if (!res) {
720 dev_err(&pdev->dev, "Failed to get resource\n");
721 return -ENOENT;
722 }
723 xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res); 719 xway_info.membase[0] = devm_ioremap_resource(&pdev->dev, res);
724 if (IS_ERR(xway_info.membase[0])) 720 if (IS_ERR(xway_info.membase[0]))
725 return PTR_ERR(xway_info.membase[0]); 721 return PTR_ERR(xway_info.membase[0]);
diff --git a/drivers/pinctrl/vt8500/pinctrl-wm8750.c b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
index b964cc550568..de43262398db 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wm8750.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wm8750.c
@@ -53,7 +53,7 @@ static const struct wmt_pinctrl_bank_registers wm8750_banks[] = {
53#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6) 53#define WMT_PIN_EXTGPIO6 WMT_PIN(0, 6)
54#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7) 54#define WMT_PIN_EXTGPIO7 WMT_PIN(0, 7)
55#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16) 55#define WMT_PIN_WAKEUP0 WMT_PIN(0, 16)
56#define WMT_PIN_WAKEUP1 WMT_PIN(0, 16) 56#define WMT_PIN_WAKEUP1 WMT_PIN(0, 17)
57#define WMT_PIN_SD0CD WMT_PIN(0, 28) 57#define WMT_PIN_SD0CD WMT_PIN(0, 28)
58#define WMT_PIN_VDOUT0 WMT_PIN(1, 0) 58#define WMT_PIN_VDOUT0 WMT_PIN(1, 0)
59#define WMT_PIN_VDOUT1 WMT_PIN(1, 1) 59#define WMT_PIN_VDOUT1 WMT_PIN(1, 1)
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 0d0b5d7d19d0..7b8979c63f48 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -152,6 +152,7 @@ config BATTERY_SBS
152 152
153config BATTERY_BQ27x00 153config BATTERY_BQ27x00
154 tristate "BQ27x00 battery driver" 154 tristate "BQ27x00 battery driver"
155 depends on I2C || I2C=n
155 help 156 help
156 Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips. 157 Say Y here to enable support for batteries with BQ27x00 (I2C/HDQ) chips.
157 158
@@ -284,6 +285,7 @@ config CHARGER_LP8788
284 tristate "TI LP8788 charger driver" 285 tristate "TI LP8788 charger driver"
285 depends on MFD_LP8788 286 depends on MFD_LP8788
286 depends on LP8788_ADC 287 depends on LP8788_ADC
288 depends on IIO
287 help 289 help
288 Say Y to enable support for the LP8788 linear charger. 290 Say Y to enable support for the LP8788 linear charger.
289 291
diff --git a/drivers/power/pm2301_charger.c b/drivers/power/pm2301_charger.c
index a44175139bbf..fef56e2041b3 100644
--- a/drivers/power/pm2301_charger.c
+++ b/drivers/power/pm2301_charger.c
@@ -1269,5 +1269,5 @@ module_exit(pm2xxx_charger_exit);
1269 1269
1270MODULE_LICENSE("GPL v2"); 1270MODULE_LICENSE("GPL v2");
1271MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay"); 1271MODULE_AUTHOR("Rajkumar kasirajan, Olivier Launay");
1272MODULE_ALIAS("platform:pm2xxx-charger"); 1272MODULE_ALIAS("i2c:pm2xxx-charger");
1273MODULE_DESCRIPTION("PM2xxx charger management driver"); 1273MODULE_DESCRIPTION("PM2xxx charger management driver");
diff --git a/drivers/power/wm831x_backup.c b/drivers/power/wm831x_backup.c
index 58cbb009b74f..56fb509f4be0 100644
--- a/drivers/power/wm831x_backup.c
+++ b/drivers/power/wm831x_backup.c
@@ -207,7 +207,6 @@ static int wm831x_backup_remove(struct platform_device *pdev)
207 struct wm831x_backup *devdata = platform_get_drvdata(pdev); 207 struct wm831x_backup *devdata = platform_get_drvdata(pdev);
208 208
209 power_supply_unregister(&devdata->backup); 209 power_supply_unregister(&devdata->backup);
210 kfree(devdata->backup.name);
211 210
212 return 0; 211 return 0;
213} 212}
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index ec287989eafc..c938bae18812 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -265,11 +265,6 @@ static int imx_pwm_probe(struct platform_device *pdev)
265 imx->chip.npwm = 1; 265 imx->chip.npwm = 1;
266 266
267 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 267 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
268 if (r == NULL) {
269 dev_err(&pdev->dev, "no memory resource defined\n");
270 return -ENODEV;
271 }
272
273 imx->mmio_base = devm_ioremap_resource(&pdev->dev, r); 268 imx->mmio_base = devm_ioremap_resource(&pdev->dev, r);
274 if (IS_ERR(imx->mmio_base)) 269 if (IS_ERR(imx->mmio_base))
275 return PTR_ERR(imx->mmio_base); 270 return PTR_ERR(imx->mmio_base);
diff --git a/drivers/pwm/pwm-puv3.c b/drivers/pwm/pwm-puv3.c
index d1eb499fb15d..ed6007b27585 100644
--- a/drivers/pwm/pwm-puv3.c
+++ b/drivers/pwm/pwm-puv3.c
@@ -117,11 +117,6 @@ static int pwm_probe(struct platform_device *pdev)
117 return PTR_ERR(puv3->clk); 117 return PTR_ERR(puv3->clk);
118 118
119 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 119 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
120 if (r == NULL) {
121 dev_err(&pdev->dev, "no memory resource defined\n");
122 return -ENODEV;
123 }
124
125 puv3->base = devm_ioremap_resource(&pdev->dev, r); 120 puv3->base = devm_ioremap_resource(&pdev->dev, r);
126 if (IS_ERR(puv3->base)) 121 if (IS_ERR(puv3->base))
127 return PTR_ERR(puv3->base); 122 return PTR_ERR(puv3->base);
diff --git a/drivers/pwm/pwm-pxa.c b/drivers/pwm/pwm-pxa.c
index dee6ab552a0a..dc9717551d39 100644
--- a/drivers/pwm/pwm-pxa.c
+++ b/drivers/pwm/pwm-pxa.c
@@ -147,11 +147,6 @@ static int pwm_probe(struct platform_device *pdev)
147 pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1; 147 pwm->chip.npwm = (id->driver_data & HAS_SECONDARY_PWM) ? 2 : 1;
148 148
149 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 149 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
150 if (r == NULL) {
151 dev_err(&pdev->dev, "no memory resource defined\n");
152 return -ENODEV;
153 }
154
155 pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); 150 pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
156 if (IS_ERR(pwm->mmio_base)) 151 if (IS_ERR(pwm->mmio_base))
157 return PTR_ERR(pwm->mmio_base); 152 return PTR_ERR(pwm->mmio_base);
diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c
index 3d75f4a88f98..a5402933001f 100644
--- a/drivers/pwm/pwm-tegra.c
+++ b/drivers/pwm/pwm-tegra.c
@@ -181,11 +181,6 @@ static int tegra_pwm_probe(struct platform_device *pdev)
181 pwm->dev = &pdev->dev; 181 pwm->dev = &pdev->dev;
182 182
183 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 183 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
184 if (!r) {
185 dev_err(&pdev->dev, "no memory resources defined\n");
186 return -ENODEV;
187 }
188
189 pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r); 184 pwm->mmio_base = devm_ioremap_resource(&pdev->dev, r);
190 if (IS_ERR(pwm->mmio_base)) 185 if (IS_ERR(pwm->mmio_base))
191 return PTR_ERR(pwm->mmio_base); 186 return PTR_ERR(pwm->mmio_base);
diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c
index 0d65fb2e02c7..72ca42dfa733 100644
--- a/drivers/pwm/pwm-tiecap.c
+++ b/drivers/pwm/pwm-tiecap.c
@@ -240,11 +240,6 @@ static int ecap_pwm_probe(struct platform_device *pdev)
240 pc->chip.npwm = 1; 240 pc->chip.npwm = 1;
241 241
242 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 242 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 if (!r) {
244 dev_err(&pdev->dev, "no memory resource defined\n");
245 return -ENODEV;
246 }
247
248 pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); 243 pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
249 if (IS_ERR(pc->mmio_base)) 244 if (IS_ERR(pc->mmio_base))
250 return PTR_ERR(pc->mmio_base); 245 return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-tiehrpwm.c b/drivers/pwm/pwm-tiehrpwm.c
index 6a217596942f..48a485c2e422 100644
--- a/drivers/pwm/pwm-tiehrpwm.c
+++ b/drivers/pwm/pwm-tiehrpwm.c
@@ -471,11 +471,6 @@ static int ehrpwm_pwm_probe(struct platform_device *pdev)
471 pc->chip.npwm = NUM_PWM_CHANNEL; 471 pc->chip.npwm = NUM_PWM_CHANNEL;
472 472
473 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 473 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
474 if (!r) {
475 dev_err(&pdev->dev, "no memory resource defined\n");
476 return -ENODEV;
477 }
478
479 pc->mmio_base = devm_ioremap_resource(&pdev->dev, r); 474 pc->mmio_base = devm_ioremap_resource(&pdev->dev, r);
480 if (IS_ERR(pc->mmio_base)) 475 if (IS_ERR(pc->mmio_base))
481 return PTR_ERR(pc->mmio_base); 476 return PTR_ERR(pc->mmio_base);
diff --git a/drivers/pwm/pwm-tipwmss.c b/drivers/pwm/pwm-tipwmss.c
index c9c3d3a1e0eb..3b119bc2c3c6 100644
--- a/drivers/pwm/pwm-tipwmss.c
+++ b/drivers/pwm/pwm-tipwmss.c
@@ -70,11 +70,6 @@ static int pwmss_probe(struct platform_device *pdev)
70 mutex_init(&info->pwmss_lock); 70 mutex_init(&info->pwmss_lock);
71 71
72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 72 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73 if (!r) {
74 dev_err(&pdev->dev, "no memory resource defined\n");
75 return -ENODEV;
76 }
77
78 info->mmio_base = devm_ioremap_resource(&pdev->dev, r); 73 info->mmio_base = devm_ioremap_resource(&pdev->dev, r);
79 if (IS_ERR(info->mmio_base)) 74 if (IS_ERR(info->mmio_base))
80 return PTR_ERR(info->mmio_base); 75 return PTR_ERR(info->mmio_base);
diff --git a/drivers/pwm/pwm-vt8500.c b/drivers/pwm/pwm-vt8500.c
index 69effd19afc7..323125abf3f4 100644
--- a/drivers/pwm/pwm-vt8500.c
+++ b/drivers/pwm/pwm-vt8500.c
@@ -230,11 +230,6 @@ static int vt8500_pwm_probe(struct platform_device *pdev)
230 } 230 }
231 231
232 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 232 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
233 if (r == NULL) {
234 dev_err(&pdev->dev, "no memory resource defined\n");
235 return -ENODEV;
236 }
237
238 chip->base = devm_ioremap_resource(&pdev->dev, r); 233 chip->base = devm_ioremap_resource(&pdev->dev, r);
239 if (IS_ERR(chip->base)) 234 if (IS_ERR(chip->base))
240 return PTR_ERR(chip->base); 235 return PTR_ERR(chip->base);
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index 6194d35ebb97..5ab056494bbe 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -47,4 +47,24 @@ config RAPIDIO_DEBUG
47 47
48 If you are unsure about this, say N here. 48 If you are unsure about this, say N here.
49 49
50choice
51 prompt "Enumeration method"
52 depends on RAPIDIO
53 default RAPIDIO_ENUM_BASIC
54 help
55 There are different enumeration and discovery mechanisms offered
 56 for the RapidIO subsystem. You may select a single built-in method or
 57 any number of methods to be built as modules.
58 Selecting a built-in method disables use of loadable methods.
59
60 If unsure, select Basic built-in.
61
62config RAPIDIO_ENUM_BASIC
63 tristate "Basic"
64 help
65 This option includes basic RapidIO fabric enumeration and discovery
66 mechanism similar to one described in RapidIO specification Annex 1.
67
68endchoice
69
50source "drivers/rapidio/switches/Kconfig" 70source "drivers/rapidio/switches/Kconfig"
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
index ec3fb8121004..3036702ffe8b 100644
--- a/drivers/rapidio/Makefile
+++ b/drivers/rapidio/Makefile
@@ -1,7 +1,8 @@
1# 1#
2# Makefile for RapidIO interconnect services 2# Makefile for RapidIO interconnect services
3# 3#
4obj-y += rio.o rio-access.o rio-driver.o rio-scan.o rio-sysfs.o 4obj-y += rio.o rio-access.o rio-driver.o rio-sysfs.o
5obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
5 6
6obj-$(CONFIG_RAPIDIO) += switches/ 7obj-$(CONFIG_RAPIDIO) += switches/
7obj-$(CONFIG_RAPIDIO) += devices/ 8obj-$(CONFIG_RAPIDIO) += devices/
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 6faba406b6e9..a8b2c23a7ef4 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -471,6 +471,10 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
471 u32 intval; 471 u32 intval;
472 u32 ch_inte; 472 u32 ch_inte;
473 473
474 /* For MSI mode disable all device-level interrupts */
475 if (priv->flags & TSI721_USING_MSI)
476 iowrite32(0, priv->regs + TSI721_DEV_INTE);
477
474 dev_int = ioread32(priv->regs + TSI721_DEV_INT); 478 dev_int = ioread32(priv->regs + TSI721_DEV_INT);
475 if (!dev_int) 479 if (!dev_int)
476 return IRQ_NONE; 480 return IRQ_NONE;
@@ -560,6 +564,14 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
560 } 564 }
561 } 565 }
562#endif 566#endif
567
568 /* For MSI mode re-enable device-level interrupts */
569 if (priv->flags & TSI721_USING_MSI) {
570 dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
571 TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
572 iowrite32(dev_int, priv->regs + TSI721_DEV_INTE);
573 }
574
563 return IRQ_HANDLED; 575 return IRQ_HANDLED;
564} 576}
565 577
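The tsi721 handler now masks the device-level interrupt enables on entry when running in MSI mode and re-arms them on exit, since an MSI is edge-like and events raised mid-service would otherwise not retrigger it. A hedged sketch of that shape; the foo_* names and register offsets are placeholders:

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_DEV_INT		0x10		/* placeholder status offset */
#define FOO_DEV_INTE		0x14		/* placeholder enable offset */
#define FOO_DEV_INTE_ALL	0xffffffff

struct foo_priv {
	void __iomem *regs;
	bool using_msi;
};

static irqreturn_t foo_irqhandler(int irq, void *ptr)
{
	struct foo_priv *priv = ptr;
	u32 pending;

	/* In MSI mode, gate device-level interrupt generation while servicing. */
	if (priv->using_msi)
		iowrite32(0, priv->regs + FOO_DEV_INTE);

	pending = ioread32(priv->regs + FOO_DEV_INT);

	/* ... dispatch to the per-block handlers based on 'pending' ... */

	/* Re-arm the enables so later events raise a fresh MSI. */
	if (priv->using_msi)
		iowrite32(FOO_DEV_INTE_ALL, priv->regs + FOO_DEV_INTE);

	return pending ? IRQ_HANDLED : IRQ_NONE;
}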
diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c
index 0f4a53bdaa3c..a0c875563d76 100644
--- a/drivers/rapidio/rio-driver.c
+++ b/drivers/rapidio/rio-driver.c
@@ -164,6 +164,13 @@ void rio_unregister_driver(struct rio_driver *rdrv)
164 driver_unregister(&rdrv->driver); 164 driver_unregister(&rdrv->driver);
165} 165}
166 166
167void rio_attach_device(struct rio_dev *rdev)
168{
169 rdev->dev.bus = &rio_bus_type;
170 rdev->dev.parent = &rio_bus;
171}
172EXPORT_SYMBOL_GPL(rio_attach_device);
173
167/** 174/**
168 * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure 175 * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
169 * @dev: the standard device structure to match against 176 * @dev: the standard device structure to match against
@@ -200,6 +207,7 @@ struct bus_type rio_bus_type = {
200 .name = "rapidio", 207 .name = "rapidio",
201 .match = rio_match_bus, 208 .match = rio_match_bus,
202 .dev_attrs = rio_dev_attrs, 209 .dev_attrs = rio_dev_attrs,
210 .bus_attrs = rio_bus_attrs,
203 .probe = rio_device_probe, 211 .probe = rio_device_probe,
204 .remove = rio_device_remove, 212 .remove = rio_device_remove,
205}; 213};
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
index a965acd3c0e4..4c15dbf81087 100644
--- a/drivers/rapidio/rio-scan.c
+++ b/drivers/rapidio/rio-scan.c
@@ -37,12 +37,8 @@
37 37
38#include "rio.h" 38#include "rio.h"
39 39
40LIST_HEAD(rio_devices);
41
42static void rio_init_em(struct rio_dev *rdev); 40static void rio_init_em(struct rio_dev *rdev);
43 41
44DEFINE_SPINLOCK(rio_global_list_lock);
45
46static int next_destid = 0; 42static int next_destid = 0;
47static int next_comptag = 1; 43static int next_comptag = 1;
48 44
@@ -327,127 +323,6 @@ static int rio_is_switch(struct rio_dev *rdev)
327} 323}
328 324
329/** 325/**
330 * rio_switch_init - Sets switch operations for a particular vendor switch
331 * @rdev: RIO device
332 * @do_enum: Enumeration/Discovery mode flag
333 *
334 * Searches the RIO switch ops table for known switch types. If the vid
335 * and did match a switch table entry, then call switch initialization
336 * routine to setup switch-specific routines.
337 */
338static void rio_switch_init(struct rio_dev *rdev, int do_enum)
339{
340 struct rio_switch_ops *cur = __start_rio_switch_ops;
341 struct rio_switch_ops *end = __end_rio_switch_ops;
342
343 while (cur < end) {
344 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
345 pr_debug("RIO: calling init routine for %s\n",
346 rio_name(rdev));
347 cur->init_hook(rdev, do_enum);
348 break;
349 }
350 cur++;
351 }
352
353 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
354 pr_debug("RIO: adding STD routing ops for %s\n",
355 rio_name(rdev));
356 rdev->rswitch->add_entry = rio_std_route_add_entry;
357 rdev->rswitch->get_entry = rio_std_route_get_entry;
358 rdev->rswitch->clr_table = rio_std_route_clr_table;
359 }
360
361 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
362 printk(KERN_ERR "RIO: missing routing ops for %s\n",
363 rio_name(rdev));
364}
365
366/**
367 * rio_add_device- Adds a RIO device to the device model
368 * @rdev: RIO device
369 *
370 * Adds the RIO device to the global device list and adds the RIO
371 * device to the RIO device list. Creates the generic sysfs nodes
372 * for an RIO device.
373 */
374static int rio_add_device(struct rio_dev *rdev)
375{
376 int err;
377
378 err = device_add(&rdev->dev);
379 if (err)
380 return err;
381
382 spin_lock(&rio_global_list_lock);
383 list_add_tail(&rdev->global_list, &rio_devices);
384 spin_unlock(&rio_global_list_lock);
385
386 rio_create_sysfs_dev_files(rdev);
387
388 return 0;
389}
390
391/**
392 * rio_enable_rx_tx_port - enable input receiver and output transmitter of
393 * given port
394 * @port: Master port associated with the RIO network
395 * @local: local=1 select local port otherwise a far device is reached
396 * @destid: Destination ID of the device to check host bit
397 * @hopcount: Number of hops to reach the target
398 * @port_num: Port (-number on switch) to enable on a far end device
399 *
400 * Returns 0 or 1 from on General Control Command and Status Register
401 * (EXT_PTR+0x3C)
402 */
403inline int rio_enable_rx_tx_port(struct rio_mport *port,
404 int local, u16 destid,
405 u8 hopcount, u8 port_num) {
406#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
407 u32 regval;
408 u32 ext_ftr_ptr;
409
410 /*
411 * enable rx input tx output port
412 */
413 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
414 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
415
416 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
417
418 if (local) {
419 rio_local_read_config_32(port, ext_ftr_ptr +
420 RIO_PORT_N_CTL_CSR(0),
421 &regval);
422 } else {
423 if (rio_mport_read_config_32(port, destid, hopcount,
424 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
425 return -EIO;
426 }
427
428 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
429 /* serial */
430 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
431 | RIO_PORT_N_CTL_EN_TX_SER;
432 } else {
433 /* parallel */
434 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
435 | RIO_PORT_N_CTL_EN_TX_PAR;
436 }
437
438 if (local) {
439 rio_local_write_config_32(port, ext_ftr_ptr +
440 RIO_PORT_N_CTL_CSR(0), regval);
441 } else {
442 if (rio_mport_write_config_32(port, destid, hopcount,
443 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
444 return -EIO;
445 }
446#endif
447 return 0;
448}
449
450/**
451 * rio_setup_device- Allocates and sets up a RIO device 326 * rio_setup_device- Allocates and sets up a RIO device
452 * @net: RIO network 327 * @net: RIO network
453 * @port: Master port to send transactions 328 * @port: Master port to send transactions
@@ -587,8 +462,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
587 rdev->destid); 462 rdev->destid);
588 } 463 }
589 464
590 rdev->dev.bus = &rio_bus_type; 465 rio_attach_device(rdev);
591 rdev->dev.parent = &rio_bus;
592 466
593 device_initialize(&rdev->dev); 467 device_initialize(&rdev->dev);
594 rdev->dev.release = rio_release_dev; 468 rdev->dev.release = rio_release_dev;
@@ -1260,19 +1134,30 @@ static void rio_pw_enable(struct rio_mport *port, int enable)
1260/** 1134/**
1261 * rio_enum_mport- Start enumeration through a master port 1135 * rio_enum_mport- Start enumeration through a master port
1262 * @mport: Master port to send transactions 1136 * @mport: Master port to send transactions
1137 * @flags: Enumeration control flags
1263 * 1138 *
1264 * Starts the enumeration process. If somebody has enumerated our 1139 * Starts the enumeration process. If somebody has enumerated our
1265 * master port device, then give up. If not and we have an active 1140 * master port device, then give up. If not and we have an active
1266 * link, then start recursive peer enumeration. Returns %0 if 1141 * link, then start recursive peer enumeration. Returns %0 if
1267 * enumeration succeeds or %-EBUSY if enumeration fails. 1142 * enumeration succeeds or %-EBUSY if enumeration fails.
1268 */ 1143 */
1269int rio_enum_mport(struct rio_mport *mport) 1144int rio_enum_mport(struct rio_mport *mport, u32 flags)
1270{ 1145{
1271 struct rio_net *net = NULL; 1146 struct rio_net *net = NULL;
1272 int rc = 0; 1147 int rc = 0;
1273 1148
1274 printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id, 1149 printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,
1275 mport->name); 1150 mport->name);
1151
1152 /*
1153 * To avoid multiple start requests (repeat enumeration is not supported
1154 * by this method) check if enumeration/discovery was performed for this
1155 * mport: if mport was added into the list of mports for a net exit
1156 * with error.
1157 */
1158 if (mport->nnode.next || mport->nnode.prev)
1159 return -EBUSY;
1160
1276 /* If somebody else enumerated our master port device, bail. */ 1161 /* If somebody else enumerated our master port device, bail. */
1277 if (rio_enum_host(mport) < 0) { 1162 if (rio_enum_host(mport) < 0) {
1278 printk(KERN_INFO 1163 printk(KERN_INFO
@@ -1362,14 +1247,16 @@ static void rio_build_route_tables(struct rio_net *net)
1362/** 1247/**
1363 * rio_disc_mport- Start discovery through a master port 1248 * rio_disc_mport- Start discovery through a master port
1364 * @mport: Master port to send transactions 1249 * @mport: Master port to send transactions
1250 * @flags: discovery control flags
1365 * 1251 *
1366 * Starts the discovery process. If we have an active link, 1252 * Starts the discovery process. If we have an active link,
1367 * then wait for the signal that enumeration is complete. 1253 * then wait for the signal that enumeration is complete (if wait
1254 * is allowed).
1368 * When enumeration completion is signaled, start recursive 1255 * When enumeration completion is signaled, start recursive
1369 * peer discovery. Returns %0 if discovery succeeds or %-EBUSY 1256 * peer discovery. Returns %0 if discovery succeeds or %-EBUSY
1370 * on failure. 1257 * on failure.
1371 */ 1258 */
1372int rio_disc_mport(struct rio_mport *mport) 1259int rio_disc_mport(struct rio_mport *mport, u32 flags)
1373{ 1260{
1374 struct rio_net *net = NULL; 1261 struct rio_net *net = NULL;
1375 unsigned long to_end; 1262 unsigned long to_end;
@@ -1379,6 +1266,11 @@ int rio_disc_mport(struct rio_mport *mport)
1379 1266
1380 /* If master port has an active link, allocate net and discover peers */ 1267 /* If master port has an active link, allocate net and discover peers */
1381 if (rio_mport_is_active(mport)) { 1268 if (rio_mport_is_active(mport)) {
1269 if (rio_enum_complete(mport))
1270 goto enum_done;
1271 else if (flags & RIO_SCAN_ENUM_NO_WAIT)
1272 return -EAGAIN;
1273
1382 pr_debug("RIO: wait for enumeration to complete...\n"); 1274 pr_debug("RIO: wait for enumeration to complete...\n");
1383 1275
1384 to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ; 1276 to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ;
@@ -1421,3 +1313,41 @@ enum_done:
1421bail: 1313bail:
1422 return -EBUSY; 1314 return -EBUSY;
1423} 1315}
1316
1317static struct rio_scan rio_scan_ops = {
1318 .enumerate = rio_enum_mport,
1319 .discover = rio_disc_mport,
1320};
1321
1322static bool scan;
1323module_param(scan, bool, 0);
1324MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery "
1325 "(default = 0)");
1326
1327/**
1328 * rio_basic_attach:
1329 *
1330 * When this enumeration/discovery method is loaded as a module this function
 1331 * registers its specific enumeration and discovery routines for all available
1332 * RapidIO mport devices. The "scan" command line parameter controls ability of
1333 * the module to start RapidIO enumeration/discovery automatically.
1334 *
1335 * Returns 0 for success or -EIO if unable to register itself.
1336 *
1337 * This enumeration/discovery method cannot be unloaded and therefore does not
1338 * provide a matching cleanup_module routine.
1339 */
1340
1341static int __init rio_basic_attach(void)
1342{
1343 if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops))
1344 return -EIO;
1345 if (scan)
1346 rio_init_mports();
1347 return 0;
1348}
1349
1350late_initcall(rio_basic_attach);
1351
1352MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery");
1353MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 4dbe360989be..66d4acd5e18f 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -285,3 +285,48 @@ void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
285 rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE); 285 rdev->rswitch->sw_sysfs(rdev, RIO_SW_SYSFS_REMOVE);
286 } 286 }
287} 287}
288
289static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
290 size_t count)
291{
292 long val;
293 struct rio_mport *port = NULL;
294 int rc;
295
296 if (kstrtol(buf, 0, &val) < 0)
297 return -EINVAL;
298
299 if (val == RIO_MPORT_ANY) {
300 rc = rio_init_mports();
301 goto exit;
302 }
303
304 if (val < 0 || val >= RIO_MAX_MPORTS)
305 return -EINVAL;
306
307 port = rio_find_mport((int)val);
308
309 if (!port) {
310 pr_debug("RIO: %s: mport_%d not available\n",
311 __func__, (int)val);
312 return -EINVAL;
313 }
314
315 if (!port->nscan)
316 return -EINVAL;
317
318 if (port->host_deviceid >= 0)
319 rc = port->nscan->enumerate(port, 0);
320 else
321 rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT);
322exit:
323 if (!rc)
324 rc = count;
325
326 return rc;
327}
328
329struct bus_attribute rio_bus_attrs[] = {
330 __ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store),
331 __ATTR_NULL
332};
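With the bus attribute above in place, userspace can trigger enumeration or discovery by writing a mport id (or -1 for RIO_MPORT_ANY) to the attribute; under the usual sysfs layout that file would appear as /sys/bus/rapidio/scan. A small hedged example of requesting a scan of all mports from a C program:

#include <stdio.h>

int main(void)
{
	/* -1 mirrors RIO_MPORT_ANY and asks rio_init_mports() to run. */
	FILE *f = fopen("/sys/bus/rapidio/scan", "w");

	if (!f)
		return 1;
	fprintf(f, "-1\n");
	return fclose(f) ? 1 : 0;
}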
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index d553b5d13722..cb1c08996fbb 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -31,7 +31,11 @@
31 31
32#include "rio.h" 32#include "rio.h"
33 33
34static LIST_HEAD(rio_devices);
35static DEFINE_SPINLOCK(rio_global_list_lock);
36
34static LIST_HEAD(rio_mports); 37static LIST_HEAD(rio_mports);
38static DEFINE_MUTEX(rio_mport_list_lock);
35static unsigned char next_portid; 39static unsigned char next_portid;
36static DEFINE_SPINLOCK(rio_mmap_lock); 40static DEFINE_SPINLOCK(rio_mmap_lock);
37 41
@@ -53,6 +57,32 @@ u16 rio_local_get_device_id(struct rio_mport *port)
53} 57}
54 58
55/** 59/**
60 * rio_add_device- Adds a RIO device to the device model
61 * @rdev: RIO device
62 *
63 * Adds the RIO device to the global device list and adds the RIO
64 * device to the RIO device list. Creates the generic sysfs nodes
65 * for an RIO device.
66 */
67int rio_add_device(struct rio_dev *rdev)
68{
69 int err;
70
71 err = device_add(&rdev->dev);
72 if (err)
73 return err;
74
75 spin_lock(&rio_global_list_lock);
76 list_add_tail(&rdev->global_list, &rio_devices);
77 spin_unlock(&rio_global_list_lock);
78
79 rio_create_sysfs_dev_files(rdev);
80
81 return 0;
82}
83EXPORT_SYMBOL_GPL(rio_add_device);
84
85/**
56 * rio_request_inb_mbox - request inbound mailbox service 86 * rio_request_inb_mbox - request inbound mailbox service
57 * @mport: RIO master port from which to allocate the mailbox resource 87 * @mport: RIO master port from which to allocate the mailbox resource
58 * @dev_id: Device specific pointer to pass on event 88 * @dev_id: Device specific pointer to pass on event
@@ -489,6 +519,7 @@ rio_mport_get_physefb(struct rio_mport *port, int local,
489 519
490 return ext_ftr_ptr; 520 return ext_ftr_ptr;
491} 521}
522EXPORT_SYMBOL_GPL(rio_mport_get_physefb);
492 523
493/** 524/**
494 * rio_get_comptag - Begin or continue searching for a RIO device by component tag 525 * rio_get_comptag - Begin or continue searching for a RIO device by component tag
@@ -521,6 +552,7 @@ exit:
521 spin_unlock(&rio_global_list_lock); 552 spin_unlock(&rio_global_list_lock);
522 return rdev; 553 return rdev;
523} 554}
555EXPORT_SYMBOL_GPL(rio_get_comptag);
524 556
525/** 557/**
526 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. 558 * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port.
@@ -545,6 +577,107 @@ int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock)
545 regval); 577 regval);
546 return 0; 578 return 0;
547} 579}
580EXPORT_SYMBOL_GPL(rio_set_port_lockout);
581
582/**
583 * rio_switch_init - Sets switch operations for a particular vendor switch
584 * @rdev: RIO device
585 * @do_enum: Enumeration/Discovery mode flag
586 *
587 * Searches the RIO switch ops table for known switch types. If the vid
588 * and did match a switch table entry, then call switch initialization
589 * routine to setup switch-specific routines.
590 */
591void rio_switch_init(struct rio_dev *rdev, int do_enum)
592{
593 struct rio_switch_ops *cur = __start_rio_switch_ops;
594 struct rio_switch_ops *end = __end_rio_switch_ops;
595
596 while (cur < end) {
597 if ((cur->vid == rdev->vid) && (cur->did == rdev->did)) {
598 pr_debug("RIO: calling init routine for %s\n",
599 rio_name(rdev));
600 cur->init_hook(rdev, do_enum);
601 break;
602 }
603 cur++;
604 }
605
606 if ((cur >= end) && (rdev->pef & RIO_PEF_STD_RT)) {
607 pr_debug("RIO: adding STD routing ops for %s\n",
608 rio_name(rdev));
609 rdev->rswitch->add_entry = rio_std_route_add_entry;
610 rdev->rswitch->get_entry = rio_std_route_get_entry;
611 rdev->rswitch->clr_table = rio_std_route_clr_table;
612 }
613
614 if (!rdev->rswitch->add_entry || !rdev->rswitch->get_entry)
615 printk(KERN_ERR "RIO: missing routing ops for %s\n",
616 rio_name(rdev));
617}
618EXPORT_SYMBOL_GPL(rio_switch_init);
619
620/**
621 * rio_enable_rx_tx_port - enable input receiver and output transmitter of
622 * given port
623 * @port: Master port associated with the RIO network
624 * @local: local=1 select local port otherwise a far device is reached
625 * @destid: Destination ID of the device to check host bit
626 * @hopcount: Number of hops to reach the target
627 * @port_num: Port (-number on switch) to enable on a far end device
628 *
629 * Returns 0 or 1 from on General Control Command and Status Register
630 * (EXT_PTR+0x3C)
631 */
632int rio_enable_rx_tx_port(struct rio_mport *port,
633 int local, u16 destid,
634 u8 hopcount, u8 port_num)
635{
636#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS
637 u32 regval;
638 u32 ext_ftr_ptr;
639
640 /*
641 * enable rx input tx output port
642 */
643 pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = "
644 "%d, port_num = %d)\n", local, destid, hopcount, port_num);
645
646 ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount);
647
648 if (local) {
649 rio_local_read_config_32(port, ext_ftr_ptr +
650 RIO_PORT_N_CTL_CSR(0),
651 &regval);
652 } else {
653 if (rio_mport_read_config_32(port, destid, hopcount,
654 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), &regval) < 0)
655 return -EIO;
656 }
657
658 if (regval & RIO_PORT_N_CTL_P_TYP_SER) {
659 /* serial */
660 regval = regval | RIO_PORT_N_CTL_EN_RX_SER
661 | RIO_PORT_N_CTL_EN_TX_SER;
662 } else {
663 /* parallel */
664 regval = regval | RIO_PORT_N_CTL_EN_RX_PAR
665 | RIO_PORT_N_CTL_EN_TX_PAR;
666 }
667
668 if (local) {
669 rio_local_write_config_32(port, ext_ftr_ptr +
670 RIO_PORT_N_CTL_CSR(0), regval);
671 } else {
672 if (rio_mport_write_config_32(port, destid, hopcount,
673 ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0)
674 return -EIO;
675 }
676#endif
677 return 0;
678}
679EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port);
680
548 681
549/** 682/**
550 * rio_chk_dev_route - Validate route to the specified device. 683 * rio_chk_dev_route - Validate route to the specified device.
@@ -610,6 +743,7 @@ rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount)
610 743
611 return 0; 744 return 0;
612} 745}
746EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access);
613 747
614/** 748/**
615 * rio_chk_dev_access - Validate access to the specified device. 749 * rio_chk_dev_access - Validate access to the specified device.
@@ -941,6 +1075,7 @@ rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
941 return RIO_GET_BLOCK_ID(reg_val); 1075 return RIO_GET_BLOCK_ID(reg_val);
942 } 1076 }
943} 1077}
1078EXPORT_SYMBOL_GPL(rio_mport_get_efb);
944 1079
945/** 1080/**
946 * rio_mport_get_feature - query for devices' extended features 1081 * rio_mport_get_feature - query for devices' extended features
@@ -997,6 +1132,7 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
997 1132
998 return 0; 1133 return 0;
999} 1134}
1135EXPORT_SYMBOL_GPL(rio_mport_get_feature);
1000 1136
1001/** 1137/**
1002 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did 1138 * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
@@ -1246,6 +1382,95 @@ EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
1246 1382
1247#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ 1383#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1248 1384
1385/**
1386 * rio_find_mport - find RIO mport by its ID
1387 * @mport_id: number (ID) of mport device
1388 *
1389 * Given a RIO mport number, the desired mport is located
1390 * in the global list of mports. If the mport is found, a pointer to its
1391 * data structure is returned. If no mport is found, %NULL is returned.
1392 */
1393struct rio_mport *rio_find_mport(int mport_id)
1394{
1395 struct rio_mport *port;
1396
1397 mutex_lock(&rio_mport_list_lock);
1398 list_for_each_entry(port, &rio_mports, node) {
1399 if (port->id == mport_id)
1400 goto found;
1401 }
1402 port = NULL;
1403found:
1404 mutex_unlock(&rio_mport_list_lock);
1405
1406 return port;
1407}
1408
1409/**
1410 * rio_register_scan - enumeration/discovery method registration interface
1411 * @mport_id: mport device ID for which fabric scan routine has to be set
1412 * (RIO_MPORT_ANY = set for all available mports)
1413 * @scan_ops: enumeration/discovery control structure
1414 *
1415 * Assigns enumeration or discovery method to the specified mport device (or all
1416 * available mports if RIO_MPORT_ANY is specified).
1417 * Returns error if the mport already has an enumerator attached to it.
1418 * In case of RIO_MPORT_ANY ignores ports with valid scan routines and returns
1419 * an error if was unable to find at least one available mport.
1420 */
1421int rio_register_scan(int mport_id, struct rio_scan *scan_ops)
1422{
1423 struct rio_mport *port;
1424 int rc = -EBUSY;
1425
1426 mutex_lock(&rio_mport_list_lock);
1427 list_for_each_entry(port, &rio_mports, node) {
1428 if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
1429 if (port->nscan && mport_id == RIO_MPORT_ANY)
1430 continue;
1431 else if (port->nscan)
1432 break;
1433
1434 port->nscan = scan_ops;
1435 rc = 0;
1436
1437 if (mport_id != RIO_MPORT_ANY)
1438 break;
1439 }
1440 }
1441 mutex_unlock(&rio_mport_list_lock);
1442
1443 return rc;
1444}
1445EXPORT_SYMBOL_GPL(rio_register_scan);
1446
1447/**
1448 * rio_unregister_scan - removes enumeration/discovery method from mport
1449 * @mport_id: mport device ID for which fabric scan routine has to be
1450 * unregistered (RIO_MPORT_ANY = set for all available mports)
1451 *
1452 * Removes enumeration or discovery method assigned to the specified mport
1453 * device (or all available mports if RIO_MPORT_ANY is specified).
1454 */
1455int rio_unregister_scan(int mport_id)
1456{
1457 struct rio_mport *port;
1458
1459 mutex_lock(&rio_mport_list_lock);
1460 list_for_each_entry(port, &rio_mports, node) {
1461 if (port->id == mport_id || mport_id == RIO_MPORT_ANY) {
1462 if (port->nscan)
1463 port->nscan = NULL;
1464 if (mport_id != RIO_MPORT_ANY)
1465 break;
1466 }
1467 }
1468 mutex_unlock(&rio_mport_list_lock);
1469
1470 return 0;
1471}
1472EXPORT_SYMBOL_GPL(rio_unregister_scan);
1473
1249static void rio_fixup_device(struct rio_dev *dev) 1474static void rio_fixup_device(struct rio_dev *dev)
1250{ 1475{
1251} 1476}
@@ -1274,7 +1499,7 @@ static void disc_work_handler(struct work_struct *_work)
1274 work = container_of(_work, struct rio_disc_work, work); 1499 work = container_of(_work, struct rio_disc_work, work);
1275 pr_debug("RIO: discovery work for mport %d %s\n", 1500 pr_debug("RIO: discovery work for mport %d %s\n",
1276 work->mport->id, work->mport->name); 1501 work->mport->id, work->mport->name);
1277 rio_disc_mport(work->mport); 1502 work->mport->nscan->discover(work->mport, 0);
1278} 1503}
1279 1504
1280int rio_init_mports(void) 1505int rio_init_mports(void)
@@ -1290,12 +1515,15 @@ int rio_init_mports(void)
1290 * First, run enumerations and check if we need to perform discovery 1515 * First, run enumerations and check if we need to perform discovery
1291 * on any of the registered mports. 1516 * on any of the registered mports.
1292 */ 1517 */
1518 mutex_lock(&rio_mport_list_lock);
1293 list_for_each_entry(port, &rio_mports, node) { 1519 list_for_each_entry(port, &rio_mports, node) {
1294 if (port->host_deviceid >= 0) 1520 if (port->host_deviceid >= 0) {
1295 rio_enum_mport(port); 1521 if (port->nscan)
1296 else 1522 port->nscan->enumerate(port, 0);
1523 } else
1297 n++; 1524 n++;
1298 } 1525 }
1526 mutex_unlock(&rio_mport_list_lock);
1299 1527
1300 if (!n) 1528 if (!n)
1301 goto no_disc; 1529 goto no_disc;
@@ -1322,14 +1550,16 @@ int rio_init_mports(void)
1322 } 1550 }
1323 1551
1324 n = 0; 1552 n = 0;
1553 mutex_lock(&rio_mport_list_lock);
1325 list_for_each_entry(port, &rio_mports, node) { 1554 list_for_each_entry(port, &rio_mports, node) {
1326 if (port->host_deviceid < 0) { 1555 if (port->host_deviceid < 0 && port->nscan) {
1327 work[n].mport = port; 1556 work[n].mport = port;
1328 INIT_WORK(&work[n].work, disc_work_handler); 1557 INIT_WORK(&work[n].work, disc_work_handler);
1329 queue_work(rio_wq, &work[n].work); 1558 queue_work(rio_wq, &work[n].work);
1330 n++; 1559 n++;
1331 } 1560 }
1332 } 1561 }
1562 mutex_unlock(&rio_mport_list_lock);
1333 1563
1334 flush_workqueue(rio_wq); 1564 flush_workqueue(rio_wq);
1335 pr_debug("RIO: destroy discovery workqueue\n"); 1565 pr_debug("RIO: destroy discovery workqueue\n");
@@ -1342,8 +1572,6 @@ no_disc:
1342 return 0; 1572 return 0;
1343} 1573}
1344 1574
1345device_initcall_sync(rio_init_mports);
1346
1347static int hdids[RIO_MAX_MPORTS + 1]; 1575static int hdids[RIO_MAX_MPORTS + 1];
1348 1576
1349static int rio_get_hdid(int index) 1577static int rio_get_hdid(int index)
@@ -1371,7 +1599,10 @@ int rio_register_mport(struct rio_mport *port)
1371 1599
1372 port->id = next_portid++; 1600 port->id = next_portid++;
1373 port->host_deviceid = rio_get_hdid(port->id); 1601 port->host_deviceid = rio_get_hdid(port->id);
1602 port->nscan = NULL;
1603 mutex_lock(&rio_mport_list_lock);
1374 list_add_tail(&port->node, &rio_mports); 1604 list_add_tail(&port->node, &rio_mports);
1605 mutex_unlock(&rio_mport_list_lock);
1375 return 0; 1606 return 0;
1376} 1607}
1377 1608
@@ -1386,3 +1617,4 @@ EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
1386EXPORT_SYMBOL_GPL(rio_release_inb_mbox); 1617EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
1387EXPORT_SYMBOL_GPL(rio_request_outb_mbox); 1618EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
1388EXPORT_SYMBOL_GPL(rio_release_outb_mbox); 1619EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
1620EXPORT_SYMBOL_GPL(rio_init_mports);
diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h
index b1af414f15e6..c14f864dea5c 100644
--- a/drivers/rapidio/rio.h
+++ b/drivers/rapidio/rio.h
@@ -15,6 +15,7 @@
15#include <linux/rio.h> 15#include <linux/rio.h>
16 16
17#define RIO_MAX_CHK_RETRY 3 17#define RIO_MAX_CHK_RETRY 3
18#define RIO_MPORT_ANY (-1)
18 19
19/* Functions internal to the RIO core code */ 20/* Functions internal to the RIO core code */
20 21
@@ -27,8 +28,6 @@ extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
27extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, 28extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
28 u8 hopcount); 29 u8 hopcount);
29extern int rio_create_sysfs_dev_files(struct rio_dev *rdev); 30extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
30extern int rio_enum_mport(struct rio_mport *mport);
31extern int rio_disc_mport(struct rio_mport *mport);
32extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid, 31extern int rio_std_route_add_entry(struct rio_mport *mport, u16 destid,
33 u8 hopcount, u16 table, u16 route_destid, 32 u8 hopcount, u16 table, u16 route_destid,
34 u8 route_port); 33 u8 route_port);
@@ -39,10 +38,18 @@ extern int rio_std_route_clr_table(struct rio_mport *mport, u16 destid,
39 u8 hopcount, u16 table); 38 u8 hopcount, u16 table);
40extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock); 39extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
41extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from); 40extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
41extern int rio_add_device(struct rio_dev *rdev);
42extern void rio_switch_init(struct rio_dev *rdev, int do_enum);
43extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
44 u8 hopcount, u8 port_num);
45extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
46extern int rio_unregister_scan(int mport_id);
47extern void rio_attach_device(struct rio_dev *rdev);
48extern struct rio_mport *rio_find_mport(int mport_id);
42 49
43/* Structures internal to the RIO core code */ 50/* Structures internal to the RIO core code */
44extern struct device_attribute rio_dev_attrs[]; 51extern struct device_attribute rio_dev_attrs[];
45extern spinlock_t rio_global_list_lock; 52extern struct bus_attribute rio_bus_attrs[];
46 53
47extern struct rio_switch_ops __start_rio_switch_ops[]; 54extern struct rio_switch_ops __start_rio_switch_ops[];
48extern struct rio_switch_ops __end_rio_switch_ops[]; 55extern struct rio_switch_ops __end_rio_switch_ops[];
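The RapidIO hunks above replace the built-in rio_enum_mport()/rio_disc_mport() calls with per-mport scan operations that an enumeration module attaches at run time through rio_register_scan()/rio_unregister_scan(). A minimal sketch of such a module follows; the layout of struct rio_scan is assumed from the two callbacks actually invoked in this diff (enumerate/discover taking an mport and a flags word), and the registration interface is declared in the core's internal rio.h here, so treat names and placement as illustrative rather than authoritative.

	/* Sketch: attach enumeration/discovery callbacks to all mports.
	 * Assumes struct rio_scan carries only the two hooks used above. */
	#include <linux/module.h>
	#include <linux/rio.h>
	#include "rio.h"		/* rio_register_scan(), RIO_MPORT_ANY */

	static int basic_enumerate(struct rio_mport *mport, u32 flags)
	{
		/* actively walk the fabric behind @mport and add devices */
		return 0;
	}

	static int basic_discover(struct rio_mport *mport, u32 flags)
	{
		/* passively pick up devices enumerated by another host */
		return 0;
	}

	static struct rio_scan basic_scan_ops = {
		.enumerate = basic_enumerate,
		.discover  = basic_discover,
	};

	static int __init basic_scan_init(void)
	{
		/* RIO_MPORT_ANY (-1) requests attachment to every registered mport */
		return rio_register_scan(RIO_MPORT_ANY, &basic_scan_ops);
	}

	static void __exit basic_scan_exit(void)
	{
		rio_unregister_scan(RIO_MPORT_ANY);
	}

	module_init(basic_scan_init);
	module_exit(basic_scan_exit);
	MODULE_LICENSE("GPL");

Passing a specific mport id instead of RIO_MPORT_ANY restricts the ops to one port; unregistering simply clears the nscan pointer, as the rio_unregister_scan() body at the top of this section shows.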
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 0c81915b1997..b9838130a7b0 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -20,7 +20,6 @@ if RTC_CLASS
20config RTC_HCTOSYS 20config RTC_HCTOSYS
21 bool "Set system time from RTC on startup and resume" 21 bool "Set system time from RTC on startup and resume"
22 default y 22 default y
23 depends on !ALWAYS_USE_PERSISTENT_CLOCK
24 help 23 help
25 If you say yes here, the system time (wall clock) will be set using 24 If you say yes here, the system time (wall clock) will be set using
26 the value read from a specified RTC device. This is useful to avoid 25 the value read from a specified RTC device. This is useful to avoid
@@ -29,7 +28,6 @@ config RTC_HCTOSYS
29config RTC_SYSTOHC 28config RTC_SYSTOHC
30 bool "Set the RTC time based on NTP synchronization" 29 bool "Set the RTC time based on NTP synchronization"
31 default y 30 default y
32 depends on !ALWAYS_USE_PERSISTENT_CLOCK
33 help 31 help
34 If you say yes here, the system time (wall clock) will be stored 32 If you say yes here, the system time (wall clock) will be stored
35 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11 33 in the RTC specified by RTC_HCTOSYS_DEVICE approximately every 11
diff --git a/drivers/rtc/rtc-max8998.c b/drivers/rtc/rtc-max8998.c
index 48b6612fae7f..d5af7baa48b5 100644
--- a/drivers/rtc/rtc-max8998.c
+++ b/drivers/rtc/rtc-max8998.c
@@ -285,7 +285,7 @@ static int max8998_rtc_probe(struct platform_device *pdev)
285 info->irq, ret); 285 info->irq, ret);
286 286
287 dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name); 287 dev_info(&pdev->dev, "RTC CHIP NAME: %s\n", pdev->id_entry->name);
288 if (pdata->rtc_delay) { 288 if (pdata && pdata->rtc_delay) {
289 info->lp3974_bug_workaround = true; 289 info->lp3974_bug_workaround = true;
290 dev_warn(&pdev->dev, "LP3974 with RTC REGERR option." 290 dev_warn(&pdev->dev, "LP3974 with RTC REGERR option."
291 " RTC updates will be extremely slow.\n"); 291 " RTC updates will be extremely slow.\n");
diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c
index f5dfb6e5e7d9..d592e2fe43f7 100644
--- a/drivers/rtc/rtc-nuc900.c
+++ b/drivers/rtc/rtc-nuc900.c
@@ -234,11 +234,6 @@ static int __init nuc900_rtc_probe(struct platform_device *pdev)
234 return -ENOMEM; 234 return -ENOMEM;
235 } 235 }
236 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 236 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
237 if (!res) {
238 dev_err(&pdev->dev, "platform_get_resource failed\n");
239 return -ENXIO;
240 }
241
242 nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res); 237 nuc900_rtc->rtc_reg = devm_ioremap_resource(&pdev->dev, res);
243 if (IS_ERR(nuc900_rtc->rtc_reg)) 238 if (IS_ERR(nuc900_rtc->rtc_reg))
244 return PTR_ERR(nuc900_rtc->rtc_reg); 239 return PTR_ERR(nuc900_rtc->rtc_reg);
diff --git a/drivers/rtc/rtc-omap.c b/drivers/rtc/rtc-omap.c
index 4e1bdb832e37..b0ba3fc991ea 100644
--- a/drivers/rtc/rtc-omap.c
+++ b/drivers/rtc/rtc-omap.c
@@ -347,11 +347,6 @@ static int __init omap_rtc_probe(struct platform_device *pdev)
347 } 347 }
348 348
349 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 349 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
350 if (!res) {
351 pr_debug("%s: RTC resource data missing\n", pdev->name);
352 return -ENOENT;
353 }
354
355 rtc_base = devm_ioremap_resource(&pdev->dev, res); 350 rtc_base = devm_ioremap_resource(&pdev->dev, res);
356 if (IS_ERR(rtc_base)) 351 if (IS_ERR(rtc_base))
357 return PTR_ERR(rtc_base); 352 return PTR_ERR(rtc_base);
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index 8900ea784817..0f0609b1aa2c 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -306,7 +306,7 @@ static int pl031_remove(struct amba_device *adev)
306 struct pl031_local *ldata = dev_get_drvdata(&adev->dev); 306 struct pl031_local *ldata = dev_get_drvdata(&adev->dev);
307 307
308 amba_set_drvdata(adev, NULL); 308 amba_set_drvdata(adev, NULL);
309 free_irq(adev->irq[0], ldata->rtc); 309 free_irq(adev->irq[0], ldata);
310 rtc_device_unregister(ldata->rtc); 310 rtc_device_unregister(ldata->rtc);
311 iounmap(ldata->base); 311 iounmap(ldata->base);
312 kfree(ldata); 312 kfree(ldata);
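The pl031 change above matters because free_irq() identifies the handler to remove by the same dev_id cookie that was passed to request_irq(); handing it a different pointer (ldata->rtc instead of ldata) means the registration is never actually released. A schematic pairing, with all names hypothetical:

	#include <linux/interrupt.h>

	static irqreturn_t example_irq(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int example_setup(int irq, void *ldata)
	{
		int ret = request_irq(irq, example_irq, 0, "example", ldata);
		if (ret)
			return ret;
		/* ... use the device ... */
		free_irq(irq, ldata);	/* dev_id must match the request_irq() cookie */
		return 0;
	}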
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 14040b22888d..0b495e8b8e66 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -477,11 +477,6 @@ static int s3c_rtc_probe(struct platform_device *pdev)
477 /* get the memory region */ 477 /* get the memory region */
478 478
479 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 479 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
480 if (res == NULL) {
481 dev_err(&pdev->dev, "failed to get memory region resource\n");
482 return -ENOENT;
483 }
484
485 s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res); 480 s3c_rtc_base = devm_ioremap_resource(&pdev->dev, res);
486 if (IS_ERR(s3c_rtc_base)) 481 if (IS_ERR(s3c_rtc_base))
487 return PTR_ERR(s3c_rtc_base); 482 return PTR_ERR(s3c_rtc_base);
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index a34315d25478..76af92ad5a8a 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -322,12 +322,6 @@ static int __init tegra_rtc_probe(struct platform_device *pdev)
322 return -ENOMEM; 322 return -ENOMEM;
323 323
324 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 324 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
325 if (!res) {
326 dev_err(&pdev->dev,
327 "Unable to allocate resources for device.\n");
328 return -EBUSY;
329 }
330
331 info->rtc_base = devm_ioremap_resource(&pdev->dev, res); 325 info->rtc_base = devm_ioremap_resource(&pdev->dev, res);
332 if (IS_ERR(info->rtc_base)) 326 if (IS_ERR(info->rtc_base))
333 return PTR_ERR(info->rtc_base); 327 return PTR_ERR(info->rtc_base);
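Several of the probe functions above drop their explicit platform_get_resource() NULL checks. That is safe because devm_ioremap_resource() validates the resource itself: for a NULL resource, a busy region, or a failed mapping it logs an error and returns an ERR_PTR value, so the caller only needs the IS_ERR() test. A minimal sketch of the resulting pattern (driver names hypothetical):

	#include <linux/err.h>
	#include <linux/io.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct resource *res;
		void __iomem *base;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		/* a NULL res is handled inside devm_ioremap_resource() */
		base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		/* base is valid here and is unmapped automatically on detach */
		return 0;
	}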
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 690c3338a8ae..464dd29d06c0 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -343,6 +343,7 @@ static int __init xpram_setup_blkdev(void)
343 put_disk(xpram_disks[i]); 343 put_disk(xpram_disks[i]);
344 goto out; 344 goto out;
345 } 345 }
346 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, xpram_queues[i]);
346 blk_queue_make_request(xpram_queues[i], xpram_make_request); 347 blk_queue_make_request(xpram_queues[i], xpram_make_request);
347 blk_queue_logical_block_size(xpram_queues[i], 4096); 348 blk_queue_logical_block_size(xpram_queues[i], 4096);
348 } 349 }
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 21fabc6d5a9c..6c440d4349d4 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -352,12 +352,48 @@ static ssize_t chp_shared_show(struct device *dev,
352 352
353static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL); 353static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
354 354
355static ssize_t chp_chid_show(struct device *dev, struct device_attribute *attr,
356 char *buf)
357{
358 struct channel_path *chp = to_channelpath(dev);
359 ssize_t rc;
360
361 mutex_lock(&chp->lock);
362 if (chp->desc_fmt1.flags & 0x10)
363 rc = sprintf(buf, "%04x\n", chp->desc_fmt1.chid);
364 else
365 rc = 0;
366 mutex_unlock(&chp->lock);
367
368 return rc;
369}
370static DEVICE_ATTR(chid, 0444, chp_chid_show, NULL);
371
372static ssize_t chp_chid_external_show(struct device *dev,
373 struct device_attribute *attr, char *buf)
374{
375 struct channel_path *chp = to_channelpath(dev);
376 ssize_t rc;
377
378 mutex_lock(&chp->lock);
379 if (chp->desc_fmt1.flags & 0x10)
380 rc = sprintf(buf, "%x\n", chp->desc_fmt1.flags & 0x8 ? 1 : 0);
381 else
382 rc = 0;
383 mutex_unlock(&chp->lock);
384
385 return rc;
386}
387static DEVICE_ATTR(chid_external, 0444, chp_chid_external_show, NULL);
388
355static struct attribute *chp_attrs[] = { 389static struct attribute *chp_attrs[] = {
356 &dev_attr_status.attr, 390 &dev_attr_status.attr,
357 &dev_attr_configure.attr, 391 &dev_attr_configure.attr,
358 &dev_attr_type.attr, 392 &dev_attr_type.attr,
359 &dev_attr_cmg.attr, 393 &dev_attr_cmg.attr,
360 &dev_attr_shared.attr, 394 &dev_attr_shared.attr,
395 &dev_attr_chid.attr,
396 &dev_attr_chid_external.attr,
361 NULL, 397 NULL,
362}; 398};
363static struct attribute_group chp_attr_group = { 399static struct attribute_group chp_attr_group = {
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 349d5fc47196..e7ef2a683b8f 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -43,7 +43,9 @@ struct channel_path_desc_fmt1 {
43 u8 chpid; 43 u8 chpid;
44 u32:24; 44 u32:24;
45 u8 chpp; 45 u8 chpp;
46 u32 unused[3]; 46 u32 unused[2];
47 u16 chid;
48 u32:16;
47 u16 mdc; 49 u16 mdc;
48 u16:13; 50 u16:13;
49 u8 r:1; 51 u8 r:1;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index 787bd2c22bca..380387a47b1d 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -526,13 +526,17 @@ static void atmel_spi_next_xfer_pio(struct spi_master *master,
526 } 526 }
527 527
528 if (xfer->tx_buf) 528 if (xfer->tx_buf)
529 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf)); 529 if (xfer->bits_per_word > 8)
530 spi_writel(as, TDR, *(u16 *)(xfer->tx_buf));
531 else
532 spi_writel(as, TDR, *(u8 *)(xfer->tx_buf));
530 else 533 else
531 spi_writel(as, TDR, 0); 534 spi_writel(as, TDR, 0);
532 535
533 dev_dbg(master->dev.parent, 536 dev_dbg(master->dev.parent,
534 " start pio xfer %p: len %u tx %p rx %p\n", 537 " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
535 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf); 538 xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
539 xfer->bits_per_word);
536 540
537 /* Enable relevant interrupts */ 541 /* Enable relevant interrupts */
538 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES)); 542 spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
@@ -950,21 +954,39 @@ atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
950{ 954{
951 u8 *txp; 955 u8 *txp;
952 u8 *rxp; 956 u8 *rxp;
957 u16 *txp16;
958 u16 *rxp16;
953 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes; 959 unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
954 960
955 if (xfer->rx_buf) { 961 if (xfer->rx_buf) {
956 rxp = ((u8 *)xfer->rx_buf) + xfer_pos; 962 if (xfer->bits_per_word > 8) {
957 *rxp = spi_readl(as, RDR); 963 rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
964 *rxp16 = spi_readl(as, RDR);
965 } else {
966 rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
967 *rxp = spi_readl(as, RDR);
968 }
958 } else { 969 } else {
959 spi_readl(as, RDR); 970 spi_readl(as, RDR);
960 } 971 }
961 972 if (xfer->bits_per_word > 8) {
962 as->current_remaining_bytes--; 973 as->current_remaining_bytes -= 2;
974 if (as->current_remaining_bytes < 0)
975 as->current_remaining_bytes = 0;
976 } else {
977 as->current_remaining_bytes--;
978 }
963 979
964 if (as->current_remaining_bytes) { 980 if (as->current_remaining_bytes) {
965 if (xfer->tx_buf) { 981 if (xfer->tx_buf) {
966 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1; 982 if (xfer->bits_per_word > 8) {
967 spi_writel(as, TDR, *txp); 983 txp16 = (u16 *)(((u8 *)xfer->tx_buf)
984 + xfer_pos + 2);
985 spi_writel(as, TDR, *txp16);
986 } else {
987 txp = ((u8 *)xfer->tx_buf) + xfer_pos + 1;
988 spi_writel(as, TDR, *txp);
989 }
968 } else { 990 } else {
969 spi_writel(as, TDR, 0); 991 spi_writel(as, TDR, 0);
970 } 992 }
@@ -1378,9 +1400,16 @@ static int atmel_spi_transfer(struct spi_device *spi, struct spi_message *msg)
1378 } 1400 }
1379 } 1401 }
1380 1402
1403 if (xfer->bits_per_word > 8) {
1404 if (xfer->len % 2) {
1405 dev_dbg(&spi->dev, "buffer len should be 16 bits aligned\n");
1406 return -EINVAL;
1407 }
1408 }
1409
1381 /* FIXME implement these protocol options!! */ 1410 /* FIXME implement these protocol options!! */
1382 if (xfer->speed_hz) { 1411 if (xfer->speed_hz < spi->max_speed_hz) {
1383 dev_dbg(&spi->dev, "no protocol options yet\n"); 1412 dev_dbg(&spi->dev, "can't change speed in transfer\n");
1384 return -ENOPROTOOPT; 1413 return -ENOPROTOOPT;
1385 } 1414 }
1386 1415
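With the Atmel controller now handling 9..16-bit words in PIO mode, a client transfer just sets bits_per_word and supplies a buffer whose byte length is even, which is exactly what the new xfer->len % 2 check enforces. A sketch of such a transfer using the generic SPI API (helper name hypothetical):

	#include <linux/spi/spi.h>

	static int send_words16(struct spi_device *spi, const u16 *words,
				size_t nwords)
	{
		struct spi_transfer t = {
			.tx_buf        = words,
			.len           = nwords * 2,	/* bytes; must be even for >8-bit words */
			.bits_per_word = 16,
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		return spi_sync(spi, &m);
	}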
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 2e8f24a1fb95..50b13c9b1ab6 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -784,7 +784,7 @@ static const struct of_device_id davinci_spi_of_match[] = {
784 }, 784 },
785 { }, 785 { },
786}; 786};
787MODULE_DEVICE_TABLE(of, davini_spi_of_match); 787MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
788 788
789/** 789/**
790 * spi_davinci_get_pdata - Get platform data from DTS binding 790 * spi_davinci_get_pdata - Get platform data from DTS binding
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index d65c000efe35..09df8e22dba0 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -489,11 +489,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
489 tegra_sflash_parse_dt(tsd); 489 tegra_sflash_parse_dt(tsd);
490 490
491 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 491 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
492 if (!r) {
493 dev_err(&pdev->dev, "No IO memory resource\n");
494 ret = -ENODEV;
495 goto exit_free_master;
496 }
497 tsd->base = devm_ioremap_resource(&pdev->dev, r); 492 tsd->base = devm_ioremap_resource(&pdev->dev, r);
498 if (IS_ERR(tsd->base)) { 493 if (IS_ERR(tsd->base)) {
499 ret = PTR_ERR(tsd->base); 494 ret = PTR_ERR(tsd->base);
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 163fd802b7ac..32b7bb111eb6 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -334,7 +334,7 @@ struct spi_device *spi_alloc_device(struct spi_master *master)
334 spi->dev.parent = &master->dev; 334 spi->dev.parent = &master->dev;
335 spi->dev.bus = &spi_bus_type; 335 spi->dev.bus = &spi_bus_type;
336 spi->dev.release = spidev_release; 336 spi->dev.release = spidev_release;
337 spi->cs_gpio = -EINVAL; 337 spi->cs_gpio = -ENOENT;
338 device_initialize(&spi->dev); 338 device_initialize(&spi->dev);
339 return spi; 339 return spi;
340} 340}
@@ -1067,8 +1067,11 @@ static int of_spi_register_master(struct spi_master *master)
1067 nb = of_gpio_named_count(np, "cs-gpios"); 1067 nb = of_gpio_named_count(np, "cs-gpios");
1068 master->num_chipselect = max(nb, (int)master->num_chipselect); 1068 master->num_chipselect = max(nb, (int)master->num_chipselect);
1069 1069
1070 if (nb < 1) 1070 /* Return error only for an incorrectly formed cs-gpios property */
1071 if (nb == 0 || nb == -ENOENT)
1071 return 0; 1072 return 0;
1073 else if (nb < 0)
1074 return nb;
1072 1075
1073 cs = devm_kzalloc(&master->dev, 1076 cs = devm_kzalloc(&master->dev,
1074 sizeof(int) * master->num_chipselect, 1077 sizeof(int) * master->num_chipselect,
@@ -1079,7 +1082,7 @@ static int of_spi_register_master(struct spi_master *master)
1079 return -ENOMEM; 1082 return -ENOMEM;
1080 1083
1081 for (i = 0; i < master->num_chipselect; i++) 1084 for (i = 0; i < master->num_chipselect; i++)
1082 cs[i] = -EINVAL; 1085 cs[i] = -ENOENT;
1083 1086
1084 for (i = 0; i < nb; i++) 1087 for (i = 0; i < nb; i++)
1085 cs[i] = of_get_named_gpio(np, "cs-gpios", i); 1088 cs[i] = of_get_named_gpio(np, "cs-gpios", i);
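The spi core change standardizes on -ENOENT for "no chip-select GPIO given", both as the default in spi_alloc_device() and for unpopulated cs-gpios entries, while a malformed cs-gpios property now makes of_spi_register_master() return the error instead of silently ignoring it. Controller drivers can keep using the usual validity test; a sketch (function name hypothetical, active-low CS assumed):

	#include <linux/gpio.h>
	#include <linux/spi/spi.h>

	static void example_set_cs(struct spi_device *spi, bool assert)
	{
		/* cs_gpio is -ENOENT when no GPIO was provided via cs-gpios */
		if (gpio_is_valid(spi->cs_gpio))
			gpio_set_value(spi->cs_gpio, assert ? 0 : 1);
		/* otherwise fall back to the controller's native chip select */
	}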
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 4e8a1794f50a..aefe820a8005 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -72,10 +72,10 @@ source "drivers/staging/sep/Kconfig"
72 72
73source "drivers/staging/iio/Kconfig" 73source "drivers/staging/iio/Kconfig"
74 74
75source "drivers/staging/zram/Kconfig"
76
77source "drivers/staging/zsmalloc/Kconfig" 75source "drivers/staging/zsmalloc/Kconfig"
78 76
77source "drivers/staging/zram/Kconfig"
78
79source "drivers/staging/wlags49_h2/Kconfig" 79source "drivers/staging/wlags49_h2/Kconfig"
80 80
81source "drivers/staging/wlags49_h25/Kconfig" 81source "drivers/staging/wlags49_h25/Kconfig"
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index b040200a5a55..9bd874789ce5 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -242,7 +242,7 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
242 * 'log->buffer' which contains the first entry readable by 'euid' 242 * 'log->buffer' which contains the first entry readable by 'euid'
243 */ 243 */
244static size_t get_next_entry_by_uid(struct logger_log *log, 244static size_t get_next_entry_by_uid(struct logger_log *log,
245 size_t off, uid_t euid) 245 size_t off, kuid_t euid)
246{ 246{
247 while (off != log->w_off) { 247 while (off != log->w_off) {
248 struct logger_entry *entry; 248 struct logger_entry *entry;
@@ -251,7 +251,7 @@ static size_t get_next_entry_by_uid(struct logger_log *log,
251 251
252 entry = get_entry_header(log, off, &scratch); 252 entry = get_entry_header(log, off, &scratch);
253 253
254 if (entry->euid == euid) 254 if (uid_eq(entry->euid, euid))
255 return off; 255 return off;
256 256
257 next_len = sizeof(struct logger_entry) + entry->len; 257 next_len = sizeof(struct logger_entry) + entry->len;
diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h
index cc6bbd99c8e0..70af7d805dff 100644
--- a/drivers/staging/android/logger.h
+++ b/drivers/staging/android/logger.h
@@ -66,7 +66,7 @@ struct logger_entry {
66 __s32 tid; 66 __s32 tid;
67 __s32 sec; 67 __s32 sec;
68 __s32 nsec; 68 __s32 nsec;
69 uid_t euid; 69 kuid_t euid;
70 char msg[0]; 70 char msg[0];
71}; 71};
72 72
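Switching logger_entry.euid from uid_t to kuid_t keeps the stored value in the kernel-internal, namespace-aware form, which is why the comparison above becomes uid_eq() rather than ==. A short sketch of the helpers involved (function names hypothetical):

	#include <linux/cred.h>
	#include <linux/uidgid.h>
	#include <linux/user_namespace.h>

	static bool example_is_current_euid(kuid_t stored_euid)
	{
		/* kuid_t values are compared with uid_eq(), never directly */
		return uid_eq(stored_euid, current_euid());
	}

	static uid_t example_uid_for_userspace(kuid_t stored_euid)
	{
		/* translate to a plain numeric uid only at the user-space boundary */
		return from_kuid(current_user_ns(), stored_euid);
	}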
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index 7871579bb83d..87e852a0ef49 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -981,6 +981,7 @@ config COMEDI_ME_DAQ
981 981
982config COMEDI_NI_6527 982config COMEDI_NI_6527
983 tristate "NI 6527 support" 983 tristate "NI 6527 support"
984 depends on HAS_DMA
984 select COMEDI_MITE 985 select COMEDI_MITE
985 ---help--- 986 ---help---
986 Enable support for the National Instruments 6527 PCI card 987 Enable support for the National Instruments 6527 PCI card
@@ -990,6 +991,7 @@ config COMEDI_NI_6527
990 991
991config COMEDI_NI_65XX 992config COMEDI_NI_65XX
992 tristate "NI 65xx static dio PCI card support" 993 tristate "NI 65xx static dio PCI card support"
994 depends on HAS_DMA
993 select COMEDI_MITE 995 select COMEDI_MITE
994 ---help--- 996 ---help---
995 Enable support for National Instruments 65xx static dio boards. 997 Enable support for National Instruments 65xx static dio boards.
@@ -1003,6 +1005,7 @@ config COMEDI_NI_65XX
1003 1005
1004config COMEDI_NI_660X 1006config COMEDI_NI_660X
1005 tristate "NI 660x counter/timer PCI card support" 1007 tristate "NI 660x counter/timer PCI card support"
1008 depends on HAS_DMA
1006 select COMEDI_NI_TIOCMD 1009 select COMEDI_NI_TIOCMD
1007 ---help--- 1010 ---help---
1008 Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602, 1011 Enable support for National Instruments PCI-6601 (ni_660x), PCI-6602,
@@ -1013,6 +1016,7 @@ config COMEDI_NI_660X
1013 1016
1014config COMEDI_NI_670X 1017config COMEDI_NI_670X
1015 tristate "NI 670x PCI card support" 1018 tristate "NI 670x PCI card support"
1019 depends on HAS_DMA
1016 select COMEDI_MITE 1020 select COMEDI_MITE
1017 ---help--- 1021 ---help---
1018 Enable support for National Instruments PCI-6703 and PCI-6704 1022 Enable support for National Instruments PCI-6703 and PCI-6704
@@ -1022,6 +1026,7 @@ config COMEDI_NI_670X
1022 1026
1023config COMEDI_NI_LABPC_PCI 1027config COMEDI_NI_LABPC_PCI
1024 tristate "NI Lab-PC PCI-1200 support" 1028 tristate "NI Lab-PC PCI-1200 support"
1029 depends on HAS_DMA
1025 select COMEDI_NI_LABPC 1030 select COMEDI_NI_LABPC
1026 select COMEDI_MITE 1031 select COMEDI_MITE
1027 ---help--- 1032 ---help---
@@ -1032,6 +1037,7 @@ config COMEDI_NI_LABPC_PCI
1032 1037
1033config COMEDI_NI_PCIDIO 1038config COMEDI_NI_PCIDIO
1034 tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support" 1039 tristate "NI PCI-DIO32HS, PCI-6533, PCI-6534 support"
1040 depends on HAS_DMA
1035 select COMEDI_MITE 1041 select COMEDI_MITE
1036 select COMEDI_8255 1042 select COMEDI_8255
1037 ---help--- 1043 ---help---
@@ -1043,6 +1049,7 @@ config COMEDI_NI_PCIDIO
1043 1049
1044config COMEDI_NI_PCIMIO 1050config COMEDI_NI_PCIMIO
1045 tristate "NI PCI-MIO-E series and M series support" 1051 tristate "NI PCI-MIO-E series and M series support"
1052 depends on HAS_DMA
1046 select COMEDI_NI_TIOCMD 1053 select COMEDI_NI_TIOCMD
1047 select COMEDI_8255 1054 select COMEDI_8255
1048 select COMEDI_FC 1055 select COMEDI_FC
@@ -1095,10 +1102,12 @@ config COMEDI_SSV_DNP
1095 called ssv_dnp. 1102 called ssv_dnp.
1096 1103
1097config COMEDI_MITE 1104config COMEDI_MITE
1105 depends on HAS_DMA
1098 tristate 1106 tristate
1099 1107
1100config COMEDI_NI_TIOCMD 1108config COMEDI_NI_TIOCMD
1101 tristate 1109 tristate
1110 depends on HAS_DMA
1102 select COMEDI_NI_TIO 1111 select COMEDI_NI_TIO
1103 select COMEDI_MITE 1112 select COMEDI_MITE
1104 1113
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index ca709901fb3e..d4be0e68509b 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -51,10 +51,12 @@ static void __comedi_buf_free(struct comedi_device *dev,
51 clear_bit(PG_reserved, 51 clear_bit(PG_reserved,
52 &(virt_to_page(buf->virt_addr)->flags)); 52 &(virt_to_page(buf->virt_addr)->flags));
53 if (s->async_dma_dir != DMA_NONE) { 53 if (s->async_dma_dir != DMA_NONE) {
54#ifdef CONFIG_HAS_DMA
54 dma_free_coherent(dev->hw_dev, 55 dma_free_coherent(dev->hw_dev,
55 PAGE_SIZE, 56 PAGE_SIZE,
56 buf->virt_addr, 57 buf->virt_addr,
57 buf->dma_addr); 58 buf->dma_addr);
59#endif
58 } else { 60 } else {
59 free_page((unsigned long)buf->virt_addr); 61 free_page((unsigned long)buf->virt_addr);
60 } 62 }
@@ -74,6 +76,12 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
74 struct comedi_buf_page *buf; 76 struct comedi_buf_page *buf;
75 unsigned i; 77 unsigned i;
76 78
79 if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
80 dev_err(dev->class_dev,
81 "dma buffer allocation not supported\n");
82 return;
83 }
84
77 async->buf_page_list = vzalloc(sizeof(*buf) * n_pages); 85 async->buf_page_list = vzalloc(sizeof(*buf) * n_pages);
78 if (async->buf_page_list) 86 if (async->buf_page_list)
79 pages = vmalloc(sizeof(struct page *) * n_pages); 87 pages = vmalloc(sizeof(struct page *) * n_pages);
@@ -84,11 +92,15 @@ static void __comedi_buf_alloc(struct comedi_device *dev,
84 for (i = 0; i < n_pages; i++) { 92 for (i = 0; i < n_pages; i++) {
85 buf = &async->buf_page_list[i]; 93 buf = &async->buf_page_list[i];
86 if (s->async_dma_dir != DMA_NONE) 94 if (s->async_dma_dir != DMA_NONE)
95#ifdef CONFIG_HAS_DMA
87 buf->virt_addr = dma_alloc_coherent(dev->hw_dev, 96 buf->virt_addr = dma_alloc_coherent(dev->hw_dev,
88 PAGE_SIZE, 97 PAGE_SIZE,
89 &buf->dma_addr, 98 &buf->dma_addr,
90 GFP_KERNEL | 99 GFP_KERNEL |
91 __GFP_COMP); 100 __GFP_COMP);
101#else
102 break;
103#endif
92 else 104 else
93 buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL); 105 buf->virt_addr = (void *)get_zeroed_page(GFP_KERNEL);
94 if (!buf->virt_addr) 106 if (!buf->virt_addr)
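The IS_ENABLED(CONFIG_HAS_DMA) test above rejects DMA-backed buffers at run time on architectures without DMA support, while the remaining #ifdef keeps the dma_alloc_coherent()/dma_free_coherent() calls out of builds where they cannot link. A compact illustration of the idiom (the config symbol is real, the surrounding names are hypothetical):

	#include <linux/device.h>
	#include <linux/kernel.h>

	/* IS_ENABLED() is 1 if the option is built-in or modular, so the
	 * compiler can discard the dead branch yet still type-check it. */
	static int example_alloc(struct device *dev, bool wants_dma)
	{
		if (!IS_ENABLED(CONFIG_HAS_DMA) && wants_dma) {
			dev_err(dev, "DMA buffers not supported here\n");
			return -EOPNOTSUPP;
		}
		/* ... allocate normally ... */
		return 0;
	}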
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 00f2547024ec..924c54c9c31f 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -246,9 +246,6 @@ static int resize_async_buffer(struct comedi_device *dev,
246 return -EBUSY; 246 return -EBUSY;
247 } 247 }
248 248
249 if (!async->prealloc_buf)
250 return -EINVAL;
251
252 /* make sure buffer is an integral number of pages 249 /* make sure buffer is an integral number of pages
253 * (we round up) */ 250 * (we round up) */
254 new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK; 251 new_size = (new_size + PAGE_SIZE - 1) & PAGE_MASK;
diff --git a/drivers/staging/comedi/drivers/ni_labpc.c b/drivers/staging/comedi/drivers/ni_labpc.c
index 3d978f34d212..77a7bb632580 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.c
+++ b/drivers/staging/comedi/drivers/ni_labpc.c
@@ -976,8 +976,7 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
976 /* clear flip-flop to make sure 2-byte registers for 976 /* clear flip-flop to make sure 2-byte registers for
977 * count and address get set correctly */ 977 * count and address get set correctly */
978 clear_dma_ff(devpriv->dma_chan); 978 clear_dma_ff(devpriv->dma_chan);
979 set_dma_addr(devpriv->dma_chan, 979 set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
980 virt_to_bus(devpriv->dma_buffer));
981 /* set appropriate size of transfer */ 980 /* set appropriate size of transfer */
982 devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd); 981 devpriv->dma_transfer_size = labpc_suggest_transfer_size(cmd);
983 if (cmd->stop_src == TRIG_COUNT && 982 if (cmd->stop_src == TRIG_COUNT &&
@@ -1089,7 +1088,7 @@ static void labpc_drain_dma(struct comedi_device *dev)
1089 devpriv->count -= num_points; 1088 devpriv->count -= num_points;
1090 1089
1091 /* set address and count for next transfer */ 1090 /* set address and count for next transfer */
1092 set_dma_addr(devpriv->dma_chan, virt_to_bus(devpriv->dma_buffer)); 1091 set_dma_addr(devpriv->dma_chan, devpriv->dma_addr);
1093 set_dma_count(devpriv->dma_chan, leftover * sample_size); 1092 set_dma_count(devpriv->dma_chan, leftover * sample_size);
1094 release_dma_lock(flags); 1093 release_dma_lock(flags);
1095 1094
@@ -1741,6 +1740,9 @@ static int labpc_attach(struct comedi_device *dev, struct comedi_devconfig *it)
1741 unsigned long dma_flags; 1740 unsigned long dma_flags;
1742 1741
1743 devpriv->dma_chan = dma_chan; 1742 devpriv->dma_chan = dma_chan;
1743 devpriv->dma_addr =
1744 virt_to_bus(devpriv->dma_buffer);
1745
1744 dma_flags = claim_dma_lock(); 1746 dma_flags = claim_dma_lock();
1745 disable_dma(devpriv->dma_chan); 1747 disable_dma(devpriv->dma_chan);
1746 set_dma_mode(devpriv->dma_chan, DMA_MODE_READ); 1748 set_dma_mode(devpriv->dma_chan, DMA_MODE_READ);
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index 615f16f271c0..4b691f5a9965 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -82,6 +82,7 @@ struct labpc_private {
82 unsigned int divisor_b1; 82 unsigned int divisor_b1;
83 unsigned int dma_chan; /* dma channel to use */ 83 unsigned int dma_chan; /* dma channel to use */
84 u16 *dma_buffer; /* buffer ai will dma into */ 84 u16 *dma_buffer; /* buffer ai will dma into */
85 phys_addr_t dma_addr;
85 /* transfer size in bytes for current transfer */ 86 /* transfer size in bytes for current transfer */
86 unsigned int dma_transfer_size; 87 unsigned int dma_transfer_size;
87 /* we are using dma/fifo-half-full/etc. */ 88 /* we are using dma/fifo-half-full/etc. */
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index a46d579016d9..8c5dee9b3b05 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -310,9 +310,11 @@ static int ni_gpct_insn_read(struct comedi_device *dev,
310static int ni_gpct_insn_config(struct comedi_device *dev, 310static int ni_gpct_insn_config(struct comedi_device *dev,
311 struct comedi_subdevice *s, 311 struct comedi_subdevice *s,
312 struct comedi_insn *insn, unsigned int *data); 312 struct comedi_insn *insn, unsigned int *data);
313#ifdef PCIDMA
313static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s); 314static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s);
314static int ni_gpct_cmdtest(struct comedi_device *dev, 315static int ni_gpct_cmdtest(struct comedi_device *dev,
315 struct comedi_subdevice *s, struct comedi_cmd *cmd); 316 struct comedi_subdevice *s, struct comedi_cmd *cmd);
317#endif
316static int ni_gpct_cancel(struct comedi_device *dev, 318static int ni_gpct_cancel(struct comedi_device *dev,
317 struct comedi_subdevice *s); 319 struct comedi_subdevice *s);
318static void handle_gpct_interrupt(struct comedi_device *dev, 320static void handle_gpct_interrupt(struct comedi_device *dev,
@@ -4617,9 +4619,7 @@ static int ni_E_init(struct comedi_device *dev)
4617 for (j = 0; j < NUM_GPCT; ++j) { 4619 for (j = 0; j < NUM_GPCT; ++j) {
4618 s = &dev->subdevices[NI_GPCT_SUBDEV(j)]; 4620 s = &dev->subdevices[NI_GPCT_SUBDEV(j)];
4619 s->type = COMEDI_SUBD_COUNTER; 4621 s->type = COMEDI_SUBD_COUNTER;
4620 s->subdev_flags = 4622 s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
4621 SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL | SDF_CMD_READ
4622 /* | SDF_CMD_WRITE */ ;
4623 s->n_chan = 3; 4623 s->n_chan = 3;
4624 if (board->reg_type & ni_reg_m_series_mask) 4624 if (board->reg_type & ni_reg_m_series_mask)
4625 s->maxdata = 0xffffffff; 4625 s->maxdata = 0xffffffff;
@@ -4628,11 +4628,14 @@ static int ni_E_init(struct comedi_device *dev)
4628 s->insn_read = &ni_gpct_insn_read; 4628 s->insn_read = &ni_gpct_insn_read;
4629 s->insn_write = &ni_gpct_insn_write; 4629 s->insn_write = &ni_gpct_insn_write;
4630 s->insn_config = &ni_gpct_insn_config; 4630 s->insn_config = &ni_gpct_insn_config;
4631#ifdef PCIDMA
4632 s->subdev_flags |= SDF_CMD_READ /* | SDF_CMD_WRITE */;
4631 s->do_cmd = &ni_gpct_cmd; 4633 s->do_cmd = &ni_gpct_cmd;
4632 s->len_chanlist = 1; 4634 s->len_chanlist = 1;
4633 s->do_cmdtest = &ni_gpct_cmdtest; 4635 s->do_cmdtest = &ni_gpct_cmdtest;
4634 s->cancel = &ni_gpct_cancel; 4636 s->cancel = &ni_gpct_cancel;
4635 s->async_dma_dir = DMA_BIDIRECTIONAL; 4637 s->async_dma_dir = DMA_BIDIRECTIONAL;
4638#endif
4636 s->private = &devpriv->counter_dev->counters[j]; 4639 s->private = &devpriv->counter_dev->counters[j];
4637 4640
4638 devpriv->counter_dev->counters[j].chip_index = 0; 4641 devpriv->counter_dev->counters[j].chip_index = 0;
@@ -5216,10 +5219,10 @@ static int ni_gpct_insn_write(struct comedi_device *dev,
5216 return ni_tio_winsn(counter, insn, data); 5219 return ni_tio_winsn(counter, insn, data);
5217} 5220}
5218 5221
5222#ifdef PCIDMA
5219static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s) 5223static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
5220{ 5224{
5221 int retval; 5225 int retval;
5222#ifdef PCIDMA
5223 struct ni_gpct *counter = s->private; 5226 struct ni_gpct *counter = s->private;
5224/* const struct comedi_cmd *cmd = &s->async->cmd; */ 5227/* const struct comedi_cmd *cmd = &s->async->cmd; */
5225 5228
@@ -5233,23 +5236,20 @@ static int ni_gpct_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
5233 ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL); 5236 ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
5234 ni_e_series_enable_second_irq(dev, counter->counter_index, 1); 5237 ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
5235 retval = ni_tio_cmd(counter, s->async); 5238 retval = ni_tio_cmd(counter, s->async);
5236#else
5237 retval = -ENOTSUPP;
5238#endif
5239 return retval; 5239 return retval;
5240} 5240}
5241#endif
5241 5242
5243#ifdef PCIDMA
5242static int ni_gpct_cmdtest(struct comedi_device *dev, 5244static int ni_gpct_cmdtest(struct comedi_device *dev,
5243 struct comedi_subdevice *s, struct comedi_cmd *cmd) 5245 struct comedi_subdevice *s, struct comedi_cmd *cmd)
5244{ 5246{
5245#ifdef PCIDMA
5246 struct ni_gpct *counter = s->private; 5247 struct ni_gpct *counter = s->private;
5247 5248
5248 return ni_tio_cmdtest(counter, cmd); 5249 return ni_tio_cmdtest(counter, cmd);
5249#else
5250 return -ENOTSUPP; 5250 return -ENOTSUPP;
5251#endif
5252} 5251}
5252#endif
5253 5253
5254static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s) 5254static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
5255{ 5255{
diff --git a/drivers/staging/dwc2/Kconfig b/drivers/staging/dwc2/Kconfig
index f0b4739c65a1..d15d9d58e5ac 100644
--- a/drivers/staging/dwc2/Kconfig
+++ b/drivers/staging/dwc2/Kconfig
@@ -2,7 +2,6 @@ config USB_DWC2
2 tristate "DesignWare USB2 DRD Core Support" 2 tristate "DesignWare USB2 DRD Core Support"
3 depends on USB 3 depends on USB
4 depends on VIRT_TO_BUS 4 depends on VIRT_TO_BUS
5 select USB_OTG_UTILS
6 help 5 help
7 Say Y or M here if your system has a Dual Role HighSpeed 6 Say Y or M here if your system has a Dual Role HighSpeed
8 USB controller based on the DesignWare HSOTG IP Core. 7 USB controller based on the DesignWare HSOTG IP Core.
@@ -39,6 +38,7 @@ config USB_DWC2_TRACK_MISSED_SOFS
39 bool "Enable Missed SOF Tracking" 38 bool "Enable Missed SOF Tracking"
40 help 39 help
41 Say Y here to enable logging of missed SOF events to the dmesg log. 40 Say Y here to enable logging of missed SOF events to the dmesg log.
41 WARNING: This feature is still experimental.
42 If in doubt, say N. 42 If in doubt, say N.
43 43
44config USB_DWC2_DEBUG_PERIODIC 44config USB_DWC2_DEBUG_PERIODIC
diff --git a/drivers/staging/dwc2/hcd_intr.c b/drivers/staging/dwc2/hcd_intr.c
index 6e5dbed6ccec..e24062f0a49e 100644
--- a/drivers/staging/dwc2/hcd_intr.c
+++ b/drivers/staging/dwc2/hcd_intr.c
@@ -56,8 +56,6 @@
56static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg) 56static void dwc2_track_missed_sofs(struct dwc2_hsotg *hsotg)
57{ 57{
58#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS 58#ifdef CONFIG_USB_DWC2_TRACK_MISSED_SOFS
59#warning Compiling code to track missed SOFs
60
61 u16 curr_frame_number = hsotg->frame_number; 59 u16 curr_frame_number = hsotg->frame_number;
62 60
63 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) { 61 if (hsotg->frame_num_idx < FRAME_NUM_ARRAY_SIZE) {
diff --git a/drivers/staging/dwc2/platform.c b/drivers/staging/dwc2/platform.c
index 1f3d581a1078..44cce2fa6361 100644
--- a/drivers/staging/dwc2/platform.c
+++ b/drivers/staging/dwc2/platform.c
@@ -95,6 +95,14 @@ static int dwc2_driver_probe(struct platform_device *dev)
95 95
96 hsotg->dev = &dev->dev; 96 hsotg->dev = &dev->dev;
97 97
98 /*
99 * Use reasonable defaults so platforms don't have to provide these.
100 */
101 if (!dev->dev.dma_mask)
102 dev->dev.dma_mask = &dev->dev.coherent_dma_mask;
103 if (!dev->dev.coherent_dma_mask)
104 dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
105
98 irq = platform_get_irq(dev, 0); 106 irq = platform_get_irq(dev, 0);
99 if (irq < 0) { 107 if (irq < 0) {
100 dev_err(&dev->dev, "missing IRQ resource\n"); 108 dev_err(&dev->dev, "missing IRQ resource\n");
@@ -102,11 +110,6 @@ static int dwc2_driver_probe(struct platform_device *dev)
102 } 110 }
103 111
104 res = platform_get_resource(dev, IORESOURCE_MEM, 0); 112 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
105 if (!res) {
106 dev_err(&dev->dev, "missing memory base resource\n");
107 return -EINVAL;
108 }
109
110 hsotg->regs = devm_ioremap_resource(&dev->dev, res); 113 hsotg->regs = devm_ioremap_resource(&dev->dev, res);
111 if (IS_ERR(hsotg->regs)) 114 if (IS_ERR(hsotg->regs))
112 return PTR_ERR(hsotg->regs); 115 return PTR_ERR(hsotg->regs);
diff --git a/drivers/staging/gdm72xx/Kconfig b/drivers/staging/gdm72xx/Kconfig
index 3c18efe31365..69059138de4a 100644
--- a/drivers/staging/gdm72xx/Kconfig
+++ b/drivers/staging/gdm72xx/Kconfig
@@ -39,7 +39,7 @@ if WIMAX_GDM72XX_USB
39 39
40config WIMAX_GDM72XX_USB_PM 40config WIMAX_GDM72XX_USB_PM
41 bool "Enable power managerment support" 41 bool "Enable power managerment support"
42 depends on USB_SUSPEND 42 depends on PM_RUNTIME
43 43
44endif # WIMAX_GDM72XX_USB 44endif # WIMAX_GDM72XX_USB
45 45
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 2856b8fd44ad..163c638e4095 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -690,7 +690,6 @@ static void mxs_lradc_trigger_remove(struct iio_dev *iio)
690static int mxs_lradc_buffer_preenable(struct iio_dev *iio) 690static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
691{ 691{
692 struct mxs_lradc *lradc = iio_priv(iio); 692 struct mxs_lradc *lradc = iio_priv(iio);
693 struct iio_buffer *buffer = iio->buffer;
694 int ret = 0, chan, ofs = 0; 693 int ret = 0, chan, ofs = 0;
695 unsigned long enable = 0; 694 unsigned long enable = 0;
696 uint32_t ctrl4_set = 0; 695 uint32_t ctrl4_set = 0;
@@ -698,7 +697,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
698 uint32_t ctrl1_irq = 0; 697 uint32_t ctrl1_irq = 0;
699 const uint32_t chan_value = LRADC_CH_ACCUMULATE | 698 const uint32_t chan_value = LRADC_CH_ACCUMULATE |
700 ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET); 699 ((LRADC_DELAY_TIMER_LOOP - 1) << LRADC_CH_NUM_SAMPLES_OFFSET);
701 const int len = bitmap_weight(buffer->scan_mask, LRADC_MAX_TOTAL_CHANS); 700 const int len = bitmap_weight(iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS);
702 701
703 if (!len) 702 if (!len)
704 return -EINVAL; 703 return -EINVAL;
@@ -725,7 +724,7 @@ static int mxs_lradc_buffer_preenable(struct iio_dev *iio)
725 lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR); 724 lradc->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
726 writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR); 725 writel(0xff, lradc->base + LRADC_CTRL0 + STMP_OFFSET_REG_CLR);
727 726
728 for_each_set_bit(chan, buffer->scan_mask, LRADC_MAX_TOTAL_CHANS) { 727 for_each_set_bit(chan, iio->active_scan_mask, LRADC_MAX_TOTAL_CHANS) {
729 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs); 728 ctrl4_set |= chan << LRADC_CTRL4_LRADCSELECT_OFFSET(ofs);
730 ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs); 729 ctrl4_clr |= LRADC_CTRL4_LRADCSELECT_MASK(ofs);
731 ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs); 730 ctrl1_irq |= LRADC_CTRL1_LRADC_IRQ_EN(ofs);
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index d060f2572512..c99f890cc6c6 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -1869,6 +1869,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
1869 dev_info(&chip->client->dev, 1869 dev_info(&chip->client->dev,
1870 "%s: i2c device found does not match expected id\n", 1870 "%s: i2c device found does not match expected id\n",
1871 __func__); 1871 __func__);
1872 ret = -EINVAL;
1872 goto fail1; 1873 goto fail1;
1873 } 1874 }
1874 1875
@@ -1907,7 +1908,7 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
1907 if (ret) { 1908 if (ret) {
1908 dev_err(&clientp->dev, 1909 dev_err(&clientp->dev,
1909 "%s: irq request failed", __func__); 1910 "%s: irq request failed", __func__);
1910 goto fail2; 1911 goto fail1;
1911 } 1912 }
1912 } 1913 }
1913 1914
@@ -1920,17 +1921,17 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
1920 if (ret) { 1921 if (ret) {
1921 dev_err(&clientp->dev, 1922 dev_err(&clientp->dev,
1922 "%s: iio registration failed\n", __func__); 1923 "%s: iio registration failed\n", __func__);
1923 goto fail1; 1924 goto fail2;
1924 } 1925 }
1925 1926
1926 dev_info(&clientp->dev, "%s Light sensor found.\n", id->name); 1927 dev_info(&clientp->dev, "%s Light sensor found.\n", id->name);
1927 1928
1928 return 0; 1929 return 0;
1929 1930
1930fail1: 1931fail2:
1931 if (clientp->irq) 1932 if (clientp->irq)
1932 free_irq(clientp->irq, indio_dev); 1933 free_irq(clientp->irq, indio_dev);
1933fail2: 1934fail1:
1934 iio_device_free(indio_dev); 1935 iio_device_free(indio_dev);
1935 1936
1936 return ret; 1937 return ret;
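The tsl2x7x relabelling restores the usual kernel error-unwind convention: later failures jump to later labels, and the labels release resources in the reverse order they were acquired, so nothing is freed twice and nothing is leaked. A generic sketch of that shape (example_register() is a hypothetical final step):

	#include <linux/slab.h>

	int example_register(void *a, void *b);	/* hypothetical */

	static int example_probe_shape(void)
	{
		void *a, *b;
		int ret;

		a = kzalloc(32, GFP_KERNEL);
		if (!a)
			return -ENOMEM;

		b = kzalloc(32, GFP_KERNEL);
		if (!b) {
			ret = -ENOMEM;
			goto fail_free_a;
		}

		ret = example_register(a, b);
		if (ret)
			goto fail_free_b;

		return 0;

	fail_free_b:
		kfree(b);	/* released in reverse order of acquisition */
	fail_free_a:
		kfree(a);
		return ret;
	}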
diff --git a/drivers/staging/imx-drm/Kconfig b/drivers/staging/imx-drm/Kconfig
index 8c9e40390f42..ef699f753186 100644
--- a/drivers/staging/imx-drm/Kconfig
+++ b/drivers/staging/imx-drm/Kconfig
@@ -1,6 +1,7 @@
1config DRM_IMX 1config DRM_IMX
2 tristate "DRM Support for Freescale i.MX" 2 tristate "DRM Support for Freescale i.MX"
3 select DRM_KMS_HELPER 3 select DRM_KMS_HELPER
4 select VIDEOMODE_HELPERS
4 select DRM_GEM_CMA_HELPER 5 select DRM_GEM_CMA_HELPER
5 select DRM_KMS_CMA_HELPER 6 select DRM_KMS_CMA_HELPER
6 depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM) 7 depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM)
@@ -19,10 +20,12 @@ config DRM_IMX_FB_HELPER
19config DRM_IMX_PARALLEL_DISPLAY 20config DRM_IMX_PARALLEL_DISPLAY
20 tristate "Support for parallel displays" 21 tristate "Support for parallel displays"
21 depends on DRM_IMX 22 depends on DRM_IMX
23 select VIDEOMODE_HELPERS
22 24
23config DRM_IMX_TVE 25config DRM_IMX_TVE
24 tristate "Support for TV and VGA displays" 26 tristate "Support for TV and VGA displays"
25 depends on DRM_IMX 27 depends on DRM_IMX
28 select REGMAP_MMIO
26 help 29 help
27 Choose this to enable the internal Television Encoder (TVe) 30 Choose this to enable the internal Television Encoder (TVe)
28 found on i.MX53 processors. 31 found on i.MX53 processors.
@@ -30,6 +33,7 @@ config DRM_IMX_TVE
30config DRM_IMX_IPUV3_CORE 33config DRM_IMX_IPUV3_CORE
31 tristate "IPUv3 core support" 34 tristate "IPUv3 core support"
32 depends on DRM_IMX 35 depends on DRM_IMX
36 depends on RESET_CONTROLLER
33 help 37 help
34 Choose this if you have a i.MX5/6 system and want 38 Choose this if you have a i.MX5/6 system and want
35 to use the IPU. This option only enables IPU base 39 to use the IPU. This option only enables IPU base
@@ -38,5 +42,6 @@ config DRM_IMX_IPUV3_CORE
38config DRM_IMX_IPUV3 42config DRM_IMX_IPUV3
39 tristate "DRM Support for i.MX IPUv3" 43 tristate "DRM Support for i.MX IPUv3"
40 depends on DRM_IMX 44 depends on DRM_IMX
45 depends on DRM_IMX_IPUV3_CORE
41 help 46 help
42 Choose this if you have a i.MX5 or i.MX6 processor. 47 Choose this if you have a i.MX5 or i.MX6 processor.
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index ac1634464407..03892de9bd7e 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -670,7 +670,9 @@ static int imx_tve_probe(struct platform_device *pdev)
670 tve->dac_reg = devm_regulator_get(&pdev->dev, "dac"); 670 tve->dac_reg = devm_regulator_get(&pdev->dev, "dac");
671 if (!IS_ERR(tve->dac_reg)) { 671 if (!IS_ERR(tve->dac_reg)) {
672 regulator_set_voltage(tve->dac_reg, 2750000, 2750000); 672 regulator_set_voltage(tve->dac_reg, 2750000, 2750000);
673 regulator_enable(tve->dac_reg); 673 ret = regulator_enable(tve->dac_reg);
674 if (ret)
675 return ret;
674 } 676 }
675 677
676 tve->clk = devm_clk_get(&pdev->dev, "tve"); 678 tve->clk = devm_clk_get(&pdev->dev, "tve");
diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig
index ec32776ff547..df6569b997b8 100644
--- a/drivers/staging/media/solo6x10/Kconfig
+++ b/drivers/staging/media/solo6x10/Kconfig
@@ -1,6 +1,7 @@
1config SOLO6X10 1config SOLO6X10
2 tristate "Softlogic 6x10 MPEG codec cards" 2 tristate "Softlogic 6x10 MPEG codec cards"
3 depends on PCI && VIDEO_DEV && SND && I2C 3 depends on PCI && VIDEO_DEV && SND && I2C
4 depends on FONTS
4 select VIDEOBUF2_DMA_SG 5 select VIDEOBUF2_DMA_SG
5 select VIDEOBUF2_DMA_CONTIG 6 select VIDEOBUF2_DMA_CONTIG
6 select SND_PCM 7 select SND_PCM
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index a88959f9a07a..197c393c4ca7 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -124,6 +124,20 @@ int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
124EXPORT_SYMBOL_GPL(nvec_register_notifier); 124EXPORT_SYMBOL_GPL(nvec_register_notifier);
125 125
126/** 126/**
127 * nvec_unregister_notifier - Unregister a notifier with nvec
128 * @nvec: A &struct nvec_chip
129 * @nb: The notifier block to unregister
130 *
131 * Unregisters a notifier with @nvec. The notifier will be removed from the
132 * atomic notifier chain.
133 */
134int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
135{
136 return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
137}
138EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
139
140/**
127 * nvec_status_notifier - The final notifier 141 * nvec_status_notifier - The final notifier
128 * 142 *
129 * Prints a message about control events not handled in the notifier 143 * Prints a message about control events not handled in the notifier
@@ -185,7 +199,7 @@ static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
185 * 199 *
186 * Free the given message 200 * Free the given message
187 */ 201 */
188inline void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg) 202void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
189{ 203{
190 if (msg != &nvec->tx_scratch) 204 if (msg != &nvec->tx_scratch)
191 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool); 205 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
@@ -800,11 +814,6 @@ static int tegra_nvec_probe(struct platform_device *pdev)
800 } 814 }
801 815
802 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 816 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
803 if (!res) {
804 dev_err(&pdev->dev, "no mem resource?\n");
805 return -ENODEV;
806 }
807
808 base = devm_ioremap_resource(&pdev->dev, res); 817 base = devm_ioremap_resource(&pdev->dev, res);
809 if (IS_ERR(base)) 818 if (IS_ERR(base))
810 return PTR_ERR(base); 819 return PTR_ERR(base);
@@ -815,7 +824,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
815 return -ENODEV; 824 return -ENODEV;
816 } 825 }
817 826
818 i2c_clk = clk_get(&pdev->dev, "div-clk"); 827 i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
819 if (IS_ERR(i2c_clk)) { 828 if (IS_ERR(i2c_clk)) {
820 dev_err(nvec->dev, "failed to get controller clock\n"); 829 dev_err(nvec->dev, "failed to get controller clock\n");
821 return -ENODEV; 830 return -ENODEV;
@@ -902,8 +911,11 @@ static int tegra_nvec_remove(struct platform_device *pdev)
902 911
903 nvec_toggle_global_events(nvec, false); 912 nvec_toggle_global_events(nvec, false);
904 mfd_remove_devices(nvec->dev); 913 mfd_remove_devices(nvec->dev);
914 nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
905 cancel_work_sync(&nvec->rx_work); 915 cancel_work_sync(&nvec->rx_work);
906 cancel_work_sync(&nvec->tx_work); 916 cancel_work_sync(&nvec->tx_work);
 917	/* FIXME: needs to check whether nvec is responsible for power off */
918 pm_power_off = NULL;
907 919
908 return 0; 920 return 0;
909} 921}
diff --git a/drivers/staging/nvec/nvec.h b/drivers/staging/nvec/nvec.h
index b7a14bc0ab91..2b1316d87470 100644
--- a/drivers/staging/nvec/nvec.h
+++ b/drivers/staging/nvec/nvec.h
@@ -197,9 +197,8 @@ extern int nvec_register_notifier(struct nvec_chip *nvec,
197 struct notifier_block *nb, 197 struct notifier_block *nb,
198 unsigned int events); 198 unsigned int events);
199 199
200extern int nvec_unregister_notifier(struct device *dev, 200extern int nvec_unregister_notifier(struct nvec_chip *dev,
201 struct notifier_block *nb, 201 struct notifier_block *nb);
202 unsigned int events);
203 202
204extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg); 203extern void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg);
205 204
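With nvec_unregister_notifier() now exported and given a matching prototype, sub-drivers can drop their notifier from the chip's atomic notifier chain on removal, as the kbd, power and ps2 hunks below do. A hedged sketch of the register/unregister pairing in a hypothetical nvec sub-driver (the events argument of 0 is arbitrary here):

	#include <linux/notifier.h>
	#include <linux/platform_device.h>
	#include "nvec.h"

	static int example_notify(struct notifier_block *nb, unsigned long event,
				  void *data)
	{
		return NOTIFY_DONE;
	}

	static struct notifier_block example_nb = {
		.notifier_call = example_notify,
	};

	static int example_probe(struct platform_device *pdev)
	{
		struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);

		nvec_register_notifier(nvec, &example_nb, 0);
		return 0;
	}

	static int example_remove(struct platform_device *pdev)
	{
		struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);

		/* without this, the chain keeps pointing into an unloaded module */
		nvec_unregister_notifier(nvec, &example_nb);
		return 0;
	}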
diff --git a/drivers/staging/nvec/nvec_kbd.c b/drivers/staging/nvec/nvec_kbd.c
index 7445ce6422bb..a0ec52a4114f 100644
--- a/drivers/staging/nvec/nvec_kbd.c
+++ b/drivers/staging/nvec/nvec_kbd.c
@@ -169,8 +169,15 @@ fail:
169 169
170static int nvec_kbd_remove(struct platform_device *pdev) 170static int nvec_kbd_remove(struct platform_device *pdev)
171{ 171{
172 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
173 char disable_kbd[] = { NVEC_KBD, DISABLE_KBD },
174 uncnfg_wake_key_reporting[] = { NVEC_KBD, CNFG_WAKE_KEY_REPORTING,
175 false };
176 nvec_write_async(nvec, uncnfg_wake_key_reporting, 3);
177 nvec_write_async(nvec, disable_kbd, 2);
178 nvec_unregister_notifier(nvec, &keys_dev.notifier);
179
172 input_unregister_device(keys_dev.input); 180 input_unregister_device(keys_dev.input);
173 input_free_device(keys_dev.input);
174 181
175 return 0; 182 return 0;
176} 183}
@@ -188,4 +195,5 @@ module_platform_driver(nvec_kbd_driver);
188 195
189MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>"); 196MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
190MODULE_DESCRIPTION("NVEC keyboard driver"); 197MODULE_DESCRIPTION("NVEC keyboard driver");
198MODULE_ALIAS("platform:nvec-kbd");
191MODULE_LICENSE("GPL"); 199MODULE_LICENSE("GPL");
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index 296f7b9a8c8c..aacfcd6954a3 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -414,6 +414,7 @@ static int nvec_power_remove(struct platform_device *pdev)
414 struct nvec_power *power = platform_get_drvdata(pdev); 414 struct nvec_power *power = platform_get_drvdata(pdev);
415 415
416 cancel_delayed_work_sync(&power->poller); 416 cancel_delayed_work_sync(&power->poller);
417 nvec_unregister_notifier(power->nvec, &power->notifier);
417 switch (pdev->id) { 418 switch (pdev->id) {
418 case AC: 419 case AC:
419 power_supply_unregister(&nvec_psy); 420 power_supply_unregister(&nvec_psy);
diff --git a/drivers/staging/nvec/nvec_ps2.c b/drivers/staging/nvec/nvec_ps2.c
index aff6b9b9f9aa..06dbb02085a9 100644
--- a/drivers/staging/nvec/nvec_ps2.c
+++ b/drivers/staging/nvec/nvec_ps2.c
@@ -106,7 +106,7 @@ static int nvec_mouse_probe(struct platform_device *pdev)
106 struct serio *ser_dev; 106 struct serio *ser_dev;
107 char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 }; 107 char mouse_reset[] = { NVEC_PS2, SEND_COMMAND, PSMOUSE_RST, 3 };
108 108
109 ser_dev = devm_kzalloc(&pdev->dev, sizeof(struct serio), GFP_KERNEL); 109 ser_dev = kzalloc(sizeof(struct serio), GFP_KERNEL);
110 if (ser_dev == NULL) 110 if (ser_dev == NULL)
111 return -ENOMEM; 111 return -ENOMEM;
112 112
@@ -133,6 +133,11 @@ static int nvec_mouse_probe(struct platform_device *pdev)
133 133
134static int nvec_mouse_remove(struct platform_device *pdev) 134static int nvec_mouse_remove(struct platform_device *pdev)
135{ 135{
136 struct nvec_chip *nvec = dev_get_drvdata(pdev->dev.parent);
137
138 ps2_sendcommand(ps2_dev.ser_dev, DISABLE_MOUSE);
139 ps2_stopstreaming(ps2_dev.ser_dev);
140 nvec_unregister_notifier(nvec, &ps2_dev.notifier);
136 serio_unregister_port(ps2_dev.ser_dev); 141 serio_unregister_port(ps2_dev.ser_dev);
137 142
138 return 0; 143 return 0;
@@ -179,4 +184,5 @@ module_platform_driver(nvec_mouse_driver);
179 184
180MODULE_DESCRIPTION("NVEC mouse driver"); 185MODULE_DESCRIPTION("NVEC mouse driver");
181MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>"); 186MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
187MODULE_ALIAS("platform:nvec-mouse");
182MODULE_LICENSE("GPL"); 188MODULE_LICENSE("GPL");
diff --git a/drivers/staging/sep/Kconfig b/drivers/staging/sep/Kconfig
index 185b676d858a..aab945a316ea 100644
--- a/drivers/staging/sep/Kconfig
+++ b/drivers/staging/sep/Kconfig
@@ -1,6 +1,6 @@
1config DX_SEP 1config DX_SEP
2 tristate "Discretix SEP driver" 2 tristate "Discretix SEP driver"
3 depends on PCI 3 depends on PCI && CRYPTO
4 help 4 help
5 Discretix SEP driver; used for the security processor subsystem 5 Discretix SEP driver; used for the security processor subsystem
6 on board the Intel Mobile Internet Device and adds SEP availability 6 on board the Intel Mobile Internet Device and adds SEP availability
diff --git a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
index fe667dde43ce..386362c9964f 100644
--- a/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
+++ b/drivers/staging/ste_rmi4/synaptics_i2c_rmi4.c
@@ -1087,7 +1087,11 @@ static int synaptics_rmi4_resume(struct device *dev)
1087 unsigned char intr_status; 1087 unsigned char intr_status;
1088 struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev); 1088 struct synaptics_rmi4_data *rmi4_data = dev_get_drvdata(dev);
1089 1089
1090 regulator_enable(rmi4_data->regulator); 1090 retval = regulator_enable(rmi4_data->regulator);
1091 if (retval) {
1092 dev_err(dev, "Regulator enable failed (%d)\n", retval);
1093 return retval;
1094 }
1091 1095
1092 enable_irq(rmi4_data->i2c_client->irq); 1096 enable_irq(rmi4_data->i2c_client->irq);
1093 rmi4_data->touch_stopped = false; 1097 rmi4_data->touch_stopped = false;
diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
index f4f1bf7a30fd..c699a3058b39 100644
--- a/drivers/staging/vt6656/hostap.c
+++ b/drivers/staging/vt6656/hostap.c
@@ -133,7 +133,7 @@ static int hostap_disable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", 133 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
134 pDevice->dev->name, pDevice->apdev->name); 134 pDevice->dev->name, pDevice->apdev->name);
135 } 135 }
136 kfree(pDevice->apdev); 136 free_netdev(pDevice->apdev);
137 pDevice->apdev = NULL; 137 pDevice->apdev = NULL;
138 pDevice->bEnable8021x = false; 138 pDevice->bEnable8021x = false;
139 pDevice->bEnableHostWEP = false; 139 pDevice->bEnableHostWEP = false;
diff --git a/drivers/staging/vt6656/iwctl.c b/drivers/staging/vt6656/iwctl.c
index c335808211ee..d0cf7d8a20e5 100644
--- a/drivers/staging/vt6656/iwctl.c
+++ b/drivers/staging/vt6656/iwctl.c
@@ -1345,9 +1345,12 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
1345 return rc; 1345 return rc;
1346 } 1346 }
1347 1347
1348 spin_lock_irq(&pDevice->lock);
1349
1348 if (wrq->disabled) { 1350 if (wrq->disabled) {
1349 pDevice->ePSMode = WMAC_POWER_CAM; 1351 pDevice->ePSMode = WMAC_POWER_CAM;
1350 PSvDisablePowerSaving(pDevice); 1352 PSvDisablePowerSaving(pDevice);
1353 spin_unlock_irq(&pDevice->lock);
1351 return rc; 1354 return rc;
1352 } 1355 }
1353 if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { 1356 if ((wrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) {
@@ -1358,6 +1361,9 @@ int iwctl_siwpower(struct net_device *dev, struct iw_request_info *info,
1358 pDevice->ePSMode = WMAC_POWER_FAST; 1361 pDevice->ePSMode = WMAC_POWER_FAST;
1359 PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval); 1362 PSvEnablePowerSaving((void *)pDevice, pMgmt->wListenInterval);
1360 } 1363 }
1364
1365 spin_unlock_irq(&pDevice->lock);
1366
1361 switch (wrq->flags & IW_POWER_MODE) { 1367 switch (wrq->flags & IW_POWER_MODE) {
1362 case IW_POWER_UNICAST_R: 1368 case IW_POWER_UNICAST_R:
1363 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n"); 1369 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWPOWER: IW_POWER_UNICAST_R \n");
diff --git a/drivers/staging/zcache/ramster/ramster-howto.txt b/drivers/staging/zcache/ramster/ramster-howto.txt
new file mode 100644
index 000000000000..7b1ee3bbfdd5
--- /dev/null
+++ b/drivers/staging/zcache/ramster/ramster-howto.txt
@@ -0,0 +1,366 @@
1 RAMSTER HOW-TO
2
3Author: Dan Magenheimer
4Ramster maintainer: Konrad Wilk <konrad.wilk@oracle.com>
5
6This is a HOWTO document for ramster which, as of this writing, is in
7the kernel as a subdirectory of zcache in drivers/staging, called ramster.
8(Zcache can be built with or without ramster functionality.) If enabled
9and properly configured, ramster allows memory capacity load balancing
10across multiple machines in a cluster. Further, the ramster code serves
11as an example of asynchronous access for zcache (as well as cleancache and
12frontswap) that may prove useful for future transcendent memory
13implementations, such as KVM and NVRAM. While ramster works today on
14any network connection that supports kernel sockets, its features may
15become more interesting on future high-speed fabrics/interconnects.
16
17Ramster requires both kernel and userland support. The userland support,
18called ramster-tools, is known to work with EL6-based distros, but is a
19set of poorly-hacked slightly-modified cluster tools based on ocfs2, which
20includes an init file, a config file, and a userland binary that interfaces
21to the kernel. This state of userland support reflects the abysmal userland
22skills of this suitably-embarrassed author; any help/patches to turn
23ramster-tools into more distributable rpms/debs useful for a wider range
24of distros would be appreciated. The source RPM that can be used as a
25starting point is available at:
26 http://oss.oracle.com/projects/tmem/files/RAMster/
27
28As a result of this author's ignorance, userland setup described in this
29HOWTO assumes an EL6 distro and is described in EL6 syntax. Apologies
30if this offends anyone!
31
32Kernel support has only been tested on x86_64. Systems with an active
33ocfs2 filesystem should work, but since ramster leverages a lot of
34code from ocfs2, there may be latent issues. A kernel configuration that
35includes CONFIG_OCFS2_FS should build OK, and should certainly run OK
36if no ocfs2 filesystem is mounted.
37
38This HOWTO demonstrates memory capacity load balancing for a two-node
39cluster, where one node called the "local" node becomes overcommitted
40and the other node called the "remote" node provides additional RAM
41capacity for use by the local node. Ramster is capable of more complex
42topologies; see the last section titled "ADVANCED RAMSTER TOPOLOGIES".
43
44If you find any terms in this HOWTO unfamiliar or don't understand the
45motivation for ramster, the following LWN reading is recommended:
46-- Transcendent Memory in a Nutshell (lwn.net/Articles/454795)
47-- The future calculus of memory management (lwn.net/Articles/475681)
48And since ramster is built on top of zcache, this article may be helpful:
49-- In-kernel memory compression (lwn.net/Articles/545244)
50
51Now that you've memorized the contents of those articles, let's get started!
52
53A. PRELIMINARY
54
551) Install two x86_64 Linux systems that are known to work when
56 upgraded to a recent upstream Linux kernel version.
57
58On each system:
59
602) Configure, build and install, then boot Linux, just to ensure it
61 can be done with an unmodified upstream kernel. Confirm you booted
62 the upstream kernel with "uname -a".
63
643) If you plan to do any performance testing, or if you do not plan to
65 test only swapping, the "WasActive" patch is also highly recommended.
66 (Search lkml.org for WasActive, apply the patch, rebuild your kernel.)
67 For a demo or simple testing, the patch can be ignored.
68
694) Install ramster-tools as root. An x86_64 rpm for EL6-based systems
70 can be found at:
71 http://oss.oracle.com/projects/tmem/files/RAMster/
72 (Sorry but for now, non-EL6 users must recreate ramster-tools on
73 their own from source. See above.)
74
755) Ensure that debugfs is mounted at each boot. Examples below assume it
76 is mounted at /sys/kernel/debug.
77
78B. BUILDING RAMSTER INTO THE KERNEL
79
80Do the following on each system:
81
821) Using the kernel configuration mechanism of your choice, change
83 your config to include:
84
85 CONFIG_CLEANCACHE=y
86 CONFIG_FRONTSWAP=y
87 CONFIG_STAGING=y
88 CONFIG_CONFIGFS_FS=y # NOTE: MUST BE y, not m
89 CONFIG_ZCACHE=y
90 CONFIG_RAMSTER=y
91
92 For a linux-3.10 or later kernel, you should also set:
93
94 CONFIG_ZCACHE_DEBUG=y
95 CONFIG_RAMSTER_DEBUG=y
96
97 Before building the kernel, please double-check your kernel config
98 file to ensure all of the settings are correct.
99
1002) Build this kernel and change your boot file (e.g. /etc/grub.conf)
101 so that the new kernel will boot.
102
1033) Add "zcache" and "ramster" as kernel boot parameters for the new kernel.
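
   For example (purely an illustrative sketch; the kernel image name,
   root device and other arguments will differ on your systems), the
   kernel line in /etc/grub.conf might then end with:

       kernel /vmlinuz-3.10.0-ramster ro root=/dev/sda1 zcache ramster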
104
1054) Reboot each system approximately simultaneously.
106
1075) Check dmesg to ensure there are some messages from ramster, prefixed
108 by "ramster:"
109
110 # dmesg | grep ramster
111
112 You should also see a lot of files in:
113
114 # ls /sys/kernel/debug/zcache
115 # ls /sys/kernel/debug/ramster
116
117 These are mostly counters for various zcache and ramster activities.
118 You should also see files in:
119
120 # ls /sys/kernel/mm/ramster
121
122 These are sysfs files that control ramster as we shall see.
123
124 Ramster now will act as a single-system zcache on each system
125 but doesn't yet know anything about the cluster so can't yet do
126 anything remotely.
127
128C. CONFIGURING THE RAMSTER CLUSTER
129
130This part can be error prone unless you are familiar with clustering
131filesystems. We need to describe the cluster in a /etc/ramster.conf
132file and the init scripts that parse it are extremely picky about
133the syntax.
134
1351) Create a /etc/ramster.conf file and ensure it is identical on both
136 systems. This file mimics the ocfs2 format and there is a good amount
137 of documentation that can be searched for ocfs2.conf, but you can use:
138
139 cluster:
140 name = ramster
141 node_count = 2
142 node:
143 name = system1
144 cluster = ramster
145 number = 0
146 ip_address = my.ip.ad.r1
147 ip_port = 7777
148 node:
149 name = system2
150 cluster = ramster
151 number = 1
152 ip_address = my.ip.ad.r2
153 ip_port = 7777
154
155 You must ensure that the "name" field in the file exactly matches
156 the output of "hostname" on each system; if "hostname" shows a
157 fully-qualified hostname, ensure the name is fully qualified in
158 /etc/ramster.conf. Obviously, substitute my.ip.ad.rx with proper
159 ip addresses.
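
   As a quick sanity check (using the illustrative node names from the
   example above), "hostname" should print exactly the name given in
   /etc/ramster.conf, e.g. on the first system:

       # hostname
       system1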
160
1612) Enable the ramster service and configure it. If you used the
162 EL6 ramster-tools, this would be:
163
164 # chkconfig --add ramster
165 # service ramster configure
166
167 Set "load on boot" to "y", cluster to start is "ramster" (or whatever
168 name you chose in ramster.conf), heartbeat dead threshold as "500",
169 network idle timeout as "1000000". Leave the others as default.
170
1713) Reboot both systems. After reboot, try (assuming EL6 ramster-tools):
172
173 # service ramster status
174
175 You should see "Checking RAMSTER cluster "ramster": Online". If you do
176 not, something is wrong and ramster will not work. Note that you
177 should also see that the driver for "configfs" is loaded and mounted,
178 the driver for ocfs2_dlmfs is not loaded, and some numbers for network
179 parameters. You will also see "Checking RAMSTER heartbeat: Not active".
180 That's all OK.
181
1824) Now you need to start the cluster heartbeat; the cluster is not "up"
183 until all nodes detect a heartbeat. In a real cluster, heartbeat detection
184 is done via a cluster filesystem, but ramster doesn't require one. Some
185 hack-y kernel code in ramster can start the heartbeat for you though if
186 you tell it what nodes are "up". To enable the heartbeat, do:
187
188 # echo 0 > /sys/kernel/mm/ramster/manual_node_up
189 # echo 1 > /sys/kernel/mm/ramster/manual_node_up
190
191 This must be done on BOTH nodes and, to avoid timeouts, must be done
192 approximately concurrently on both nodes. On an EL6 system, it is
193 convenient to put these lines in /etc/rc.local. To confirm that the
194 cluster is now up, on both systems do:
195
196 # dmesg | grep ramster
197
198 You should see ramster "Accepted connection" messages in dmesg on both
199 nodes after this. Note that if you check userland status again with
200
201 # service ramster status
202
203 you will still see "Checking RAMSTER heartbeat: Not active". That's
204 still OK... the ramster kernel heartbeat hack doesn't communicate to
205 userland.
206
2075) You now must tell each node the node to which it should "remotify" pages.
208 On this two node cluster, we will assume the "local" node, node 0, has
209 memory overcommitted and will use ramster to utilize RAM capacity on
210 the "remote node", node 1. To configure this, on node 0, you do:
211
212 # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
213
214 You should see "ramster: node 1 set as remotification target" in dmesg
215 on node 0. Again, on EL6, /etc/rc.local is a good place to put this
216 on node 0 so you don't forget to do it at each boot.
217
2186) One more step: By default, the ramster code does not "remotify" any
219 pages; this is primarily for testing purposes, but sometimes it is
220 useful. This may change in the future, but for now, on node 0, you do:
221
222 # echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
223 # echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable
224
225 The first enables remotifying swap (persistent, aka frontswap) pages,
226 the second enables remotifying of page cache (ephemeral, cleancache)
227 pages.
228
229 On EL6, these lines can also be put in /etc/rc.local (AFTER the
230 node_up lines), or at the beginning of a script that runs a workload.
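
   As a hedged sketch, the full set of node 0 lines accumulated in
   /etc/rc.local over steps 4) through 6) of this two-node example
   would then be (node numbers are the ones used above):

       echo 0 > /sys/kernel/mm/ramster/manual_node_up
       echo 1 > /sys/kernel/mm/ramster/manual_node_up
       echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
       echo 1 > /sys/kernel/mm/ramster/pers_remotify_enable
       echo 1 > /sys/kernel/mm/ramster/eph_remotify_enable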
231
2327) Note that most testing has been done with both/all machines booted
233 roughly simultaneously to avoid cluster timeouts. Ideally, you should
234 do this too unless you are trying to break ramster rather than just
235 use it. ;-)
236
237D. TESTING RAMSTER
238
2391) Note that ramster has no value unless pages get "remotified". For
240 swap/frontswap/persistent pages, this doesn't happen unless/until
241 the workload would cause swapping to occur, at which point pages
242 are put into frontswap/zcache, and the remotification thread starts
243 working. To get to the point where the system swaps, you either
244 need a workload for which the working set exceeds the RAM in the
245 system; or you need to somehow reduce the amount of RAM one of
246 the systems sees. The latter is easy when testing in a VM, but
247 harder on physical systems. In some cases, "mem=xxxM" on the
248 kernel command line restricts memory, but for some values of xxx
249 the kernel may fail to boot. One may also try creating a fixed
250 RAMdisk, doing nothing with it, but ensuring that it eats up a fixed
251 amount of RAM.
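
   As one hedged illustration of the fixed-RAMdisk approach (the brd
   module and the sizes shown are assumptions; adjust to taste), the
   following pins roughly 1GB of RAM by filling a ram-backed block
   device:

       # modprobe brd rd_nr=1 rd_size=1048576
       # dd if=/dev/zero of=/dev/ram0 bs=1M count=1024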
252
2532) To see if ramster is working, on the "remote node", node 1, try:
254
255 # grep . /sys/kernel/debug/ramster/foreign_*
256 # # note, that is space-dot-space between grep and the pathname
257
258 to monitor the number (and max) of ephemeral and persistent pages
259 that ramster has sent. If these stay at zero, ramster is not working
260 either because the workload on the local node (node 0) isn't creating
261 enough memory pressure or because "remotifying" isn't working. On the
262 local system, node 0, you can watch lots of useful information also.
263 Try:
264
265 grep . /sys/kernel/debug/zcache/*pageframes* \
266 /sys/kernel/debug/zcache/*zbytes* \
267 /sys/kernel/debug/zcache/*zpages* \
268 /sys/kernel/debug/ramster/*remote*
269
270 Of particular note are the remote_*_pages_succ_get counters. These
271 show how many disk reads and/or disk writes have been avoided on the
272 overcommitted local system by storing pages remotely using ramster.
273
274 At the risk of information overload, you can also grep:
275
276 /sys/kernel/debug/cleancache/* and /sys/kernel/debug/frontswap/*
277
278 These show, for example, how many disk reads and/or disk writes have
279 been avoided by using zcache to optimize RAM on the local system.
280
281
282AUTOMATIC SWAP REPATRIATION
283
284You may notice that while the systems are idle, the foreign persistent
285page count on the remote machine slowly decreases. This is because
286ramster implements "frontswap selfshrinking": When possible, swap
287pages that have been remotified are slowly repatriated to the local
288machine. This is so that local RAM can be used when possible and
289so that, in case of remote machine crash, the probability of loss
290of data is reduced.
291
292REBOOTING / POWEROFF
293
294If a system is shut down while some of its swap pages still reside
295on a remote system, the system may lock up during the shutdown
296sequence. This will occur if the network is shut down before the
297swap mechanism is shut down, which is the default ordering on many
298distros. To avoid this annoying problem, simply shut off the swap
299subsystem before starting the shutdown sequence, e.g.:
300
301 # swapoff -a
302 # reboot
303
304Ideally, this swapoff-before-ifdown ordering should be enforced permanently
305using shutdown scripts.
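
One minimal sketch of such a script for an EL6-style SysV init (the
service name and priorities here are illustrative, not part of
ramster-tools) is:

   # cat /etc/init.d/ramster-swapoff
   #!/bin/sh
   # chkconfig: 2345 99 01
   # description: turn swap off before networking is shut down
   case "$1" in
       start) touch /var/lock/subsys/ramster-swapoff ;;
       stop)  swapoff -a
              rm -f /var/lock/subsys/ramster-swapoff ;;
   esac

Register it with "chkconfig --add ramster-swapoff" so that its stop
action runs early in the shutdown sequence, before the network scripts.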
306
307KNOWN PROBLEMS
308
3091) You may periodically see messages such as:
310
311 ramster_r2net, message length problem
312
313 This is harmless but indicates that a node is sending messages
314 containing compressed pages that exceed the maximum for zcache
315 (PAGE_SIZE*15/16). The sender side needs to be fixed.
316
3172) If you see a "No longer connected to node..." message or a "No connection
318 established with node X after N seconds" message, you may
319 be in an unrecoverable state. If you are certain all of the
320 appropriate cluster configuration steps described above have been
321 performed, try rebooting the two servers concurrently to see if
322 the cluster starts.
323
324 Note that "Connection to node... shutdown, state 7" is an intermediate
325 connection state. As long as you later see "Accepted connection", the
326 intermediate states are harmless.
327
3283) There are known issues in counting certain values. As a result
329 you may see periodic warnings from the kernel. Almost always you
330 will see "ramster: bad accounting for XXX". There are also "WARN_ONCE"
331 messages. If you see kernel warnings with a tombstone, please report
332 them. They are harmless but reflect bugs that need to be eventually fixed.
333
334ADVANCED RAMSTER TOPOLOGIES
335
336The kernel code for ramster can support up to eight nodes in a cluster,
337but no testing has been done with more than three nodes.
338
339In the example described above, the "remote" node serves as a RAM
340overflow for the "local" node. This can be made symmetric by appropriate
341settings of the sysfs remote_target_nodenum file. For example, by setting:
342
343 # echo 1 > /sys/kernel/mm/ramster/remote_target_nodenum
344
345on node 0, and
346
347 # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
348
349on node 1, each node can serve as a RAM overflow for the other.
350
351For more than two nodes, a "RAM server" can be configured. For a
352three node system, set:
353
354 # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
355
356on node 1, and
357
358 # echo 0 > /sys/kernel/mm/ramster/remote_target_nodenum
359
360on node 2. Then node 0 is a RAM server for node 1 and node 2.
361
362In this implementation of ramster, any remote node is potentially a single
363point of failure (SPOF). Though the probability of failure is reduced
364by automatic swap repatriation (see above), a proposed future enhancement
365to ramster improves high-availability for the cluster by sending a copy
366of each page of data to two other nodes. Patches welcome!
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 522cb8e55142..dcceed29d31a 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1922,15 +1922,15 @@ out:
1922 1922
1923#ifdef CONFIG_ZCACHE_MODULE 1923#ifdef CONFIG_ZCACHE_MODULE
1924#ifdef CONFIG_RAMSTER 1924#ifdef CONFIG_RAMSTER
1925module_param(ramster_enabled, int, S_IRUGO); 1925module_param(ramster_enabled, bool, S_IRUGO);
1926module_param(disable_frontswap_selfshrink, int, S_IRUGO); 1926module_param(disable_frontswap_selfshrink, int, S_IRUGO);
1927#endif 1927#endif
1928module_param(disable_cleancache, int, S_IRUGO); 1928module_param(disable_cleancache, bool, S_IRUGO);
1929module_param(disable_frontswap, int, S_IRUGO); 1929module_param(disable_frontswap, bool, S_IRUGO);
1930#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS 1930#ifdef FRONTSWAP_HAS_EXCLUSIVE_GETS
1931module_param(frontswap_has_exclusive_gets, bool, S_IRUGO); 1931module_param(frontswap_has_exclusive_gets, bool, S_IRUGO);
1932#endif 1932#endif
1933module_param(disable_frontswap_ignore_nonactive, int, S_IRUGO); 1933module_param(disable_frontswap_ignore_nonactive, bool, S_IRUGO);
1934module_param(zcache_comp_name, charp, S_IRUGO); 1934module_param(zcache_comp_name, charp, S_IRUGO);
1935module_init(zcache_init); 1935module_init(zcache_init);
1936MODULE_LICENSE("GPL"); 1936MODULE_LICENSE("GPL");
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index ffbc6a94be52..262ef1f23b38 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1250,7 +1250,7 @@ static u32 iscsit_do_crypto_hash_sg(
1250 1250
1251static void iscsit_do_crypto_hash_buf( 1251static void iscsit_do_crypto_hash_buf(
1252 struct hash_desc *hash, 1252 struct hash_desc *hash,
1253 unsigned char *buf, 1253 const void *buf,
1254 u32 payload_length, 1254 u32 payload_length,
1255 u32 padding, 1255 u32 padding,
1256 u8 *pad_bytes, 1256 u8 *pad_bytes,
@@ -2524,9 +2524,8 @@ static int iscsit_send_conn_drop_async_message(
2524 if (conn->conn_ops->HeaderDigest) { 2524 if (conn->conn_ops->HeaderDigest) {
2525 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2525 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2526 2526
2527 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2527 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2528 (unsigned char *)hdr, ISCSI_HDR_LEN, 2528 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2529 0, NULL, (u8 *)header_digest);
2530 2529
2531 cmd->tx_size += ISCSI_CRC_LEN; 2530 cmd->tx_size += ISCSI_CRC_LEN;
2532 pr_debug("Attaching CRC32C HeaderDigest to" 2531 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2662,9 +2661,8 @@ static int iscsit_send_datain(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2662 if (conn->conn_ops->HeaderDigest) { 2661 if (conn->conn_ops->HeaderDigest) {
2663 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2662 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2664 2663
2665 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2664 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
2666 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 2665 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2667 0, NULL, (u8 *)header_digest);
2668 2666
2669 iov[0].iov_len += ISCSI_CRC_LEN; 2667 iov[0].iov_len += ISCSI_CRC_LEN;
2670 tx_size += ISCSI_CRC_LEN; 2668 tx_size += ISCSI_CRC_LEN;
@@ -2841,9 +2839,8 @@ iscsit_send_logout(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2841 if (conn->conn_ops->HeaderDigest) { 2839 if (conn->conn_ops->HeaderDigest) {
2842 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2840 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2843 2841
2844 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2842 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, &cmd->pdu[0],
2845 (unsigned char *)&cmd->pdu[0], ISCSI_HDR_LEN, 2843 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2846 0, NULL, (u8 *)header_digest);
2847 2844
2848 iov[0].iov_len += ISCSI_CRC_LEN; 2845 iov[0].iov_len += ISCSI_CRC_LEN;
2849 tx_size += ISCSI_CRC_LEN; 2846 tx_size += ISCSI_CRC_LEN;
@@ -2900,9 +2897,8 @@ static int iscsit_send_unsolicited_nopin(
2900 if (conn->conn_ops->HeaderDigest) { 2897 if (conn->conn_ops->HeaderDigest) {
2901 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2898 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2902 2899
2903 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2900 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2904 (unsigned char *)hdr, ISCSI_HDR_LEN, 2901 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2905 0, NULL, (u8 *)header_digest);
2906 2902
2907 tx_size += ISCSI_CRC_LEN; 2903 tx_size += ISCSI_CRC_LEN;
2908 pr_debug("Attaching CRC32C HeaderDigest to" 2904 pr_debug("Attaching CRC32C HeaderDigest to"
@@ -2949,9 +2945,8 @@ iscsit_send_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
2949 if (conn->conn_ops->HeaderDigest) { 2945 if (conn->conn_ops->HeaderDigest) {
2950 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 2946 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
2951 2947
2952 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 2948 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
2953 (unsigned char *)hdr, ISCSI_HDR_LEN, 2949 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
2954 0, NULL, (u8 *)header_digest);
2955 2950
2956 iov[0].iov_len += ISCSI_CRC_LEN; 2951 iov[0].iov_len += ISCSI_CRC_LEN;
2957 tx_size += ISCSI_CRC_LEN; 2952 tx_size += ISCSI_CRC_LEN;
@@ -3040,9 +3035,8 @@ static int iscsit_send_r2t(
3040 if (conn->conn_ops->HeaderDigest) { 3035 if (conn->conn_ops->HeaderDigest) {
3041 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3036 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3042 3037
3043 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3038 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3044 (unsigned char *)hdr, ISCSI_HDR_LEN, 3039 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3045 0, NULL, (u8 *)header_digest);
3046 3040
3047 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3041 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3048 tx_size += ISCSI_CRC_LEN; 3042 tx_size += ISCSI_CRC_LEN;
@@ -3256,9 +3250,8 @@ static int iscsit_send_response(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3256 if (conn->conn_ops->HeaderDigest) { 3250 if (conn->conn_ops->HeaderDigest) {
3257 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3251 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3258 3252
3259 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3253 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->pdu,
3260 (unsigned char *)cmd->pdu, ISCSI_HDR_LEN, 3254 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3261 0, NULL, (u8 *)header_digest);
3262 3255
3263 iov[0].iov_len += ISCSI_CRC_LEN; 3256 iov[0].iov_len += ISCSI_CRC_LEN;
3264 tx_size += ISCSI_CRC_LEN; 3257 tx_size += ISCSI_CRC_LEN;
@@ -3329,9 +3322,8 @@ iscsit_send_task_mgt_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
3329 if (conn->conn_ops->HeaderDigest) { 3322 if (conn->conn_ops->HeaderDigest) {
3330 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3323 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3331 3324
3332 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3325 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3333 (unsigned char *)hdr, ISCSI_HDR_LEN, 3326 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3334 0, NULL, (u8 *)header_digest);
3335 3327
3336 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN; 3328 cmd->iov_misc[0].iov_len += ISCSI_CRC_LEN;
3337 tx_size += ISCSI_CRC_LEN; 3329 tx_size += ISCSI_CRC_LEN;
@@ -3504,9 +3496,8 @@ static int iscsit_send_text_rsp(
3504 if (conn->conn_ops->HeaderDigest) { 3496 if (conn->conn_ops->HeaderDigest) {
3505 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3497 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3506 3498
3507 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3499 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3508 (unsigned char *)hdr, ISCSI_HDR_LEN, 3500 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3509 0, NULL, (u8 *)header_digest);
3510 3501
3511 iov[0].iov_len += ISCSI_CRC_LEN; 3502 iov[0].iov_len += ISCSI_CRC_LEN;
3512 tx_size += ISCSI_CRC_LEN; 3503 tx_size += ISCSI_CRC_LEN;
@@ -3557,11 +3548,11 @@ static int iscsit_send_reject(
3557 struct iscsi_cmd *cmd, 3548 struct iscsi_cmd *cmd,
3558 struct iscsi_conn *conn) 3549 struct iscsi_conn *conn)
3559{ 3550{
3560 u32 iov_count = 0, tx_size = 0; 3551 struct iscsi_reject *hdr = (struct iscsi_reject *)&cmd->pdu[0];
3561 struct iscsi_reject *hdr;
3562 struct kvec *iov; 3552 struct kvec *iov;
3553 u32 iov_count = 0, tx_size;
3563 3554
3564 iscsit_build_reject(cmd, conn, (struct iscsi_reject *)&cmd->pdu[0]); 3555 iscsit_build_reject(cmd, conn, hdr);
3565 3556
3566 iov = &cmd->iov_misc[0]; 3557 iov = &cmd->iov_misc[0];
3567 iov[iov_count].iov_base = cmd->pdu; 3558 iov[iov_count].iov_base = cmd->pdu;
@@ -3574,9 +3565,8 @@ static int iscsit_send_reject(
3574 if (conn->conn_ops->HeaderDigest) { 3565 if (conn->conn_ops->HeaderDigest) {
3575 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN]; 3566 u32 *header_digest = (u32 *)&cmd->pdu[ISCSI_HDR_LEN];
3576 3567
3577 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3568 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, hdr,
3578 (unsigned char *)hdr, ISCSI_HDR_LEN, 3569 ISCSI_HDR_LEN, 0, NULL, (u8 *)header_digest);
3579 0, NULL, (u8 *)header_digest);
3580 3570
3581 iov[0].iov_len += ISCSI_CRC_LEN; 3571 iov[0].iov_len += ISCSI_CRC_LEN;
3582 tx_size += ISCSI_CRC_LEN; 3572 tx_size += ISCSI_CRC_LEN;
@@ -3585,9 +3575,8 @@ static int iscsit_send_reject(
3585 } 3575 }
3586 3576
3587 if (conn->conn_ops->DataDigest) { 3577 if (conn->conn_ops->DataDigest) {
3588 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, 3578 iscsit_do_crypto_hash_buf(&conn->conn_tx_hash, cmd->buf_ptr,
3589 (unsigned char *)cmd->buf_ptr, ISCSI_HDR_LEN, 3579 ISCSI_HDR_LEN, 0, NULL, (u8 *)&cmd->data_crc);
3590 0, NULL, (u8 *)&cmd->data_crc);
3591 3580
3592 iov[iov_count].iov_base = &cmd->data_crc; 3581 iov[iov_count].iov_base = &cmd->data_crc;
3593 iov[iov_count++].iov_len = ISCSI_CRC_LEN; 3582 iov[iov_count++].iov_len = ISCSI_CRC_LEN;
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index 7816af6cdd12..40d9dbca987b 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -823,7 +823,7 @@ static int iscsit_attach_ooo_cmdsn(
823 /* 823 /*
824 * CmdSN is greater than the tail of the list. 824 * CmdSN is greater than the tail of the list.
825 */ 825 */
826 if (ooo_tail->cmdsn < ooo_cmdsn->cmdsn) 826 if (iscsi_sna_lt(ooo_tail->cmdsn, ooo_cmdsn->cmdsn))
827 list_add_tail(&ooo_cmdsn->ooo_list, 827 list_add_tail(&ooo_cmdsn->ooo_list,
828 &sess->sess_ooo_cmdsn_list); 828 &sess->sess_ooo_cmdsn_list);
829 else { 829 else {
@@ -833,11 +833,12 @@ static int iscsit_attach_ooo_cmdsn(
833 */ 833 */
834 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list, 834 list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,
835 ooo_list) { 835 ooo_list) {
836 if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) 836 if (iscsi_sna_lt(ooo_tmp->cmdsn, ooo_cmdsn->cmdsn))
837 continue; 837 continue;
838 838
839 /* Insert before this entry */
839 list_add(&ooo_cmdsn->ooo_list, 840 list_add(&ooo_cmdsn->ooo_list,
840 &ooo_tmp->ooo_list); 841 ooo_tmp->ooo_list.prev);
841 break; 842 break;
842 } 843 }
843 } 844 }
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c
index f690be9e5293..c2185fc31136 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.c
+++ b/drivers/target/iscsi/iscsi_target_parameters.c
@@ -436,7 +436,7 @@ int iscsi_create_default_params(struct iscsi_param_list **param_list_ptr)
436 /* 436 /*
437 * Extra parameters for ISER from RFC-5046 437 * Extra parameters for ISER from RFC-5046
438 */ 438 */
439 param = iscsi_set_default_param(pl, RDMAEXTENTIONS, INITIAL_RDMAEXTENTIONS, 439 param = iscsi_set_default_param(pl, RDMAEXTENSIONS, INITIAL_RDMAEXTENSIONS,
440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH, 440 PHASE_OPERATIONAL, SCOPE_SESSION_WIDE, SENDER_BOTH,
441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY); 441 TYPERANGE_BOOL_AND, USE_LEADING_ONLY);
442 if (!param) 442 if (!param)
@@ -529,7 +529,7 @@ int iscsi_set_keys_to_negotiate(
529 SET_PSTATE_NEGOTIATE(param); 529 SET_PSTATE_NEGOTIATE(param);
530 } else if (!strcmp(param->name, OFMARKINT)) { 530 } else if (!strcmp(param->name, OFMARKINT)) {
531 SET_PSTATE_NEGOTIATE(param); 531 SET_PSTATE_NEGOTIATE(param);
532 } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 532 } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
533 if (iser == true) 533 if (iser == true)
534 SET_PSTATE_NEGOTIATE(param); 534 SET_PSTATE_NEGOTIATE(param);
535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) { 535 } else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) {
@@ -580,7 +580,7 @@ int iscsi_set_keys_irrelevant_for_discovery(
580 param->state &= ~PSTATE_NEGOTIATE; 580 param->state &= ~PSTATE_NEGOTIATE;
581 else if (!strcmp(param->name, OFMARKINT)) 581 else if (!strcmp(param->name, OFMARKINT))
582 param->state &= ~PSTATE_NEGOTIATE; 582 param->state &= ~PSTATE_NEGOTIATE;
583 else if (!strcmp(param->name, RDMAEXTENTIONS)) 583 else if (!strcmp(param->name, RDMAEXTENSIONS))
584 param->state &= ~PSTATE_NEGOTIATE; 584 param->state &= ~PSTATE_NEGOTIATE;
585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH)) 585 else if (!strcmp(param->name, INITIATORRECVDATASEGMENTLENGTH))
586 param->state &= ~PSTATE_NEGOTIATE; 586 param->state &= ~PSTATE_NEGOTIATE;
@@ -1977,7 +1977,7 @@ void iscsi_set_session_parameters(
1977 ops->SessionType = !strcmp(param->value, DISCOVERY); 1977 ops->SessionType = !strcmp(param->value, DISCOVERY);
1978 pr_debug("SessionType: %s\n", 1978 pr_debug("SessionType: %s\n",
1979 param->value); 1979 param->value);
1980 } else if (!strcmp(param->name, RDMAEXTENTIONS)) { 1980 } else if (!strcmp(param->name, RDMAEXTENSIONS)) {
1981 ops->RDMAExtensions = !strcmp(param->value, YES); 1981 ops->RDMAExtensions = !strcmp(param->value, YES);
1982 pr_debug("RDMAExtensions: %s\n", 1982 pr_debug("RDMAExtensions: %s\n",
1983 param->value); 1983 param->value);
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h
index f31b9c4b83f2..915b06798505 100644
--- a/drivers/target/iscsi/iscsi_target_parameters.h
+++ b/drivers/target/iscsi/iscsi_target_parameters.h
@@ -91,7 +91,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
91/* 91/*
92 * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046 92 * Parameter names of iSCSI Extentions for RDMA (iSER). See RFC-5046
93 */ 93 */
94#define RDMAEXTENTIONS "RDMAExtensions" 94#define RDMAEXTENSIONS "RDMAExtensions"
95#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength" 95#define INITIATORRECVDATASEGMENTLENGTH "InitiatorRecvDataSegmentLength"
96#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength" 96#define TARGETRECVDATASEGMENTLENGTH "TargetRecvDataSegmentLength"
97 97
@@ -142,7 +142,7 @@ extern void iscsi_set_session_parameters(struct iscsi_sess_ops *,
142/* 142/*
143 * Initial values for iSER parameters following RFC-5046 Section 6 143 * Initial values for iSER parameters following RFC-5046 Section 6
144 */ 144 */
145#define INITIAL_RDMAEXTENTIONS NO 145#define INITIAL_RDMAEXTENSIONS NO
146#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144" 146#define INITIAL_INITIATORRECVDATASEGMENTLENGTH "262144"
147#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192" 147#define INITIAL_TARGETRECVDATASEGMENTLENGTH "8192"
148 148
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 43b7ac6c5b1c..4a8bd36d3958 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -1584,6 +1584,13 @@ static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
1584 .store = target_core_store_dev_udev_path, 1584 .store = target_core_store_dev_udev_path,
1585}; 1585};
1586 1586
1587static ssize_t target_core_show_dev_enable(void *p, char *page)
1588{
1589 struct se_device *dev = p;
1590
1591 return snprintf(page, PAGE_SIZE, "%d\n", !!(dev->dev_flags & DF_CONFIGURED));
1592}
1593
1587static ssize_t target_core_store_dev_enable( 1594static ssize_t target_core_store_dev_enable(
1588 void *p, 1595 void *p,
1589 const char *page, 1596 const char *page,
@@ -1609,8 +1616,8 @@ static ssize_t target_core_store_dev_enable(
1609static struct target_core_configfs_attribute target_core_attr_dev_enable = { 1616static struct target_core_configfs_attribute target_core_attr_dev_enable = {
1610 .attr = { .ca_owner = THIS_MODULE, 1617 .attr = { .ca_owner = THIS_MODULE,
1611 .ca_name = "enable", 1618 .ca_name = "enable",
1612 .ca_mode = S_IWUSR }, 1619 .ca_mode = S_IRUGO | S_IWUSR },
1613 .show = NULL, 1620 .show = target_core_show_dev_enable,
1614 .store = target_core_store_dev_enable, 1621 .store = target_core_store_dev_enable,
1615}; 1622};
1616 1623
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 2e4d655471bc..4630481b6043 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -68,7 +68,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
68 struct se_dev_entry *deve = se_cmd->se_deve; 68 struct se_dev_entry *deve = se_cmd->se_deve;
69 69
70 deve->total_cmds++; 70 deve->total_cmds++;
71 deve->total_bytes += se_cmd->data_length;
72 71
73 if ((se_cmd->data_direction == DMA_TO_DEVICE) && 72 if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
74 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) { 73 (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
@@ -85,8 +84,6 @@ transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
85 else if (se_cmd->data_direction == DMA_FROM_DEVICE) 84 else if (se_cmd->data_direction == DMA_FROM_DEVICE)
86 deve->read_bytes += se_cmd->data_length; 85 deve->read_bytes += se_cmd->data_length;
87 86
88 deve->deve_cmds++;
89
90 se_lun = deve->se_lun; 87 se_lun = deve->se_lun;
91 se_cmd->se_lun = deve->se_lun; 88 se_cmd->se_lun = deve->se_lun;
92 se_cmd->pr_res_key = deve->pr_res_key; 89 se_cmd->pr_res_key = deve->pr_res_key;
@@ -275,17 +272,6 @@ int core_free_device_list_for_node(
275 return 0; 272 return 0;
276} 273}
277 274
278void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
279{
280 struct se_dev_entry *deve;
281 unsigned long flags;
282
283 spin_lock_irqsave(&se_nacl->device_list_lock, flags);
284 deve = se_nacl->device_list[se_cmd->orig_fe_lun];
285 deve->deve_cmds--;
286 spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
287}
288
289void core_update_device_list_access( 275void core_update_device_list_access(
290 u32 mapped_lun, 276 u32 mapped_lun,
291 u32 lun_access, 277 u32 lun_access,
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 58ed683e04ae..1b1d544e927a 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -153,10 +153,6 @@ static int fd_configure_device(struct se_device *dev)
153 struct request_queue *q = bdev_get_queue(inode->i_bdev); 153 struct request_queue *q = bdev_get_queue(inode->i_bdev);
154 unsigned long long dev_size; 154 unsigned long long dev_size;
155 155
156 dev->dev_attrib.hw_block_size =
157 bdev_logical_block_size(inode->i_bdev);
158 dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
159
160 /* 156 /*
161 * Determine the number of bytes from i_size_read() minus 157 * Determine the number of bytes from i_size_read() minus
162 * one (1) logical sector from underlying struct block_device 158 * one (1) logical sector from underlying struct block_device
@@ -203,9 +199,6 @@ static int fd_configure_device(struct se_device *dev)
203 goto fail; 199 goto fail;
204 } 200 }
205 201
206 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
207 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
208
209 /* 202 /*
210 * Limit UNMAP emulation to 8k Number of LBAs (NoLB) 203 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
211 */ 204 */
@@ -226,6 +219,8 @@ static int fd_configure_device(struct se_device *dev)
226 219
227 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; 220 fd_dev->fd_block_size = dev->dev_attrib.hw_block_size;
228 221
222 dev->dev_attrib.hw_block_size = FD_BLOCKSIZE;
223 dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
229 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; 224 dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
230 225
231 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) { 226 if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 07f5f94634bb..aa1620abec6d 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -615,6 +615,8 @@ iblock_execute_rw(struct se_cmd *cmd)
615 rw = WRITE_FUA; 615 rw = WRITE_FUA;
616 else if (!(q->flush_flags & REQ_FLUSH)) 616 else if (!(q->flush_flags & REQ_FLUSH))
617 rw = WRITE_FUA; 617 rw = WRITE_FUA;
618 else
619 rw = WRITE;
618 } else { 620 } else {
619 rw = WRITE; 621 rw = WRITE;
620 } 622 }
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index 853bab60e362..18d49df4d0ac 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -8,7 +8,6 @@ extern struct t10_alua_lu_gp *default_lu_gp;
8struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); 8struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
9int core_free_device_list_for_node(struct se_node_acl *, 9int core_free_device_list_for_node(struct se_node_acl *,
10 struct se_portal_group *); 10 struct se_portal_group *);
11void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
12void core_update_device_list_access(u32, u32, struct se_node_acl *); 11void core_update_device_list_access(u32, u32, struct se_node_acl *);
13int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *, 12int core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
14 u32, u32, struct se_node_acl *, struct se_portal_group *); 13 u32, u32, struct se_node_acl *, struct se_portal_group *);
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index e0b3c379aa14..0921a64b5550 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -291,6 +291,11 @@ rd_execute_rw(struct se_cmd *cmd)
291 u32 src_len; 291 u32 src_len;
292 u64 tmp; 292 u64 tmp;
293 293
294 if (dev->rd_flags & RDF_NULLIO) {
295 target_complete_cmd(cmd, SAM_STAT_GOOD);
296 return 0;
297 }
298
294 tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size; 299 tmp = cmd->t_task_lba * se_dev->dev_attrib.block_size;
295 rd_offset = do_div(tmp, PAGE_SIZE); 300 rd_offset = do_div(tmp, PAGE_SIZE);
296 rd_page = tmp; 301 rd_page = tmp;
@@ -373,11 +378,12 @@ rd_execute_rw(struct se_cmd *cmd)
373} 378}
374 379
375enum { 380enum {
376 Opt_rd_pages, Opt_err 381 Opt_rd_pages, Opt_rd_nullio, Opt_err
377}; 382};
378 383
379static match_table_t tokens = { 384static match_table_t tokens = {
380 {Opt_rd_pages, "rd_pages=%d"}, 385 {Opt_rd_pages, "rd_pages=%d"},
386 {Opt_rd_nullio, "rd_nullio=%d"},
381 {Opt_err, NULL} 387 {Opt_err, NULL}
382}; 388};
383 389
@@ -408,6 +414,14 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev,
408 " Count: %u\n", rd_dev->rd_page_count); 414 " Count: %u\n", rd_dev->rd_page_count);
409 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT; 415 rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
410 break; 416 break;
417 case Opt_rd_nullio:
418 match_int(args, &arg);
419 if (arg != 1)
420 break;
421
422 pr_debug("RAMDISK: Setting NULLIO flag: %d\n", arg);
423 rd_dev->rd_flags |= RDF_NULLIO;
424 break;
411 default: 425 default:
412 break; 426 break;
413 } 427 }
@@ -424,8 +438,9 @@ static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b)
424 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n", 438 ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: rd_mcp\n",
425 rd_dev->rd_dev_id); 439 rd_dev->rd_dev_id);
426 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu" 440 bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
427 " SG_table_count: %u\n", rd_dev->rd_page_count, 441 " SG_table_count: %u nullio: %d\n", rd_dev->rd_page_count,
428 PAGE_SIZE, rd_dev->sg_table_count); 442 PAGE_SIZE, rd_dev->sg_table_count,
443 !!(rd_dev->rd_flags & RDF_NULLIO));
429 return bl; 444 return bl;
430} 445}
431 446
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index 933b38b6e563..1789d1e14395 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -22,6 +22,7 @@ struct rd_dev_sg_table {
22} ____cacheline_aligned; 22} ____cacheline_aligned;
23 23
24#define RDF_HAS_PAGE_COUNT 0x01 24#define RDF_HAS_PAGE_COUNT 0x01
25#define RDF_NULLIO 0x02
25 26
26struct rd_dev { 27struct rd_dev {
27 struct se_device dev; 28 struct se_device dev;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index f8388b4024aa..4a793362309d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2163,8 +2163,6 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2163 if (wait_for_tasks) 2163 if (wait_for_tasks)
2164 transport_wait_for_tasks(cmd); 2164 transport_wait_for_tasks(cmd);
2165 2165
2166 core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd);
2167
2168 if (cmd->se_lun) 2166 if (cmd->se_lun)
2169 transport_lun_remove_cmd(cmd); 2167 transport_lun_remove_cmd(cmd);
2170 2168
@@ -2213,21 +2211,19 @@ static void target_release_cmd_kref(struct kref *kref)
2213{ 2211{
2214 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); 2212 struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
2215 struct se_session *se_sess = se_cmd->se_sess; 2213 struct se_session *se_sess = se_cmd->se_sess;
2216 unsigned long flags;
2217 2214
2218 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2219 if (list_empty(&se_cmd->se_cmd_list)) { 2215 if (list_empty(&se_cmd->se_cmd_list)) {
2220 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2216 spin_unlock(&se_sess->sess_cmd_lock);
2221 se_cmd->se_tfo->release_cmd(se_cmd); 2217 se_cmd->se_tfo->release_cmd(se_cmd);
2222 return; 2218 return;
2223 } 2219 }
2224 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) { 2220 if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
2225 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2221 spin_unlock(&se_sess->sess_cmd_lock);
2226 complete(&se_cmd->cmd_wait_comp); 2222 complete(&se_cmd->cmd_wait_comp);
2227 return; 2223 return;
2228 } 2224 }
2229 list_del(&se_cmd->se_cmd_list); 2225 list_del(&se_cmd->se_cmd_list);
2230 spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); 2226 spin_unlock(&se_sess->sess_cmd_lock);
2231 2227
2232 se_cmd->se_tfo->release_cmd(se_cmd); 2228 se_cmd->se_tfo->release_cmd(se_cmd);
2233} 2229}
@@ -2238,7 +2234,8 @@ static void target_release_cmd_kref(struct kref *kref)
2238 */ 2234 */
2239int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd) 2235int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
2240{ 2236{
2241 return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); 2237 return kref_put_spinlock_irqsave(&se_cmd->cmd_kref, target_release_cmd_kref,
2238 &se_sess->sess_cmd_lock);
2242} 2239}
2243EXPORT_SYMBOL(target_put_sess_cmd); 2240EXPORT_SYMBOL(target_put_sess_cmd);
2244 2241
diff --git a/drivers/thermal/armada_thermal.c b/drivers/thermal/armada_thermal.c
index 5b4d75fd7b49..54ffd64ca3f7 100644
--- a/drivers/thermal/armada_thermal.c
+++ b/drivers/thermal/armada_thermal.c
@@ -169,21 +169,11 @@ static int armada_thermal_probe(struct platform_device *pdev)
169 return -ENOMEM; 169 return -ENOMEM;
170 170
171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172 if (!res) {
173 dev_err(&pdev->dev, "Failed to get platform resource\n");
174 return -ENODEV;
175 }
176
177 priv->sensor = devm_ioremap_resource(&pdev->dev, res); 172 priv->sensor = devm_ioremap_resource(&pdev->dev, res);
178 if (IS_ERR(priv->sensor)) 173 if (IS_ERR(priv->sensor))
179 return PTR_ERR(priv->sensor); 174 return PTR_ERR(priv->sensor);
180 175
181 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 176 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
182 if (!res) {
183 dev_err(&pdev->dev, "Failed to get platform resource\n");
184 return -ENODEV;
185 }
186
187 priv->control = devm_ioremap_resource(&pdev->dev, res); 177 priv->control = devm_ioremap_resource(&pdev->dev, res);
188 if (IS_ERR(priv->control)) 178 if (IS_ERR(priv->control))
189 return PTR_ERR(priv->control); 179 return PTR_ERR(priv->control);
diff --git a/drivers/thermal/dove_thermal.c b/drivers/thermal/dove_thermal.c
index 4b15a5f270dc..a088d1365ca5 100644
--- a/drivers/thermal/dove_thermal.c
+++ b/drivers/thermal/dove_thermal.c
@@ -149,10 +149,6 @@ static int dove_thermal_probe(struct platform_device *pdev)
149 return PTR_ERR(priv->sensor); 149 return PTR_ERR(priv->sensor);
150 150
151 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 151 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
152 if (!res) {
153 dev_err(&pdev->dev, "Failed to get platform resource\n");
154 return -ENODEV;
155 }
156 priv->control = devm_ioremap_resource(&pdev->dev, res); 152 priv->control = devm_ioremap_resource(&pdev->dev, res);
157 if (IS_ERR(priv->control)) 153 if (IS_ERR(priv->control))
158 return PTR_ERR(priv->control); 154 return PTR_ERR(priv->control);
diff --git a/drivers/thermal/exynos_thermal.c b/drivers/thermal/exynos_thermal.c
index d20ce9e61403..788b1ddcac6c 100644
--- a/drivers/thermal/exynos_thermal.c
+++ b/drivers/thermal/exynos_thermal.c
@@ -925,11 +925,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
925 INIT_WORK(&data->irq_work, exynos_tmu_work); 925 INIT_WORK(&data->irq_work, exynos_tmu_work);
926 926
927 data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 927 data->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
928 if (!data->mem) {
929 dev_err(&pdev->dev, "Failed to get platform resource\n");
930 return -ENOENT;
931 }
932
933 data->base = devm_ioremap_resource(&pdev->dev, data->mem); 928 data->base = devm_ioremap_resource(&pdev->dev, data->mem);
934 if (IS_ERR(data->base)) 929 if (IS_ERR(data->base))
935 return PTR_ERR(data->base); 930 return PTR_ERR(data->base);
diff --git a/drivers/tty/ehv_bytechan.c b/drivers/tty/ehv_bytechan.c
index 6d0c27cd03da..9bffcec5ad82 100644
--- a/drivers/tty/ehv_bytechan.c
+++ b/drivers/tty/ehv_bytechan.c
@@ -859,6 +859,7 @@ error:
859 */ 859 */
860static void __exit ehv_bc_exit(void) 860static void __exit ehv_bc_exit(void)
861{ 861{
862 platform_driver_unregister(&ehv_bc_tty_driver);
862 tty_unregister_driver(ehv_bc_driver); 863 tty_unregister_driver(ehv_bc_driver);
863 put_tty_driver(ehv_bc_driver); 864 put_tty_driver(ehv_bc_driver);
864 kfree(bcs); 865 kfree(bcs);
diff --git a/drivers/tty/mxser.c b/drivers/tty/mxser.c
index 71d6eb2c93b1..4c4a23674569 100644
--- a/drivers/tty/mxser.c
+++ b/drivers/tty/mxser.c
@@ -1618,8 +1618,12 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp)
1618 if (ip->type == PORT_16550A) 1618 if (ip->type == PORT_16550A)
1619 me->fifo[p] = 1; 1619 me->fifo[p] = 1;
1620 1620
1621 opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2); 1621 if (ip->board->chip_flag == MOXA_MUST_MU860_HWID) {
1622 opmode &= OP_MODE_MASK; 1622 opmode = inb(ip->opmode_ioaddr)>>((p % 4) * 2);
1623 opmode &= OP_MODE_MASK;
1624 } else {
1625 opmode = RS232_MODE;
1626 }
1623 me->iftype[p] = opmode; 1627 me->iftype[p] = opmode;
1624 mutex_unlock(&port->mutex); 1628 mutex_unlock(&port->mutex);
1625 } 1629 }
@@ -1676,6 +1680,9 @@ static int mxser_ioctl(struct tty_struct *tty,
1676 int shiftbit; 1680 int shiftbit;
1677 unsigned char val, mask; 1681 unsigned char val, mask;
1678 1682
1683 if (info->board->chip_flag != MOXA_MUST_MU860_HWID)
1684 return -EFAULT;
1685
1679 p = tty->index % 4; 1686 p = tty->index % 4;
1680 if (cmd == MOXA_SET_OP_MODE) { 1687 if (cmd == MOXA_SET_OP_MODE) {
1681 if (get_user(opmode, (int __user *) argp)) 1688 if (get_user(opmode, (int __user *) argp))
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d655416087b7..6c7fe90ad72d 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1573,6 +1573,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
1573 ldata->real_raw = 0; 1573 ldata->real_raw = 0;
1574 } 1574 }
1575 n_tty_set_room(tty); 1575 n_tty_set_room(tty);
1576 /*
1577 * Fix tty hang when I_IXON(tty) is cleared, but the tty
1578 * been stopped by STOP_CHAR(tty) before it.
1579 */
1580 if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
1581 start_tty(tty);
1582 }
1583
1576 /* The termios change make the tty ready for I/O */ 1584 /* The termios change make the tty ready for I/O */
1577 wake_up_interruptible(&tty->write_wait); 1585 wake_up_interruptible(&tty->write_wait);
1578 wake_up_interruptible(&tty->read_wait); 1586 wake_up_interruptible(&tty->read_wait);
diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
index 82d35c5a58fd..354564ea47c5 100644
--- a/drivers/tty/rocket.c
+++ b/drivers/tty/rocket.c
@@ -150,12 +150,14 @@ static Word_t aiop_intr_bits[AIOP_CTL_SIZE] = {
150 AIOP_INTR_BIT_3 150 AIOP_INTR_BIT_3
151}; 151};
152 152
153#ifdef CONFIG_PCI
153static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = { 154static Word_t upci_aiop_intr_bits[AIOP_CTL_SIZE] = {
154 UPCI_AIOP_INTR_BIT_0, 155 UPCI_AIOP_INTR_BIT_0,
155 UPCI_AIOP_INTR_BIT_1, 156 UPCI_AIOP_INTR_BIT_1,
156 UPCI_AIOP_INTR_BIT_2, 157 UPCI_AIOP_INTR_BIT_2,
157 UPCI_AIOP_INTR_BIT_3 158 UPCI_AIOP_INTR_BIT_3
158}; 159};
160#endif
159 161
160static Byte_t RData[RDATASIZE] = { 162static Byte_t RData[RDATASIZE] = {
161 0x00, 0x09, 0xf6, 0x82, 163 0x00, 0x09, 0xf6, 0x82,
@@ -227,7 +229,6 @@ static unsigned long nextLineNumber;
227static int __init init_ISA(int i); 229static int __init init_ISA(int i);
228static void rp_wait_until_sent(struct tty_struct *tty, int timeout); 230static void rp_wait_until_sent(struct tty_struct *tty, int timeout);
229static void rp_flush_buffer(struct tty_struct *tty); 231static void rp_flush_buffer(struct tty_struct *tty);
230static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model);
231static unsigned char GetLineNumber(int ctrl, int aiop, int ch); 232static unsigned char GetLineNumber(int ctrl, int aiop, int ch);
232static unsigned char SetLineNumber(int ctrl, int aiop, int ch); 233static unsigned char SetLineNumber(int ctrl, int aiop, int ch);
233static void rp_start(struct tty_struct *tty); 234static void rp_start(struct tty_struct *tty);
@@ -241,11 +242,6 @@ static void sDisInterrupts(CHANNEL_T * ChP, Word_t Flags);
241static void sModemReset(CONTROLLER_T * CtlP, int chan, int on); 242static void sModemReset(CONTROLLER_T * CtlP, int chan, int on);
242static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on); 243static void sPCIModemReset(CONTROLLER_T * CtlP, int chan, int on);
243static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data); 244static int sWriteTxPrioByte(CHANNEL_T * ChP, Byte_t Data);
244static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
245 ByteIO_t * AiopIOList, int AiopIOListSize,
246 WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
247 int PeriodicOnly, int altChanRingIndicator,
248 int UPCIRingInd);
249static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO, 245static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
250 ByteIO_t * AiopIOList, int AiopIOListSize, 246 ByteIO_t * AiopIOList, int AiopIOListSize,
251 int IRQNum, Byte_t Frequency, int PeriodicOnly); 247 int IRQNum, Byte_t Frequency, int PeriodicOnly);
@@ -1775,6 +1771,145 @@ static DEFINE_PCI_DEVICE_TABLE(rocket_pci_ids) = {
1775}; 1771};
1776MODULE_DEVICE_TABLE(pci, rocket_pci_ids); 1772MODULE_DEVICE_TABLE(pci, rocket_pci_ids);
1777 1773
1774/* Resets the speaker controller on RocketModem II and III devices */
1775static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
1776{
1777 ByteIO_t addr;
1778
1779 /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
1780 if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
1781 addr = CtlP->AiopIO[0] + 0x4F;
1782 sOutB(addr, 0);
1783 }
1784
1785 /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
1786 if ((model == MODEL_UPCI_RM3_8PORT)
1787 || (model == MODEL_UPCI_RM3_4PORT)) {
1788 addr = CtlP->AiopIO[0] + 0x88;
1789 sOutB(addr, 0);
1790 }
1791}
1792
1793/***************************************************************************
1794Function: sPCIInitController
1795Purpose: Initialization of controller global registers and controller
1796 structure.
1797Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
1798 IRQNum,Frequency,PeriodicOnly)
1799 CONTROLLER_T *CtlP; Ptr to controller structure
1800 int CtlNum; Controller number
1801 ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
1802 This list must be in the order the AIOPs will be found on the
1803 controller. Once an AIOP in the list is not found, it is
1804 assumed that there are no more AIOPs on the controller.
1805 int AiopIOListSize; Number of addresses in AiopIOList
1806 int IRQNum; Interrupt Request number. Can be any of the following:
1807 0: Disable global interrupts
1808 3: IRQ 3
1809 4: IRQ 4
1810 5: IRQ 5
1811 9: IRQ 9
1812 10: IRQ 10
1813 11: IRQ 11
1814 12: IRQ 12
1815 15: IRQ 15
1816 Byte_t Frequency: A flag identifying the frequency
1817 of the periodic interrupt, can be any one of the following:
1818 FREQ_DIS - periodic interrupt disabled
1819 FREQ_137HZ - 137 Hertz
1820 FREQ_69HZ - 69 Hertz
1821 FREQ_34HZ - 34 Hertz
1822 FREQ_17HZ - 17 Hertz
1823 FREQ_9HZ - 9 Hertz
1824 FREQ_4HZ - 4 Hertz
1825 If IRQNum is set to 0 the Frequency parameter is
1826 overidden, it is forced to a value of FREQ_DIS.
1827 int PeriodicOnly: 1 if all interrupts except the periodic
1828 interrupt are to be blocked.
1829 0 is both the periodic interrupt and
1830 other channel interrupts are allowed.
1831 If IRQNum is set to 0 the PeriodicOnly parameter is
1832 overidden, it is forced to a value of 0.
1833Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
1834 initialization failed.
1835
1836Comments:
1837 If periodic interrupts are to be disabled but AIOP interrupts
1838 are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
1839
1840 If interrupts are to be completely disabled set IRQNum to 0.
1841
1842 Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
1843 invalid combination.
1844
1845 This function performs initialization of global interrupt modes,
1846 but it does not actually enable global interrupts. To enable
1847 and disable global interrupts use functions sEnGlobalInt() and
1848 sDisGlobalInt(). Enabling of global interrupts is normally not
1849 done until all other initializations are complete.
1850
1851 Even if interrupts are globally enabled, they must also be
1852 individually enabled for each channel that is to generate
1853 interrupts.
1854
1855Warnings: No range checking on any of the parameters is done.
1856
1857 No context switches are allowed while executing this function.
1858
1859 After this function all AIOPs on the controller are disabled,
1860 they can be enabled with sEnAiop().
1861*/
1862static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
1863 ByteIO_t * AiopIOList, int AiopIOListSize,
1864 WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
1865 int PeriodicOnly, int altChanRingIndicator,
1866 int UPCIRingInd)
1867{
1868 int i;
1869 ByteIO_t io;
1870
1871 CtlP->AltChanRingIndicator = altChanRingIndicator;
1872 CtlP->UPCIRingInd = UPCIRingInd;
1873 CtlP->CtlNum = CtlNum;
1874 CtlP->CtlID = CTLID_0001; /* controller release 1 */
1875 CtlP->BusType = isPCI; /* controller release 1 */
1876
1877 if (ConfigIO) {
1878 CtlP->isUPCI = 1;
1879 CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
1880 CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
1881 CtlP->AiopIntrBits = upci_aiop_intr_bits;
1882 } else {
1883 CtlP->isUPCI = 0;
1884 CtlP->PCIIO =
1885 (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
1886 CtlP->AiopIntrBits = aiop_intr_bits;
1887 }
1888
1889 sPCIControllerEOI(CtlP); /* clear EOI if warm init */
1890 /* Init AIOPs */
1891 CtlP->NumAiop = 0;
1892 for (i = 0; i < AiopIOListSize; i++) {
1893 io = AiopIOList[i];
1894 CtlP->AiopIO[i] = (WordIO_t) io;
1895 CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
1896
1897 CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
1898 if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
1899 break; /* done looking for AIOPs */
1900
1901 CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
1902 sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
1903 sOutB(io + _INDX_DATA, sClockPrescale);
1904 CtlP->NumAiop++; /* bump count of AIOPs */
1905 }
1906
1907 if (CtlP->NumAiop == 0)
1908 return (-1);
1909 else
1910 return (CtlP->NumAiop);
1911}
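
For reference, a minimal sketch of how a probe path might call sPCIInitController() based on the documentation above; the controller pointer, the AIOP I/O list and every numeric choice here are hypothetical placeholders, not values taken from this driver.

/* Illustrative caller only -- not part of the patch. */
static int example_init_board(CONTROLLER_T *ctlp, int board_num,
			      ByteIO_t aiop_io[], int aiop_io_count)
{
	int num_aiops;

	/* Global interrupts off (IRQNum = 0); Frequency and PeriodicOnly
	 * are then forced to FREQ_DIS and 0 as described above. */
	num_aiops = sPCIInitController(ctlp, board_num,
				       aiop_io, aiop_io_count,
				       0,		/* ConfigIO: 0 = not a uPCI board */
				       0,		/* IRQNum */
				       FREQ_DIS,	/* Frequency */
				       0,		/* PeriodicOnly */
				       0, 0);		/* ring-indicator options */
	if (num_aiops <= 0)
		return -ENODEV;		/* no AIOPs found / init failed */
	return num_aiops;
}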
1912
1778/* 1913/*
1779 * Called when a PCI card is found. Retrieves and stores model information, 1914 * Called when a PCI card is found. Retrieves and stores model information,
1780 * init's aiopic and serial port hardware. 1915 * init's aiopic and serial port hardware.
@@ -2519,147 +2654,6 @@ static int sInitController(CONTROLLER_T * CtlP, int CtlNum, ByteIO_t MudbacIO,
2519 return (CtlP->NumAiop); 2654 return (CtlP->NumAiop);
2520} 2655}
2521 2656
2522#ifdef CONFIG_PCI
2523/***************************************************************************
2524Function: sPCIInitController
2525Purpose: Initialization of controller global registers and controller
2526 structure.
2527Call: sPCIInitController(CtlP,CtlNum,AiopIOList,AiopIOListSize,
2528 IRQNum,Frequency,PeriodicOnly)
2529 CONTROLLER_T *CtlP; Ptr to controller structure
2530 int CtlNum; Controller number
2531 ByteIO_t *AiopIOList; List of I/O addresses for each AIOP.
2532 This list must be in the order the AIOPs will be found on the
2533 controller. Once an AIOP in the list is not found, it is
2534 assumed that there are no more AIOPs on the controller.
2535 int AiopIOListSize; Number of addresses in AiopIOList
2536 int IRQNum; Interrupt Request number. Can be any of the following:
2537 0: Disable global interrupts
2538 3: IRQ 3
2539 4: IRQ 4
2540 5: IRQ 5
2541 9: IRQ 9
2542 10: IRQ 10
2543 11: IRQ 11
2544 12: IRQ 12
2545 15: IRQ 15
2546 Byte_t Frequency: A flag identifying the frequency
2547 of the periodic interrupt, can be any one of the following:
2548 FREQ_DIS - periodic interrupt disabled
2549 FREQ_137HZ - 137 Hertz
2550 FREQ_69HZ - 69 Hertz
2551 FREQ_34HZ - 34 Hertz
2552 FREQ_17HZ - 17 Hertz
2553 FREQ_9HZ - 9 Hertz
2554 FREQ_4HZ - 4 Hertz
2555 If IRQNum is set to 0, the Frequency parameter is
2556 overridden; it is forced to a value of FREQ_DIS.
2557 int PeriodicOnly: 1 if all interrupts except the periodic
2558 interrupt are to be blocked.
2559 0 if both the periodic interrupt and
2560 other channel interrupts are allowed.
2561 If IRQNum is set to 0, the PeriodicOnly parameter is
2562 overridden; it is forced to a value of 0.
2563Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller
2564 initialization failed.
2565
2566Comments:
2567 If periodic interrupts are to be disabled but AIOP interrupts
2568 are allowed, set Frequency to FREQ_DIS and PeriodicOnly to 0.
2569
2570 If interrupts are to be completely disabled set IRQNum to 0.
2571
2572 Setting Frequency to FREQ_DIS and PeriodicOnly to 1 is an
2573 invalid combination.
2574
2575 This function performs initialization of global interrupt modes,
2576 but it does not actually enable global interrupts. To enable
2577 and disable global interrupts use functions sEnGlobalInt() and
2578 sDisGlobalInt(). Enabling of global interrupts is normally not
2579 done until all other initializations are complete.
2580
2581 Even if interrupts are globally enabled, they must also be
2582 individually enabled for each channel that is to generate
2583 interrupts.
2584
2585Warnings: No range checking on any of the parameters is done.
2586
2587 No context switches are allowed while executing this function.
2588
2589 After this function all AIOPs on the controller are disabled;
2590 they can be enabled with sEnAiop().
2591*/
2592static int sPCIInitController(CONTROLLER_T * CtlP, int CtlNum,
2593 ByteIO_t * AiopIOList, int AiopIOListSize,
2594 WordIO_t ConfigIO, int IRQNum, Byte_t Frequency,
2595 int PeriodicOnly, int altChanRingIndicator,
2596 int UPCIRingInd)
2597{
2598 int i;
2599 ByteIO_t io;
2600
2601 CtlP->AltChanRingIndicator = altChanRingIndicator;
2602 CtlP->UPCIRingInd = UPCIRingInd;
2603 CtlP->CtlNum = CtlNum;
2604 CtlP->CtlID = CTLID_0001; /* controller release 1 */
2605 CtlP->BusType = isPCI; /* controller release 1 */
2606
2607 if (ConfigIO) {
2608 CtlP->isUPCI = 1;
2609 CtlP->PCIIO = ConfigIO + _PCI_9030_INT_CTRL;
2610 CtlP->PCIIO2 = ConfigIO + _PCI_9030_GPIO_CTRL;
2611 CtlP->AiopIntrBits = upci_aiop_intr_bits;
2612 } else {
2613 CtlP->isUPCI = 0;
2614 CtlP->PCIIO =
2615 (WordIO_t) ((ByteIO_t) AiopIOList[0] + _PCI_INT_FUNC);
2616 CtlP->AiopIntrBits = aiop_intr_bits;
2617 }
2618
2619 sPCIControllerEOI(CtlP); /* clear EOI if warm init */
2620 /* Init AIOPs */
2621 CtlP->NumAiop = 0;
2622 for (i = 0; i < AiopIOListSize; i++) {
2623 io = AiopIOList[i];
2624 CtlP->AiopIO[i] = (WordIO_t) io;
2625 CtlP->AiopIntChanIO[i] = io + _INT_CHAN;
2626
2627 CtlP->AiopID[i] = sReadAiopID(io); /* read AIOP ID */
2628 if (CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */
2629 break; /* done looking for AIOPs */
2630
2631 CtlP->AiopNumChan[i] = sReadAiopNumChan((WordIO_t) io); /* num channels in AIOP */
2632 sOutW((WordIO_t) io + _INDX_ADDR, _CLK_PRE); /* clock prescaler */
2633 sOutB(io + _INDX_DATA, sClockPrescale);
2634 CtlP->NumAiop++; /* bump count of AIOPs */
2635 }
2636
2637 if (CtlP->NumAiop == 0)
2638 return (-1);
2639 else
2640 return (CtlP->NumAiop);
2641}
2642
2643/* Resets the speaker controller on RocketModem II and III devices */
2644static void rmSpeakerReset(CONTROLLER_T * CtlP, unsigned long model)
2645{
2646 ByteIO_t addr;
2647
2648 /* RocketModem II speaker control is at the 8th port location of offset 0x40 */
2649 if ((model == MODEL_RP4M) || (model == MODEL_RP6M)) {
2650 addr = CtlP->AiopIO[0] + 0x4F;
2651 sOutB(addr, 0);
2652 }
2653
2654 /* RocketModem III speaker control is at the 1st port location of offset 0x80 */
2655 if ((model == MODEL_UPCI_RM3_8PORT)
2656 || (model == MODEL_UPCI_RM3_4PORT)) {
2657 addr = CtlP->AiopIO[0] + 0x88;
2658 sOutB(addr, 0);
2659 }
2660}
2661#endif
2662
2663/*************************************************************************** 2657/***************************************************************************
2664Function: sReadAiopID 2658Function: sReadAiopID
2665Purpose: Read the AIOP identification number directly from an AIOP. 2659Purpose: Read the AIOP identification number directly from an AIOP.
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index beaa283f5cc6..d07b6af3a937 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -338,7 +338,8 @@ static int dw8250_runtime_suspend(struct device *dev)
338{ 338{
339 struct dw8250_data *data = dev_get_drvdata(dev); 339 struct dw8250_data *data = dev_get_drvdata(dev);
340 340
341 clk_disable_unprepare(data->clk); 341 if (!IS_ERR(data->clk))
342 clk_disable_unprepare(data->clk);
342 343
343 return 0; 344 return 0;
344} 345}
@@ -347,7 +348,8 @@ static int dw8250_runtime_resume(struct device *dev)
347{ 348{
348 struct dw8250_data *data = dev_get_drvdata(dev); 349 struct dw8250_data *data = dev_get_drvdata(dev);
349 350
350 clk_prepare_enable(data->clk); 351 if (!IS_ERR(data->clk))
352 clk_prepare_enable(data->clk);
351 353
352 return 0; 354 return 0;
353} 355}
@@ -367,6 +369,7 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
367static const struct acpi_device_id dw8250_acpi_match[] = { 369static const struct acpi_device_id dw8250_acpi_match[] = {
368 { "INT33C4", 0 }, 370 { "INT33C4", 0 },
369 { "INT33C5", 0 }, 371 { "INT33C5", 0 },
372 { "80860F0A", 0 },
370 { }, 373 { },
371}; 374};
372MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match); 375MODULE_DEVICE_TABLE(acpi, dw8250_acpi_match);
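
The 8250_dw changes above guard each clock call with IS_ERR() because the clk pointer stored at probe time may be an ERR_PTR when the platform provides no clock (as with the ACPI-enumerated variants added in the same hunk). A generic sketch of that optional-clock idiom, with made-up structure and function names:

/* Optional-clock idiom, generic names (not from 8250_dw.c). */
#include <linux/clk.h>
#include <linux/err.h>

struct example_uart {
	struct clk *clk;		/* may be ERR_PTR if no clock exists */
};

static void example_runtime_suspend(struct example_uart *u)
{
	if (!IS_ERR(u->clk))
		clk_disable_unprepare(u->clk);
}

static void example_runtime_resume(struct example_uart *u)
{
	if (!IS_ERR(u->clk))
		clk_prepare_enable(u->clk);
}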
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 8ab70a620919..e2774f9ecd59 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -332,7 +332,7 @@ static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *
332 dmaengine_slave_config(chan, &rx_conf); 332 dmaengine_slave_config(chan, &rx_conf);
333 uap->dmarx.chan = chan; 333 uap->dmarx.chan = chan;
334 334
335 if (plat->dma_rx_poll_enable) { 335 if (plat && plat->dma_rx_poll_enable) {
336 /* Set poll rate if specified. */ 336 /* Set poll rate if specified. */
337 if (plat->dma_rx_poll_rate) { 337 if (plat->dma_rx_poll_rate) {
338 uap->dmarx.auto_poll_rate = false; 338 uap->dmarx.auto_poll_rate = false;
diff --git a/drivers/tty/serial/mcf.c b/drivers/tty/serial/mcf.c
index e956377a38fe..65be0c00c4bf 100644
--- a/drivers/tty/serial/mcf.c
+++ b/drivers/tty/serial/mcf.c
@@ -707,8 +707,10 @@ static int __init mcf_init(void)
707 if (rc) 707 if (rc)
708 return rc; 708 return rc;
709 rc = platform_driver_register(&mcf_platform_driver); 709 rc = platform_driver_register(&mcf_platform_driver);
710 if (rc) 710 if (rc) {
711 uart_unregister_driver(&mcf_driver);
711 return rc; 712 return rc;
713 }
712 return 0; 714 return 0;
713} 715}
714 716
diff --git a/drivers/tty/serial/mpc52xx_uart.c b/drivers/tty/serial/mpc52xx_uart.c
index 018bad922554..f51b280f3bf2 100644
--- a/drivers/tty/serial/mpc52xx_uart.c
+++ b/drivers/tty/serial/mpc52xx_uart.c
@@ -1497,18 +1497,23 @@ mpc52xx_uart_init(void)
1497 if (psc_ops && psc_ops->fifoc_init) { 1497 if (psc_ops && psc_ops->fifoc_init) {
1498 ret = psc_ops->fifoc_init(); 1498 ret = psc_ops->fifoc_init();
1499 if (ret) 1499 if (ret)
1500 return ret; 1500 goto err_init;
1501 } 1501 }
1502 1502
1503 ret = platform_driver_register(&mpc52xx_uart_of_driver); 1503 ret = platform_driver_register(&mpc52xx_uart_of_driver);
1504 if (ret) { 1504 if (ret) {
1505 printk(KERN_ERR "%s: platform_driver_register failed (%i)\n", 1505 printk(KERN_ERR "%s: platform_driver_register failed (%i)\n",
1506 __FILE__, ret); 1506 __FILE__, ret);
1507 uart_unregister_driver(&mpc52xx_uart_driver); 1507 goto err_reg;
1508 return ret;
1509 } 1508 }
1510 1509
1511 return 0; 1510 return 0;
1511err_reg:
1512 if (psc_ops && psc_ops->fifoc_uninit)
1513 psc_ops->fifoc_uninit();
1514err_init:
1515 uart_unregister_driver(&mpc52xx_uart_driver);
1516 return ret;
1512} 1517}
1513 1518
1514static void __exit 1519static void __exit
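
The mpc52xx_uart_init() rework above replaces early returns with a goto ladder so that a late failure unwinds everything set up before it, in reverse order. A compilable toy version of the pattern follows; all step/undo functions are placeholders, not driver APIs.

/* Placeholders standing in for uart_register_driver(), fifoc_init()
 * and platform_driver_register() in the hunk above. */
static int register_core(void)  { return 0; }
static int init_fifo(void)      { return 0; }
static int register_plat(void)  { return 0; }
static void uninit_fifo(void)     { }
static void unregister_core(void) { }

static int example_module_init(void)
{
	int ret;

	ret = register_core();
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = init_fifo();
	if (ret)
		goto err_fifo;

	ret = register_plat();
	if (ret)
		goto err_plat;

	return 0;

err_plat:
	uninit_fifo();			/* undo in reverse order */
err_fifo:
	unregister_core();
	return ret;
}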
diff --git a/drivers/tty/serial/nwpserial.c b/drivers/tty/serial/nwpserial.c
index 77287c54f331..549c70a2a63e 100644
--- a/drivers/tty/serial/nwpserial.c
+++ b/drivers/tty/serial/nwpserial.c
@@ -199,7 +199,7 @@ static void nwpserial_shutdown(struct uart_port *port)
199 dcr_write(up->dcr_host, UART_IER, up->ier); 199 dcr_write(up->dcr_host, UART_IER, up->ier);
200 200
201 /* free irq */ 201 /* free irq */
202 free_irq(up->port.irq, port); 202 free_irq(up->port.irq, up);
203} 203}
204 204
205static int nwpserial_verify_port(struct uart_port *port, 205static int nwpserial_verify_port(struct uart_port *port,
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 30d4f7a783cd..f0b9f6b52b32 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -202,26 +202,6 @@ static int serial_omap_get_context_loss_count(struct uart_omap_port *up)
202 return pdata->get_context_loss_count(up->dev); 202 return pdata->get_context_loss_count(up->dev);
203} 203}
204 204
205static void serial_omap_set_forceidle(struct uart_omap_port *up)
206{
207 struct omap_uart_port_info *pdata = up->dev->platform_data;
208
209 if (!pdata || !pdata->set_forceidle)
210 return;
211
212 pdata->set_forceidle(up->dev);
213}
214
215static void serial_omap_set_noidle(struct uart_omap_port *up)
216{
217 struct omap_uart_port_info *pdata = up->dev->platform_data;
218
219 if (!pdata || !pdata->set_noidle)
220 return;
221
222 pdata->set_noidle(up->dev);
223}
224
225static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable) 205static void serial_omap_enable_wakeup(struct uart_omap_port *up, bool enable)
226{ 206{
227 struct omap_uart_port_info *pdata = up->dev->platform_data; 207 struct omap_uart_port_info *pdata = up->dev->platform_data;
@@ -298,8 +278,6 @@ static void serial_omap_stop_tx(struct uart_port *port)
298 serial_out(up, UART_IER, up->ier); 278 serial_out(up, UART_IER, up->ier);
299 } 279 }
300 280
301 serial_omap_set_forceidle(up);
302
303 pm_runtime_mark_last_busy(up->dev); 281 pm_runtime_mark_last_busy(up->dev);
304 pm_runtime_put_autosuspend(up->dev); 282 pm_runtime_put_autosuspend(up->dev);
305} 283}
@@ -364,7 +342,6 @@ static void serial_omap_start_tx(struct uart_port *port)
364 342
365 pm_runtime_get_sync(up->dev); 343 pm_runtime_get_sync(up->dev);
366 serial_omap_enable_ier_thri(up); 344 serial_omap_enable_ier_thri(up);
367 serial_omap_set_noidle(up);
368 pm_runtime_mark_last_busy(up->dev); 345 pm_runtime_mark_last_busy(up->dev);
369 pm_runtime_put_autosuspend(up->dev); 346 pm_runtime_put_autosuspend(up->dev);
370} 347}
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 074b9194144f..89429410a245 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1803,6 +1803,7 @@ static int __init s3c24xx_serial_modinit(void)
1803 1803
1804static void __exit s3c24xx_serial_modexit(void) 1804static void __exit s3c24xx_serial_modexit(void)
1805{ 1805{
1806 platform_driver_unregister(&samsung_serial_driver);
1806 uart_unregister_driver(&s3c24xx_uart_drv); 1807 uart_unregister_driver(&s3c24xx_uart_drv);
1807} 1808}
1808 1809
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index fbd447b390f7..740202d8a5c4 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -779,7 +779,6 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
779 con_set_default_unimap(vc); 779 con_set_default_unimap(vc);
780 vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL); 780 vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
781 if (!vc->vc_screenbuf) { 781 if (!vc->vc_screenbuf) {
782 tty_port_destroy(&vc->port);
783 kfree(vc); 782 kfree(vc);
784 vc_cons[currcons].d = NULL; 783 vc_cons[currcons].d = NULL;
785 return -ENOMEM; 784 return -ENOMEM;
@@ -986,26 +985,25 @@ static int vt_resize(struct tty_struct *tty, struct winsize *ws)
986 return ret; 985 return ret;
987} 986}
988 987
989void vc_deallocate(unsigned int currcons) 988struct vc_data *vc_deallocate(unsigned int currcons)
990{ 989{
990 struct vc_data *vc = NULL;
991
991 WARN_CONSOLE_UNLOCKED(); 992 WARN_CONSOLE_UNLOCKED();
992 993
993 if (vc_cons_allocated(currcons)) { 994 if (vc_cons_allocated(currcons)) {
994 struct vc_data *vc = vc_cons[currcons].d; 995 struct vt_notifier_param param;
995 struct vt_notifier_param param = { .vc = vc };
996 996
997 param.vc = vc = vc_cons[currcons].d;
997 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param); 998 atomic_notifier_call_chain(&vt_notifier_list, VT_DEALLOCATE, &param);
998 vcs_remove_sysfs(currcons); 999 vcs_remove_sysfs(currcons);
999 vc->vc_sw->con_deinit(vc); 1000 vc->vc_sw->con_deinit(vc);
1000 put_pid(vc->vt_pid); 1001 put_pid(vc->vt_pid);
1001 module_put(vc->vc_sw->owner); 1002 module_put(vc->vc_sw->owner);
1002 kfree(vc->vc_screenbuf); 1003 kfree(vc->vc_screenbuf);
1003 if (currcons >= MIN_NR_CONSOLES) {
1004 tty_port_destroy(&vc->port);
1005 kfree(vc);
1006 }
1007 vc_cons[currcons].d = NULL; 1004 vc_cons[currcons].d = NULL;
1008 } 1005 }
1006 return vc;
1009} 1007}
1010 1008
1011/* 1009/*
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 98ff1735eafc..fc2c06c66e89 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -283,6 +283,51 @@ do_unimap_ioctl(int cmd, struct unimapdesc __user *user_ud, int perm, struct vc_
283 return 0; 283 return 0;
284} 284}
285 285
286/* deallocate a single console, if possible (leave 0) */
287static int vt_disallocate(unsigned int vc_num)
288{
289 struct vc_data *vc = NULL;
290 int ret = 0;
291
292 if (!vc_num)
293 return 0;
294
295 console_lock();
296 if (VT_BUSY(vc_num))
297 ret = -EBUSY;
298 else
299 vc = vc_deallocate(vc_num);
300 console_unlock();
301
302 if (vc && vc_num >= MIN_NR_CONSOLES) {
303 tty_port_destroy(&vc->port);
304 kfree(vc);
305 }
306
307 return ret;
308}
309
310/* deallocate all unused consoles, but leave 0 */
311static void vt_disallocate_all(void)
312{
313 struct vc_data *vc[MAX_NR_CONSOLES];
314 int i;
315
316 console_lock();
317 for (i = 1; i < MAX_NR_CONSOLES; i++)
318 if (!VT_BUSY(i))
319 vc[i] = vc_deallocate(i);
320 else
321 vc[i] = NULL;
322 console_unlock();
323
324 for (i = 1; i < MAX_NR_CONSOLES; i++) {
325 if (vc[i] && i >= MIN_NR_CONSOLES) {
326 tty_port_destroy(&vc[i]->port);
327 kfree(vc[i]);
328 }
329 }
330}
286 331
287 332
288/* 333/*
@@ -769,24 +814,10 @@ int vt_ioctl(struct tty_struct *tty,
769 ret = -ENXIO; 814 ret = -ENXIO;
770 break; 815 break;
771 } 816 }
772 if (arg == 0) { 817 if (arg == 0)
773 /* deallocate all unused consoles, but leave 0 */ 818 vt_disallocate_all();
774 console_lock(); 819 else
775 for (i=1; i<MAX_NR_CONSOLES; i++) 820 ret = vt_disallocate(--arg);
776 if (! VT_BUSY(i))
777 vc_deallocate(i);
778 console_unlock();
779 } else {
780 /* deallocate a single console, if possible */
781 arg--;
782 if (VT_BUSY(arg))
783 ret = -EBUSY;
784 else if (arg) { /* leave 0 */
785 console_lock();
786 vc_deallocate(arg);
787 console_unlock();
788 }
789 }
790 break; 821 break;
791 822
792 case VT_RESIZE: 823 case VT_RESIZE:
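
With the vt changes above, vc_deallocate() returns the struct vc_data instead of freeing it, and both new vt_ioctl helpers drop the console lock before calling tty_port_destroy()/kfree(). A hypothetical caller following the same contract (busy checking omitted for brevity):

/* Hypothetical caller of the new vc_deallocate() contract. */
static int example_release_console(unsigned int idx)
{
	struct vc_data *vc;

	console_lock();
	vc = vc_deallocate(idx);	/* detaches the console, does not free it */
	console_unlock();

	/* Consoles below MIN_NR_CONSOLES are never freed. */
	if (vc && idx >= MIN_NR_CONSOLES) {
		tty_port_destroy(&vc->port);
		kfree(vc);
	}
	return 0;
}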
diff --git a/drivers/uio/Kconfig b/drivers/uio/Kconfig
index e92eeaf251fe..5295be0342c1 100644
--- a/drivers/uio/Kconfig
+++ b/drivers/uio/Kconfig
@@ -45,6 +45,7 @@ config UIO_PDRV_GENIRQ
45 45
46config UIO_DMEM_GENIRQ 46config UIO_DMEM_GENIRQ
47 tristate "Userspace platform driver with generic irq and dynamic memory" 47 tristate "Userspace platform driver with generic irq and dynamic memory"
48 depends on HAS_DMA
48 help 49 help
49 Platform driver for Userspace I/O devices, including generic 50 Platform driver for Userspace I/O devices, including generic
50 interrupt handling code. Shared interrupts are not supported. 51 interrupt handling code. Shared interrupts are not supported.
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index b7eb86ad6bf2..8a7eb77233b4 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
686{ 686{
687 int ret, len; 687 int ret, len;
688 __le32 *buf; 688 __le32 *buf;
689 int offb, offd; 689 int offb;
690 unsigned int offd;
690 const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; 691 const int stride = CMD_PACKET_SIZE / (4 * 2) - 1;
691 int buflen = ((size - 1) / stride + 1 + size * 2) * 4; 692 int buflen = ((size - 1) / stride + 1 + size * 2) * 4;
692 693
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 608a2aeb400c..b2df442eb3e5 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -20,7 +20,7 @@ config USB_CHIPIDEA_UDC
20config USB_CHIPIDEA_HOST 20config USB_CHIPIDEA_HOST
21 bool "ChipIdea host controller" 21 bool "ChipIdea host controller"
22 depends on USB=y || USB=USB_CHIPIDEA 22 depends on USB=y || USB=USB_CHIPIDEA
23 depends on USB_EHCI_HCD 23 depends on USB_EHCI_HCD=y
24 select USB_EHCI_ROOT_HUB_TT 24 select USB_EHCI_ROOT_HUB_TT
25 help 25 help
26 Say Y here to enable host controller functionality of the 26 Say Y here to enable host controller functionality of the
diff --git a/drivers/usb/chipidea/ci13xxx_imx.c b/drivers/usb/chipidea/ci13xxx_imx.c
index 8faec9dbbb84..73f9d5f15adb 100644
--- a/drivers/usb/chipidea/ci13xxx_imx.c
+++ b/drivers/usb/chipidea/ci13xxx_imx.c
@@ -173,17 +173,10 @@ static int ci13xxx_imx_probe(struct platform_device *pdev)
173 173
174 ci13xxx_imx_platdata.phy = data->phy; 174 ci13xxx_imx_platdata.phy = data->phy;
175 175
176 if (!pdev->dev.dma_mask) { 176 if (!pdev->dev.dma_mask)
177 pdev->dev.dma_mask = devm_kzalloc(&pdev->dev, 177 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
178 sizeof(*pdev->dev.dma_mask), GFP_KERNEL); 178 if (!pdev->dev.coherent_dma_mask)
179 if (!pdev->dev.dma_mask) { 179 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
180 ret = -ENOMEM;
181 dev_err(&pdev->dev, "Failed to alloc dma_mask!\n");
182 goto err;
183 }
184 *pdev->dev.dma_mask = DMA_BIT_MASK(32);
185 dma_set_coherent_mask(&pdev->dev, *pdev->dev.dma_mask);
186 }
187 180
188 if (usbmisc_ops && usbmisc_ops->init) { 181 if (usbmisc_ops && usbmisc_ops->init) {
189 ret = usbmisc_ops->init(&pdev->dev); 182 ret = usbmisc_ops->init(&pdev->dev);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 450107e5f657..49b098bedf9b 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -370,11 +370,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
370 } 370 }
371 371
372 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 372 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
373 if (!res) {
374 dev_err(dev, "missing resource\n");
375 return -ENODEV;
376 }
377
378 base = devm_ioremap_resource(dev, res); 373 base = devm_ioremap_resource(dev, res);
379 if (IS_ERR(base)) 374 if (IS_ERR(base))
380 return PTR_ERR(base); 375 return PTR_ERR(base);
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index 8772b3659296..db535b0aa172 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -51,7 +51,7 @@ config USB_DYNAMIC_MINORS
51 51
52config USB_OTG 52config USB_OTG
53 bool "OTG support" 53 bool "OTG support"
54 depends on USB_SUSPEND 54 depends on PM_RUNTIME
55 default n 55 default n
56 help 56 help
57 The most notable feature of USB OTG is support for a 57 The most notable feature of USB OTG is support for a
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ab5638d9c707..a63598895077 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -88,6 +88,9 @@ static const struct usb_device_id usb_quirk_list[] = {
88 /* Edirol SD-20 */ 88 /* Edirol SD-20 */
89 { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, 89 { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME },
90 90
91 /* Alcor Micro Corp. Hub */
92 { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME },
93
91 /* appletouch */ 94 /* appletouch */
92 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, 95 { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME },
93 96
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index ea5ee9c21c35..757aa18027d0 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -19,21 +19,21 @@ choice
19 19
20config USB_DWC3_HOST 20config USB_DWC3_HOST
21 bool "Host only mode" 21 bool "Host only mode"
22 depends on USB 22 depends on USB=y || USB=USB_DWC3
23 help 23 help
24 Select this when you want to use DWC3 in host mode only, 24 Select this when you want to use DWC3 in host mode only,
25 thereby the gadget feature will be regressed. 25 thereby the gadget feature will be regressed.
26 26
27config USB_DWC3_GADGET 27config USB_DWC3_GADGET
28 bool "Gadget only mode" 28 bool "Gadget only mode"
29 depends on USB_GADGET 29 depends on USB_GADGET=y || USB_GADGET=USB_DWC3
30 help 30 help
31 Select this when you want to use DWC3 in gadget mode only, 31 Select this when you want to use DWC3 in gadget mode only,
32 thereby the host feature will be regressed. 32 thereby the host feature will be regressed.
33 33
34config USB_DWC3_DUAL_ROLE 34config USB_DWC3_DUAL_ROLE
35 bool "Dual Role mode" 35 bool "Dual Role mode"
36 depends on (USB && USB_GADGET) 36 depends on ((USB=y || USB=USB_DWC3) && (USB_GADGET=y || USB_GADGET=USB_DWC3))
37 help 37 help
38 This is the default mode of working of DWC3 controller where 38 This is the default mode of working of DWC3 controller where
39 both host and gadget features are enabled. 39 both host and gadget features are enabled.
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index a8afe6e26621..929e7dd6e58b 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -95,8 +95,6 @@ static int dwc3_exynos_remove_child(struct device *dev, void *unused)
95 return 0; 95 return 0;
96} 96}
97 97
98static u64 dwc3_exynos_dma_mask = DMA_BIT_MASK(32);
99
100static int dwc3_exynos_probe(struct platform_device *pdev) 98static int dwc3_exynos_probe(struct platform_device *pdev)
101{ 99{
102 struct dwc3_exynos *exynos; 100 struct dwc3_exynos *exynos;
@@ -118,7 +116,9 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
118 * Once we move to full device tree support this will vanish off. 116 * Once we move to full device tree support this will vanish off.
119 */ 117 */
120 if (!dev->dma_mask) 118 if (!dev->dma_mask)
121 dev->dma_mask = &dwc3_exynos_dma_mask; 119 dev->dma_mask = &dev->coherent_dma_mask;
120 if (!dev->coherent_dma_mask)
121 dev->coherent_dma_mask = DMA_BIT_MASK(32);
122 122
123 platform_set_drvdata(pdev, exynos); 123 platform_set_drvdata(pdev, exynos);
124 124
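
Several hunks in this series (ci13xxx_imx, dwc3-exynos and the EHCI/OHCI platform glue below) drop per-driver "static u64 ... = DMA_BIT_MASK(32)" variables in favour of the same fallback idiom. A sketch of it in isolation, for a hypothetical platform driver:

/* DMA-mask fallback idiom used throughout this series. */
static int example_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	/* Point the streaming mask at the device's own coherent mask
	 * and give both a 32-bit default if nothing set them up. */
	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	if (!dev->coherent_dma_mask)
		dev->coherent_dma_mask = DMA_BIT_MASK(32);

	/* ... rest of probe ... */
	return 0;
}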
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 83300d94a893..f41aa0d0c414 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -146,7 +146,6 @@ config USB_LPC32XX
146 depends on ARCH_LPC32XX 146 depends on ARCH_LPC32XX
147 depends on USB_PHY 147 depends on USB_PHY
148 select USB_ISP1301 148 select USB_ISP1301
149 select USB_OTG_UTILS
150 help 149 help
151 This option selects the USB device controller in the LPC32xx SoC. 150 This option selects the USB device controller in the LPC32xx SoC.
152 151
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index f2a970f75bfa..5a5128a226f7 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -1992,8 +1992,6 @@ err_map_regs:
1992err_get_hclk: 1992err_get_hclk:
1993 clk_put(pclk); 1993 clk_put(pclk);
1994 1994
1995 platform_set_drvdata(pdev, NULL);
1996
1997 return ret; 1995 return ret;
1998} 1996}
1999 1997
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 6e6518264c42..fd24cb4540a4 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -2334,21 +2334,11 @@ static int bcm63xx_udc_probe(struct platform_device *pdev)
2334 } 2334 }
2335 2335
2336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2336 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2337 if (!res) {
2338 dev_err(dev, "error finding USBD resource\n");
2339 return -ENXIO;
2340 }
2341
2342 udc->usbd_regs = devm_ioremap_resource(dev, res); 2337 udc->usbd_regs = devm_ioremap_resource(dev, res);
2343 if (IS_ERR(udc->usbd_regs)) 2338 if (IS_ERR(udc->usbd_regs))
2344 return PTR_ERR(udc->usbd_regs); 2339 return PTR_ERR(udc->usbd_regs);
2345 2340
2346 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 2341 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
2347 if (!res) {
2348 dev_err(dev, "error finding IUDMA resource\n");
2349 return -ENXIO;
2350 }
2351
2352 udc->iudma_regs = devm_ioremap_resource(dev, res); 2342 udc->iudma_regs = devm_ioremap_resource(dev, res);
2353 if (IS_ERR(udc->iudma_regs)) 2343 if (IS_ERR(udc->iudma_regs))
2354 return PTR_ERR(udc->iudma_regs); 2344 return PTR_ERR(udc->iudma_regs);
@@ -2420,7 +2410,6 @@ static int bcm63xx_udc_remove(struct platform_device *pdev)
2420 usb_del_gadget_udc(&udc->gadget); 2410 usb_del_gadget_udc(&udc->gadget);
2421 BUG_ON(udc->driver); 2411 BUG_ON(udc->driver);
2422 2412
2423 platform_set_drvdata(pdev, NULL);
2424 bcm63xx_uninit_udc_hw(udc); 2413 bcm63xx_uninit_udc_hw(udc);
2425 2414
2426 return 0; 2415 return 0;
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 3d5cfc9c2c78..80e7f75a56c7 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -821,8 +821,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
821 gi->gstrings[i] = NULL; 821 gi->gstrings[i] = NULL;
822 s = usb_gstrings_attach(&gi->cdev, gi->gstrings, 822 s = usb_gstrings_attach(&gi->cdev, gi->gstrings,
823 USB_GADGET_FIRST_AVAIL_IDX); 823 USB_GADGET_FIRST_AVAIL_IDX);
824 if (IS_ERR(s)) 824 if (IS_ERR(s)) {
825 ret = PTR_ERR(s);
825 goto err_comp_cleanup; 826 goto err_comp_cleanup;
827 }
826 828
827 gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id; 829 gi->cdev.desc.iManufacturer = s[USB_GADGET_MANUFACTURER_IDX].id;
828 gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id; 830 gi->cdev.desc.iProduct = s[USB_GADGET_PRODUCT_IDX].id;
@@ -847,8 +849,10 @@ static int configfs_composite_bind(struct usb_gadget *gadget,
847 } 849 }
848 cfg->gstrings[i] = NULL; 850 cfg->gstrings[i] = NULL;
849 s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1); 851 s = usb_gstrings_attach(&gi->cdev, cfg->gstrings, 1);
850 if (IS_ERR(s)) 852 if (IS_ERR(s)) {
853 ret = PTR_ERR(s);
851 goto err_comp_cleanup; 854 goto err_comp_cleanup;
855 }
852 c->iConfiguration = s[0].id; 856 c->iConfiguration = s[0].id;
853 } 857 }
854 858
diff --git a/drivers/usb/gadget/dummy_hcd.c b/drivers/usb/gadget/dummy_hcd.c
index a792e322f4f1..c588e8e486e5 100644
--- a/drivers/usb/gadget/dummy_hcd.c
+++ b/drivers/usb/gadget/dummy_hcd.c
@@ -1001,7 +1001,6 @@ static int dummy_udc_remove(struct platform_device *pdev)
1001 struct dummy *dum = platform_get_drvdata(pdev); 1001 struct dummy *dum = platform_get_drvdata(pdev);
1002 1002
1003 usb_del_gadget_udc(&dum->gadget); 1003 usb_del_gadget_udc(&dum->gadget);
1004 platform_set_drvdata(pdev, NULL);
1005 device_remove_file(&dum->gadget.dev, &dev_attr_function); 1004 device_remove_file(&dum->gadget.dev, &dev_attr_function);
1006 return 0; 1005 return 0;
1007} 1006}
@@ -2661,8 +2660,10 @@ static int __init init(void)
2661 } 2660 }
2662 for (i = 0; i < mod_data.num; i++) { 2661 for (i = 0; i < mod_data.num; i++) {
2663 dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL); 2662 dum[i] = kzalloc(sizeof(struct dummy), GFP_KERNEL);
2664 if (!dum[i]) 2663 if (!dum[i]) {
2664 retval = -ENOMEM;
2665 goto err_add_pdata; 2665 goto err_add_pdata;
2666 }
2666 retval = platform_device_add_data(the_hcd_pdev[i], &dum[i], 2667 retval = platform_device_add_data(the_hcd_pdev[i], &dum[i],
2667 sizeof(void *)); 2668 sizeof(void *));
2668 if (retval) 2669 if (retval)
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index d893d6929079..abf8a31ae146 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -816,6 +816,7 @@ ecm_unbind(struct usb_configuration *c, struct usb_function *f)
816 * @c: the configuration to support the network link 816 * @c: the configuration to support the network link
817 * @ethaddr: a buffer in which the ethernet address of the host side 817 * @ethaddr: a buffer in which the ethernet address of the host side
818 * side of the link was recorded 818 * side of the link was recorded
819 * @dev: eth_dev structure
819 * Context: single threaded during gadget setup 820 * Context: single threaded during gadget setup
820 * 821 *
821 * Returns zero on success, else negative errno. 822 * Returns zero on success, else negative errno.
diff --git a/drivers/usb/gadget/f_subset.c b/drivers/usb/gadget/f_subset.c
index 185d6f5e4e4d..7be04b342494 100644
--- a/drivers/usb/gadget/f_subset.c
+++ b/drivers/usb/gadget/f_subset.c
@@ -373,6 +373,7 @@ geth_unbind(struct usb_configuration *c, struct usb_function *f)
373 * @c: the configuration to support the network link 373 * @c: the configuration to support the network link
374 * @ethaddr: a buffer in which the ethernet address of the host side 374 * @ethaddr: a buffer in which the ethernet address of the host side
375 * side of the link was recorded 375 * side of the link was recorded
376 * @dev: eth_dev structure
376 * Context: single threaded during gadget setup 377 * Context: single threaded during gadget setup
377 * 378 *
378 * Returns zero on success, else negative errno. 379 * Returns zero on success, else negative errno.
diff --git a/drivers/usb/gadget/f_uac2.c b/drivers/usb/gadget/f_uac2.c
index c7468b6c07b0..03c1fb686644 100644
--- a/drivers/usb/gadget/f_uac2.c
+++ b/drivers/usb/gadget/f_uac2.c
@@ -456,8 +456,6 @@ static int snd_uac2_remove(struct platform_device *pdev)
456{ 456{
457 struct snd_card *card = platform_get_drvdata(pdev); 457 struct snd_card *card = platform_get_drvdata(pdev);
458 458
459 platform_set_drvdata(pdev, NULL);
460
461 if (card) 459 if (card)
462 return snd_card_free(card); 460 return snd_card_free(card);
463 461
diff --git a/drivers/usb/gadget/fusb300_udc.c b/drivers/usb/gadget/fusb300_udc.c
index cec8871b77f9..b8632d40f8bf 100644
--- a/drivers/usb/gadget/fusb300_udc.c
+++ b/drivers/usb/gadget/fusb300_udc.c
@@ -1461,8 +1461,10 @@ static int __init fusb300_probe(struct platform_device *pdev)
1461 1461
1462 fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, 1462 fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep,
1463 GFP_KERNEL); 1463 GFP_KERNEL);
1464 if (fusb300->ep0_req == NULL) 1464 if (fusb300->ep0_req == NULL) {
1465 ret = -ENOMEM;
1465 goto clean_up3; 1466 goto clean_up3;
1467 }
1466 1468
1467 init_controller(fusb300); 1469 init_controller(fusb300);
1468 ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget); 1470 ret = usb_add_gadget_udc(&pdev->dev, &fusb300->gadget);
diff --git a/drivers/usb/gadget/imx_udc.c b/drivers/usb/gadget/imx_udc.c
index b5cebd6b0d7a..9b2d24e4c95f 100644
--- a/drivers/usb/gadget/imx_udc.c
+++ b/drivers/usb/gadget/imx_udc.c
@@ -1511,8 +1511,6 @@ static int __exit imx_udc_remove(struct platform_device *pdev)
1511 if (pdata->exit) 1511 if (pdata->exit)
1512 pdata->exit(&pdev->dev); 1512 pdata->exit(&pdev->dev);
1513 1513
1514 platform_set_drvdata(pdev, NULL);
1515
1516 return 0; 1514 return 0;
1517} 1515}
1518 1516
diff --git a/drivers/usb/gadget/m66592-udc.c b/drivers/usb/gadget/m66592-udc.c
index 866ef0999247..51cfe72da5bb 100644
--- a/drivers/usb/gadget/m66592-udc.c
+++ b/drivers/usb/gadget/m66592-udc.c
@@ -1660,8 +1660,10 @@ static int __init m66592_probe(struct platform_device *pdev)
1660 m66592->epaddr2ep[0] = &m66592->ep[0]; 1660 m66592->epaddr2ep[0] = &m66592->ep[0];
1661 1661
1662 m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL); 1662 m66592->ep0_req = m66592_alloc_request(&m66592->ep[0].ep, GFP_KERNEL);
1663 if (m66592->ep0_req == NULL) 1663 if (m66592->ep0_req == NULL) {
1664 ret = -ENOMEM;
1664 goto clean_up3; 1665 goto clean_up3;
1666 }
1665 m66592->ep0_req->complete = nop_completion; 1667 m66592->ep0_req->complete = nop_completion;
1666 1668
1667 init_controller(m66592); 1669 init_controller(m66592);
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index ef47495dec8f..95c531d5aa4f 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -2236,7 +2236,6 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
2236 dev->transceiver = NULL; 2236 dev->transceiver = NULL;
2237 } 2237 }
2238 2238
2239 platform_set_drvdata(pdev, NULL);
2240 the_controller = NULL; 2239 the_controller = NULL;
2241 return 0; 2240 return 0;
2242} 2241}
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 0b742d171843..7ff7d9cf2061 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -1977,8 +1977,10 @@ static int __init r8a66597_probe(struct platform_device *pdev)
1977 1977
1978 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep, 1978 r8a66597->ep0_req = r8a66597_alloc_request(&r8a66597->ep[0].ep,
1979 GFP_KERNEL); 1979 GFP_KERNEL);
1980 if (r8a66597->ep0_req == NULL) 1980 if (r8a66597->ep0_req == NULL) {
1981 ret = -ENOMEM;
1981 goto clean_up3; 1982 goto clean_up3;
1983 }
1982 r8a66597->ep0_req->complete = nop_completion; 1984 r8a66597->ep0_req->complete = nop_completion;
1983 1985
1984 ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget); 1986 ret = usb_add_gadget_udc(&pdev->dev, &r8a66597->gadget);
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index a3cdc32115d5..af22f24046b2 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -437,7 +437,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
437 if (hs_req->req.length == 0) 437 if (hs_req->req.length == 0)
438 return; 438 return;
439 439
440 usb_gadget_unmap_request(&hsotg->gadget, hs_req, hs_ep->dir_in); 440 usb_gadget_unmap_request(&hsotg->gadget, req, hs_ep->dir_in);
441} 441}
442 442
443/** 443/**
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index d0e75e1b3ccb..09c4f70c93c4 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1851,6 +1851,7 @@ static int s3c2410_udc_probe(struct platform_device *pdev)
1851 irq = gpio_to_irq(udc_info->vbus_pin); 1851 irq = gpio_to_irq(udc_info->vbus_pin);
1852 if (irq < 0) { 1852 if (irq < 0) {
1853 dev_err(dev, "no irq for gpio vbus pin\n"); 1853 dev_err(dev, "no irq for gpio vbus pin\n");
1854 retval = irq;
1854 goto err_gpio_claim; 1855 goto err_gpio_claim;
1855 } 1856 }
1856 1857
@@ -1948,8 +1949,6 @@ static int s3c2410_udc_remove(struct platform_device *pdev)
1948 iounmap(base_addr); 1949 iounmap(base_addr);
1949 release_mem_region(rsrc_start, rsrc_len); 1950 release_mem_region(rsrc_start, rsrc_len);
1950 1951
1951 platform_set_drvdata(pdev, NULL);
1952
1953 if (!IS_ERR(udc_clock) && udc_clock != NULL) { 1952 if (!IS_ERR(udc_clock) && udc_clock != NULL) {
1954 clk_disable(udc_clock); 1953 clk_disable(udc_clock);
1955 clk_put(udc_clock); 1954 clk_put(udc_clock);
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 2cd6262e8b71..0deb9d6cde26 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -284,12 +284,16 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
284 ss_opts->bulk_buflen = gzero_options.bulk_buflen; 284 ss_opts->bulk_buflen = gzero_options.bulk_buflen;
285 285
286 func_ss = usb_get_function(func_inst_ss); 286 func_ss = usb_get_function(func_inst_ss);
287 if (IS_ERR(func_ss)) 287 if (IS_ERR(func_ss)) {
288 status = PTR_ERR(func_ss);
288 goto err_put_func_inst_ss; 289 goto err_put_func_inst_ss;
290 }
289 291
290 func_inst_lb = usb_get_function_instance("Loopback"); 292 func_inst_lb = usb_get_function_instance("Loopback");
291 if (IS_ERR(func_inst_lb)) 293 if (IS_ERR(func_inst_lb)) {
294 status = PTR_ERR(func_inst_lb);
292 goto err_put_func_ss; 295 goto err_put_func_ss;
296 }
293 297
294 lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst); 298 lb_opts = container_of(func_inst_lb, struct f_lb_opts, func_inst);
295 lb_opts->bulk_buflen = gzero_options.bulk_buflen; 299 lb_opts->bulk_buflen = gzero_options.bulk_buflen;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index de94f2699063..344d5e2f87d7 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -507,7 +507,7 @@ endif # USB_OHCI_HCD
507 507
508config USB_UHCI_HCD 508config USB_UHCI_HCD
509 tristate "UHCI HCD (most Intel and VIA) support" 509 tristate "UHCI HCD (most Intel and VIA) support"
510 depends on PCI || SPARC_LEON || ARCH_VT8500 510 depends on PCI || USB_UHCI_SUPPORT_NON_PCI_HC
511 ---help--- 511 ---help---
512 The Universal Host Controller Interface is a standard by Intel for 512 The Universal Host Controller Interface is a standard by Intel for
513 accessing the USB hardware in the PC (which is also called the USB 513 accessing the USB hardware in the PC (which is also called the USB
@@ -524,26 +524,19 @@ config USB_UHCI_HCD
524 524
525config USB_UHCI_SUPPORT_NON_PCI_HC 525config USB_UHCI_SUPPORT_NON_PCI_HC
526 bool 526 bool
527 depends on USB_UHCI_HCD 527 default y if (SPARC_LEON || USB_UHCI_PLATFORM)
528 default y if (SPARC_LEON || ARCH_VT8500)
529 528
530config USB_UHCI_PLATFORM 529config USB_UHCI_PLATFORM
531 bool "Generic UHCI Platform Driver support" 530 bool
532 depends on USB_UHCI_SUPPORT_NON_PCI_HC
533 default y if ARCH_VT8500 531 default y if ARCH_VT8500
534 ---help---
535 Enable support for generic UHCI platform devices that require no
536 additional configuration.
537 532
538config USB_UHCI_BIG_ENDIAN_MMIO 533config USB_UHCI_BIG_ENDIAN_MMIO
539 bool 534 bool
540 depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON 535 default y if SPARC_LEON
541 default y
542 536
543config USB_UHCI_BIG_ENDIAN_DESC 537config USB_UHCI_BIG_ENDIAN_DESC
544 bool 538 bool
545 depends on USB_UHCI_SUPPORT_NON_PCI_HC && SPARC_LEON 539 default y if SPARC_LEON
546 default y
547 540
548config USB_FHCI_HCD 541config USB_FHCI_HCD
549 tristate "Freescale QE USB Host Controller support" 542 tristate "Freescale QE USB Host Controller support"
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 66420097c242..02f4611faa62 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -63,8 +63,6 @@ static void atmel_stop_ehci(struct platform_device *pdev)
63 63
64/*-------------------------------------------------------------------------*/ 64/*-------------------------------------------------------------------------*/
65 65
66static u64 at91_ehci_dma_mask = DMA_BIT_MASK(32);
67
68static int ehci_atmel_drv_probe(struct platform_device *pdev) 66static int ehci_atmel_drv_probe(struct platform_device *pdev)
69{ 67{
70 struct usb_hcd *hcd; 68 struct usb_hcd *hcd;
@@ -93,7 +91,9 @@ static int ehci_atmel_drv_probe(struct platform_device *pdev)
93 * Once we have dma capability bindings this can go away. 91 * Once we have dma capability bindings this can go away.
94 */ 92 */
95 if (!pdev->dev.dma_mask) 93 if (!pdev->dev.dma_mask)
96 pdev->dev.dma_mask = &at91_ehci_dma_mask; 94 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
95 if (!pdev->dev.coherent_dma_mask)
96 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
97 97
98 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev)); 98 hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
99 if (!hcd) { 99 if (!hcd) {
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 312fc10da3c7..246e124e6ac5 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1286,23 +1286,6 @@ MODULE_LICENSE ("GPL");
1286#define PLATFORM_DRIVER ehci_hcd_sead3_driver 1286#define PLATFORM_DRIVER ehci_hcd_sead3_driver
1287#endif 1287#endif
1288 1288
1289#if !IS_ENABLED(CONFIG_USB_EHCI_PCI) && \
1290 !IS_ENABLED(CONFIG_USB_EHCI_HCD_PLATFORM) && \
1291 !IS_ENABLED(CONFIG_USB_CHIPIDEA_HOST) && \
1292 !IS_ENABLED(CONFIG_USB_EHCI_MXC) && \
1293 !IS_ENABLED(CONFIG_USB_EHCI_HCD_OMAP) && \
1294 !IS_ENABLED(CONFIG_USB_EHCI_HCD_ORION) && \
1295 !IS_ENABLED(CONFIG_USB_EHCI_HCD_SPEAR) && \
1296 !IS_ENABLED(CONFIG_USB_EHCI_S5P) && \
1297 !IS_ENABLED(CONFIG_USB_EHCI_HCD_AT91) && \
1298 !IS_ENABLED(CONFIG_USB_EHCI_MSM) && \
1299 !defined(PLATFORM_DRIVER) && \
1300 !defined(PS3_SYSTEM_BUS_DRIVER) && \
1301 !defined(OF_PLATFORM_DRIVER) && \
1302 !defined(XILINX_OF_PLATFORM_DRIVER)
1303#error "missing bus glue for ehci-hcd"
1304#endif
1305
1306static int __init ehci_hcd_init(void) 1289static int __init ehci_hcd_init(void)
1307{ 1290{
1308 int retval = 0; 1291 int retval = 0;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 3d1491b5f360..16d7150e8557 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -90,8 +90,6 @@ static const struct ehci_driver_overrides ehci_omap_overrides __initdata = {
90 .extra_priv_size = sizeof(struct omap_hcd), 90 .extra_priv_size = sizeof(struct omap_hcd),
91}; 91};
92 92
93static u64 omap_ehci_dma_mask = DMA_BIT_MASK(32);
94
95/** 93/**
96 * ehci_hcd_omap_probe - initialize TI-based HCDs 94 * ehci_hcd_omap_probe - initialize TI-based HCDs
97 * 95 *
@@ -146,8 +144,10 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
146 * Since shared usb code relies on it, set it here for now. 144 * Since shared usb code relies on it, set it here for now.
147 * Once we have dma capability bindings this can go away. 145 * Once we have dma capability bindings this can go away.
148 */ 146 */
149 if (!pdev->dev.dma_mask) 147 if (!dev->dma_mask)
150 pdev->dev.dma_mask = &omap_ehci_dma_mask; 148 dev->dma_mask = &dev->coherent_dma_mask;
149 if (!dev->coherent_dma_mask)
150 dev->coherent_dma_mask = DMA_BIT_MASK(32);
151 151
152 hcd = usb_create_hcd(&ehci_omap_hc_driver, dev, 152 hcd = usb_create_hcd(&ehci_omap_hc_driver, dev,
153 dev_name(dev)); 153 dev_name(dev));
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 54c579485150..efbc588b48c5 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -137,8 +137,6 @@ ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
137 } 137 }
138} 138}
139 139
140static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32);
141
142static int ehci_orion_drv_probe(struct platform_device *pdev) 140static int ehci_orion_drv_probe(struct platform_device *pdev)
143{ 141{
144 struct orion_ehci_data *pd = pdev->dev.platform_data; 142 struct orion_ehci_data *pd = pdev->dev.platform_data;
@@ -183,7 +181,9 @@ static int ehci_orion_drv_probe(struct platform_device *pdev)
183 * now. Once we have dma capability bindings this can go away. 181 * now. Once we have dma capability bindings this can go away.
184 */ 182 */
185 if (!pdev->dev.dma_mask) 183 if (!pdev->dev.dma_mask)
186 pdev->dev.dma_mask = &ehci_orion_dma_mask; 184 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
185 if (!pdev->dev.coherent_dma_mask)
186 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
187 187
188 if (!request_mem_region(res->start, resource_size(res), 188 if (!request_mem_region(res->start, resource_size(res),
189 ehci_orion_hc_driver.description)) { 189 ehci_orion_hc_driver.description)) {
diff --git a/drivers/usb/host/ehci-s5p.c b/drivers/usb/host/ehci-s5p.c
index 635775278c7f..379037f51a2f 100644
--- a/drivers/usb/host/ehci-s5p.c
+++ b/drivers/usb/host/ehci-s5p.c
@@ -71,8 +71,6 @@ static void s5p_setup_vbus_gpio(struct platform_device *pdev)
71 dev_err(dev, "can't request ehci vbus gpio %d", gpio); 71 dev_err(dev, "can't request ehci vbus gpio %d", gpio);
72} 72}
73 73
74static u64 ehci_s5p_dma_mask = DMA_BIT_MASK(32);
75
76static int s5p_ehci_probe(struct platform_device *pdev) 74static int s5p_ehci_probe(struct platform_device *pdev)
77{ 75{
78 struct s5p_ehci_platdata *pdata = pdev->dev.platform_data; 76 struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
@@ -90,7 +88,7 @@ static int s5p_ehci_probe(struct platform_device *pdev)
90 * Once we move to full device tree support this will vanish off. 88 * Once we move to full device tree support this will vanish off.
91 */ 89 */
92 if (!pdev->dev.dma_mask) 90 if (!pdev->dev.dma_mask)
93 pdev->dev.dma_mask = &ehci_s5p_dma_mask; 91 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
94 if (!pdev->dev.coherent_dma_mask) 92 if (!pdev->dev.coherent_dma_mask)
95 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 93 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
96 94
@@ -107,6 +105,7 @@ static int s5p_ehci_probe(struct platform_device *pdev)
107 if (IS_ERR(phy)) { 105 if (IS_ERR(phy)) {
108 /* Fallback to pdata */ 106 /* Fallback to pdata */
109 if (!pdata) { 107 if (!pdata) {
108 usb_put_hcd(hcd);
110 dev_warn(&pdev->dev, "no platform data or transceiver defined\n"); 109 dev_warn(&pdev->dev, "no platform data or transceiver defined\n");
111 return -EPROBE_DEFER; 110 return -EPROBE_DEFER;
112 } else { 111 } else {
diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c
index 61ecfb3d52f5..bd3e5cbc6240 100644
--- a/drivers/usb/host/ehci-spear.c
+++ b/drivers/usb/host/ehci-spear.c
@@ -58,8 +58,6 @@ static int ehci_spear_drv_resume(struct device *dev)
58static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend, 58static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend,
59 ehci_spear_drv_resume); 59 ehci_spear_drv_resume);
60 60
61static u64 spear_ehci_dma_mask = DMA_BIT_MASK(32);
62
63static int spear_ehci_hcd_drv_probe(struct platform_device *pdev) 61static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
64{ 62{
65 struct usb_hcd *hcd ; 63 struct usb_hcd *hcd ;
@@ -84,7 +82,9 @@ static int spear_ehci_hcd_drv_probe(struct platform_device *pdev)
84 * Once we have dma capability bindings this can go away. 82 * Once we have dma capability bindings this can go away.
85 */ 83 */
86 if (!pdev->dev.dma_mask) 84 if (!pdev->dev.dma_mask)
87 pdev->dev.dma_mask = &spear_ehci_dma_mask; 85 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
86 if (!pdev->dev.coherent_dma_mask)
87 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
88 88
89 usbh_clk = devm_clk_get(&pdev->dev, NULL); 89 usbh_clk = devm_clk_get(&pdev->dev, NULL);
90 if (IS_ERR(usbh_clk)) { 90 if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index e3eddc31ac83..59d111bf44a9 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -637,8 +637,6 @@ static void tegra_ehci_set_phcd(struct usb_phy *x, bool enable)
637 writel(val, base + TEGRA_USB_PORTSC1); 637 writel(val, base + TEGRA_USB_PORTSC1);
638} 638}
639 639
640static u64 tegra_ehci_dma_mask = DMA_BIT_MASK(32);
641
642static int tegra_ehci_probe(struct platform_device *pdev) 640static int tegra_ehci_probe(struct platform_device *pdev)
643{ 641{
644 struct resource *res; 642 struct resource *res;
@@ -661,7 +659,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
661 * Once we have dma capability bindings this can go away. 659 * Once we have dma capability bindings this can go away.
662 */ 660 */
663 if (!pdev->dev.dma_mask) 661 if (!pdev->dev.dma_mask)
664 pdev->dev.dma_mask = &tegra_ehci_dma_mask; 662 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
663 if (!pdev->dev.coherent_dma_mask)
664 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
665 665
666 setup_vbus_gpio(pdev, pdata); 666 setup_vbus_gpio(pdev, pdata);
667 667
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 125e261f5bfc..2facee53eab1 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
1739 int retval = 1; 1739 int retval = 1;
1740 unsigned long flags; 1740 unsigned long flags;
1741 1741
1742 /* if !USB_SUSPEND, root hub timers won't get shut down ... */ 1742 /* if !PM_RUNTIME, root hub timers won't get shut down ... */
1743 if (!HC_IS_RUNNING(hcd->state)) 1743 if (!HC_IS_RUNNING(hcd->state))
1744 return 0; 1744 return 0;
1745 1745
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index bbb791bd7617..a13709ee4e5d 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -373,8 +373,10 @@ static int isp1760_plat_probe(struct platform_device *pdev)
373 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 373 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
374 if (!irq_res) { 374 if (!irq_res) {
375 pr_warning("isp1760: IRQ resource not available\n"); 375 pr_warning("isp1760: IRQ resource not available\n");
376 return -ENODEV; 376 ret = -ENODEV;
377 goto cleanup;
377 } 378 }
379
378 irqflags |= irq_res->flags & IRQF_TRIGGER_MASK; 380 irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
379 381
380 if (priv) { 382 if (priv) {
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index a0cb44f0e724..2ee1496dbc1d 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -504,8 +504,6 @@ static const struct of_device_id at91_ohci_dt_ids[] = {
504 504
505MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids); 505MODULE_DEVICE_TABLE(of, at91_ohci_dt_ids);
506 506
507static u64 at91_ohci_dma_mask = DMA_BIT_MASK(32);
508
509static int ohci_at91_of_init(struct platform_device *pdev) 507static int ohci_at91_of_init(struct platform_device *pdev)
510{ 508{
511 struct device_node *np = pdev->dev.of_node; 509 struct device_node *np = pdev->dev.of_node;
@@ -522,7 +520,9 @@ static int ohci_at91_of_init(struct platform_device *pdev)
522 * Once we have dma capability bindings this can go away. 520 * Once we have dma capability bindings this can go away.
523 */ 521 */
524 if (!pdev->dev.dma_mask) 522 if (!pdev->dev.dma_mask)
525 pdev->dev.dma_mask = &at91_ohci_dma_mask; 523 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
524 if (!pdev->dev.coherent_dma_mask)
525 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
526 526
527 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 527 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
528 if (!pdata) 528 if (!pdata)
diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
index 07592c00af26..b0b542c14e31 100644
--- a/drivers/usb/host/ohci-exynos.c
+++ b/drivers/usb/host/ohci-exynos.c
@@ -98,8 +98,6 @@ static const struct hc_driver exynos_ohci_hc_driver = {
98 .start_port_reset = ohci_start_port_reset, 98 .start_port_reset = ohci_start_port_reset,
99}; 99};
100 100
101static u64 ohci_exynos_dma_mask = DMA_BIT_MASK(32);
102
103static int exynos_ohci_probe(struct platform_device *pdev) 101static int exynos_ohci_probe(struct platform_device *pdev)
104{ 102{
105 struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data; 103 struct exynos4_ohci_platdata *pdata = pdev->dev.platform_data;
@@ -117,7 +115,7 @@ static int exynos_ohci_probe(struct platform_device *pdev)
117 * Once we move to full device tree support this will vanish off. 115 * Once we move to full device tree support this will vanish off.
118 */ 116 */
119 if (!pdev->dev.dma_mask) 117 if (!pdev->dev.dma_mask)
120 pdev->dev.dma_mask = &ohci_exynos_dma_mask; 118 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
121 if (!pdev->dev.coherent_dma_mask) 119 if (!pdev->dev.coherent_dma_mask)
122 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 120 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
123 121
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 9e6de9586ae4..fc627fd54116 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -233,14 +233,14 @@ static int ohci_urb_enqueue (
233 urb->start_frame = frame; 233 urb->start_frame = frame;
234 } 234 }
235 } else if (ed->type == PIPE_ISOCHRONOUS) { 235 } else if (ed->type == PIPE_ISOCHRONOUS) {
236 u16 next = ohci_frame_no(ohci) + 2; 236 u16 next = ohci_frame_no(ohci) + 1;
237 u16 frame = ed->last_iso + ed->interval; 237 u16 frame = ed->last_iso + ed->interval;
238 238
239 /* Behind the scheduling threshold? */ 239 /* Behind the scheduling threshold? */
240 if (unlikely(tick_before(frame, next))) { 240 if (unlikely(tick_before(frame, next))) {
241 241
242 /* USB_ISO_ASAP: Round up to the first available slot */ 242 /* USB_ISO_ASAP: Round up to the first available slot */
243 if (urb->transfer_flags & URB_ISO_ASAP) 243 if (urb->transfer_flags & URB_ISO_ASAP) {
244 frame += (next - frame + ed->interval - 1) & 244 frame += (next - frame + ed->interval - 1) &
245 -ed->interval; 245 -ed->interval;
246 246
@@ -248,21 +248,25 @@ static int ohci_urb_enqueue (
248 * Not ASAP: Use the next slot in the stream. If 248 * Not ASAP: Use the next slot in the stream. If
249 * the entire URB falls before the threshold, fail. 249 * the entire URB falls before the threshold, fail.
250 */ 250 */
251 else if (tick_before(frame + ed->interval * 251 } else {
252 if (tick_before(frame + ed->interval *
252 (urb->number_of_packets - 1), next)) { 253 (urb->number_of_packets - 1), next)) {
253 retval = -EXDEV; 254 retval = -EXDEV;
254 usb_hcd_unlink_urb_from_ep(hcd, urb); 255 usb_hcd_unlink_urb_from_ep(hcd, urb);
255 goto fail; 256 goto fail;
256 } 257 }
257 258
258 /* 259 /*
259 * Some OHCI hardware doesn't handle late TDs 260 * Some OHCI hardware doesn't handle late TDs
260 * correctly. After retiring them it proceeds to 261 * correctly. After retiring them it proceeds
261 * the next ED instead of the next TD. Therefore 262 * to the next ED instead of the next TD.
262 * we have to omit the late TDs entirely. 263 * Therefore we have to omit the late TDs
263 */ 264 * entirely.
264 urb_priv->td_cnt = DIV_ROUND_UP(next - frame, 265 */
265 ed->interval); 266 urb_priv->td_cnt = DIV_ROUND_UP(
267 (u16) (next - frame),
268 ed->interval);
269 }
266 } 270 }
267 urb->start_frame = frame; 271 urb->start_frame = frame;
268 } 272 }
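
In the URB_ISO_ASAP branch above, "frame" is advanced by rounding the gap to "next" up to a whole number of intervals; the mask form (gap + interval - 1) & -interval relies on ed->interval being a power of two, as it is in OHCI's periodic schedule. A quick userspace check of the arithmetic, illustrative only:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t frame = 100, next = 105, interval = 8;

	/* Round (next - frame) up to a multiple of interval:
	 * (5 + 7) & ~7 == 8, so frame moves 100 -> 108, the first
	 * slot in the 8-frame stream that is not before "next". */
	frame += (uint16_t)((next - frame + interval - 1) & -interval);
	printf("%u\n", (unsigned int)frame);	/* prints 108 */
	return 0;
}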
diff --git a/drivers/usb/host/ohci-nxp.c b/drivers/usb/host/ohci-nxp.c
index f4988fbe78e7..5d7eb72c5064 100644
--- a/drivers/usb/host/ohci-nxp.c
+++ b/drivers/usb/host/ohci-nxp.c
@@ -223,8 +223,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
223 223
224 isp1301_i2c_client = isp1301_get_client(isp1301_node); 224 isp1301_i2c_client = isp1301_get_client(isp1301_node);
225 if (!isp1301_i2c_client) { 225 if (!isp1301_i2c_client) {
226 ret = -EPROBE_DEFER; 226 return -EPROBE_DEFER;
227 goto out;
228 } 227 }
229 228
230 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); 229 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
@@ -234,7 +233,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
234 if (usb_disabled()) { 233 if (usb_disabled()) {
235 dev_err(&pdev->dev, "USB is disabled\n"); 234 dev_err(&pdev->dev, "USB is disabled\n");
236 ret = -ENODEV; 235 ret = -ENODEV;
237 goto out; 236 goto fail_disable;
238 } 237 }
239 238
240 /* Enable AHB slave USB clock, needed for further USB clock control */ 239 /* Enable AHB slave USB clock, needed for further USB clock control */
@@ -245,19 +244,19 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
245 if (IS_ERR(usb_pll_clk)) { 244 if (IS_ERR(usb_pll_clk)) {
246 dev_err(&pdev->dev, "failed to acquire USB PLL\n"); 245 dev_err(&pdev->dev, "failed to acquire USB PLL\n");
247 ret = PTR_ERR(usb_pll_clk); 246 ret = PTR_ERR(usb_pll_clk);
248 goto out1; 247 goto fail_pll;
249 } 248 }
250 249
251 ret = clk_enable(usb_pll_clk); 250 ret = clk_enable(usb_pll_clk);
252 if (ret < 0) { 251 if (ret < 0) {
253 dev_err(&pdev->dev, "failed to start USB PLL\n"); 252 dev_err(&pdev->dev, "failed to start USB PLL\n");
254 goto out2; 253 goto fail_pllen;
255 } 254 }
256 255
257 ret = clk_set_rate(usb_pll_clk, 48000); 256 ret = clk_set_rate(usb_pll_clk, 48000);
258 if (ret < 0) { 257 if (ret < 0) {
259 dev_err(&pdev->dev, "failed to set USB clock rate\n"); 258 dev_err(&pdev->dev, "failed to set USB clock rate\n");
260 goto out3; 259 goto fail_rate;
261 } 260 }
262 261
263 /* Enable USB device clock */ 262 /* Enable USB device clock */
@@ -265,13 +264,13 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
265 if (IS_ERR(usb_dev_clk)) { 264 if (IS_ERR(usb_dev_clk)) {
266 dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); 265 dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
267 ret = PTR_ERR(usb_dev_clk); 266 ret = PTR_ERR(usb_dev_clk);
268 goto out4; 267 goto fail_dev;
269 } 268 }
270 269
271 ret = clk_enable(usb_dev_clk); 270 ret = clk_enable(usb_dev_clk);
272 if (ret < 0) { 271 if (ret < 0) {
273 dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); 272 dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
274 goto out5; 273 goto fail_deven;
275 } 274 }
276 275
277 /* Enable USB otg clocks */ 276 /* Enable USB otg clocks */
@@ -279,7 +278,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
279 if (IS_ERR(usb_otg_clk)) { 278 if (IS_ERR(usb_otg_clk)) {
280 dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n"); 279 dev_err(&pdev->dev, "failed to acquire USB DEV Clock\n");
281 ret = PTR_ERR(usb_otg_clk); 280 ret = PTR_ERR(usb_otg_clk);
282 goto out6; 281 goto fail_otg;
283 } 282 }
284 283
285 __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL); 284 __raw_writel(__raw_readl(USB_CTRL) | USB_HOST_NEED_CLK_EN, USB_CTRL);
@@ -287,7 +286,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
287 ret = clk_enable(usb_otg_clk); 286 ret = clk_enable(usb_otg_clk);
288 if (ret < 0) { 287 if (ret < 0) {
289 dev_err(&pdev->dev, "failed to start USB DEV Clock\n"); 288 dev_err(&pdev->dev, "failed to start USB DEV Clock\n");
290 goto out7; 289 goto fail_otgen;
291 } 290 }
292 291
293 isp1301_configure(); 292 isp1301_configure();
@@ -296,20 +295,14 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
296 if (!hcd) { 295 if (!hcd) {
297 dev_err(&pdev->dev, "Failed to allocate HC buffer\n"); 296 dev_err(&pdev->dev, "Failed to allocate HC buffer\n");
298 ret = -ENOMEM; 297 ret = -ENOMEM;
299 goto out8; 298 goto fail_hcd;
300 } 299 }
301 300
302 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 301 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
303 if (!res) {
304 dev_err(&pdev->dev, "Failed to get MEM resource\n");
305 ret = -ENOMEM;
306 goto out8;
307 }
308
309 hcd->regs = devm_ioremap_resource(&pdev->dev, res); 302 hcd->regs = devm_ioremap_resource(&pdev->dev, res);
310 if (IS_ERR(hcd->regs)) { 303 if (IS_ERR(hcd->regs)) {
311 ret = PTR_ERR(hcd->regs); 304 ret = PTR_ERR(hcd->regs);
312 goto out8; 305 goto fail_resource;
313 } 306 }
314 hcd->rsrc_start = res->start; 307 hcd->rsrc_start = res->start;
315 hcd->rsrc_len = resource_size(res); 308 hcd->rsrc_len = resource_size(res);
@@ -317,7 +310,7 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
317 irq = platform_get_irq(pdev, 0); 310 irq = platform_get_irq(pdev, 0);
318 if (irq < 0) { 311 if (irq < 0) {
319 ret = -ENXIO; 312 ret = -ENXIO;
320 goto out8; 313 goto fail_resource;
321 } 314 }
322 315
323 nxp_start_hc(); 316 nxp_start_hc();
@@ -331,23 +324,24 @@ static int usb_hcd_nxp_probe(struct platform_device *pdev)
331 return ret; 324 return ret;
332 325
333 nxp_stop_hc(); 326 nxp_stop_hc();
334out8: 327fail_resource:
335 usb_put_hcd(hcd); 328 usb_put_hcd(hcd);
336out7: 329fail_hcd:
337 clk_disable(usb_otg_clk); 330 clk_disable(usb_otg_clk);
338out6: 331fail_otgen:
339 clk_put(usb_otg_clk); 332 clk_put(usb_otg_clk);
340out5: 333fail_otg:
341 clk_disable(usb_dev_clk); 334 clk_disable(usb_dev_clk);
342out4: 335fail_deven:
343 clk_put(usb_dev_clk); 336 clk_put(usb_dev_clk);
344out3: 337fail_dev:
338fail_rate:
345 clk_disable(usb_pll_clk); 339 clk_disable(usb_pll_clk);
346out2: 340fail_pllen:
347 clk_put(usb_pll_clk); 341 clk_put(usb_pll_clk);
348out1: 342fail_pll:
343fail_disable:
349 isp1301_i2c_client = NULL; 344 isp1301_i2c_client = NULL;
350out:
351 return ret; 345 return ret;
352} 346}
353 347
diff --git a/drivers/usb/host/ohci-omap3.c b/drivers/usb/host/ohci-omap3.c
index ddfc31427bc0..8663851c8d8e 100644
--- a/drivers/usb/host/ohci-omap3.c
+++ b/drivers/usb/host/ohci-omap3.c
@@ -114,8 +114,6 @@ static const struct hc_driver ohci_omap3_hc_driver = {
114 114
115/*-------------------------------------------------------------------------*/ 115/*-------------------------------------------------------------------------*/
116 116
117static u64 omap_ohci_dma_mask = DMA_BIT_MASK(32);
118
119/* 117/*
120 * configure so an HC device and id are always provided 118 * configure so an HC device and id are always provided
121 * always called with process context; sleeping is OK 119 * always called with process context; sleeping is OK
@@ -168,8 +166,10 @@ static int ohci_hcd_omap3_probe(struct platform_device *pdev)
168 * Since shared usb code relies on it, set it here for now. 166 * Since shared usb code relies on it, set it here for now.
169 * Once we have dma capability bindings this can go away. 167 * Once we have dma capability bindings this can go away.
170 */ 168 */
171 if (!pdev->dev.dma_mask) 169 if (!dev->dma_mask)
172 pdev->dev.dma_mask = &omap_ohci_dma_mask; 170 dev->dma_mask = &dev->coherent_dma_mask;
171 if (!dev->coherent_dma_mask)
172 dev->coherent_dma_mask = DMA_BIT_MASK(32);
173 173
174 hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev, 174 hcd = usb_create_hcd(&ohci_omap3_hc_driver, dev,
175 dev_name(dev)); 175 dev_name(dev));
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index efe71f3ca477..279b2ef17411 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -282,8 +282,6 @@ static const struct of_device_id pxa_ohci_dt_ids[] = {
282 282
283MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids); 283MODULE_DEVICE_TABLE(of, pxa_ohci_dt_ids);
284 284
285static u64 pxa_ohci_dma_mask = DMA_BIT_MASK(32);
286
287static int ohci_pxa_of_init(struct platform_device *pdev) 285static int ohci_pxa_of_init(struct platform_device *pdev)
288{ 286{
289 struct device_node *np = pdev->dev.of_node; 287 struct device_node *np = pdev->dev.of_node;
@@ -298,7 +296,9 @@ static int ohci_pxa_of_init(struct platform_device *pdev)
298 * Once we have dma capability bindings this can go away. 296 * Once we have dma capability bindings this can go away.
299 */ 297 */
300 if (!pdev->dev.dma_mask) 298 if (!pdev->dev.dma_mask)
301 pdev->dev.dma_mask = &pxa_ohci_dma_mask; 299 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
300 if (!pdev->dev.coherent_dma_mask)
301 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
302 302
303 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 303 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
304 if (!pdata) 304 if (!pdata)
diff --git a/drivers/usb/host/ohci-spear.c b/drivers/usb/host/ohci-spear.c
index 9020bf0e2eca..3e19e0170d11 100644
--- a/drivers/usb/host/ohci-spear.c
+++ b/drivers/usb/host/ohci-spear.c
@@ -91,8 +91,6 @@ static const struct hc_driver ohci_spear_hc_driver = {
91 .start_port_reset = ohci_start_port_reset, 91 .start_port_reset = ohci_start_port_reset,
92}; 92};
93 93
94static u64 spear_ohci_dma_mask = DMA_BIT_MASK(32);
95
96static int spear_ohci_hcd_drv_probe(struct platform_device *pdev) 94static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
97{ 95{
98 const struct hc_driver *driver = &ohci_spear_hc_driver; 96 const struct hc_driver *driver = &ohci_spear_hc_driver;
@@ -114,7 +112,9 @@ static int spear_ohci_hcd_drv_probe(struct platform_device *pdev)
114 * Once we have dma capability bindings this can go away. 112 * Once we have dma capability bindings this can go away.
115 */ 113 */
116 if (!pdev->dev.dma_mask) 114 if (!pdev->dev.dma_mask)
117 pdev->dev.dma_mask = &spear_ohci_dma_mask; 115 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
116 if (!pdev->dev.coherent_dma_mask)
117 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
118 118
119 usbh_clk = devm_clk_get(&pdev->dev, NULL); 119 usbh_clk = devm_clk_get(&pdev->dev, NULL);
120 if (IS_ERR(usbh_clk)) { 120 if (IS_ERR(usbh_clk)) {
diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c
index 4f0f0339532f..0f401dbfaf07 100644
--- a/drivers/usb/host/oxu210hp-hcd.c
+++ b/drivers/usb/host/oxu210hp-hcd.c
@@ -3084,7 +3084,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
3084 int ports, i, retval = 1; 3084 int ports, i, retval = 1;
3085 unsigned long flags; 3085 unsigned long flags;
3086 3086
3087 /* if !USB_SUSPEND, root hub timers won't get shut down ... */ 3087 /* if !PM_RUNTIME, root hub timers won't get shut down ... */
3088 if (!HC_IS_RUNNING(hcd->state)) 3088 if (!HC_IS_RUNNING(hcd->state))
3089 return 0; 3089 return 0;
3090 3090
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index ad4483efb6d6..b2ec7fe758dd 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -22,7 +22,7 @@
22 * and usb-storage. 22 * and usb-storage.
23 * 23 *
24 * TODO: 24 * TODO:
25 * - usb suspend/resume triggered by sl811 (with USB_SUSPEND) 25 * - usb suspend/resume triggered by sl811 (with PM_RUNTIME)
26 * - various issues noted in the code 26 * - various issues noted in the code
27 * - performance work; use both register banks; ... 27 * - performance work; use both register banks; ...
28 * - use urb->iso_frame_desc[] with ISO transfers 28 * - use urb->iso_frame_desc[] with ISO transfers
diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c
index f87bee6d2789..9189bc984c98 100644
--- a/drivers/usb/host/uhci-hub.c
+++ b/drivers/usb/host/uhci-hub.c
@@ -225,7 +225,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf)
225 /* auto-stop if nothing connected for 1 second */ 225 /* auto-stop if nothing connected for 1 second */
226 if (any_ports_active(uhci)) 226 if (any_ports_active(uhci))
227 uhci->rh_state = UHCI_RH_RUNNING; 227 uhci->rh_state = UHCI_RH_RUNNING;
228 else if (time_after_eq(jiffies, uhci->auto_stop_time)) 228 else if (time_after_eq(jiffies, uhci->auto_stop_time) &&
229 !uhci->wait_for_hp)
229 suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); 230 suspend_rh(uhci, UHCI_RH_AUTO_STOPPED);
230 break; 231 break;
231 232
diff --git a/drivers/usb/host/uhci-platform.c b/drivers/usb/host/uhci-platform.c
index 8c4dace4b14a..f1db61ada6a8 100644
--- a/drivers/usb/host/uhci-platform.c
+++ b/drivers/usb/host/uhci-platform.c
@@ -60,8 +60,6 @@ static const struct hc_driver uhci_platform_hc_driver = {
60 .hub_control = uhci_hub_control, 60 .hub_control = uhci_hub_control,
61}; 61};
62 62
63static u64 platform_uhci_dma_mask = DMA_BIT_MASK(32);
64
65static int uhci_hcd_platform_probe(struct platform_device *pdev) 63static int uhci_hcd_platform_probe(struct platform_device *pdev)
66{ 64{
67 struct usb_hcd *hcd; 65 struct usb_hcd *hcd;
@@ -78,7 +76,9 @@ static int uhci_hcd_platform_probe(struct platform_device *pdev)
78 * Once we have dma capability bindings this can go away. 76 * Once we have dma capability bindings this can go away.
79 */ 77 */
80 if (!pdev->dev.dma_mask) 78 if (!pdev->dev.dma_mask)
81 pdev->dev.dma_mask = &platform_uhci_dma_mask; 79 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
80 if (!pdev->dev.coherent_dma_mask)
81 pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
82 82
83 hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev, 83 hcd = usb_create_hcd(&uhci_platform_hc_driver, &pdev->dev,
84 pdev->name); 84 pdev->name);
diff --git a/drivers/usb/host/uhci-q.c b/drivers/usb/host/uhci-q.c
index f0976d8190bc..041c6ddb695c 100644
--- a/drivers/usb/host/uhci-q.c
+++ b/drivers/usb/host/uhci-q.c
@@ -1287,7 +1287,7 @@ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb,
1287 return -EINVAL; /* Can't change the period */ 1287 return -EINVAL; /* Can't change the period */
1288 1288
1289 } else { 1289 } else {
1290 next = uhci->frame_number + 2; 1290 next = uhci->frame_number + 1;
1291 1291
1292 /* Find the next unused frame */ 1292 /* Find the next unused frame */
1293 if (list_empty(&qh->queue)) { 1293 if (list_empty(&qh->queue)) {
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 965b539bc474..2cfc465925bd 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1423,15 +1423,17 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1423 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep)); 1423 ep_ctx->ep_info2 |= cpu_to_le32(xhci_get_endpoint_type(udev, ep));
1424 1424
1425 /* Set the max packet size and max burst */ 1425 /* Set the max packet size and max burst */
1426 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1427 max_burst = 0;
1426 switch (udev->speed) { 1428 switch (udev->speed) {
1427 case USB_SPEED_SUPER: 1429 case USB_SPEED_SUPER:
1428 max_packet = usb_endpoint_maxp(&ep->desc);
1429 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
1430 /* dig out max burst from ep companion desc */ 1430 /* dig out max burst from ep companion desc */
1431 max_packet = ep->ss_ep_comp.bMaxBurst; 1431 max_burst = ep->ss_ep_comp.bMaxBurst;
1432 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_packet));
1433 break; 1432 break;
1434 case USB_SPEED_HIGH: 1433 case USB_SPEED_HIGH:
1434 /* Some devices get this wrong */
1435 if (usb_endpoint_xfer_bulk(&ep->desc))
1436 max_packet = 512;
1435 /* bits 11:12 specify the number of additional transaction 1437 /* bits 11:12 specify the number of additional transaction
1436 * opportunities per microframe (USB 2.0, section 9.6.6) 1438 * opportunities per microframe (USB 2.0, section 9.6.6)
1437 */ 1439 */
@@ -1439,17 +1441,16 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1439 usb_endpoint_xfer_int(&ep->desc)) { 1441 usb_endpoint_xfer_int(&ep->desc)) {
1440 max_burst = (usb_endpoint_maxp(&ep->desc) 1442 max_burst = (usb_endpoint_maxp(&ep->desc)
1441 & 0x1800) >> 11; 1443 & 0x1800) >> 11;
1442 ep_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(max_burst));
1443 } 1444 }
1444 /* Fall through */ 1445 break;
1445 case USB_SPEED_FULL: 1446 case USB_SPEED_FULL:
1446 case USB_SPEED_LOW: 1447 case USB_SPEED_LOW:
1447 max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
1448 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet));
1449 break; 1448 break;
1450 default: 1449 default:
1451 BUG(); 1450 BUG();
1452 } 1451 }
1452 ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
1453 MAX_BURST(max_burst));
1453 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep); 1454 max_esit_payload = xhci_get_max_esit_payload(xhci, udev, ep);
1454 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload)); 1455 ep_ctx->tx_info = cpu_to_le32(MAX_ESIT_PAYLOAD_FOR_EP(max_esit_payload));
1455 1456
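The xhci-mem.c hunk above consolidates the endpoint-context setup: max_packet is read once with GET_MAX_PACKET(), max_burst starts at 0, the switch applies only the speed-specific overrides, and ep_info2 is written a single time afterwards (previously the SuperSpeed branch reused max_packet for the burst value and the high-speed branch fell through into the full/low-speed one). The resulting control flow, reconstructed here from the hunk purely for readability:

	max_packet = GET_MAX_PACKET(usb_endpoint_maxp(&ep->desc));
	max_burst = 0;
	switch (udev->speed) {
	case USB_SPEED_SUPER:
		/* burst comes from the SuperSpeed endpoint companion */
		max_burst = ep->ss_ep_comp.bMaxBurst;
		break;
	case USB_SPEED_HIGH:
		/* some devices report a bogus bulk wMaxPacketSize */
		if (usb_endpoint_xfer_bulk(&ep->desc))
			max_packet = 512;
		/* bits 11:12 of wMaxPacketSize = extra transactions/uframe */
		if (usb_endpoint_xfer_isoc(&ep->desc) ||
		    usb_endpoint_xfer_int(&ep->desc))
			max_burst = (usb_endpoint_maxp(&ep->desc) & 0x1800) >> 11;
		break;
	case USB_SPEED_FULL:
	case USB_SPEED_LOW:
		break;
	default:
		BUG();
	}
	ep_ctx->ep_info2 |= cpu_to_le32(MAX_PACKET(max_packet) |
			MAX_BURST(max_burst));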
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 3a18e44e9391..e1b661d04021 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -560,6 +560,7 @@ static int dsps_create_musb_pdev(struct dsps_glue *glue, u8 id)
560 if (!config) { 560 if (!config) {
561 dev_err(&pdev->dev, 561 dev_err(&pdev->dev,
562 "failed to allocate musb hdrc config\n"); 562 "failed to allocate musb hdrc config\n");
563 ret = -ENOMEM;
563 goto err2; 564 goto err2;
564 } 565 }
565 566
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 3551f1a30c65..628b93fe5ccc 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -549,7 +549,8 @@ static int omap2430_probe(struct platform_device *pdev)
549 glue->control_otghs = omap_get_control_dev(); 549 glue->control_otghs = omap_get_control_dev();
550 if (IS_ERR(glue->control_otghs)) { 550 if (IS_ERR(glue->control_otghs)) {
551 dev_vdbg(&pdev->dev, "Failed to get control device\n"); 551 dev_vdbg(&pdev->dev, "Failed to get control device\n");
552 return -ENODEV; 552 ret = PTR_ERR(glue->control_otghs);
553 goto err2;
553 } 554 }
554 } else { 555 } else {
555 glue->control_otghs = ERR_PTR(-ENODEV); 556 glue->control_otghs = ERR_PTR(-ENODEV);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 371d0e74e909..7ef3eb8617a6 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -25,7 +25,7 @@ config AB8500_USB
25 25
26config FSL_USB2_OTG 26config FSL_USB2_OTG
27 bool "Freescale USB OTG Transceiver Driver" 27 bool "Freescale USB OTG Transceiver Driver"
28 depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_SUSPEND 28 depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
29 select USB_OTG 29 select USB_OTG
30 help 30 help
31 Enable this to support Freescale USB OTG transceiver. 31 Enable this to support Freescale USB OTG transceiver.
@@ -139,7 +139,6 @@ config USB_ISP1301
139 tristate "NXP ISP1301 USB transceiver support" 139 tristate "NXP ISP1301 USB transceiver support"
140 depends on USB || USB_GADGET 140 depends on USB || USB_GADGET
141 depends on I2C 141 depends on I2C
142 select USB_OTG_UTILS
143 help 142 help
144 Say Y here to add support for the NXP ISP1301 USB transceiver driver. 143 Say Y here to add support for the NXP ISP1301 USB transceiver driver.
145 This chip is typically used as USB transceiver for USB host, gadget 144 This chip is typically used as USB transceiver for USB host, gadget
@@ -162,7 +161,7 @@ config USB_MSM_OTG
162 161
163config USB_MV_OTG 162config USB_MV_OTG
164 tristate "Marvell USB OTG support" 163 tristate "Marvell USB OTG support"
165 depends on USB_EHCI_MV && USB_MV_UDC && USB_SUSPEND 164 depends on USB_EHCI_MV && USB_MV_UDC && PM_RUNTIME
166 select USB_OTG 165 select USB_OTG
167 help 166 help
168 Say Y here if you want to build Marvell USB OTG transciever 167 Say Y here if you want to build Marvell USB OTG transciever
diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c
index 4acef26a2ef5..e5eb1b5a04eb 100644
--- a/drivers/usb/phy/phy-ab8500-usb.c
+++ b/drivers/usb/phy/phy-ab8500-usb.c
@@ -892,8 +892,6 @@ static int ab8500_usb_remove(struct platform_device *pdev)
892 else if (ab->mode == USB_PERIPHERAL) 892 else if (ab->mode == USB_PERIPHERAL)
893 ab8500_usb_peri_phy_dis(ab); 893 ab8500_usb_peri_phy_dis(ab);
894 894
895 platform_set_drvdata(pdev, NULL);
896
897 return 0; 895 return 0;
898} 896}
899 897
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 97b9308507c3..e771bafb9f1d 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -799,6 +799,7 @@ static int fsl_otg_conf(struct platform_device *pdev)
799 799
800 /* initialize the otg structure */ 800 /* initialize the otg structure */
801 fsl_otg_tc->phy.label = DRIVER_DESC; 801 fsl_otg_tc->phy.label = DRIVER_DESC;
802 fsl_otg_tc->phy.dev = &pdev->dev;
802 fsl_otg_tc->phy.set_power = fsl_otg_set_power; 803 fsl_otg_tc->phy.set_power = fsl_otg_set_power;
803 804
804 fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy; 805 fsl_otg_tc->phy.otg->phy = &fsl_otg_tc->phy;
diff --git a/drivers/usb/phy/phy-gpio-vbus-usb.c b/drivers/usb/phy/phy-gpio-vbus-usb.c
index 4c76074e518d..8443335c2ea0 100644
--- a/drivers/usb/phy/phy-gpio-vbus-usb.c
+++ b/drivers/usb/phy/phy-gpio-vbus-usb.c
@@ -266,6 +266,7 @@ static int __init gpio_vbus_probe(struct platform_device *pdev)
266 platform_set_drvdata(pdev, gpio_vbus); 266 platform_set_drvdata(pdev, gpio_vbus);
267 gpio_vbus->dev = &pdev->dev; 267 gpio_vbus->dev = &pdev->dev;
268 gpio_vbus->phy.label = "gpio-vbus"; 268 gpio_vbus->phy.label = "gpio-vbus";
269 gpio_vbus->phy.dev = gpio_vbus->dev;
269 gpio_vbus->phy.set_power = gpio_vbus_set_power; 270 gpio_vbus->phy.set_power = gpio_vbus_set_power;
270 gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend; 271 gpio_vbus->phy.set_suspend = gpio_vbus_set_suspend;
271 gpio_vbus->phy.state = OTG_STATE_UNDEFINED; 272 gpio_vbus->phy.state = OTG_STATE_UNDEFINED;
@@ -343,7 +344,6 @@ err_irq:
343 gpio_free(pdata->gpio_pullup); 344 gpio_free(pdata->gpio_pullup);
344 gpio_free(pdata->gpio_vbus); 345 gpio_free(pdata->gpio_vbus);
345err_gpio: 346err_gpio:
346 platform_set_drvdata(pdev, NULL);
347 kfree(gpio_vbus->phy.otg); 347 kfree(gpio_vbus->phy.otg);
348 kfree(gpio_vbus); 348 kfree(gpio_vbus);
349 return err; 349 return err;
@@ -365,7 +365,6 @@ static int __exit gpio_vbus_remove(struct platform_device *pdev)
365 if (gpio_is_valid(pdata->gpio_pullup)) 365 if (gpio_is_valid(pdata->gpio_pullup))
366 gpio_free(pdata->gpio_pullup); 366 gpio_free(pdata->gpio_pullup);
367 gpio_free(gpio); 367 gpio_free(gpio);
368 platform_set_drvdata(pdev, NULL);
369 kfree(gpio_vbus->phy.otg); 368 kfree(gpio_vbus->phy.otg);
370 kfree(gpio_vbus); 369 kfree(gpio_vbus);
371 370
diff --git a/drivers/usb/phy/phy-isp1301.c b/drivers/usb/phy/phy-isp1301.c
index 225ae6c97eeb..8a55b37d1a02 100644
--- a/drivers/usb/phy/phy-isp1301.c
+++ b/drivers/usb/phy/phy-isp1301.c
@@ -102,6 +102,7 @@ static int isp1301_probe(struct i2c_client *client,
102 mutex_init(&isp->mutex); 102 mutex_init(&isp->mutex);
103 103
104 phy = &isp->phy; 104 phy = &isp->phy;
105 phy->dev = &client->dev;
105 phy->label = DRV_NAME; 106 phy->label = DRV_NAME;
106 phy->init = isp1301_phy_init; 107 phy->init = isp1301_phy_init;
107 phy->set_vbus = isp1301_phy_set_vbus; 108 phy->set_vbus = isp1301_phy_set_vbus;
diff --git a/drivers/usb/phy/phy-mv-u3d-usb.c b/drivers/usb/phy/phy-mv-u3d-usb.c
index f7838a43347c..1568ea63e338 100644
--- a/drivers/usb/phy/phy-mv-u3d-usb.c
+++ b/drivers/usb/phy/phy-mv-u3d-usb.c
@@ -278,11 +278,6 @@ static int mv_u3d_phy_probe(struct platform_device *pdev)
278 } 278 }
279 279
280 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 280 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
281 if (!res) {
282 dev_err(dev, "missing mem resource\n");
283 return -ENODEV;
284 }
285
286 phy_base = devm_ioremap_resource(dev, res); 281 phy_base = devm_ioremap_resource(dev, res);
287 if (IS_ERR(phy_base)) 282 if (IS_ERR(phy_base))
288 return PTR_ERR(phy_base); 283 return PTR_ERR(phy_base);
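This hunk, like the matching ones in ohci-nxp.c above and in phy-mxs-usb.c, phy-samsung-usb2.c, phy-samsung-usb3.c, omap2/dss/hdmi.c and omap2/vrfb.c below, drops the explicit NULL check after platform_get_resource(): devm_ioremap_resource() validates the resource itself, prints its own diagnostic and returns an ERR_PTR on failure, so the probe path reduces to:

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);	/* copes with res == NULL */
	if (IS_ERR(base))
		return PTR_ERR(base);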
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index c987bbe27851..4a6b03c73876 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -667,7 +667,6 @@ int mv_otg_remove(struct platform_device *pdev)
667 mv_otg_disable(mvotg); 667 mv_otg_disable(mvotg);
668 668
669 usb_remove_phy(&mvotg->phy); 669 usb_remove_phy(&mvotg->phy);
670 platform_set_drvdata(pdev, NULL);
671 670
672 return 0; 671 return 0;
673} 672}
@@ -850,8 +849,6 @@ err_destroy_workqueue:
850 flush_workqueue(mvotg->qwork); 849 flush_workqueue(mvotg->qwork);
851 destroy_workqueue(mvotg->qwork); 850 destroy_workqueue(mvotg->qwork);
852 851
853 platform_set_drvdata(pdev, NULL);
854
855 return retval; 852 return retval;
856} 853}
857 854
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 9d4381e64d51..bd601c537c8d 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -130,11 +130,6 @@ static int mxs_phy_probe(struct platform_device *pdev)
130 int ret; 130 int ret;
131 131
132 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 132 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
133 if (!res) {
134 dev_err(&pdev->dev, "can't get device resources\n");
135 return -ENOENT;
136 }
137
138 base = devm_ioremap_resource(&pdev->dev, res); 133 base = devm_ioremap_resource(&pdev->dev, res);
139 if (IS_ERR(base)) 134 if (IS_ERR(base))
140 return PTR_ERR(base); 135 return PTR_ERR(base);
@@ -160,6 +155,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
160 mxs_phy->phy.set_suspend = mxs_phy_suspend; 155 mxs_phy->phy.set_suspend = mxs_phy_suspend;
161 mxs_phy->phy.notify_connect = mxs_phy_on_connect; 156 mxs_phy->phy.notify_connect = mxs_phy_on_connect;
162 mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect; 157 mxs_phy->phy.notify_disconnect = mxs_phy_on_disconnect;
158 mxs_phy->phy.type = USB_PHY_TYPE_USB2;
163 159
164 ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier); 160 ATOMIC_INIT_NOTIFIER_HEAD(&mxs_phy->phy.notifier);
165 161
@@ -180,8 +176,6 @@ static int mxs_phy_remove(struct platform_device *pdev)
180 176
181 usb_remove_phy(&mxs_phy->phy); 177 usb_remove_phy(&mxs_phy->phy);
182 178
183 platform_set_drvdata(pdev, NULL);
184
185 return 0; 179 return 0;
186} 180}
187 181
diff --git a/drivers/usb/phy/phy-nop.c b/drivers/usb/phy/phy-nop.c
index 2b10cc969bbb..638cc5dade35 100644
--- a/drivers/usb/phy/phy-nop.c
+++ b/drivers/usb/phy/phy-nop.c
@@ -254,8 +254,6 @@ static int nop_usb_xceiv_remove(struct platform_device *pdev)
254 254
255 usb_remove_phy(&nop->phy); 255 usb_remove_phy(&nop->phy);
256 256
257 platform_set_drvdata(pdev, NULL);
258
259 return 0; 257 return 0;
260} 258}
261 259
diff --git a/drivers/usb/phy/phy-samsung-usb2.c b/drivers/usb/phy/phy-samsung-usb2.c
index 45ffe036dacc..9d5e273abcc7 100644
--- a/drivers/usb/phy/phy-samsung-usb2.c
+++ b/drivers/usb/phy/phy-samsung-usb2.c
@@ -363,11 +363,6 @@ static int samsung_usb2phy_probe(struct platform_device *pdev)
363 int ret; 363 int ret;
364 364
365 phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 365 phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
366 if (!phy_mem) {
367 dev_err(dev, "%s: missing mem resource\n", __func__);
368 return -ENODEV;
369 }
370
371 phy_base = devm_ioremap_resource(dev, phy_mem); 366 phy_base = devm_ioremap_resource(dev, phy_mem);
372 if (IS_ERR(phy_base)) 367 if (IS_ERR(phy_base))
373 return PTR_ERR(phy_base); 368 return PTR_ERR(phy_base);
diff --git a/drivers/usb/phy/phy-samsung-usb3.c b/drivers/usb/phy/phy-samsung-usb3.c
index 133f3d0c554f..5a9efcbcb532 100644
--- a/drivers/usb/phy/phy-samsung-usb3.c
+++ b/drivers/usb/phy/phy-samsung-usb3.c
@@ -239,11 +239,6 @@ static int samsung_usb3phy_probe(struct platform_device *pdev)
239 int ret; 239 int ret;
240 240
241 phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 241 phy_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
242 if (!phy_mem) {
243 dev_err(dev, "%s: missing mem resource\n", __func__);
244 return -ENODEV;
245 }
246
247 phy_base = devm_ioremap_resource(dev, phy_mem); 242 phy_base = devm_ioremap_resource(dev, phy_mem);
248 if (IS_ERR(phy_base)) 243 if (IS_ERR(phy_base))
249 return PTR_ERR(phy_base); 244 return PTR_ERR(phy_base);
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 242b5776648a..7260ec660347 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -189,6 +189,8 @@ static struct usb_device_id id_table_combined [] = {
189 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, 189 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) },
190 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, 190 { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) },
191 { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, 191 { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) },
192 { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) },
193 { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) },
192 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, 194 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
193 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, 195 { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
194 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, 196 { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
@@ -924,8 +926,8 @@ static int ftdi_tiocmset(struct tty_struct *tty,
924static int ftdi_ioctl(struct tty_struct *tty, 926static int ftdi_ioctl(struct tty_struct *tty,
925 unsigned int cmd, unsigned long arg); 927 unsigned int cmd, unsigned long arg);
926static void ftdi_break_ctl(struct tty_struct *tty, int break_state); 928static void ftdi_break_ctl(struct tty_struct *tty, int break_state);
927static int ftdi_chars_in_buffer(struct tty_struct *tty); 929static bool ftdi_tx_empty(struct usb_serial_port *port);
928static int ftdi_get_modem_status(struct tty_struct *tty, 930static int ftdi_get_modem_status(struct usb_serial_port *port,
929 unsigned char status[2]); 931 unsigned char status[2]);
930 932
931static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base); 933static unsigned short int ftdi_232am_baud_base_to_divisor(int baud, int base);
@@ -961,7 +963,7 @@ static struct usb_serial_driver ftdi_sio_device = {
961 .ioctl = ftdi_ioctl, 963 .ioctl = ftdi_ioctl,
962 .set_termios = ftdi_set_termios, 964 .set_termios = ftdi_set_termios,
963 .break_ctl = ftdi_break_ctl, 965 .break_ctl = ftdi_break_ctl,
964 .chars_in_buffer = ftdi_chars_in_buffer, 966 .tx_empty = ftdi_tx_empty,
965}; 967};
966 968
967static struct usb_serial_driver * const serial_drivers[] = { 969static struct usb_serial_driver * const serial_drivers[] = {
@@ -2056,27 +2058,18 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
2056 2058
2057} 2059}
2058 2060
2059static int ftdi_chars_in_buffer(struct tty_struct *tty) 2061static bool ftdi_tx_empty(struct usb_serial_port *port)
2060{ 2062{
2061 struct usb_serial_port *port = tty->driver_data;
2062 int chars;
2063 unsigned char buf[2]; 2063 unsigned char buf[2];
2064 int ret; 2064 int ret;
2065 2065
2066 chars = usb_serial_generic_chars_in_buffer(tty); 2066 ret = ftdi_get_modem_status(port, buf);
2067 if (chars)
2068 goto out;
2069
2070 /* Check if hardware buffer is empty. */
2071 ret = ftdi_get_modem_status(tty, buf);
2072 if (ret == 2) { 2067 if (ret == 2) {
2073 if (!(buf[1] & FTDI_RS_TEMT)) 2068 if (!(buf[1] & FTDI_RS_TEMT))
2074 chars = 1; 2069 return false;
2075 } 2070 }
2076out:
2077 dev_dbg(&port->dev, "%s - %d\n", __func__, chars);
2078 2071
2079 return chars; 2072 return true;
2080} 2073}
2081 2074
2082/* old_termios contains the original termios settings and tty->termios contains 2075/* old_termios contains the original termios settings and tty->termios contains
@@ -2268,10 +2261,9 @@ no_c_cflag_changes:
2268 * Returns the number of status bytes retrieved (device dependant), or 2261 * Returns the number of status bytes retrieved (device dependant), or
2269 * negative error code. 2262 * negative error code.
2270 */ 2263 */
2271static int ftdi_get_modem_status(struct tty_struct *tty, 2264static int ftdi_get_modem_status(struct usb_serial_port *port,
2272 unsigned char status[2]) 2265 unsigned char status[2])
2273{ 2266{
2274 struct usb_serial_port *port = tty->driver_data;
2275 struct ftdi_private *priv = usb_get_serial_port_data(port); 2267 struct ftdi_private *priv = usb_get_serial_port_data(port);
2276 unsigned char *buf; 2268 unsigned char *buf;
2277 int len; 2269 int len;
@@ -2336,7 +2328,7 @@ static int ftdi_tiocmget(struct tty_struct *tty)
2336 unsigned char buf[2]; 2328 unsigned char buf[2];
2337 int ret; 2329 int ret;
2338 2330
2339 ret = ftdi_get_modem_status(tty, buf); 2331 ret = ftdi_get_modem_status(port, buf);
2340 if (ret < 0) 2332 if (ret < 0)
2341 return ret; 2333 return ret;
2342 2334
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 98528270c43c..6dd79253205d 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -772,6 +772,8 @@
772 */ 772 */
773#define NEWPORT_VID 0x104D 773#define NEWPORT_VID 0x104D
774#define NEWPORT_AGILIS_PID 0x3000 774#define NEWPORT_AGILIS_PID 0x3000
775#define NEWPORT_CONEX_CC_PID 0x3002
776#define NEWPORT_CONEX_AGP_PID 0x3006
775 777
776/* Interbiometrics USB I/O Board */ 778/* Interbiometrics USB I/O Board */
777/* Developed for Interbiometrics by Rudolf Gugler */ 779/* Developed for Interbiometrics by Rudolf Gugler */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 297665fdd16d..ba45170c78e5 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -253,6 +253,37 @@ int usb_serial_generic_chars_in_buffer(struct tty_struct *tty)
253} 253}
254EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer); 254EXPORT_SYMBOL_GPL(usb_serial_generic_chars_in_buffer);
255 255
256void usb_serial_generic_wait_until_sent(struct tty_struct *tty, long timeout)
257{
258 struct usb_serial_port *port = tty->driver_data;
259 unsigned int bps;
260 unsigned long period;
261 unsigned long expire;
262
263 bps = tty_get_baud_rate(tty);
264 if (!bps)
265 bps = 9600; /* B0 */
266 /*
267 * Use a poll-period of roughly the time it takes to send one
268 * character or at least one jiffy.
269 */
270 period = max_t(unsigned long, (10 * HZ / bps), 1);
271 period = min_t(unsigned long, period, timeout);
272
273 dev_dbg(&port->dev, "%s - timeout = %u ms, period = %u ms\n",
274 __func__, jiffies_to_msecs(timeout),
275 jiffies_to_msecs(period));
276 expire = jiffies + timeout;
277 while (!port->serial->type->tx_empty(port)) {
278 schedule_timeout_interruptible(period);
279 if (signal_pending(current))
280 break;
281 if (time_after(jiffies, expire))
282 break;
283 }
284}
285EXPORT_SYMBOL_GPL(usb_serial_generic_wait_until_sent);
286
256static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port, 287static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
257 int index, gfp_t mem_flags) 288 int index, gfp_t mem_flags)
258{ 289{
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index 158bf4bc29cc..1be6ba7bee27 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -2019,8 +2019,6 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
2019 struct edgeport_port *edge_port = usb_get_serial_port_data(port); 2019 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
2020 int chars = 0; 2020 int chars = 0;
2021 unsigned long flags; 2021 unsigned long flags;
2022 int ret;
2023
2024 if (edge_port == NULL) 2022 if (edge_port == NULL)
2025 return 0; 2023 return 0;
2026 2024
@@ -2028,16 +2026,22 @@ static int edge_chars_in_buffer(struct tty_struct *tty)
2028 chars = kfifo_len(&edge_port->write_fifo); 2026 chars = kfifo_len(&edge_port->write_fifo);
2029 spin_unlock_irqrestore(&edge_port->ep_lock, flags); 2027 spin_unlock_irqrestore(&edge_port->ep_lock, flags);
2030 2028
2031 if (!chars) {
2032 ret = tx_active(edge_port);
2033 if (ret > 0)
2034 chars = ret;
2035 }
2036
2037 dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); 2029 dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
2038 return chars; 2030 return chars;
2039} 2031}
2040 2032
2033static bool edge_tx_empty(struct usb_serial_port *port)
2034{
2035 struct edgeport_port *edge_port = usb_get_serial_port_data(port);
2036 int ret;
2037
2038 ret = tx_active(edge_port);
2039 if (ret > 0)
2040 return false;
2041
2042 return true;
2043}
2044
2041static void edge_throttle(struct tty_struct *tty) 2045static void edge_throttle(struct tty_struct *tty)
2042{ 2046{
2043 struct usb_serial_port *port = tty->driver_data; 2047 struct usb_serial_port *port = tty->driver_data;
@@ -2557,6 +2561,7 @@ static struct usb_serial_driver edgeport_1port_device = {
2557 .write = edge_write, 2561 .write = edge_write,
2558 .write_room = edge_write_room, 2562 .write_room = edge_write_room,
2559 .chars_in_buffer = edge_chars_in_buffer, 2563 .chars_in_buffer = edge_chars_in_buffer,
2564 .tx_empty = edge_tx_empty,
2560 .break_ctl = edge_break, 2565 .break_ctl = edge_break,
2561 .read_int_callback = edge_interrupt_callback, 2566 .read_int_callback = edge_interrupt_callback,
2562 .read_bulk_callback = edge_bulk_in_callback, 2567 .read_bulk_callback = edge_bulk_in_callback,
@@ -2589,6 +2594,7 @@ static struct usb_serial_driver edgeport_2port_device = {
2589 .write = edge_write, 2594 .write = edge_write,
2590 .write_room = edge_write_room, 2595 .write_room = edge_write_room,
2591 .chars_in_buffer = edge_chars_in_buffer, 2596 .chars_in_buffer = edge_chars_in_buffer,
2597 .tx_empty = edge_tx_empty,
2592 .break_ctl = edge_break, 2598 .break_ctl = edge_break,
2593 .read_int_callback = edge_interrupt_callback, 2599 .read_int_callback = edge_interrupt_callback,
2594 .read_bulk_callback = edge_bulk_in_callback, 2600 .read_bulk_callback = edge_bulk_in_callback,
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 734372846abb..93d02bc4eb52 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -196,6 +196,7 @@ static void option_instat_callback(struct urb *urb);
196 196
197#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ 197#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */
198#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ 198#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */
199#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */
199 200
200#define KYOCERA_VENDOR_ID 0x0c88 201#define KYOCERA_VENDOR_ID 0x0c88
201#define KYOCERA_PRODUCT_KPC650 0x17da 202#define KYOCERA_PRODUCT_KPC650 0x17da
@@ -341,8 +342,8 @@ static void option_instat_callback(struct urb *urb);
341#define CINTERION_PRODUCT_EU3_E 0x0051 342#define CINTERION_PRODUCT_EU3_E 0x0051
342#define CINTERION_PRODUCT_EU3_P 0x0052 343#define CINTERION_PRODUCT_EU3_P 0x0052
343#define CINTERION_PRODUCT_PH8 0x0053 344#define CINTERION_PRODUCT_PH8 0x0053
344#define CINTERION_PRODUCT_AH6 0x0055 345#define CINTERION_PRODUCT_AHXX 0x0055
345#define CINTERION_PRODUCT_PLS8 0x0060 346#define CINTERION_PRODUCT_PLXX 0x0060
346 347
347/* Olivetti products */ 348/* Olivetti products */
348#define OLIVETTI_VENDOR_ID 0x0b3c 349#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -771,6 +772,7 @@ static const struct usb_device_id option_ids[] = {
771 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ 772 { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */
772 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, 773 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) },
773 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, 774 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) },
775 { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) },
774 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ 776 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */
775 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, 777 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) },
776 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, 778 { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) },
@@ -966,6 +968,8 @@ static const struct usb_device_id option_ids[] = {
966 .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, 968 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
967 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, 969 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
968 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, 970 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
971 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
972 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
969 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, 973 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
970 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, 974 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
971 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), 975 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
@@ -1264,8 +1268,9 @@ static const struct usb_device_id option_ids[] = {
1264 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, 1268 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
1265 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, 1269 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
1266 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, 1270 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
1267 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, 1271 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
1268 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, 1272 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
1273 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1269 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, 1274 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
1270 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, 1275 { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
1271 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, 1276 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index cac47aef2918..c92c5ed4e580 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -101,6 +101,7 @@ static int ti_write(struct tty_struct *tty, struct usb_serial_port *port,
101 const unsigned char *data, int count); 101 const unsigned char *data, int count);
102static int ti_write_room(struct tty_struct *tty); 102static int ti_write_room(struct tty_struct *tty);
103static int ti_chars_in_buffer(struct tty_struct *tty); 103static int ti_chars_in_buffer(struct tty_struct *tty);
104static bool ti_tx_empty(struct usb_serial_port *port);
104static void ti_throttle(struct tty_struct *tty); 105static void ti_throttle(struct tty_struct *tty);
105static void ti_unthrottle(struct tty_struct *tty); 106static void ti_unthrottle(struct tty_struct *tty);
106static int ti_ioctl(struct tty_struct *tty, 107static int ti_ioctl(struct tty_struct *tty,
@@ -222,6 +223,7 @@ static struct usb_serial_driver ti_1port_device = {
222 .write = ti_write, 223 .write = ti_write,
223 .write_room = ti_write_room, 224 .write_room = ti_write_room,
224 .chars_in_buffer = ti_chars_in_buffer, 225 .chars_in_buffer = ti_chars_in_buffer,
226 .tx_empty = ti_tx_empty,
225 .throttle = ti_throttle, 227 .throttle = ti_throttle,
226 .unthrottle = ti_unthrottle, 228 .unthrottle = ti_unthrottle,
227 .ioctl = ti_ioctl, 229 .ioctl = ti_ioctl,
@@ -253,6 +255,7 @@ static struct usb_serial_driver ti_2port_device = {
253 .write = ti_write, 255 .write = ti_write,
254 .write_room = ti_write_room, 256 .write_room = ti_write_room,
255 .chars_in_buffer = ti_chars_in_buffer, 257 .chars_in_buffer = ti_chars_in_buffer,
258 .tx_empty = ti_tx_empty,
256 .throttle = ti_throttle, 259 .throttle = ti_throttle,
257 .unthrottle = ti_unthrottle, 260 .unthrottle = ti_unthrottle,
258 .ioctl = ti_ioctl, 261 .ioctl = ti_ioctl,
@@ -684,8 +687,6 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
684 struct ti_port *tport = usb_get_serial_port_data(port); 687 struct ti_port *tport = usb_get_serial_port_data(port);
685 int chars = 0; 688 int chars = 0;
686 unsigned long flags; 689 unsigned long flags;
687 int ret;
688 u8 lsr;
689 690
690 if (tport == NULL) 691 if (tport == NULL)
691 return 0; 692 return 0;
@@ -694,16 +695,22 @@ static int ti_chars_in_buffer(struct tty_struct *tty)
694 chars = kfifo_len(&tport->write_fifo); 695 chars = kfifo_len(&tport->write_fifo);
695 spin_unlock_irqrestore(&tport->tp_lock, flags); 696 spin_unlock_irqrestore(&tport->tp_lock, flags);
696 697
697 if (!chars) {
698 ret = ti_get_lsr(tport, &lsr);
699 if (!ret && !(lsr & TI_LSR_TX_EMPTY))
700 chars = 1;
701 }
702
703 dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars); 698 dev_dbg(&port->dev, "%s - returns %d\n", __func__, chars);
704 return chars; 699 return chars;
705} 700}
706 701
702static bool ti_tx_empty(struct usb_serial_port *port)
703{
704 struct ti_port *tport = usb_get_serial_port_data(port);
705 int ret;
706 u8 lsr;
707
708 ret = ti_get_lsr(tport, &lsr);
709 if (!ret && !(lsr & TI_LSR_TX_EMPTY))
710 return false;
711
712 return true;
713}
707 714
708static void ti_throttle(struct tty_struct *tty) 715static void ti_throttle(struct tty_struct *tty)
709{ 716{
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index cf75beb1251b..4753c005cfb6 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -359,20 +359,29 @@ static int serial_chars_in_buffer(struct tty_struct *tty)
359{ 359{
360 struct usb_serial_port *port = tty->driver_data; 360 struct usb_serial_port *port = tty->driver_data;
361 struct usb_serial *serial = port->serial; 361 struct usb_serial *serial = port->serial;
362 int count = 0;
363 362
364 dev_dbg(tty->dev, "%s\n", __func__); 363 dev_dbg(tty->dev, "%s\n", __func__);
365 364
366 mutex_lock(&serial->disc_mutex);
367 /* if the device was unplugged then any remaining characters
368 fell out of the connector ;) */
369 if (serial->disconnected) 365 if (serial->disconnected)
370 count = 0; 366 return 0;
371 else 367
372 count = serial->type->chars_in_buffer(tty); 368 return serial->type->chars_in_buffer(tty);
373 mutex_unlock(&serial->disc_mutex); 369}
370
371static void serial_wait_until_sent(struct tty_struct *tty, int timeout)
372{
373 struct usb_serial_port *port = tty->driver_data;
374 struct usb_serial *serial = port->serial;
375
376 dev_dbg(tty->dev, "%s\n", __func__);
377
378 if (!port->serial->type->wait_until_sent)
379 return;
374 380
375 return count; 381 mutex_lock(&serial->disc_mutex);
382 if (!serial->disconnected)
383 port->serial->type->wait_until_sent(tty, timeout);
384 mutex_unlock(&serial->disc_mutex);
376} 385}
377 386
378static void serial_throttle(struct tty_struct *tty) 387static void serial_throttle(struct tty_struct *tty)
@@ -1191,6 +1200,7 @@ static const struct tty_operations serial_ops = {
1191 .unthrottle = serial_unthrottle, 1200 .unthrottle = serial_unthrottle,
1192 .break_ctl = serial_break, 1201 .break_ctl = serial_break,
1193 .chars_in_buffer = serial_chars_in_buffer, 1202 .chars_in_buffer = serial_chars_in_buffer,
1203 .wait_until_sent = serial_wait_until_sent,
1194 .tiocmget = serial_tiocmget, 1204 .tiocmget = serial_tiocmget,
1195 .tiocmset = serial_tiocmset, 1205 .tiocmset = serial_tiocmset,
1196 .get_icount = serial_get_icount, 1206 .get_icount = serial_get_icount,
@@ -1316,6 +1326,8 @@ static void usb_serial_operations_init(struct usb_serial_driver *device)
1316 set_to_generic_if_null(device, close); 1326 set_to_generic_if_null(device, close);
1317 set_to_generic_if_null(device, write_room); 1327 set_to_generic_if_null(device, write_room);
1318 set_to_generic_if_null(device, chars_in_buffer); 1328 set_to_generic_if_null(device, chars_in_buffer);
1329 if (device->tx_empty)
1330 set_to_generic_if_null(device, wait_until_sent);
1319 set_to_generic_if_null(device, read_bulk_callback); 1331 set_to_generic_if_null(device, read_bulk_callback);
1320 set_to_generic_if_null(device, write_bulk_callback); 1332 set_to_generic_if_null(device, write_bulk_callback);
1321 set_to_generic_if_null(device, process_read_urb); 1333 set_to_generic_if_null(device, process_read_urb);
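The usb-serial changes above (generic.c, ftdi_sio.c, io_ti.c, ti_usb_3410_5052.c) and in this file separate "bytes still queued in software" from "hardware FIFO drained": chars_in_buffer() now reports only the software side, drivers expose a new bool tx_empty(port) callback, and the core gains a wait_until_sent tty operation whose generic implementation polls tx_empty() roughly once per character time (at least one jiffy, derived from the current baud rate) until the FIFO drains, a signal is pending, or the timeout expires. A sketch of the driver-side hookup; example_read_lsr() and EXAMPLE_LSR_TEMT are hypothetical stand-ins for a driver's own status query:

	static bool example_tx_empty(struct usb_serial_port *port)
	{
		unsigned char lsr;

		/* hypothetical vendor request reading a line-status register */
		if (example_read_lsr(port, &lsr))
			return true;		/* on error, don't stall close() */

		return lsr & EXAMPLE_LSR_TEMT;	/* transmitter-empty bit */
	}

	static struct usb_serial_driver example_device = {
		/* ... */
		.chars_in_buffer = usb_serial_generic_chars_in_buffer,
		.tx_empty	 = example_tx_empty,
		/* with tx_empty set, the core installs
		 * usb_serial_generic_wait_until_sent() automatically */
	};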
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 8623577bbbe7..281be56d5648 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -105,8 +105,9 @@ struct rts51x_chip {
105 int status_len; 105 int status_len;
106 106
107 u32 flag; 107 u32 flag;
108#ifdef CONFIG_REALTEK_AUTOPM
109 struct us_data *us; 108 struct us_data *us;
109
110#ifdef CONFIG_REALTEK_AUTOPM
110 struct timer_list rts51x_suspend_timer; 111 struct timer_list rts51x_suspend_timer;
111 unsigned long timer_expires; 112 unsigned long timer_expires;
112 int pwr_state; 113 int pwr_state;
@@ -988,6 +989,7 @@ static int init_realtek_cr(struct us_data *us)
988 us->extra = chip; 989 us->extra = chip;
989 us->extra_destructor = realtek_cr_destructor; 990 us->extra_destructor = realtek_cr_destructor;
990 us->max_lun = chip->max_lun = rts51x_get_max_lun(us); 991 us->max_lun = chip->max_lun = rts51x_get_max_lun(us);
992 chip->us = us;
991 993
992 usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun); 994 usb_stor_dbg(us, "chip->max_lun = %d\n", chip->max_lun);
993 995
@@ -1010,10 +1012,8 @@ static int init_realtek_cr(struct us_data *us)
1010 SET_AUTO_DELINK(chip); 1012 SET_AUTO_DELINK(chip);
1011 } 1013 }
1012#ifdef CONFIG_REALTEK_AUTOPM 1014#ifdef CONFIG_REALTEK_AUTOPM
1013 if (ss_en) { 1015 if (ss_en)
1014 chip->us = us;
1015 realtek_cr_autosuspend_setup(us); 1016 realtek_cr_autosuspend_setup(us);
1016 }
1017#endif 1017#endif
1018 1018
1019 usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag); 1019 usb_stor_dbg(us, "chip->flag = 0x%x\n", chip->flag);
diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
index bff0775e258c..5174ebac288d 100644
--- a/drivers/vhost/vringh.c
+++ b/drivers/vhost/vringh.c
@@ -3,6 +3,7 @@
3 * 3 *
4 * Since these may be in userspace, we use (inline) accessors. 4 * Since these may be in userspace, we use (inline) accessors.
5 */ 5 */
6#include <linux/module.h>
6#include <linux/vringh.h> 7#include <linux/vringh.h>
7#include <linux/virtio_ring.h> 8#include <linux/virtio_ring.h>
8#include <linux/kernel.h> 9#include <linux/kernel.h>
@@ -1005,3 +1006,5 @@ int vringh_need_notify_kern(struct vringh *vrh)
1005 return __vringh_need_notify(vrh, getu16_kern); 1006 return __vringh_need_notify(vrh, getu16_kern);
1006} 1007}
1007EXPORT_SYMBOL(vringh_need_notify_kern); 1008EXPORT_SYMBOL(vringh_need_notify_kern);
1009
1010MODULE_LICENSE("GPL");
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index d71d60f94fc1..2e937bdace6f 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2199,7 +2199,7 @@ config FB_XILINX
2199 2199
2200config FB_GOLDFISH 2200config FB_GOLDFISH
2201 tristate "Goldfish Framebuffer" 2201 tristate "Goldfish Framebuffer"
2202 depends on FB 2202 depends on FB && HAS_DMA
2203 select FB_CFB_FILLRECT 2203 select FB_CFB_FILLRECT
2204 select FB_CFB_COPYAREA 2204 select FB_CFB_COPYAREA
2205 select FB_CFB_IMAGEBLIT 2205 select FB_CFB_IMAGEBLIT
@@ -2453,6 +2453,23 @@ config FB_HYPERV
2453 help 2453 help
2454 This framebuffer driver supports Microsoft Hyper-V Synthetic Video. 2454 This framebuffer driver supports Microsoft Hyper-V Synthetic Video.
2455 2455
2456config FB_SIMPLE
2457 bool "Simple framebuffer support"
2458 depends on (FB = y) && OF
2459 select FB_CFB_FILLRECT
2460 select FB_CFB_COPYAREA
2461 select FB_CFB_IMAGEBLIT
2462 help
2463 Say Y if you want support for a simple frame-buffer.
2464
2465 This driver assumes that the display hardware has been initialized
2466 before the kernel boots, and the kernel will simply render to the
2467 pre-allocated frame buffer surface.
2468
2469 Configuration re: surface address, size, and format must be provided
2470 through device tree, or potentially plain old platform data in the
2471 future.
2472
2456source "drivers/video/omap/Kconfig" 2473source "drivers/video/omap/Kconfig"
2457source "drivers/video/omap2/Kconfig" 2474source "drivers/video/omap2/Kconfig"
2458source "drivers/video/exynos/Kconfig" 2475source "drivers/video/exynos/Kconfig"
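The new FB_SIMPLE option scans out of a frame buffer that firmware has already configured; the driver added below (simplefb.c) only reads the width, height, stride and format properties and wraps the memory resource in an fbdev device, with r5g6b5 (16 bpp) as the single supported format so far. A quick sizing check under assumed values, to show what that memory resource has to cover (1024x768 and a padding-free stride are assumptions, not requirements):

	u32 width  = 1024, height = 768;
	u32 bpp    = 16;			/* r5g6b5 table entry */
	u32 stride = width * (bpp / 8);		/* 2048 bytes per scanline */
	u32 size   = stride * height;		/* 1572864 bytes, i.e. 1.5 MiB */
	/* the IORESOURCE_MEM region handed to the device should span at
	 * least "size" bytes; smem_len is taken from resource_size() */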
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 7234e4a959e8..e8bae8dd4804 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -166,6 +166,7 @@ obj-$(CONFIG_FB_MX3) += mx3fb.o
166obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o 166obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
167obj-$(CONFIG_FB_MXS) += mxsfb.o 167obj-$(CONFIG_FB_MXS) += mxsfb.o
168obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o 168obj-$(CONFIG_FB_SSD1307) += ssd1307fb.o
169obj-$(CONFIG_FB_SIMPLE) += simplefb.o
169 170
170# the test framebuffer is last 171# the test framebuffer is last
171obj-$(CONFIG_FB_VIRTUAL) += vfb.o 172obj-$(CONFIG_FB_VIRTUAL) += vfb.o
diff --git a/drivers/video/console/Makefile b/drivers/video/console/Makefile
index a862e9173ebe..48da25c96cd3 100644
--- a/drivers/video/console/Makefile
+++ b/drivers/video/console/Makefile
@@ -18,6 +18,8 @@ font-objs-$(CONFIG_FONT_MINI_4x6) += font_mini_4x6.o
18 18
19font-objs += $(font-objs-y) 19font-objs += $(font-objs-y)
20 20
21obj-$(CONFIG_FONTS) += font.o
22
21# Each configuration option enables a list of files. 23# Each configuration option enables a list of files.
22 24
23obj-$(CONFIG_DUMMY_CONSOLE) += dummycon.o 25obj-$(CONFIG_DUMMY_CONSOLE) += dummycon.o
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index 17f4d55c621c..a109934c0478 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -1065,10 +1065,6 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
1065 mutex_init(&hdmi.ip_data.lock); 1065 mutex_init(&hdmi.ip_data.lock);
1066 1066
1067 res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0); 1067 res = platform_get_resource(hdmi.pdev, IORESOURCE_MEM, 0);
1068 if (!res) {
1069 DSSERR("can't get IORESOURCE_MEM HDMI\n");
1070 return -EINVAL;
1071 }
1072 1068
1073 /* Base address taken from platform */ 1069 /* Base address taken from platform */
1074 hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res); 1070 hdmi.ip_data.base_wp = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 5261229c79af..f346b02eee1d 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -353,11 +353,6 @@ static int __init vrfb_probe(struct platform_device *pdev)
353 /* first resource is the register res, the rest are vrfb contexts */ 353 /* first resource is the register res, the rest are vrfb contexts */
354 354
355 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 355 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
356 if (!mem) {
357 dev_err(&pdev->dev, "can't get vrfb base address\n");
358 return -EINVAL;
359 }
360
361 vrfb_base = devm_ioremap_resource(&pdev->dev, mem); 356 vrfb_base = devm_ioremap_resource(&pdev->dev, mem);
362 if (IS_ERR(vrfb_base)) 357 if (IS_ERR(vrfb_base))
363 return PTR_ERR(vrfb_base); 358 return PTR_ERR(vrfb_base);
diff --git a/drivers/video/simplefb.c b/drivers/video/simplefb.c
new file mode 100644
index 000000000000..e2e9e3e61b72
--- /dev/null
+++ b/drivers/video/simplefb.c
@@ -0,0 +1,234 @@
1/*
2 * Simplest possible simple frame-buffer driver, as a platform device
3 *
4 * Copyright (c) 2013, Stephen Warren
5 *
6 * Based on q40fb.c, which was:
7 * Copyright (C) 2001 Richard Zidlicky <rz@linux-m68k.org>
8 *
9 * Also based on offb.c, which was:
10 * Copyright (C) 1997 Geert Uytterhoeven
11 * Copyright (C) 1996 Paul Mackerras
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms and conditions of the GNU General Public License,
15 * version 2, as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 */
22
23#include <linux/errno.h>
24#include <linux/fb.h>
25#include <linux/io.h>
26#include <linux/module.h>
27#include <linux/platform_device.h>
28
29static struct fb_fix_screeninfo simplefb_fix = {
30 .id = "simple",
31 .type = FB_TYPE_PACKED_PIXELS,
32 .visual = FB_VISUAL_TRUECOLOR,
33 .accel = FB_ACCEL_NONE,
34};
35
36static struct fb_var_screeninfo simplefb_var = {
37 .height = -1,
38 .width = -1,
39 .activate = FB_ACTIVATE_NOW,
40 .vmode = FB_VMODE_NONINTERLACED,
41};
42
43static int simplefb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
44 u_int transp, struct fb_info *info)
45{
46 u32 *pal = info->pseudo_palette;
47 u32 cr = red >> (16 - info->var.red.length);
48 u32 cg = green >> (16 - info->var.green.length);
49 u32 cb = blue >> (16 - info->var.blue.length);
50 u32 value;
51
52 if (regno >= 16)
53 return -EINVAL;
54
55 value = (cr << info->var.red.offset) |
56 (cg << info->var.green.offset) |
57 (cb << info->var.blue.offset);
58 if (info->var.transp.length > 0) {
59 u32 mask = (1 << info->var.transp.length) - 1;
60 mask <<= info->var.transp.offset;
61 value |= mask;
62 }
63 pal[regno] = value;
64
65 return 0;
66}
67
68static struct fb_ops simplefb_ops = {
69 .owner = THIS_MODULE,
70 .fb_setcolreg = simplefb_setcolreg,
71 .fb_fillrect = cfb_fillrect,
72 .fb_copyarea = cfb_copyarea,
73 .fb_imageblit = cfb_imageblit,
74};
75
76struct simplefb_format {
77 const char *name;
78 u32 bits_per_pixel;
79 struct fb_bitfield red;
80 struct fb_bitfield green;
81 struct fb_bitfield blue;
82 struct fb_bitfield transp;
83};
84
85static struct simplefb_format simplefb_formats[] = {
86 { "r5g6b5", 16, {11, 5}, {5, 6}, {0, 5}, {0, 0} },
87};
88
89struct simplefb_params {
90 u32 width;
91 u32 height;
92 u32 stride;
93 struct simplefb_format *format;
94};
95
96static int simplefb_parse_dt(struct platform_device *pdev,
97 struct simplefb_params *params)
98{
99 struct device_node *np = pdev->dev.of_node;
100 int ret;
101 const char *format;
102 int i;
103
104 ret = of_property_read_u32(np, "width", &params->width);
105 if (ret) {
106 dev_err(&pdev->dev, "Can't parse width property\n");
107 return ret;
108 }
109
110 ret = of_property_read_u32(np, "height", &params->height);
111 if (ret) {
112 dev_err(&pdev->dev, "Can't parse height property\n");
113 return ret;
114 }
115
116 ret = of_property_read_u32(np, "stride", &params->stride);
117 if (ret) {
118 dev_err(&pdev->dev, "Can't parse stride property\n");
119 return ret;
120 }
121
122 ret = of_property_read_string(np, "format", &format);
123 if (ret) {
124 dev_err(&pdev->dev, "Can't parse format property\n");
125 return ret;
126 }
127 params->format = NULL;
128 for (i = 0; i < ARRAY_SIZE(simplefb_formats); i++) {
129 if (strcmp(format, simplefb_formats[i].name))
130 continue;
131 params->format = &simplefb_formats[i];
132 break;
133 }
134 if (!params->format) {
135 dev_err(&pdev->dev, "Invalid format value\n");
136 return -EINVAL;
137 }
138
139 return 0;
140}
141
142static int simplefb_probe(struct platform_device *pdev)
143{
144 int ret;
145 struct simplefb_params params;
146 struct fb_info *info;
147 struct resource *mem;
148
149 if (fb_get_options("simplefb", NULL))
150 return -ENODEV;
151
152 ret = simplefb_parse_dt(pdev, &params);
153 if (ret)
154 return ret;
155
156 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
157 if (!mem) {
158 dev_err(&pdev->dev, "No memory resource\n");
159 return -EINVAL;
160 }
161
162 info = framebuffer_alloc(sizeof(u32) * 16, &pdev->dev);
163 if (!info)
164 return -ENOMEM;
165 platform_set_drvdata(pdev, info);
166
167 info->fix = simplefb_fix;
168 info->fix.smem_start = mem->start;
169 info->fix.smem_len = resource_size(mem);
170 info->fix.line_length = params.stride;
171
172 info->var = simplefb_var;
173 info->var.xres = params.width;
174 info->var.yres = params.height;
175 info->var.xres_virtual = params.width;
176 info->var.yres_virtual = params.height;
177 info->var.bits_per_pixel = params.format->bits_per_pixel;
178 info->var.red = params.format->red;
179 info->var.green = params.format->green;
180 info->var.blue = params.format->blue;
181 info->var.transp = params.format->transp;
182
183 info->fbops = &simplefb_ops;
184 info->flags = FBINFO_DEFAULT;
185 info->screen_base = devm_ioremap(&pdev->dev, info->fix.smem_start,
186 info->fix.smem_len);
187 if (!info->screen_base) {
188 framebuffer_release(info);
189 return -ENODEV;
190 }
191 info->pseudo_palette = (void *)(info + 1);
192
193 ret = register_framebuffer(info);
194 if (ret < 0) {
195 dev_err(&pdev->dev, "Unable to register simplefb: %d\n", ret);
196 framebuffer_release(info);
197 return ret;
198 }
199
200 dev_info(&pdev->dev, "fb%d: simplefb registered!\n", info->node);
201
202 return 0;
203}
204
205static int simplefb_remove(struct platform_device *pdev)
206{
207 struct fb_info *info = platform_get_drvdata(pdev);
208
209 unregister_framebuffer(info);
210 framebuffer_release(info);
211
212 return 0;
213}
214
215static const struct of_device_id simplefb_of_match[] = {
216 { .compatible = "simple-framebuffer", },
217 { },
218};
219MODULE_DEVICE_TABLE(of, simplefb_of_match);
220
221static struct platform_driver simplefb_driver = {
222 .driver = {
223 .name = "simple-framebuffer",
224 .owner = THIS_MODULE,
225 .of_match_table = simplefb_of_match,
226 },
227 .probe = simplefb_probe,
228 .remove = simplefb_remove,
229};
230module_platform_driver(simplefb_driver);
231
232MODULE_AUTHOR("Stephen Warren <swarren@wwwdotorg.org>");
233MODULE_DESCRIPTION("Simple framebuffer driver");
234MODULE_LICENSE("GPL v2");
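
The new driver supports exactly one pixel format, "r5g6b5" (red at bit offset 11, length 5; green at 5/6; blue at 0/5; no transparency bits), and simplefb_setcolreg() packs the 16-bit-per-channel fbdev colour components into that layout for the 16-entry pseudo palette. A small stand-alone illustration of the same arithmetic (ordinary userspace C, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Pack a 16-bit-per-channel colour the way simplefb_setcolreg() does for
 * the "r5g6b5" entry of simplefb_formats[]. */
static uint32_t pack_r5g6b5(uint16_t red, uint16_t green, uint16_t blue)
{
	uint32_t cr = red   >> (16 - 5);	/* keep the top 5 bits */
	uint32_t cg = green >> (16 - 6);	/* keep the top 6 bits */
	uint32_t cb = blue  >> (16 - 5);	/* keep the top 5 bits */

	return (cr << 11) | (cg << 5) | cb;
}

int main(void)
{
	printf("red  -> 0x%04x\n", (unsigned)pack_r5g6b5(0xffff, 0x0000, 0x0000)); /* 0xf800 */
	printf("grey -> 0x%04x\n", (unsigned)pack_r5g6b5(0x8000, 0x8000, 0x8000)); /* 0x8410 */
	printf("blue -> 0x%04x\n", (unsigned)pack_r5g6b5(0x0000, 0x0000, 0xffff)); /* 0x001f */
	return 0;
}
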
diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c
index db2390aed387..6e94d8dd3d00 100644
--- a/drivers/w1/masters/omap_hdq.c
+++ b/drivers/w1/masters/omap_hdq.c
@@ -555,11 +555,6 @@ static int omap_hdq_probe(struct platform_device *pdev)
555 platform_set_drvdata(pdev, hdq_data); 555 platform_set_drvdata(pdev, hdq_data);
556 556
557 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 557 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
558 if (!res) {
559 dev_dbg(&pdev->dev, "unable to get resource\n");
560 return -ENXIO;
561 }
562
563 hdq_data->hdq_base = devm_ioremap_resource(dev, res); 558 hdq_data->hdq_base = devm_ioremap_resource(dev, res);
564 if (IS_ERR(hdq_data->hdq_base)) 559 if (IS_ERR(hdq_data->hdq_base))
565 return PTR_ERR(hdq_data->hdq_base); 560 return PTR_ERR(hdq_data->hdq_base);
diff --git a/drivers/watchdog/ath79_wdt.c b/drivers/watchdog/ath79_wdt.c
index d184c48a0482..37cb09b27b63 100644
--- a/drivers/watchdog/ath79_wdt.c
+++ b/drivers/watchdog/ath79_wdt.c
@@ -248,11 +248,6 @@ static int ath79_wdt_probe(struct platform_device *pdev)
248 return -EBUSY; 248 return -EBUSY;
249 249
250 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 250 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
251 if (!res) {
252 dev_err(&pdev->dev, "no memory resource found\n");
253 return -EINVAL;
254 }
255
256 wdt_base = devm_ioremap_resource(&pdev->dev, res); 251 wdt_base = devm_ioremap_resource(&pdev->dev, res);
257 if (IS_ERR(wdt_base)) 252 if (IS_ERR(wdt_base))
258 return PTR_ERR(wdt_base); 253 return PTR_ERR(wdt_base);
diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c
index 100d4fbfde2a..bead7740c86a 100644
--- a/drivers/watchdog/davinci_wdt.c
+++ b/drivers/watchdog/davinci_wdt.c
@@ -217,11 +217,6 @@ static int davinci_wdt_probe(struct platform_device *pdev)
217 dev_info(dev, "heartbeat %d sec\n", heartbeat); 217 dev_info(dev, "heartbeat %d sec\n", heartbeat);
218 218
219 wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 219 wdt_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
220 if (wdt_mem == NULL) {
221 dev_err(dev, "failed to get memory region resource\n");
222 return -ENOENT;
223 }
224
225 wdt_base = devm_ioremap_resource(dev, wdt_mem); 220 wdt_base = devm_ioremap_resource(dev, wdt_mem);
226 if (IS_ERR(wdt_base)) 221 if (IS_ERR(wdt_base))
227 return PTR_ERR(wdt_base); 222 return PTR_ERR(wdt_base);
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index ff908823688c..62946c2cb4f8 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -257,11 +257,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
257 struct resource *res; 257 struct resource *res;
258 258
259 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 259 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
260 if (!res) {
261 dev_err(&pdev->dev, "can't get device resources\n");
262 return -ENODEV;
263 }
264
265 imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res); 260 imx2_wdt.base = devm_ioremap_resource(&pdev->dev, res);
266 if (IS_ERR(imx2_wdt.base)) 261 if (IS_ERR(imx2_wdt.base))
267 return PTR_ERR(imx2_wdt.base); 262 return PTR_ERR(imx2_wdt.base);
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index f03bf501527f..9e02d60a364b 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -19,11 +19,10 @@ config XEN_SELFBALLOONING
19 by the current usage of anonymous memory ("committed AS") and 19 by the current usage of anonymous memory ("committed AS") and
20 controlled by various sysfs-settable parameters. Configuring 20 controlled by various sysfs-settable parameters. Configuring
21 FRONTSWAP is highly recommended; if it is not configured, self- 21 FRONTSWAP is highly recommended; if it is not configured, self-
22 ballooning is disabled by default but can be enabled with the 22 ballooning is disabled by default. If FRONTSWAP is configured,
23 'selfballooning' kernel boot parameter. If FRONTSWAP is configured,
24 frontswap-selfshrinking is enabled by default but can be disabled 23 frontswap-selfshrinking is enabled by default but can be disabled
25 with the 'noselfshrink' kernel boot parameter; and self-ballooning 24 with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
26 is enabled by default but can be disabled with the 'noselfballooning' 25 is enabled by default but can be disabled with the 'tmem.selfballooning=0'
27 kernel boot parameter. Note that systems without a sufficiently 26 kernel boot parameter. Note that systems without a sufficiently
28 large swap device should not enable self-ballooning. 27 large swap device should not enable self-ballooning.
29 28
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index a56776dbe095..930fb6817901 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -407,7 +407,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
407 nr_pages = ARRAY_SIZE(frame_list); 407 nr_pages = ARRAY_SIZE(frame_list);
408 408
409 for (i = 0; i < nr_pages; i++) { 409 for (i = 0; i < nr_pages; i++) {
410 if ((page = alloc_page(gfp)) == NULL) { 410 page = alloc_page(gfp);
411 if (page == NULL) {
411 nr_pages = i; 412 nr_pages = i;
412 state = BP_EAGAIN; 413 state = BP_EAGAIN;
413 break; 414 break;
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index ca2b00e9d558..2cfc24d76fc5 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma)
504 struct page **pages = vma->vm_private_data; 504 struct page **pages = vma->vm_private_data;
505 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 505 int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
506 506
507 if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages)) 507 if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
508 return; 508 return;
509 509
510 xen_unmap_domain_mfn_range(vma, numpgs, pages); 510 xen_unmap_domain_mfn_range(vma, numpgs, pages);
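
The privcmd_close() one-liner is a parenthesis/precedence fix: previously the whole expression XENFEAT_auto_translated_physmap || !numpgs || !pages was evaluated first and its boolean result passed to xen_feature(), so neither the intended feature query nor the numpgs/pages guards behaved as written; the corrected form passes only the feature flag to xen_feature() and then short-circuits on the other two conditions.
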
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index e3600be4e7fa..18e8bd8fa947 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -11,11 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/pagemap.h> 12#include <linux/pagemap.h>
13#include <linux/cleancache.h> 13#include <linux/cleancache.h>
14
15/* temporary ifdef until include/linux/frontswap.h is upstream */
16#ifdef CONFIG_FRONTSWAP
17#include <linux/frontswap.h> 14#include <linux/frontswap.h>
18#endif
19 15
20#include <xen/xen.h> 16#include <xen/xen.h>
21#include <xen/interface/xen.h> 17#include <xen/interface/xen.h>
@@ -24,6 +20,34 @@
24#include <asm/xen/hypervisor.h> 20#include <asm/xen/hypervisor.h>
25#include <xen/tmem.h> 21#include <xen/tmem.h>
26 22
23#ifndef CONFIG_XEN_TMEM_MODULE
24bool __read_mostly tmem_enabled = false;
25
26static int __init enable_tmem(char *s)
27{
28 tmem_enabled = true;
29 return 1;
30}
31__setup("tmem", enable_tmem);
32#endif
33
34#ifdef CONFIG_CLEANCACHE
35static bool cleancache __read_mostly = true;
36module_param(cleancache, bool, S_IRUGO);
37static bool selfballooning __read_mostly = true;
38module_param(selfballooning, bool, S_IRUGO);
39#endif /* CONFIG_CLEANCACHE */
40
41#ifdef CONFIG_FRONTSWAP
42static bool frontswap __read_mostly = true;
43module_param(frontswap, bool, S_IRUGO);
44#endif /* CONFIG_FRONTSWAP */
45
46#ifdef CONFIG_XEN_SELFBALLOONING
47static bool selfshrinking __read_mostly = true;
48module_param(selfshrinking, bool, S_IRUGO);
49#endif /* CONFIG_XEN_SELFBALLOONING */
50
27#define TMEM_CONTROL 0 51#define TMEM_CONTROL 0
28#define TMEM_NEW_POOL 1 52#define TMEM_NEW_POOL 1
29#define TMEM_DESTROY_POOL 2 53#define TMEM_DESTROY_POOL 2
@@ -129,16 +153,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
129 return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); 153 return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
130} 154}
131 155
132#ifndef CONFIG_XEN_TMEM_MODULE
133bool __read_mostly tmem_enabled = false;
134
135static int __init enable_tmem(char *s)
136{
137 tmem_enabled = true;
138 return 1;
139}
140__setup("tmem", enable_tmem);
141#endif
142 156
143#ifdef CONFIG_CLEANCACHE 157#ifdef CONFIG_CLEANCACHE
144static int xen_tmem_destroy_pool(u32 pool_id) 158static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +244,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
230 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); 244 return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
231} 245}
232 246
233static bool disable_cleancache __read_mostly;
234static bool disable_selfballooning __read_mostly;
235#ifdef CONFIG_XEN_TMEM_MODULE
236module_param(disable_cleancache, bool, S_IRUGO);
237module_param(disable_selfballooning, bool, S_IRUGO);
238#else
239static int __init no_cleancache(char *s)
240{
241 disable_cleancache = true;
242 return 1;
243}
244__setup("nocleancache", no_cleancache);
245#endif
246
247static struct cleancache_ops tmem_cleancache_ops = { 247static struct cleancache_ops tmem_cleancache_ops = {
248 .put_page = tmem_cleancache_put_page, 248 .put_page = tmem_cleancache_put_page,
249 .get_page = tmem_cleancache_get_page, 249 .get_page = tmem_cleancache_get_page,
@@ -361,20 +361,6 @@ static void tmem_frontswap_init(unsigned ignored)
361 xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE); 361 xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
362} 362}
363 363
364static bool disable_frontswap __read_mostly;
365static bool disable_frontswap_selfshrinking __read_mostly;
366#ifdef CONFIG_XEN_TMEM_MODULE
367module_param(disable_frontswap, bool, S_IRUGO);
368module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
369#else
370static int __init no_frontswap(char *s)
371{
372 disable_frontswap = true;
373 return 1;
374}
375__setup("nofrontswap", no_frontswap);
376#endif
377
378static struct frontswap_ops tmem_frontswap_ops = { 364static struct frontswap_ops tmem_frontswap_ops = {
379 .store = tmem_frontswap_store, 365 .store = tmem_frontswap_store,
380 .load = tmem_frontswap_load, 366 .load = tmem_frontswap_load,
@@ -382,8 +368,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
382 .invalidate_area = tmem_frontswap_flush_area, 368 .invalidate_area = tmem_frontswap_flush_area,
383 .init = tmem_frontswap_init 369 .init = tmem_frontswap_init
384}; 370};
385#else /* CONFIG_FRONTSWAP */
386#define disable_frontswap_selfshrinking 1
387#endif 371#endif
388 372
389static int xen_tmem_init(void) 373static int xen_tmem_init(void)
@@ -391,7 +375,7 @@ static int xen_tmem_init(void)
391 if (!xen_domain()) 375 if (!xen_domain())
392 return 0; 376 return 0;
393#ifdef CONFIG_FRONTSWAP 377#ifdef CONFIG_FRONTSWAP
394 if (tmem_enabled && !disable_frontswap) { 378 if (tmem_enabled && frontswap) {
395 char *s = ""; 379 char *s = "";
396 struct frontswap_ops *old_ops = 380 struct frontswap_ops *old_ops =
397 frontswap_register_ops(&tmem_frontswap_ops); 381 frontswap_register_ops(&tmem_frontswap_ops);
@@ -408,7 +392,7 @@ static int xen_tmem_init(void)
408#endif 392#endif
409#ifdef CONFIG_CLEANCACHE 393#ifdef CONFIG_CLEANCACHE
410 BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); 394 BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
411 if (tmem_enabled && !disable_cleancache) { 395 if (tmem_enabled && cleancache) {
412 char *s = ""; 396 char *s = "";
413 struct cleancache_ops *old_ops = 397 struct cleancache_ops *old_ops =
414 cleancache_register_ops(&tmem_cleancache_ops); 398 cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +403,15 @@ static int xen_tmem_init(void)
419 } 403 }
420#endif 404#endif
421#ifdef CONFIG_XEN_SELFBALLOONING 405#ifdef CONFIG_XEN_SELFBALLOONING
422 xen_selfballoon_init(!disable_selfballooning, 406 /*
423 !disable_frontswap_selfshrinking); 407 * There is no point of driving pages to the swap system if they
408 * aren't going anywhere in tmem universe.
409 */
410 if (!frontswap) {
411 selfshrinking = false;
412 selfballooning = false;
413 }
414 xen_selfballoon_init(selfballooning, selfshrinking);
424#endif 415#endif
425 return 0; 416 return 0;
426} 417}
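
The tmem.c change above and the xen-selfballoon.c one below replace the old negative knobs (nocleancache, nofrontswap, noselfshrink, noselfballooning and the disable_* module parameters) with positive bool module parameters that default to true. Since this code is normally built in, such a parameter is set on the kernel command line with the module-name prefix, which is where the tmem.selfballooning=0 / tmem.selfshrink=0 spellings quoted in the updated Kconfig and xen-selfballoon.c comments come from. A minimal sketch of the mechanism (the parameter and symbol names below are illustrative only):

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Enabled by default; readable (but not writable) through
 * /sys/module/<modname>/parameters/. */
static bool example_feature __read_mostly = true;
module_param(example_feature, bool, S_IRUGO);

/* Built as a module:  modprobe <modname> example_feature=0
 * Built in:           <modname>.example_feature=0 on the kernel command line */
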
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index f2ef569c7cc1..f70984a892aa 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -53,15 +53,12 @@
53 * System configuration note: Selfballooning should not be enabled on 53 * System configuration note: Selfballooning should not be enabled on
54 * systems without a sufficiently large swap device configured; for best 54 * systems without a sufficiently large swap device configured; for best
55 * results, it is recommended that total swap be increased by the size 55 * results, it is recommended that total swap be increased by the size
56 * of the guest memory. Also, while technically not required to be 56 * of the guest memory. Note, that selfballooning should be disabled by default
57 * configured, it is highly recommended that frontswap also be configured 57 * if frontswap is not configured. Similarly selfballooning should be enabled
58 * and enabled when selfballooning is running. So, selfballooning 58 * by default if frontswap is configured and can be disabled with the
59 * is disabled by default if frontswap is not configured and can only 59 * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is
60 * be enabled with the "selfballooning" kernel boot option; similarly 60 * configured, frontswap-selfshrinking can be disabled with the
61 * selfballooning is enabled by default if frontswap is configured and 61 * "tmem.selfshrink=0" kernel boot option.
62 * can be disabled with the "noselfballooning" kernel boot option. Finally,
63 * when frontswap is configured, frontswap-selfshrinking can be disabled
64 * with the "noselfshrink" kernel boot option.
65 * 62 *
66 * Selfballooning is disallowed in domain0 and force-disabled. 63 * Selfballooning is disallowed in domain0 and force-disabled.
67 * 64 *
@@ -120,9 +117,6 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
120/* Enable/disable with sysfs. */ 117/* Enable/disable with sysfs. */
121static bool frontswap_selfshrinking __read_mostly; 118static bool frontswap_selfshrinking __read_mostly;
122 119
123/* Enable/disable with kernel boot option. */
124static bool use_frontswap_selfshrink = true;
125
126/* 120/*
127 * The default values for the following parameters were deemed reasonable 121 * The default values for the following parameters were deemed reasonable
128 * by experimentation, may be workload-dependent, and can all be 122 * by experimentation, may be workload-dependent, and can all be
@@ -176,35 +170,6 @@ static void frontswap_selfshrink(void)
176 frontswap_shrink(tgt_frontswap_pages); 170 frontswap_shrink(tgt_frontswap_pages);
177} 171}
178 172
179static int __init xen_nofrontswap_selfshrink_setup(char *s)
180{
181 use_frontswap_selfshrink = false;
182 return 1;
183}
184
185__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
186
187/* Disable with kernel boot option. */
188static bool use_selfballooning = true;
189
190static int __init xen_noselfballooning_setup(char *s)
191{
192 use_selfballooning = false;
193 return 1;
194}
195
196__setup("noselfballooning", xen_noselfballooning_setup);
197#else /* !CONFIG_FRONTSWAP */
198/* Enable with kernel boot option. */
199static bool use_selfballooning;
200
201static int __init xen_selfballooning_setup(char *s)
202{
203 use_selfballooning = true;
204 return 1;
205}
206
207__setup("selfballooning", xen_selfballooning_setup);
208#endif /* CONFIG_FRONTSWAP */ 173#endif /* CONFIG_FRONTSWAP */
209 174
210#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) 175#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index d73000800762..a6f42fc01407 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -70,22 +70,21 @@ static long xenbus_alloc(domid_t domid)
70 return err; 70 return err;
71} 71}
72 72
73static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) 73static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
74 unsigned long data)
74{ 75{
75 if (!capable(CAP_SYS_ADMIN)) 76 if (!capable(CAP_SYS_ADMIN))
76 return -EPERM; 77 return -EPERM;
77 78
78 switch (cmd) { 79 switch (cmd) {
79 case IOCTL_XENBUS_BACKEND_EVTCHN: 80 case IOCTL_XENBUS_BACKEND_EVTCHN:
80 if (xen_store_evtchn > 0) 81 if (xen_store_evtchn > 0)
81 return xen_store_evtchn; 82 return xen_store_evtchn;
82 return -ENODEV; 83 return -ENODEV;
83 84 case IOCTL_XENBUS_BACKEND_SETUP:
84 case IOCTL_XENBUS_BACKEND_SETUP: 85 return xenbus_alloc(data);
85 return xenbus_alloc(data); 86 default:
86 87 return -ENOTTY;
87 default:
88 return -ENOTTY;
89 } 88 }
90} 89}
91 90
diff --git a/fs/aio.c b/fs/aio.c
index c5b1a8c10411..7fe5bdee1630 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -307,7 +307,9 @@ static void free_ioctx(struct kioctx *ctx)
307 kunmap_atomic(ring); 307 kunmap_atomic(ring);
308 308
309 while (atomic_read(&ctx->reqs_active) > 0) { 309 while (atomic_read(&ctx->reqs_active) > 0) {
310 wait_event(ctx->wait, head != ctx->tail); 310 wait_event(ctx->wait,
311 head != ctx->tail ||
312 atomic_read(&ctx->reqs_active) <= 0);
311 313
312 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; 314 avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
313 315
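
The extra wait_event() condition lets free_ioctx() make progress even when reqs_active drops to zero without the ring's head/tail ever changing (for example when the remaining requests are reaped by cancellation rather than completed through the ring), where the old head != ctx->tail test alone would sleep forever.
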
@@ -1299,8 +1301,7 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb,
1299 * < min_nr if the timeout specified by timeout has elapsed 1301 * < min_nr if the timeout specified by timeout has elapsed
1300 * before sufficient events are available, where timeout == NULL 1302 * before sufficient events are available, where timeout == NULL
1301 * specifies an infinite timeout. Note that the timeout pointed to by 1303 * specifies an infinite timeout. Note that the timeout pointed to by
1302 * timeout is relative and will be updated if not NULL and the 1304 * timeout is relative. Will fail with -ENOSYS if not implemented.
1303 * operation blocks. Will fail with -ENOSYS if not implemented.
1304 */ 1305 */
1305SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, 1306SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
1306 long, min_nr, 1307 long, min_nr,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b4fb41558111..290e347b6db3 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -918,7 +918,8 @@ again:
918 ref->parent, bsz, 0); 918 ref->parent, bsz, 0);
919 if (!eb || !extent_buffer_uptodate(eb)) { 919 if (!eb || !extent_buffer_uptodate(eb)) {
920 free_extent_buffer(eb); 920 free_extent_buffer(eb);
921 return -EIO; 921 ret = -EIO;
922 goto out;
922 } 923 }
923 ret = find_extent_in_eb(eb, bytenr, 924 ret = find_extent_in_eb(eb, bytenr,
924 *extent_item_pos, &eie); 925 *extent_item_pos, &eie);
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 18af6f48781a..1431a6965017 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1700,7 +1700,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1700 unsigned int j; 1700 unsigned int j;
1701 DECLARE_COMPLETION_ONSTACK(complete); 1701 DECLARE_COMPLETION_ONSTACK(complete);
1702 1702
1703 bio = bio_alloc(GFP_NOFS, num_pages - i); 1703 bio = btrfs_io_bio_alloc(GFP_NOFS, num_pages - i);
1704 if (!bio) { 1704 if (!bio) {
1705 printk(KERN_INFO 1705 printk(KERN_INFO
1706 "btrfsic: bio_alloc() for %u pages failed!\n", 1706 "btrfsic: bio_alloc() for %u pages failed!\n",
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index de6de8e60b46..02fae7f7e42c 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -951,10 +951,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
951 BUG_ON(ret); /* -ENOMEM */ 951 BUG_ON(ret); /* -ENOMEM */
952 } 952 }
953 if (new_flags != 0) { 953 if (new_flags != 0) {
954 int level = btrfs_header_level(buf);
955
954 ret = btrfs_set_disk_extent_flags(trans, root, 956 ret = btrfs_set_disk_extent_flags(trans, root,
955 buf->start, 957 buf->start,
956 buf->len, 958 buf->len,
957 new_flags, 0); 959 new_flags, level, 0);
958 if (ret) 960 if (ret)
959 return ret; 961 return ret;
960 } 962 }
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 63c328a9ce95..d6dd49b51ba8 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -88,12 +88,12 @@ struct btrfs_ordered_sum;
88/* holds checksums of all the data extents */ 88/* holds checksums of all the data extents */
89#define BTRFS_CSUM_TREE_OBJECTID 7ULL 89#define BTRFS_CSUM_TREE_OBJECTID 7ULL
90 90
91/* for storing balance parameters in the root tree */
92#define BTRFS_BALANCE_OBJECTID -4ULL
93
94/* holds quota configuration and tracking */ 91/* holds quota configuration and tracking */
95#define BTRFS_QUOTA_TREE_OBJECTID 8ULL 92#define BTRFS_QUOTA_TREE_OBJECTID 8ULL
96 93
94/* for storing balance parameters in the root tree */
95#define BTRFS_BALANCE_OBJECTID -4ULL
96
97/* orhpan objectid for tracking unlinked/truncated files */ 97/* orhpan objectid for tracking unlinked/truncated files */
98#define BTRFS_ORPHAN_OBJECTID -5ULL 98#define BTRFS_ORPHAN_OBJECTID -5ULL
99 99
@@ -3075,7 +3075,7 @@ int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3075int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 3075int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
3076 struct btrfs_root *root, 3076 struct btrfs_root *root,
3077 u64 bytenr, u64 num_bytes, u64 flags, 3077 u64 bytenr, u64 num_bytes, u64 flags,
3078 int is_data); 3078 int level, int is_data);
3079int btrfs_free_extent(struct btrfs_trans_handle *trans, 3079int btrfs_free_extent(struct btrfs_trans_handle *trans,
3080 struct btrfs_root *root, 3080 struct btrfs_root *root,
3081 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid, 3081 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index f75fcaf79aeb..70b962cc177d 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -60,6 +60,7 @@ struct btrfs_delayed_ref_node {
60struct btrfs_delayed_extent_op { 60struct btrfs_delayed_extent_op {
61 struct btrfs_disk_key key; 61 struct btrfs_disk_key key;
62 u64 flags_to_set; 62 u64 flags_to_set;
63 int level;
63 unsigned int update_key:1; 64 unsigned int update_key:1;
64 unsigned int update_flags:1; 65 unsigned int update_flags:1;
65 unsigned int is_data:1; 66 unsigned int is_data:1;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 7ba7b3900cb8..65241f32d3f8 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -313,6 +313,11 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
313 struct btrfs_device *tgt_device = NULL; 313 struct btrfs_device *tgt_device = NULL;
314 struct btrfs_device *src_device = NULL; 314 struct btrfs_device *src_device = NULL;
315 315
316 if (btrfs_fs_incompat(fs_info, RAID56)) {
317 pr_warn("btrfs: dev_replace cannot yet handle RAID5/RAID6\n");
318 return -EINVAL;
319 }
320
316 switch (args->start.cont_reading_from_srcdev_mode) { 321 switch (args->start.cont_reading_from_srcdev_mode) {
317 case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS: 322 case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
318 case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID: 323 case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 4e9ebe1f1827..e7b3cb5286a5 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -152,7 +152,7 @@ static struct btrfs_lockdep_keyset {
152 { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" }, 152 { .id = BTRFS_DEV_TREE_OBJECTID, .name_stem = "dev" },
153 { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" }, 153 { .id = BTRFS_FS_TREE_OBJECTID, .name_stem = "fs" },
154 { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" }, 154 { .id = BTRFS_CSUM_TREE_OBJECTID, .name_stem = "csum" },
155 { .id = BTRFS_ORPHAN_OBJECTID, .name_stem = "orphan" }, 155 { .id = BTRFS_QUOTA_TREE_OBJECTID, .name_stem = "quota" },
156 { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" }, 156 { .id = BTRFS_TREE_LOG_OBJECTID, .name_stem = "log" },
157 { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" }, 157 { .id = BTRFS_TREE_RELOC_OBJECTID, .name_stem = "treloc" },
158 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" }, 158 { .id = BTRFS_DATA_RELOC_TREE_OBJECTID, .name_stem = "dreloc" },
@@ -1513,7 +1513,6 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
1513 } 1513 }
1514 1514
1515 root->commit_root = btrfs_root_node(root); 1515 root->commit_root = btrfs_root_node(root);
1516 BUG_ON(!root->node); /* -ENOMEM */
1517out: 1516out:
1518 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) { 1517 if (location->objectid != BTRFS_TREE_LOG_OBJECTID) {
1519 root->ref_cows = 1; 1518 root->ref_cows = 1;
@@ -1988,30 +1987,33 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
1988{ 1987{
1989 free_extent_buffer(info->tree_root->node); 1988 free_extent_buffer(info->tree_root->node);
1990 free_extent_buffer(info->tree_root->commit_root); 1989 free_extent_buffer(info->tree_root->commit_root);
1991 free_extent_buffer(info->dev_root->node);
1992 free_extent_buffer(info->dev_root->commit_root);
1993 free_extent_buffer(info->extent_root->node);
1994 free_extent_buffer(info->extent_root->commit_root);
1995 free_extent_buffer(info->csum_root->node);
1996 free_extent_buffer(info->csum_root->commit_root);
1997 if (info->quota_root) {
1998 free_extent_buffer(info->quota_root->node);
1999 free_extent_buffer(info->quota_root->commit_root);
2000 }
2001
2002 info->tree_root->node = NULL; 1990 info->tree_root->node = NULL;
2003 info->tree_root->commit_root = NULL; 1991 info->tree_root->commit_root = NULL;
2004 info->dev_root->node = NULL; 1992
2005 info->dev_root->commit_root = NULL; 1993 if (info->dev_root) {
2006 info->extent_root->node = NULL; 1994 free_extent_buffer(info->dev_root->node);
2007 info->extent_root->commit_root = NULL; 1995 free_extent_buffer(info->dev_root->commit_root);
2008 info->csum_root->node = NULL; 1996 info->dev_root->node = NULL;
2009 info->csum_root->commit_root = NULL; 1997 info->dev_root->commit_root = NULL;
1998 }
1999 if (info->extent_root) {
2000 free_extent_buffer(info->extent_root->node);
2001 free_extent_buffer(info->extent_root->commit_root);
2002 info->extent_root->node = NULL;
2003 info->extent_root->commit_root = NULL;
2004 }
2005 if (info->csum_root) {
2006 free_extent_buffer(info->csum_root->node);
2007 free_extent_buffer(info->csum_root->commit_root);
2008 info->csum_root->node = NULL;
2009 info->csum_root->commit_root = NULL;
2010 }
2010 if (info->quota_root) { 2011 if (info->quota_root) {
2012 free_extent_buffer(info->quota_root->node);
2013 free_extent_buffer(info->quota_root->commit_root);
2011 info->quota_root->node = NULL; 2014 info->quota_root->node = NULL;
2012 info->quota_root->commit_root = NULL; 2015 info->quota_root->commit_root = NULL;
2013 } 2016 }
2014
2015 if (chunk_root) { 2017 if (chunk_root) {
2016 free_extent_buffer(info->chunk_root->node); 2018 free_extent_buffer(info->chunk_root->node);
2017 free_extent_buffer(info->chunk_root->commit_root); 2019 free_extent_buffer(info->chunk_root->commit_root);
@@ -3128,7 +3130,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
3128 * caller 3130 * caller
3129 */ 3131 */
3130 device->flush_bio = NULL; 3132 device->flush_bio = NULL;
3131 bio = bio_alloc(GFP_NOFS, 0); 3133 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
3132 if (!bio) 3134 if (!bio)
3133 return -ENOMEM; 3135 return -ENOMEM;
3134 3136
@@ -3659,8 +3661,11 @@ static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
3659 ordered_operations); 3661 ordered_operations);
3660 3662
3661 list_del_init(&btrfs_inode->ordered_operations); 3663 list_del_init(&btrfs_inode->ordered_operations);
3664 spin_unlock(&root->fs_info->ordered_extent_lock);
3662 3665
3663 btrfs_invalidate_inodes(btrfs_inode->root); 3666 btrfs_invalidate_inodes(btrfs_inode->root);
3667
3668 spin_lock(&root->fs_info->ordered_extent_lock);
3664 } 3669 }
3665 3670
3666 spin_unlock(&root->fs_info->ordered_extent_lock); 3671 spin_unlock(&root->fs_info->ordered_extent_lock);
@@ -3782,8 +3787,11 @@ static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root)
3782 list_del_init(&btrfs_inode->delalloc_inodes); 3787 list_del_init(&btrfs_inode->delalloc_inodes);
3783 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST, 3788 clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
3784 &btrfs_inode->runtime_flags); 3789 &btrfs_inode->runtime_flags);
3790 spin_unlock(&root->fs_info->delalloc_lock);
3785 3791
3786 btrfs_invalidate_inodes(btrfs_inode->root); 3792 btrfs_invalidate_inodes(btrfs_inode->root);
3793
3794 spin_lock(&root->fs_info->delalloc_lock);
3787 } 3795 }
3788 3796
3789 spin_unlock(&root->fs_info->delalloc_lock); 3797 spin_unlock(&root->fs_info->delalloc_lock);
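
Both hunks above apply the same rule: btrfs_invalidate_inodes() can block, so it must not be called with the ordered_extent_lock / delalloc_lock spinlock held. Because each entry is unlinked with list_del_init() before the call, the lock can be dropped around it and re-taken for the next iteration. A generic, self-contained sketch of that shape (the names here are illustrative, not btrfs code):

#include <linux/list.h>
#include <linux/spinlock.h>

struct item { struct list_head list; };

static LIST_HEAD(pending);
static DEFINE_SPINLOCK(pending_lock);

void process_item_may_sleep(struct item *it);	/* stand-in for btrfs_invalidate_inodes() */

static void drain_pending(void)
{
	spin_lock(&pending_lock);
	while (!list_empty(&pending)) {
		struct item *it = list_first_entry(&pending, struct item, list);

		/* unlink first, then drop the lock around the blocking call */
		list_del_init(&it->list);
		spin_unlock(&pending_lock);

		process_item_may_sleep(it);

		spin_lock(&pending_lock);
	}
	spin_unlock(&pending_lock);
}
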
@@ -3808,7 +3816,7 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3808 while (start <= end) { 3816 while (start <= end) {
3809 eb = btrfs_find_tree_block(root, start, 3817 eb = btrfs_find_tree_block(root, start,
3810 root->leafsize); 3818 root->leafsize);
3811 start += eb->len; 3819 start += root->leafsize;
3812 if (!eb) 3820 if (!eb)
3813 continue; 3821 continue;
3814 wait_on_extent_buffer_writeback(eb); 3822 wait_on_extent_buffer_writeback(eb);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 2305b5c5cf00..df472ab1b5ac 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2070,8 +2070,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2070 u32 item_size; 2070 u32 item_size;
2071 int ret; 2071 int ret;
2072 int err = 0; 2072 int err = 0;
2073 int metadata = (node->type == BTRFS_TREE_BLOCK_REF_KEY || 2073 int metadata = !extent_op->is_data;
2074 node->type == BTRFS_SHARED_BLOCK_REF_KEY);
2075 2074
2076 if (trans->aborted) 2075 if (trans->aborted)
2077 return 0; 2076 return 0;
@@ -2086,11 +2085,8 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2086 key.objectid = node->bytenr; 2085 key.objectid = node->bytenr;
2087 2086
2088 if (metadata) { 2087 if (metadata) {
2089 struct btrfs_delayed_tree_ref *tree_ref;
2090
2091 tree_ref = btrfs_delayed_node_to_tree_ref(node);
2092 key.type = BTRFS_METADATA_ITEM_KEY; 2088 key.type = BTRFS_METADATA_ITEM_KEY;
2093 key.offset = tree_ref->level; 2089 key.offset = extent_op->level;
2094 } else { 2090 } else {
2095 key.type = BTRFS_EXTENT_ITEM_KEY; 2091 key.type = BTRFS_EXTENT_ITEM_KEY;
2096 key.offset = node->num_bytes; 2092 key.offset = node->num_bytes;
@@ -2719,7 +2715,7 @@ out:
2719int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans, 2715int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2720 struct btrfs_root *root, 2716 struct btrfs_root *root,
2721 u64 bytenr, u64 num_bytes, u64 flags, 2717 u64 bytenr, u64 num_bytes, u64 flags,
2722 int is_data) 2718 int level, int is_data)
2723{ 2719{
2724 struct btrfs_delayed_extent_op *extent_op; 2720 struct btrfs_delayed_extent_op *extent_op;
2725 int ret; 2721 int ret;
@@ -2732,6 +2728,7 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2732 extent_op->update_flags = 1; 2728 extent_op->update_flags = 1;
2733 extent_op->update_key = 0; 2729 extent_op->update_key = 0;
2734 extent_op->is_data = is_data ? 1 : 0; 2730 extent_op->is_data = is_data ? 1 : 0;
2731 extent_op->level = level;
2735 2732
2736 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr, 2733 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
2737 num_bytes, extent_op); 2734 num_bytes, extent_op);
@@ -3109,6 +3106,11 @@ again:
3109 WARN_ON(ret); 3106 WARN_ON(ret);
3110 3107
3111 if (i_size_read(inode) > 0) { 3108 if (i_size_read(inode) > 0) {
3109 ret = btrfs_check_trunc_cache_free_space(root,
3110 &root->fs_info->global_block_rsv);
3111 if (ret)
3112 goto out_put;
3113
3112 ret = btrfs_truncate_free_space_cache(root, trans, path, 3114 ret = btrfs_truncate_free_space_cache(root, trans, path,
3113 inode); 3115 inode);
3114 if (ret) 3116 if (ret)
@@ -4562,6 +4564,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
4562 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv; 4564 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
4563 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv; 4565 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
4564 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv; 4566 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
4567 if (fs_info->quota_root)
4568 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
4565 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv; 4569 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
4566 4570
4567 update_global_block_rsv(fs_info); 4571 update_global_block_rsv(fs_info);
@@ -6651,51 +6655,51 @@ use_block_rsv(struct btrfs_trans_handle *trans,
6651 struct btrfs_block_rsv *block_rsv; 6655 struct btrfs_block_rsv *block_rsv;
6652 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv; 6656 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
6653 int ret; 6657 int ret;
6658 bool global_updated = false;
6654 6659
6655 block_rsv = get_block_rsv(trans, root); 6660 block_rsv = get_block_rsv(trans, root);
6656 6661
6657 if (block_rsv->size == 0) { 6662 if (unlikely(block_rsv->size == 0))
6658 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 6663 goto try_reserve;
6659 BTRFS_RESERVE_NO_FLUSH); 6664again:
6660 /* 6665 ret = block_rsv_use_bytes(block_rsv, blocksize);
6661 * If we couldn't reserve metadata bytes try and use some from 6666 if (!ret)
6662 * the global reserve.
6663 */
6664 if (ret && block_rsv != global_rsv) {
6665 ret = block_rsv_use_bytes(global_rsv, blocksize);
6666 if (!ret)
6667 return global_rsv;
6668 return ERR_PTR(ret);
6669 } else if (ret) {
6670 return ERR_PTR(ret);
6671 }
6672 return block_rsv; 6667 return block_rsv;
6668
6669 if (block_rsv->failfast)
6670 return ERR_PTR(ret);
6671
6672 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
6673 global_updated = true;
6674 update_global_block_rsv(root->fs_info);
6675 goto again;
6673 } 6676 }
6674 6677
6675 ret = block_rsv_use_bytes(block_rsv, blocksize); 6678 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
6679 static DEFINE_RATELIMIT_STATE(_rs,
6680 DEFAULT_RATELIMIT_INTERVAL * 10,
6681 /*DEFAULT_RATELIMIT_BURST*/ 1);
6682 if (__ratelimit(&_rs))
6683 WARN(1, KERN_DEBUG
6684 "btrfs: block rsv returned %d\n", ret);
6685 }
6686try_reserve:
6687 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
6688 BTRFS_RESERVE_NO_FLUSH);
6676 if (!ret) 6689 if (!ret)
6677 return block_rsv; 6690 return block_rsv;
6678 if (ret && !block_rsv->failfast) { 6691 /*
6679 if (btrfs_test_opt(root, ENOSPC_DEBUG)) { 6692 * If we couldn't reserve metadata bytes try and use some from
6680 static DEFINE_RATELIMIT_STATE(_rs, 6693 * the global reserve if its space type is the same as the global
6681 DEFAULT_RATELIMIT_INTERVAL * 10, 6694 * reservation.
6682 /*DEFAULT_RATELIMIT_BURST*/ 1); 6695 */
6683 if (__ratelimit(&_rs)) 6696 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
6684 WARN(1, KERN_DEBUG 6697 block_rsv->space_info == global_rsv->space_info) {
6685 "btrfs: block rsv returned %d\n", ret); 6698 ret = block_rsv_use_bytes(global_rsv, blocksize);
6686 } 6699 if (!ret)
6687 ret = reserve_metadata_bytes(root, block_rsv, blocksize, 6700 return global_rsv;
6688 BTRFS_RESERVE_NO_FLUSH);
6689 if (!ret) {
6690 return block_rsv;
6691 } else if (ret && block_rsv != global_rsv) {
6692 ret = block_rsv_use_bytes(global_rsv, blocksize);
6693 if (!ret)
6694 return global_rsv;
6695 }
6696 } 6701 }
6697 6702 return ERR_PTR(ret);
6698 return ERR_PTR(-ENOSPC);
6699} 6703}
6700 6704
6701static void unuse_block_rsv(struct btrfs_fs_info *fs_info, 6705static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
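
Read side by side, the use_block_rsv() rework above is hard to follow; the resulting flow, reconstructed from the right-hand column with the rate-limited ENOSPC_DEBUG warning elided (trans, root and blocksize are the function's parameters), is roughly:

	block_rsv = get_block_rsv(trans, root);

	if (unlikely(block_rsv->size == 0))
		goto try_reserve;
again:
	ret = block_rsv_use_bytes(block_rsv, blocksize);
	if (!ret)
		return block_rsv;
	if (block_rsv->failfast)
		return ERR_PTR(ret);
	if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
		global_updated = true;
		update_global_block_rsv(root->fs_info);
		goto again;
	}
	/* ...rate-limited ENOSPC_DEBUG warning elided... */
try_reserve:
	ret = reserve_metadata_bytes(root, block_rsv, blocksize,
				     BTRFS_RESERVE_NO_FLUSH);
	if (!ret)
		return block_rsv;
	/*
	 * Fall back to the global reserve only when the two reserves share
	 * the same space_info; never steal across space types.
	 */
	if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
	    block_rsv->space_info == global_rsv->space_info) {
		ret = block_rsv_use_bytes(global_rsv, blocksize);
		if (!ret)
			return global_rsv;
	}
	return ERR_PTR(ret);
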
@@ -6763,6 +6767,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6763 extent_op->update_key = 1; 6767 extent_op->update_key = 1;
6764 extent_op->update_flags = 1; 6768 extent_op->update_flags = 1;
6765 extent_op->is_data = 0; 6769 extent_op->is_data = 0;
6770 extent_op->level = level;
6766 6771
6767 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans, 6772 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6768 ins.objectid, 6773 ins.objectid,
@@ -6934,7 +6939,8 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
6934 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc); 6939 ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
6935 BUG_ON(ret); /* -ENOMEM */ 6940 BUG_ON(ret); /* -ENOMEM */
6936 ret = btrfs_set_disk_extent_flags(trans, root, eb->start, 6941 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
6937 eb->len, flag, 0); 6942 eb->len, flag,
6943 btrfs_header_level(eb), 0);
6938 BUG_ON(ret); /* -ENOMEM */ 6944 BUG_ON(ret); /* -ENOMEM */
6939 wc->flags[level] |= flag; 6945 wc->flags[level] |= flag;
6940 } 6946 }
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 32d67a822e93..e7e7afb4a872 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -23,6 +23,7 @@
23 23
24static struct kmem_cache *extent_state_cache; 24static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache; 25static struct kmem_cache *extent_buffer_cache;
26static struct bio_set *btrfs_bioset;
26 27
27#ifdef CONFIG_BTRFS_DEBUG 28#ifdef CONFIG_BTRFS_DEBUG
28static LIST_HEAD(buffers); 29static LIST_HEAD(buffers);
@@ -125,10 +126,20 @@ int __init extent_io_init(void)
125 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL); 126 SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
126 if (!extent_buffer_cache) 127 if (!extent_buffer_cache)
127 goto free_state_cache; 128 goto free_state_cache;
129
130 btrfs_bioset = bioset_create(BIO_POOL_SIZE,
131 offsetof(struct btrfs_io_bio, bio));
132 if (!btrfs_bioset)
133 goto free_buffer_cache;
128 return 0; 134 return 0;
129 135
136free_buffer_cache:
137 kmem_cache_destroy(extent_buffer_cache);
138 extent_buffer_cache = NULL;
139
130free_state_cache: 140free_state_cache:
131 kmem_cache_destroy(extent_state_cache); 141 kmem_cache_destroy(extent_state_cache);
142 extent_state_cache = NULL;
132 return -ENOMEM; 143 return -ENOMEM;
133} 144}
134 145
@@ -145,6 +156,8 @@ void extent_io_exit(void)
145 kmem_cache_destroy(extent_state_cache); 156 kmem_cache_destroy(extent_state_cache);
146 if (extent_buffer_cache) 157 if (extent_buffer_cache)
147 kmem_cache_destroy(extent_buffer_cache); 158 kmem_cache_destroy(extent_buffer_cache);
159 if (btrfs_bioset)
160 bioset_free(btrfs_bioset);
148} 161}
149 162
150void extent_io_tree_init(struct extent_io_tree *tree, 163void extent_io_tree_init(struct extent_io_tree *tree,
@@ -1948,28 +1961,6 @@ static void check_page_uptodate(struct extent_io_tree *tree, struct page *page)
1948} 1961}
1949 1962
1950/* 1963/*
1951 * helper function to unlock a page if all the extents in the tree
1952 * for that page are unlocked
1953 */
1954static void check_page_locked(struct extent_io_tree *tree, struct page *page)
1955{
1956 u64 start = page_offset(page);
1957 u64 end = start + PAGE_CACHE_SIZE - 1;
1958 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0, NULL))
1959 unlock_page(page);
1960}
1961
1962/*
1963 * helper function to end page writeback if all the extents
1964 * in the tree for that page are done with writeback
1965 */
1966static void check_page_writeback(struct extent_io_tree *tree,
1967 struct page *page)
1968{
1969 end_page_writeback(page);
1970}
1971
1972/*
1973 * When IO fails, either with EIO or csum verification fails, we 1964 * When IO fails, either with EIO or csum verification fails, we
1974 * try other mirrors that might have a good copy of the data. This 1965 * try other mirrors that might have a good copy of the data. This
1975 * io_failure_record is used to record state as we go through all the 1966 * io_failure_record is used to record state as we go through all the
@@ -2046,7 +2037,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
2046 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num)) 2037 if (btrfs_is_parity_mirror(map_tree, logical, length, mirror_num))
2047 return 0; 2038 return 0;
2048 2039
2049 bio = bio_alloc(GFP_NOFS, 1); 2040 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2050 if (!bio) 2041 if (!bio)
2051 return -EIO; 2042 return -EIO;
2052 bio->bi_private = &compl; 2043 bio->bi_private = &compl;
@@ -2336,7 +2327,7 @@ static int bio_readpage_error(struct bio *failed_bio, struct page *page,
2336 return -EIO; 2327 return -EIO;
2337 } 2328 }
2338 2329
2339 bio = bio_alloc(GFP_NOFS, 1); 2330 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
2340 if (!bio) { 2331 if (!bio) {
2341 free_io_failure(inode, failrec, 0); 2332 free_io_failure(inode, failrec, 0);
2342 return -EIO; 2333 return -EIO;
@@ -2398,19 +2389,24 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
2398 struct extent_io_tree *tree; 2389 struct extent_io_tree *tree;
2399 u64 start; 2390 u64 start;
2400 u64 end; 2391 u64 end;
2401 int whole_page;
2402 2392
2403 do { 2393 do {
2404 struct page *page = bvec->bv_page; 2394 struct page *page = bvec->bv_page;
2405 tree = &BTRFS_I(page->mapping->host)->io_tree; 2395 tree = &BTRFS_I(page->mapping->host)->io_tree;
2406 2396
2407 start = page_offset(page) + bvec->bv_offset; 2397 /* We always issue full-page reads, but if some block
2408 end = start + bvec->bv_len - 1; 2398 * in a page fails to read, blk_update_request() will
2399 * advance bv_offset and adjust bv_len to compensate.
2400 * Print a warning for nonzero offsets, and an error
2401 * if they don't add up to a full page. */
2402 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2403 printk("%s page write in btrfs with offset %u and length %u\n",
2404 bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2405 ? KERN_ERR "partial" : KERN_INFO "incomplete",
2406 bvec->bv_offset, bvec->bv_len);
2409 2407
2410 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 2408 start = page_offset(page);
2411 whole_page = 1; 2409 end = start + bvec->bv_offset + bvec->bv_len - 1;
2412 else
2413 whole_page = 0;
2414 2410
2415 if (--bvec >= bio->bi_io_vec) 2411 if (--bvec >= bio->bi_io_vec)
2416 prefetchw(&bvec->bv_page->flags); 2412 prefetchw(&bvec->bv_page->flags);
@@ -2418,10 +2414,7 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
2418 if (end_extent_writepage(page, err, start, end)) 2414 if (end_extent_writepage(page, err, start, end))
2419 continue; 2415 continue;
2420 2416
2421 if (whole_page) 2417 end_page_writeback(page);
2422 end_page_writeback(page);
2423 else
2424 check_page_writeback(tree, page);
2425 } while (bvec >= bio->bi_io_vec); 2418 } while (bvec >= bio->bi_io_vec);
2426 2419
2427 bio_put(bio); 2420 bio_put(bio);
@@ -2446,7 +2439,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2446 struct extent_io_tree *tree; 2439 struct extent_io_tree *tree;
2447 u64 start; 2440 u64 start;
2448 u64 end; 2441 u64 end;
2449 int whole_page;
2450 int mirror; 2442 int mirror;
2451 int ret; 2443 int ret;
2452 2444
@@ -2457,19 +2449,26 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2457 struct page *page = bvec->bv_page; 2449 struct page *page = bvec->bv_page;
2458 struct extent_state *cached = NULL; 2450 struct extent_state *cached = NULL;
2459 struct extent_state *state; 2451 struct extent_state *state;
2452 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2460 2453
2461 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " 2454 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2462 "mirror=%ld\n", (u64)bio->bi_sector, err, 2455 "mirror=%lu\n", (u64)bio->bi_sector, err,
2463 (long int)bio->bi_bdev); 2456 io_bio->mirror_num);
2464 tree = &BTRFS_I(page->mapping->host)->io_tree; 2457 tree = &BTRFS_I(page->mapping->host)->io_tree;
2465 2458
2466 start = page_offset(page) + bvec->bv_offset; 2459 /* We always issue full-page reads, but if some block
2467 end = start + bvec->bv_len - 1; 2460 * in a page fails to read, blk_update_request() will
2461 * advance bv_offset and adjust bv_len to compensate.
2462 * Print a warning for nonzero offsets, and an error
2463 * if they don't add up to a full page. */
2464 if (bvec->bv_offset || bvec->bv_len != PAGE_CACHE_SIZE)
2465 printk("%s page read in btrfs with offset %u and length %u\n",
2466 bvec->bv_offset + bvec->bv_len != PAGE_CACHE_SIZE
2467 ? KERN_ERR "partial" : KERN_INFO "incomplete",
2468 bvec->bv_offset, bvec->bv_len);
2468 2469
2469 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE) 2470 start = page_offset(page);
2470 whole_page = 1; 2471 end = start + bvec->bv_offset + bvec->bv_len - 1;
2471 else
2472 whole_page = 0;
2473 2472
2474 if (++bvec <= bvec_end) 2473 if (++bvec <= bvec_end)
2475 prefetchw(&bvec->bv_page->flags); 2474 prefetchw(&bvec->bv_page->flags);
@@ -2485,7 +2484,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2485 } 2484 }
2486 spin_unlock(&tree->lock); 2485 spin_unlock(&tree->lock);
2487 2486
2488 mirror = (int)(unsigned long)bio->bi_bdev; 2487 mirror = io_bio->mirror_num;
2489 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 2488 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2490 ret = tree->ops->readpage_end_io_hook(page, start, end, 2489 ret = tree->ops->readpage_end_io_hook(page, start, end,
2491 state, mirror); 2490 state, mirror);
@@ -2528,39 +2527,35 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2528 } 2527 }
2529 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); 2528 unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC);
2530 2529
2531 if (whole_page) { 2530 if (uptodate) {
2532 if (uptodate) { 2531 SetPageUptodate(page);
2533 SetPageUptodate(page);
2534 } else {
2535 ClearPageUptodate(page);
2536 SetPageError(page);
2537 }
2538 unlock_page(page);
2539 } else { 2532 } else {
2540 if (uptodate) { 2533 ClearPageUptodate(page);
2541 check_page_uptodate(tree, page); 2534 SetPageError(page);
2542 } else {
2543 ClearPageUptodate(page);
2544 SetPageError(page);
2545 }
2546 check_page_locked(tree, page);
2547 } 2535 }
2536 unlock_page(page);
2548 } while (bvec <= bvec_end); 2537 } while (bvec <= bvec_end);
2549 2538
2550 bio_put(bio); 2539 bio_put(bio);
2551} 2540}
2552 2541
2542/*
2543 * this allocates from the btrfs_bioset. We're returning a bio right now
2544 * but you can call btrfs_io_bio for the appropriate container_of magic
2545 */
2553struct bio * 2546struct bio *
2554btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 2547btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2555 gfp_t gfp_flags) 2548 gfp_t gfp_flags)
2556{ 2549{
2557 struct bio *bio; 2550 struct bio *bio;
2558 2551
2559 bio = bio_alloc(gfp_flags, nr_vecs); 2552 bio = bio_alloc_bioset(gfp_flags, nr_vecs, btrfs_bioset);
2560 2553
2561 if (bio == NULL && (current->flags & PF_MEMALLOC)) { 2554 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
2562 while (!bio && (nr_vecs /= 2)) 2555 while (!bio && (nr_vecs /= 2)) {
2563 bio = bio_alloc(gfp_flags, nr_vecs); 2556 bio = bio_alloc_bioset(gfp_flags,
2557 nr_vecs, btrfs_bioset);
2558 }
2564 } 2559 }
2565 2560
2566 if (bio) { 2561 if (bio) {
@@ -2571,6 +2566,19 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2571 return bio; 2566 return bio;
2572} 2567}
2573 2568
2569struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask)
2570{
2571 return bio_clone_bioset(bio, gfp_mask, btrfs_bioset);
2572}
2573
2574
2575/* this also allocates from the btrfs_bioset */
2576struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
2577{
2578 return bio_alloc_bioset(gfp_mask, nr_iovecs, btrfs_bioset);
2579}
2580
2581
2574static int __must_check submit_one_bio(int rw, struct bio *bio, 2582static int __must_check submit_one_bio(int rw, struct bio *bio,
2575 int mirror_num, unsigned long bio_flags) 2583 int mirror_num, unsigned long bio_flags)
2576{ 2584{
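
extent_io.c now owns a dedicated bio_set created with a front_pad of offsetof(struct btrfs_io_bio, bio), so every bio returned by btrfs_bio_alloc(), btrfs_io_bio_alloc() and btrfs_bio_clone() is the tail of a larger private structure, and container_of() recovers the wrapper (the read completion path above uses it for mirror_num). A minimal sketch of that front-padding pattern, with a hypothetical wrapper struct (the real struct btrfs_io_bio has more fields than this patch shows):

#include <linux/bio.h>
#include <linux/kernel.h>

struct my_io_bio {
	unsigned int	mirror_num;	/* private data in the front padding */
	struct bio	bio;		/* must be last: this is the bio handed out */
};

static struct bio_set *my_bioset;

static int __init my_bioset_init(void)
{
	/* front_pad tells the bioset how much private space to place
	 * in front of each struct bio it allocates */
	my_bioset = bioset_create(BIO_POOL_SIZE,
				  offsetof(struct my_io_bio, bio));
	return my_bioset ? 0 : -ENOMEM;
}

static inline struct my_io_bio *to_my_io_bio(struct bio *bio)
{
	return container_of(bio, struct my_io_bio, bio);
}

static struct bio *my_bio_alloc(gfp_t gfp, unsigned int nr_vecs)
{
	return bio_alloc_bioset(gfp, nr_vecs, my_bioset);
}
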
@@ -3988,7 +3996,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
3988 last_for_get_extent = isize; 3996 last_for_get_extent = isize;
3989 } 3997 }
3990 3998
3991 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, 3999 lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len - 1, 0,
3992 &cached_state); 4000 &cached_state);
3993 4001
3994 em = get_extent_skip_holes(inode, start, last_for_get_extent, 4002 em = get_extent_skip_holes(inode, start, last_for_get_extent,
@@ -4075,7 +4083,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
4075out_free: 4083out_free:
4076 free_extent_map(em); 4084 free_extent_map(em);
4077out: 4085out:
4078 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len, 4086 unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, start + len - 1,
4079 &cached_state, GFP_NOFS); 4087 &cached_state, GFP_NOFS);
4080 return ret; 4088 return ret;
4081} 4089}
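
The two fiemap hunks are an off-by-one fix: btrfs extent-state ranges are inclusive at both ends, so locking len bytes starting at start means the range [start, start + len - 1]; the old start + len end point covered one byte too many, e.g. for start = 0 and len = 4096 it spanned 4097 bytes instead of [0, 4095].
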
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index a2c03a175009..41fb81e7ec53 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -336,6 +336,8 @@ int extent_clear_unlock_delalloc(struct inode *inode,
336struct bio * 336struct bio *
337btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, 337btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
338 gfp_t gfp_flags); 338 gfp_t gfp_flags);
339struct bio *btrfs_io_bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs);
340struct bio *btrfs_bio_clone(struct bio *bio, gfp_t gfp_mask);
339 341
340struct btrfs_fs_info; 342struct btrfs_fs_info;
341 343
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index ecca6c7375a6..e53009657f0e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -197,30 +197,32 @@ int create_free_space_inode(struct btrfs_root *root,
197 block_group->key.objectid); 197 block_group->key.objectid);
198} 198}
199 199
200int btrfs_truncate_free_space_cache(struct btrfs_root *root, 200int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
201 struct btrfs_trans_handle *trans, 201 struct btrfs_block_rsv *rsv)
202 struct btrfs_path *path,
203 struct inode *inode)
204{ 202{
205 struct btrfs_block_rsv *rsv;
206 u64 needed_bytes; 203 u64 needed_bytes;
207 loff_t oldsize; 204 int ret;
208 int ret = 0;
209
210 rsv = trans->block_rsv;
211 trans->block_rsv = &root->fs_info->global_block_rsv;
212 205
213 /* 1 for slack space, 1 for updating the inode */ 206 /* 1 for slack space, 1 for updating the inode */
214 needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) + 207 needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
215 btrfs_calc_trans_metadata_size(root, 1); 208 btrfs_calc_trans_metadata_size(root, 1);
216 209
217 spin_lock(&trans->block_rsv->lock); 210 spin_lock(&rsv->lock);
218 if (trans->block_rsv->reserved < needed_bytes) { 211 if (rsv->reserved < needed_bytes)
219 spin_unlock(&trans->block_rsv->lock); 212 ret = -ENOSPC;
220 trans->block_rsv = rsv; 213 else
221 return -ENOSPC; 214 ret = 0;
222 } 215 spin_unlock(&rsv->lock);
223 spin_unlock(&trans->block_rsv->lock); 216 return 0;
217}
218
219int btrfs_truncate_free_space_cache(struct btrfs_root *root,
220 struct btrfs_trans_handle *trans,
221 struct btrfs_path *path,
222 struct inode *inode)
223{
224 loff_t oldsize;
225 int ret = 0;
224 226
225 oldsize = i_size_read(inode); 227 oldsize = i_size_read(inode);
226 btrfs_i_size_write(inode, 0); 228 btrfs_i_size_write(inode, 0);
@@ -232,9 +234,7 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
232 */ 234 */
233 ret = btrfs_truncate_inode_items(trans, root, inode, 235 ret = btrfs_truncate_inode_items(trans, root, inode,
234 0, BTRFS_EXTENT_DATA_KEY); 236 0, BTRFS_EXTENT_DATA_KEY);
235
236 if (ret) { 237 if (ret) {
237 trans->block_rsv = rsv;
238 btrfs_abort_transaction(trans, root, ret); 238 btrfs_abort_transaction(trans, root, ret);
239 return ret; 239 return ret;
240 } 240 }
@@ -242,7 +242,6 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root,
242 ret = btrfs_update_inode(trans, root, inode); 242 ret = btrfs_update_inode(trans, root, inode);
243 if (ret) 243 if (ret)
244 btrfs_abort_transaction(trans, root, ret); 244 btrfs_abort_transaction(trans, root, ret);
245 trans->block_rsv = rsv;
246 245
247 return ret; 246 return ret;
248} 247}
@@ -920,10 +919,8 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
920 919
921 /* Make sure we can fit our crcs into the first page */ 920 /* Make sure we can fit our crcs into the first page */
922 if (io_ctl.check_crcs && 921 if (io_ctl.check_crcs &&
923 (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) { 922 (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
924 WARN_ON(1);
925 goto out_nospc; 923 goto out_nospc;
926 }
927 924
928 io_ctl_set_generation(&io_ctl, trans->transid); 925 io_ctl_set_generation(&io_ctl, trans->transid);
929 926
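
The free-space-cache.c change splits a reservation pre-check out of btrfs_truncate_free_space_cache(): instead of temporarily swapping trans->block_rsv for the global reserve, btrfs_check_trunc_cache_free_space() only reads rsv->reserved under the reservation's spinlock and reports -ENOSPC when the slack-space and inode-update items would not fit. A user-space sketch of that locked check follows; the mutex, sizes and names are stand-ins, not the btrfs API.

    /* User-space sketch of a locked "is enough reserved?" check (stand-in
     * types; a pthread mutex plays the role of the kernel spinlock). */
    #include <errno.h>
    #include <pthread.h>
    #include <stdint.h>
    #include <stdio.h>

    struct block_rsv {
        pthread_mutex_t lock;
        uint64_t reserved;          /* bytes currently reserved */
    };

    /* 0 if the reserve covers needed bytes, -ENOSPC otherwise. */
    static int check_free_space(struct block_rsv *rsv, uint64_t needed)
    {
        int ret;

        pthread_mutex_lock(&rsv->lock);
        ret = (rsv->reserved < needed) ? -ENOSPC : 0;
        pthread_mutex_unlock(&rsv->lock);
        return ret;
    }

    int main(void)
    {
        struct block_rsv rsv = { PTHREAD_MUTEX_INITIALIZER, 64 * 1024 };
        uint64_t needed = 2 * 16 * 1024;    /* slack + inode update (made-up sizes) */

        printf("check_free_space: %d\n", check_free_space(&rsv, needed));
        return 0;
    }
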
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 4dc17d8809c7..8b7f19f44961 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -54,6 +54,8 @@ int create_free_space_inode(struct btrfs_root *root,
54 struct btrfs_block_group_cache *block_group, 54 struct btrfs_block_group_cache *block_group,
55 struct btrfs_path *path); 55 struct btrfs_path *path);
56 56
57int btrfs_check_trunc_cache_free_space(struct btrfs_root *root,
58 struct btrfs_block_rsv *rsv);
57int btrfs_truncate_free_space_cache(struct btrfs_root *root, 59int btrfs_truncate_free_space_cache(struct btrfs_root *root,
58 struct btrfs_trans_handle *trans, 60 struct btrfs_trans_handle *trans,
59 struct btrfs_path *path, 61 struct btrfs_path *path,
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index d26f67a59e36..2c66ddbbe670 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -429,11 +429,12 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
429 num_bytes = trans->bytes_reserved; 429 num_bytes = trans->bytes_reserved;
430 /* 430 /*
431 * 1 item for inode item insertion if need 431 * 1 item for inode item insertion if need
432 * 3 items for inode item update (in the worst case) 432 * 4 items for inode item update (in the worst case)
433 * 1 items for slack space if we need do truncation
433 * 1 item for free space object 434 * 1 item for free space object
434 * 3 items for pre-allocation 435 * 3 items for pre-allocation
435 */ 436 */
436 trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 8); 437 trans->bytes_reserved = btrfs_calc_trans_metadata_size(root, 10);
437 ret = btrfs_block_rsv_add(root, trans->block_rsv, 438 ret = btrfs_block_rsv_add(root, trans->block_rsv,
438 trans->bytes_reserved, 439 trans->bytes_reserved,
439 BTRFS_RESERVE_NO_FLUSH); 440 BTRFS_RESERVE_NO_FLUSH);
@@ -468,7 +469,8 @@ again:
468 if (i_size_read(inode) > 0) { 469 if (i_size_read(inode) > 0) {
469 ret = btrfs_truncate_free_space_cache(root, trans, path, inode); 470 ret = btrfs_truncate_free_space_cache(root, trans, path, inode);
470 if (ret) { 471 if (ret) {
471 btrfs_abort_transaction(trans, root, ret); 472 if (ret != -ENOSPC)
473 btrfs_abort_transaction(trans, root, ret);
472 goto out_put; 474 goto out_put;
473 } 475 }
474 } 476 }
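
The inode-map.c hunk bumps the worst-case reservation in btrfs_save_ino_cache() from 8 to 10 items: 1 for a possible inode item insertion, 4 for the inode item update, 1 of slack in case the cache has to be truncated, 1 for the free space object and 3 for pre-allocation. A small sketch of the sum with a made-up per-item cost (the kernel's btrfs_calc_trans_metadata_size() is more involved than a plain multiply):

    /* Sketch of the worst-case item accounting behind the "10 items" figure.
     * The per-item cost is a made-up constant, not the kernel's formula. */
    #include <stdint.h>
    #include <stdio.h>

    #define ITEMS_INODE_INSERT    1   /* inode item insertion if needed */
    #define ITEMS_INODE_UPDATE    4   /* inode item update, worst case */
    #define ITEMS_TRUNC_SLACK     1   /* slack space if truncation is needed */
    #define ITEMS_FREE_SPACE_OBJ  1   /* free space object */
    #define ITEMS_PREALLOC        3   /* pre-allocation */

    static uint64_t calc_trans_metadata_size(uint64_t per_item, unsigned items)
    {
        return per_item * items;
    }

    int main(void)
    {
        unsigned items = ITEMS_INODE_INSERT + ITEMS_INODE_UPDATE +
                         ITEMS_TRUNC_SLACK + ITEMS_FREE_SPACE_OBJ +
                         ITEMS_PREALLOC;            /* = 10 */

        printf("items=%u, bytes=%llu\n", items,
               (unsigned long long)calc_trans_metadata_size(16384, items));
        return 0;
    }
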
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 9b31b3b091fc..af978f7682b3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -715,8 +715,10 @@ retry:
715 async_extent->ram_size - 1, 0); 715 async_extent->ram_size - 1, 0);
716 716
717 em = alloc_extent_map(); 717 em = alloc_extent_map();
718 if (!em) 718 if (!em) {
719 ret = -ENOMEM;
719 goto out_free_reserve; 720 goto out_free_reserve;
721 }
720 em->start = async_extent->start; 722 em->start = async_extent->start;
721 em->len = async_extent->ram_size; 723 em->len = async_extent->ram_size;
722 em->orig_start = em->start; 724 em->orig_start = em->start;
@@ -923,8 +925,10 @@ static noinline int __cow_file_range(struct btrfs_trans_handle *trans,
923 } 925 }
924 926
925 em = alloc_extent_map(); 927 em = alloc_extent_map();
926 if (!em) 928 if (!em) {
929 ret = -ENOMEM;
927 goto out_reserve; 930 goto out_reserve;
931 }
928 em->start = start; 932 em->start = start;
929 em->orig_start = em->start; 933 em->orig_start = em->start;
930 ram_size = ins.offset; 934 ram_size = ins.offset;
@@ -4724,6 +4728,7 @@ void btrfs_evict_inode(struct inode *inode)
4724 btrfs_end_transaction(trans, root); 4728 btrfs_end_transaction(trans, root);
4725 btrfs_btree_balance_dirty(root); 4729 btrfs_btree_balance_dirty(root);
4726no_delete: 4730no_delete:
4731 btrfs_remove_delayed_node(inode);
4727 clear_inode(inode); 4732 clear_inode(inode);
4728 return; 4733 return;
4729} 4734}
@@ -4839,14 +4844,13 @@ static void inode_tree_add(struct inode *inode)
4839 struct rb_node **p; 4844 struct rb_node **p;
4840 struct rb_node *parent; 4845 struct rb_node *parent;
4841 u64 ino = btrfs_ino(inode); 4846 u64 ino = btrfs_ino(inode);
4842again:
4843 p = &root->inode_tree.rb_node;
4844 parent = NULL;
4845 4847
4846 if (inode_unhashed(inode)) 4848 if (inode_unhashed(inode))
4847 return; 4849 return;
4848 4850again:
4851 parent = NULL;
4849 spin_lock(&root->inode_lock); 4852 spin_lock(&root->inode_lock);
4853 p = &root->inode_tree.rb_node;
4850 while (*p) { 4854 while (*p) {
4851 parent = *p; 4855 parent = *p;
4852 entry = rb_entry(parent, struct btrfs_inode, rb_node); 4856 entry = rb_entry(parent, struct btrfs_inode, rb_node);
@@ -6928,7 +6932,11 @@ struct btrfs_dio_private {
6928 /* IO errors */ 6932 /* IO errors */
6929 int errors; 6933 int errors;
6930 6934
6935 /* orig_bio is our btrfs_io_bio */
6931 struct bio *orig_bio; 6936 struct bio *orig_bio;
6937
6938 /* dio_bio came from fs/direct-io.c */
6939 struct bio *dio_bio;
6932}; 6940};
6933 6941
6934static void btrfs_endio_direct_read(struct bio *bio, int err) 6942static void btrfs_endio_direct_read(struct bio *bio, int err)
@@ -6938,6 +6946,7 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
6938 struct bio_vec *bvec = bio->bi_io_vec; 6946 struct bio_vec *bvec = bio->bi_io_vec;
6939 struct inode *inode = dip->inode; 6947 struct inode *inode = dip->inode;
6940 struct btrfs_root *root = BTRFS_I(inode)->root; 6948 struct btrfs_root *root = BTRFS_I(inode)->root;
6949 struct bio *dio_bio;
6941 u64 start; 6950 u64 start;
6942 6951
6943 start = dip->logical_offset; 6952 start = dip->logical_offset;
@@ -6977,14 +6986,15 @@ failed:
6977 6986
6978 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 6987 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6979 dip->logical_offset + dip->bytes - 1); 6988 dip->logical_offset + dip->bytes - 1);
6980 bio->bi_private = dip->private; 6989 dio_bio = dip->dio_bio;
6981 6990
6982 kfree(dip); 6991 kfree(dip);
6983 6992
6984 /* If we had a csum failure make sure to clear the uptodate flag */ 6993 /* If we had a csum failure make sure to clear the uptodate flag */
6985 if (err) 6994 if (err)
6986 clear_bit(BIO_UPTODATE, &bio->bi_flags); 6995 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
6987 dio_end_io(bio, err); 6996 dio_end_io(dio_bio, err);
6997 bio_put(bio);
6988} 6998}
6989 6999
6990static void btrfs_endio_direct_write(struct bio *bio, int err) 7000static void btrfs_endio_direct_write(struct bio *bio, int err)
@@ -6995,6 +7005,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
6995 struct btrfs_ordered_extent *ordered = NULL; 7005 struct btrfs_ordered_extent *ordered = NULL;
6996 u64 ordered_offset = dip->logical_offset; 7006 u64 ordered_offset = dip->logical_offset;
6997 u64 ordered_bytes = dip->bytes; 7007 u64 ordered_bytes = dip->bytes;
7008 struct bio *dio_bio;
6998 int ret; 7009 int ret;
6999 7010
7000 if (err) 7011 if (err)
@@ -7022,14 +7033,15 @@ out_test:
7022 goto again; 7033 goto again;
7023 } 7034 }
7024out_done: 7035out_done:
7025 bio->bi_private = dip->private; 7036 dio_bio = dip->dio_bio;
7026 7037
7027 kfree(dip); 7038 kfree(dip);
7028 7039
7029 /* If we had an error make sure to clear the uptodate flag */ 7040 /* If we had an error make sure to clear the uptodate flag */
7030 if (err) 7041 if (err)
7031 clear_bit(BIO_UPTODATE, &bio->bi_flags); 7042 clear_bit(BIO_UPTODATE, &dio_bio->bi_flags);
7032 dio_end_io(bio, err); 7043 dio_end_io(dio_bio, err);
7044 bio_put(bio);
7033} 7045}
7034 7046
7035static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw, 7047static int __btrfs_submit_bio_start_direct_io(struct inode *inode, int rw,
@@ -7065,10 +7077,10 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
7065 if (!atomic_dec_and_test(&dip->pending_bios)) 7077 if (!atomic_dec_and_test(&dip->pending_bios))
7066 goto out; 7078 goto out;
7067 7079
7068 if (dip->errors) 7080 if (dip->errors) {
7069 bio_io_error(dip->orig_bio); 7081 bio_io_error(dip->orig_bio);
7070 else { 7082 } else {
7071 set_bit(BIO_UPTODATE, &dip->orig_bio->bi_flags); 7083 set_bit(BIO_UPTODATE, &dip->dio_bio->bi_flags);
7072 bio_endio(dip->orig_bio, 0); 7084 bio_endio(dip->orig_bio, 0);
7073 } 7085 }
7074out: 7086out:
@@ -7243,25 +7255,34 @@ out_err:
7243 return 0; 7255 return 0;
7244} 7256}
7245 7257
7246static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, 7258static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7247 loff_t file_offset) 7259 struct inode *inode, loff_t file_offset)
7248{ 7260{
7249 struct btrfs_root *root = BTRFS_I(inode)->root; 7261 struct btrfs_root *root = BTRFS_I(inode)->root;
7250 struct btrfs_dio_private *dip; 7262 struct btrfs_dio_private *dip;
7251 struct bio_vec *bvec = bio->bi_io_vec; 7263 struct bio_vec *bvec = dio_bio->bi_io_vec;
7264 struct bio *io_bio;
7252 int skip_sum; 7265 int skip_sum;
7253 int write = rw & REQ_WRITE; 7266 int write = rw & REQ_WRITE;
7254 int ret = 0; 7267 int ret = 0;
7255 7268
7256 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 7269 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
7257 7270
7271 io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
7272
7273 if (!io_bio) {
7274 ret = -ENOMEM;
7275 goto free_ordered;
7276 }
7277
7258 dip = kmalloc(sizeof(*dip), GFP_NOFS); 7278 dip = kmalloc(sizeof(*dip), GFP_NOFS);
7259 if (!dip) { 7279 if (!dip) {
7260 ret = -ENOMEM; 7280 ret = -ENOMEM;
7261 goto free_ordered; 7281 goto free_io_bio;
7262 } 7282 }
7263 7283
7264 dip->private = bio->bi_private; 7284 dip->private = dio_bio->bi_private;
7285 io_bio->bi_private = dio_bio->bi_private;
7265 dip->inode = inode; 7286 dip->inode = inode;
7266 dip->logical_offset = file_offset; 7287 dip->logical_offset = file_offset;
7267 7288
@@ -7269,22 +7290,27 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode,
7269 do { 7290 do {
7270 dip->bytes += bvec->bv_len; 7291 dip->bytes += bvec->bv_len;
7271 bvec++; 7292 bvec++;
7272 } while (bvec <= (bio->bi_io_vec + bio->bi_vcnt - 1)); 7293 } while (bvec <= (dio_bio->bi_io_vec + dio_bio->bi_vcnt - 1));
7273 7294
7274 dip->disk_bytenr = (u64)bio->bi_sector << 9; 7295 dip->disk_bytenr = (u64)dio_bio->bi_sector << 9;
7275 bio->bi_private = dip; 7296 io_bio->bi_private = dip;
7276 dip->errors = 0; 7297 dip->errors = 0;
7277 dip->orig_bio = bio; 7298 dip->orig_bio = io_bio;
7299 dip->dio_bio = dio_bio;
7278 atomic_set(&dip->pending_bios, 0); 7300 atomic_set(&dip->pending_bios, 0);
7279 7301
7280 if (write) 7302 if (write)
7281 bio->bi_end_io = btrfs_endio_direct_write; 7303 io_bio->bi_end_io = btrfs_endio_direct_write;
7282 else 7304 else
7283 bio->bi_end_io = btrfs_endio_direct_read; 7305 io_bio->bi_end_io = btrfs_endio_direct_read;
7284 7306
7285 ret = btrfs_submit_direct_hook(rw, dip, skip_sum); 7307 ret = btrfs_submit_direct_hook(rw, dip, skip_sum);
7286 if (!ret) 7308 if (!ret)
7287 return; 7309 return;
7310
7311free_io_bio:
7312 bio_put(io_bio);
7313
7288free_ordered: 7314free_ordered:
7289 /* 7315 /*
7290 * If this is a write, we need to clean up the reserved space and kill 7316 * If this is a write, we need to clean up the reserved space and kill
@@ -7300,7 +7326,7 @@ free_ordered:
7300 btrfs_put_ordered_extent(ordered); 7326 btrfs_put_ordered_extent(ordered);
7301 btrfs_put_ordered_extent(ordered); 7327 btrfs_put_ordered_extent(ordered);
7302 } 7328 }
7303 bio_endio(bio, ret); 7329 bio_endio(dio_bio, ret);
7304} 7330}
7305 7331
7306static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb, 7332static ssize_t check_direct_IO(struct btrfs_root *root, int rw, struct kiocb *iocb,
@@ -7979,7 +8005,6 @@ void btrfs_destroy_inode(struct inode *inode)
7979 inode_tree_del(inode); 8005 inode_tree_del(inode);
7980 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); 8006 btrfs_drop_extent_cache(inode, 0, (u64)-1, 0);
7981free: 8007free:
7982 btrfs_remove_delayed_node(inode);
7983 call_rcu(&inode->i_rcu, btrfs_i_callback); 8008 call_rcu(&inode->i_rcu, btrfs_i_callback);
7984} 8009}
7985 8010
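
The inode.c direct-IO hunks change btrfs_submit_direct() to clone the bio it receives from fs/direct-io.c (dio_bio) into a private btrfs_io_bio (io_bio), drive all splitting and submission through the clone, and touch the original only at completion time, where dio_end_io() finishes dio_bio and bio_put() drops the clone. A reduced user-space sketch of that "work on a private clone, complete the caller's object last" shape; the types and callbacks are invented for the example.

    /* User-space sketch: do the work through a private clone of the caller's
     * request and complete the caller's object only when everything is done.
     * All names here are illustrative, not the block-layer API. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct request {
        char name[16];
        void (*complete)(struct request *req, int err);
    };

    struct dio_private {
        struct request *orig;       /* our private clone, used for the real work */
        struct request *caller;     /* the request the submitter handed us */
        int err;
    };

    static void caller_done(struct request *req, int err)
    {
        printf("%s completed with %d\n", req->name, err);
    }

    static void submit(struct request *caller)
    {
        struct dio_private *dip = calloc(1, sizeof(*dip));
        struct request *clone = malloc(sizeof(*clone));

        if (!dip || !clone)
            goto fail;

        memcpy(clone, caller, sizeof(*clone));  /* "bio_clone" stand-in */
        dip->orig = clone;
        dip->caller = caller;

        /* ... real code would split and submit dip->orig here ... */
        dip->err = 0;

        /* Completion path: release private state, then finish the caller. */
        dip->caller->complete(dip->caller, dip->err);
        free(clone);
        free(dip);
        return;

    fail:
        free(clone);
        free(dip);
        caller->complete(caller, -ENOMEM);
    }

    int main(void)
    {
        struct request r = { "dio_bio", caller_done };

        submit(&r);
        return 0;
    }
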
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0de4a2fcfb24..0f81d67cdc8d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1801,7 +1801,11 @@ static noinline int copy_to_sk(struct btrfs_root *root,
1801 item_off = btrfs_item_ptr_offset(leaf, i); 1801 item_off = btrfs_item_ptr_offset(leaf, i);
1802 item_len = btrfs_item_size_nr(leaf, i); 1802 item_len = btrfs_item_size_nr(leaf, i);
1803 1803
1804 if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) 1804 btrfs_item_key_to_cpu(leaf, key, i);
1805 if (!key_in_sk(key, sk))
1806 continue;
1807
1808 if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE)
1805 item_len = 0; 1809 item_len = 0;
1806 1810
1807 if (sizeof(sh) + item_len + *sk_offset > 1811 if (sizeof(sh) + item_len + *sk_offset >
@@ -1810,10 +1814,6 @@ static noinline int copy_to_sk(struct btrfs_root *root,
1810 goto overflow; 1814 goto overflow;
1811 } 1815 }
1812 1816
1813 btrfs_item_key_to_cpu(leaf, key, i);
1814 if (!key_in_sk(key, sk))
1815 continue;
1816
1817 sh.objectid = key->objectid; 1817 sh.objectid = key->objectid;
1818 sh.offset = key->offset; 1818 sh.offset = key->offset;
1819 sh.type = key->type; 1819 sh.type = key->type;
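
In the ioctl.c hunk, copy_to_sk() now reads the item key and applies key_in_sk() before doing any size math, and the "can this item ever fit" test accounts for the header as well (sizeof(sh) + item_len). A sketch of that filter-first, clamp-second copy loop over a fixed buffer; the record layout, buffer size and filter are invented for the example.

    /* Sketch: copy (header, payload) records into a fixed buffer, skipping
     * records that fail a filter and truncating payloads that could never
     * fit even in an empty buffer.  Sizes and layout are illustrative. */
    #include <stdio.h>
    #include <string.h>

    #define BUFSIZE 64

    struct hdr { unsigned key; unsigned len; };

    static int key_matches(unsigned key)
    {
        return key % 2 == 0;        /* toy stand-in for key_in_sk() */
    }

    static size_t copy_records(const unsigned *keys, const unsigned *lens,
                               int nr, char *buf)
    {
        size_t off = 0;

        for (int i = 0; i < nr; i++) {
            struct hdr h = { keys[i], lens[i] };

            if (!key_matches(h.key))            /* filter before any size logic */
                continue;
            if (sizeof(h) + h.len > BUFSIZE)    /* can never fit: header only */
                h.len = 0;
            if (sizeof(h) + h.len + off > BUFSIZE)
                break;                          /* buffer full for now */

            memcpy(buf + off, &h, sizeof(h));
            memset(buf + off + sizeof(h), 0, h.len);    /* payload placeholder */
            off += sizeof(h) + h.len;
        }
        return off;
    }

    int main(void)
    {
        unsigned keys[] = { 1, 2, 4, 6 };
        unsigned lens[] = { 8, 100, 8, 8 };
        char buf[BUFSIZE];

        printf("used %zu of %d bytes\n",
               copy_records(keys, lens, 4, buf), BUFSIZE);
        return 0;
    }
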
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 0740621daf6c..0525e1389f5b 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1050,7 +1050,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1050 } 1050 }
1051 1051
1052 /* put a new bio on the list */ 1052 /* put a new bio on the list */
1053 bio = bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1); 1053 bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT?:1);
1054 if (!bio) 1054 if (!bio)
1055 return -ENOMEM; 1055 return -ENOMEM;
1056 1056
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 704a1b8d2a2b..395b82031a42 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -1773,7 +1773,7 @@ again:
1773 if (!eb || !extent_buffer_uptodate(eb)) { 1773 if (!eb || !extent_buffer_uptodate(eb)) {
1774 ret = (!eb) ? -ENOMEM : -EIO; 1774 ret = (!eb) ? -ENOMEM : -EIO;
1775 free_extent_buffer(eb); 1775 free_extent_buffer(eb);
1776 return ret; 1776 break;
1777 } 1777 }
1778 btrfs_tree_lock(eb); 1778 btrfs_tree_lock(eb);
1779 if (cow) { 1779 if (cow) {
@@ -3350,6 +3350,11 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info,
3350 } 3350 }
3351 3351
3352truncate: 3352truncate:
3353 ret = btrfs_check_trunc_cache_free_space(root,
3354 &fs_info->global_block_rsv);
3355 if (ret)
3356 goto out;
3357
3353 path = btrfs_alloc_path(); 3358 path = btrfs_alloc_path();
3354 if (!path) { 3359 if (!path) {
3355 ret = -ENOMEM; 3360 ret = -ENOMEM;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f489e24659a4..79bd479317cb 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1296,7 +1296,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1296 } 1296 }
1297 1297
1298 WARN_ON(!page->page); 1298 WARN_ON(!page->page);
1299 bio = bio_alloc(GFP_NOFS, 1); 1299 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1300 if (!bio) { 1300 if (!bio) {
1301 page->io_error = 1; 1301 page->io_error = 1;
1302 sblock->no_io_error_seen = 0; 1302 sblock->no_io_error_seen = 0;
@@ -1431,7 +1431,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1431 return -EIO; 1431 return -EIO;
1432 } 1432 }
1433 1433
1434 bio = bio_alloc(GFP_NOFS, 1); 1434 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1435 if (!bio) 1435 if (!bio)
1436 return -EIO; 1436 return -EIO;
1437 bio->bi_bdev = page_bad->dev->bdev; 1437 bio->bi_bdev = page_bad->dev->bdev;
@@ -1522,7 +1522,7 @@ again:
1522 sbio->dev = wr_ctx->tgtdev; 1522 sbio->dev = wr_ctx->tgtdev;
1523 bio = sbio->bio; 1523 bio = sbio->bio;
1524 if (!bio) { 1524 if (!bio) {
1525 bio = bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio); 1525 bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1526 if (!bio) { 1526 if (!bio) {
1527 mutex_unlock(&wr_ctx->wr_lock); 1527 mutex_unlock(&wr_ctx->wr_lock);
1528 return -ENOMEM; 1528 return -ENOMEM;
@@ -1930,7 +1930,7 @@ again:
1930 sbio->dev = spage->dev; 1930 sbio->dev = spage->dev;
1931 bio = sbio->bio; 1931 bio = sbio->bio;
1932 if (!bio) { 1932 if (!bio) {
1933 bio = bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio); 1933 bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
1934 if (!bio) 1934 if (!bio)
1935 return -ENOMEM; 1935 return -ENOMEM;
1936 sbio->bio = bio; 1936 sbio->bio = bio;
@@ -3307,7 +3307,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
3307 "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n"); 3307 "btrfs: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
3308 return -EIO; 3308 return -EIO;
3309 } 3309 }
3310 bio = bio_alloc(GFP_NOFS, 1); 3310 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
3311 if (!bio) { 3311 if (!bio) {
3312 spin_lock(&sctx->stat_lock); 3312 spin_lock(&sctx->stat_lock);
3313 sctx->stat.malloc_errors++; 3313 sctx->stat.malloc_errors++;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a4807ced23cc..f0857e092a3c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1263,6 +1263,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1263 1263
1264 btrfs_dev_replace_suspend_for_unmount(fs_info); 1264 btrfs_dev_replace_suspend_for_unmount(fs_info);
1265 btrfs_scrub_cancel(fs_info); 1265 btrfs_scrub_cancel(fs_info);
1266 btrfs_pause_balance(fs_info);
1266 1267
1267 ret = btrfs_commit_super(root); 1268 ret = btrfs_commit_super(root);
1268 if (ret) 1269 if (ret)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 0e925ced971b..8bffb9174afb 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3120,14 +3120,13 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
3120 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE; 3120 allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
3121 if (num_devices == 1) 3121 if (num_devices == 1)
3122 allowed |= BTRFS_BLOCK_GROUP_DUP; 3122 allowed |= BTRFS_BLOCK_GROUP_DUP;
3123 else if (num_devices < 4) 3123 else if (num_devices > 1)
3124 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1); 3124 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
3125 else 3125 if (num_devices > 2)
3126 allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 | 3126 allowed |= BTRFS_BLOCK_GROUP_RAID5;
3127 BTRFS_BLOCK_GROUP_RAID10 | 3127 if (num_devices > 3)
3128 BTRFS_BLOCK_GROUP_RAID5 | 3128 allowed |= (BTRFS_BLOCK_GROUP_RAID10 |
3129 BTRFS_BLOCK_GROUP_RAID6); 3129 BTRFS_BLOCK_GROUP_RAID6);
3130
3131 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) && 3130 if ((bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
3132 (!alloc_profile_is_valid(bctl->data.target, 1) || 3131 (!alloc_profile_is_valid(bctl->data.target, 1) ||
3133 (bctl->data.target & ~allowed))) { 3132 (bctl->data.target & ~allowed))) {
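
The btrfs_balance() hunk above replaces the old three-way branch, which never offered RAID5/6 below four devices, with an incremental mask: DUP for a single device, RAID0/RAID1 past one device, RAID5 past two, and RAID10/RAID6 past three. A sketch of the same mask construction with placeholder bit values:

    /* Sketch: derive the allowed profile bitmask from the device count.
     * Bit values are placeholders, not the kernel's BTRFS_BLOCK_GROUP_* flags. */
    #include <stdio.h>

    #define PROFILE_SINGLE  (1u << 0)
    #define PROFILE_DUP     (1u << 1)
    #define PROFILE_RAID0   (1u << 2)
    #define PROFILE_RAID1   (1u << 3)
    #define PROFILE_RAID5   (1u << 4)
    #define PROFILE_RAID6   (1u << 5)
    #define PROFILE_RAID10  (1u << 6)

    static unsigned allowed_profiles(unsigned num_devices)
    {
        unsigned allowed = PROFILE_SINGLE;

        if (num_devices == 1)
            allowed |= PROFILE_DUP;
        else if (num_devices > 1)
            allowed |= PROFILE_RAID0 | PROFILE_RAID1;
        if (num_devices > 2)
            allowed |= PROFILE_RAID5;       /* allowed from three devices on */
        if (num_devices > 3)
            allowed |= PROFILE_RAID10 | PROFILE_RAID6;
        return allowed;
    }

    int main(void)
    {
        for (unsigned n = 1; n <= 4; n++)
            printf("%u device(s): allowed mask 0x%02x\n",
                   n, allowed_profiles(n));
        return 0;
    }
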
@@ -5019,42 +5018,16 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5019 return 0; 5018 return 0;
5020} 5019}
5021 5020
5022static void *merge_stripe_index_into_bio_private(void *bi_private,
5023 unsigned int stripe_index)
5024{
5025 /*
5026 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
5027 * at most 1.
5028 * The alternative solution (instead of stealing bits from the
5029 * pointer) would be to allocate an intermediate structure
5030 * that contains the old private pointer plus the stripe_index.
5031 */
5032 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
5033 BUG_ON(stripe_index > 3);
5034 return (void *)(((uintptr_t)bi_private) | stripe_index);
5035}
5036
5037static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
5038{
5039 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
5040}
5041
5042static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
5043{
5044 return (unsigned int)((uintptr_t)bi_private) & 3;
5045}
5046
5047static void btrfs_end_bio(struct bio *bio, int err) 5021static void btrfs_end_bio(struct bio *bio, int err)
5048{ 5022{
5049 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private); 5023 struct btrfs_bio *bbio = bio->bi_private;
5050 int is_orig_bio = 0; 5024 int is_orig_bio = 0;
5051 5025
5052 if (err) { 5026 if (err) {
5053 atomic_inc(&bbio->error); 5027 atomic_inc(&bbio->error);
5054 if (err == -EIO || err == -EREMOTEIO) { 5028 if (err == -EIO || err == -EREMOTEIO) {
5055 unsigned int stripe_index = 5029 unsigned int stripe_index =
5056 extract_stripe_index_from_bio_private( 5030 btrfs_io_bio(bio)->stripe_index;
5057 bio->bi_private);
5058 struct btrfs_device *dev; 5031 struct btrfs_device *dev;
5059 5032
5060 BUG_ON(stripe_index >= bbio->num_stripes); 5033 BUG_ON(stripe_index >= bbio->num_stripes);
@@ -5084,8 +5057,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
5084 } 5057 }
5085 bio->bi_private = bbio->private; 5058 bio->bi_private = bbio->private;
5086 bio->bi_end_io = bbio->end_io; 5059 bio->bi_end_io = bbio->end_io;
5087 bio->bi_bdev = (struct block_device *) 5060 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5088 (unsigned long)bbio->mirror_num;
5089 /* only send an error to the higher layers if it is 5061 /* only send an error to the higher layers if it is
5090 * beyond the tolerance of the btrfs bio 5062 * beyond the tolerance of the btrfs bio
5091 */ 5063 */
@@ -5211,8 +5183,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5211 struct btrfs_device *dev = bbio->stripes[dev_nr].dev; 5183 struct btrfs_device *dev = bbio->stripes[dev_nr].dev;
5212 5184
5213 bio->bi_private = bbio; 5185 bio->bi_private = bbio;
5214 bio->bi_private = merge_stripe_index_into_bio_private( 5186 btrfs_io_bio(bio)->stripe_index = dev_nr;
5215 bio->bi_private, (unsigned int)dev_nr);
5216 bio->bi_end_io = btrfs_end_bio; 5187 bio->bi_end_io = btrfs_end_bio;
5217 bio->bi_sector = physical >> 9; 5188 bio->bi_sector = physical >> 9;
5218#ifdef DEBUG 5189#ifdef DEBUG
@@ -5273,8 +5244,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5273 if (atomic_dec_and_test(&bbio->stripes_pending)) { 5244 if (atomic_dec_and_test(&bbio->stripes_pending)) {
5274 bio->bi_private = bbio->private; 5245 bio->bi_private = bbio->private;
5275 bio->bi_end_io = bbio->end_io; 5246 bio->bi_end_io = bbio->end_io;
5276 bio->bi_bdev = (struct block_device *) 5247 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5277 (unsigned long)bbio->mirror_num;
5278 bio->bi_sector = logical >> 9; 5248 bio->bi_sector = logical >> 9;
5279 kfree(bbio); 5249 kfree(bbio);
5280 bio_endio(bio, -EIO); 5250 bio_endio(bio, -EIO);
@@ -5352,7 +5322,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5352 } 5322 }
5353 5323
5354 if (dev_nr < total_devs - 1) { 5324 if (dev_nr < total_devs - 1) {
5355 bio = bio_clone(first_bio, GFP_NOFS); 5325 bio = btrfs_bio_clone(first_bio, GFP_NOFS);
5356 BUG_ON(!bio); /* -ENOMEM */ 5326 BUG_ON(!bio); /* -ENOMEM */
5357 } else { 5327 } else {
5358 bio = first_bio; 5328 bio = first_bio;
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 845ccbb0d2e3..f6247e2a47f7 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -152,6 +152,26 @@ struct btrfs_fs_devices {
152 int rotating; 152 int rotating;
153}; 153};
154 154
155/*
156 * we need the mirror number and stripe index to be passed around
157 * the call chain while we are processing end_io (especially errors).
158 * Really, what we need is a btrfs_bio structure that has this info
159 * and is properly sized with its stripe array, but we're not there
160 * quite yet. We have our own btrfs bioset, and all of the bios
161 * we allocate are actually btrfs_io_bios. We'll cram as much of
162 * struct btrfs_bio as we can into this over time.
163 */
164struct btrfs_io_bio {
165 unsigned long mirror_num;
166 unsigned long stripe_index;
167 struct bio bio;
168};
169
170static inline struct btrfs_io_bio *btrfs_io_bio(struct bio *bio)
171{
172 return container_of(bio, struct btrfs_io_bio, bio);
173}
174
155struct btrfs_bio_stripe { 175struct btrfs_bio_stripe {
156 struct btrfs_device *dev; 176 struct btrfs_device *dev;
157 u64 physical; 177 u64 physical;
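
The new struct btrfs_io_bio in volumes.h carries the mirror number and stripe index next to an embedded struct bio, and btrfs_io_bio() recovers the wrapper from a pointer to the embedded member via container_of(); that is what lets btrfs_end_bio() stop stealing bits from bi_private and bi_bdev. A user-space sketch of the embedding idiom (container_of is defined locally here because this is not kernel code):

    /* User-space sketch of the "embed the generic object, recover the wrapper
     * with container_of()" idiom used by struct btrfs_io_bio. */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct bio {                    /* stand-in for the block layer's struct bio */
        unsigned long bi_sector;
    };

    struct io_bio {                 /* wrapper carrying extra per-bio state */
        unsigned long mirror_num;
        unsigned long stripe_index;
        struct bio bio;             /* must stay embedded, not a pointer */
    };

    static struct io_bio *to_io_bio(struct bio *bio)
    {
        return container_of(bio, struct io_bio, bio);
    }

    static void end_io(struct bio *bio)     /* callback only sees the inner bio */
    {
        struct io_bio *ib = to_io_bio(bio);

        printf("stripe %lu, mirror %lu\n", ib->stripe_index, ib->mirror_num);
    }

    int main(void)
    {
        struct io_bio ib = { .mirror_num = 2, .stripe_index = 1,
                             .bio = { .bi_sector = 4096 } };

        end_io(&ib.bio);            /* pass only the embedded member around */
        return 0;
    }
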
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fc3025199cb3..20efd81266c6 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -171,7 +171,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
171 171
172 if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL) 172 if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL)
173 inode->i_flags |= S_AUTOMOUNT; 173 inode->i_flags |= S_AUTOMOUNT;
174 cifs_set_ops(inode); 174 if (inode->i_state & I_NEW)
175 cifs_set_ops(inode);
175} 176}
176 177
177void 178void
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0aabb344b02e..5aae3d12d400 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -209,7 +209,6 @@ typedef struct ext4_io_end {
209 ssize_t size; /* size of the extent */ 209 ssize_t size; /* size of the extent */
210 struct kiocb *iocb; /* iocb struct for AIO */ 210 struct kiocb *iocb; /* iocb struct for AIO */
211 int result; /* error value for AIO */ 211 int result; /* error value for AIO */
212 atomic_t count; /* reference counter */
213} ext4_io_end_t; 212} ext4_io_end_t;
214 213
215struct ext4_io_submit { 214struct ext4_io_submit {
@@ -2651,14 +2650,11 @@ extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
2651 2650
2652/* page-io.c */ 2651/* page-io.c */
2653extern int __init ext4_init_pageio(void); 2652extern int __init ext4_init_pageio(void);
2653extern void ext4_add_complete_io(ext4_io_end_t *io_end);
2654extern void ext4_exit_pageio(void); 2654extern void ext4_exit_pageio(void);
2655extern void ext4_ioend_shutdown(struct inode *); 2655extern void ext4_ioend_shutdown(struct inode *);
2656extern void ext4_free_io_end(ext4_io_end_t *io);
2656extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags); 2657extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
2657extern ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end);
2658extern int ext4_put_io_end(ext4_io_end_t *io_end);
2659extern void ext4_put_io_end_defer(ext4_io_end_t *io_end);
2660extern void ext4_io_submit_init(struct ext4_io_submit *io,
2661 struct writeback_control *wbc);
2662extern void ext4_end_io_work(struct work_struct *work); 2658extern void ext4_end_io_work(struct work_struct *work);
2663extern void ext4_io_submit(struct ext4_io_submit *io); 2659extern void ext4_io_submit(struct ext4_io_submit *io);
2664extern int ext4_bio_write_page(struct ext4_io_submit *io, 2660extern int ext4_bio_write_page(struct ext4_io_submit *io,
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 107936db244e..bc0f1910b9cf 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3642,7 +3642,7 @@ int ext4_find_delalloc_range(struct inode *inode,
3642{ 3642{
3643 struct extent_status es; 3643 struct extent_status es;
3644 3644
3645 ext4_es_find_delayed_extent(inode, lblk_start, &es); 3645 ext4_es_find_delayed_extent_range(inode, lblk_start, lblk_end, &es);
3646 if (es.es_len == 0) 3646 if (es.es_len == 0)
3647 return 0; /* there is no delay extent in this tree */ 3647 return 0; /* there is no delay extent in this tree */
3648 else if (es.es_lblk <= lblk_start && 3648 else if (es.es_lblk <= lblk_start &&
@@ -4608,9 +4608,10 @@ static int ext4_find_delayed_extent(struct inode *inode,
4608 struct extent_status es; 4608 struct extent_status es;
4609 ext4_lblk_t block, next_del; 4609 ext4_lblk_t block, next_del;
4610 4610
4611 ext4_es_find_delayed_extent(inode, newes->es_lblk, &es);
4612
4613 if (newes->es_pblk == 0) { 4611 if (newes->es_pblk == 0) {
4612 ext4_es_find_delayed_extent_range(inode, newes->es_lblk,
4613 newes->es_lblk + newes->es_len - 1, &es);
4614
4614 /* 4615 /*
4615 * No extent in extent-tree contains block @newes->es_pblk, 4616 * No extent in extent-tree contains block @newes->es_pblk,
4616 * then the block may stay in 1)a hole or 2)delayed-extent. 4617 * then the block may stay in 1)a hole or 2)delayed-extent.
@@ -4630,7 +4631,7 @@ static int ext4_find_delayed_extent(struct inode *inode,
4630 } 4631 }
4631 4632
4632 block = newes->es_lblk + newes->es_len; 4633 block = newes->es_lblk + newes->es_len;
4633 ext4_es_find_delayed_extent(inode, block, &es); 4634 ext4_es_find_delayed_extent_range(inode, block, EXT_MAX_BLOCKS, &es);
4634 if (es.es_len == 0) 4635 if (es.es_len == 0)
4635 next_del = EXT_MAX_BLOCKS; 4636 next_del = EXT_MAX_BLOCKS;
4636 else 4637 else
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index fe3337a85ede..e6941e622d31 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -232,14 +232,16 @@ static struct extent_status *__es_tree_search(struct rb_root *root,
232} 232}
233 233
234/* 234/*
235 * ext4_es_find_delayed_extent: find the 1st delayed extent covering @es->lblk 235 * ext4_es_find_delayed_extent_range: find the 1st delayed extent covering
236 * if it exists, otherwise, the next extent after @es->lblk. 236 * @es->lblk if it exists, otherwise, the next extent after @es->lblk.
237 * 237 *
238 * @inode: the inode which owns delayed extents 238 * @inode: the inode which owns delayed extents
239 * @lblk: the offset where we start to search 239 * @lblk: the offset where we start to search
240 * @end: the offset where we stop to search
240 * @es: delayed extent that we found 241 * @es: delayed extent that we found
241 */ 242 */
242void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 243void ext4_es_find_delayed_extent_range(struct inode *inode,
244 ext4_lblk_t lblk, ext4_lblk_t end,
243 struct extent_status *es) 245 struct extent_status *es)
244{ 246{
245 struct ext4_es_tree *tree = NULL; 247 struct ext4_es_tree *tree = NULL;
@@ -247,7 +249,8 @@ void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk,
247 struct rb_node *node; 249 struct rb_node *node;
248 250
249 BUG_ON(es == NULL); 251 BUG_ON(es == NULL);
250 trace_ext4_es_find_delayed_extent_enter(inode, lblk); 252 BUG_ON(end < lblk);
253 trace_ext4_es_find_delayed_extent_range_enter(inode, lblk);
251 254
252 read_lock(&EXT4_I(inode)->i_es_lock); 255 read_lock(&EXT4_I(inode)->i_es_lock);
253 tree = &EXT4_I(inode)->i_es_tree; 256 tree = &EXT4_I(inode)->i_es_tree;
@@ -270,6 +273,10 @@ out:
270 if (es1 && !ext4_es_is_delayed(es1)) { 273 if (es1 && !ext4_es_is_delayed(es1)) {
271 while ((node = rb_next(&es1->rb_node)) != NULL) { 274 while ((node = rb_next(&es1->rb_node)) != NULL) {
272 es1 = rb_entry(node, struct extent_status, rb_node); 275 es1 = rb_entry(node, struct extent_status, rb_node);
276 if (es1->es_lblk > end) {
277 es1 = NULL;
278 break;
279 }
273 if (ext4_es_is_delayed(es1)) 280 if (ext4_es_is_delayed(es1))
274 break; 281 break;
275 } 282 }
@@ -285,7 +292,7 @@ out:
285 read_unlock(&EXT4_I(inode)->i_es_lock); 292 read_unlock(&EXT4_I(inode)->i_es_lock);
286 293
287 ext4_es_lru_add(inode); 294 ext4_es_lru_add(inode);
288 trace_ext4_es_find_delayed_extent_exit(inode, es); 295 trace_ext4_es_find_delayed_extent_range_exit(inode, es);
289} 296}
290 297
291static struct extent_status * 298static struct extent_status *
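
ext4_es_find_delayed_extent_range() now takes an end block, and the walk over the extent status tree bails out as soon as an extent starts past that bound instead of scanning to the end of the tree; callers such as fiemap and SEEK_DATA/SEEK_HOLE pass exactly the window they care about. A sketch of the bounded search, with a sorted array standing in for the status tree:

    /* Sketch: find the first "delayed" extent whose range is not entirely
     * before lblk, stopping as soon as an extent starts beyond end. */
    #include <stdio.h>

    struct extent {
        unsigned lblk;              /* first block */
        unsigned len;               /* number of blocks */
        int delayed;                /* status flag */
    };

    static const struct extent *find_delayed_range(const struct extent *tree,
                                                   int nr, unsigned lblk,
                                                   unsigned end)
    {
        for (int i = 0; i < nr; i++) {
            const struct extent *es = &tree[i];

            if (es->lblk + es->len <= lblk)     /* ends before the window */
                continue;
            if (es->lblk > end)                 /* starts past the window: stop */
                return NULL;
            if (es->delayed)
                return es;
        }
        return NULL;
    }

    int main(void)
    {
        /* sorted by lblk, like the in-memory extent status tree */
        struct extent tree[] = {
            { 0, 16, 0 }, { 16, 8, 0 }, { 100, 4, 1 },
        };
        const struct extent *es = find_delayed_range(tree, 3, 10, 40);

        printf("%s\n", es ? "found delayed extent" : "no delayed extent in range");
        return 0;
    }
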
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index d8e2d4dc311e..f740eb03b707 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -62,7 +62,8 @@ extern int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
62 unsigned long long status); 62 unsigned long long status);
63extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 63extern int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
64 ext4_lblk_t len); 64 ext4_lblk_t len);
65extern void ext4_es_find_delayed_extent(struct inode *inode, ext4_lblk_t lblk, 65extern void ext4_es_find_delayed_extent_range(struct inode *inode,
66 ext4_lblk_t lblk, ext4_lblk_t end,
66 struct extent_status *es); 67 struct extent_status *es);
67extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, 68extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
68 struct extent_status *es); 69 struct extent_status *es);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 4959e29573b6..b1b4d51b5d86 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -465,7 +465,7 @@ static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
465 * If there is a delay extent at this offset, 465 * If there is a delay extent at this offset,
466 * it will be as a data. 466 * it will be as a data.
467 */ 467 */
468 ext4_es_find_delayed_extent(inode, last, &es); 468 ext4_es_find_delayed_extent_range(inode, last, last, &es);
469 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 469 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
470 if (last != start) 470 if (last != start)
471 dataoff = last << blkbits; 471 dataoff = last << blkbits;
@@ -548,7 +548,7 @@ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
548 * If there is a delay extent at this offset, 548 * If there is a delay extent at this offset,
549 * we will skip this extent. 549 * we will skip this extent.
550 */ 550 */
551 ext4_es_find_delayed_extent(inode, last, &es); 551 ext4_es_find_delayed_extent_range(inode, last, last, &es);
552 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { 552 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
553 last = es.es_lblk + es.es_len; 553 last = es.es_lblk + es.es_len;
554 holeoff = last << blkbits; 554 holeoff = last << blkbits;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0723774bdfb5..d6382b89ecbd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1488,10 +1488,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1488 struct ext4_io_submit io_submit; 1488 struct ext4_io_submit io_submit;
1489 1489
1490 BUG_ON(mpd->next_page <= mpd->first_page); 1490 BUG_ON(mpd->next_page <= mpd->first_page);
1491 ext4_io_submit_init(&io_submit, mpd->wbc); 1491 memset(&io_submit, 0, sizeof(io_submit));
1492 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
1493 if (!io_submit.io_end)
1494 return -ENOMEM;
1495 /* 1492 /*
1496 * We need to start from the first_page to the next_page - 1 1493 * We need to start from the first_page to the next_page - 1
1497 * to make sure we also write the mapped dirty buffer_heads. 1494 * to make sure we also write the mapped dirty buffer_heads.
@@ -1579,8 +1576,6 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
1579 pagevec_release(&pvec); 1576 pagevec_release(&pvec);
1580 } 1577 }
1581 ext4_io_submit(&io_submit); 1578 ext4_io_submit(&io_submit);
1582 /* Drop io_end reference we got from init */
1583 ext4_put_io_end_defer(io_submit.io_end);
1584 return ret; 1579 return ret;
1585} 1580}
1586 1581
@@ -2239,16 +2234,9 @@ static int ext4_writepage(struct page *page,
2239 */ 2234 */
2240 return __ext4_journalled_writepage(page, len); 2235 return __ext4_journalled_writepage(page, len);
2241 2236
2242 ext4_io_submit_init(&io_submit, wbc); 2237 memset(&io_submit, 0, sizeof(io_submit));
2243 io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
2244 if (!io_submit.io_end) {
2245 redirty_page_for_writepage(wbc, page);
2246 return -ENOMEM;
2247 }
2248 ret = ext4_bio_write_page(&io_submit, page, len, wbc); 2238 ret = ext4_bio_write_page(&io_submit, page, len, wbc);
2249 ext4_io_submit(&io_submit); 2239 ext4_io_submit(&io_submit);
2250 /* Drop io_end reference we got from init */
2251 ext4_put_io_end_defer(io_submit.io_end);
2252 return ret; 2240 return ret;
2253} 2241}
2254 2242
@@ -3079,13 +3067,9 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3079 struct inode *inode = file_inode(iocb->ki_filp); 3067 struct inode *inode = file_inode(iocb->ki_filp);
3080 ext4_io_end_t *io_end = iocb->private; 3068 ext4_io_end_t *io_end = iocb->private;
3081 3069
3082 /* if not async direct IO just return */ 3070 /* if not async direct IO or dio with 0 bytes write, just return */
3083 if (!io_end) { 3071 if (!io_end || !size)
3084 inode_dio_done(inode); 3072 goto out;
3085 if (is_async)
3086 aio_complete(iocb, ret, 0);
3087 return;
3088 }
3089 3073
3090 ext_debug("ext4_end_io_dio(): io_end 0x%p " 3074 ext_debug("ext4_end_io_dio(): io_end 0x%p "
3091 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n", 3075 "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
@@ -3093,13 +3077,25 @@ static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3093 size); 3077 size);
3094 3078
3095 iocb->private = NULL; 3079 iocb->private = NULL;
3080
3081 /* if not aio dio with unwritten extents, just free io and return */
3082 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
3083 ext4_free_io_end(io_end);
3084out:
3085 inode_dio_done(inode);
3086 if (is_async)
3087 aio_complete(iocb, ret, 0);
3088 return;
3089 }
3090
3096 io_end->offset = offset; 3091 io_end->offset = offset;
3097 io_end->size = size; 3092 io_end->size = size;
3098 if (is_async) { 3093 if (is_async) {
3099 io_end->iocb = iocb; 3094 io_end->iocb = iocb;
3100 io_end->result = ret; 3095 io_end->result = ret;
3101 } 3096 }
3102 ext4_put_io_end_defer(io_end); 3097
3098 ext4_add_complete_io(io_end);
3103} 3099}
3104 3100
3105/* 3101/*
@@ -3133,7 +3129,6 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3133 get_block_t *get_block_func = NULL; 3129 get_block_t *get_block_func = NULL;
3134 int dio_flags = 0; 3130 int dio_flags = 0;
3135 loff_t final_size = offset + count; 3131 loff_t final_size = offset + count;
3136 ext4_io_end_t *io_end = NULL;
3137 3132
3138 /* Use the old path for reads and writes beyond i_size. */ 3133 /* Use the old path for reads and writes beyond i_size. */
3139 if (rw != WRITE || final_size > inode->i_size) 3134 if (rw != WRITE || final_size > inode->i_size)
@@ -3172,16 +3167,13 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3172 iocb->private = NULL; 3167 iocb->private = NULL;
3173 ext4_inode_aio_set(inode, NULL); 3168 ext4_inode_aio_set(inode, NULL);
3174 if (!is_sync_kiocb(iocb)) { 3169 if (!is_sync_kiocb(iocb)) {
3175 io_end = ext4_init_io_end(inode, GFP_NOFS); 3170 ext4_io_end_t *io_end = ext4_init_io_end(inode, GFP_NOFS);
3176 if (!io_end) { 3171 if (!io_end) {
3177 ret = -ENOMEM; 3172 ret = -ENOMEM;
3178 goto retake_lock; 3173 goto retake_lock;
3179 } 3174 }
3180 io_end->flag |= EXT4_IO_END_DIRECT; 3175 io_end->flag |= EXT4_IO_END_DIRECT;
3181 /* 3176 iocb->private = io_end;
3182 * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3183 */
3184 iocb->private = ext4_get_io_end(io_end);
3185 /* 3177 /*
3186 * we save the io structure for current async direct 3178 * we save the io structure for current async direct
3187 * IO, so that later ext4_map_blocks() could flag the 3179 * IO, so that later ext4_map_blocks() could flag the
@@ -3205,27 +3197,26 @@ static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3205 NULL, 3197 NULL,
3206 dio_flags); 3198 dio_flags);
3207 3199
3200 if (iocb->private)
3201 ext4_inode_aio_set(inode, NULL);
3208 /* 3202 /*
3209 * Put our reference to io_end. This can free the io_end structure e.g. 3203 * The io_end structure takes a reference to the inode, that
3210 * in sync IO case or in case of error. It can even perform extent 3204 * structure needs to be destroyed and the reference to the
3211 * conversion if all bios we submitted finished before we got here. 3205 * inode need to be dropped, when IO is complete, even with 0
3212 * Note that in that case iocb->private can be already set to NULL 3206 * byte write, or failed.
3213 * here. 3207 *
3208 * In the successful AIO DIO case, the io_end structure will
3209 * be destroyed and the reference to the inode will be dropped
3210 * after the end_io call back function is called.
3211 *
3212 * In the case there is 0 byte write, or error case, since VFS
3213 * direct IO won't invoke the end_io call back function, we
3214 * need to free the end_io structure here.
3214 */ 3215 */
3215 if (io_end) { 3216 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
3216 ext4_inode_aio_set(inode, NULL); 3217 ext4_free_io_end(iocb->private);
3217 ext4_put_io_end(io_end); 3218 iocb->private = NULL;
3218 /* 3219 } else if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3219 * In case of error or no write ext4_end_io_dio() was not
3220 * called so we have to put iocb's reference.
3221 */
3222 if (ret <= 0 && ret != -EIOCBQUEUED) {
3223 WARN_ON(iocb->private != io_end);
3224 ext4_put_io_end(io_end);
3225 iocb->private = NULL;
3226 }
3227 }
3228 if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3229 EXT4_STATE_DIO_UNWRITTEN)) { 3220 EXT4_STATE_DIO_UNWRITTEN)) {
3230 int err; 3221 int err;
3231 /* 3222 /*
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index b1ed9e07434b..def84082a9a9 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2105,7 +2105,11 @@ repeat:
2105 group = ac->ac_g_ex.fe_group; 2105 group = ac->ac_g_ex.fe_group;
2106 2106
2107 for (i = 0; i < ngroups; group++, i++) { 2107 for (i = 0; i < ngroups; group++, i++) {
2108 if (group == ngroups) 2108 /*
2109 * Artificially restricted ngroups for non-extent
2110 * files makes group > ngroups possible on first loop.
2111 */
2112 if (group >= ngroups)
2109 group = 0; 2113 group = 0;
2110 2114
2111 /* This now checks without needing the buddy page */ 2115 /* This now checks without needing the buddy page */
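
The mballoc.c change relaxes the wraparound test in the group scan from == to >=: ngroups can be artificially restricted for non-extent files, so the goal group computed earlier may already lie past the limit on the very first iteration. A sketch of the round-robin scan with the defensive guard:

    /* Sketch: scan ngroups slots round-robin starting at goal, wrapping to
     * zero.  The >= guard also repairs a starting point that is already past
     * a (possibly shrunken) ngroups limit. */
    #include <stdio.h>

    static void scan_groups(unsigned goal, unsigned ngroups)
    {
        unsigned group = goal;

        for (unsigned i = 0; i < ngroups; i++, group++) {
            if (group >= ngroups)   /* '==' would miss goal > ngroups */
                group = 0;
            printf("visit group %u\n", group);
        }
    }

    int main(void)
    {
        /* goal computed against the full group count, ngroups later capped */
        scan_groups(5, 3);
        return 0;
    }
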
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 19599bded62a..4acf1f78881b 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -62,28 +62,15 @@ void ext4_ioend_shutdown(struct inode *inode)
62 cancel_work_sync(&EXT4_I(inode)->i_unwritten_work); 62 cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
63} 63}
64 64
65static void ext4_release_io_end(ext4_io_end_t *io_end) 65void ext4_free_io_end(ext4_io_end_t *io)
66{ 66{
67 BUG_ON(!list_empty(&io_end->list)); 67 BUG_ON(!io);
68 BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN); 68 BUG_ON(!list_empty(&io->list));
69 69 BUG_ON(io->flag & EXT4_IO_END_UNWRITTEN);
70 if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
71 wake_up_all(ext4_ioend_wq(io_end->inode));
72 if (io_end->flag & EXT4_IO_END_DIRECT)
73 inode_dio_done(io_end->inode);
74 if (io_end->iocb)
75 aio_complete(io_end->iocb, io_end->result, 0);
76 kmem_cache_free(io_end_cachep, io_end);
77}
78
79static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
80{
81 struct inode *inode = io_end->inode;
82 70
83 io_end->flag &= ~EXT4_IO_END_UNWRITTEN; 71 if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count))
84 /* Wake up anyone waiting on unwritten extent conversion */ 72 wake_up_all(ext4_ioend_wq(io->inode));
85 if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten)) 73 kmem_cache_free(io_end_cachep, io);
86 wake_up_all(ext4_ioend_wq(inode));
87} 74}
88 75
89/* check a range of space and convert unwritten extents to written. */ 76/* check a range of space and convert unwritten extents to written. */
@@ -106,8 +93,13 @@ static int ext4_end_io(ext4_io_end_t *io)
106 "(inode %lu, offset %llu, size %zd, error %d)", 93 "(inode %lu, offset %llu, size %zd, error %d)",
107 inode->i_ino, offset, size, ret); 94 inode->i_ino, offset, size, ret);
108 } 95 }
109 ext4_clear_io_unwritten_flag(io); 96 /* Wake up anyone waiting on unwritten extent conversion */
110 ext4_release_io_end(io); 97 if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
98 wake_up_all(ext4_ioend_wq(inode));
99 if (io->flag & EXT4_IO_END_DIRECT)
100 inode_dio_done(inode);
101 if (io->iocb)
102 aio_complete(io->iocb, io->result, 0);
111 return ret; 103 return ret;
112} 104}
113 105
@@ -138,7 +130,7 @@ static void dump_completed_IO(struct inode *inode)
138} 130}
139 131
140/* Add the io_end to per-inode completed end_io list. */ 132/* Add the io_end to per-inode completed end_io list. */
141static void ext4_add_complete_io(ext4_io_end_t *io_end) 133void ext4_add_complete_io(ext4_io_end_t *io_end)
142{ 134{
143 struct ext4_inode_info *ei = EXT4_I(io_end->inode); 135 struct ext4_inode_info *ei = EXT4_I(io_end->inode);
144 struct workqueue_struct *wq; 136 struct workqueue_struct *wq;
@@ -175,6 +167,8 @@ static int ext4_do_flush_completed_IO(struct inode *inode)
175 err = ext4_end_io(io); 167 err = ext4_end_io(io);
176 if (unlikely(!ret && err)) 168 if (unlikely(!ret && err))
177 ret = err; 169 ret = err;
170 io->flag &= ~EXT4_IO_END_UNWRITTEN;
171 ext4_free_io_end(io);
178 } 172 }
179 return ret; 173 return ret;
180} 174}
@@ -206,43 +200,10 @@ ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
206 atomic_inc(&EXT4_I(inode)->i_ioend_count); 200 atomic_inc(&EXT4_I(inode)->i_ioend_count);
207 io->inode = inode; 201 io->inode = inode;
208 INIT_LIST_HEAD(&io->list); 202 INIT_LIST_HEAD(&io->list);
209 atomic_set(&io->count, 1);
210 } 203 }
211 return io; 204 return io;
212} 205}
213 206
214void ext4_put_io_end_defer(ext4_io_end_t *io_end)
215{
216 if (atomic_dec_and_test(&io_end->count)) {
217 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
218 ext4_release_io_end(io_end);
219 return;
220 }
221 ext4_add_complete_io(io_end);
222 }
223}
224
225int ext4_put_io_end(ext4_io_end_t *io_end)
226{
227 int err = 0;
228
229 if (atomic_dec_and_test(&io_end->count)) {
230 if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
231 err = ext4_convert_unwritten_extents(io_end->inode,
232 io_end->offset, io_end->size);
233 ext4_clear_io_unwritten_flag(io_end);
234 }
235 ext4_release_io_end(io_end);
236 }
237 return err;
238}
239
240ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
241{
242 atomic_inc(&io_end->count);
243 return io_end;
244}
245
246/* 207/*
247 * Print an buffer I/O error compatible with the fs/buffer.c. This 208 * Print an buffer I/O error compatible with the fs/buffer.c. This
248 * provides compatibility with dmesg scrapers that look for a specific 209 * provides compatibility with dmesg scrapers that look for a specific
@@ -325,7 +286,12 @@ static void ext4_end_bio(struct bio *bio, int error)
325 bi_sector >> (inode->i_blkbits - 9)); 286 bi_sector >> (inode->i_blkbits - 9));
326 } 287 }
327 288
328 ext4_put_io_end_defer(io_end); 289 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
290 ext4_free_io_end(io_end);
291 return;
292 }
293
294 ext4_add_complete_io(io_end);
329} 295}
330 296
331void ext4_io_submit(struct ext4_io_submit *io) 297void ext4_io_submit(struct ext4_io_submit *io)
@@ -339,37 +305,40 @@ void ext4_io_submit(struct ext4_io_submit *io)
339 bio_put(io->io_bio); 305 bio_put(io->io_bio);
340 } 306 }
341 io->io_bio = NULL; 307 io->io_bio = NULL;
342} 308 io->io_op = 0;
343
344void ext4_io_submit_init(struct ext4_io_submit *io,
345 struct writeback_control *wbc)
346{
347 io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
348 io->io_bio = NULL;
349 io->io_end = NULL; 309 io->io_end = NULL;
350} 310}
351 311
352static int io_submit_init_bio(struct ext4_io_submit *io, 312static int io_submit_init(struct ext4_io_submit *io,
353 struct buffer_head *bh) 313 struct inode *inode,
314 struct writeback_control *wbc,
315 struct buffer_head *bh)
354{ 316{
317 ext4_io_end_t *io_end;
318 struct page *page = bh->b_page;
355 int nvecs = bio_get_nr_vecs(bh->b_bdev); 319 int nvecs = bio_get_nr_vecs(bh->b_bdev);
356 struct bio *bio; 320 struct bio *bio;
357 321
322 io_end = ext4_init_io_end(inode, GFP_NOFS);
323 if (!io_end)
324 return -ENOMEM;
358 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); 325 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
359 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 326 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
360 bio->bi_bdev = bh->b_bdev; 327 bio->bi_bdev = bh->b_bdev;
328 bio->bi_private = io->io_end = io_end;
361 bio->bi_end_io = ext4_end_bio; 329 bio->bi_end_io = ext4_end_bio;
362 bio->bi_private = ext4_get_io_end(io->io_end); 330
363 if (!io->io_end->size) 331 io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
364 io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT) 332
365 + bh_offset(bh);
366 io->io_bio = bio; 333 io->io_bio = bio;
334 io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
367 io->io_next_block = bh->b_blocknr; 335 io->io_next_block = bh->b_blocknr;
368 return 0; 336 return 0;
369} 337}
370 338
371static int io_submit_add_bh(struct ext4_io_submit *io, 339static int io_submit_add_bh(struct ext4_io_submit *io,
372 struct inode *inode, 340 struct inode *inode,
341 struct writeback_control *wbc,
373 struct buffer_head *bh) 342 struct buffer_head *bh)
374{ 343{
375 ext4_io_end_t *io_end; 344 ext4_io_end_t *io_end;
@@ -380,18 +349,18 @@ submit_and_retry:
380 ext4_io_submit(io); 349 ext4_io_submit(io);
381 } 350 }
382 if (io->io_bio == NULL) { 351 if (io->io_bio == NULL) {
383 ret = io_submit_init_bio(io, bh); 352 ret = io_submit_init(io, inode, wbc, bh);
384 if (ret) 353 if (ret)
385 return ret; 354 return ret;
386 } 355 }
387 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
388 if (ret != bh->b_size)
389 goto submit_and_retry;
390 io_end = io->io_end; 356 io_end = io->io_end;
391 if (test_clear_buffer_uninit(bh)) 357 if (test_clear_buffer_uninit(bh))
392 ext4_set_io_unwritten_flag(inode, io_end); 358 ext4_set_io_unwritten_flag(inode, io_end);
393 io_end->size += bh->b_size; 359 io->io_end->size += bh->b_size;
394 io->io_next_block++; 360 io->io_next_block++;
361 ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
362 if (ret != bh->b_size)
363 goto submit_and_retry;
395 return 0; 364 return 0;
396} 365}
397 366
@@ -463,7 +432,7 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
463 do { 432 do {
464 if (!buffer_async_write(bh)) 433 if (!buffer_async_write(bh))
465 continue; 434 continue;
466 ret = io_submit_add_bh(io, inode, bh); 435 ret = io_submit_add_bh(io, inode, wbc, bh);
467 if (ret) { 436 if (ret) {
468 /* 437 /*
469 * We only get here on ENOMEM. Not much else 438 * We only get here on ENOMEM. Not much else
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index dfce656ddb33..5d4513cb1b3c 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -1229,6 +1229,19 @@ static int fat_read_root(struct inode *inode)
1229 return 0; 1229 return 0;
1230} 1230}
1231 1231
1232static unsigned long calc_fat_clusters(struct super_block *sb)
1233{
1234 struct msdos_sb_info *sbi = MSDOS_SB(sb);
1235
1236 /* Divide first to avoid overflow */
1237 if (sbi->fat_bits != 12) {
1238 unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits;
1239 return ent_per_sec * sbi->fat_length;
1240 }
1241
1242 return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits;
1243}
1244
1232/* 1245/*
1233 * Read the super block of an MS-DOS FS. 1246 * Read the super block of an MS-DOS FS.
1234 */ 1247 */
@@ -1434,7 +1447,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1434 sbi->dirty = b->fat16.state & FAT_STATE_DIRTY; 1447 sbi->dirty = b->fat16.state & FAT_STATE_DIRTY;
1435 1448
1436 /* check that FAT table does not overflow */ 1449 /* check that FAT table does not overflow */
1437 fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; 1450 fat_clusters = calc_fat_clusters(sb);
1438 total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); 1451 total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT);
1439 if (total_clusters > MAX_FAT(sb)) { 1452 if (total_clusters > MAX_FAT(sb)) {
1440 if (!silent) 1453 if (!silent)
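
calc_fat_clusters() exists to reorder the cluster-count arithmetic: for FAT16/FAT32 it divides first (s_blocksize * 8 / fat_bits is a whole number of FAT entries per sector), so the multiplication by fat_length can no longer overflow 32-bit arithmetic on large volumes; FAT12 keeps the original order because 8/12 does not divide evenly. A small demonstration of the difference using 32-bit types (the geometry is made up):

    /* Demonstrates why calc_fat_clusters() divides before multiplying: with
     * 32-bit arithmetic the naive order can wrap on large FAT32 volumes. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t fat_length = 1 << 20;  /* FAT sectors (illustrative) */
        uint32_t blocksize  = 4096;     /* bytes per sector */
        uint32_t fat_bits   = 32;

        /* naive: fat_length * blocksize wraps past 2^32 and yields 0 */
        uint32_t naive = fat_length * blocksize * 8 / fat_bits;

        /* divide first: entries per sector is exact for 16/32-bit FATs */
        uint32_t ent_per_sec = blocksize * 8 / fat_bits;
        uint32_t safe = ent_per_sec * fat_length;

        printf("naive=%u  divide-first=%u\n", naive, safe);
        return 0;
    }
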
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index eb08c9e43c2a..5a376ab81feb 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -26,7 +26,7 @@ config GFS2_FS
26config GFS2_FS_LOCKING_DLM 26config GFS2_FS_LOCKING_DLM
27 bool "GFS2 DLM locking" 27 bool "GFS2 DLM locking"
28 depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \ 28 depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && \
29 HOTPLUG && DLM && CONFIGFS_FS && SYSFS 29 HOTPLUG && CONFIGFS_FS && SYSFS && (DLM=y || DLM=GFS2_FS)
30 help 30 help
31 Multiple node locking module for GFS2 31 Multiple node locking module for GFS2
32 32
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index c5fa758fd844..68b4c8f1fce8 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -212,7 +212,7 @@ static void gfs2_end_log_write(struct bio *bio, int error)
212 fs_err(sdp, "Error %d writing to log\n", error); 212 fs_err(sdp, "Error %d writing to log\n", error);
213 } 213 }
214 214
215 bio_for_each_segment(bvec, bio, i) { 215 bio_for_each_segment_all(bvec, bio, i) {
216 page = bvec->bv_page; 216 page = bvec->bv_page;
217 if (page_has_buffers(page)) 217 if (page_has_buffers(page))
218 gfs2_end_log_write_bh(sdp, bvec, error); 218 gfs2_end_log_write_bh(sdp, bvec, error);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c7c840e916f8..c253b13722e8 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -121,7 +121,7 @@ static u64 qd2index(struct gfs2_quota_data *qd)
121{ 121{
122 struct kqid qid = qd->qd_id; 122 struct kqid qid = qd->qd_id;
123 return (2 * (u64)from_kqid(&init_user_ns, qid)) + 123 return (2 * (u64)from_kqid(&init_user_ns, qid)) +
124 (qid.type == USRQUOTA) ? 0 : 1; 124 ((qid.type == USRQUOTA) ? 0 : 1);
125} 125}
126 126
127static u64 qd2offset(struct gfs2_quota_data *qd) 127static u64 qd2offset(struct gfs2_quota_data *qd)
@@ -721,7 +721,7 @@ get_a_page:
721 goto unlock_out; 721 goto unlock_out;
722 } 722 }
723 723
724 gfs2_trans_add_meta(ip->i_gl, bh); 724 gfs2_trans_add_data(ip->i_gl, bh);
725 725
726 kaddr = kmap_atomic(page); 726 kaddr = kmap_atomic(page);
727 if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE) 727 if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
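
The qd2index() fix is a pure operator-precedence bug: the conditional operator binds more loosely than +, so without the added parentheses the expression parses as (2 * id + type_bit) ? 0 : 1 and every index collapses to 0 or 1. A two-line demonstration:

    /* Shows the precedence trap fixed in qd2index(): without parentheses the
     * ternary swallows the addition and the "index" collapses to 0 or 1. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long id = 1000;
        int is_group = 1;       /* stand-in for qid.type != USRQUOTA */

        /* parsed as (2 * id + is_group) ? 0 : 1, so the result is 0 */
        unsigned long long buggy = 2 * id + is_group ? 0 : 1;
        unsigned long long fixed = 2 * id + (is_group ? 1 : 0);

        printf("buggy=%llu fixed=%llu\n", buggy, fixed);    /* 0 vs 2001 */
        return 0;
    }
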
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 0c5a575b513e..5232525934ae 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1401,9 +1401,14 @@ static void rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1401 u32 extlen; 1401 u32 extlen;
1402 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved; 1402 u32 free_blocks = rgd->rd_free_clone - rgd->rd_reserved;
1403 int ret; 1403 int ret;
1404 struct inode *inode = &ip->i_inode;
1404 1405
1405 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested); 1406 if (S_ISDIR(inode->i_mode))
1406 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks); 1407 extlen = 1;
1408 else {
1409 extlen = max_t(u32, atomic_read(&rs->rs_sizehint), requested);
1410 extlen = clamp(extlen, RGRP_RSRV_MINBLKS, free_blocks);
1411 }
1407 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen)) 1412 if ((rgd->rd_free_clone < rgd->rd_reserved) || (free_blocks < extlen))
1408 return; 1413 return;
1409 1414
diff --git a/fs/hfs/bnode.c b/fs/hfs/bnode.c
index f3b1a15ccd59..d3fa6bd9503e 100644
--- a/fs/hfs/bnode.c
+++ b/fs/hfs/bnode.c
@@ -415,7 +415,11 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
415 spin_lock(&tree->hash_lock); 415 spin_lock(&tree->hash_lock);
416 node = hfs_bnode_findhash(tree, num); 416 node = hfs_bnode_findhash(tree, num);
417 spin_unlock(&tree->hash_lock); 417 spin_unlock(&tree->hash_lock);
418 BUG_ON(node); 418 if (node) {
419 pr_crit("new node %u already hashed?\n", num);
420 WARN_ON(1);
421 return node;
422 }
419 node = __hfs_bnode_create(tree, num); 423 node = __hfs_bnode_create(tree, num);
420 if (!node) 424 if (!node)
421 return ERR_PTR(-ENOMEM); 425 return ERR_PTR(-ENOMEM);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index a13d26ede254..0bc27684ebfa 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -414,7 +414,7 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,

 	spin_lock(&tbl->slot_tbl_lock);
 	/* state manager is resetting the session */
-	if (test_bit(NFS4_SESSION_DRAINING, &clp->cl_session->session_state)) {
+	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
 		spin_unlock(&tbl->slot_tbl_lock);
 		status = htonl(NFS4ERR_DELAY);
 		/* Return NFS4ERR_BADSESSION if we're draining the session
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 59461c957d9d..a35582c9d444 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -763,7 +763,7 @@ static void nfs4_callback_free_slot(struct nfs4_session *session)
 	 * A single slot, so highest used slotid is either 0 or -1
 	 */
 	tbl->highest_used_slotid = NFS4_NO_SLOT;
-	nfs4_session_drain_complete(session, tbl);
+	nfs4_slot_tbl_drain_complete(tbl);
 	spin_unlock(&tbl->slot_tbl_lock);
 }

diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 947b0c908aa9..4cbad5d6b276 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -203,7 +203,7 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp,
 		__set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
 	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I);
 	if (error == -EINVAL)
-		error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_NULL);
+		error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
 	if (error < 0)
 		goto error;

diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8fbc10054115..4e2fe714d5c2 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -572,7 +572,7 @@ int nfs41_setup_sequence(struct nfs4_session *session,
 	task->tk_timeout = 0;

 	spin_lock(&tbl->slot_tbl_lock);
-	if (test_bit(NFS4_SESSION_DRAINING, &session->session_state) &&
+	if (test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state) &&
 	    !args->sa_privileged) {
 		/* The state manager will wait until the slot table is empty */
 		dprintk("%s session is draining\n", __func__);
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index ebda5f4a031b..c4e225e4a9af 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -73,7 +73,7 @@ void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot)
 		tbl->highest_used_slotid = new_max;
 	else {
 		tbl->highest_used_slotid = NFS4_NO_SLOT;
-		nfs4_session_drain_complete(tbl->session, tbl);
+		nfs4_slot_tbl_drain_complete(tbl);
 	}
 	}
 	dprintk("%s: slotid %u highest_used_slotid %d\n", __func__,
@@ -226,7 +226,7 @@ static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
 	struct nfs4_slot *slot = pslot;
 	struct nfs4_slot_table *tbl = slot->table;

-	if (nfs4_session_draining(tbl->session) && !args->sa_privileged)
+	if (nfs4_slot_tbl_draining(tbl) && !args->sa_privileged)
 		return false;
 	slot->generation = tbl->generation;
 	args->sa_slot = slot;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 6f3cb39386d4..ff7d9f0f8a65 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -25,6 +25,10 @@ struct nfs4_slot {
 };

 /* Sessions */
+enum nfs4_slot_tbl_state {
+	NFS4_SLOT_TBL_DRAINING,
+};
+
 #define SLOT_TABLE_SZ DIV_ROUND_UP(NFS4_MAX_SLOT_TABLE, 8*sizeof(long))
 struct nfs4_slot_table {
 	struct nfs4_session *session;		/* Parent session */
@@ -43,6 +47,7 @@ struct nfs4_slot_table {
 	unsigned long	generation;		/* Generation counter for
 						   target_highest_slotid */
 	struct completion complete;
+	unsigned long	slot_tbl_state;
 };

 /*
@@ -68,7 +73,6 @@ struct nfs4_session {

 enum nfs4_session_state {
 	NFS4_SESSION_INITING,
-	NFS4_SESSION_DRAINING,
 };

 #if defined(CONFIG_NFS_V4_1)
@@ -88,12 +92,11 @@ extern void nfs4_destroy_session(struct nfs4_session *session);
 extern int nfs4_init_session(struct nfs_server *server);
 extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);

-extern void nfs4_session_drain_complete(struct nfs4_session *session,
-		struct nfs4_slot_table *tbl);
+extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);

-static inline bool nfs4_session_draining(struct nfs4_session *session)
+static inline bool nfs4_slot_tbl_draining(struct nfs4_slot_table *tbl)
 {
-	return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
+	return !!test_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 }

 bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 300d17d85c0e..1fab140764c4 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -241,7 +241,7 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
 	if (ses == NULL)
 		return;
 	tbl = &ses->fc_slot_table;
-	if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
+	if (test_and_clear_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state)) {
 		spin_lock(&tbl->slot_tbl_lock);
 		nfs41_wake_slot_table(tbl);
 		spin_unlock(&tbl->slot_tbl_lock);
@@ -251,15 +251,15 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
 /*
  * Signal state manager thread if session fore channel is drained
  */
-void nfs4_session_drain_complete(struct nfs4_session *session,
-		struct nfs4_slot_table *tbl)
+void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl)
 {
-	if (nfs4_session_draining(session))
+	if (nfs4_slot_tbl_draining(tbl))
 		complete(&tbl->complete);
 }

-static int nfs4_wait_on_slot_tbl(struct nfs4_slot_table *tbl)
+static int nfs4_drain_slot_tbl(struct nfs4_slot_table *tbl)
 {
+	set_bit(NFS4_SLOT_TBL_DRAINING, &tbl->slot_tbl_state);
 	spin_lock(&tbl->slot_tbl_lock);
 	if (tbl->highest_used_slotid != NFS4_NO_SLOT) {
 		INIT_COMPLETION(tbl->complete);
@@ -275,13 +275,12 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
 	struct nfs4_session *ses = clp->cl_session;
 	int ret = 0;

-	set_bit(NFS4_SESSION_DRAINING, &ses->session_state);
 	/* back channel */
-	ret = nfs4_wait_on_slot_tbl(&ses->bc_slot_table);
+	ret = nfs4_drain_slot_tbl(&ses->bc_slot_table);
 	if (ret)
 		return ret;
 	/* fore channel */
-	return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
+	return nfs4_drain_slot_tbl(&ses->fc_slot_table);
 }

 static void nfs41_finish_session_reset(struct nfs_client *clp)
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 689fb608648e..bccfec8343c5 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -219,13 +219,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc)

 static int nilfs_set_page_dirty(struct page *page)
 {
-	int ret = __set_page_dirty_buffers(page);
+	int ret = __set_page_dirty_nobuffers(page);

-	if (ret) {
+	if (page_has_buffers(page)) {
 		struct inode *inode = page->mapping->host;
-		unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits);
+		unsigned nr_dirty = 0;
+		struct buffer_head *bh, *head;

-		nilfs_set_file_dirty(inode, nr_dirty);
+		/*
+		 * This page is locked by callers, and no other thread
+		 * concurrently marks its buffers dirty since they are
+		 * only dirtied through routines in fs/buffer.c in
+		 * which call sites of mark_buffer_dirty are protected
+		 * by page lock.
+		 */
+		bh = head = page_buffers(page);
+		do {
+			/* Do not mark hole blocks dirty */
+			if (buffer_dirty(bh) || !buffer_mapped(bh))
+				continue;
+
+			set_buffer_dirty(bh);
+			nr_dirty++;
+		} while (bh = bh->b_this_page, bh != head);
+
+		if (nr_dirty)
+			nilfs_set_file_dirty(inode, nr_dirty);
 	}
 	return ret;
 }
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c
index 1c39efb71bab..2487116d0d33 100644
--- a/fs/ocfs2/extent_map.c
+++ b/fs/ocfs2/extent_map.c
@@ -790,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 						 &hole_size, &rec, &is_last);
 		if (ret) {
 			mlog_errno(ret);
-			goto out;
+			goto out_unlock;
 		}

 		if (rec.e_blkno == 0ULL) {
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 8a7509f9e6f5..ff54014a24ec 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2288,7 +2288,7 @@ relock:
 		ret = ocfs2_inode_lock(inode, NULL, 1);
 		if (ret < 0) {
 			mlog_errno(ret);
-			goto out_sems;
+			goto out;
 		}

 		ocfs2_inode_unlock(inode, 1);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 2b2691b73428..41a695048be7 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -725,6 +725,25 @@ xfs_convert_page(
 			(xfs_off_t)(page->index + 1) << PAGE_CACHE_SHIFT,
 			i_size_read(inode));

+	/*
+	 * If the current map does not span the entire page we are about to try
+	 * to write, then give up. The only way we can write a page that spans
+	 * multiple mappings in a single writeback iteration is via the
+	 * xfs_vm_writepage() function. Data integrity writeback requires the
+	 * entire page to be written in a single attempt, otherwise the part of
+	 * the page we don't write here doesn't get written as part of the data
+	 * integrity sync.
+	 *
+	 * For normal writeback, we also don't attempt to write partial pages
+	 * here as it simply means that write_cache_pages() will see it under
+	 * writeback and ignore the page until some point in the future, at
+	 * which time this will be the only page in the file that needs
+	 * writeback. Hence for more optimal IO patterns, we should always
+	 * avoid partial page writeback due to multiple mappings on a page here.
+	 */
+	if (!xfs_imap_valid(inode, imap, end_offset))
+		goto fail_unlock_page;
+
 	len = 1 << inode->i_blkbits;
 	p_offset = min_t(unsigned long, end_offset & (PAGE_CACHE_SIZE - 1),
 					PAGE_CACHE_SIZE);
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 08d5457c948e..0bce1b348580 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -931,20 +931,22 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
  */
 int
 xfs_attr_shortform_allfit(
 	struct xfs_buf		*bp,
 	struct xfs_inode	*dp)
 {
-	xfs_attr_leafblock_t *leaf;
-	xfs_attr_leaf_entry_t *entry;
+	struct xfs_attr_leafblock *leaf;
+	struct xfs_attr_leaf_entry *entry;
 	xfs_attr_leaf_name_local_t *name_loc;
-	int bytes, i;
+	struct xfs_attr3_icleaf_hdr leafhdr;
+	int			bytes;
+	int			i;

 	leaf = bp->b_addr;
-	ASSERT(leaf->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+	xfs_attr3_leaf_hdr_from_disk(&leafhdr, leaf);
+	entry = xfs_attr3_leaf_entryp(leaf);

-	entry = &leaf->entries[0];
 	bytes = sizeof(struct xfs_attr_sf_hdr);
-	for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
+	for (i = 0; i < leafhdr.count; entry++, i++) {
 		if (entry->flags & XFS_ATTR_INCOMPLETE)
 			continue;		/* don't copy partial entries */
 		if (!(entry->flags & XFS_ATTR_LOCAL))
@@ -954,15 +956,15 @@ xfs_attr_shortform_allfit(
 			return(0);
 		if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
 			return(0);
-		bytes += sizeof(struct xfs_attr_sf_entry)-1
+		bytes += sizeof(struct xfs_attr_sf_entry) - 1
 				+ name_loc->namelen
 				+ be16_to_cpu(name_loc->valuelen);
 	}
 	if ((dp->i_mount->m_flags & XFS_MOUNT_ATTR2) &&
 	    (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
 	    (bytes == sizeof(struct xfs_attr_sf_hdr)))
-		return(-1);
-	return(xfs_attr_shortform_bytesfit(dp, bytes));
+		return -1;
+	return xfs_attr_shortform_bytesfit(dp, bytes);
 }

 /*
@@ -2330,9 +2332,10 @@ xfs_attr3_leaf_lookup_int(
 			if (!xfs_attr_namesp_match(args->flags, entry->flags))
 				continue;
 			args->index = probe;
+			args->valuelen = be32_to_cpu(name_rmt->valuelen);
 			args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
 			args->rmtblkcnt = XFS_B_TO_FSB(args->dp->i_mount,
-						       be32_to_cpu(name_rmt->valuelen));
+						       args->valuelen);
 			return XFS_ERROR(EEXIST);
 		}
 	}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 82b70bda9f47..0d2554299688 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1649,7 +1649,7 @@ xfs_alloc_buftarg(
 {
 	xfs_buftarg_t		*btp;

-	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);
+	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

 	btp->bt_mount = mp;
 	btp->bt_dev =  bdev->bd_dev;
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 9b26a99ebfe9..0b8b2a13cd24 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -270,6 +270,7 @@ xfs_da3_node_read_verify(
 				break;
 			return;
 		case XFS_ATTR_LEAF_MAGIC:
+		case XFS_ATTR3_LEAF_MAGIC:
 			bp->b_ops = &xfs_attr3_leaf_buf_ops;
 			bp->b_ops->verify_read(bp);
 			return;
@@ -2464,7 +2465,8 @@ xfs_buf_map_from_irec(
 	ASSERT(nirecs >= 1);

 	if (nirecs > 1) {
-		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map), KM_SLEEP);
+		map = kmem_zalloc(nirecs * sizeof(struct xfs_buf_map),
+				  KM_SLEEP | KM_NOFS);
 		if (!map)
 			return ENOMEM;
 		*mapp = map;
@@ -2520,7 +2522,8 @@ xfs_dabuf_map(
 	 * Optimize the one-block case.
 	 */
 	if (nfsb != 1)
-		irecs = kmem_zalloc(sizeof(irec) * nfsb, KM_SLEEP);
+		irecs = kmem_zalloc(sizeof(irec) * nfsb,
+				    KM_SLEEP | KM_NOFS);

 	nirecs = nfsb;
 	error = xfs_bmapi_read(dp, (xfs_fileoff_t)bno, nfsb, irecs,
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index 721ba2fe8e54..da71a1819d78 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -1336,7 +1336,7 @@ xfs_dir2_leaf_getdents(
 				  mp->m_sb.sb_blocksize);
 	map_info = kmem_zalloc(offsetof(struct xfs_dir2_leaf_map_info, map) +
 				(length * sizeof(struct xfs_bmbt_irec)),
-				KM_SLEEP);
+				KM_SLEEP | KM_NOFS);
 	map_info->map_size = length;

 	/*
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index c0f375087efc..452920a3f03f 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -305,11 +305,12 @@ xfs_efi_release(xfs_efi_log_item_t *efip,
 {
 	ASSERT(atomic_read(&efip->efi_next_extent) >= nextents);
 	if (atomic_sub_and_test(nextents, &efip->efi_next_extent)) {
-		__xfs_efi_release(efip);
-
 		/* recovery needs us to drop the EFI reference, too */
 		if (test_bit(XFS_EFI_RECOVERED, &efip->efi_flags))
 			__xfs_efi_release(efip);
+
+		__xfs_efi_release(efip);
+		/* efip may now have been freed, do not reference it again. */
 	}
 }

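The xfs_efi_release() hunk above is an ordering fix: the put that may drop the last reference has to be the final touch of the object, so the conditional recovery reference is dropped first. A generic sketch of the same rule, written against the stock kref API with hypothetical names (efi_example and efi_example_free are illustrations, not XFS symbols):

#include <linux/bitops.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct efi_example {
	struct kref	ref;
	unsigned long	flags;
};
#define EFI_EXAMPLE_RECOVERED	0

static void efi_example_free(struct kref *ref)
{
	kfree(container_of(ref, struct efi_example, ref));
}

static void efi_example_release(struct efi_example *e)
{
	/* Drop the recovery reference first, while 'e' is known valid... */
	if (test_bit(EFI_EXAMPLE_RECOVERED, &e->flags))
		kref_put(&e->ref, efi_example_free);

	/* ...then the caller's reference last; 'e' may be freed by this put,
	 * so it must not be dereferenced again afterwards. */
	kref_put(&e->ref, efi_example_free);
}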
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index e3d0b85d852b..d0833b54e55d 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -139,7 +139,7 @@ xlog_cil_prepare_log_vecs(

 		new_lv = kmem_zalloc(sizeof(*new_lv) +
 				niovecs * sizeof(struct xfs_log_iovec),
-				KM_SLEEP);
+				KM_SLEEP|KM_NOFS);

 		/* The allocated iovec region lies beyond the log vector. */
 		new_lv->lv_iovecp = (struct xfs_log_iovec *)&new_lv[1];
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 1501f4fa51a6..0176bb21f09a 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -1453,7 +1453,7 @@ xfs_free_file_space(
 	xfs_mount_t		*mp;
 	int			nimap;
 	uint			resblks;
-	uint			rounding;
+	xfs_off_t		rounding;
 	int			rt;
 	xfs_fileoff_t		startoffset_fsb;
 	xfs_trans_t		*tp;
@@ -1482,7 +1482,7 @@ xfs_free_file_space(
 		inode_dio_wait(VFS_I(ip));
 	}

-	rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
+	rounding = max_t(xfs_off_t, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
 	ioffset = offset & ~(rounding - 1);
 	error = -filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
 					      ioffset, -1);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 98db31d9f9b4..636c59f2003a 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -377,7 +377,6 @@ acpi_status acpi_bus_get_status_handle(acpi_handle handle,
 				       unsigned long long *sta);
 int acpi_bus_get_status(struct acpi_device *device);

-#ifdef CONFIG_PM
 int acpi_bus_set_power(acpi_handle handle, int state);
 const char *acpi_power_state_string(int state);
 int acpi_device_get_power(struct acpi_device *device, int *state);
@@ -385,41 +384,12 @@ int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
+
+#ifdef CONFIG_PM
 bool acpi_bus_can_wakeup(acpi_handle handle);
-#else /* !CONFIG_PM */
-static inline int acpi_bus_set_power(acpi_handle handle, int state)
-{
-	return 0;
-}
-static inline const char *acpi_power_state_string(int state)
-{
-	return "D0";
-}
-static inline int acpi_device_get_power(struct acpi_device *device, int *state)
-{
-	return 0;
-}
-static inline int acpi_device_set_power(struct acpi_device *device, int state)
-{
-	return 0;
-}
-static inline int acpi_bus_init_power(struct acpi_device *device)
-{
-	return 0;
-}
-static inline int acpi_bus_update_power(acpi_handle handle, int *state_p)
-{
-	return 0;
-}
-static inline bool acpi_bus_power_manageable(acpi_handle handle)
-{
-	return false;
-}
-static inline bool acpi_bus_can_wakeup(acpi_handle handle)
-{
-	return false;
-}
-#endif /* !CONFIG_PM */
+#else
+static inline bool acpi_bus_can_wakeup(acpi_handle handle) { return false; }
+#endif

 #ifdef CONFIG_ACPI_PROC_EVENT
 int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 5b3d2bd4813a..64b8c7639520 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -77,7 +77,7 @@ struct acpi_signal_fatal_info {
 /*
  * OSL Initialization and shutdown primitives
  */
-acpi_status __initdata acpi_os_initialize(void);
+acpi_status __init acpi_os_initialize(void);

 acpi_status acpi_os_terminate(void);

diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index b327b5a9296d..ea69367fdd3b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -329,10 +329,16 @@ int acpi_processor_power_init(struct acpi_processor *pr);
 int acpi_processor_power_exit(struct acpi_processor *pr);
 int acpi_processor_cst_has_changed(struct acpi_processor *pr);
 int acpi_processor_hotplug(struct acpi_processor *pr);
-int acpi_processor_suspend(struct device *dev);
-int acpi_processor_resume(struct device *dev);
 extern struct cpuidle_driver acpi_idle_driver;

+#ifdef CONFIG_PM_SLEEP
+void acpi_processor_syscore_init(void);
+void acpi_processor_syscore_exit(void);
+#else
+static inline void acpi_processor_syscore_init(void) {}
+static inline void acpi_processor_syscore_exit(void) {}
+#endif
+
 /* in processor_thermal.c */
 int acpi_processor_get_limit_info(struct acpi_processor *pr);
 extern const struct thermal_cooling_device_ops processor_cooling_ops;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 61196592152e..63d17ee9eb48 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -316,6 +316,7 @@ struct drm_ioctl_desc {
 	int flags;
 	drm_ioctl_t *func;
 	unsigned int cmd_drv;
+	const char *name;
 };

 /**
@@ -324,7 +325,7 @@ struct drm_ioctl_desc {
  */

 #define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
-	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
+	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl, .name = #ioctl}

 struct drm_magic_entry {
 	struct list_head head;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 8230b46fdd73..471f276ce8f7 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -50,13 +50,14 @@ struct drm_fb_helper_surface_size {

 /**
  * struct drm_fb_helper_funcs - driver callbacks for the fbdev emulation library
- * @gamma_set: - Set the given gamma lut register on the given crtc.
- * @gamma_get: - Read the given gamma lut register on the given crtc, used to
+ * @gamma_set: Set the given gamma lut register on the given crtc.
+ * @gamma_get: Read the given gamma lut register on the given crtc, used to
  *             save the current lut when force-restoring the fbdev for e.g.
  *             kdbg.
- * @fb_probe: - Driver callback to allocate and initialize the fbdev info
+ * @fb_probe: Driver callback to allocate and initialize the fbdev info
  *            structure. Futhermore it also needs to allocate the drm
  *            framebuffer used to back the fbdev.
+ * @initial_config: Setup an initial fbdev display configuration
  *
  * Driver callbacks used by the fbdev emulation helper library.
  */
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
index 393369147a2d..675ddf4b441f 100644
--- a/include/drm/drm_os_linux.h
+++ b/include/drm/drm_os_linux.h
@@ -87,15 +87,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
 /** Other copying of data from kernel space */
 #define DRM_COPY_TO_USER(arg1, arg2, arg3)		\
 	copy_to_user(arg1, arg2, arg3)
-/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size )		\
-	(access_ok( VERIFY_READ, uaddr, size ) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)	\
-	__copy_from_user(arg1, arg2, arg3)
-#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
-	__copy_to_user(arg1, arg2, arg3)
-#define DRM_GET_USER_UNCHECKED(val, uaddr)		\
-	__get_user(val, uaddr)

 #define DRM_HZ HZ

diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c2af598f701d..bb1bc485390b 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -152,6 +152,12 @@
 	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6700, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/acpi_dma.h b/include/linux/acpi_dma.h
index d09deabc7bf6..fb0298082916 100644
--- a/include/linux/acpi_dma.h
+++ b/include/linux/acpi_dma.h
@@ -37,6 +37,8 @@ struct acpi_dma_spec {
  * @dev: struct device of this controller
  * @acpi_dma_xlate: callback function to find a suitable channel
  * @data: private data used by a callback function
+ * @base_request_line: first supported request line (CSRT)
+ * @end_request_line: last supported request line (CSRT)
  */
 struct acpi_dma {
 	struct list_head	dma_controllers;
@@ -44,6 +46,8 @@ struct acpi_dma {
 	struct dma_chan		*(*acpi_dma_xlate)
 				(struct acpi_dma_spec *, struct acpi_dma *);
 	void			*data;
+	unsigned short		base_request_line;
+	unsigned short		end_request_line;
 };

 /* Used with acpi_dma_simple_xlate() */
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h
index f14a98a79c9d..2e34db82a643 100644
--- a/include/linux/bcma/bcma.h
+++ b/include/linux/bcma/bcma.h
@@ -134,7 +134,10 @@ struct bcma_host_ops {
 #define BCMA_CORE_I2S			0x834
 #define BCMA_CORE_SDR_DDR1_MEM_CTL	0x835	/* SDR/DDR1 memory controller core */
 #define BCMA_CORE_SHIM			0x837	/* SHIM component in ubus/6362 */
-#define BCMA_CORE_ARM_CR4		0x83e
+#define BCMA_CORE_PHY_AC		0x83B
+#define BCMA_CORE_PCIE2			0x83C	/* PCI Express Gen2 */
+#define BCMA_CORE_USB30_DEV		0x83D
+#define BCMA_CORE_ARM_CR4		0x83E
 #define BCMA_CORE_DEFAULT		0xFFF

 #define BCMA_MAX_NR_CORES		16
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index b840a4960282..677b4f01b2d0 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -1,3 +1,6 @@
+#ifndef _LINUX_BRCMPHY_H
+#define _LINUX_BRCMPHY_H
+
 #define PHY_ID_BCM50610			0x0143bd60
 #define PHY_ID_BCM50610M		0x0143bd70
 #define PHY_ID_BCM5241			0x0143bc30
@@ -29,3 +32,5 @@
 #define PHY_BRCM_CLEAR_RGMII_MODE	0x00004000
 #define PHY_BRCM_DIS_TXCRXC_NOENRGY	0x00008000
 #define PHY_BCM_FLAGS_VALID		0x80000000
+
+#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 13a3da25ff07..98cd41bb39c8 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -30,15 +30,19 @@ struct journal_head {

 	/*
 	 * Journalling list for this buffer [jbd_lock_bh_state()]
+	 * NOTE: We *cannot* combine this with b_modified into a bitfield
+	 * as gcc would then (which the C standard allows but which is
+	 * very unuseful) make 64-bit accesses to the bitfield and clobber
+	 * b_jcount if its update races with bitfield modification.
 	 */
-	unsigned b_jlist:4;
+	unsigned b_jlist;

 	/*
 	 * This flag signals the buffer has been modified by
 	 * the currently running transaction
 	 * [jbd_lock_bh_state()]
 	 */
-	unsigned b_modified:1;
+	unsigned b_modified;

 	/*
 	 * Copy of the buffer data frozen for writing to the log.
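The journal_head hunk above widens two bitfields to full words because adjacent bitfields share one storage unit: writing either one is a read-modify-write of the shared word, which can clobber a neighbouring field updated under a different lock. A small userspace C sketch (illustrative layout only, sizes are typical rather than guaranteed) that shows the packing difference:

#include <stdio.h>

/* Same shape as the old layout: both flags packed into one word. */
struct packed_flags {
	unsigned b_jlist:4;
	unsigned b_modified:1;
};

/* New layout: each flag owns its own word, so an update of one never
 * rewrites storage that belongs to the other. */
struct split_flags {
	unsigned b_jlist;
	unsigned b_modified;
};

int main(void)
{
	/* Typically prints "packed: 4 bytes, split: 8 bytes": the packed
	 * form stores both fields in a single word, so every store is a
	 * read-modify-write of shared memory. */
	printf("packed: %zu bytes, split: %zu bytes\n",
	       sizeof(struct packed_flags), sizeof(struct split_flags));
	return 0;
}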
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index e96329ceb28c..e9ef6d6b51d5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -562,6 +562,9 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...);
 extern __printf(2, 3)
 int __trace_printk(unsigned long ip, const char *fmt, ...);

+extern int __trace_bputs(unsigned long ip, const char *str);
+extern int __trace_puts(unsigned long ip, const char *str, int size);
+
 /**
  * trace_puts - write a string into the ftrace buffer
  * @str: the string to record
@@ -587,8 +590,6 @@ int __trace_printk(unsigned long ip, const char *fmt, ...);
  * (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
  */

-extern int __trace_bputs(unsigned long ip, const char *str);
-extern int __trace_puts(unsigned long ip, const char *str, int size);
 #define trace_puts(str) ({					\
 	static const char *trace_printk_fmt			\
 		__attribute__((section("__trace_printk_fmt"))) =	\
diff --git a/include/linux/kref.h b/include/linux/kref.h
index e15828fd71f1..484604d184be 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -19,6 +19,7 @@
 #include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>
+#include <linux/spinlock.h>

 struct kref {
 	atomic_t refcount;
@@ -98,6 +99,38 @@ static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref)
 	return kref_sub(kref, 1, release);
 }

+/**
+ * kref_put_spinlock_irqsave - decrement refcount for object.
+ * @kref: object.
+ * @release: pointer to the function that will clean up the object when the
+ *	     last reference to the object is released.
+ *	     This pointer is required, and it is not acceptable to pass kfree
+ *	     in as this function.
+ * @lock: lock to take in release case
+ *
+ * Behaves identical to kref_put with one exception.  If the reference count
+ * drops to zero, the lock will be taken atomically wrt dropping the reference
+ * count.  The release function has to call spin_unlock() without _irqrestore.
+ */
+static inline int kref_put_spinlock_irqsave(struct kref *kref,
+		void (*release)(struct kref *kref),
+		spinlock_t *lock)
+{
+	unsigned long flags;
+
+	WARN_ON(release == NULL);
+	if (atomic_add_unless(&kref->refcount, -1, 1))
+		return 0;
+	spin_lock_irqsave(lock, flags);
+	if (atomic_dec_and_test(&kref->refcount)) {
+		release(kref);
+		local_irq_restore(flags);
+		return 1;
+	}
+	spin_unlock_irqrestore(lock, flags);
+	return 0;
+}
+
 static inline int kref_put_mutex(struct kref *kref,
 				 void (*release)(struct kref *kref),
 				 struct mutex *lock)
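The new kref_put_spinlock_irqsave() helper above has an unusual contract: on the final put it takes the lock with irqsave, and the release callback must drop it with plain spin_unlock() because the helper restores interrupt state itself. A minimal usage sketch, assuming a hypothetical 'session' object kept on a spinlock-protected list (none of these names come from the patch):

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(session_list_lock);

struct session {
	struct kref		kref;
	struct list_head	node;
};

static void session_release(struct kref *kref)
{
	struct session *s = container_of(kref, struct session, kref);

	/* Runs with session_list_lock held by the caller's final put. */
	list_del(&s->node);
	spin_unlock(&session_list_lock);	/* plain unlock, no _irqrestore */
	kfree(s);
}

static void session_put(struct session *s)
{
	/* Only takes session_list_lock (irqsave) if this drops the last
	 * reference; session_release() then runs under that lock. */
	kref_put_spinlock_irqsave(&s->kref, session_release,
				  &session_list_lock);
}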
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index fb1bf7d6a410..0390d5943ed6 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -373,13 +373,11 @@ struct ab8500_sysctrl_platform_data;
 /**
  * struct ab8500_platform_data - AB8500 platform data
  * @irq_base: start of AB8500 IRQs, AB8500_NR_IRQS will be used
- * @pm_power_off: Should machine pm power off hook be registered or not
  * @init: board-specific initialization after detection of ab8500
  * @regulator: machine-specific constraints for regulators
  */
 struct ab8500_platform_data {
 	int irq_base;
-	bool pm_power_off;
 	void (*init) (struct ab8500 *);
 	struct ab8500_regulator_platform_data *regulator;
 	struct abx500_gpio_platform_data *gpio;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 67f46ad6920a..352eec9df1b8 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -126,7 +126,7 @@ struct mlx4_rss_context {

 struct mlx4_qp_path {
 	u8			fl;
-	u8			reserved1[1];
+	u8			vlan_control;
 	u8			disable_pkey_check;
 	u8			pkey_index;
 	u8			counter_index;
@@ -141,11 +141,32 @@ struct mlx4_qp_path {
 	u8			sched_queue;
 	u8			vlan_index;
 	u8			feup;
-	u8			reserved3;
+	u8			fvl_rx;
 	u8			reserved4[2];
 	u8			dmac[6];
 };

+enum { /* fl */
+	MLX4_FL_CV	= 1 << 6,
+	MLX4_FL_ETH_HIDE_CQE_VLAN	= 1 << 2
+};
+enum { /* vlan_control */
+	MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED	= 1 << 6,
+	MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED	= 1 << 2,
+	MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED	= 1 << 1, /* 802.1p priority tag */
+	MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED	= 1 << 0
+};
+
+enum { /* feup */
+	MLX4_FEUP_FORCE_ETH_UP          = 1 << 6, /* force Eth UP */
+	MLX4_FSM_FORCE_ETH_SRC_MAC      = 1 << 5, /* force Source MAC */
+	MLX4_FVL_FORCE_ETH_VLAN         = 1 << 3  /* force Eth vlan */
+};
+
+enum { /* fvl_rx */
+	MLX4_FVL_RX_FORCE_ETH_VLAN      = 1 << 0 /* enforce Eth rx vlan */
+};
+
 struct mlx4_qp_context {
 	__be32			flags;
 	__be32			pd;
@@ -185,6 +206,10 @@ struct mlx4_qp_context {
 	u32			reserved5[10];
 };

+enum { /* param3 */
+	MLX4_STRIP_VLAN = 1 << 30
+};
+
 /* Which firmware version adds support for NEC (NoErrorCompletion) bit */
 #define MLX4_FW_VER_WQE_CTRL_NEC mlx4_fw_ver(2, 2, 232)

diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a94a5a0ab122..60584b185a0c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2733,6 +2733,17 @@ static inline netdev_features_t netdev_get_wanted_features(
 }
 netdev_features_t netdev_increment_features(netdev_features_t all,
 	netdev_features_t one, netdev_features_t mask);
+
+/* Allow TSO being used on stacked device :
+ * Performing the GSO segmentation before last device
+ * is a performance improvement.
+ */
+static inline netdev_features_t netdev_add_tso_features(netdev_features_t features,
+							 netdev_features_t mask)
+{
+	return netdev_increment_features(features, NETIF_F_ALL_TSO, mask);
+}
+
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
 void netdev_change_features(struct net_device *dev);
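A hedged sketch of how a stacked driver might use the netdev_add_tso_features() helper added above, from its ndo_fix_features hook (the driver and the mask computation are hypothetical, not taken from this patch): TSO is re-enabled towards the stack even if a lower device lacks it, because segmentation can still happen in software just before the lower device transmits.

#include <linux/netdevice.h>

static netdev_features_t stacked_fix_features(struct net_device *dev,
					      netdev_features_t features)
{
	netdev_features_t mask = features;

	/* ... a real driver would intersect 'features' with each lower
	 * device's features here ... */

	/* Add back whatever TSO bits the mask permits. */
	return netdev_add_tso_features(features, mask);
}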
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 3863a4dbdf18..2a93b64a3869 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -11,9 +11,10 @@
  *
  */

-#ifdef CONFIG_OF_DEVICE
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
+
+#ifdef CONFIG_OF_DEVICE
 #include <linux/pm.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
@@ -100,7 +101,7 @@ extern int of_platform_populate(struct device_node *root,

 #if !defined(CONFIG_OF_ADDRESS)
 struct of_dev_auxdata;
-struct device;
+struct device_node;
 static inline int of_platform_populate(struct device_node *root,
 					const struct of_device_id *matches,
 					const struct of_dev_auxdata *lookup,
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 81b31613eb25..170447977278 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -60,11 +60,13 @@ static inline void acpi_pci_slot_remove(struct pci_bus *bus) { }
 void acpiphp_init(void);
 void acpiphp_enumerate_slots(struct pci_bus *bus, acpi_handle handle);
 void acpiphp_remove_slots(struct pci_bus *bus);
+void acpiphp_check_host_bridge(acpi_handle handle);
 #else
 static inline void acpiphp_init(void) { }
 static inline void acpiphp_enumerate_slots(struct pci_bus *bus,
 					   acpi_handle handle) { }
 static inline void acpiphp_remove_slots(struct pci_bus *bus) { }
+static inline void acpiphp_check_host_bridge(acpi_handle handle) { }
 #endif

 #else	/* CONFIG_ACPI */
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 72474e18f1e0..6aa238096622 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -37,17 +37,17 @@
  *	if it is 0, pull-down is disabled.
  * @PIN_CONFIG_DRIVE_PUSH_PULL: the pin will be driven actively high and
  *	low, this is the most typical case and is typically achieved with two
- *	active transistors on the output. Sending this config will enabale
+ *	active transistors on the output. Setting this config will enable
  *	push-pull mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
  *	collector) which means it is usually wired with other output ports
- *	which are then pulled up with an external resistor. Sending this
- *	config will enabale open drain mode, the argument is ignored.
+ *	which are then pulled up with an external resistor. Setting this
+ *	config will enable open drain mode, the argument is ignored.
  * @PIN_CONFIG_DRIVE_OPEN_SOURCE: the pin will be driven with open source
- *	(open emitter). Sending this config will enabale open drain mode, the
+ *	(open emitter). Setting this config will enable open drain mode, the
  *	argument is ignored.
- * @PIN_CONFIG_DRIVE_STRENGTH: the pin will output the current passed as
- *	argument. The argument is in mA.
+ * @PIN_CONFIG_DRIVE_STRENGTH: the pin will sink or source at most the current
+ *	passed as argument. The argument is in mA.
  * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin.
  *	If the argument != 0, schmitt-trigger mode is enabled. If it's 0,
  *	schmitt-trigger mode is disabled.
diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/clk-lpss.h
index 528e73ce46d2..23901992b9dd 100644
--- a/include/linux/platform_data/clk-lpss.h
+++ b/include/linux/platform_data/clk-lpss.h
@@ -13,6 +13,11 @@
 #ifndef __CLK_LPSS_H
 #define __CLK_LPSS_H

+struct lpss_clk_data {
+	const char *name;
+	struct clk *clk;
+};
+
 extern int lpt_clk_init(void);

 #endif /* __CLK_LPSS_H */
diff --git a/include/linux/platform_data/serial-omap.h b/include/linux/platform_data/serial-omap.h
index ff9b0aab5281..c860c1b314c0 100644
--- a/include/linux/platform_data/serial-omap.h
+++ b/include/linux/platform_data/serial-omap.h
@@ -43,8 +43,6 @@ struct omap_uart_port_info {
 	int DTR_present;

 	int (*get_context_loss_count)(struct device *);
-	void (*set_forceidle)(struct device *);
-	void (*set_noidle)(struct device *);
 	void (*enable_wakeup)(struct device *, bool);
 };

diff --git a/include/linux/printk.h b/include/linux/printk.h
index 6af944ab38f0..22c7052e9372 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -4,6 +4,7 @@
 #include <stdarg.h>
 #include <linux/init.h>
 #include <linux/kern_levels.h>
+#include <linux/linkage.h>

 extern const char linux_banner[];
 extern const char linux_proc_banner[];
diff --git a/include/linux/rio.h b/include/linux/rio.h
index a3e784278667..18e099342e6f 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -83,7 +83,6 @@

 extern struct bus_type rio_bus_type;
 extern struct device rio_bus;
-extern struct list_head rio_devices;	/* list of all devices */

 struct rio_mport;
 struct rio_dev;
@@ -237,6 +236,7 @@ enum rio_phy_type {
  * @name: Port name string
  * @priv: Master port private data
  * @dma: DMA device associated with mport
+ * @nscan: RapidIO network enumeration/discovery operations
  */
 struct rio_mport {
 	struct list_head dbells;	/* list of doorbell events */
@@ -262,8 +262,14 @@ struct rio_mport {
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 	struct dma_device	dma;
 #endif
+	struct rio_scan *nscan;
 };

+/*
+ * Enumeration/discovery control flags
+ */
+#define RIO_SCAN_ENUM_NO_WAIT	0x00000001 /* Do not wait for enum completed */
+
 struct rio_id_table {
 	u16 start;	/* logical minimal id */
 	u32 max;	/* max number of IDs in table */
@@ -460,6 +466,16 @@ static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
 }
 #endif /* CONFIG_RAPIDIO_DMA_ENGINE */

+/**
+ * struct rio_scan - RIO enumeration and discovery operations
+ * @enumerate:	Callback to perform RapidIO fabric enumeration.
+ * @discover:	Callback to perform RapidIO fabric discovery.
+ */
+struct rio_scan {
+	int (*enumerate)(struct rio_mport *mport, u32 flags);
+	int (*discover)(struct rio_mport *mport, u32 flags);
+};
+
 /* Architecture and hardware-specific functions */
 extern int rio_register_mport(struct rio_mport *);
 extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index b75c05920ab5..5059994fe297 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -433,5 +433,6 @@ extern u16 rio_local_get_device_id(struct rio_mport *port);
 extern struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from);
 extern struct rio_dev *rio_get_asm(u16 vid, u16 did, u16 asm_vid, u16 asm_did,
 				   struct rio_dev *from);
+extern int rio_init_mports(void);

 #endif				/* LINUX_RIO_DRV_H */
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 428c37a1f95c..33bf2dfab19d 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,7 +305,6 @@ struct ucred {

 extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred);

-extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
 			       int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
@@ -314,7 +313,6 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
 					   unsigned int len, __wsum *csump);

 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len);
 extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
 			     int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 733eb5ee31c5..6ff26c8db7b9 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -57,7 +57,7 @@ extern struct bus_type spi_bus_type;
  * @modalias: Name of the driver to use with this device, or an alias
  *	for that name.  This appears in the sysfs "modalias" attribute
  *	for driver coldplugging, and in uevents used for hotplugging
- * @cs_gpio: gpio number of the chipselect line (optional, -EINVAL when
+ * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
  *	when not using a GPIO line)
  *
  * A @spi_device is used to interchange data between an SPI slave
@@ -266,7 +266,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
  *	queue so the subsystem notifies the driver that it may relax the
  *	hardware by issuing this call
  * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
- *	number. Any individual value may be -EINVAL for CS lines that
+ *	number. Any individual value may be -ENOENT for CS lines that
  *	are not GPIOs (driven by the SPI controller itself).
  *
  * Each SPI master controller can communicate with one or more @spi_device
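A sketch of how an SPI controller driver is expected to treat cs_gpio after this documentation change (the function name is hypothetical): -ENOENT now marks "no GPIO, the controller drives chip select natively", and gpio_is_valid() already rejects any negative value, so the same test covers both the old -EINVAL and the new -ENOENT convention.

#include <linux/gpio.h>
#include <linux/spi/spi.h>

static void example_set_cs(struct spi_device *spi, bool enable)
{
	if (gpio_is_valid(spi->cs_gpio))
		gpio_set_value(spi->cs_gpio, enable ? 0 : 1); /* CS is active low */
	/* else: native chip select, nothing to do here */
}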
diff --git a/include/linux/time.h b/include/linux/time.h
index 22d81b3c955b..d5d229b2e5af 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -117,14 +117,10 @@ static inline bool timespec_valid_strict(const struct timespec *ts)

 extern bool persistent_clock_exist;

-#ifdef ALWAYS_USE_PERSISTENT_CLOCK
-#define has_persistent_clock()	true
-#else
 static inline bool has_persistent_clock(void)
 {
 	return persistent_clock_exist;
 }
-#endif

 extern void read_persistent_clock(struct timespec *ts);
 extern void read_boot_clock(struct timespec *ts);
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 629aaf51f30b..c55ce243cc09 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -35,4 +35,7 @@ static inline size_t iov_length(const struct iovec *iov, unsigned long nr_segs)
 }

 unsigned long iov_shorten(struct iovec *iov, unsigned long nr_segs, size_t to);
+
+int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
+int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
 #endif
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index c454a88abf2e..f1b0dca60f12 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -563,9 +563,8 @@ static inline int gadget_is_dualspeed(struct usb_gadget *g)
563} 563}
564 564
565/** 565/**
566 * gadget_is_superspeed() - return true if the hardware handles 566 * gadget_is_superspeed() - return true if the hardware handles superspeed
567 * supperspeed 567 * @g: controller that might support superspeed
568 * @g: controller that might support supper speed
569 */ 568 */
570static inline int gadget_is_superspeed(struct usb_gadget *g) 569static inline int gadget_is_superspeed(struct usb_gadget *g)
571{ 570{
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index b9b0f7b4e43b..302ddf55d2da 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -268,6 +268,8 @@ struct usb_serial_driver {
268 struct usb_serial_port *port, struct ktermios *old); 268 struct usb_serial_port *port, struct ktermios *old);
269 void (*break_ctl)(struct tty_struct *tty, int break_state); 269 void (*break_ctl)(struct tty_struct *tty, int break_state);
270 int (*chars_in_buffer)(struct tty_struct *tty); 270 int (*chars_in_buffer)(struct tty_struct *tty);
271 void (*wait_until_sent)(struct tty_struct *tty, long timeout);
272 bool (*tx_empty)(struct usb_serial_port *port);
271 void (*throttle)(struct tty_struct *tty); 273 void (*throttle)(struct tty_struct *tty);
272 void (*unthrottle)(struct tty_struct *tty); 274 void (*unthrottle)(struct tty_struct *tty);
273 int (*tiocmget)(struct tty_struct *tty); 275 int (*tiocmget)(struct tty_struct *tty);
@@ -327,6 +329,8 @@ extern void usb_serial_generic_close(struct usb_serial_port *port);
327extern int usb_serial_generic_resume(struct usb_serial *serial); 329extern int usb_serial_generic_resume(struct usb_serial *serial);
328extern int usb_serial_generic_write_room(struct tty_struct *tty); 330extern int usb_serial_generic_write_room(struct tty_struct *tty);
329extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty); 331extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
332extern void usb_serial_generic_wait_until_sent(struct tty_struct *tty,
333 long timeout);
330extern void usb_serial_generic_read_bulk_callback(struct urb *urb); 334extern void usb_serial_generic_read_bulk_callback(struct urb *urb);
331extern void usb_serial_generic_write_bulk_callback(struct urb *urb); 335extern void usb_serial_generic_write_bulk_callback(struct urb *urb);
332extern void usb_serial_generic_throttle(struct tty_struct *tty); 336extern void usb_serial_generic_throttle(struct tty_struct *tty);
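The new hooks let a sub-driver report when the device-side FIFO has drained and reuse the generic wait_until_sent helper. A speculative sketch of how a driver might wire them up (my_serial_device and the always-true tx_empty stub are placeholders, not from this patch):

	#include <linux/module.h>
	#include <linux/usb/serial.h>

	static bool my_tx_empty(struct usb_serial_port *port)
	{
		/* A real driver would query a status register or status URB
		 * here to learn whether the hardware transmit FIFO drained. */
		return true;
	}

	static struct usb_serial_driver my_serial_device = {
		.driver = {
			.owner	= THIS_MODULE,
			.name	= "my_usb_serial",
		},
		.num_ports		= 1,
		.tx_empty		= my_tx_empty,
		.wait_until_sent	= usb_serial_generic_wait_until_sent,
	};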
diff --git a/include/linux/vt_kern.h b/include/linux/vt_kern.h
index e8d65718560b..0d33fca48774 100644
--- a/include/linux/vt_kern.h
+++ b/include/linux/vt_kern.h
@@ -36,7 +36,7 @@ extern int fg_console, last_console, want_console;
36int vc_allocate(unsigned int console); 36int vc_allocate(unsigned int console);
37int vc_cons_allocated(unsigned int console); 37int vc_cons_allocated(unsigned int console);
38int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines); 38int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines);
39void vc_deallocate(unsigned int console); 39struct vc_data *vc_deallocate(unsigned int console);
40void reset_palette(struct vc_data *vc); 40void reset_palette(struct vc_data *vc);
41void do_blank_screen(int entering_gfx); 41void do_blank_screen(int entering_gfx);
42void do_unblank_screen(int leaving_gfx); 42void do_unblank_screen(int leaving_gfx);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index ac38be2692d8..1133695eb067 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -217,6 +217,8 @@ do { \
217 if (!ret) \ 217 if (!ret) \
218 break; \ 218 break; \
219 } \ 219 } \
220 if (!ret && (condition)) \
221 ret = 1; \
220 finish_wait(&wq, &__wait); \ 222 finish_wait(&wq, &__wait); \
221} while (0) 223} while (0)
222 224
@@ -233,8 +235,9 @@ do { \
233 * wake_up() has to be called after changing any variable that could 235 * wake_up() has to be called after changing any variable that could
234 * change the result of the wait condition. 236 * change the result of the wait condition.
235 * 237 *
236 * The function returns 0 if the @timeout elapsed, and the remaining 238 * The function returns 0 if the @timeout elapsed, or the remaining
237 * jiffies if the condition evaluated to true before the timeout elapsed. 239 * jiffies (at least 1) if the @condition evaluated to %true before
240 * the @timeout elapsed.
238 */ 241 */
239#define wait_event_timeout(wq, condition, timeout) \ 242#define wait_event_timeout(wq, condition, timeout) \
240({ \ 243({ \
@@ -302,6 +305,8 @@ do { \
302 ret = -ERESTARTSYS; \ 305 ret = -ERESTARTSYS; \
303 break; \ 306 break; \
304 } \ 307 } \
308 if (!ret && (condition)) \
309 ret = 1; \
305 finish_wait(&wq, &__wait); \ 310 finish_wait(&wq, &__wait); \
306} while (0) 311} while (0)
307 312
@@ -318,9 +323,10 @@ do { \
318 * wake_up() has to be called after changing any variable that could 323 * wake_up() has to be called after changing any variable that could
319 * change the result of the wait condition. 324 * change the result of the wait condition.
320 * 325 *
321 * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it 326 * Returns:
322 * was interrupted by a signal, and the remaining jiffies otherwise 327 * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by
323 * if the condition evaluated to true before the timeout elapsed. 328 * a signal, or the remaining jiffies (at least 1) if the @condition
329 * evaluated to %true before the @timeout elapsed.
324 */ 330 */
325#define wait_event_interruptible_timeout(wq, condition, timeout) \ 331#define wait_event_interruptible_timeout(wq, condition, timeout) \
326({ \ 332({ \
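The extra "if (!ret && (condition))" re-check closes the race where the condition becomes true just as the timeout expires, and the kerneldoc now guarantees a return of at least 1 in that case. A small caller showing how the return value is meant to be decoded (wait_for_done, my_wq and my_done are illustrative):

	#include <linux/errno.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(my_wq);
	static bool my_done;

	static int wait_for_done(void)
	{
		long ret = wait_event_interruptible_timeout(my_wq, my_done,
							    msecs_to_jiffies(100));
		if (ret == 0)
			return -ETIMEDOUT;	/* condition still false at expiry */
		if (ret < 0)
			return ret;		/* -ERESTARTSYS: hit by a signal */
		return 0;			/* condition true, ret >= 1 jiffies left */
	}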
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 04c2d4670dc6..885898a40d13 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -3043,7 +3043,8 @@ void ieee80211_napi_complete(struct ieee80211_hw *hw);
3043 * This function may not be called in IRQ context. Calls to this function 3043 * This function may not be called in IRQ context. Calls to this function
3044 * for a single hardware must be synchronized against each other. Calls to 3044 * for a single hardware must be synchronized against each other. Calls to
3045 * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be 3045 * this function, ieee80211_rx_ni() and ieee80211_rx_irqsafe() may not be
3046 * mixed for a single hardware. 3046 * mixed for a single hardware. Must not run concurrently with
3047 * ieee80211_tx_status() or ieee80211_tx_status_ni().
3047 * 3048 *
3048 * In process context use instead ieee80211_rx_ni(). 3049 * In process context use instead ieee80211_rx_ni().
3049 * 3050 *
@@ -3059,7 +3060,8 @@ void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb);
3059 * (internally defers to a tasklet.) 3060 * (internally defers to a tasklet.)
3060 * 3061 *
3061 * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not 3062 * Calls to this function, ieee80211_rx() or ieee80211_rx_ni() may not
3062 * be mixed for a single hardware. 3063 * be mixed for a single hardware. Must not run concurrently with
3064 * ieee80211_tx_status() or ieee80211_tx_status_ni().
3063 * 3065 *
3064 * @hw: the hardware this frame came in on 3066 * @hw: the hardware this frame came in on
3065 * @skb: the buffer to receive, owned by mac80211 after this call 3067 * @skb: the buffer to receive, owned by mac80211 after this call
@@ -3073,7 +3075,8 @@ void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb);
3073 * (internally disables bottom halves). 3075 * (internally disables bottom halves).
3074 * 3076 *
3075 * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may 3077 * Calls to this function, ieee80211_rx() and ieee80211_rx_irqsafe() may
3076 * not be mixed for a single hardware. 3078 * not be mixed for a single hardware. Must not run concurrently with
3079 * ieee80211_tx_status() or ieee80211_tx_status_ni().
3077 * 3080 *
3078 * @hw: the hardware this frame came in on 3081 * @hw: the hardware this frame came in on
3079 * @skb: the buffer to receive, owned by mac80211 after this call 3082 * @skb: the buffer to receive, owned by mac80211 after this call
@@ -3196,7 +3199,8 @@ void ieee80211_get_tx_rates(struct ieee80211_vif *vif,
3196 * This function may not be called in IRQ context. Calls to this function 3199 * This function may not be called in IRQ context. Calls to this function
3197 * for a single hardware must be synchronized against each other. Calls 3200 * for a single hardware must be synchronized against each other. Calls
3198 * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe() 3201 * to this function, ieee80211_tx_status_ni() and ieee80211_tx_status_irqsafe()
3199 * may not be mixed for a single hardware. 3202 * may not be mixed for a single hardware. Must not run concurrently with
3203 * ieee80211_rx() or ieee80211_rx_ni().
3200 * 3204 *
3201 * @hw: the hardware the frame was transmitted by 3205 * @hw: the hardware the frame was transmitted by
3202 * @skb: the frame that was transmitted, owned by mac80211 after this call 3206 * @skb: the frame that was transmitted, owned by mac80211 after this call
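These kerneldoc additions document an existing constraint rather than changing behaviour: a driver must not deliver RX frames and TX status reports to mac80211 concurrently for the same hw. One plausible way for a driver to satisfy that, sketched with made-up names (my_wifi, rx_tx_lock):

	#include <linux/skbuff.h>
	#include <linux/spinlock.h>
	#include <net/mac80211.h>

	struct my_wifi {
		struct ieee80211_hw *hw;
		spinlock_t rx_tx_lock;	/* initialised with spin_lock_init();
					 * serializes RX vs. TX-status reporting */
	};

	static void my_handle_rx(struct my_wifi *priv, struct sk_buff *skb)
	{
		spin_lock_bh(&priv->rx_tx_lock);
		ieee80211_rx(priv->hw, skb);
		spin_unlock_bh(&priv->rx_tx_lock);
	}

	static void my_handle_tx_done(struct my_wifi *priv, struct sk_buff *skb)
	{
		spin_lock_bh(&priv->rx_tx_lock);
		ieee80211_tx_status(priv->hw, skb);
		spin_unlock_bh(&priv->rx_tx_lock);
	}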
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index 31f1fb9eb784..99eac12d040b 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -30,7 +30,8 @@ struct nf_loginfo {
30 } u; 30 } u;
31}; 31};
32 32
33typedef void nf_logfn(u_int8_t pf, 33typedef void nf_logfn(struct net *net,
34 u_int8_t pf,
34 unsigned int hooknum, 35 unsigned int hooknum,
35 const struct sk_buff *skb, 36 const struct sk_buff *skb,
36 const struct net_device *in, 37 const struct net_device *in,
diff --git a/include/net/netfilter/nfnetlink_log.h b/include/net/netfilter/nfnetlink_log.h
index e2dec42c2db2..5ca3f14f0998 100644
--- a/include/net/netfilter/nfnetlink_log.h
+++ b/include/net/netfilter/nfnetlink_log.h
@@ -2,7 +2,8 @@
2#define _KER_NFNETLINK_LOG_H 2#define _KER_NFNETLINK_LOG_H
3 3
4void 4void
5nfulnl_log_packet(u_int8_t pf, 5nfulnl_log_packet(struct net *net,
6 u_int8_t pf,
6 unsigned int hooknum, 7 unsigned int hooknum,
7 const struct sk_buff *skb, 8 const struct sk_buff *skb,
8 const struct net_device *in, 9 const struct net_device *in,
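Both prototypes grow a struct net * so loggers know which network namespace the packet was seen in. A sketch of a logger adapted to the new signature ("my-logger" is invented; only the argument list follows the header change above):

	#include <linux/module.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <net/netfilter/nf_log.h>

	static void my_log_packet(struct net *net, u_int8_t pf,
				  unsigned int hooknum,
				  const struct sk_buff *skb,
				  const struct net_device *in,
				  const struct net_device *out,
				  const struct nf_loginfo *loginfo,
				  const char *prefix)
	{
		pr_info("%spacket on %s (netns %p)\n",
			prefix ? prefix : "", in ? in->name : "?", net);
	}

	static struct nf_logger my_logger = {
		.name	= "my-logger",
		.logfn	= my_log_packet,
		.me	= THIS_MODULE,
	};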
diff --git a/include/net/sock.h b/include/net/sock.h
index 5c97b0fc5623..66772cf8c3c5 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -866,6 +866,18 @@ struct inet_hashinfo;
866struct raw_hashinfo; 866struct raw_hashinfo;
867struct module; 867struct module;
868 868
869/*
 870 * caches using SLAB_DESTROY_BY_RCU should leave the .next pointer of nulls nodes
 871 * unmodified. Special care is taken when initializing the object to zero.
872 */
873static inline void sk_prot_clear_nulls(struct sock *sk, int size)
874{
875 if (offsetof(struct sock, sk_node.next) != 0)
876 memset(sk, 0, offsetof(struct sock, sk_node.next));
877 memset(&sk->sk_node.pprev, 0,
878 size - offsetof(struct sock, sk_node.pprev));
879}
880
869/* Networking protocol blocks we attach to sockets. 881/* Networking protocol blocks we attach to sockets.
870 * socket layer -> transport layer interface 882 * socket layer -> transport layer interface
871 * transport -> network interface is defined by struct inet_proto 883 * transport -> network interface is defined by struct inet_proto
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c4af592f7057..e773dfa5f98f 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -463,7 +463,6 @@ struct se_cmd {
463#define CMD_T_ABORTED (1 << 0) 463#define CMD_T_ABORTED (1 << 0)
464#define CMD_T_ACTIVE (1 << 1) 464#define CMD_T_ACTIVE (1 << 1)
465#define CMD_T_COMPLETE (1 << 2) 465#define CMD_T_COMPLETE (1 << 2)
466#define CMD_T_QUEUED (1 << 3)
467#define CMD_T_SENT (1 << 4) 466#define CMD_T_SENT (1 << 4)
468#define CMD_T_STOP (1 << 5) 467#define CMD_T_STOP (1 << 5)
469#define CMD_T_FAILED (1 << 6) 468#define CMD_T_FAILED (1 << 6)
@@ -572,12 +571,8 @@ struct se_dev_entry {
572 bool def_pr_registered; 571 bool def_pr_registered;
573 /* See transport_lunflags_table */ 572 /* See transport_lunflags_table */
574 u32 lun_flags; 573 u32 lun_flags;
575 u32 deve_cmds;
576 u32 mapped_lun; 574 u32 mapped_lun;
577 u32 average_bytes;
578 u32 last_byte_count;
579 u32 total_cmds; 575 u32 total_cmds;
580 u32 total_bytes;
581 u64 pr_res_key; 576 u64 pr_res_key;
582 u64 creation_time; 577 u64 creation_time;
583 u32 attach_count; 578 u32 attach_count;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d0e686402df8..8ee15b97cd38 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -2139,7 +2139,7 @@ TRACE_EVENT(ext4_es_remove_extent,
2139 __entry->lblk, __entry->len) 2139 __entry->lblk, __entry->len)
2140); 2140);
2141 2141
2142TRACE_EVENT(ext4_es_find_delayed_extent_enter, 2142TRACE_EVENT(ext4_es_find_delayed_extent_range_enter,
2143 TP_PROTO(struct inode *inode, ext4_lblk_t lblk), 2143 TP_PROTO(struct inode *inode, ext4_lblk_t lblk),
2144 2144
2145 TP_ARGS(inode, lblk), 2145 TP_ARGS(inode, lblk),
@@ -2161,7 +2161,7 @@ TRACE_EVENT(ext4_es_find_delayed_extent_enter,
2161 (unsigned long) __entry->ino, __entry->lblk) 2161 (unsigned long) __entry->ino, __entry->lblk)
2162); 2162);
2163 2163
2164TRACE_EVENT(ext4_es_find_delayed_extent_exit, 2164TRACE_EVENT(ext4_es_find_delayed_extent_range_exit,
2165 TP_PROTO(struct inode *inode, struct extent_status *es), 2165 TP_PROTO(struct inode *inode, struct extent_status *es),
2166 2166
2167 TP_ARGS(inode, es), 2167 TP_ARGS(inode, es),
diff --git a/include/uapi/linux/virtio_console.h b/include/uapi/linux/virtio_console.h
index ee13ab6c3614..c312f16bc4e7 100644
--- a/include/uapi/linux/virtio_console.h
+++ b/include/uapi/linux/virtio_console.h
@@ -39,7 +39,7 @@
39#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */ 39#define VIRTIO_CONSOLE_F_SIZE 0 /* Does host provide console size? */
40#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */ 40#define VIRTIO_CONSOLE_F_MULTIPORT 1 /* Does host provide multiple ports? */
41 41
42#define VIRTIO_CONSOLE_BAD_ID (~(u32)0) 42#define VIRTIO_CONSOLE_BAD_ID (~(__u32)0)
43 43
44struct virtio_console_config { 44struct virtio_console_config {
45 /* columns of the screens */ 45 /* columns of the screens */
diff --git a/ipc/sem.c b/ipc/sem.c
index a7e40ed8a076..70480a3aa698 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -752,19 +752,29 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
752 int otime, struct list_head *pt) 752 int otime, struct list_head *pt)
753{ 753{
754 int i; 754 int i;
755 int progress;
755 756
756 if (sma->complex_count || sops == NULL) { 757 progress = 1;
757 if (update_queue(sma, -1, pt)) 758retry_global:
759 if (sma->complex_count) {
760 if (update_queue(sma, -1, pt)) {
761 progress = 1;
758 otime = 1; 762 otime = 1;
763 sops = NULL;
764 }
759 } 765 }
766 if (!progress)
767 goto done;
760 768
761 if (!sops) { 769 if (!sops) {
762 /* No semops; something special is going on. */ 770 /* No semops; something special is going on. */
763 for (i = 0; i < sma->sem_nsems; i++) { 771 for (i = 0; i < sma->sem_nsems; i++) {
764 if (update_queue(sma, i, pt)) 772 if (update_queue(sma, i, pt)) {
765 otime = 1; 773 otime = 1;
774 progress = 1;
775 }
766 } 776 }
767 goto done; 777 goto done_checkretry;
768 } 778 }
769 779
770 /* Check the semaphores that were modified. */ 780 /* Check the semaphores that were modified. */
@@ -772,8 +782,15 @@ static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsop
772 if (sops[i].sem_op > 0 || 782 if (sops[i].sem_op > 0 ||
773 (sops[i].sem_op < 0 && 783 (sops[i].sem_op < 0 &&
774 sma->sem_base[sops[i].sem_num].semval == 0)) 784 sma->sem_base[sops[i].sem_num].semval == 0))
775 if (update_queue(sma, sops[i].sem_num, pt)) 785 if (update_queue(sma, sops[i].sem_num, pt)) {
776 otime = 1; 786 otime = 1;
787 progress = 1;
788 }
789 }
790done_checkretry:
791 if (progress) {
792 progress = 0;
793 goto retry_global;
777 } 794 }
778done: 795done:
779 if (otime) 796 if (otime)
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 83a2970295d1..6bd4a90d1991 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1021,9 +1021,6 @@ static void audit_log_rule_change(char *action, struct audit_krule *rule, int re
1021 * @seq: netlink audit message sequence (serial) number 1021 * @seq: netlink audit message sequence (serial) number
1022 * @data: payload data 1022 * @data: payload data
1023 * @datasz: size of payload data 1023 * @datasz: size of payload data
1024 * @loginuid: loginuid of sender
1025 * @sessionid: sessionid for netlink audit message
1026 * @sid: SE Linux Security ID of sender
1027 */ 1024 */
1028int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz) 1025int audit_receive_filter(int type, int pid, int seq, void *data, size_t datasz)
1029{ 1026{
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index 8b86c0c68edf..d5585f5e038e 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -40,11 +40,13 @@ __setup("hlt", cpu_idle_nopoll_setup);
40 40
41static inline int cpu_idle_poll(void) 41static inline int cpu_idle_poll(void)
42{ 42{
43 rcu_idle_enter();
43 trace_cpu_idle_rcuidle(0, smp_processor_id()); 44 trace_cpu_idle_rcuidle(0, smp_processor_id());
44 local_irq_enable(); 45 local_irq_enable();
45 while (!need_resched()) 46 while (!need_resched())
46 cpu_relax(); 47 cpu_relax();
47 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id()); 48 trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
49 rcu_idle_exit();
48 return 1; 50 return 1;
49} 51}
50 52
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6b41c1899a8b..9dc297faf7c0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4394,6 +4394,64 @@ perf_event_read_event(struct perf_event *event,
4394 perf_output_end(&handle); 4394 perf_output_end(&handle);
4395} 4395}
4396 4396
4397typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
4398typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4399
4400static void
4401perf_event_aux_ctx(struct perf_event_context *ctx,
4402 perf_event_aux_match_cb match,
4403 perf_event_aux_output_cb output,
4404 void *data)
4405{
4406 struct perf_event *event;
4407
4408 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4409 if (event->state < PERF_EVENT_STATE_INACTIVE)
4410 continue;
4411 if (!event_filter_match(event))
4412 continue;
4413 if (match(event, data))
4414 output(event, data);
4415 }
4416}
4417
4418static void
4419perf_event_aux(perf_event_aux_match_cb match,
4420 perf_event_aux_output_cb output,
4421 void *data,
4422 struct perf_event_context *task_ctx)
4423{
4424 struct perf_cpu_context *cpuctx;
4425 struct perf_event_context *ctx;
4426 struct pmu *pmu;
4427 int ctxn;
4428
4429 rcu_read_lock();
4430 list_for_each_entry_rcu(pmu, &pmus, entry) {
4431 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4432 if (cpuctx->unique_pmu != pmu)
4433 goto next;
4434 perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
4435 if (task_ctx)
4436 goto next;
4437 ctxn = pmu->task_ctx_nr;
4438 if (ctxn < 0)
4439 goto next;
4440 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4441 if (ctx)
4442 perf_event_aux_ctx(ctx, match, output, data);
4443next:
4444 put_cpu_ptr(pmu->pmu_cpu_context);
4445 }
4446
4447 if (task_ctx) {
4448 preempt_disable();
4449 perf_event_aux_ctx(task_ctx, match, output, data);
4450 preempt_enable();
4451 }
4452 rcu_read_unlock();
4453}
4454
4397/* 4455/*
4398 * task tracking -- fork/exit 4456 * task tracking -- fork/exit
4399 * 4457 *
@@ -4416,8 +4474,9 @@ struct perf_task_event {
4416}; 4474};
4417 4475
4418static void perf_event_task_output(struct perf_event *event, 4476static void perf_event_task_output(struct perf_event *event,
4419 struct perf_task_event *task_event) 4477 void *data)
4420{ 4478{
4479 struct perf_task_event *task_event = data;
4421 struct perf_output_handle handle; 4480 struct perf_output_handle handle;
4422 struct perf_sample_data sample; 4481 struct perf_sample_data sample;
4423 struct task_struct *task = task_event->task; 4482 struct task_struct *task = task_event->task;
@@ -4445,62 +4504,11 @@ out:
4445 task_event->event_id.header.size = size; 4504 task_event->event_id.header.size = size;
4446} 4505}
4447 4506
4448static int perf_event_task_match(struct perf_event *event) 4507static int perf_event_task_match(struct perf_event *event,
4449{ 4508 void *data __maybe_unused)
4450 if (event->state < PERF_EVENT_STATE_INACTIVE)
4451 return 0;
4452
4453 if (!event_filter_match(event))
4454 return 0;
4455
4456 if (event->attr.comm || event->attr.mmap ||
4457 event->attr.mmap_data || event->attr.task)
4458 return 1;
4459
4460 return 0;
4461}
4462
4463static void perf_event_task_ctx(struct perf_event_context *ctx,
4464 struct perf_task_event *task_event)
4465{ 4509{
4466 struct perf_event *event; 4510 return event->attr.comm || event->attr.mmap ||
4467 4511 event->attr.mmap_data || event->attr.task;
4468 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4469 if (perf_event_task_match(event))
4470 perf_event_task_output(event, task_event);
4471 }
4472}
4473
4474static void perf_event_task_event(struct perf_task_event *task_event)
4475{
4476 struct perf_cpu_context *cpuctx;
4477 struct perf_event_context *ctx;
4478 struct pmu *pmu;
4479 int ctxn;
4480
4481 rcu_read_lock();
4482 list_for_each_entry_rcu(pmu, &pmus, entry) {
4483 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4484 if (cpuctx->unique_pmu != pmu)
4485 goto next;
4486 perf_event_task_ctx(&cpuctx->ctx, task_event);
4487
4488 ctx = task_event->task_ctx;
4489 if (!ctx) {
4490 ctxn = pmu->task_ctx_nr;
4491 if (ctxn < 0)
4492 goto next;
4493 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4494 if (ctx)
4495 perf_event_task_ctx(ctx, task_event);
4496 }
4497next:
4498 put_cpu_ptr(pmu->pmu_cpu_context);
4499 }
4500 if (task_event->task_ctx)
4501 perf_event_task_ctx(task_event->task_ctx, task_event);
4502
4503 rcu_read_unlock();
4504} 4512}
4505 4513
4506static void perf_event_task(struct task_struct *task, 4514static void perf_event_task(struct task_struct *task,
@@ -4531,7 +4539,10 @@ static void perf_event_task(struct task_struct *task,
4531 }, 4539 },
4532 }; 4540 };
4533 4541
4534 perf_event_task_event(&task_event); 4542 perf_event_aux(perf_event_task_match,
4543 perf_event_task_output,
4544 &task_event,
4545 task_ctx);
4535} 4546}
4536 4547
4537void perf_event_fork(struct task_struct *task) 4548void perf_event_fork(struct task_struct *task)
@@ -4557,8 +4568,9 @@ struct perf_comm_event {
4557}; 4568};
4558 4569
4559static void perf_event_comm_output(struct perf_event *event, 4570static void perf_event_comm_output(struct perf_event *event,
4560 struct perf_comm_event *comm_event) 4571 void *data)
4561{ 4572{
4573 struct perf_comm_event *comm_event = data;
4562 struct perf_output_handle handle; 4574 struct perf_output_handle handle;
4563 struct perf_sample_data sample; 4575 struct perf_sample_data sample;
4564 int size = comm_event->event_id.header.size; 4576 int size = comm_event->event_id.header.size;
@@ -4585,39 +4597,16 @@ out:
4585 comm_event->event_id.header.size = size; 4597 comm_event->event_id.header.size = size;
4586} 4598}
4587 4599
4588static int perf_event_comm_match(struct perf_event *event) 4600static int perf_event_comm_match(struct perf_event *event,
4589{ 4601 void *data __maybe_unused)
4590 if (event->state < PERF_EVENT_STATE_INACTIVE)
4591 return 0;
4592
4593 if (!event_filter_match(event))
4594 return 0;
4595
4596 if (event->attr.comm)
4597 return 1;
4598
4599 return 0;
4600}
4601
4602static void perf_event_comm_ctx(struct perf_event_context *ctx,
4603 struct perf_comm_event *comm_event)
4604{ 4602{
4605 struct perf_event *event; 4603 return event->attr.comm;
4606
4607 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4608 if (perf_event_comm_match(event))
4609 perf_event_comm_output(event, comm_event);
4610 }
4611} 4604}
4612 4605
4613static void perf_event_comm_event(struct perf_comm_event *comm_event) 4606static void perf_event_comm_event(struct perf_comm_event *comm_event)
4614{ 4607{
4615 struct perf_cpu_context *cpuctx;
4616 struct perf_event_context *ctx;
4617 char comm[TASK_COMM_LEN]; 4608 char comm[TASK_COMM_LEN];
4618 unsigned int size; 4609 unsigned int size;
4619 struct pmu *pmu;
4620 int ctxn;
4621 4610
4622 memset(comm, 0, sizeof(comm)); 4611 memset(comm, 0, sizeof(comm));
4623 strlcpy(comm, comm_event->task->comm, sizeof(comm)); 4612 strlcpy(comm, comm_event->task->comm, sizeof(comm));
@@ -4627,24 +4616,11 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
4627 comm_event->comm_size = size; 4616 comm_event->comm_size = size;
4628 4617
4629 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size; 4618 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4630 rcu_read_lock();
4631 list_for_each_entry_rcu(pmu, &pmus, entry) {
4632 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4633 if (cpuctx->unique_pmu != pmu)
4634 goto next;
4635 perf_event_comm_ctx(&cpuctx->ctx, comm_event);
4636 4619
4637 ctxn = pmu->task_ctx_nr; 4620 perf_event_aux(perf_event_comm_match,
4638 if (ctxn < 0) 4621 perf_event_comm_output,
4639 goto next; 4622 comm_event,
4640 4623 NULL);
4641 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4642 if (ctx)
4643 perf_event_comm_ctx(ctx, comm_event);
4644next:
4645 put_cpu_ptr(pmu->pmu_cpu_context);
4646 }
4647 rcu_read_unlock();
4648} 4624}
4649 4625
4650void perf_event_comm(struct task_struct *task) 4626void perf_event_comm(struct task_struct *task)
@@ -4706,8 +4682,9 @@ struct perf_mmap_event {
4706}; 4682};
4707 4683
4708static void perf_event_mmap_output(struct perf_event *event, 4684static void perf_event_mmap_output(struct perf_event *event,
4709 struct perf_mmap_event *mmap_event) 4685 void *data)
4710{ 4686{
4687 struct perf_mmap_event *mmap_event = data;
4711 struct perf_output_handle handle; 4688 struct perf_output_handle handle;
4712 struct perf_sample_data sample; 4689 struct perf_sample_data sample;
4713 int size = mmap_event->event_id.header.size; 4690 int size = mmap_event->event_id.header.size;
@@ -4734,46 +4711,24 @@ out:
4734} 4711}
4735 4712
4736static int perf_event_mmap_match(struct perf_event *event, 4713static int perf_event_mmap_match(struct perf_event *event,
4737 struct perf_mmap_event *mmap_event, 4714 void *data)
4738 int executable)
4739{
4740 if (event->state < PERF_EVENT_STATE_INACTIVE)
4741 return 0;
4742
4743 if (!event_filter_match(event))
4744 return 0;
4745
4746 if ((!executable && event->attr.mmap_data) ||
4747 (executable && event->attr.mmap))
4748 return 1;
4749
4750 return 0;
4751}
4752
4753static void perf_event_mmap_ctx(struct perf_event_context *ctx,
4754 struct perf_mmap_event *mmap_event,
4755 int executable)
4756{ 4715{
4757 struct perf_event *event; 4716 struct perf_mmap_event *mmap_event = data;
4717 struct vm_area_struct *vma = mmap_event->vma;
4718 int executable = vma->vm_flags & VM_EXEC;
4758 4719
4759 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { 4720 return (!executable && event->attr.mmap_data) ||
4760 if (perf_event_mmap_match(event, mmap_event, executable)) 4721 (executable && event->attr.mmap);
4761 perf_event_mmap_output(event, mmap_event);
4762 }
4763} 4722}
4764 4723
4765static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) 4724static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4766{ 4725{
4767 struct perf_cpu_context *cpuctx;
4768 struct perf_event_context *ctx;
4769 struct vm_area_struct *vma = mmap_event->vma; 4726 struct vm_area_struct *vma = mmap_event->vma;
4770 struct file *file = vma->vm_file; 4727 struct file *file = vma->vm_file;
4771 unsigned int size; 4728 unsigned int size;
4772 char tmp[16]; 4729 char tmp[16];
4773 char *buf = NULL; 4730 char *buf = NULL;
4774 const char *name; 4731 const char *name;
4775 struct pmu *pmu;
4776 int ctxn;
4777 4732
4778 memset(tmp, 0, sizeof(tmp)); 4733 memset(tmp, 0, sizeof(tmp));
4779 4734
@@ -4829,27 +4784,10 @@ got_name:
4829 4784
4830 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size; 4785 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4831 4786
4832 rcu_read_lock(); 4787 perf_event_aux(perf_event_mmap_match,
4833 list_for_each_entry_rcu(pmu, &pmus, entry) { 4788 perf_event_mmap_output,
4834 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); 4789 mmap_event,
4835 if (cpuctx->unique_pmu != pmu) 4790 NULL);
4836 goto next;
4837 perf_event_mmap_ctx(&cpuctx->ctx, mmap_event,
4838 vma->vm_flags & VM_EXEC);
4839
4840 ctxn = pmu->task_ctx_nr;
4841 if (ctxn < 0)
4842 goto next;
4843
4844 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4845 if (ctx) {
4846 perf_event_mmap_ctx(ctx, mmap_event,
4847 vma->vm_flags & VM_EXEC);
4848 }
4849next:
4850 put_cpu_ptr(pmu->pmu_cpu_context);
4851 }
4852 rcu_read_unlock();
4853 4791
4854 kfree(buf); 4792 kfree(buf);
4855} 4793}
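perf_event_aux() factors the per-pmu and per-context walk out of the task, comm and mmap paths, so each record type is reduced to a match callback plus an output callback. A hypothetical further side-band record would follow the same shape (everything below is illustrative and would live in kernel/events/core.c, where perf_event_aux() is static):

	struct perf_foo_event {
		struct task_struct	*task;
		/* event_id header and payload fields ... */
	};

	static int perf_event_foo_match(struct perf_event *event,
					void *data __maybe_unused)
	{
		/* State and filter checks already happen in perf_event_aux_ctx();
		 * only the record-specific attribute test is left. */
		return event->attr.comm;	/* stand-in for a real attr bit */
	}

	static void perf_event_foo_output(struct perf_event *event, void *data)
	{
		struct perf_foo_event *foo = data;

		/* perf_output_begin()/perf_output_end() around the record ... */
		(void)foo;
	}

	static void perf_event_foo(struct perf_foo_event *foo)
	{
		perf_event_aux(perf_event_foo_match,
			       perf_event_foo_output,
			       foo, NULL);
	}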
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 1296e72e4161..8241906c4b61 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -569,6 +569,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
569 int retval = 0; 569 int retval = 0;
570 570
571 helper_lock(); 571 helper_lock();
572 if (!sub_info->path) {
573 retval = -EINVAL;
574 goto out;
575 }
576
572 if (sub_info->path[0] == '\0') 577 if (sub_info->path[0] == '\0')
573 goto out; 578 goto out;
574 579
diff --git a/kernel/module.c b/kernel/module.c
index b049939177f6..cab4bce49c23 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2431,10 +2431,10 @@ static void kmemleak_load_module(const struct module *mod,
2431 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL); 2431 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2432 2432
2433 for (i = 1; i < info->hdr->e_shnum; i++) { 2433 for (i = 1; i < info->hdr->e_shnum; i++) {
2434 const char *name = info->secstrings + info->sechdrs[i].sh_name; 2434 /* Scan all writable sections that are not executable */
2435 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC)) 2435 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2436 continue; 2436 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2437 if (!strstarts(name, ".data") && !strstarts(name, ".bss")) 2437 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2438 continue; 2438 continue;
2439 2439
2440 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr, 2440 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
@@ -2769,24 +2769,11 @@ static void find_module_sections(struct module *mod, struct load_info *info)
2769 mod->trace_events = section_objs(info, "_ftrace_events", 2769 mod->trace_events = section_objs(info, "_ftrace_events",
2770 sizeof(*mod->trace_events), 2770 sizeof(*mod->trace_events),
2771 &mod->num_trace_events); 2771 &mod->num_trace_events);
2772 /*
2773 * This section contains pointers to allocated objects in the trace
2774 * code and not scanning it leads to false positives.
2775 */
2776 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2777 mod->num_trace_events, GFP_KERNEL);
2778#endif 2772#endif
2779#ifdef CONFIG_TRACING 2773#ifdef CONFIG_TRACING
2780 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt", 2774 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2781 sizeof(*mod->trace_bprintk_fmt_start), 2775 sizeof(*mod->trace_bprintk_fmt_start),
2782 &mod->num_trace_bprintk_fmt); 2776 &mod->num_trace_bprintk_fmt);
2783 /*
2784 * This section contains pointers to allocated objects in the trace
2785 * code and not scanning it leads to false positives.
2786 */
2787 kmemleak_scan_area(mod->trace_bprintk_fmt_start,
2788 sizeof(*mod->trace_bprintk_fmt_start) *
2789 mod->num_trace_bprintk_fmt, GFP_KERNEL);
2790#endif 2777#endif
2791#ifdef CONFIG_FTRACE_MCOUNT_RECORD 2778#ifdef CONFIG_FTRACE_MCOUNT_RECORD
2792 /* sechdrs[0].sh_size is always zero */ 2779 /* sechdrs[0].sh_size is always zero */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 170814dc418f..3db5a375d8dd 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -88,7 +88,7 @@ static void __init rcu_bootup_announce_oddness(void)
88#ifdef CONFIG_RCU_NOCB_CPU 88#ifdef CONFIG_RCU_NOCB_CPU
89#ifndef CONFIG_RCU_NOCB_CPU_NONE 89#ifndef CONFIG_RCU_NOCB_CPU_NONE
90 if (!have_rcu_nocb_mask) { 90 if (!have_rcu_nocb_mask) {
91 alloc_bootmem_cpumask_var(&rcu_nocb_mask); 91 zalloc_cpumask_var(&rcu_nocb_mask, GFP_KERNEL);
92 have_rcu_nocb_mask = true; 92 have_rcu_nocb_mask = true;
93 } 93 }
94#ifdef CONFIG_RCU_NOCB_CPU_ZERO 94#ifdef CONFIG_RCU_NOCB_CPU_ZERO
@@ -1667,7 +1667,7 @@ int rcu_needs_cpu(int cpu, unsigned long *dj)
1667 rdtp->last_accelerate = jiffies; 1667 rdtp->last_accelerate = jiffies;
1668 1668
1669 /* Request timer delay depending on laziness, and round. */ 1669 /* Request timer delay depending on laziness, and round. */
1670 if (rdtp->all_lazy) { 1670 if (!rdtp->all_lazy) {
1671 *dj = round_up(rcu_idle_gp_delay + jiffies, 1671 *dj = round_up(rcu_idle_gp_delay + jiffies,
1672 rcu_idle_gp_delay) - jiffies; 1672 rcu_idle_gp_delay) - jiffies;
1673 } else { 1673 } else {
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index e4c07b0692bb..70f27e89012b 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,11 +12,6 @@ config CLOCKSOURCE_WATCHDOG
12config ARCH_CLOCKSOURCE_DATA 12config ARCH_CLOCKSOURCE_DATA
13 bool 13 bool
14 14
15# Platforms has a persistent clock
16config ALWAYS_USE_PERSISTENT_CLOCK
17 bool
18 default n
19
20# Timekeeping vsyscall support 15# Timekeeping vsyscall support
21config GENERIC_TIME_VSYSCALL 16config GENERIC_TIME_VSYSCALL
22 bool 17 bool
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 206bbfb34e09..24938d577669 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -786,11 +786,11 @@ bool tick_broadcast_oneshot_available(void)
786 786
787void __init tick_broadcast_init(void) 787void __init tick_broadcast_init(void)
788{ 788{
789 alloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT); 789 zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
790 alloc_cpumask_var(&tmpmask, GFP_NOWAIT); 790 zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
791#ifdef CONFIG_TICK_ONESHOT 791#ifdef CONFIG_TICK_ONESHOT
792 alloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT); 792 zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
793 alloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT); 793 zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
794 alloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT); 794 zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
795#endif 795#endif
796} 796}
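The switch to zalloc_cpumask_var() matters when CONFIG_CPUMASK_OFFSTACK=y, where the mask is heap-allocated and would otherwise start out with stale bits. The same pattern in isolation (my_mask and my_init_mask are illustrative):

	#include <linux/cpumask.h>
	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/slab.h>

	static cpumask_var_t my_mask;

	static int __init my_init_mask(void)
	{
		/* zalloc_* guarantees an empty mask even when it is kmalloc'ed. */
		if (!zalloc_cpumask_var(&my_mask, GFP_KERNEL))
			return -ENOMEM;
		return 0;
	}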
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index bc67d4245e1d..f4208138fbf4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -717,6 +717,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
717 if (unlikely(!cpu_online(cpu))) { 717 if (unlikely(!cpu_online(cpu))) {
718 if (cpu == tick_do_timer_cpu) 718 if (cpu == tick_do_timer_cpu)
719 tick_do_timer_cpu = TICK_DO_TIMER_NONE; 719 tick_do_timer_cpu = TICK_DO_TIMER_NONE;
720 return false;
720 } 721 }
721 722
722 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) 723 if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -1168,7 +1169,7 @@ void tick_cancel_sched_timer(int cpu)
1168 hrtimer_cancel(&ts->sched_timer); 1169 hrtimer_cancel(&ts->sched_timer);
1169# endif 1170# endif
1170 1171
1171 ts->nohz_mode = NOHZ_MODE_INACTIVE; 1172 memset(ts, 0, sizeof(*ts));
1172} 1173}
1173#endif 1174#endif
1174 1175
diff --git a/kernel/timer.c b/kernel/timer.c
index a860bba34412..15ffdb3f1948 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1539,12 +1539,12 @@ static int __cpuinit init_timers_cpu(int cpu)
1539 boot_done = 1; 1539 boot_done = 1;
1540 base = &boot_tvec_bases; 1540 base = &boot_tvec_bases;
1541 } 1541 }
1542 spin_lock_init(&base->lock);
1542 tvec_base_done[cpu] = 1; 1543 tvec_base_done[cpu] = 1;
1543 } else { 1544 } else {
1544 base = per_cpu(tvec_bases, cpu); 1545 base = per_cpu(tvec_bases, cpu);
1545 } 1546 }
1546 1547
1547 spin_lock_init(&base->lock);
1548 1548
1549 for (j = 0; j < TVN_SIZE; j++) { 1549 for (j = 0; j < TVN_SIZE; j++) {
1550 INIT_LIST_HEAD(base->tv5.vec + j); 1550 INIT_LIST_HEAD(base->tv5.vec + j);
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 7a0cf68027cc..27963e2bf4bf 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2072,8 +2072,10 @@ event_enable_func(struct ftrace_hash *hash,
2072 out_reg: 2072 out_reg:
2073 /* Don't let event modules unload while probe registered */ 2073 /* Don't let event modules unload while probe registered */
2074 ret = try_module_get(file->event_call->mod); 2074 ret = try_module_get(file->event_call->mod);
2075 if (!ret) 2075 if (!ret) {
2076 ret = -EBUSY;
2076 goto out_free; 2077 goto out_free;
2078 }
2077 2079
2078 ret = __ftrace_event_enable_disable(file, 1, 1); 2080 ret = __ftrace_event_enable_disable(file, 1, 1);
2079 if (ret < 0) 2081 if (ret < 0)
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index a6361178de5a..e1b653f7e1ca 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -750,7 +750,11 @@ static int filter_set_pred(struct event_filter *filter,
750 750
751static void __free_preds(struct event_filter *filter) 751static void __free_preds(struct event_filter *filter)
752{ 752{
753 int i;
754
753 if (filter->preds) { 755 if (filter->preds) {
756 for (i = 0; i < filter->n_preds; i++)
757 kfree(filter->preds[i].ops);
754 kfree(filter->preds); 758 kfree(filter->preds);
755 filter->preds = NULL; 759 filter->preds = NULL;
756 } 760 }
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 636d45fe69b3..9f46e98ba8f2 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -35,7 +35,7 @@ struct trace_probe {
35 const char *symbol; /* symbol name */ 35 const char *symbol; /* symbol name */
36 struct ftrace_event_class class; 36 struct ftrace_event_class class;
37 struct ftrace_event_call call; 37 struct ftrace_event_call call;
38 struct ftrace_event_file **files; 38 struct ftrace_event_file * __rcu *files;
39 ssize_t size; /* trace entry size */ 39 ssize_t size; /* trace entry size */
40 unsigned int nr_args; 40 unsigned int nr_args;
41 struct probe_arg args[]; 41 struct probe_arg args[];
@@ -185,9 +185,14 @@ static struct trace_probe *find_trace_probe(const char *event,
185 185
186static int trace_probe_nr_files(struct trace_probe *tp) 186static int trace_probe_nr_files(struct trace_probe *tp)
187{ 187{
188 struct ftrace_event_file **file = tp->files; 188 struct ftrace_event_file **file;
189 int ret = 0; 189 int ret = 0;
190 190
191 /*
192 * Since all tp->files updater is protected by probe_enable_lock,
193 * we don't need to lock an rcu_read_lock.
194 */
195 file = rcu_dereference_raw(tp->files);
191 if (file) 196 if (file)
192 while (*(file++)) 197 while (*(file++))
193 ret++; 198 ret++;
@@ -209,9 +214,10 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
209 mutex_lock(&probe_enable_lock); 214 mutex_lock(&probe_enable_lock);
210 215
211 if (file) { 216 if (file) {
212 struct ftrace_event_file **new, **old = tp->files; 217 struct ftrace_event_file **new, **old;
213 int n = trace_probe_nr_files(tp); 218 int n = trace_probe_nr_files(tp);
214 219
220 old = rcu_dereference_raw(tp->files);
215 /* 1 is for new one and 1 is for stopper */ 221 /* 1 is for new one and 1 is for stopper */
216 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *), 222 new = kzalloc((n + 2) * sizeof(struct ftrace_event_file *),
217 GFP_KERNEL); 223 GFP_KERNEL);
@@ -251,11 +257,17 @@ enable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
251static int 257static int
252trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file) 258trace_probe_file_index(struct trace_probe *tp, struct ftrace_event_file *file)
253{ 259{
260 struct ftrace_event_file **files;
254 int i; 261 int i;
255 262
256 if (tp->files) { 263 /*
257 for (i = 0; tp->files[i]; i++) 264 * Since all tp->files updater is protected by probe_enable_lock,
258 if (tp->files[i] == file) 265 * we don't need to lock an rcu_read_lock.
266 */
267 files = rcu_dereference_raw(tp->files);
268 if (files) {
269 for (i = 0; files[i]; i++)
270 if (files[i] == file)
259 return i; 271 return i;
260 } 272 }
261 273
@@ -274,10 +286,11 @@ disable_trace_probe(struct trace_probe *tp, struct ftrace_event_file *file)
274 mutex_lock(&probe_enable_lock); 286 mutex_lock(&probe_enable_lock);
275 287
276 if (file) { 288 if (file) {
277 struct ftrace_event_file **new, **old = tp->files; 289 struct ftrace_event_file **new, **old;
278 int n = trace_probe_nr_files(tp); 290 int n = trace_probe_nr_files(tp);
279 int i, j; 291 int i, j;
280 292
293 old = rcu_dereference_raw(tp->files);
281 if (n == 0 || trace_probe_file_index(tp, file) < 0) { 294 if (n == 0 || trace_probe_file_index(tp, file) < 0) {
282 ret = -EINVAL; 295 ret = -EINVAL;
283 goto out_unlock; 296 goto out_unlock;
@@ -872,9 +885,16 @@ __kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs,
872static __kprobes void 885static __kprobes void
873kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs) 886kprobe_trace_func(struct trace_probe *tp, struct pt_regs *regs)
874{ 887{
875 struct ftrace_event_file **file = tp->files; 888 /*
889 * Note: preempt is already disabled around the kprobe handler.
890 * However, we still need an smp_read_barrier_depends() corresponding
891 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
892 */
893 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
894
895 if (unlikely(!file))
896 return;
876 897
877 /* Note: preempt is already disabled around the kprobe handler */
878 while (*file) { 898 while (*file) {
879 __kprobe_trace_func(tp, regs, *file); 899 __kprobe_trace_func(tp, regs, *file);
880 file++; 900 file++;
@@ -925,9 +945,16 @@ static __kprobes void
925kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri, 945kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
926 struct pt_regs *regs) 946 struct pt_regs *regs)
927{ 947{
928 struct ftrace_event_file **file = tp->files; 948 /*
949 * Note: preempt is already disabled around the kprobe handler.
950 * However, we still need an smp_read_barrier_depends() corresponding
951 * to smp_wmb() in rcu_assign_pointer() to access the pointer.
952 */
953 struct ftrace_event_file **file = rcu_dereference_raw(tp->files);
954
955 if (unlikely(!file))
956 return;
929 957
930 /* Note: preempt is already disabled around the kprobe handler */
931 while (*file) { 958 while (*file) {
932 __kretprobe_trace_func(tp, ri, regs, *file); 959 __kretprobe_trace_func(tp, ri, regs, *file);
933 file++; 960 file++;
@@ -935,7 +962,7 @@ kretprobe_trace_func(struct trace_probe *tp, struct kretprobe_instance *ri,
935} 962}
936 963
937/* Event entry printers */ 964/* Event entry printers */
938enum print_line_t 965static enum print_line_t
939print_kprobe_event(struct trace_iterator *iter, int flags, 966print_kprobe_event(struct trace_iterator *iter, int flags,
940 struct trace_event *event) 967 struct trace_event *event)
941{ 968{
@@ -971,7 +998,7 @@ partial:
971 return TRACE_TYPE_PARTIAL_LINE; 998 return TRACE_TYPE_PARTIAL_LINE;
972} 999}
973 1000
974enum print_line_t 1001static enum print_line_t
975print_kretprobe_event(struct trace_iterator *iter, int flags, 1002print_kretprobe_event(struct trace_iterator *iter, int flags,
976 struct trace_event *event) 1003 struct trace_event *event)
977{ 1004{
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4aa9f5bc6b2d..ee8e29a2320c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -296,7 +296,7 @@ static DEFINE_HASHTABLE(unbound_pool_hash, UNBOUND_POOL_HASH_ORDER);
296static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS]; 296static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
297 297
298struct workqueue_struct *system_wq __read_mostly; 298struct workqueue_struct *system_wq __read_mostly;
299EXPORT_SYMBOL_GPL(system_wq); 299EXPORT_SYMBOL(system_wq);
300struct workqueue_struct *system_highpri_wq __read_mostly; 300struct workqueue_struct *system_highpri_wq __read_mostly;
301EXPORT_SYMBOL_GPL(system_highpri_wq); 301EXPORT_SYMBOL_GPL(system_highpri_wq);
302struct workqueue_struct *system_long_wq __read_mostly; 302struct workqueue_struct *system_long_wq __read_mostly;
@@ -1411,7 +1411,7 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq,
1411 local_irq_restore(flags); 1411 local_irq_restore(flags);
1412 return ret; 1412 return ret;
1413} 1413}
1414EXPORT_SYMBOL_GPL(queue_work_on); 1414EXPORT_SYMBOL(queue_work_on);
1415 1415
1416void delayed_work_timer_fn(unsigned long __data) 1416void delayed_work_timer_fn(unsigned long __data)
1417{ 1417{
@@ -1485,7 +1485,7 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
1485 local_irq_restore(flags); 1485 local_irq_restore(flags);
1486 return ret; 1486 return ret;
1487} 1487}
1488EXPORT_SYMBOL_GPL(queue_delayed_work_on); 1488EXPORT_SYMBOL(queue_delayed_work_on);
1489 1489
1490/** 1490/**
1491 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU 1491 * mod_delayed_work_on - modify delay of or queue a delayed work on specific CPU
@@ -2059,6 +2059,7 @@ static bool manage_workers(struct worker *worker)
2059 if (unlikely(!mutex_trylock(&pool->manager_mutex))) { 2059 if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
2060 spin_unlock_irq(&pool->lock); 2060 spin_unlock_irq(&pool->lock);
2061 mutex_lock(&pool->manager_mutex); 2061 mutex_lock(&pool->manager_mutex);
2062 spin_lock_irq(&pool->lock);
2062 ret = true; 2063 ret = true;
2063 } 2064 }
2064 2065
@@ -4311,6 +4312,12 @@ bool current_is_workqueue_rescuer(void)
4311 * no synchronization around this function and the test result is 4312 * no synchronization around this function and the test result is
4312 * unreliable and only useful as advisory hints or for debugging. 4313 * unreliable and only useful as advisory hints or for debugging.
4313 * 4314 *
4315 * If @cpu is WORK_CPU_UNBOUND, the test is performed on the local CPU.
4316 * Note that both per-cpu and unbound workqueues may be associated with
4317 * multiple pool_workqueues which have separate congested states. A
4318 * workqueue being congested on one CPU doesn't mean the workqueue is also
4319 * contested on other CPUs / NUMA nodes.
4320 *
4314 * RETURNS: 4321 * RETURNS:
4315 * %true if congested, %false otherwise. 4322 * %true if congested, %false otherwise.
4316 */ 4323 */
@@ -4321,6 +4328,9 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq)
4321 4328
4322 rcu_read_lock_sched(); 4329 rcu_read_lock_sched();
4323 4330
4331 if (cpu == WORK_CPU_UNBOUND)
4332 cpu = smp_processor_id();
4333
4324 if (!(wq->flags & WQ_UNBOUND)) 4334 if (!(wq->flags & WQ_UNBOUND))
4325 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu); 4335 pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
4326 else 4336 else
@@ -4895,7 +4905,8 @@ static void __init wq_numa_init(void)
4895 BUG_ON(!tbl); 4905 BUG_ON(!tbl);
4896 4906
4897 for_each_node(node) 4907 for_each_node(node)
4898 BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, node)); 4908 BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
4909 node_online(node) ? node : NUMA_NO_NODE));
4899 4910
4900 for_each_possible_cpu(cpu) { 4911 for_each_possible_cpu(cpu) {
4901 node = cpu_to_node(cpu); 4912 node = cpu_to_node(cpu);
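Besides the EXPORT_SYMBOL relaxations, workqueue_congested() now accepts WORK_CPU_UNBOUND as "the CPU I am currently running on", which is how most advisory callers want to use it anyway. For example (should_throttle is made up):

	#include <linux/workqueue.h>

	/* Advisory only: the result may already be stale when it is used. */
	static bool should_throttle(struct workqueue_struct *wq)
	{
		return workqueue_congested(WORK_CPU_UNBOUND, wq);
	}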
diff --git a/lib/Makefile b/lib/Makefile
index e9c52e1b853a..c55a037a354e 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@ lib-y += kobject.o klist.o
23 23
24obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ 24obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
25 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 25 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
26 gcd.o lcm.o list_sort.o uuid.o flex_array.o \ 26 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o \
27 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o 27 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o
28obj-y += string_helpers.o 28obj-y += string_helpers.o
29obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 29obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
diff --git a/lib/iovec.c b/lib/iovec.c
new file mode 100644
index 000000000000..454baa88bf27
--- /dev/null
+++ b/lib/iovec.c
@@ -0,0 +1,53 @@
1#include <linux/uaccess.h>
2#include <linux/export.h>
3#include <linux/uio.h>
4
5/*
6 * Copy iovec to kernel. Returns -EFAULT on error.
7 *
8 * Note: this modifies the original iovec.
9 */
10
11int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
12{
13 while (len > 0) {
14 if (iov->iov_len) {
15 int copy = min_t(unsigned int, len, iov->iov_len);
16 if (copy_from_user(kdata, iov->iov_base, copy))
17 return -EFAULT;
18 len -= copy;
19 kdata += copy;
20 iov->iov_base += copy;
21 iov->iov_len -= copy;
22 }
23 iov++;
24 }
25
26 return 0;
27}
28EXPORT_SYMBOL(memcpy_fromiovec);
29
30/*
31 * Copy kernel to iovec. Returns -EFAULT on error.
32 *
33 * Note: this modifies the original iovec.
34 */
35
36int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
37{
38 while (len > 0) {
39 if (iov->iov_len) {
40 int copy = min_t(unsigned int, iov->iov_len, len);
41 if (copy_to_user(iov->iov_base, kdata, copy))
42 return -EFAULT;
43 kdata += copy;
44 len -= copy;
45 iov->iov_len -= copy;
46 iov->iov_base += copy;
47 }
48 iov++;
49 }
50
51 return 0;
52}
53EXPORT_SYMBOL(memcpy_toiovec);
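These helpers now live in lib/ so code outside the networking core can use them. One thing to keep in mind when calling them, shown with an invented consumer: both routines advance the iovec in place as they copy.

	#include <linux/errno.h>
	#include <linux/uio.h>

	/* Pull a fixed-size header out of a user iovec; on return the iovec
	 * has been advanced past the header and points at the payload. */
	static int read_header(struct iovec *iov, void *hdr, int hdr_len)
	{
		if (memcpy_fromiovec(hdr, iov, hdr_len))
			return -EFAULT;
		return 0;
	}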
diff --git a/lib/klist.c b/lib/klist.c
index 0874e41609a6..358a368a2947 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -193,10 +193,10 @@ static void klist_release(struct kref *kref)
193 if (waiter->node != n) 193 if (waiter->node != n)
194 continue; 194 continue;
195 195
196 list_del(&waiter->list);
196 waiter->woken = 1; 197 waiter->woken = 1;
197 mb(); 198 mb();
198 wake_up_process(waiter->process); 199 wake_up_process(waiter->process);
199 list_del(&waiter->list);
200 } 200 }
201 spin_unlock(&klist_remove_lock); 201 spin_unlock(&klist_remove_lock);
202 knode_set_klist(n, NULL); 202 knode_set_klist(n, NULL);
diff --git a/lib/mpi/longlong.h b/lib/mpi/longlong.h
index 095ab157a521..d411355f238e 100644
--- a/lib/mpi/longlong.h
+++ b/lib/mpi/longlong.h
@@ -318,7 +318,8 @@ extern UDItype __udiv_qrnnd();
318 "rM" ((USItype)(bh)), \ 318 "rM" ((USItype)(bh)), \
319 "rM" ((USItype)(al)), \ 319 "rM" ((USItype)(al)), \
320 "rM" ((USItype)(bl))) 320 "rM" ((USItype)(bl)))
321#if defined(_PA_RISC1_1) 321#if 0 && defined(_PA_RISC1_1)
322/* xmpyu uses floating point register which is not allowed in Linux kernel. */
322#define umul_ppmm(wh, wl, u, v) \ 323#define umul_ppmm(wh, wl, u, v) \
323do { \ 324do { \
324 union {UDItype __ll; \ 325 union {UDItype __ll; \
@@ -337,7 +338,7 @@ do { \
337#define UMUL_TIME 40 338#define UMUL_TIME 40
338#define UDIV_TIME 80 339#define UDIV_TIME 80
339#endif 340#endif
340#ifndef LONGLONG_STANDALONE 341#if 0 /* #ifndef LONGLONG_STANDALONE */
341#define udiv_qrnnd(q, r, n1, n0, d) \ 342#define udiv_qrnnd(q, r, n1, n0, d) \
342do { USItype __r; \ 343do { USItype __r; \
343 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ 344 (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 03a89a2f464b..362c329b83fe 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2325,7 +2325,12 @@ static void collapse_huge_page(struct mm_struct *mm,
2325 pte_unmap(pte); 2325 pte_unmap(pte);
2326 spin_lock(&mm->page_table_lock); 2326 spin_lock(&mm->page_table_lock);
2327 BUG_ON(!pmd_none(*pmd)); 2327 BUG_ON(!pmd_none(*pmd));
2328 set_pmd_at(mm, address, pmd, _pmd); 2328 /*
2329 * We can only use set_pmd_at when establishing
2330 * hugepmds and never for establishing regular pmds that
2331 * points to regular pagetables. Use pmd_populate for that
2332 */
2333 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2329 spin_unlock(&mm->page_table_lock); 2334 spin_unlock(&mm->page_table_lock);
2330 anon_vma_unlock_write(vma->anon_vma); 2335 anon_vma_unlock_write(vma->anon_vma);
2331 goto out; 2336 goto out;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index cb1c9dedf9b6..010d6c14129a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4108,8 +4108,6 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype,
4108 if (mem_cgroup_disabled()) 4108 if (mem_cgroup_disabled())
4109 return NULL; 4109 return NULL;
4110 4110
4111 VM_BUG_ON(PageSwapCache(page));
4112
4113 if (PageTransHuge(page)) { 4111 if (PageTransHuge(page)) {
4114 nr_pages <<= compound_order(page); 4112 nr_pages <<= compound_order(page);
4115 VM_BUG_ON(!PageTransHuge(page)); 4113 VM_BUG_ON(!PageTransHuge(page));
@@ -4205,6 +4203,18 @@ void mem_cgroup_uncharge_page(struct page *page)
4205 if (page_mapped(page)) 4203 if (page_mapped(page))
4206 return; 4204 return;
4207 VM_BUG_ON(page->mapping && !PageAnon(page)); 4205 VM_BUG_ON(page->mapping && !PageAnon(page));
4206 /*
4207 * If the page is in swap cache, uncharge should be deferred
4208 * to the swap path, which also properly accounts swap usage
4209 * and handles memcg lifetime.
4210 *
4211 * Note that this check is not stable and reclaim may add the
4212 * page to swap cache at any time after this. However, if the
4213 * page is not in swap cache by the time page->mapcount hits
4214 * 0, there won't be any page table references to the swap
4215 * slot, and reclaim will free it and not actually write the
4216 * page to disk.
4217 */
4208 if (PageSwapCache(page)) 4218 if (PageSwapCache(page))
4209 return; 4219 return;
4210 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false); 4220 __mem_cgroup_uncharge_common(page, MEM_CGROUP_CHARGE_TYPE_ANON, false);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a221fac1f47d..1ad92b46753e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -720,9 +720,12 @@ int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
720 start = phys_start_pfn << PAGE_SHIFT; 720 start = phys_start_pfn << PAGE_SHIFT;
721 size = nr_pages * PAGE_SIZE; 721 size = nr_pages * PAGE_SIZE;
722 ret = release_mem_region_adjustable(&iomem_resource, start, size); 722 ret = release_mem_region_adjustable(&iomem_resource, start, size);
723 if (ret) 723 if (ret) {
724 pr_warn("Unable to release resource <%016llx-%016llx> (%d)\n", 724 resource_size_t endres = start + size - 1;
725 start, start + size - 1, ret); 725
726 pr_warn("Unable to release resource <%pa-%pa> (%d)\n",
727 &start, &endres, ret);
728 }
726 729
727 sections_to_remove = nr_pages / PAGES_PER_SECTION; 730 sections_to_remove = nr_pages / PAGES_PER_SECTION;
728 for (i = 0; i < sections_to_remove; i++) { 731 for (i = 0; i < sections_to_remove; i++) {
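The switch to %pa is the portable way to print resource_size_t/phys_addr_t values: the argument is passed by reference and printed at its native width, so the format no longer mismatches on configurations where resource_size_t is not 64-bit. The same idiom in isolation (report_region is illustrative):

	#include <linux/printk.h>
	#include <linux/types.h>

	static void report_region(resource_size_t start, resource_size_t size)
	{
		resource_size_t end = start + size - 1;

		/* %pa takes a pointer to the value, hence &start / &end. */
		pr_info("region <%pa-%pa>\n", &start, &end);
	}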
diff --git a/mm/migrate.c b/mm/migrate.c
index 27ed22579fd9..b1f57501de9c 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -165,7 +165,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
165 pte = arch_make_huge_pte(pte, vma, new, 0); 165 pte = arch_make_huge_pte(pte, vma, new, 0);
166 } 166 }
167#endif 167#endif
168 flush_cache_page(vma, addr, pte_pfn(pte)); 168 flush_dcache_page(new);
169 set_pte_at(mm, addr, ptep, pte); 169 set_pte_at(mm, addr, ptep, pte);
170 170
171 if (PageHuge(new)) { 171 if (PageHuge(new)) {
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index be04122fb277..6725ff183374 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -40,48 +40,44 @@ void __mmu_notifier_release(struct mm_struct *mm)
40 int id; 40 int id;
41 41
42 /* 42 /*
43 * srcu_read_lock() here will block synchronize_srcu() in 43 * SRCU here will block mmu_notifier_unregister until
44 * mmu_notifier_unregister() until all registered 44 * ->release returns.
45 * ->release() callouts this function makes have
46 * returned.
47 */ 45 */
48 id = srcu_read_lock(&srcu); 46 id = srcu_read_lock(&srcu);
47 hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
48 /*
49 * If ->release runs before mmu_notifier_unregister it must be
50 * handled, as it's the only way for the driver to flush all
51 * existing sptes and stop the driver from establishing any more
52 * sptes before all the pages in the mm are freed.
53 */
54 if (mn->ops->release)
55 mn->ops->release(mn, mm);
56 srcu_read_unlock(&srcu, id);
57
49 spin_lock(&mm->mmu_notifier_mm->lock); 58 spin_lock(&mm->mmu_notifier_mm->lock);
50 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { 59 while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
51 mn = hlist_entry(mm->mmu_notifier_mm->list.first, 60 mn = hlist_entry(mm->mmu_notifier_mm->list.first,
52 struct mmu_notifier, 61 struct mmu_notifier,
53 hlist); 62 hlist);
54
55 /* 63 /*
56 * Unlink. This will prevent mmu_notifier_unregister() 64 * We arrived before mmu_notifier_unregister so
57 * from also making the ->release() callout. 65 * mmu_notifier_unregister will do nothing other than to wait
66 * for ->release to finish and for mmu_notifier_unregister to
67 * return.
58 */ 68 */
59 hlist_del_init_rcu(&mn->hlist); 69 hlist_del_init_rcu(&mn->hlist);
60 spin_unlock(&mm->mmu_notifier_mm->lock);
61
62 /*
63 * Clear sptes. (see 'release' description in mmu_notifier.h)
64 */
65 if (mn->ops->release)
66 mn->ops->release(mn, mm);
67
68 spin_lock(&mm->mmu_notifier_mm->lock);
69 } 70 }
70 spin_unlock(&mm->mmu_notifier_mm->lock); 71 spin_unlock(&mm->mmu_notifier_mm->lock);
71 72
72 /* 73 /*
73 * All callouts to ->release() which we have done are complete. 74 * synchronize_srcu here prevents mmu_notifier_release from returning to
74 * Allow synchronize_srcu() in mmu_notifier_unregister() to complete 75 * exit_mmap (which would proceed with freeing all pages in the mm)
75 */ 76 * until the ->release method returns, if it was invoked by
76 srcu_read_unlock(&srcu, id); 77 * mmu_notifier_unregister.
77 78 *
78 /* 79 * The mmu_notifier_mm can't go away from under us because one mm_count
79 * mmu_notifier_unregister() may have unlinked a notifier and may 80 * is held by exit_mmap.
80 * still be calling out to it. Additionally, other notifiers
81 * may have been active via vmtruncate() et. al. Block here
82 * to ensure that all notifier callouts for this mm have been
83 * completed and the sptes are really cleaned up before returning
84 * to exit_mmap().
85 */ 81 */
86 synchronize_srcu(&srcu); 82 synchronize_srcu(&srcu);
87} 83}
@@ -292,31 +288,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
292{ 288{
293 BUG_ON(atomic_read(&mm->mm_count) <= 0); 289 BUG_ON(atomic_read(&mm->mm_count) <= 0);
294 290
295 spin_lock(&mm->mmu_notifier_mm->lock);
296 if (!hlist_unhashed(&mn->hlist)) { 291 if (!hlist_unhashed(&mn->hlist)) {
292 /*
293 * SRCU here will force exit_mmap to wait for ->release to
294 * finish before freeing the pages.
295 */
297 int id; 296 int id;
298 297
298 id = srcu_read_lock(&srcu);
299 /* 299 /*
300 * Ensure we synchronize up with __mmu_notifier_release(). 300 * exit_mmap will block in mmu_notifier_release to guarantee
301 * that ->release is called before freeing the pages.
301 */ 302 */
302 id = srcu_read_lock(&srcu);
303
304 hlist_del_rcu(&mn->hlist);
305 spin_unlock(&mm->mmu_notifier_mm->lock);
306
307 if (mn->ops->release) 303 if (mn->ops->release)
308 mn->ops->release(mn, mm); 304 mn->ops->release(mn, mm);
305 srcu_read_unlock(&srcu, id);
309 306
307 spin_lock(&mm->mmu_notifier_mm->lock);
310 /* 308 /*
311 * Allow __mmu_notifier_release() to complete. 309 * Can not use list_del_rcu() since __mmu_notifier_release
310 * can delete it before we hold the lock.
312 */ 311 */
313 srcu_read_unlock(&srcu, id); 312 hlist_del_init_rcu(&mn->hlist);
314 } else
315 spin_unlock(&mm->mmu_notifier_mm->lock); 313 spin_unlock(&mm->mmu_notifier_mm->lock);
314 }
316 315
317 /* 316 /*
318 * Wait for any running method to finish, including ->release() if it 317 * Wait for any running method to finish, of course including
319 * was run by __mmu_notifier_release() instead of us. 318 * ->release if it was run by mmu_notifier_release instead of us.
320 */ 319 */
321 synchronize_srcu(&srcu); 320 synchronize_srcu(&srcu);
322 321
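The mmu_notifier rework above comes down to one SRCU hand-off: whichever side still finds the notifier hashed runs ->release inside an SRCU read-side section, and the other side waits with synchronize_srcu() so the mm is not torn down while a callout is in flight. The fragment below is a condensed restatement of that pattern using the same kernel primitives the hunks use (srcu_read_lock, srcu_read_unlock, synchronize_srcu, hlist_del_init_rcu); it is an illustration, not code that builds on its own.

	/* Callout side (__mmu_notifier_release): run ->release inside an SRCU
	 * read-side section so the unregister path can wait for it. */
	id = srcu_read_lock(&srcu);
	hlist_for_each_entry_rcu(mn, &mm->mmu_notifier_mm->list, hlist)
		if (mn->ops->release)
			mn->ops->release(mn, mm);
	srcu_read_unlock(&srcu, id);

	/* Teardown side (mmu_notifier_unregister / exit_mmap): unhash under the
	 * spinlock, then wait until every read-side section above has finished. */
	spin_lock(&mm->mmu_notifier_mm->lock);
	hlist_del_init_rcu(&mn->hlist);
	spin_unlock(&mm->mmu_notifier_mm->lock);
	synchronize_srcu(&srcu);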
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 98cbdf6e5532..378a15bcd649 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5158,7 +5158,7 @@ unsigned long free_reserved_area(unsigned long start, unsigned long end,
5158 for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) { 5158 for (pages = 0; pos < end; pos += PAGE_SIZE, pages++) {
5159 if (poison) 5159 if (poison)
5160 memset((void *)pos, poison, PAGE_SIZE); 5160 memset((void *)pos, poison, PAGE_SIZE);
5161 free_reserved_page(virt_to_page(pos)); 5161 free_reserved_page(virt_to_page((void *)pos));
5162 } 5162 }
5163 5163
5164 if (pages && s) 5164 if (pages && s)
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 35aa294656cd..5da2cbcfdbb5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -127,28 +127,7 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
127 return 0; 127 return 0;
128} 128}
129 129
130static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
131{
132 struct vm_area_struct *vma;
133
134 /* We don't need vma lookup at all. */
135 if (!walk->hugetlb_entry)
136 return NULL;
137
138 VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
139 vma = find_vma(walk->mm, addr);
140 if (vma && vma->vm_start <= addr && is_vm_hugetlb_page(vma))
141 return vma;
142
143 return NULL;
144}
145
146#else /* CONFIG_HUGETLB_PAGE */ 130#else /* CONFIG_HUGETLB_PAGE */
147static struct vm_area_struct* hugetlb_vma(unsigned long addr, struct mm_walk *walk)
148{
149 return NULL;
150}
151
152static int walk_hugetlb_range(struct vm_area_struct *vma, 131static int walk_hugetlb_range(struct vm_area_struct *vma,
153 unsigned long addr, unsigned long end, 132 unsigned long addr, unsigned long end,
154 struct mm_walk *walk) 133 struct mm_walk *walk)
@@ -198,30 +177,53 @@ int walk_page_range(unsigned long addr, unsigned long end,
198 if (!walk->mm) 177 if (!walk->mm)
199 return -EINVAL; 178 return -EINVAL;
200 179
180 VM_BUG_ON(!rwsem_is_locked(&walk->mm->mmap_sem));
181
201 pgd = pgd_offset(walk->mm, addr); 182 pgd = pgd_offset(walk->mm, addr);
202 do { 183 do {
203 struct vm_area_struct *vma; 184 struct vm_area_struct *vma = NULL;
204 185
205 next = pgd_addr_end(addr, end); 186 next = pgd_addr_end(addr, end);
206 187
207 /* 188 /*
208 * handle hugetlb vma individually because pagetable walk for 189 * This function was not intended to be vma based.
209 * the hugetlb page is dependent on the architecture and 190 * But there are vma special cases to be handled:
210 * we can't handled it in the same manner as non-huge pages. 191 * - hugetlb vma's
192 * - VM_PFNMAP vma's
211 */ 193 */
212 vma = hugetlb_vma(addr, walk); 194 vma = find_vma(walk->mm, addr);
213 if (vma) { 195 if (vma) {
214 if (vma->vm_end < next) 196 /*
197 * There are no page structures backing a VM_PFNMAP
198 * range, so do not allow split_huge_page_pmd().
199 */
200 if ((vma->vm_start <= addr) &&
201 (vma->vm_flags & VM_PFNMAP)) {
215 next = vma->vm_end; 202 next = vma->vm_end;
203 pgd = pgd_offset(walk->mm, next);
204 continue;
205 }
216 /* 206 /*
217 * Hugepage is very tightly coupled with vma, so 207 * Handle hugetlb vma individually because pagetable
218 * walk through hugetlb entries within a given vma. 208 * walk for the hugetlb page is dependent on the
209 * architecture and we can't handle it in the same
210 * manner as non-huge pages.
219 */ 211 */
220 err = walk_hugetlb_range(vma, addr, next, walk); 212 if (walk->hugetlb_entry && (vma->vm_start <= addr) &&
221 if (err) 213 is_vm_hugetlb_page(vma)) {
222 break; 214 if (vma->vm_end < next)
223 pgd = pgd_offset(walk->mm, next); 215 next = vma->vm_end;
224 continue; 216 /*
217 * Hugepage is very tightly coupled with vma,
218 * so walk through hugetlb entries within a
219 * given vma.
220 */
221 err = walk_hugetlb_range(vma, addr, next, walk);
222 if (err)
223 break;
224 pgd = pgd_offset(walk->mm, next);
225 continue;
226 }
225 } 227 }
226 228
227 if (pgd_none_or_clear_bad(pgd)) { 229 if (pgd_none_or_clear_bad(pgd)) {
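The reworked loop above still advances with pgd_addr_end() and, for VM_PFNMAP or hugetlb ranges, jumps straight to vma->vm_end before recomputing the pgd. What keeps those jumps safe is the clamping rule "next aligned boundary, but never past end". A standalone illustration of that rule, using a hypothetical 1 GiB directory size chosen only for readability:

#include <assert.h>
#include <stdio.h>

#define DIR_SIZE (1UL << 30)
#define DIR_MASK (~(DIR_SIZE - 1))

/* Same clamping the kernel's pgd_addr_end() performs: advance to the next
 * directory boundary, but never past the end of the requested range. */
static unsigned long dir_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + DIR_SIZE) & DIR_MASK;

	return boundary - 1 < end - 1 ? boundary : end;
}

int main(void)
{
	unsigned long addr = 0x3ff00000UL;           /* just below a 1 GiB boundary */
	unsigned long end  = 0x40100000UL;
	unsigned long next = dir_addr_end(addr, end);

	assert(next == 0x40000000UL);                /* clamped to the boundary */
	assert(dir_addr_end(next, end) == end);      /* last step clamps to end */
	printf("walk steps: %#lx -> %#lx -> %#lx\n", addr, next, end);
	return 0;
}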
diff --git a/net/802/mrp.c b/net/802/mrp.c
index e085bcc754f6..1eb05d80b07b 100644
--- a/net/802/mrp.c
+++ b/net/802/mrp.c
@@ -871,10 +871,10 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)
871 */ 871 */
872 del_timer_sync(&app->join_timer); 872 del_timer_sync(&app->join_timer);
873 873
874 spin_lock(&app->lock); 874 spin_lock_bh(&app->lock);
875 mrp_mad_event(app, MRP_EVENT_TX); 875 mrp_mad_event(app, MRP_EVENT_TX);
876 mrp_pdu_queue(app); 876 mrp_pdu_queue(app);
877 spin_unlock(&app->lock); 877 spin_unlock_bh(&app->lock);
878 878
879 mrp_queue_xmit(app); 879 mrp_queue_xmit(app);
880 880
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 8e15d966d9b0..239992021b1d 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -837,6 +837,19 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
837 837
838 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst); 838 dat_entry = batadv_dat_entry_hash_find(bat_priv, ip_dst);
839 if (dat_entry) { 839 if (dat_entry) {
840 /* If the ARP request is destined for a local client the local
841 * client will answer itself. DAT would only generate a
842 * duplicate packet.
843 *
844 * Moreover, if the soft-interface is enslaved into a bridge, an
845 * additional DAT answer may trigger kernel warnings about
846 * a packet coming from the wrong port.
847 */
848 if (batadv_is_my_client(bat_priv, dat_entry->mac_addr)) {
849 ret = true;
850 goto out;
851 }
852
840 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src, 853 skb_new = arp_create(ARPOP_REPLY, ETH_P_ARP, ip_src,
841 bat_priv->soft_iface, ip_dst, hw_src, 854 bat_priv->soft_iface, ip_dst, hw_src,
842 dat_entry->mac_addr, hw_src); 855 dat_entry->mac_addr, hw_src);
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index 3e30a0f1b908..51aafd669cbb 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -163,16 +163,25 @@ void batadv_mesh_free(struct net_device *soft_iface)
163 batadv_vis_quit(bat_priv); 163 batadv_vis_quit(bat_priv);
164 164
165 batadv_gw_node_purge(bat_priv); 165 batadv_gw_node_purge(bat_priv);
166 batadv_originator_free(bat_priv);
167 batadv_nc_free(bat_priv); 166 batadv_nc_free(bat_priv);
167 batadv_dat_free(bat_priv);
168 batadv_bla_free(bat_priv);
168 169
170 /* Free the TT and the originator tables only after having terminated
171 * all the other depending components which may use these structures for
172 * their purposes.
173 */
169 batadv_tt_free(bat_priv); 174 batadv_tt_free(bat_priv);
170 175
171 batadv_bla_free(bat_priv); 176 /* Since the originator table clean up routine is accessing the TT
172 177 * tables as well, it has to be invoked after the TT tables have been
173 batadv_dat_free(bat_priv); 178 * freed and marked as empty. This ensures that no cleanup RCU callbacks
179 * accessing the TT data are scheduled for later execution.
180 */
181 batadv_originator_free(bat_priv);
174 182
175 free_percpu(bat_priv->bat_counters); 183 free_percpu(bat_priv->bat_counters);
184 bat_priv->bat_counters = NULL;
176 185
177 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE); 186 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
178} 187}
@@ -475,7 +484,7 @@ static int batadv_param_set_ra(const char *val, const struct kernel_param *kp)
475 char *algo_name = (char *)val; 484 char *algo_name = (char *)val;
476 size_t name_len = strlen(algo_name); 485 size_t name_len = strlen(algo_name);
477 486
478 if (algo_name[name_len - 1] == '\n') 487 if (name_len > 0 && algo_name[name_len - 1] == '\n')
479 algo_name[name_len - 1] = '\0'; 488 algo_name[name_len - 1] = '\0';
480 489
481 bat_algo_ops = batadv_algo_get(algo_name); 490 bat_algo_ops = batadv_algo_get(algo_name);
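The one-character guard above matters because an empty module parameter would otherwise make algo_name[name_len - 1] read one byte before the buffer. A tiny standalone version of the same check (the algorithm name is just sample input):

#include <assert.h>
#include <string.h>

/* Strip one trailing newline, guarding the empty string so that
 * name[len - 1] is never evaluated with len == 0. */
static void chomp(char *name)
{
	size_t len = strlen(name);

	if (len > 0 && name[len - 1] == '\n')
		name[len - 1] = '\0';
}

int main(void)
{
	char with_nl[] = "BATMAN_IV\n";
	char empty[]   = "";

	chomp(with_nl);
	chomp(empty);   /* without the length check this indexed before the buffer */
	assert(strcmp(with_nl, "BATMAN_IV") == 0);
	assert(empty[0] == '\0');
	return 0;
}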
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index f7c54305a918..e84629ece9b7 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1514,6 +1514,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
1514 struct ethhdr *ethhdr, ethhdr_tmp; 1514 struct ethhdr *ethhdr, ethhdr_tmp;
1515 uint8_t *orig_dest, ttl, ttvn; 1515 uint8_t *orig_dest, ttl, ttvn;
1516 unsigned int coding_len; 1516 unsigned int coding_len;
1517 int err;
1517 1518
1518 /* Save headers temporarily */ 1519 /* Save headers temporarily */
1519 memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp)); 1520 memcpy(&coded_packet_tmp, skb->data, sizeof(coded_packet_tmp));
@@ -1568,8 +1569,11 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
1568 coding_len); 1569 coding_len);
1569 1570
1570 /* Resize decoded skb if decoded with larger packet */ 1571 /* Resize decoded skb if decoded with larger packet */
1571 if (nc_packet->skb->len > coding_len + h_size) 1572 if (nc_packet->skb->len > coding_len + h_size) {
1572 pskb_trim_rcsum(skb, coding_len + h_size); 1573 err = pskb_trim_rcsum(skb, coding_len + h_size);
1574 if (err)
1575 return NULL;
1576 }
1573 1577
1574 /* Create decoded unicast packet */ 1578 /* Create decoded unicast packet */
1575 unicast_packet = (struct batadv_unicast_packet *)skb->data; 1579 unicast_packet = (struct batadv_unicast_packet *)skb->data;
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 2f3452546636..fad1a2093e15 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -156,12 +156,28 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
156 kfree(orig_node); 156 kfree(orig_node);
157} 157}
158 158
159/**
160 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
161 * schedule an rcu callback for freeing it
162 * @orig_node: the orig node to free
163 */
159void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node) 164void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
160{ 165{
161 if (atomic_dec_and_test(&orig_node->refcount)) 166 if (atomic_dec_and_test(&orig_node->refcount))
162 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu); 167 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
163} 168}
164 169
170/**
171 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
172 * possibly free it (without rcu callback)
173 * @orig_node: the orig node to free
174 */
175void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
176{
177 if (atomic_dec_and_test(&orig_node->refcount))
178 batadv_orig_node_free_rcu(&orig_node->rcu);
179}
180
165void batadv_originator_free(struct batadv_priv *bat_priv) 181void batadv_originator_free(struct batadv_priv *bat_priv)
166{ 182{
167 struct batadv_hashtable *hash = bat_priv->orig_hash; 183 struct batadv_hashtable *hash = bat_priv->orig_hash;
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 7df48fa7669d..734e5a3d8a5b 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -26,6 +26,7 @@ int batadv_originator_init(struct batadv_priv *bat_priv);
26void batadv_originator_free(struct batadv_priv *bat_priv); 26void batadv_originator_free(struct batadv_priv *bat_priv);
27void batadv_purge_orig_ref(struct batadv_priv *bat_priv); 27void batadv_purge_orig_ref(struct batadv_priv *bat_priv);
28void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node); 28void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node);
29void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
29struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv, 30struct batadv_orig_node *batadv_get_orig_node(struct batadv_priv *bat_priv,
30 const uint8_t *addr); 31 const uint8_t *addr);
31struct batadv_neigh_node * 32struct batadv_neigh_node *
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 6f20d339e33a..819dfb006cdf 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -505,6 +505,7 @@ unreg_debugfs:
505 batadv_debugfs_del_meshif(dev); 505 batadv_debugfs_del_meshif(dev);
506free_bat_counters: 506free_bat_counters:
507 free_percpu(bat_priv->bat_counters); 507 free_percpu(bat_priv->bat_counters);
508 bat_priv->bat_counters = NULL;
508 509
509 return ret; 510 return ret;
510} 511}
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 5e89deeb9542..9e8748575845 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -144,7 +144,12 @@ static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
144 struct batadv_tt_orig_list_entry *orig_entry; 144 struct batadv_tt_orig_list_entry *orig_entry;
145 145
146 orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu); 146 orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
147 batadv_orig_node_free_ref(orig_entry->orig_node); 147
148 /* We are in an rcu callback here, therefore we cannot use
149 * batadv_orig_node_free_ref() and its call_rcu():
150 * An rcu_barrier() wouldn't wait for that to finish
151 */
152 batadv_orig_node_free_ref_now(orig_entry->orig_node);
148 kfree(orig_entry); 153 kfree(orig_entry);
149} 154}
150 155
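The new batadv_orig_node_free_ref_now() exists because the caller above already runs inside an RCU callback: queuing a second call_rcu() from there would escape the rcu_barrier() batman-adv issues on shutdown, so the nested object is freed synchronously instead. A kernel-style sketch of the two paths, assuming a hypothetical struct obj with an atomic_t refcount, an rcu_head, and a free routine obj_free_rcu(); this mirrors the pattern, it is not buildable on its own.

	/* Normal path: defer the free until a grace period has elapsed. */
	void obj_free_ref(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			call_rcu(&o->rcu, obj_free_rcu); /* waited on by rcu_barrier() */
	}

	/* Path used from inside another RCU callback: free immediately rather
	 * than via call_rcu(), so an rcu_barrier() on shutdown does not miss a
	 * callback that was queued from within a callback. */
	void obj_free_ref_now(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refcount))
			obj_free_rcu(&o->rcu);
	}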
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c
index 9878eb8204c5..19c37a4929bc 100644
--- a/net/bridge/netfilter/ebt_log.c
+++ b/net/bridge/netfilter/ebt_log.c
@@ -72,13 +72,12 @@ print_ports(const struct sk_buff *skb, uint8_t protocol, int offset)
72} 72}
73 73
74static void 74static void
75ebt_log_packet(u_int8_t pf, unsigned int hooknum, 75ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
76 const struct sk_buff *skb, const struct net_device *in, 76 const struct sk_buff *skb, const struct net_device *in,
77 const struct net_device *out, const struct nf_loginfo *loginfo, 77 const struct net_device *out, const struct nf_loginfo *loginfo,
78 const char *prefix) 78 const char *prefix)
79{ 79{
80 unsigned int bitmask; 80 unsigned int bitmask;
81 struct net *net = dev_net(in ? in : out);
82 81
83 /* FIXME: Disabled from containers until syslog ns is supported */ 82 /* FIXME: Disabled from containers until syslog ns is supported */
84 if (!net_eq(net, &init_net)) 83 if (!net_eq(net, &init_net))
@@ -191,7 +190,7 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
191 nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, 190 nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
192 par->in, par->out, &li, "%s", info->prefix); 191 par->in, par->out, &li, "%s", info->prefix);
193 else 192 else
194 ebt_log_packet(NFPROTO_BRIDGE, par->hooknum, skb, par->in, 193 ebt_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb, par->in,
195 par->out, &li, info->prefix); 194 par->out, &li, info->prefix);
196 return EBT_CONTINUE; 195 return EBT_CONTINUE;
197} 196}
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
index fc1905c51417..df0364aa12d5 100644
--- a/net/bridge/netfilter/ebt_ulog.c
+++ b/net/bridge/netfilter/ebt_ulog.c
@@ -131,14 +131,16 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
131 return skb; 131 return skb;
132} 132}
133 133
134static void ebt_ulog_packet(unsigned int hooknr, const struct sk_buff *skb, 134static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
135 const struct net_device *in, const struct net_device *out, 135 const struct sk_buff *skb,
136 const struct ebt_ulog_info *uloginfo, const char *prefix) 136 const struct net_device *in,
137 const struct net_device *out,
138 const struct ebt_ulog_info *uloginfo,
139 const char *prefix)
137{ 140{
138 ebt_ulog_packet_msg_t *pm; 141 ebt_ulog_packet_msg_t *pm;
139 size_t size, copy_len; 142 size_t size, copy_len;
140 struct nlmsghdr *nlh; 143 struct nlmsghdr *nlh;
141 struct net *net = dev_net(in ? in : out);
142 struct ebt_ulog_net *ebt = ebt_ulog_pernet(net); 144 struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
143 unsigned int group = uloginfo->nlgroup; 145 unsigned int group = uloginfo->nlgroup;
144 ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group]; 146 ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
@@ -233,7 +235,7 @@ unlock:
233} 235}
234 236
235/* this function is registered with the netfilter core */ 237/* this function is registered with the netfilter core */
236static void ebt_log_packet(u_int8_t pf, unsigned int hooknum, 238static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
237 const struct sk_buff *skb, const struct net_device *in, 239 const struct sk_buff *skb, const struct net_device *in,
238 const struct net_device *out, const struct nf_loginfo *li, 240 const struct net_device *out, const struct nf_loginfo *li,
239 const char *prefix) 241 const char *prefix)
@@ -252,13 +254,15 @@ static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
252 strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix)); 254 strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
253 } 255 }
254 256
255 ebt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); 257 ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
256} 258}
257 259
258static unsigned int 260static unsigned int
259ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) 261ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
260{ 262{
261 ebt_ulog_packet(par->hooknum, skb, par->in, par->out, 263 struct net *net = dev_net(par->in ? par->in : par->out);
264
265 ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
262 par->targinfo, NULL); 266 par->targinfo, NULL);
263 return EBT_CONTINUE; 267 return EBT_CONTINUE;
264} 268}
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index a3395fdfbd4f..d5953b87918c 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1204,6 +1204,7 @@ void ceph_osdc_unregister_linger_request(struct ceph_osd_client *osdc,
1204 mutex_lock(&osdc->request_mutex); 1204 mutex_lock(&osdc->request_mutex);
1205 if (req->r_linger) { 1205 if (req->r_linger) {
1206 __unregister_linger_request(osdc, req); 1206 __unregister_linger_request(osdc, req);
1207 req->r_linger = 0;
1207 ceph_osdc_put_request(req); 1208 ceph_osdc_put_request(req);
1208 } 1209 }
1209 mutex_unlock(&osdc->request_mutex); 1210 mutex_unlock(&osdc->request_mutex);
@@ -2120,7 +2121,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
2120 down_read(&osdc->map_sem); 2121 down_read(&osdc->map_sem);
2121 mutex_lock(&osdc->request_mutex); 2122 mutex_lock(&osdc->request_mutex);
2122 __register_request(osdc, req); 2123 __register_request(osdc, req);
2123 WARN_ON(req->r_sent); 2124 req->r_sent = 0;
2125 req->r_got_reply = 0;
2126 req->r_completed = 0;
2124 rc = __map_request(osdc, req, 0); 2127 rc = __map_request(osdc, req, 0);
2125 if (rc < 0) { 2128 if (rc < 0) {
2126 if (nofail) { 2129 if (nofail) {
diff --git a/net/core/iovec.c b/net/core/iovec.c
index 7e7aeb01de45..de178e462682 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,31 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75 75
76/* 76/*
77 * Copy kernel to iovec. Returns -EFAULT on error. 77 * Copy kernel to iovec. Returns -EFAULT on error.
78 *
79 * Note: this modifies the original iovec.
80 */
81
82int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
83{
84 while (len > 0) {
85 if (iov->iov_len) {
86 int copy = min_t(unsigned int, iov->iov_len, len);
87 if (copy_to_user(iov->iov_base, kdata, copy))
88 return -EFAULT;
89 kdata += copy;
90 len -= copy;
91 iov->iov_len -= copy;
92 iov->iov_base += copy;
93 }
94 iov++;
95 }
96
97 return 0;
98}
99EXPORT_SYMBOL(memcpy_toiovec);
100
101/*
102 * Copy kernel to iovec. Returns -EFAULT on error.
103 */ 78 */
104 79
105int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata, 80int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
@@ -125,31 +100,6 @@ int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
125EXPORT_SYMBOL(memcpy_toiovecend); 100EXPORT_SYMBOL(memcpy_toiovecend);
126 101
127/* 102/*
128 * Copy iovec to kernel. Returns -EFAULT on error.
129 *
130 * Note: this modifies the original iovec.
131 */
132
133int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len)
134{
135 while (len > 0) {
136 if (iov->iov_len) {
137 int copy = min_t(unsigned int, len, iov->iov_len);
138 if (copy_from_user(kdata, iov->iov_base, copy))
139 return -EFAULT;
140 len -= copy;
141 kdata += copy;
142 iov->iov_base += copy;
143 iov->iov_len -= copy;
144 }
145 iov++;
146 }
147
148 return 0;
149}
150EXPORT_SYMBOL(memcpy_fromiovec);
151
152/*
153 * Copy iovec from kernel. Returns -EFAULT on error. 103 * Copy iovec from kernel. Returns -EFAULT on error.
154 */ 104 */
155 105
diff --git a/net/core/sock.c b/net/core/sock.c
index d4f4cea726e7..6ba327da79e1 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1217,18 +1217,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk)
1217#endif 1217#endif
1218} 1218}
1219 1219
1220/*
1221 * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
1222 * un-modified. Special care is taken when initializing object to zero.
1223 */
1224static inline void sk_prot_clear_nulls(struct sock *sk, int size)
1225{
1226 if (offsetof(struct sock, sk_node.next) != 0)
1227 memset(sk, 0, offsetof(struct sock, sk_node.next));
1228 memset(&sk->sk_node.pprev, 0,
1229 size - offsetof(struct sock, sk_node.pprev));
1230}
1231
1232void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) 1220void sk_prot_clear_portaddr_nulls(struct sock *sk, int size)
1233{ 1221{
1234 unsigned long nulls1, nulls2; 1222 unsigned long nulls1, nulls2;
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index c625e4dad4b0..2a83591492dd 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -235,7 +235,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
235 */ 235 */
236 struct net *net = dev_net(skb->dev); 236 struct net *net = dev_net(skb->dev);
237 struct ip_tunnel_net *itn; 237 struct ip_tunnel_net *itn;
238 const struct iphdr *iph = (const struct iphdr *)skb->data; 238 const struct iphdr *iph;
239 const int type = icmp_hdr(skb)->type; 239 const int type = icmp_hdr(skb)->type;
240 const int code = icmp_hdr(skb)->code; 240 const int code = icmp_hdr(skb)->code;
241 struct ip_tunnel *t; 241 struct ip_tunnel *t;
@@ -281,6 +281,7 @@ static void ipgre_err(struct sk_buff *skb, u32 info)
281 else 281 else
282 itn = net_generic(net, ipgre_net_id); 282 itn = net_generic(net, ipgre_net_id);
283 283
284 iph = (const struct iphdr *)skb->data;
284 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags, 285 t = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi.flags,
285 iph->daddr, iph->saddr, tpi.key); 286 iph->daddr, iph->saddr, tpi.key);
286 287
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 147abf5275aa..4bcabf3ab4ca 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -84,7 +84,7 @@ int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
84EXPORT_SYMBOL(sysctl_ip_default_ttl); 84EXPORT_SYMBOL(sysctl_ip_default_ttl);
85 85
86/* Generate a checksum for an outgoing IP datagram. */ 86/* Generate a checksum for an outgoing IP datagram. */
87__inline__ void ip_send_check(struct iphdr *iph) 87void ip_send_check(struct iphdr *iph)
88{ 88{
89 iph->check = 0; 89 iph->check = 0;
90 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); 90 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index f8a222cb6448..cf08218ddbcf 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -162,7 +162,8 @@ static struct sk_buff *ulog_alloc_skb(unsigned int size)
162 return skb; 162 return skb;
163} 163}
164 164
165static void ipt_ulog_packet(unsigned int hooknum, 165static void ipt_ulog_packet(struct net *net,
166 unsigned int hooknum,
166 const struct sk_buff *skb, 167 const struct sk_buff *skb,
167 const struct net_device *in, 168 const struct net_device *in,
168 const struct net_device *out, 169 const struct net_device *out,
@@ -174,7 +175,6 @@ static void ipt_ulog_packet(unsigned int hooknum,
174 size_t size, copy_len; 175 size_t size, copy_len;
175 struct nlmsghdr *nlh; 176 struct nlmsghdr *nlh;
176 struct timeval tv; 177 struct timeval tv;
177 struct net *net = dev_net(in ? in : out);
178 struct ulog_net *ulog = ulog_pernet(net); 178 struct ulog_net *ulog = ulog_pernet(net);
179 179
180 /* ffs == find first bit set, necessary because userspace 180 /* ffs == find first bit set, necessary because userspace
@@ -291,12 +291,15 @@ alloc_failure:
291static unsigned int 291static unsigned int
292ulog_tg(struct sk_buff *skb, const struct xt_action_param *par) 292ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
293{ 293{
294 ipt_ulog_packet(par->hooknum, skb, par->in, par->out, 294 struct net *net = dev_net(par->in ? par->in : par->out);
295
296 ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
295 par->targinfo, NULL); 297 par->targinfo, NULL);
296 return XT_CONTINUE; 298 return XT_CONTINUE;
297} 299}
298 300
299static void ipt_logfn(u_int8_t pf, 301static void ipt_logfn(struct net *net,
302 u_int8_t pf,
300 unsigned int hooknum, 303 unsigned int hooknum,
301 const struct sk_buff *skb, 304 const struct sk_buff *skb,
302 const struct net_device *in, 305 const struct net_device *in,
@@ -318,7 +321,7 @@ static void ipt_logfn(u_int8_t pf,
318 strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix)); 321 strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
319 } 322 }
320 323
321 ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix); 324 ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
322} 325}
323 326
324static int ulog_tg_check(const struct xt_tgchk_param *par) 327static int ulog_tg_check(const struct xt_tgchk_param *par)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index dcb116dde216..ab450c099aa4 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2887,6 +2887,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2887 unsigned int mss; 2887 unsigned int mss;
2888 struct sk_buff *gso_skb = skb; 2888 struct sk_buff *gso_skb = skb;
2889 __sum16 newcheck; 2889 __sum16 newcheck;
2890 bool ooo_okay, copy_destructor;
2890 2891
2891 if (!pskb_may_pull(skb, sizeof(*th))) 2892 if (!pskb_may_pull(skb, sizeof(*th)))
2892 goto out; 2893 goto out;
@@ -2927,10 +2928,18 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2927 goto out; 2928 goto out;
2928 } 2929 }
2929 2930
2931 copy_destructor = gso_skb->destructor == tcp_wfree;
2932 ooo_okay = gso_skb->ooo_okay;
2933 /* All segments but the first should have ooo_okay cleared */
2934 skb->ooo_okay = 0;
2935
2930 segs = skb_segment(skb, features); 2936 segs = skb_segment(skb, features);
2931 if (IS_ERR(segs)) 2937 if (IS_ERR(segs))
2932 goto out; 2938 goto out;
2933 2939
2940 /* Only first segment might have ooo_okay set */
2941 segs->ooo_okay = ooo_okay;
2942
2934 delta = htonl(oldlen + (thlen + mss)); 2943 delta = htonl(oldlen + (thlen + mss));
2935 2944
2936 skb = segs; 2945 skb = segs;
@@ -2950,6 +2959,17 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2950 thlen, skb->csum)); 2959 thlen, skb->csum));
2951 2960
2952 seq += mss; 2961 seq += mss;
2962 if (copy_destructor) {
2963 skb->destructor = gso_skb->destructor;
2964 skb->sk = gso_skb->sk;
2965 /* {tcp|sock}_wfree() use exact truesize accounting :
2966 * sum(skb->truesize) MUST be exactly gso_skb->truesize
2967 * So we account mss bytes of 'true size' for each segment.
2968 * The last segment will contain the remaining.
2969 */
2970 skb->truesize = mss;
2971 gso_skb->truesize -= mss;
2972 }
2953 skb = skb->next; 2973 skb = skb->next;
2954 th = tcp_hdr(skb); 2974 th = tcp_hdr(skb);
2955 2975
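The truesize handling added above keeps {tcp,sock}_wfree() accounting exact: every segment except the last is charged mss bytes of "true size", and whatever remains stays with gso_skb, whose truesize the last segment later takes over in the swap below. A small standalone check of that invariant, with made-up numbers:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int orig = 65226;           /* hypothetical original truesize */
	unsigned int gso_truesize = orig;
	unsigned int mss = 1448;             /* hypothetical payload per segment */
	unsigned int nsegs = 10;
	unsigned int sum = 0, i;

	/* All but the last segment take exactly mss of true size ... */
	for (i = 0; i < nsegs - 1; i++) {
		sum += mss;
		gso_truesize -= mss;
	}
	/* ... and the last segment ends up with the remainder. */
	sum += gso_truesize;

	printf("sum of segment truesizes = %u\n", sum);
	assert(sum == orig);   /* invariant: the total is preserved exactly */
	return 0;
}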
@@ -2962,7 +2982,7 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb,
2962 * is freed at TX completion, and not right now when gso_skb 2982 * is freed at TX completion, and not right now when gso_skb
2963 * is freed by GSO engine 2983 * is freed by GSO engine
2964 */ 2984 */
2965 if (gso_skb->destructor == tcp_wfree) { 2985 if (copy_destructor) {
2966 swap(gso_skb->sk, skb->sk); 2986 swap(gso_skb->sk, skb->sk);
2967 swap(gso_skb->destructor, skb->destructor); 2987 swap(gso_skb->destructor, skb->destructor);
2968 swap(gso_skb->truesize, skb->truesize); 2988 swap(gso_skb->truesize, skb->truesize);
@@ -3269,8 +3289,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3269 3289
3270 for (i = 0; i < shi->nr_frags; ++i) { 3290 for (i = 0; i < shi->nr_frags; ++i) {
3271 const struct skb_frag_struct *f = &shi->frags[i]; 3291 const struct skb_frag_struct *f = &shi->frags[i];
3272 struct page *page = skb_frag_page(f); 3292 unsigned int offset = f->page_offset;
3273 sg_set_page(&sg, page, skb_frag_size(f), f->page_offset); 3293 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3294
3295 sg_set_page(&sg, page, skb_frag_size(f),
3296 offset_in_page(offset));
3274 if (crypto_hash_update(desc, &sg, skb_frag_size(f))) 3297 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3275 return 1; 3298 return 1;
3276 } 3299 }
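The tcp_md5_hash_skb_data() change above handles fragments whose page_offset points past the first page of a compound page: whole pages in the offset are folded into the page pointer and only the in-page remainder is passed to sg_set_page(). The same arithmetic in standalone form, assuming 4 KiB pages and a made-up fragment offset:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Userspace stand-in for the kernel's offset_in_page(). */
static unsigned long offset_in_page(unsigned long off)
{
	return off & (PAGE_SIZE - 1);
}

int main(void)
{
	unsigned long frag_offset = 9000;        /* hypothetical f->page_offset */
	unsigned long page_index  = frag_offset >> PAGE_SHIFT;
	unsigned long in_page     = offset_in_page(frag_offset);

	/* 9000 = 2 * 4096 + 808: the data starts 808 bytes into the third page. */
	printf("page index %lu, offset within page %lu\n", page_index, in_page);
	assert(page_index == 2 && in_page == 808);
	return 0;
}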
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 08bbe6096528..9c6225780bd5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2743,8 +2743,8 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
2743 * tcp_xmit_retransmit_queue(). 2743 * tcp_xmit_retransmit_queue().
2744 */ 2744 */
2745static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, 2745static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2746 int prior_sacked, bool is_dupack, 2746 int prior_sacked, int prior_packets,
2747 int flag) 2747 bool is_dupack, int flag)
2748{ 2748{
2749 struct inet_connection_sock *icsk = inet_csk(sk); 2749 struct inet_connection_sock *icsk = inet_csk(sk);
2750 struct tcp_sock *tp = tcp_sk(sk); 2750 struct tcp_sock *tp = tcp_sk(sk);
@@ -2804,7 +2804,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2804 tcp_add_reno_sack(sk); 2804 tcp_add_reno_sack(sk);
2805 } else 2805 } else
2806 do_lost = tcp_try_undo_partial(sk, pkts_acked); 2806 do_lost = tcp_try_undo_partial(sk, pkts_acked);
2807 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; 2807 newly_acked_sacked = prior_packets - tp->packets_out +
2808 tp->sacked_out - prior_sacked;
2808 break; 2809 break;
2809 case TCP_CA_Loss: 2810 case TCP_CA_Loss:
2810 tcp_process_loss(sk, flag, is_dupack); 2811 tcp_process_loss(sk, flag, is_dupack);
@@ -2818,7 +2819,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
2818 if (is_dupack) 2819 if (is_dupack)
2819 tcp_add_reno_sack(sk); 2820 tcp_add_reno_sack(sk);
2820 } 2821 }
2821 newly_acked_sacked = pkts_acked + tp->sacked_out - prior_sacked; 2822 newly_acked_sacked = prior_packets - tp->packets_out +
2823 tp->sacked_out - prior_sacked;
2822 2824
2823 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 2825 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
2824 tcp_try_undo_dsack(sk); 2826 tcp_try_undo_dsack(sk);
@@ -3330,9 +3332,10 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3330 bool is_dupack = false; 3332 bool is_dupack = false;
3331 u32 prior_in_flight; 3333 u32 prior_in_flight;
3332 u32 prior_fackets; 3334 u32 prior_fackets;
3333 int prior_packets; 3335 int prior_packets = tp->packets_out;
3334 int prior_sacked = tp->sacked_out; 3336 int prior_sacked = tp->sacked_out;
3335 int pkts_acked = 0; 3337 int pkts_acked = 0;
3338 int previous_packets_out = 0;
3336 3339
3337 /* If the ack is older than previous acks 3340 /* If the ack is older than previous acks
3338 * then we can probably ignore it. 3341 * then we can probably ignore it.
@@ -3403,14 +3406,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3403 sk->sk_err_soft = 0; 3406 sk->sk_err_soft = 0;
3404 icsk->icsk_probes_out = 0; 3407 icsk->icsk_probes_out = 0;
3405 tp->rcv_tstamp = tcp_time_stamp; 3408 tp->rcv_tstamp = tcp_time_stamp;
3406 prior_packets = tp->packets_out;
3407 if (!prior_packets) 3409 if (!prior_packets)
3408 goto no_queue; 3410 goto no_queue;
3409 3411
3410 /* See if we can take anything off of the retransmit queue. */ 3412 /* See if we can take anything off of the retransmit queue. */
3413 previous_packets_out = tp->packets_out;
3411 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una); 3414 flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una);
3412 3415
3413 pkts_acked = prior_packets - tp->packets_out; 3416 pkts_acked = previous_packets_out - tp->packets_out;
3414 3417
3415 if (tcp_ack_is_dubious(sk, flag)) { 3418 if (tcp_ack_is_dubious(sk, flag)) {
3416 /* Advance CWND, if state allows this. */ 3419 /* Advance CWND, if state allows this. */
@@ -3418,7 +3421,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
3418 tcp_cong_avoid(sk, ack, prior_in_flight); 3421 tcp_cong_avoid(sk, ack, prior_in_flight);
3419 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP)); 3422 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
3420 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3423 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3421 is_dupack, flag); 3424 prior_packets, is_dupack, flag);
3422 } else { 3425 } else {
3423 if (flag & FLAG_DATA_ACKED) 3426 if (flag & FLAG_DATA_ACKED)
3424 tcp_cong_avoid(sk, ack, prior_in_flight); 3427 tcp_cong_avoid(sk, ack, prior_in_flight);
@@ -3441,7 +3444,7 @@ no_queue:
3441 /* If data was DSACKed, see if we can undo a cwnd reduction. */ 3444 /* If data was DSACKed, see if we can undo a cwnd reduction. */
3442 if (flag & FLAG_DSACKING_ACK) 3445 if (flag & FLAG_DSACKING_ACK)
3443 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3446 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3444 is_dupack, flag); 3447 prior_packets, is_dupack, flag);
3445 /* If this ack opens up a zero window, clear backoff. It was 3448 /* If this ack opens up a zero window, clear backoff. It was
3446 * being used to time the probes, and is probably far higher than 3449 * being used to time the probes, and is probably far higher than
3447 * it needs to be for normal retransmission. 3450 * it needs to be for normal retransmission.
@@ -3464,7 +3467,7 @@ old_ack:
3464 if (TCP_SKB_CB(skb)->sacked) { 3467 if (TCP_SKB_CB(skb)->sacked) {
3465 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una); 3468 flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
3466 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked, 3469 tcp_fastretrans_alert(sk, pkts_acked, prior_sacked,
3467 is_dupack, flag); 3470 prior_packets, is_dupack, flag);
3468 } 3471 }
3469 3472
3470 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt); 3473 SOCK_DEBUG(sk, "Ack %u before %u:%u\n", ack, tp->snd_una, tp->snd_nxt);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 536d40929ba6..ec335fabd5cc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -874,11 +874,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
874 &md5); 874 &md5);
875 tcp_header_size = tcp_options_size + sizeof(struct tcphdr); 875 tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
876 876
877 if (tcp_packets_in_flight(tp) == 0) { 877 if (tcp_packets_in_flight(tp) == 0)
878 tcp_ca_event(sk, CA_EVENT_TX_START); 878 tcp_ca_event(sk, CA_EVENT_TX_START);
879 skb->ooo_okay = 1; 879
880 } else 880 /* if no packet is in qdisc/device queue, then allow XPS to select
881 skb->ooo_okay = 0; 881 * another queue.
882 */
883 skb->ooo_okay = sk_wmem_alloc_get(sk) == 0;
882 884
883 skb_push(skb, tcp_header_size); 885 skb_push(skb, tcp_header_size);
884 skb_reset_transport_header(skb); 886 skb_reset_transport_header(skb);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index d3ddd8400354..ecd60733e5e2 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1081,6 +1081,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1081 } 1081 }
1082 if (t == NULL) 1082 if (t == NULL)
1083 t = netdev_priv(dev); 1083 t = netdev_priv(dev);
1084 memset(&p, 0, sizeof(p));
1084 ip6gre_tnl_parm_to_user(&p, &t->parms); 1085 ip6gre_tnl_parm_to_user(&p, &t->parms);
1085 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 1086 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1086 err = -EFAULT; 1087 err = -EFAULT;
@@ -1128,6 +1129,7 @@ static int ip6gre_tunnel_ioctl(struct net_device *dev,
1128 if (t) { 1129 if (t) {
1129 err = 0; 1130 err = 0;
1130 1131
1132 memset(&p, 0, sizeof(p));
1131 ip6gre_tnl_parm_to_user(&p, &t->parms); 1133 ip6gre_tnl_parm_to_user(&p, &t->parms);
1132 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p))) 1134 if (copy_to_user(ifr->ifr_ifru.ifru_data, &p, sizeof(p)))
1133 err = -EFAULT; 1135 err = -EFAULT;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d2eedf192330..dae1949019d7 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1147,7 +1147,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1147 if (WARN_ON(np->cork.opt)) 1147 if (WARN_ON(np->cork.opt))
1148 return -EINVAL; 1148 return -EINVAL;
1149 1149
1150 np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation); 1150 np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation);
1151 if (unlikely(np->cork.opt == NULL)) 1151 if (unlikely(np->cork.opt == NULL))
1152 return -ENOBUFS; 1152 return -ENOBUFS;
1153 1153
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 71167069b394..0a17ed9eaf39 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1890,6 +1890,17 @@ void tcp6_proc_exit(struct net *net)
1890} 1890}
1891#endif 1891#endif
1892 1892
1893static void tcp_v6_clear_sk(struct sock *sk, int size)
1894{
1895 struct inet_sock *inet = inet_sk(sk);
1896
1897 /* we do not want to clear pinet6 field, because of RCU lookups */
1898 sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6));
1899
1900 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1901 memset(&inet->pinet6 + 1, 0, size);
1902}
1903
1893struct proto tcpv6_prot = { 1904struct proto tcpv6_prot = {
1894 .name = "TCPv6", 1905 .name = "TCPv6",
1895 .owner = THIS_MODULE, 1906 .owner = THIS_MODULE,
@@ -1933,6 +1944,7 @@ struct proto tcpv6_prot = {
1933#ifdef CONFIG_MEMCG_KMEM 1944#ifdef CONFIG_MEMCG_KMEM
1934 .proto_cgroup = tcp_proto_cgroup, 1945 .proto_cgroup = tcp_proto_cgroup,
1935#endif 1946#endif
1947 .clear_sk = tcp_v6_clear_sk,
1936}; 1948};
1937 1949
1938static const struct inet6_protocol tcpv6_protocol = { 1950static const struct inet6_protocol tcpv6_protocol = {
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index d4defdd44937..42923b14dfa6 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1432,6 +1432,17 @@ void udp6_proc_exit(struct net *net) {
1432} 1432}
1433#endif /* CONFIG_PROC_FS */ 1433#endif /* CONFIG_PROC_FS */
1434 1434
1435void udp_v6_clear_sk(struct sock *sk, int size)
1436{
1437 struct inet_sock *inet = inet_sk(sk);
1438
1439 /* we do not want to clear pinet6 field, because of RCU lookups */
1440 sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6));
1441
1442 size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6);
1443 memset(&inet->pinet6 + 1, 0, size);
1444}
1445
1435/* ------------------------------------------------------------------------ */ 1446/* ------------------------------------------------------------------------ */
1436 1447
1437struct proto udpv6_prot = { 1448struct proto udpv6_prot = {
@@ -1462,7 +1473,7 @@ struct proto udpv6_prot = {
1462 .compat_setsockopt = compat_udpv6_setsockopt, 1473 .compat_setsockopt = compat_udpv6_setsockopt,
1463 .compat_getsockopt = compat_udpv6_getsockopt, 1474 .compat_getsockopt = compat_udpv6_getsockopt,
1464#endif 1475#endif
1465 .clear_sk = sk_prot_clear_portaddr_nulls, 1476 .clear_sk = udp_v6_clear_sk,
1466}; 1477};
1467 1478
1468static struct inet_protosw udpv6_protosw = { 1479static struct inet_protosw udpv6_protosw = {
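Both tcp_v6_clear_sk() and udp_v6_clear_sk() above zero the socket while leaving pinet6 untouched, since RCU lookups may still dereference that field; the trick is two clears bracketing the preserved member. A standalone illustration of the same offsetof() arithmetic on a made-up structure (the first memset stands in for sk_prot_clear_nulls() in the real code):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

/* Made-up structure standing in for struct inet_sock: keep 'pinet6'. */
struct fake_sock {
	int  a;
	int  b;
	void *pinet6;   /* must survive the clear, like the real pinet6 */
	int  c;
	int  d;
};

static void fake_clear_sk(struct fake_sock *sk, size_t size)
{
	void *keep = sk->pinet6;

	/* Zero everything up to pinet6 ... */
	memset(sk, 0, offsetof(struct fake_sock, pinet6));
	/* ... and everything after it, exactly as the hunks above do. */
	size -= offsetof(struct fake_sock, pinet6) + sizeof(sk->pinet6);
	memset(&sk->pinet6 + 1, 0, size);

	assert(sk->pinet6 == keep);   /* the preserved field is untouched */
}

int main(void)
{
	struct fake_sock s = { 1, 2, (void *)&s, 3, 4 };

	fake_clear_sk(&s, sizeof(s));
	printf("a=%d b=%d c=%d d=%d pinet6=%p\n", s.a, s.b, s.c, s.d, s.pinet6);
	return 0;
}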
diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h
index d7571046bfc4..4691ed50a928 100644
--- a/net/ipv6/udp_impl.h
+++ b/net/ipv6/udp_impl.h
@@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk,
31extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); 31extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb);
32extern void udpv6_destroy_sock(struct sock *sk); 32extern void udpv6_destroy_sock(struct sock *sk);
33 33
34extern void udp_v6_clear_sk(struct sock *sk, int size);
35
34#ifdef CONFIG_PROC_FS 36#ifdef CONFIG_PROC_FS
35extern int udp6_seq_show(struct seq_file *seq, void *v); 37extern int udp6_seq_show(struct seq_file *seq, void *v);
36#endif 38#endif
diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c
index 1d08e21d9f69..dfcc4be46898 100644
--- a/net/ipv6/udplite.c
+++ b/net/ipv6/udplite.c
@@ -56,7 +56,7 @@ struct proto udplitev6_prot = {
56 .compat_setsockopt = compat_udpv6_setsockopt, 56 .compat_setsockopt = compat_udpv6_setsockopt,
57 .compat_getsockopt = compat_udpv6_getsockopt, 57 .compat_getsockopt = compat_udpv6_getsockopt,
58#endif 58#endif
59 .clear_sk = sk_prot_clear_portaddr_nulls, 59 .clear_sk = udp_v6_clear_sk,
60}; 60};
61 61
62static struct inet_protosw udplite6_protosw = { 62static struct inet_protosw udplite6_protosw = {
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4ef7bdb65440..23ed03d786c8 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -103,8 +103,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
103 dev_hold(dev); 103 dev_hold(dev);
104 104
105 xdst->u.rt6.rt6i_idev = in6_dev_get(dev); 105 xdst->u.rt6.rt6i_idev = in6_dev_get(dev);
106 if (!xdst->u.rt6.rt6i_idev) 106 if (!xdst->u.rt6.rt6i_idev) {
107 dev_put(dev);
107 return -ENODEV; 108 return -ENODEV;
109 }
108 110
109 rt6_transfer_peer(&xdst->u.rt6, rt); 111 rt6_transfer_peer(&xdst->u.rt6, rt);
110 112
diff --git a/net/irda/irlap_frame.c b/net/irda/irlap_frame.c
index 8c004161a843..9ea0c933b9ff 100644
--- a/net/irda/irlap_frame.c
+++ b/net/irda/irlap_frame.c
@@ -544,7 +544,7 @@ static void irlap_recv_discovery_xid_cmd(struct irlap_cb *self,
544 /* 544 /*
545 * We now have some discovery info to deliver! 545 * We now have some discovery info to deliver!
546 */ 546 */
547 discovery = kmalloc(sizeof(discovery_t), GFP_ATOMIC); 547 discovery = kzalloc(sizeof(discovery_t), GFP_ATOMIC);
548 if (!discovery) { 548 if (!discovery) {
549 IRDA_WARNING("%s: unable to malloc!\n", __func__); 549 IRDA_WARNING("%s: unable to malloc!\n", __func__);
550 return; 550 return;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 158e6eb188d3..44be28cfc6c4 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1267,6 +1267,7 @@ void ieee80211_sta_reset_conn_monitor(struct ieee80211_sub_if_data *sdata);
1267void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata); 1267void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata);
1268void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata, 1268void ieee80211_mgd_conn_tx_status(struct ieee80211_sub_if_data *sdata,
1269 __le16 fc, bool acked); 1269 __le16 fc, bool acked);
1270void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata);
1270 1271
1271/* IBSS code */ 1272/* IBSS code */
1272void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local); 1273void ieee80211_ibss_notify_scan_completed(struct ieee80211_local *local);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 29620bfc7a69..a46e490f20dd 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1015,7 +1015,8 @@ static void ieee80211_chswitch_timer(unsigned long data)
1015 1015
1016static void 1016static void
1017ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata, 1017ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1018 u64 timestamp, struct ieee802_11_elems *elems) 1018 u64 timestamp, struct ieee802_11_elems *elems,
1019 bool beacon)
1019{ 1020{
1020 struct ieee80211_local *local = sdata->local; 1021 struct ieee80211_local *local = sdata->local;
1021 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 1022 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -1032,6 +1033,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1032 struct cfg80211_chan_def new_vht_chandef = {}; 1033 struct cfg80211_chan_def new_vht_chandef = {};
1033 const struct ieee80211_sec_chan_offs_ie *sec_chan_offs; 1034 const struct ieee80211_sec_chan_offs_ie *sec_chan_offs;
1034 const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie; 1035 const struct ieee80211_wide_bw_chansw_ie *wide_bw_chansw_ie;
1036 const struct ieee80211_ht_operation *ht_oper;
1035 int secondary_channel_offset = -1; 1037 int secondary_channel_offset = -1;
1036 1038
1037 ASSERT_MGD_MTX(ifmgd); 1039 ASSERT_MGD_MTX(ifmgd);
@@ -1048,11 +1050,14 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1048 1050
1049 sec_chan_offs = elems->sec_chan_offs; 1051 sec_chan_offs = elems->sec_chan_offs;
1050 wide_bw_chansw_ie = elems->wide_bw_chansw_ie; 1052 wide_bw_chansw_ie = elems->wide_bw_chansw_ie;
1053 ht_oper = elems->ht_operation;
1051 1054
1052 if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT | 1055 if (ifmgd->flags & (IEEE80211_STA_DISABLE_HT |
1053 IEEE80211_STA_DISABLE_40MHZ)) { 1056 IEEE80211_STA_DISABLE_40MHZ)) {
1054 sec_chan_offs = NULL; 1057 sec_chan_offs = NULL;
1055 wide_bw_chansw_ie = NULL; 1058 wide_bw_chansw_ie = NULL;
1059 /* only used for bandwidth here */
1060 ht_oper = NULL;
1056 } 1061 }
1057 1062
1058 if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT) 1063 if (ifmgd->flags & IEEE80211_STA_DISABLE_VHT)
@@ -1094,10 +1099,20 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1094 return; 1099 return;
1095 } 1100 }
1096 1101
1097 if (sec_chan_offs) { 1102 if (!beacon && sec_chan_offs) {
1098 secondary_channel_offset = sec_chan_offs->sec_chan_offs; 1103 secondary_channel_offset = sec_chan_offs->sec_chan_offs;
1104 } else if (beacon && ht_oper) {
1105 secondary_channel_offset =
1106 ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
1099 } else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) { 1107 } else if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
1100 /* if HT is enabled and the IE not present, it's still HT */ 1108 /*
1109 * If it's not a beacon, HT is enabled and the IE not present,
1110 * it's 20 MHz, 802.11-2012 8.5.2.6:
1111 * This element [the Secondary Channel Offset Element] is
1112 * present when switching to a 40 MHz channel. It may be
1113 * present when switching to a 20 MHz channel (in which
1114 * case the secondary channel offset is set to SCN).
1115 */
1101 secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; 1116 secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
1102 } 1117 }
1103 1118
@@ -2796,7 +2811,8 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
2796 mutex_unlock(&local->iflist_mtx); 2811 mutex_unlock(&local->iflist_mtx);
2797 } 2812 }
2798 2813
2799 ieee80211_sta_process_chanswitch(sdata, rx_status->mactime, elems); 2814 ieee80211_sta_process_chanswitch(sdata, rx_status->mactime,
2815 elems, true);
2800 2816
2801} 2817}
2802 2818
@@ -3210,7 +3226,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
3210 3226
3211 ieee80211_sta_process_chanswitch(sdata, 3227 ieee80211_sta_process_chanswitch(sdata,
3212 rx_status->mactime, 3228 rx_status->mactime,
3213 &elems); 3229 &elems, false);
3214 } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { 3230 } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) {
3215 ies_len = skb->len - 3231 ies_len = skb->len -
3216 offsetof(struct ieee80211_mgmt, 3232 offsetof(struct ieee80211_mgmt,
@@ -3232,7 +3248,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
3232 3248
3233 ieee80211_sta_process_chanswitch(sdata, 3249 ieee80211_sta_process_chanswitch(sdata,
3234 rx_status->mactime, 3250 rx_status->mactime,
3235 &elems); 3251 &elems, false);
3236 } 3252 }
3237 break; 3253 break;
3238 } 3254 }
@@ -3623,6 +3639,31 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
3623 } 3639 }
3624} 3640}
3625 3641
3642#ifdef CONFIG_PM
3643void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
3644{
3645 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
3646
3647 mutex_lock(&ifmgd->mtx);
3648 if (!ifmgd->associated) {
3649 mutex_unlock(&ifmgd->mtx);
3650 return;
3651 }
3652
3653 if (sdata->flags & IEEE80211_SDATA_DISCONNECT_RESUME) {
3654 sdata->flags &= ~IEEE80211_SDATA_DISCONNECT_RESUME;
3655 mlme_dbg(sdata, "driver requested disconnect after resume\n");
3656 ieee80211_sta_connection_lost(sdata,
3657 ifmgd->associated->bssid,
3658 WLAN_REASON_UNSPECIFIED,
3659 true);
3660 mutex_unlock(&ifmgd->mtx);
3661 return;
3662 }
3663 mutex_unlock(&ifmgd->mtx);
3664}
3665#endif
3666
3626/* interface setup */ 3667/* interface setup */
3627void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata) 3668void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
3628{ 3669{
@@ -4329,7 +4370,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
4329 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 4370 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
4330 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; 4371 u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
4331 bool tx = !req->local_state_change; 4372 bool tx = !req->local_state_change;
4332 bool sent_frame = false; 4373 bool report_frame = false;
4333 4374
4334 mutex_lock(&ifmgd->mtx); 4375 mutex_lock(&ifmgd->mtx);
4335 4376
@@ -4346,7 +4387,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
4346 ieee80211_destroy_auth_data(sdata, false); 4387 ieee80211_destroy_auth_data(sdata, false);
4347 mutex_unlock(&ifmgd->mtx); 4388 mutex_unlock(&ifmgd->mtx);
4348 4389
4349 sent_frame = tx; 4390 report_frame = true;
4350 goto out; 4391 goto out;
4351 } 4392 }
4352 4393
@@ -4354,12 +4395,12 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
4354 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) { 4395 ether_addr_equal(ifmgd->associated->bssid, req->bssid)) {
4355 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, 4396 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
4356 req->reason_code, tx, frame_buf); 4397 req->reason_code, tx, frame_buf);
4357 sent_frame = tx; 4398 report_frame = true;
4358 } 4399 }
4359 mutex_unlock(&ifmgd->mtx); 4400 mutex_unlock(&ifmgd->mtx);
4360 4401
4361 out: 4402 out:
4362 if (sent_frame) 4403 if (report_frame)
4363 __cfg80211_send_deauth(sdata->dev, frame_buf, 4404 __cfg80211_send_deauth(sdata->dev, frame_buf,
4364 IEEE80211_DEAUTH_FRAME_LEN); 4405 IEEE80211_DEAUTH_FRAME_LEN);
4365 4406
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 0d51877efdb7..d3f414fe67e0 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -688,8 +688,15 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
688 struct ieee80211_sta *pubsta, 688 struct ieee80211_sta *pubsta,
689 struct ieee80211_sta_rates *rates) 689 struct ieee80211_sta_rates *rates)
690{ 690{
691 struct ieee80211_sta_rates *old = rcu_dereference(pubsta->rates); 691 struct ieee80211_sta_rates *old;
692 692
693 /*
694 * mac80211 guarantees that this function will not be called
695 * concurrently, so the following RCU access is safe, even without
696 * extra locking. This can not be checked easily, so we just set
697 * the condition to true.
698 */
699 old = rcu_dereference_protected(pubsta->rates, true);
693 rcu_assign_pointer(pubsta->rates, rates); 700 rcu_assign_pointer(pubsta->rates, rates);
694 if (old) 701 if (old)
695 kfree_rcu(old, rcu_head); 702 kfree_rcu(old, rcu_head);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c8447af76ead..8e2952620256 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -3036,6 +3036,9 @@ static int prepare_for_handlers(struct ieee80211_rx_data *rx,
3036 * and location updates. Note that mac80211 3036 * and location updates. Note that mac80211
3037 * itself never looks at these frames. 3037 * itself never looks at these frames.
3038 */ 3038 */
3039 if (!multicast &&
3040 !ether_addr_equal(sdata->vif.addr, hdr->addr1))
3041 return 0;
3039 if (ieee80211_is_public_action(hdr, skb->len)) 3042 if (ieee80211_is_public_action(hdr, skb->len))
3040 return 1; 3043 return 1;
3041 if (!ieee80211_is_beacon(hdr->frame_control)) 3044 if (!ieee80211_is_beacon(hdr->frame_control))
diff --git a/net/mac80211/tkip.c b/net/mac80211/tkip.c
index 3ed801d90f1e..124b1fdc20d0 100644
--- a/net/mac80211/tkip.c
+++ b/net/mac80211/tkip.c
@@ -208,10 +208,10 @@ void ieee80211_get_tkip_p2k(struct ieee80211_key_conf *keyconf,
208 u32 iv32 = get_unaligned_le32(&data[4]); 208 u32 iv32 = get_unaligned_le32(&data[4]);
209 u16 iv16 = data[2] | (data[0] << 8); 209 u16 iv16 = data[2] | (data[0] << 8);
210 210
211 spin_lock_bh(&key->u.tkip.txlock); 211 spin_lock(&key->u.tkip.txlock);
212 ieee80211_compute_tkip_p1k(key, iv32); 212 ieee80211_compute_tkip_p1k(key, iv32);
213 tkip_mixing_phase2(tk, ctx, iv16, p2k); 213 tkip_mixing_phase2(tk, ctx, iv16, p2k);
214 spin_unlock_bh(&key->u.tkip.txlock); 214 spin_unlock(&key->u.tkip.txlock);
215} 215}
216EXPORT_SYMBOL(ieee80211_get_tkip_p2k); 216EXPORT_SYMBOL(ieee80211_get_tkip_p2k);
217 217
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 3f87fa468b1f..27e07150eb46 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1740,6 +1740,13 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1740 mb(); 1740 mb();
1741 local->resuming = false; 1741 local->resuming = false;
1742 1742
1743 list_for_each_entry(sdata, &local->interfaces, list) {
1744 if (!ieee80211_sdata_running(sdata))
1745 continue;
1746 if (sdata->vif.type == NL80211_IFTYPE_STATION)
1747 ieee80211_sta_restart(sdata);
1748 }
1749
1743 mod_timer(&local->sta_cleanup, jiffies + 1); 1750 mod_timer(&local->sta_cleanup, jiffies + 1);
1744#else 1751#else
1745 WARN_ON(1); 1752 WARN_ON(1);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 388656d5a9ec..3b18dd1be7d9 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -148,7 +148,7 @@ void nf_log_packet(struct net *net,
148 va_start(args, fmt); 148 va_start(args, fmt);
149 vsnprintf(prefix, sizeof(prefix), fmt, args); 149 vsnprintf(prefix, sizeof(prefix), fmt, args);
150 va_end(args); 150 va_end(args);
151 logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix); 151 logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix);
152 } 152 }
153 rcu_read_unlock(); 153 rcu_read_unlock();
154} 154}
@@ -368,17 +368,20 @@ static int __net_init nf_log_net_init(struct net *net)
368 return 0; 368 return 0;
369 369
370out_sysctl: 370out_sysctl:
371#ifdef CONFIG_PROC_FS
371 /* For init_net: errors will trigger panic, don't unroll on error. */ 372 /* For init_net: errors will trigger panic, don't unroll on error. */
372 if (!net_eq(net, &init_net)) 373 if (!net_eq(net, &init_net))
373 remove_proc_entry("nf_log", net->nf.proc_netfilter); 374 remove_proc_entry("nf_log", net->nf.proc_netfilter);
374 375#endif
375 return ret; 376 return ret;
376} 377}
377 378
378static void __net_exit nf_log_net_exit(struct net *net) 379static void __net_exit nf_log_net_exit(struct net *net)
379{ 380{
380 netfilter_log_sysctl_exit(net); 381 netfilter_log_sysctl_exit(net);
382#ifdef CONFIG_PROC_FS
381 remove_proc_entry("nf_log", net->nf.proc_netfilter); 383 remove_proc_entry("nf_log", net->nf.proc_netfilter);
384#endif
382} 385}
383 386
384static struct pernet_operations nf_log_net_ops = { 387static struct pernet_operations nf_log_net_ops = {
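
The netfilter hunks in this area all add the same CONFIG_PROC_FS guard: the proc entry only exists when procfs is compiled in, so the matching remove_proc_entry() in the pernet exit path must be compiled out as well. A schematic pernet registration showing both sides of that symmetry (the example_* names and example_proc_fops are invented for illustration, not from the patch):

    #include <linux/proc_fs.h>
    #include <net/net_namespace.h>

    static int __net_init example_net_init(struct net *net)
    {
    #ifdef CONFIG_PROC_FS
            /* example_proc_fops is a hypothetical file_operations table */
            if (!proc_create("example", 0444, net->nf.proc_netfilter,
                             &example_proc_fops))
                    return -ENOMEM;
    #endif
            return 0;
    }

    static void __net_exit example_net_exit(struct net *net)
    {
    #ifdef CONFIG_PROC_FS
            /* must mirror the guarded creation above */
            remove_proc_entry("example", net->nf.proc_netfilter);
    #endif
    }

    static struct pernet_operations example_net_ops = {
            .init = example_net_init,
            .exit = example_net_exit,
    };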
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index faf1e9300d8a..962e9792e317 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -602,7 +602,8 @@ static struct nf_loginfo default_loginfo = {
602 602
603/* log handler for internal netfilter logging api */ 603/* log handler for internal netfilter logging api */
604void 604void
605nfulnl_log_packet(u_int8_t pf, 605nfulnl_log_packet(struct net *net,
606 u_int8_t pf,
606 unsigned int hooknum, 607 unsigned int hooknum,
607 const struct sk_buff *skb, 608 const struct sk_buff *skb,
608 const struct net_device *in, 609 const struct net_device *in,
@@ -615,7 +616,6 @@ nfulnl_log_packet(u_int8_t pf,
615 const struct nf_loginfo *li; 616 const struct nf_loginfo *li;
616 unsigned int qthreshold; 617 unsigned int qthreshold;
617 unsigned int plen; 618 unsigned int plen;
618 struct net *net = dev_net(in ? in : out);
619 struct nfnl_log_net *log = nfnl_log_pernet(net); 619 struct nfnl_log_net *log = nfnl_log_pernet(net);
620 620
621 if (li_user && li_user->type == NF_LOG_TYPE_ULOG) 621 if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
@@ -1045,7 +1045,9 @@ static int __net_init nfnl_log_net_init(struct net *net)
1045 1045
1046static void __net_exit nfnl_log_net_exit(struct net *net) 1046static void __net_exit nfnl_log_net_exit(struct net *net)
1047{ 1047{
1048#ifdef CONFIG_PROC_FS
1048 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter); 1049 remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
1050#endif
1049} 1051}
1050 1052
1051static struct pernet_operations nfnl_log_net_ops = { 1053static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index 2e0e835baf72..4e27fa035814 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -1285,7 +1285,9 @@ static int __net_init nfnl_queue_net_init(struct net *net)
1285 1285
1286static void __net_exit nfnl_queue_net_exit(struct net *net) 1286static void __net_exit nfnl_queue_net_exit(struct net *net)
1287{ 1287{
1288#ifdef CONFIG_PROC_FS
1288 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter); 1289 remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
1290#endif
1289} 1291}
1290 1292
1291static struct pernet_operations nfnl_queue_net_ops = { 1293static struct pernet_operations nfnl_queue_net_ops = {
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index fe573f6c9e91..491c7d821a0b 100644
--- a/net/netfilter/xt_LOG.c
+++ b/net/netfilter/xt_LOG.c
@@ -466,7 +466,8 @@ log_packet_common(struct sbuff *m,
466 466
467 467
468static void 468static void
469ipt_log_packet(u_int8_t pf, 469ipt_log_packet(struct net *net,
470 u_int8_t pf,
470 unsigned int hooknum, 471 unsigned int hooknum,
471 const struct sk_buff *skb, 472 const struct sk_buff *skb,
472 const struct net_device *in, 473 const struct net_device *in,
@@ -475,7 +476,6 @@ ipt_log_packet(u_int8_t pf,
475 const char *prefix) 476 const char *prefix)
476{ 477{
477 struct sbuff *m; 478 struct sbuff *m;
478 struct net *net = dev_net(in ? in : out);
479 479
480 /* FIXME: Disabled from containers until syslog ns is supported */ 480 /* FIXME: Disabled from containers until syslog ns is supported */
481 if (!net_eq(net, &init_net)) 481 if (!net_eq(net, &init_net))
@@ -797,7 +797,8 @@ fallback:
797} 797}
798 798
799static void 799static void
800ip6t_log_packet(u_int8_t pf, 800ip6t_log_packet(struct net *net,
801 u_int8_t pf,
801 unsigned int hooknum, 802 unsigned int hooknum,
802 const struct sk_buff *skb, 803 const struct sk_buff *skb,
803 const struct net_device *in, 804 const struct net_device *in,
@@ -806,7 +807,6 @@ ip6t_log_packet(u_int8_t pf,
806 const char *prefix) 807 const char *prefix)
807{ 808{
808 struct sbuff *m; 809 struct sbuff *m;
809 struct net *net = dev_net(in ? in : out);
810 810
811 /* FIXME: Disabled from containers until syslog ns is supported */ 811 /* FIXME: Disabled from containers until syslog ns is supported */
812 if (!net_eq(net, &init_net)) 812 if (!net_eq(net, &init_net))
@@ -833,17 +833,18 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
833{ 833{
834 const struct xt_log_info *loginfo = par->targinfo; 834 const struct xt_log_info *loginfo = par->targinfo;
835 struct nf_loginfo li; 835 struct nf_loginfo li;
836 struct net *net = dev_net(par->in ? par->in : par->out);
836 837
837 li.type = NF_LOG_TYPE_LOG; 838 li.type = NF_LOG_TYPE_LOG;
838 li.u.log.level = loginfo->level; 839 li.u.log.level = loginfo->level;
839 li.u.log.logflags = loginfo->logflags; 840 li.u.log.logflags = loginfo->logflags;
840 841
841 if (par->family == NFPROTO_IPV4) 842 if (par->family == NFPROTO_IPV4)
842 ipt_log_packet(NFPROTO_IPV4, par->hooknum, skb, par->in, 843 ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
843 par->out, &li, loginfo->prefix); 844 par->out, &li, loginfo->prefix);
844#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 845#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
845 else if (par->family == NFPROTO_IPV6) 846 else if (par->family == NFPROTO_IPV6)
846 ip6t_log_packet(NFPROTO_IPV6, par->hooknum, skb, par->in, 847 ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
847 par->out, &li, loginfo->prefix); 848 par->out, &li, loginfo->prefix);
848#endif 849#endif
849 else 850 else
diff --git a/net/netfilter/xt_NFLOG.c b/net/netfilter/xt_NFLOG.c
index a17dd0f589b2..fb7497c928a0 100644
--- a/net/netfilter/xt_NFLOG.c
+++ b/net/netfilter/xt_NFLOG.c
@@ -26,13 +26,14 @@ nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
26{ 26{
27 const struct xt_nflog_info *info = par->targinfo; 27 const struct xt_nflog_info *info = par->targinfo;
28 struct nf_loginfo li; 28 struct nf_loginfo li;
29 struct net *net = dev_net(par->in ? par->in : par->out);
29 30
30 li.type = NF_LOG_TYPE_ULOG; 31 li.type = NF_LOG_TYPE_ULOG;
31 li.u.ulog.copy_len = info->len; 32 li.u.ulog.copy_len = info->len;
32 li.u.ulog.group = info->group; 33 li.u.ulog.group = info->group;
33 li.u.ulog.qthreshold = info->threshold; 34 li.u.ulog.qthreshold = info->threshold;
34 35
35 nfulnl_log_packet(par->family, par->hooknum, skb, par->in, 36 nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
36 par->out, &li, info->prefix); 37 par->out, &li, info->prefix);
37 return XT_CONTINUE; 38 return XT_CONTINUE;
38} 39}
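
The nfnetlink_log, xt_LOG and xt_NFLOG changes follow one pattern: the network namespace is resolved once at the hook entry point, from the in/out devices carried in xt_action_param, and passed down explicitly instead of being re-derived deep inside the logger via dev_net(). A hedged sketch of a caller written in that style (the _example name is illustrative, not from the patch):

    #include <linux/netfilter/x_tables.h>
    #include <net/netfilter/nf_log.h>

    static unsigned int
    log_example_tg(struct sk_buff *skb, const struct xt_action_param *par)
    {
            /* resolve the namespace once, at the entry point */
            struct net *net = dev_net(par->in ? par->in : par->out);
            struct nf_loginfo li;

            li.type = NF_LOG_TYPE_LOG;
            li.u.log.level = 4;        /* arbitrary syslog level for the sketch */
            li.u.log.logflags = 0;

            /* hand the namespace to the logging core explicitly */
            nf_log_packet(net, par->family, par->hooknum, skb,
                          par->in, par->out, &li, "example: ");
            return XT_CONTINUE;
    }

Passing net explicitly keeps the loggers usable from call paths where only one of the devices is set and removes the repeated dev_net(in ? in : out) dance from every backend.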
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 25fd1c4e1eec..1eb1a44bfd3d 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -30,17 +30,28 @@ static inline unsigned int optlen(const u_int8_t *opt, unsigned int offset)
30 30
31static unsigned int 31static unsigned int
32tcpoptstrip_mangle_packet(struct sk_buff *skb, 32tcpoptstrip_mangle_packet(struct sk_buff *skb,
33 const struct xt_tcpoptstrip_target_info *info, 33 const struct xt_action_param *par,
34 unsigned int tcphoff, unsigned int minlen) 34 unsigned int tcphoff, unsigned int minlen)
35{ 35{
36 const struct xt_tcpoptstrip_target_info *info = par->targinfo;
36 unsigned int optl, i, j; 37 unsigned int optl, i, j;
37 struct tcphdr *tcph; 38 struct tcphdr *tcph;
38 u_int16_t n, o; 39 u_int16_t n, o;
39 u_int8_t *opt; 40 u_int8_t *opt;
41 int len;
42
43 /* This is a fragment, no TCP header is available */
44 if (par->fragoff != 0)
45 return XT_CONTINUE;
40 46
41 if (!skb_make_writable(skb, skb->len)) 47 if (!skb_make_writable(skb, skb->len))
42 return NF_DROP; 48 return NF_DROP;
43 49
50 len = skb->len - tcphoff;
51 if (len < (int)sizeof(struct tcphdr) ||
52 tcp_hdr(skb)->doff * 4 > len)
53 return NF_DROP;
54
44 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff); 55 tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
45 opt = (u_int8_t *)tcph; 56 opt = (u_int8_t *)tcph;
46 57
@@ -76,7 +87,7 @@ tcpoptstrip_mangle_packet(struct sk_buff *skb,
76static unsigned int 87static unsigned int
77tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par) 88tcpoptstrip_tg4(struct sk_buff *skb, const struct xt_action_param *par)
78{ 89{
79 return tcpoptstrip_mangle_packet(skb, par->targinfo, ip_hdrlen(skb), 90 return tcpoptstrip_mangle_packet(skb, par, ip_hdrlen(skb),
80 sizeof(struct iphdr) + sizeof(struct tcphdr)); 91 sizeof(struct iphdr) + sizeof(struct tcphdr));
81} 92}
82 93
@@ -94,7 +105,7 @@ tcpoptstrip_tg6(struct sk_buff *skb, const struct xt_action_param *par)
94 if (tcphoff < 0) 105 if (tcphoff < 0)
95 return NF_DROP; 106 return NF_DROP;
96 107
97 return tcpoptstrip_mangle_packet(skb, par->targinfo, tcphoff, 108 return tcpoptstrip_mangle_packet(skb, par, tcphoff,
98 sizeof(*ipv6h) + sizeof(struct tcphdr)); 109 sizeof(*ipv6h) + sizeof(struct tcphdr));
99} 110}
100#endif 111#endif
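
The TCPOPTSTRIP hunk bails out on fragments and then checks that the claimed TCP header actually fits inside the data that follows tcphoff before any option bytes are rewritten. The same bounds test, pulled out into a standalone helper purely for illustration (not a drop-in for the code above), reads:

    #include <linux/skbuff.h>
    #include <linux/tcp.h>

    /* True if a full TCP header, including its options, is present at
     * tcphoff within the (already writable) skb data. */
    static bool tcp_header_fits(const struct sk_buff *skb, unsigned int tcphoff)
    {
            int len = skb->len - tcphoff;
            const struct tcphdr *th;

            if (len < (int)sizeof(struct tcphdr))
                    return false;           /* not even a fixed header */

            th = (const struct tcphdr *)(skb_network_header(skb) + tcphoff);
            if (th->doff * 4 > len)
                    return false;           /* data offset points past what we hold */

            return true;
    }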
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index d8d424337550..6bb1d42f0fac 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -245,6 +245,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry,
245 } 245 }
246} 246}
247 247
248/**
249 * netlbl_domhsh_validate - Validate a new domain mapping entry
250 * @entry: the entry to validate
251 *
252 * This function validates the new domain mapping entry to ensure that it is
253 * a valid entry. Returns zero on success, negative values on failure.
254 *
255 */
256static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry)
257{
258 struct netlbl_af4list *iter4;
259 struct netlbl_domaddr4_map *map4;
260#if IS_ENABLED(CONFIG_IPV6)
261 struct netlbl_af6list *iter6;
262 struct netlbl_domaddr6_map *map6;
263#endif /* IPv6 */
264
265 if (entry == NULL)
266 return -EINVAL;
267
268 switch (entry->type) {
269 case NETLBL_NLTYPE_UNLABELED:
270 if (entry->type_def.cipsov4 != NULL ||
271 entry->type_def.addrsel != NULL)
272 return -EINVAL;
273 break;
274 case NETLBL_NLTYPE_CIPSOV4:
275 if (entry->type_def.cipsov4 == NULL)
276 return -EINVAL;
277 break;
278 case NETLBL_NLTYPE_ADDRSELECT:
279 netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) {
280 map4 = netlbl_domhsh_addr4_entry(iter4);
281 switch (map4->type) {
282 case NETLBL_NLTYPE_UNLABELED:
283 if (map4->type_def.cipsov4 != NULL)
284 return -EINVAL;
285 break;
286 case NETLBL_NLTYPE_CIPSOV4:
287 if (map4->type_def.cipsov4 == NULL)
288 return -EINVAL;
289 break;
290 default:
291 return -EINVAL;
292 }
293 }
294#if IS_ENABLED(CONFIG_IPV6)
295 netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) {
296 map6 = netlbl_domhsh_addr6_entry(iter6);
297 switch (map6->type) {
298 case NETLBL_NLTYPE_UNLABELED:
299 break;
300 default:
301 return -EINVAL;
302 }
303 }
304#endif /* IPv6 */
305 break;
306 default:
307 return -EINVAL;
308 }
309
310 return 0;
311}
312
248/* 313/*
249 * Domain Hash Table Functions 314 * Domain Hash Table Functions
250 */ 315 */
@@ -311,6 +376,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry,
311 struct netlbl_af6list *tmp6; 376 struct netlbl_af6list *tmp6;
312#endif /* IPv6 */ 377#endif /* IPv6 */
313 378
379 ret_val = netlbl_domhsh_validate(entry);
380 if (ret_val != 0)
381 return ret_val;
382
314 /* XXX - we can remove this RCU read lock as the spinlock protects the 383 /* XXX - we can remove this RCU read lock as the spinlock protects the
315 * entire function, but before we do we need to fixup the 384 * entire function, but before we do we need to fixup the
316 * netlbl_af[4,6]list RCU functions to do "the right thing" with 385 * netlbl_af[4,6]list RCU functions to do "the right thing" with
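
The new validator enforces a simple invariant: the mapping type and its type_def payload must agree (UNLABELED carries nothing, CIPSOV4 must carry a CIPSO definition, address-select lists are checked per address), and netlbl_domhsh_add() now rejects entries that do not. As a rough illustration of an entry that passes, assuming the usual kzalloc-based construction and omitting the domain/audit plumbing:

    #include <linux/slab.h>

    /* Illustrative only: an UNLABELED mapping is valid for the new
     * netlbl_domhsh_validate() exactly when it carries no CIPSOv4 or
     * address-select definition. */
    static struct netlbl_dom_map *make_unlabeled_entry_example(void)
    {
            struct netlbl_dom_map *entry;

            entry = kzalloc(sizeof(*entry), GFP_KERNEL);
            if (!entry)
                    return NULL;
            entry->type = NETLBL_NLTYPE_UNLABELED;
            /* type_def.cipsov4 and type_def.addrsel stay NULL, so the
             * validator returns 0; setting either would now be -EINVAL. */
            return entry;
    }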
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 7da6b457f66a..fc2f78d6a9b4 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -52,6 +52,8 @@
52#include <linux/sunrpc/gss_api.h> 52#include <linux/sunrpc/gss_api.h>
53#include <asm/uaccess.h> 53#include <asm/uaccess.h>
54 54
55#include "../netns.h"
56
55static const struct rpc_authops authgss_ops; 57static const struct rpc_authops authgss_ops;
56 58
57static const struct rpc_credops gss_credops; 59static const struct rpc_credops gss_credops;
@@ -85,8 +87,6 @@ struct gss_auth {
85}; 87};
86 88
87/* pipe_version >= 0 if and only if someone has a pipe open. */ 89/* pipe_version >= 0 if and only if someone has a pipe open. */
88static int pipe_version = -1;
89static atomic_t pipe_users = ATOMIC_INIT(0);
90static DEFINE_SPINLOCK(pipe_version_lock); 90static DEFINE_SPINLOCK(pipe_version_lock);
91static struct rpc_wait_queue pipe_version_rpc_waitqueue; 91static struct rpc_wait_queue pipe_version_rpc_waitqueue;
92static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue); 92static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
@@ -266,24 +266,27 @@ struct gss_upcall_msg {
266 char databuf[UPCALL_BUF_LEN]; 266 char databuf[UPCALL_BUF_LEN];
267}; 267};
268 268
269static int get_pipe_version(void) 269static int get_pipe_version(struct net *net)
270{ 270{
271 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
271 int ret; 272 int ret;
272 273
273 spin_lock(&pipe_version_lock); 274 spin_lock(&pipe_version_lock);
274 if (pipe_version >= 0) { 275 if (sn->pipe_version >= 0) {
275 atomic_inc(&pipe_users); 276 atomic_inc(&sn->pipe_users);
276 ret = pipe_version; 277 ret = sn->pipe_version;
277 } else 278 } else
278 ret = -EAGAIN; 279 ret = -EAGAIN;
279 spin_unlock(&pipe_version_lock); 280 spin_unlock(&pipe_version_lock);
280 return ret; 281 return ret;
281} 282}
282 283
283static void put_pipe_version(void) 284static void put_pipe_version(struct net *net)
284{ 285{
285 if (atomic_dec_and_lock(&pipe_users, &pipe_version_lock)) { 286 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
286 pipe_version = -1; 287
288 if (atomic_dec_and_lock(&sn->pipe_users, &pipe_version_lock)) {
289 sn->pipe_version = -1;
287 spin_unlock(&pipe_version_lock); 290 spin_unlock(&pipe_version_lock);
288 } 291 }
289} 292}
@@ -291,9 +294,10 @@ static void put_pipe_version(void)
291static void 294static void
292gss_release_msg(struct gss_upcall_msg *gss_msg) 295gss_release_msg(struct gss_upcall_msg *gss_msg)
293{ 296{
297 struct net *net = rpc_net_ns(gss_msg->auth->client);
294 if (!atomic_dec_and_test(&gss_msg->count)) 298 if (!atomic_dec_and_test(&gss_msg->count))
295 return; 299 return;
296 put_pipe_version(); 300 put_pipe_version(net);
297 BUG_ON(!list_empty(&gss_msg->list)); 301 BUG_ON(!list_empty(&gss_msg->list));
298 if (gss_msg->ctx != NULL) 302 if (gss_msg->ctx != NULL)
299 gss_put_ctx(gss_msg->ctx); 303 gss_put_ctx(gss_msg->ctx);
@@ -439,7 +443,10 @@ static void gss_encode_msg(struct gss_upcall_msg *gss_msg,
439 struct rpc_clnt *clnt, 443 struct rpc_clnt *clnt,
440 const char *service_name) 444 const char *service_name)
441{ 445{
442 if (pipe_version == 0) 446 struct net *net = rpc_net_ns(clnt);
447 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
448
449 if (sn->pipe_version == 0)
443 gss_encode_v0_msg(gss_msg); 450 gss_encode_v0_msg(gss_msg);
444 else /* pipe_version == 1 */ 451 else /* pipe_version == 1 */
445 gss_encode_v1_msg(gss_msg, clnt, service_name); 452 gss_encode_v1_msg(gss_msg, clnt, service_name);
@@ -455,7 +462,7 @@ gss_alloc_msg(struct gss_auth *gss_auth, struct rpc_clnt *clnt,
455 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS); 462 gss_msg = kzalloc(sizeof(*gss_msg), GFP_NOFS);
456 if (gss_msg == NULL) 463 if (gss_msg == NULL)
457 return ERR_PTR(-ENOMEM); 464 return ERR_PTR(-ENOMEM);
458 vers = get_pipe_version(); 465 vers = get_pipe_version(rpc_net_ns(clnt));
459 if (vers < 0) { 466 if (vers < 0) {
460 kfree(gss_msg); 467 kfree(gss_msg);
461 return ERR_PTR(vers); 468 return ERR_PTR(vers);
@@ -559,24 +566,34 @@ out:
559static inline int 566static inline int
560gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred) 567gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
561{ 568{
569 struct net *net = rpc_net_ns(gss_auth->client);
570 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
562 struct rpc_pipe *pipe; 571 struct rpc_pipe *pipe;
563 struct rpc_cred *cred = &gss_cred->gc_base; 572 struct rpc_cred *cred = &gss_cred->gc_base;
564 struct gss_upcall_msg *gss_msg; 573 struct gss_upcall_msg *gss_msg;
574 unsigned long timeout;
565 DEFINE_WAIT(wait); 575 DEFINE_WAIT(wait);
566 int err = 0; 576 int err;
567 577
568 dprintk("RPC: %s for uid %u\n", 578 dprintk("RPC: %s for uid %u\n",
569 __func__, from_kuid(&init_user_ns, cred->cr_uid)); 579 __func__, from_kuid(&init_user_ns, cred->cr_uid));
570retry: 580retry:
581 err = 0;
582 /* Default timeout is 15s unless we know that gssd is not running */
583 timeout = 15 * HZ;
584 if (!sn->gssd_running)
585 timeout = HZ >> 2;
571 gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred); 586 gss_msg = gss_setup_upcall(gss_auth->client, gss_auth, cred);
572 if (PTR_ERR(gss_msg) == -EAGAIN) { 587 if (PTR_ERR(gss_msg) == -EAGAIN) {
573 err = wait_event_interruptible_timeout(pipe_version_waitqueue, 588 err = wait_event_interruptible_timeout(pipe_version_waitqueue,
574 pipe_version >= 0, 15*HZ); 589 sn->pipe_version >= 0, timeout);
575 if (pipe_version < 0) { 590 if (sn->pipe_version < 0) {
591 if (err == 0)
592 sn->gssd_running = 0;
576 warn_gssd(); 593 warn_gssd();
577 err = -EACCES; 594 err = -EACCES;
578 } 595 }
579 if (err) 596 if (err < 0)
580 goto out; 597 goto out;
581 goto retry; 598 goto retry;
582 } 599 }
@@ -707,20 +724,22 @@ out:
707 724
708static int gss_pipe_open(struct inode *inode, int new_version) 725static int gss_pipe_open(struct inode *inode, int new_version)
709{ 726{
727 struct net *net = inode->i_sb->s_fs_info;
728 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
710 int ret = 0; 729 int ret = 0;
711 730
712 spin_lock(&pipe_version_lock); 731 spin_lock(&pipe_version_lock);
713 if (pipe_version < 0) { 732 if (sn->pipe_version < 0) {
714 /* First open of any gss pipe determines the version: */ 733 /* First open of any gss pipe determines the version: */
715 pipe_version = new_version; 734 sn->pipe_version = new_version;
716 rpc_wake_up(&pipe_version_rpc_waitqueue); 735 rpc_wake_up(&pipe_version_rpc_waitqueue);
717 wake_up(&pipe_version_waitqueue); 736 wake_up(&pipe_version_waitqueue);
718 } else if (pipe_version != new_version) { 737 } else if (sn->pipe_version != new_version) {
719 /* Trying to open a pipe of a different version */ 738 /* Trying to open a pipe of a different version */
720 ret = -EBUSY; 739 ret = -EBUSY;
721 goto out; 740 goto out;
722 } 741 }
723 atomic_inc(&pipe_users); 742 atomic_inc(&sn->pipe_users);
724out: 743out:
725 spin_unlock(&pipe_version_lock); 744 spin_unlock(&pipe_version_lock);
726 return ret; 745 return ret;
@@ -740,6 +759,7 @@ static int gss_pipe_open_v1(struct inode *inode)
740static void 759static void
741gss_pipe_release(struct inode *inode) 760gss_pipe_release(struct inode *inode)
742{ 761{
762 struct net *net = inode->i_sb->s_fs_info;
743 struct rpc_pipe *pipe = RPC_I(inode)->pipe; 763 struct rpc_pipe *pipe = RPC_I(inode)->pipe;
744 struct gss_upcall_msg *gss_msg; 764 struct gss_upcall_msg *gss_msg;
745 765
@@ -758,7 +778,7 @@ restart:
758 } 778 }
759 spin_unlock(&pipe->lock); 779 spin_unlock(&pipe->lock);
760 780
761 put_pipe_version(); 781 put_pipe_version(net);
762} 782}
763 783
764static void 784static void
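
The auth_gss changes move pipe_version and pipe_users from file-scope globals into struct sunrpc_net, so every access first resolves the per-namespace block with net_generic(). The lookup pattern, condensed into one illustrative helper (gss_pipe_in_use_example is not a real function, and the surrounding in-tree context is assumed):

    /* Given an RPC client, find its namespace's sunrpc state and report
     * whether any gss pipe is currently open in that namespace. */
    static bool gss_pipe_in_use_example(struct rpc_clnt *clnt)
    {
            struct net *net = rpc_net_ns(clnt);
            struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
            bool in_use;

            spin_lock(&pipe_version_lock);
            in_use = sn->pipe_version >= 0;     /* -1 means no pipe open */
            spin_unlock(&pipe_version_lock);
            return in_use;
    }

The same lookup appears in gss_pipe_open()/gss_pipe_release() via inode->i_sb->s_fs_info, which is how the pipefs superblock records the namespace it was mounted in.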
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 7111a4c9113b..74d948f5d5a1 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -28,7 +28,11 @@ struct sunrpc_net {
28 wait_queue_head_t gssp_wq; 28 wait_queue_head_t gssp_wq;
29 struct rpc_clnt *gssp_clnt; 29 struct rpc_clnt *gssp_clnt;
30 int use_gss_proxy; 30 int use_gss_proxy;
31 int pipe_version;
32 atomic_t pipe_users;
31 struct proc_dir_entry *use_gssp_proc; 33 struct proc_dir_entry *use_gssp_proc;
34
35 unsigned int gssd_running;
32}; 36};
33 37
34extern int sunrpc_net_id; 38extern int sunrpc_net_id;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a9129f8d7070..e7ce4b3eb0bd 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -216,11 +216,14 @@ rpc_destroy_inode(struct inode *inode)
216static int 216static int
217rpc_pipe_open(struct inode *inode, struct file *filp) 217rpc_pipe_open(struct inode *inode, struct file *filp)
218{ 218{
219 struct net *net = inode->i_sb->s_fs_info;
220 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
219 struct rpc_pipe *pipe; 221 struct rpc_pipe *pipe;
220 int first_open; 222 int first_open;
221 int res = -ENXIO; 223 int res = -ENXIO;
222 224
223 mutex_lock(&inode->i_mutex); 225 mutex_lock(&inode->i_mutex);
226 sn->gssd_running = 1;
224 pipe = RPC_I(inode)->pipe; 227 pipe = RPC_I(inode)->pipe;
225 if (pipe == NULL) 228 if (pipe == NULL)
226 goto out; 229 goto out;
@@ -1069,6 +1072,8 @@ void rpc_pipefs_init_net(struct net *net)
1069 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 1072 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
1070 1073
1071 mutex_init(&sn->pipefs_sb_lock); 1074 mutex_init(&sn->pipefs_sb_lock);
1075 sn->gssd_running = 1;
1076 sn->pipe_version = -1;
1072} 1077}
1073 1078
1074/* 1079/*
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index f8529fc8e542..5356b120dbf8 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -324,11 +324,17 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
324 * Note: If the task is ASYNC, and is being made runnable after sitting on an 324 * Note: If the task is ASYNC, and is being made runnable after sitting on an
325 * rpc_wait_queue, this must be called with the queue spinlock held to protect 325 * rpc_wait_queue, this must be called with the queue spinlock held to protect
326 * the wait queue operation. 326 * the wait queue operation.
327 * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
328 * which is needed to ensure that __rpc_execute() doesn't loop (due to the
329 * lockless RPC_IS_QUEUED() test) before we've had a chance to test
330 * the RPC_TASK_RUNNING flag.
327 */ 331 */
328static void rpc_make_runnable(struct rpc_task *task) 332static void rpc_make_runnable(struct rpc_task *task)
329{ 333{
334 bool need_wakeup = !rpc_test_and_set_running(task);
335
330 rpc_clear_queued(task); 336 rpc_clear_queued(task);
331 if (rpc_test_and_set_running(task)) 337 if (!need_wakeup)
332 return; 338 return;
333 if (RPC_IS_ASYNC(task)) { 339 if (RPC_IS_ASYNC(task)) {
334 INIT_WORK(&task->u.tk_work, rpc_async_schedule); 340 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
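
The comment added above is the whole story: RPC_TASK_RUNNING must be claimed before the queued bit is cleared, so that __rpc_execute(), which spins on a lockless RPC_IS_QUEUED() test, can never observe the task as dequeued while the running flag is still unclaimed. Restated as a schematic of the new ordering (in-tree helpers assumed, body truncated):

    static void make_runnable_example(struct rpc_task *task)
    {
            /* claim RUNNING first ... */
            bool need_wakeup = !rpc_test_and_set_running(task);

            /* ... then let lockless RPC_IS_QUEUED() observers proceed */
            rpc_clear_queued(task);
            if (!need_wakeup)
                    return;         /* someone else already owns execution */

            /* ...hand the task to the workqueue or wake the waiter... */
    }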
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 84c9ad7e1dca..73405e00c800 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -638,17 +638,21 @@ int wiphy_register(struct wiphy *wiphy)
638 * cfg80211_mutex lock 638 * cfg80211_mutex lock
639 */ 639 */
640 res = rfkill_register(rdev->rfkill); 640 res = rfkill_register(rdev->rfkill);
641 if (res) 641 if (res) {
642 goto out_rm_dev; 642 device_del(&rdev->wiphy.dev);
643
644 mutex_lock(&cfg80211_mutex);
645 debugfs_remove_recursive(rdev->wiphy.debugfsdir);
646 list_del_rcu(&rdev->list);
647 wiphy_regulatory_deregister(wiphy);
648 mutex_unlock(&cfg80211_mutex);
649 return res;
650 }
643 651
644 rtnl_lock(); 652 rtnl_lock();
645 rdev->wiphy.registered = true; 653 rdev->wiphy.registered = true;
646 rtnl_unlock(); 654 rtnl_unlock();
647 return 0; 655 return 0;
648
649out_rm_dev:
650 device_del(&rdev->wiphy.dev);
651 return res;
652} 656}
653EXPORT_SYMBOL(wiphy_register); 657EXPORT_SYMBOL(wiphy_register);
654 658
@@ -866,7 +870,6 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
866#endif 870#endif
867 __cfg80211_disconnect(rdev, dev, 871 __cfg80211_disconnect(rdev, dev,
868 WLAN_REASON_DEAUTH_LEAVING, true); 872 WLAN_REASON_DEAUTH_LEAVING, true);
869 cfg80211_mlme_down(rdev, dev);
870 wdev_unlock(wdev); 873 wdev_unlock(wdev);
871 break; 874 break;
872 case NL80211_IFTYPE_MESH_POINT: 875 case NL80211_IFTYPE_MESH_POINT:
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index afa283841e8c..dfdb5e643211 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -7577,6 +7577,8 @@ static int nl80211_send_wowlan_tcp(struct sk_buff *msg,
7577 &tcp->payload_tok)) 7577 &tcp->payload_tok))
7578 return -ENOBUFS; 7578 return -ENOBUFS;
7579 7579
7580 nla_nest_end(msg, nl_tcp);
7581
7580 return 0; 7582 return 0;
7581} 7583}
7582 7584
@@ -9970,6 +9972,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
9970 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 9972 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
9971 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, 9973 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
9972 netdev->ifindex)) || 9974 netdev->ifindex)) ||
9975 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
9973 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) || 9976 nla_put_u32(msg, NL80211_ATTR_WIPHY_FREQ, freq) ||
9974 (sig_dbm && 9977 (sig_dbm &&
9975 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) || 9978 nla_put_u32(msg, NL80211_ATTR_RX_SIGNAL_DBM, sig_dbm)) ||
@@ -10010,6 +10013,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
10010 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) || 10013 if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
10011 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX, 10014 (netdev && nla_put_u32(msg, NL80211_ATTR_IFINDEX,
10012 netdev->ifindex)) || 10015 netdev->ifindex)) ||
10016 nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)) ||
10013 nla_put(msg, NL80211_ATTR_FRAME, len, buf) || 10017 nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
10014 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) || 10018 nla_put_u64(msg, NL80211_ATTR_COOKIE, cookie) ||
10015 (ack && nla_put_flag(msg, NL80211_ATTR_ACK))) 10019 (ack && nla_put_flag(msg, NL80211_ATTR_ACK)))
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a9dc5c736df0..8b5eddfba1e5 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -961,7 +961,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
961 /* was it connected by userspace SME? */ 961 /* was it connected by userspace SME? */
962 if (!wdev->conn) { 962 if (!wdev->conn) {
963 cfg80211_mlme_down(rdev, dev); 963 cfg80211_mlme_down(rdev, dev);
964 return 0; 964 goto disconnect;
965 } 965 }
966 966
967 if (wdev->sme_state == CFG80211_SME_CONNECTING && 967 if (wdev->sme_state == CFG80211_SME_CONNECTING &&
@@ -987,6 +987,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
987 return err; 987 return err;
988 } 988 }
989 989
990 disconnect:
990 if (wdev->sme_state == CFG80211_SME_CONNECTED) 991 if (wdev->sme_state == CFG80211_SME_CONNECTED)
991 __cfg80211_disconnected(dev, NULL, 0, 0, false); 992 __cfg80211_disconnected(dev, NULL, 0, 0, false);
992 else if (wdev->sme_state == CFG80211_SME_CONNECTING) 993 else if (wdev->sme_state == CFG80211_SME_CONNECTING)
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index ecd4fcec3c94..5755bc14abbd 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2441,6 +2441,7 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup,
2441 TP_STRUCT__entry( 2441 TP_STRUCT__entry(
2442 WIPHY_ENTRY 2442 WIPHY_ENTRY
2443 WDEV_ENTRY 2443 WDEV_ENTRY
2444 __field(bool, non_wireless)
2444 __field(bool, disconnect) 2445 __field(bool, disconnect)
2445 __field(bool, magic_pkt) 2446 __field(bool, magic_pkt)
2446 __field(bool, gtk_rekey_failure) 2447 __field(bool, gtk_rekey_failure)
@@ -2449,20 +2450,22 @@ TRACE_EVENT(cfg80211_report_wowlan_wakeup,
2449 __field(bool, rfkill_release) 2450 __field(bool, rfkill_release)
2450 __field(s32, pattern_idx) 2451 __field(s32, pattern_idx)
2451 __field(u32, packet_len) 2452 __field(u32, packet_len)
2452 __dynamic_array(u8, packet, wakeup->packet_present_len) 2453 __dynamic_array(u8, packet,
2454 wakeup ? wakeup->packet_present_len : 0)
2453 ), 2455 ),
2454 TP_fast_assign( 2456 TP_fast_assign(
2455 WIPHY_ASSIGN; 2457 WIPHY_ASSIGN;
2456 WDEV_ASSIGN; 2458 WDEV_ASSIGN;
2457 __entry->disconnect = wakeup->disconnect; 2459 __entry->non_wireless = !wakeup;
2458 __entry->magic_pkt = wakeup->magic_pkt; 2460 __entry->disconnect = wakeup ? wakeup->disconnect : false;
2459 __entry->gtk_rekey_failure = wakeup->gtk_rekey_failure; 2461 __entry->magic_pkt = wakeup ? wakeup->magic_pkt : false;
2460 __entry->eap_identity_req = wakeup->eap_identity_req; 2462 __entry->gtk_rekey_failure = wakeup ? wakeup->gtk_rekey_failure : false;
2461 __entry->four_way_handshake = wakeup->four_way_handshake; 2463 __entry->eap_identity_req = wakeup ? wakeup->eap_identity_req : false;
2462 __entry->rfkill_release = wakeup->rfkill_release; 2464 __entry->four_way_handshake = wakeup ? wakeup->four_way_handshake : false;
2463 __entry->pattern_idx = wakeup->pattern_idx; 2465 __entry->rfkill_release = wakeup ? wakeup->rfkill_release : false;
2464 __entry->packet_len = wakeup->packet_len; 2466 __entry->pattern_idx = wakeup ? wakeup->pattern_idx : false;
2465 if (wakeup->packet && wakeup->packet_present_len) 2467 __entry->packet_len = wakeup ? wakeup->packet_len : false;
2468 if (wakeup && wakeup->packet && wakeup->packet_present_len)
2466 memcpy(__get_dynamic_array(packet), wakeup->packet, 2469 memcpy(__get_dynamic_array(packet), wakeup->packet,
2467 wakeup->packet_present_len); 2470 wakeup->packet_present_len);
2468 ), 2471 ),
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index bcfda8921b5b..0cf003dfa8fc 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -64,6 +64,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
64 64
65 if (unlikely(x->km.state != XFRM_STATE_VALID)) { 65 if (unlikely(x->km.state != XFRM_STATE_VALID)) {
66 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID); 66 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
67 err = -EINVAL;
67 goto error; 68 goto error;
68 } 69 }
69 70
diff --git a/scripts/package/Makefile b/scripts/package/Makefile
index 84a406070f6f..a4f31c900fa6 100644
--- a/scripts/package/Makefile
+++ b/scripts/package/Makefile
@@ -63,7 +63,7 @@ binrpm-pkg: FORCE
63 mv -f $(objtree)/.tmp_version $(objtree)/.version 63 mv -f $(objtree)/.tmp_version $(objtree)/.version
64 64
65 $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \ 65 $(RPM) $(RPMOPTS) --define "_builddir $(objtree)" --target \
66 $(UTS_MACHINE) -bb $< 66 $(UTS_MACHINE) -bb $(objtree)/binkernel.spec
67 rm binkernel.spec 67 rm binkernel.spec
68 68
69# Deb target 69# Deb target
diff --git a/sound/aoa/fabrics/layout.c b/sound/aoa/fabrics/layout.c
index 552b97afbca5..61ab640e195f 100644
--- a/sound/aoa/fabrics/layout.c
+++ b/sound/aoa/fabrics/layout.c
@@ -113,6 +113,7 @@ MODULE_ALIAS("sound-layout-100");
113MODULE_ALIAS("aoa-device-id-14"); 113MODULE_ALIAS("aoa-device-id-14");
114MODULE_ALIAS("aoa-device-id-22"); 114MODULE_ALIAS("aoa-device-id-22");
115MODULE_ALIAS("aoa-device-id-35"); 115MODULE_ALIAS("aoa-device-id-35");
116MODULE_ALIAS("aoa-device-id-44");
116 117
117/* onyx with all but microphone connected */ 118/* onyx with all but microphone connected */
118static struct codec_connection onyx_connections_nomic[] = { 119static struct codec_connection onyx_connections_nomic[] = {
@@ -361,6 +362,13 @@ static struct layout layouts[] = {
361 .connections = tas_connections_nolineout, 362 .connections = tas_connections_nolineout,
362 }, 363 },
363 }, 364 },
365 /* PowerBook6,5 */
366 { .device_id = 44,
367 .codecs[0] = {
368 .name = "tas",
369 .connections = tas_connections_all,
370 },
371 },
364 /* PowerBook6,7 */ 372 /* PowerBook6,7 */
365 { .layout_id = 80, 373 { .layout_id = 80,
366 .codecs[0] = { 374 .codecs[0] = {
diff --git a/sound/aoa/soundbus/i2sbus/core.c b/sound/aoa/soundbus/i2sbus/core.c
index 010658335881..15e76131b501 100644
--- a/sound/aoa/soundbus/i2sbus/core.c
+++ b/sound/aoa/soundbus/i2sbus/core.c
@@ -200,7 +200,8 @@ static int i2sbus_add_dev(struct macio_dev *macio,
200 * We probably cannot handle all device-id machines, 200 * We probably cannot handle all device-id machines,
201 * so restrict to those we do handle for now. 201 * so restrict to those we do handle for now.
202 */ 202 */
203 if (id && (*id == 22 || *id == 14 || *id == 35)) { 203 if (id && (*id == 22 || *id == 14 || *id == 35 ||
204 *id == 44)) {
204 snprintf(dev->sound.modalias, 32, 205 snprintf(dev->sound.modalias, 32,
205 "aoa-device-id-%d", *id); 206 "aoa-device-id-%d", *id);
206 ok = 1; 207 ok = 1;
diff --git a/sound/oss/Kconfig b/sound/oss/Kconfig
index 51c4ba95a32d..1a9640254433 100644
--- a/sound/oss/Kconfig
+++ b/sound/oss/Kconfig
@@ -250,7 +250,7 @@ config MSND_FIFOSIZE
250menuconfig SOUND_OSS 250menuconfig SOUND_OSS
251 tristate "OSS sound modules" 251 tristate "OSS sound modules"
252 depends on ISA_DMA_API && VIRT_TO_BUS 252 depends on ISA_DMA_API && VIRT_TO_BUS
253 depends on !ISA_DMA_SUPPORT_BROKEN 253 depends on !GENERIC_ISA_DMA_SUPPORT_BROKEN
254 help 254 help
255 OSS is the Open Sound System suite of sound card drivers. They make 255 OSS is the Open Sound System suite of sound card drivers. They make
256 sound programming easier since they provide a common API. Say Y or 256 sound programming easier since they provide a common API. Say Y or
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index ac079f93c535..ae85bbd2e6f8 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -606,6 +606,10 @@ static bool is_active_nid(struct hda_codec *codec, hda_nid_t nid,
606 return false; 606 return false;
607} 607}
608 608
609/* check whether the NID is referred by any active paths */
610#define is_active_nid_for_any(codec, nid) \
611 is_active_nid(codec, nid, HDA_OUTPUT, 0)
612
609/* get the default amp value for the target state */ 613/* get the default amp value for the target state */
610static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid, 614static int get_amp_val_to_activate(struct hda_codec *codec, hda_nid_t nid,
611 int dir, unsigned int caps, bool enable) 615 int dir, unsigned int caps, bool enable)
@@ -759,7 +763,8 @@ static void path_power_down_sync(struct hda_codec *codec, struct nid_path *path)
759 763
760 for (i = 0; i < path->depth; i++) { 764 for (i = 0; i < path->depth; i++) {
761 hda_nid_t nid = path->path[i]; 765 hda_nid_t nid = path->path[i];
762 if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3)) { 766 if (!snd_hda_check_power_state(codec, nid, AC_PWRST_D3) &&
767 !is_active_nid_for_any(codec, nid)) {
763 snd_hda_codec_write(codec, nid, 0, 768 snd_hda_codec_write(codec, nid, 0,
764 AC_VERB_SET_POWER_STATE, 769 AC_VERB_SET_POWER_STATE,
765 AC_PWRST_D3); 770 AC_PWRST_D3);
@@ -4157,7 +4162,7 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
4157 return power_state; 4162 return power_state;
4158 if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER) 4163 if (get_wcaps_type(get_wcaps(codec, nid)) >= AC_WID_POWER)
4159 return power_state; 4164 return power_state;
4160 if (is_active_nid(codec, nid, HDA_OUTPUT, 0)) 4165 if (is_active_nid_for_any(codec, nid))
4161 return power_state; 4166 return power_state;
4162 return AC_PWRST_D3; 4167 return AC_PWRST_D3;
4163} 4168}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 6bf47f7326ad..59d2e91a9ab6 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3482,6 +3482,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3482 SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3482 SND_PCI_QUIRK(0x1028, 0x05c9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3483 SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3483 SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3484 SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE), 3484 SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3485 SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
3485 SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3486 SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3486 SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3487 SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
3487 SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 3488 SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
diff --git a/sound/soc/codecs/ab8500-codec.h b/sound/soc/codecs/ab8500-codec.h
index 114f69a0c629..306d0bc8455f 100644
--- a/sound/soc/codecs/ab8500-codec.h
+++ b/sound/soc/codecs/ab8500-codec.h
@@ -348,25 +348,25 @@
348 348
349/* AB8500_ADSLOTSELX */ 349/* AB8500_ADSLOTSELX */
350#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00 350#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_ODD 0x00
351#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x01 351#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_ODD 0x10
352#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x02 352#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_ODD 0x20
353#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x03 353#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_ODD 0x30
354#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x04 354#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_ODD 0x40
355#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x05 355#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_ODD 0x50
356#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x06 356#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_ODD 0x60
357#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x07 357#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_ODD 0x70
358#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x08 358#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_ODD 0x80
359#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0x0F 359#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_ODD 0xF0
360#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00 360#define AB8500_ADSLOTSELX_AD_OUT1_TO_SLOT_EVEN 0x00
361#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x10 361#define AB8500_ADSLOTSELX_AD_OUT2_TO_SLOT_EVEN 0x01
362#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x20 362#define AB8500_ADSLOTSELX_AD_OUT3_TO_SLOT_EVEN 0x02
363#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x30 363#define AB8500_ADSLOTSELX_AD_OUT4_TO_SLOT_EVEN 0x03
364#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x40 364#define AB8500_ADSLOTSELX_AD_OUT5_TO_SLOT_EVEN 0x04
365#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x50 365#define AB8500_ADSLOTSELX_AD_OUT6_TO_SLOT_EVEN 0x05
366#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x60 366#define AB8500_ADSLOTSELX_AD_OUT7_TO_SLOT_EVEN 0x06
367#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x70 367#define AB8500_ADSLOTSELX_AD_OUT8_TO_SLOT_EVEN 0x07
368#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x80 368#define AB8500_ADSLOTSELX_ZEROES_TO_SLOT_EVEN 0x08
369#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0xF0 369#define AB8500_ADSLOTSELX_TRISTATE_TO_SLOT_EVEN 0x0F
370#define AB8500_ADSLOTSELX_EVEN_SHIFT 0 370#define AB8500_ADSLOTSELX_EVEN_SHIFT 0
371#define AB8500_ADSLOTSELX_ODD_SHIFT 4 371#define AB8500_ADSLOTSELX_ODD_SHIFT 4
372 372
diff --git a/sound/soc/codecs/da7213.c b/sound/soc/codecs/da7213.c
index 41230ad1c3e0..4a6f1daf911f 100644
--- a/sound/soc/codecs/da7213.c
+++ b/sound/soc/codecs/da7213.c
@@ -1488,17 +1488,17 @@ static int da7213_probe(struct snd_soc_codec *codec)
1488 DA7213_DMIC_DATA_SEL_SHIFT); 1488 DA7213_DMIC_DATA_SEL_SHIFT);
1489 break; 1489 break;
1490 } 1490 }
1491 switch (pdata->dmic_data_sel) { 1491 switch (pdata->dmic_samplephase) {
1492 case DA7213_DMIC_SAMPLE_ON_CLKEDGE: 1492 case DA7213_DMIC_SAMPLE_ON_CLKEDGE:
1493 case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE: 1493 case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE:
1494 dmic_cfg |= (pdata->dmic_data_sel << 1494 dmic_cfg |= (pdata->dmic_samplephase <<
1495 DA7213_DMIC_SAMPLEPHASE_SHIFT); 1495 DA7213_DMIC_SAMPLEPHASE_SHIFT);
1496 break; 1496 break;
1497 } 1497 }
1498 switch (pdata->dmic_data_sel) { 1498 switch (pdata->dmic_clk_rate) {
1499 case DA7213_DMIC_CLK_3_0MHZ: 1499 case DA7213_DMIC_CLK_3_0MHZ:
1500 case DA7213_DMIC_CLK_1_5MHZ: 1500 case DA7213_DMIC_CLK_1_5MHZ:
1501 dmic_cfg |= (pdata->dmic_data_sel << 1501 dmic_cfg |= (pdata->dmic_clk_rate <<
1502 DA7213_DMIC_CLK_RATE_SHIFT); 1502 DA7213_DMIC_CLK_RATE_SHIFT);
1503 break; 1503 break;
1504 } 1504 }
diff --git a/sound/soc/codecs/wm0010.c b/sound/soc/codecs/wm0010.c
index 8df2b6e1a1a6..370af0cbcc9a 100644
--- a/sound/soc/codecs/wm0010.c
+++ b/sound/soc/codecs/wm0010.c
@@ -667,6 +667,7 @@ static int wm0010_boot(struct snd_soc_codec *codec)
667 /* On wm0010 only the CLKCTRL1 value is used */ 667 /* On wm0010 only the CLKCTRL1 value is used */
668 pll_rec.clkctrl1 = wm0010->pll_clkctrl1; 668 pll_rec.clkctrl1 = wm0010->pll_clkctrl1;
669 669
670 ret = -ENOMEM;
670 len = pll_rec.length + 8; 671 len = pll_rec.length + 8;
671 out = kzalloc(len, GFP_KERNEL); 672 out = kzalloc(len, GFP_KERNEL);
672 if (!out) { 673 if (!out) {
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index 902fab02b851..c6fa03e2114a 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -540,11 +540,6 @@ static int imx_ssi_probe(struct platform_device *pdev)
540 clk_prepare_enable(ssi->clk); 540 clk_prepare_enable(ssi->clk);
541 541
542 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 542 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
543 if (!res) {
544 ret = -ENODEV;
545 goto failed_get_resource;
546 }
547
548 ssi->base = devm_ioremap_resource(&pdev->dev, res); 543 ssi->base = devm_ioremap_resource(&pdev->dev, res);
549 if (IS_ERR(ssi->base)) { 544 if (IS_ERR(ssi->base)) {
550 ret = PTR_ERR(ssi->base); 545 ret = PTR_ERR(ssi->base);
@@ -633,7 +628,6 @@ failed_pdev_fiq_alloc:
633 snd_soc_unregister_component(&pdev->dev); 628 snd_soc_unregister_component(&pdev->dev);
634failed_register: 629failed_register:
635 release_mem_region(res->start, resource_size(res)); 630 release_mem_region(res->start, resource_size(res));
636failed_get_resource:
637 clk_disable_unprepare(ssi->clk); 631 clk_disable_unprepare(ssi->clk);
638failed_clk: 632failed_clk:
639 633
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index befe68f59285..4c9dad3263c5 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -471,11 +471,6 @@ static int kirkwood_i2s_dev_probe(struct platform_device *pdev)
471 dev_set_drvdata(&pdev->dev, priv); 471 dev_set_drvdata(&pdev->dev, priv);
472 472
473 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 473 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
474 if (!mem) {
475 dev_err(&pdev->dev, "platform_get_resource failed\n");
476 return -ENXIO;
477 }
478
479 priv->io = devm_ioremap_resource(&pdev->dev, mem); 474 priv->io = devm_ioremap_resource(&pdev->dev, mem);
480 if (IS_ERR(priv->io)) 475 if (IS_ERR(priv->io))
481 return PTR_ERR(priv->io); 476 return PTR_ERR(priv->io);
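
Both the imx-ssi and kirkwood-i2s hunks drop the explicit NULL check on the platform resource because devm_ioremap_resource() already validates its argument (including a NULL resource), prints an error, and returns an ERR_PTR. The resulting probe pattern, sketched with placeholder names:

    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            /* res may be NULL; devm_ioremap_resource() handles that case */
            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* ...rest of probe, using base... */
            return 0;
    }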
diff --git a/sound/usb/proc.c b/sound/usb/proc.c
index 135c76871063..5f761ab34c01 100644
--- a/sound/usb/proc.c
+++ b/sound/usb/proc.c
@@ -116,21 +116,22 @@ static void proc_dump_substream_formats(struct snd_usb_substream *subs, struct s
116} 116}
117 117
118static void proc_dump_ep_status(struct snd_usb_substream *subs, 118static void proc_dump_ep_status(struct snd_usb_substream *subs,
119 struct snd_usb_endpoint *ep, 119 struct snd_usb_endpoint *data_ep,
120 struct snd_usb_endpoint *sync_ep,
120 struct snd_info_buffer *buffer) 121 struct snd_info_buffer *buffer)
121{ 122{
122 if (!ep) 123 if (!data_ep)
123 return; 124 return;
124 snd_iprintf(buffer, " Packet Size = %d\n", ep->curpacksize); 125 snd_iprintf(buffer, " Packet Size = %d\n", data_ep->curpacksize);
125 snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n", 126 snd_iprintf(buffer, " Momentary freq = %u Hz (%#x.%04x)\n",
126 subs->speed == USB_SPEED_FULL 127 subs->speed == USB_SPEED_FULL
127 ? get_full_speed_hz(ep->freqm) 128 ? get_full_speed_hz(data_ep->freqm)
128 : get_high_speed_hz(ep->freqm), 129 : get_high_speed_hz(data_ep->freqm),
129 ep->freqm >> 16, ep->freqm & 0xffff); 130 data_ep->freqm >> 16, data_ep->freqm & 0xffff);
130 if (ep->freqshift != INT_MIN) { 131 if (sync_ep && data_ep->freqshift != INT_MIN) {
131 int res = 16 - ep->freqshift; 132 int res = 16 - data_ep->freqshift;
132 snd_iprintf(buffer, " Feedback Format = %d.%d\n", 133 snd_iprintf(buffer, " Feedback Format = %d.%d\n",
133 (ep->syncmaxsize > 3 ? 32 : 24) - res, res); 134 (sync_ep->syncmaxsize > 3 ? 32 : 24) - res, res);
134 } 135 }
135} 136}
136 137
@@ -140,8 +141,7 @@ static void proc_dump_substream_status(struct snd_usb_substream *subs, struct sn
140 snd_iprintf(buffer, " Status: Running\n"); 141 snd_iprintf(buffer, " Status: Running\n");
141 snd_iprintf(buffer, " Interface = %d\n", subs->interface); 142 snd_iprintf(buffer, " Interface = %d\n", subs->interface);
142 snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx); 143 snd_iprintf(buffer, " Altset = %d\n", subs->altset_idx);
143 proc_dump_ep_status(subs, subs->data_endpoint, buffer); 144 proc_dump_ep_status(subs, subs->data_endpoint, subs->sync_endpoint, buffer);
144 proc_dump_ep_status(subs, subs->sync_endpoint, buffer);
145 } else { 145 } else {
146 snd_iprintf(buffer, " Status: Stop\n"); 146 snd_iprintf(buffer, " Status: Stop\n");
147 } 147 }
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py
index a4ffc9500023..b5740599aabd 100755
--- a/tools/perf/scripts/python/net_dropmonitor.py
+++ b/tools/perf/scripts/python/net_dropmonitor.py
@@ -15,35 +15,38 @@ kallsyms = []
15 15
16def get_kallsyms_table(): 16def get_kallsyms_table():
17 global kallsyms 17 global kallsyms
18
18 try: 19 try:
19 f = open("/proc/kallsyms", "r") 20 f = open("/proc/kallsyms", "r")
20 linecount = 0
21 for line in f:
22 linecount = linecount+1
23 f.seek(0)
24 except: 21 except:
25 return 22 return
26 23
27
28 j = 0
29 for line in f: 24 for line in f:
30 loc = int(line.split()[0], 16) 25 loc = int(line.split()[0], 16)
31 name = line.split()[2] 26 name = line.split()[2]
32 j = j +1 27 kallsyms.append((loc, name))
33 if ((j % 100) == 0):
34 print "\r" + str(j) + "/" + str(linecount),
35 kallsyms.append({ 'loc': loc, 'name' : name})
36
37 print "\r" + str(j) + "/" + str(linecount)
38 kallsyms.sort() 28 kallsyms.sort()
39 return
40 29
41def get_sym(sloc): 30def get_sym(sloc):
42 loc = int(sloc) 31 loc = int(sloc)
43 for i in kallsyms: 32
44 if (i['loc'] >= loc): 33 # Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
45 return (i['name'], i['loc']-loc) 34 # kallsyms[i][0] > loc for all end <= i < len(kallsyms)
46 return (None, 0) 35 start, end = -1, len(kallsyms)
36 while end != start + 1:
37 pivot = (start + end) // 2
38 if loc < kallsyms[pivot][0]:
39 end = pivot
40 else:
41 start = pivot
42
43 # Now (start == -1 or kallsyms[start][0] <= loc)
44 # and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
45 if start >= 0:
46 symloc, name = kallsyms[start]
47 return (name, loc - symloc)
48 else:
49 return (None, 0)
47 50
48def print_drop_table(): 51def print_drop_table():
49 print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") 52 print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
@@ -64,7 +67,7 @@ def trace_end():
64 67
 65# called from perf, when it finds a corresponding event 68
66def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, 69def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
67 skbaddr, protocol, location): 70 skbaddr, location, protocol):
68 slocation = str(location) 71 slocation = str(location)
69 try: 72 try:
70 drop_log[slocation] = drop_log[slocation] + 1 73 drop_log[slocation] = drop_log[slocation] + 1
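
The rewritten get_sym() replaces the linear scan with a lower-bound binary search over the sorted (address, name) pairs, and the invariant comments spell out why the loop ends on the last symbol at or below loc. The same search, transcribed into a small standalone C helper to keep the invariant visible (names are illustrative):

    #include <stddef.h>

    struct sym { unsigned long loc; const char *name; };

    /* Returns the index of the last entry with tab[i].loc <= loc,
     * or -1 if loc lies below every symbol in the (sorted) table. */
    static long find_sym(const struct sym *tab, size_t n, unsigned long loc)
    {
            long start = -1, end = (long)n;

            while (end != start + 1) {
                    long pivot = (start + end) / 2;

                    if (loc < tab[pivot].loc)
                            end = pivot;    /* tab[end..n-1].loc > loc */
                    else
                            start = pivot;  /* tab[0..start].loc <= loc */
            }
            return start;
    }

Also note the corrected tracepoint argument order in skb__kfree_skb(): the handler now matches the kernel's field order (skbaddr, location, protocol), which is what the rest of the script expects.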
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index d4abc59ce1d9..0a63658065f0 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -6,7 +6,6 @@ TARGETS += memory-hotplug
6TARGETS += mqueue 6TARGETS += mqueue
7TARGETS += net 7TARGETS += net
8TARGETS += ptrace 8TARGETS += ptrace
9TARGETS += soft-dirty
10TARGETS += vm 9TARGETS += vm
11 10
12all: 11all:
diff --git a/tools/testing/selftests/soft-dirty/Makefile b/tools/testing/selftests/soft-dirty/Makefile
deleted file mode 100644
index a9cdc823d6e0..000000000000
--- a/tools/testing/selftests/soft-dirty/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1CFLAGS += -iquote../../../../include/uapi -Wall
2soft-dirty: soft-dirty.c
3
4all: soft-dirty
5
6clean:
7 rm -f soft-dirty
8
9run_tests: all
10 @./soft-dirty || echo "soft-dirty selftests: [FAIL]"
diff --git a/tools/testing/selftests/soft-dirty/soft-dirty.c b/tools/testing/selftests/soft-dirty/soft-dirty.c
deleted file mode 100644
index aba4f87f87f0..000000000000
--- a/tools/testing/selftests/soft-dirty/soft-dirty.c
+++ /dev/null
@@ -1,114 +0,0 @@
1#include <stdlib.h>
2#include <stdio.h>
3#include <sys/mman.h>
4#include <unistd.h>
5#include <fcntl.h>
6#include <sys/types.h>
7
8typedef unsigned long long u64;
9
10#define PME_PRESENT (1ULL << 63)
11#define PME_SOFT_DIRTY (1Ull << 55)
12
13#define PAGES_TO_TEST 3
14#ifndef PAGE_SIZE
15#define PAGE_SIZE 4096
16#endif
17
18static void get_pagemap2(char *mem, u64 *map)
19{
20 int fd;
21
22 fd = open("/proc/self/pagemap2", O_RDONLY);
23 if (fd < 0) {
24 perror("Can't open pagemap2");
25 exit(1);
26 }
27
28 lseek(fd, (unsigned long)mem / PAGE_SIZE * sizeof(u64), SEEK_SET);
29 read(fd, map, sizeof(u64) * PAGES_TO_TEST);
30 close(fd);
31}
32
33static inline char map_p(u64 map)
34{
35 return map & PME_PRESENT ? 'p' : '-';
36}
37
38static inline char map_sd(u64 map)
39{
40 return map & PME_SOFT_DIRTY ? 'd' : '-';
41}
42
43static int check_pte(int step, int page, u64 *map, u64 want)
44{
45 if ((map[page] & want) != want) {
46 printf("Step %d Page %d has %c%c, want %c%c\n",
47 step, page,
48 map_p(map[page]), map_sd(map[page]),
49 map_p(want), map_sd(want));
50 return 1;
51 }
52
53 return 0;
54}
55
56static void clear_refs(void)
57{
58 int fd;
59 char *v = "4";
60
61 fd = open("/proc/self/clear_refs", O_WRONLY);
62 if (write(fd, v, 3) < 3) {
63 perror("Can't clear soft-dirty bit");
64 exit(1);
65 }
66 close(fd);
67}
68
69int main(void)
70{
71 char *mem, x;
72 u64 map[PAGES_TO_TEST];
73
74 mem = mmap(NULL, PAGES_TO_TEST * PAGE_SIZE,
75 PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0);
76
77 x = mem[0];
78 mem[2 * PAGE_SIZE] = 'c';
79 get_pagemap2(mem, map);
80
81 if (check_pte(1, 0, map, PME_PRESENT))
82 return 1;
83 if (check_pte(1, 1, map, 0))
84 return 1;
85 if (check_pte(1, 2, map, PME_PRESENT | PME_SOFT_DIRTY))
86 return 1;
87
88 clear_refs();
89 get_pagemap2(mem, map);
90
91 if (check_pte(2, 0, map, PME_PRESENT))
92 return 1;
93 if (check_pte(2, 1, map, 0))
94 return 1;
95 if (check_pte(2, 2, map, PME_PRESENT))
96 return 1;
97
98 mem[0] = 'a';
99 mem[PAGE_SIZE] = 'b';
100 x = mem[2 * PAGE_SIZE];
101 get_pagemap2(mem, map);
102
103 if (check_pte(3, 0, map, PME_PRESENT | PME_SOFT_DIRTY))
104 return 1;
105 if (check_pte(3, 1, map, PME_PRESENT | PME_SOFT_DIRTY))
106 return 1;
107 if (check_pte(3, 2, map, PME_PRESENT))
108 return 1;
109
110 (void)x; /* gcc warn */
111
112 printf("PASS\n");
113 return 0;
114}